Diffstat:
-rw-r--r--  src/mongo/gotools/.eslintrc.yml  192
-rw-r--r--  src/mongo/gotools/.gitattributes  2
-rw-r--r--  src/mongo/gotools/.gitignore  20
-rw-r--r--  src/mongo/gotools/CONTRIBUTING.md  60
-rw-r--r--  src/mongo/gotools/Godeps  13
-rw-r--r--  src/mongo/gotools/LICENSE.md  13
-rw-r--r--  src/mongo/gotools/README.md  45
-rw-r--r--  src/mongo/gotools/THIRD-PARTY-NOTICES  743
-rw-r--r--  src/mongo/gotools/binaryurl.py  70
-rw-r--r--  src/mongo/gotools/bsondump/bsondump.go  211
-rw-r--r--  src/mongo/gotools/bsondump/bsondump_test.go  156
-rw-r--r--  src/mongo/gotools/bsondump/main/bsondump.go  96
-rw-r--r--  src/mongo/gotools/bsondump/options.go  36
-rw-r--r--  src/mongo/gotools/bsondump/testdata/sample.bson  bin 0 -> 283 bytes
-rw-r--r--  src/mongo/gotools/bsondump/testdata/sample.json  4
-rw-r--r--  src/mongo/gotools/build.bat  16
-rwxr-xr-x  src/mongo/gotools/build.sh  29
-rw-r--r--  src/mongo/gotools/common.yml  1535
-rw-r--r--  src/mongo/gotools/common/archive/archive.go  55
-rw-r--r--  src/mongo/gotools/common/archive/demultiplexer.go  439
-rw-r--r--  src/mongo/gotools/common/archive/multiplexer.go  315
-rw-r--r--  src/mongo/gotools/common/archive/multiplexer_roundtrip_test.go  229
-rw-r--r--  src/mongo/gotools/common/archive/parser.go  152
-rw-r--r--  src/mongo/gotools/common/archive/parser_test.go  150
-rw-r--r--  src/mongo/gotools/common/archive/prelude.go  385
-rw-r--r--  src/mongo/gotools/common/archive/prelude_test.go  57
-rw-r--r--  src/mongo/gotools/common/auth/auth_info.go  63
-rw-r--r--  src/mongo/gotools/common/bsonutil/bsonutil.go  416
-rw-r--r--  src/mongo/gotools/common/bsonutil/converter.go  388
-rw-r--r--  src/mongo/gotools/common/bsonutil/converter_test.go  345
-rw-r--r--  src/mongo/gotools/common/bsonutil/date_test.go  169
-rw-r--r--  src/mongo/gotools/common/bsonutil/marshal_d.go  59
-rw-r--r--  src/mongo/gotools/common/bsonutil/marshal_d_test.go  124
-rw-r--r--  src/mongo/gotools/common/bsonutil/maxkey_test.go  38
-rw-r--r--  src/mongo/gotools/common/bsonutil/minkey_test.go  38
-rw-r--r--  src/mongo/gotools/common/bsonutil/number.go  18
-rw-r--r--  src/mongo/gotools/common/bsonutil/numberint_test.go  37
-rw-r--r--  src/mongo/gotools/common/bsonutil/numberlong_test.go  37
-rw-r--r--  src/mongo/gotools/common/bsonutil/objectid_test.go  38
-rw-r--r--  src/mongo/gotools/common/bsonutil/regexp_test.go  66
-rw-r--r--  src/mongo/gotools/common/bsonutil/timestamp_test.go  43
-rw-r--r--  src/mongo/gotools/common/bsonutil/undefined_test.go  38
-rw-r--r--  src/mongo/gotools/common/common.go  16
-rw-r--r--  src/mongo/gotools/common/db/bson_stream.go  140
-rw-r--r--  src/mongo/gotools/common/db/bson_stream_test.go  41
-rw-r--r--  src/mongo/gotools/common/db/buffered_bulk.go  79
-rw-r--r--  src/mongo/gotools/common/db/buffered_bulk_test.go  108
-rw-r--r--  src/mongo/gotools/common/db/command.go  210
-rw-r--r--  src/mongo/gotools/common/db/connector.go  52
-rw-r--r--  src/mongo/gotools/common/db/connector_sasl_test.go  60
-rw-r--r--  src/mongo/gotools/common/db/connector_test.go  134
-rw-r--r--  src/mongo/gotools/common/db/db.go  243
-rw-r--r--  src/mongo/gotools/common/db/db_gssapi.go  20
-rw-r--r--  src/mongo/gotools/common/db/db_ssl.go  20
-rw-r--r--  src/mongo/gotools/common/db/db_test.go  63
-rw-r--r--  src/mongo/gotools/common/db/kerberos/gssapi.go  58
-rw-r--r--  src/mongo/gotools/common/db/namespaces.go  159
-rw-r--r--  src/mongo/gotools/common/db/openssl/openssl.go  168
-rw-r--r--  src/mongo/gotools/common/db/openssl/openssl_fips.go  15
-rw-r--r--  src/mongo/gotools/common/db/openssl/testdata/ca.pem  34
-rw-r--r--  src/mongo/gotools/common/db/openssl/testdata/server.pem  32
-rw-r--r--  src/mongo/gotools/common/db/read_preferences.go  51
-rw-r--r--  src/mongo/gotools/common/db/testdata/testdata.bson  bin 0 -> 1800 bytes
-rw-r--r--  src/mongo/gotools/common/db/write_concern.go  123
-rw-r--r--  src/mongo/gotools/common/db/write_concern_test.go  166
-rw-r--r--  src/mongo/gotools/common/intents/intent.go  466
-rw-r--r--  src/mongo/gotools/common/intents/intent_prioritizer.go  241
-rw-r--r--  src/mongo/gotools/common/intents/intent_prioritizer_test.go  174
-rw-r--r--  src/mongo/gotools/common/intents/intent_test.go  81
-rw-r--r--  src/mongo/gotools/common/json/bench_test.go  189
-rw-r--r--  src/mongo/gotools/common/json/bindata.go  67
-rw-r--r--  src/mongo/gotools/common/json/bindata_test.go  89
-rw-r--r--  src/mongo/gotools/common/json/boolean.go  73
-rw-r--r--  src/mongo/gotools/common/json/boolean_test.go  368
-rw-r--r--  src/mongo/gotools/common/json/constructor.go  117
-rw-r--r--  src/mongo/gotools/common/json/consts.go  7
-rw-r--r--  src/mongo/gotools/common/json/csv_format.go  84
-rw-r--r--  src/mongo/gotools/common/json/date.go  80
-rw-r--r--  src/mongo/gotools/common/json/date_test.go  99
-rw-r--r--  src/mongo/gotools/common/json/dbpointer.go  71
-rw-r--r--  src/mongo/gotools/common/json/dbpointer_test.go  84
-rw-r--r--  src/mongo/gotools/common/json/dbref.go  69
-rw-r--r--  src/mongo/gotools/common/json/dbref_test.go  347
-rw-r--r--  src/mongo/gotools/common/json/decode.go  1273
-rw-r--r--  src/mongo/gotools/common/json/decode_d_test.go  118
-rw-r--r--  src/mongo/gotools/common/json/decode_test.go  1364
-rw-r--r--  src/mongo/gotools/common/json/encode.go  1186
-rw-r--r--  src/mongo/gotools/common/json/encode_test.go  453
-rw-r--r--  src/mongo/gotools/common/json/example_test.go  161
-rw-r--r--  src/mongo/gotools/common/json/float_test.go  93
-rw-r--r--  src/mongo/gotools/common/json/fold.go  143
-rw-r--r--  src/mongo/gotools/common/json/fold_test.go  116
-rw-r--r--  src/mongo/gotools/common/json/frac_test.go  98
-rw-r--r--  src/mongo/gotools/common/json/helpers.go  76
-rw-r--r--  src/mongo/gotools/common/json/hex.go  13
-rw-r--r--  src/mongo/gotools/common/json/hex_test.go  117
-rw-r--r--  src/mongo/gotools/common/json/indent.go  137
-rw-r--r--  src/mongo/gotools/common/json/infinity.go  13
-rw-r--r--  src/mongo/gotools/common/json/infinity_test.go  98
-rw-r--r--  src/mongo/gotools/common/json/iso_date.go  45
-rw-r--r--  src/mongo/gotools/common/json/iso_date_test.go  124
-rw-r--r--  src/mongo/gotools/common/json/json_format.go  163
-rw-r--r--  src/mongo/gotools/common/json/maxkey.go  13
-rw-r--r--  src/mongo/gotools/common/json/maxkey_test.go  180
-rw-r--r--  src/mongo/gotools/common/json/minkey.go  13
-rw-r--r--  src/mongo/gotools/common/json/minkey_test.go  184
-rw-r--r--  src/mongo/gotools/common/json/mongo_extjson.go  392
-rw-r--r--  src/mongo/gotools/common/json/nan.go  13
-rw-r--r--  src/mongo/gotools/common/json/nan_test.go  90
-rw-r--r--  src/mongo/gotools/common/json/new.go  92
-rw-r--r--  src/mongo/gotools/common/json/new_test.go  197
-rw-r--r--  src/mongo/gotools/common/json/number.go  136
-rw-r--r--  src/mongo/gotools/common/json/number_test.go  191
-rw-r--r--  src/mongo/gotools/common/json/objectid.go  55
-rw-r--r--  src/mongo/gotools/common/json/objectid_test.go  84
-rw-r--r--  src/mongo/gotools/common/json/regexp.go  275
-rw-r--r--  src/mongo/gotools/common/json/regexp_test.go  243
-rw-r--r--  src/mongo/gotools/common/json/scanner.go  669
-rw-r--r--  src/mongo/gotools/common/json/scanner_test.go  315
-rw-r--r--  src/mongo/gotools/common/json/single_quoted.go  74
-rw-r--r--  src/mongo/gotools/common/json/single_quoted_test.go  156
-rw-r--r--  src/mongo/gotools/common/json/stream.go  243
-rw-r--r--  src/mongo/gotools/common/json/stream_test.go  206
-rw-r--r--  src/mongo/gotools/common/json/tagkey_test.go  110
-rw-r--r--  src/mongo/gotools/common/json/tags.go  44
-rw-r--r--  src/mongo/gotools/common/json/tags_test.go  28
-rw-r--r--  src/mongo/gotools/common/json/testdata/code.json.gz  bin 0 -> 120432 bytes
-rw-r--r--  src/mongo/gotools/common/json/timestamp.go  67
-rw-r--r--  src/mongo/gotools/common/json/timestamp_test.go  85
-rw-r--r--  src/mongo/gotools/common/json/undefined.go  13
-rw-r--r--  src/mongo/gotools/common/json/undefined_test.go  89
-rw-r--r--  src/mongo/gotools/common/json/unquoted.go  31
-rw-r--r--  src/mongo/gotools/common/json/unquoted_test.go  129
-rw-r--r--  src/mongo/gotools/common/log/tool_logger.go  156
-rw-r--r--  src/mongo/gotools/common/log/tool_logger_test.go  125
-rw-r--r--  src/mongo/gotools/common/options/options.go  346
-rw-r--r--  src/mongo/gotools/common/options/options_gssapi.go  12
-rw-r--r--  src/mongo/gotools/common/options/options_ssl.go  18
-rw-r--r--  src/mongo/gotools/common/options/options_test.go  75
-rw-r--r--  src/mongo/gotools/common/password/pass_util.go  22
-rw-r--r--  src/mongo/gotools/common/password/pass_util_solaris.go  107
-rw-r--r--  src/mongo/gotools/common/password/password.go  63
-rw-r--r--  src/mongo/gotools/common/progress/manager.go  138
-rw-r--r--  src/mongo/gotools/common/progress/manager_test.go  218
-rw-r--r--  src/mongo/gotools/common/progress/progress_bar.go  236
-rw-r--r--  src/mongo/gotools/common/progress/progress_bar_test.go  186
-rw-r--r--  src/mongo/gotools/common/signals/signals.go  51
-rw-r--r--  src/mongo/gotools/common/testutil/auth.go  76
-rw-r--r--  src/mongo/gotools/common/testutil/kerberos.go  42
-rw-r--r--  src/mongo/gotools/common/testutil/ssl_integration.go  19
-rw-r--r--  src/mongo/gotools/common/testutil/testutil.go  2
-rw-r--r--  src/mongo/gotools/common/testutil/types.go  57
-rw-r--r--  src/mongo/gotools/common/text/grid.go  165
-rw-r--r--  src/mongo/gotools/common/text/grid_test.go  104
-rw-r--r--  src/mongo/gotools/common/text/units.go  68
-rw-r--r--  src/mongo/gotools/common/text/units_test.go  102
-rw-r--r--  src/mongo/gotools/common/util/bool.go  32
-rw-r--r--  src/mongo/gotools/common/util/bool_test.go  58
-rw-r--r--  src/mongo/gotools/common/util/exit_code.go  17
-rw-r--r--  src/mongo/gotools/common/util/file.go  34
-rw-r--r--  src/mongo/gotools/common/util/format_date.go  30
-rw-r--r--  src/mongo/gotools/common/util/format_date_test.go  49
-rw-r--r--  src/mongo/gotools/common/util/math.go  78
-rw-r--r--  src/mongo/gotools/common/util/math_test.go  89
-rw-r--r--  src/mongo/gotools/common/util/mongo.go  187
-rw-r--r--  src/mongo/gotools/common/util/mongo_test.go  110
-rw-r--r--  src/mongo/gotools/common/util/slice.go  60
-rw-r--r--  src/mongo/gotools/common/util/strings.go  12
-rw-r--r--  src/mongo/gotools/common/util/util.go  2
-rw-r--r--  src/mongo/gotools/mongodump/main/mongodump.go  75
-rw-r--r--  src/mongo/gotools/mongodump/metadata_dump.go  107
-rw-r--r--  src/mongo/gotools/mongodump/mongodump.go  803
-rw-r--r--  src/mongo/gotools/mongodump/mongodump_test.go  616
-rw-r--r--  src/mongo/gotools/mongodump/oplog_dump.go  91
-rw-r--r--  src/mongo/gotools/mongodump/options.go  62
-rw-r--r--  src/mongo/gotools/mongodump/prepare.go  457
-rw-r--r--  src/mongo/gotools/mongodump/prepare_test.go  51
-rw-r--r--  src/mongo/gotools/mongoexport/csv.go  148
-rw-r--r--  src/mongo/gotools/mongoexport/csv_test.go  139
-rw-r--r--  src/mongo/gotools/mongoexport/json.go  109
-rw-r--r--  src/mongo/gotools/mongoexport/json_test.go  73
-rw-r--r--  src/mongo/gotools/mongoexport/kerberos_test.go  44
-rw-r--r--  src/mongo/gotools/mongoexport/main/mongoexport.go  143
-rw-r--r--  src/mongo/gotools/mongoexport/mongoexport.go  425
-rw-r--r--  src/mongo/gotools/mongoexport/mongoexport_test.go  44
-rw-r--r--  src/mongo/gotools/mongoexport/options.go  79
-rw-r--r--  src/mongo/gotools/mongofiles/main/mongofiles.go  76
-rw-r--r--  src/mongo/gotools/mongofiles/mongofiles.go  407
-rw-r--r--  src/mongo/gotools/mongofiles/mongofiles_test.go  536
-rw-r--r--  src/mongo/gotools/mongofiles/options.go  53
-rw-r--r--  src/mongo/gotools/mongofiles/testdata/lorem_ipsum_287613_bytes.txt  893
-rw-r--r--  src/mongo/gotools/mongoimport/common.go  472
-rw-r--r--  src/mongo/gotools/mongoimport/common_test.go  600
-rw-r--r--  src/mongo/gotools/mongoimport/csv.go  151
-rw-r--r--  src/mongo/gotools/mongoimport/csv/reader.go  363
-rw-r--r--  src/mongo/gotools/mongoimport/csv_test.go  354
-rw-r--r--  src/mongo/gotools/mongoimport/dateconv/dateconv.go  77
-rw-r--r--  src/mongo/gotools/mongoimport/json.go  239
-rw-r--r--  src/mongo/gotools/mongoimport/json_test.go  264
-rw-r--r--  src/mongo/gotools/mongoimport/main/mongoimport.go  86
-rw-r--r--  src/mongo/gotools/mongoimport/mongoimport.go  575
-rw-r--r--  src/mongo/gotools/mongoimport/mongoimport_test.go  757
-rw-r--r--  src/mongo/gotools/mongoimport/options.go  79
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test.csv  3
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test.tsv  3
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_array.json  31
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_bad.csv  3
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_blanks.csv  3
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_bom.csv  2
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_bom.json  2
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_bom.tsv  1
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_duplicate.csv  5
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_fields_invalid.txt  3
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_fields_valid.txt  3
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_plain.json  3
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_plain2.json  10
-rw-r--r--  src/mongo/gotools/mongoimport/testdata/test_type.csv  4
-rw-r--r--  src/mongo/gotools/mongoimport/tsv.go  163
-rw-r--r--  src/mongo/gotools/mongoimport/tsv_test.go  232
-rw-r--r--  src/mongo/gotools/mongoimport/typed_fields.go  284
-rw-r--r--  src/mongo/gotools/mongoimport/typed_fields_test.go  407
-rw-r--r--  src/mongo/gotools/mongooplog/main/mongooplog.go  94
-rw-r--r--  src/mongo/gotools/mongooplog/mongooplog.go  151
-rw-r--r--  src/mongo/gotools/mongooplog/mongooplog_test.go  135
-rw-r--r--  src/mongo/gotools/mongooplog/options.go  23
-rw-r--r--  src/mongo/gotools/mongorestore/filepath.go  644
-rw-r--r--  src/mongo/gotools/mongorestore/filepath_test.go  337
-rw-r--r--  src/mongo/gotools/mongorestore/main/mongorestore.go  120
-rw-r--r--  src/mongo/gotools/mongorestore/metadata.go  524
-rw-r--r--  src/mongo/gotools/mongorestore/metadata_test.go  177
-rw-r--r--  src/mongo/gotools/mongorestore/mongorestore.go  540
-rw-r--r--  src/mongo/gotools/mongorestore/mongorestore_test.go  88
-rw-r--r--  src/mongo/gotools/mongorestore/ns/ns.go  235
-rw-r--r--  src/mongo/gotools/mongorestore/ns/ns_test.go  111
-rw-r--r--  src/mongo/gotools/mongorestore/oplog.go  173
-rw-r--r--  src/mongo/gotools/mongorestore/oplog_test.go  116
-rw-r--r--  src/mongo/gotools/mongorestore/options.go  68
-rw-r--r--  src/mongo/gotools/mongorestore/restore.go  320
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/auth_version_3.bson  bin 0 -> 45 bytes
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/auth_version_5.bson  bin 0 -> 45 bytes
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/testdirs/badfile.txt  0
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/testdirs/db1/baddir/out.bson  0
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/testdirs/db1/c1.bson  bin 0 -> 3300 bytes
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/testdirs/db1/c1.metadata.json  1
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/testdirs/db1/c2.bson  0
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/testdirs/db1/c3.bson  0
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/testdirs/db1/c3.metadata.json  0
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/testdirs/db2/c1.bin  0
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/testdirs/db2/c2.txt  0
-rw-r--r--  src/mongo/gotools/mongorestore/testdata/testdirs/oplog.bson  0
-rw-r--r--  src/mongo/gotools/mongostat/main/mongostat.go  236
-rw-r--r--  src/mongo/gotools/mongostat/mongostat.go  380
-rw-r--r--  src/mongo/gotools/mongostat/mongostat_test.go  122
-rw-r--r--  src/mongo/gotools/mongostat/options.go  27
-rw-r--r--  src/mongo/gotools/mongostat/stat_consumer/formatter.go  34
-rw-r--r--  src/mongo/gotools/mongostat/stat_consumer/grid_line_formatter.go  107
-rw-r--r--  src/mongo/gotools/mongostat/stat_consumer/interactive_line_formatter.go  231
-rw-r--r--  src/mongo/gotools/mongostat/stat_consumer/json_line_formatter.go  60
-rw-r--r--  src/mongo/gotools/mongostat/stat_consumer/line/line.go  45
-rw-r--r--  src/mongo/gotools/mongostat/stat_consumer/line/stat_headers.go  138
-rw-r--r--  src/mongo/gotools/mongostat/stat_consumer/stat_consumer.go  81
-rw-r--r--  src/mongo/gotools/mongostat/status/readers.go  521
-rw-r--r--  src/mongo/gotools/mongostat/status/server_status.go  231
-rw-r--r--  src/mongo/gotools/mongostat/test_data/server_status_new.bson  bin 0 -> 1689 bytes
-rw-r--r--  src/mongo/gotools/mongostat/test_data/server_status_old.bson  bin 0 -> 1689 bytes
-rw-r--r--  src/mongo/gotools/mongotop/command.go  235
-rw-r--r--  src/mongo/gotools/mongotop/main/mongotop.go  111
-rw-r--r--  src/mongo/gotools/mongotop/mongotop.go  125
-rw-r--r--  src/mongo/gotools/mongotop/options.go  19
-rwxr-xr-x  src/mongo/gotools/mongotop/smoke.sh  33
-rwxr-xr-x  src/mongo/gotools/set_gopath.bat  6
-rw-r--r--  src/mongo/gotools/set_gopath.ps1  9
-rwxr-xr-x  src/mongo/gotools/set_gopath.sh  42
-rw-r--r--  src/mongo/gotools/test/legacy24/buildscripts/buildlogger.py  480
-rw-r--r--  src/mongo/gotools/test/legacy24/buildscripts/cleanbb.py  105
-rwxr-xr-x  src/mongo/gotools/test/legacy24/buildscripts/smoke.py  1314
-rw-r--r--  src/mongo/gotools/test/legacy24/buildscripts/utils.py  230
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/libs/use_extended_timeout.js  12
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/replsets/rslib.js  115
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/csv1.js  42
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/csvexport1.js  47
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/csvexport2.js  31
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/csvimport1.js  40
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/data/a.tsv  2
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/data/csvimport1.csv  8
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/data/dumprestore6/foo.bson  bin 0 -> 44 bytes
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/data/dumprestore6/system.indexes.bson  bin 0 -> 144 bytes
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumpauth.js  29
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumpfilename1.js  13
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumprestore1.js  23
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumprestore10.js  63
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumprestore3.js  60
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumprestore4.js  42
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumprestore6.js  27
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumprestore7.js  62
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumprestore8.js  105
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumprestore9.js  79
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumprestoreWithNoOptions.js  117
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumprestore_auth.js  28
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/dumpsecondary.js  38
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/exportimport1.js  66
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/exportimport3.js  27
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/exportimport4.js  56
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/exportimport5.js  81
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/files1.js  27
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/oplog1.js  29
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/oplog_all_ops.js  61
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/restorewithauth.js  113
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/stat1.js  23
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/tool_replset.js  89
-rw-r--r--  src/mongo/gotools/test/legacy24/jstests/tool/tsv1.js  32
-rw-r--r--  src/mongo/gotools/test/legacy26/buildscripts/buildlogger.py  480
-rw-r--r--  src/mongo/gotools/test/legacy26/buildscripts/cleanbb.py  105
-rwxr-xr-x  src/mongo/gotools/test/legacy26/buildscripts/smoke.py  1314
-rw-r--r--  src/mongo/gotools/test/legacy26/buildscripts/utils.py  230
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/authTestsKey  1
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/ca.pem  17
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/client.pem  101
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/client_revoked.pem  34
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/cluster-cert.pem  101
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/command_line/test_parsed_options.js  202
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/disable_noscripting.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_auth.json  5
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_autosplit.json  5
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_httpinterface.json  7
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_indexbuildretry.json  5
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_journal.json  7
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_objcheck.json  5
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_paranoia.json  5
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_prealloc.json  5
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_scripting.json  5
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_unixsocket.json  7
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_profiling.json  5
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_replsetname.json  5
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_shardingrole.json  5
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_verbosity.json  5
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/crl.pem  10
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/crl_client_revoked.pem  12
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/crl_expired.pem  10
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_first.journal  bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_last.journal  bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_good.journal  bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/fts.js  18
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/fun.js  32
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/geo_near_random.js  99
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/grid.js  171
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/key1  1
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/key2  1
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/localhostnameCN.pem  101
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/localhostnameSAN.pem  100
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/mockkrb5.conf  13
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/mockservice.keytab  bin 0 -> 442 bytes
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/mockuser.keytab  bin 0 -> 340 bytes
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/network.js  37
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/parallelTester.js  259
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/password_protected.pem  51
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/server.pem  34
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/slow_weekly_util.js  20
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/smoke.pem  50
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/test_background_ops.js  340
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/testconfig  4
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/testconfig.json  4
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/trace_missing_docs.js  90
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/libs/use_extended_timeout.js  12
-rwxr-xr-x  src/mongo/gotools/test/legacy26/jstests/misc/biginsert.js  18
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/replsets/rslib.js  115
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/csv1.js  42
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/csvexport1.js  65
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/csvexport2.js  31
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/csvimport1.js  40
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/data/a.tsv  2
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/data/csvimport1.csv  8
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/foo.bson  bin 0 -> 44 bytes
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/system.indexes.bson  bin 0 -> 144 bytes
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumpauth.js  27
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumpfilename1.js  14
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestore1.js  23
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestore10.js  63
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestore3.js  60
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestore4.js  42
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestore6.js  27
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestore7.js  66
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestore8.js  105
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestore9.js  79
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js  107
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth.js  35
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth2.js  96
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth3.js  199
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/dumpsecondary.js  38
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/exportimport1.js  66
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/exportimport3.js  27
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/exportimport4.js  57
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/exportimport5.js  82
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/exportimport6.js  26
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/exportimport_bigarray.js  62
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/exportimport_date.js  49
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/files1.js  27
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/oplog1.js  26
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/oplog_all_ops.js  61
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/restorewithauth.js  113
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/stat1.js  22
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/tool1.js  44
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/tool_replset.js  89
-rw-r--r--  src/mongo/gotools/test/legacy26/jstests/tool/tsv1.js  32
-rw-r--r--  src/mongo/gotools/test/legacy28/buildscripts/buildlogger.py  479
-rw-r--r--  src/mongo/gotools/test/legacy28/buildscripts/cleanbb.py  105
-rwxr-xr-x  src/mongo/gotools/test/legacy28/buildscripts/smoke.py  1447
-rw-r--r--  src/mongo/gotools/test/legacy28/buildscripts/utils.py  235
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/analyze_plan.js  80
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/authTestsKey  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/badSAN.pem  48
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/ca.pem  48
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/client.pem  48
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/client_revoked.pem  48
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/cluster_cert.pem  48
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/command_line/test_parsed_options.js  214
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_auth.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_dur.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_httpinterface.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_ipv6.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_journal.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_jsonp.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_jsonp.json  7
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_moveparanoia.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noauth.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noautosplit.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nodur.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nohttpinterface.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noindexbuildretry.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nojournal.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nomoveparanoia.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noobjcheck.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noprealloc.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noscripting.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nounixsocket.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_objcheck.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_rest_interface.json  7
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_auth.json  5
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_autosplit.json  5
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_httpinterface.json  7
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_indexbuildretry.json  5
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_journal.json  7
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_objcheck.json  5
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_paranoia.json  5
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_prealloc.json  5
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_scripting.json  5
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_unixsocket.json  7
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_dur.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_journal.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_nodur.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_nojournal.ini  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_component_verbosity.json  16
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_profiling.json  5
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_replsetname.json  5
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_shardingrole.json  5
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_verbosity.json  5
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/crl.pem  38
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/crl_client_revoked.pem  41
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/crl_expired.pem  38
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_bad_first.journal  bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_bad_last.journal  bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_good.journal  bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/expired.pem  48
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/fts.js  18
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/geo_near_random.js  101
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/host_ipaddr.js  38
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/key1  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/key2  1
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/localhostnameCN.pem  48
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/localhostnameSAN.pem  49
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/mockkrb5.conf  13
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/mockservice.keytab  bin 0 -> 442 bytes
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/mockuser.keytab  bin 0 -> 340 bytes
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/not_yet_valid.pem  48
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/parallelTester.js  259
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/password_protected.pem  51
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/server.pem  48
-rwxr-xr-x  src/mongo/gotools/test/legacy28/jstests/libs/servers.js  961
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/servers_misc.js  357
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/slow_weekly_util.js  20
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/smoke.pem  48
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/test_background_ops.js  340
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/testconfig  6
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/testconfig.json  4
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/libs/trace_missing_docs.js  90
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/replsets/rslib.js  115
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/csv1.js  43
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/csvexport1.js  65
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/csvexport2.js  32
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/csvimport1.js  41
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/data/a.tsv  2
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/data/csvimport1.csv  8
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/data/dumprestore6/foo.bson  bin 0 -> 44 bytes
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/data/dumprestore6/system.indexes.bson  bin 0 -> 144 bytes
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumpauth.js  29
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumpfilename1.js  13
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore1.js  32
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore10.js  64
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore3.js  61
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore4.js  43
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore6.js  54
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore7.js  66
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore8.js  107
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore9.js  79
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestoreWithNoOptions.js  114
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth.js  117
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth2.js  98
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth3.js  200
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_excludecollections.js  112
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/dumpsecondary.js  39
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/exportimport1.js  67
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/exportimport3.js  28
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/exportimport4.js  57
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/exportimport5.js  82
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/exportimport6.js  27
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/exportimport_bigarray.js  59
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/exportimport_date.js  50
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/exportimport_minkey_maxkey.js  38
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/files1.js  28
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/oplog1.js  29
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/oplog_all_ops.js  62
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/restorewithauth.js  117
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/stat1.js  18
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/tool1.js  44
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/tool_replset.js  89
-rw-r--r--  src/mongo/gotools/test/legacy28/jstests/tool/tsv1.js  33
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/__init__.py  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/buildlogger.py  479
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/cleanbb.py  105
-rwxr-xr-x  src/mongo/gotools/test/qa-tests/buildscripts/resmoke.py  216
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/__init__.py  4
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py  36
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml  13
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml  13
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml  19
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml  10
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py  36
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/core.yml  27
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml  38
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml  23
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml  21
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/__init__.py  7
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/config.py  165
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/__init__.py  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/network.py  114
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/pipe.py  87
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/process.py  234
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/programs.py  311
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/errors.py  52
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/__init__.py  14
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py  284
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/config.py  161
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/flush.py  97
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/formatters.py  50
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/handlers.py  178
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/loggers.py  37
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/parser.py  368
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/selector.py  291
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/__init__.py  9
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/executor.py  307
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py  32
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py  128
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py  209
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py  211
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py  347
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py  151
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/hooks.py  704
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/job.py  195
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/report.py  330
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/suite.py  140
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/summary.py  22
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/testcases.py  407
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/testgroup.py  132
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/__init__.py  88
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/globstar.py  202
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/jscomment.py  78
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/queue.py  52
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/timer.py  125
-rwxr-xr-x  src/mongo/gotools/test/qa-tests/buildscripts/setup_multiversion_mongodb.py  291
-rwxr-xr-x  src/mongo/gotools/test/qa-tests/buildscripts/smoke.py  1451
-rw-r--r--  src/mongo/gotools/test/qa-tests/buildscripts/utils.py  235
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/all_types.js  33
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/all_types_json.js  29
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/bad_files.js  41
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/bsondump_options.js  57
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/deep_nested.js  8
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/output_file.js  71
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/testdata/all_types.bson  bin 0 -> 1090 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/testdata/bad_cstring.bson  bin 0 -> 283 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/testdata/bad_type.bson  bin 0 -> 283 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/testdata/broken_array.bson  bin 0 -> 1090 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/testdata/deep_nested.bson  bin 0 -> 5022 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/testdata/invalid_field_name.bson  bin 0 -> 284 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/testdata/partial_file.bson  bin 0 -> 177 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/testdata/random_bytes.bson  bin 0 -> 1024 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/bson/testdata/sample.bson  bin 0 -> 283 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/common/check_version.js  47
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/common/topology_helper.js  164
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/archive_targets.js  32
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/auth_28.config.js  38
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/gzip_targets.js  36
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/kerberos.config.yml  7
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/kerberos.linux.sh  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/kerberos_28.config.js  39
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/kerberos_28_windows.config.js  59
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/plain_26.config.js  19
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/plain_28.config.js  21
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/replset_28.config.js  39
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/replset_auth_28.config.js  58
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/sharding_28.config.js  40
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/ssl_28.config.js  26
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/configs/standard_dump_targets.config.js  30
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/bad_options.js  47
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/basic_data.js  58
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/data_types.js  70
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/field_file.js  60
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/fields_csv.js  173
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/fields_json.js  92
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/force_table_scan.js  111
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/json_array.js  55
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/limit.js  59
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/nested_fields_csv.js  65
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/no_data.js  21
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/pretty.js  33
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/query.js  198
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/slave_ok.js  61
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/sort_and_skip.js  67
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/stdout.js  40
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/testdata/query.json  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/testdata/simple_field_file  2
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/export/type_case.js  115
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_db.js  61
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_delete.js  47
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_get.js  81
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_host.js  59
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_invalid.js  37
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_list.js  96
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_local.js  102
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_port.js  52
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_prefix.js  49
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_put.js  108
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_replace.js  79
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_search.js  110
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_type.js  63
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_version.js  29
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_write_concern.js  54
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_write_concern_mongos.js  59
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/testdata/files1.txt  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/testdata/files2.txt  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/testdata/files3.txt  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/files/util/mongofiles_common.js  10
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/all_primaries_down_error_code.js  65
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/boolean_type.js  57
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/collections.js  77
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/decimal128.js  44
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/drop.js  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/fields.js  107
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/import_document_validation.js  108
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/import_types.js  75
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/import_write_concern.js  71
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/import_write_concern_mongos.js  76
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/no_primary_error_code.js  65
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/options.js  123
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/parse_grace.js  121
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/replset.js  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/stoponerror.js  40
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/basic.json  36
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/boolean.json  19
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/csv_header.csv  4
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/csv_noheader.csv  3
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/dupes.json  34
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/extrafields.csv  3
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/fieldfile  4
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/parse_grace.csv  4
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/tsv_header.tsv  4
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/tsv_noheader.tsv  3
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_extrafields.csv  3
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_extrafields.tsv  3
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_header.csv  3
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_header.tsv  3
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_noheader.csv  2
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_noheader.tsv  2
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/typedfieldfile  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/types.json  27
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert1.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert2.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert3.json  2000
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/type_case.js  98
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/typed_fields.js  114
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/types.js  117
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/upsert.js  62
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/import/upsert_id_subdoc.js  82
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/.eslintrc.yml  3
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/analyze_plan.js  76
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/authTestsKey  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/badSAN.pem  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/ca.pem  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/client.pem  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/client_revoked.pem  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/cluster_cert.pem  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/command_line/test_parsed_options.js  213
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_auth.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_dur.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_httpinterface.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_ipv6.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_journal.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_jsonp.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_jsonp.json  7
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_moveparanoia.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noauth.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noautosplit.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nodur.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nohttpinterface.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noindexbuildretry.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nojournal.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nomoveparanoia.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noobjcheck.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noprealloc.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noscripting.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nounixsocket.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_objcheck.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_rest_interface.json  7
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_auth.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_autosplit.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_httpinterface.json  7
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_indexbuildretry.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_journal.json  7
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_objcheck.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_paranoia.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_prealloc.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_scripting.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_unixsocket.json  7
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_dur.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_journal.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nodur.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nojournal.ini  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_component_verbosity.json  16
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_profiling.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_replsetname.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_shardingrole.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_verbosity.json  5
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/crl.pem  38
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/crl_client_revoked.pem  41
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/crl_expired.pem  38
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_bad_first.journal  bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_bad_last.journal  bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_good.journal  bin 0 -> 32768 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/expired.pem  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/extended_assert.js  61
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/fts.js  22
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/geo_near_random.js  100
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/host_ipaddr.js  38
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/key1  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/key2  1
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/localhostnameCN.pem  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/localhostnameSAN.pem  49
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/mockkrb5.conf  13
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/mockservice.keytab  bin 0 -> 442 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/mockuser.keytab  bin 0 -> 340 bytes
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/mongostat.js  114
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/not_yet_valid.pem  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/parallelTester.js  268
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/password_protected.pem  51
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/server.pem  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/servers.js  1079
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/servers_misc.js  379
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/slow_weekly_util.js  25
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/smoke.pem  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/test_background_ops.js  334
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/testconfig  6
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/testconfig.json  4
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/trace_missing_docs.js  99
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/libs/wc_framework.js  72
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/oplog/all_ops_tests.js  164
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/oplog/apply_ops_tests.js  137
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/oplog/asymmetric_ssl_test.js  48
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/oplog/deprecated_flags_tests.js  31
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/oplog/drop_database_test.js  97
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/oplog/informational_flags_test.js  28
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/oplog/oplog_server_ko_test.js  52
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/oplog/real_oplog_tests.js  75
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/oplog/unreachable_host_tests.js  46
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/15k_collections.js  38
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/24_to_28.js  70
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/26_to_28.js  65
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/28_to_26.js  66
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/archive_stdout.js  50
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/bad_options.js  54
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/blank_collection_bson.js  43
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/blank_db.js  29
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/conflicting_auth_schema_version.js  136
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/different_collection.js  89
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/different_db.js  84
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/drop_authenticated_user.js  107
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/drop_nonexistent_db.js  56
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/drop_one_collection.js  86
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/drop_with_data.js  73
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/duplicate_keys.js  73
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/empty_users_and_roles.js  33
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/extended_json_metadata.js  42
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/indexes.js  96
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/invalid_dump_target.js  32
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/invalid_metadata.js  22
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/keep_index_version.js  88
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/large_bulk.js  52
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/malformed_bson.js  20
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/malformed_metadata.js  22
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/missing_dump.js  32
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/multiple_dbs.js  72
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/namespaces.js  152
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/no_index_restore.js  75
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/no_options_restore.js  125
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/nonempty_temp_users.js  46
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/norestore_profile.js  56
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/objcheck_valid_bson.js  44
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_and_limit.js  78
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_conflict.js  33
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_local_main.js  60
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_local_rs.js  67
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_no_oplog.js  19
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_noop.js  37
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_priority_oplog.js  40
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_size_safety.js  65
-rw-r--r--  src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_specify_file.js  68
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/partial_restore.js77
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/preserve_oplog_structure_order.js25
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/restore_document_validation.js178
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/sharded_fullrestore.js43
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/stop_on_error.js48
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/symlinks.js46
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank.bson0
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.bson0
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.metadata.json1
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankdb/README1
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_empty/db_empty/coll_empty.bson0
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.bsonbin0 -> 429 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.metadata.json1
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.bsonbin0 -> 9190 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.metadata.json1
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_oplog_conflict/oplog.bsonbin0 -> 525 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_complex_id_oplog/oplog.bsonbin0 -> 269 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.bson0
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.metadata.json1
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_coll.bson1
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.bson0
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.metadata.json1
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/missing_metadata.bson0
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_noop_in_oplog/oplog.bsonbin0 -> 370 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/oplog.bsonbin0 -> 525 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.bsonbin0 -> 180 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.metadata.json1
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/system.indexes.bsonbin0 -> 64 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.bsonbin0 -> 220 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.metadata.json1
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/system.indexes.bsonbin0 -> 65 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.bsonbin0 -> 220 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.metadata.json1
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/system.indexes.bsonbin0 -> 65 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/not_a_dirbin0 -> 525 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/extra_oplog.bsonbin0 -> 525 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_collection.bsonbin0 -> 220 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.bsonbin0 -> 220 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.metadata.json1
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/system.indexes.bsonbin0 -> 65 bytes
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles.js85
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_26_to_28_to_26.js143
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_28_to_26.js157
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_full_dump.js142
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_temp_collections.js104
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/write_concern.js64
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/restore/write_concern_mongos.js69
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/stat/stat_auth.js31
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/stat/stat_custom_headers.js107
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/stat/stat_discover.js60
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/stat/stat_discover_shard.js14
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/stat/stat_header.js27
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/stat/stat_mixed_storage_engine.js45
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/stat/stat_rowcount.js56
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/top/mongotop_json.js44
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/top/mongotop_reports.js148
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/top/mongotop_sharded.js47
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/top/mongotop_stress.js49
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/top/mongotop_validation.js46
-rw-r--r--src/mongo/gotools/test/qa-tests/jstests/top/util/mongotop_common.js25
-rw-r--r--src/mongo/gotools/vendor.bat31
-rwxr-xr-xsrc/mongo/gotools/vendor.sh119
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/LICENSE27
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/README59
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/golint/golint.go67
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/lint.go1057
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/lint_test.go197
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/misc/emacs/golint.el51
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/misc/vim/ftplugin/go/lint.vim31
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/4.go34
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/5_test.go17
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-lib.go33
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-lib_test.go20
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-main.go12
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/common-methods.go16
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/const-block.go36
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/else-multi.go18
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/else.go23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/errorf.go22
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/errors.go35
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/import-dot.go6
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/inc.go14
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/make.go10
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/names.go54
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc1.go3
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc2.go5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc3.go7
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc4.go7
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-main.go5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/range.go27
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/receiver-names.go38
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/sort.go20
-rw-r--r--src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/var-decl.go48
-rw-r--r--src/mongo/gotools/vendor/src/github.com/howeyc/gopass/LICENSE.txt13
-rw-r--r--src/mongo/gotools/vendor/src/github.com/howeyc/gopass/README.md21
-rw-r--r--src/mongo/gotools/vendor/src/github.com/howeyc/gopass/nix.go29
-rw-r--r--src/mongo/gotools/vendor/src/github.com/howeyc/gopass/pass.go56
-rw-r--r--src/mongo/gotools/vendor/src/github.com/howeyc/gopass/win.go44
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/.gitignore5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/LICENSE202
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/README.md58
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/all_of.go70
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/all_of_test.go110
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any.go32
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_of.go94
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_of_test.go139
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_test.go53
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/contains.go61
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/contains_test.go233
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/deep_equals.go88
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/deep_equals_test.go343
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/elements_are.go91
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/elements_are_test.go208
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/equals.go557
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/equals_test.go3843
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/error.go51
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/error_test.go92
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_or_equal.go39
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_or_equal_test.go1059
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_than.go39
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_than_test.go1079
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_same_type_as.go37
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_same_type_as_test.go181
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_substr.go46
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_substr_test.go93
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/identical_to.go134
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/identical_to_test.go849
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_or_equal.go41
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_or_equal_test.go1079
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_than.go152
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_than_test.go1059
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matcher.go86
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matches_regexp.go69
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matches_regexp_test.go92
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/new_matcher.go43
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/not.go53
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/not_test.go108
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/panics.go74
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/panics_test.go141
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/pointee.go65
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/pointee_test.go152
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/transform_description.go36
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/.travis.yml35
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/LICENSE26
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/README.md135
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/arg.go24
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/arg_test.go133
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/assert_test.go177
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/check_crosscompile.sh16
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/closest.go59
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/command.go441
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/command_test.go544
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/completion.go300
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/completion_test.go294
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/convert.go341
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/convert_test.go159
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/error.go129
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/example_test.go110
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/add.go23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/bash-completion9
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/main.go75
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/rm.go23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/flags.go256
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/group.go379
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/group_test.go255
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/help.go466
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/help_test.go460
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/ini.go593
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/ini_test.go950
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/long_test.go85
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/man.go194
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/marshal_test.go97
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/multitag.go140
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/option.go414
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/options_test.go45
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/optstyle_other.go67
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/optstyle_windows.go106
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/parser.go652
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/parser_test.go487
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/pointer_test.go81
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/short_test.go194
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/tag_test.go38
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize.go28
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_linux.go7
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_nosysioctl.go7
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_other.go7
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_unix.go7
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/unknown_test.go66
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jtolds/gls/LICENSE18
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jtolds/gls/README.md89
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jtolds/gls/context.go144
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jtolds/gls/context_test.go139
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jtolds/gls/gen_sym.go13
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jtolds/gls/id_pool.go34
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags.go43
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags_js.go101
-rw-r--r--src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags_main.go61
-rw-r--r--src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/.travis.yml8
-rw-r--r--src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/README.mkd26
-rw-r--r--src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth.go464
-rw-r--r--src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_js.go8
-rw-r--r--src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_posix.go69
-rw-r--r--src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_test.go229
-rw-r--r--src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_windows.go24
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/AUTHORS4
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/LICENSE19
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/README.md28
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/editbox.go300
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/interrupt.go69
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/keyboard.go722
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/output.go228
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/paint.go105
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/random_output.go46
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/raw_input.go109
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api.go458
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api_common.go187
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api_windows.go239
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/nsf/termbox-go/collect_terminfo.py110
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls.go39
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_darwin.go41
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_darwin_amd64.go40
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_freebsd.go39
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_linux.go33
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_netbsd.go39
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_openbsd.go39
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_windows.go61
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox.go514
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox_common.go59
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox_windows.go856
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/terminfo.go221
-rw-r--r--src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/terminfo_builtin.go64
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/.gitignore3
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/.travis.yml14
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/LICENSE.md23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/README.md575
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/assertions.goconvey3
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/collections.go244
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/collections_test.go157
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/doc.go105
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/doc_test.go57
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/equality.go280
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/equality_test.go269
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/filter.go23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/Makefile23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/.travis.yml21
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/LICENSE27
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/PRESUBMIT.py109
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/README.md78
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/WATCHLISTS26
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/pre-commit-go.yml78
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/render/render.go327
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/render/render_test.go170
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/.gitignore5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml4
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/LICENSE202
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/README.md58
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/all_of.go70
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/all_of_test.go110
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any.go32
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_of.go94
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_of_test.go139
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_test.go53
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/contains.go61
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/contains_test.go233
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go88
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/deep_equals_test.go343
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/elements_are.go91
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/elements_are_test.go208
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/equals.go541
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/equals_test.go3864
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/error.go51
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/error_test.go92
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go39
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal_test.go1101
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go39
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_than_test.go1077
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as.go37
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as_test.go181
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_substr.go46
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_substr_test.go93
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/identical_to.go134
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/identical_to_test.go849
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go41
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal_test.go1077
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_than.go152
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_than_test.go1057
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matcher.go86
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp.go69
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp_test.go92
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/new_matcher.go43
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/not.go53
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/not_test.go108
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/panics.go74
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/panics_test.go141
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/pointee.go65
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/pointee_test.go152
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go36
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/.gitignore5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/.travis.yml4
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/LICENSE202
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/README.md103
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/action.go36
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/controller.go480
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/controller_test.go1249
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/createmock.go245
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/createmock_test.go233
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/gcs/bucket.go23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.gcs_bucket125
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.gcs_bucket_same_package124
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.no_interfaces1
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.no_package1
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.unknown_interface1
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.unknown_package1
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/do_all.go53
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/do_all_test.go90
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/doc.go28
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/error_reporter.go29
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/expectation.go59
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/generate.go369
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/generate_test.go168
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/complicated_pkg/complicated_pkg.go41
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.complicated_pkg.go311
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.image.go238
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.io_reader_writer.go127
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.io_reader_writer_same_package.go126
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.renamed_pkg.go66
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/renamed_pkg/renamed_pkg.go24
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/type_string.go147
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/type_string_test.go220
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/integration_test.go129
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/internal_expectation.go180
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/internal_expectation_test.go265
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/invoke.go73
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/invoke_test.go110
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/mock_object.go30
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/return.go251
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/return_test.go978
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/sample/README.markdown6
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/sample/mock_io/mock_io.go71
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/save_arg.go83
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/save_arg_test.go132
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/.gitignore5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/.travis.yml4
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/LICENSE202
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/README.md151
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/assert_aliases.go70
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/assert_that.go46
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/doc.go51
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_aliases.go64
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_call.go59
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_that.go100
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_that_test.go168
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/failure.go90
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/integration_test.go265
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/register.go86
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/register_test_suite.go193
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/run_tests.go354
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/docs.go5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/methods.go65
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/methods_test.go107
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/failing.test.go252
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/filtered.test.go79
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.failing_test278
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.filtered_test24
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.mock_test25
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.no_cases_test6
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.panicking_test90
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.passing_test22
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.run_twice_test14
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.stop_test13
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.unexported_test12
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/mock.test.go82
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/mock_image/mock_image.go115
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/no_cases.test.go41
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/panicking.test.go99
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/passing.test.go120
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/run_twice.test.go47
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/stop.test.go61
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/unexported.test.go43
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_info.go91
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/.gitignore24
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/LICENSE202
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/README.md53
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/reqtrace.go132
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/trace_state.go175
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/messages.go94
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/panic.go115
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/panic_test.go53
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/quantity.go141
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/quantity_test.go145
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/serializer.go69
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/serializer_test.go36
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/should/should.go73
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/strings.go227
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/strings_test.go118
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/time.go202
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/time_test.go159
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/type.go112
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/type_test.go76
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/utilities_for_test.go75
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/.gitignore5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/.travis.yml14
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/CONTRIBUTING.md22
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/LICENSE.md23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/README.md126
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/assertions.go68
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/context.go272
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/convey.goconvey4
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/discovery.go103
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/doc.go218
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/focused_execution_test.go72
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/gotest/doc_test.go1
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/gotest/utils.go28
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/init.go81
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/isolated_execution_test.go774
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/nilReporter.go15
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/console.go16
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/doc.go5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/dot.go40
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/dot_test.go40
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/gotest.go33
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/gotest_test.go66
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/init.go94
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/json.go88
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/printer.go57
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/printer_test.go181
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/problems.go80
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/problems_test.go51
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporter.go39
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporter_test.go94
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey2
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reports.go179
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/statistics.go89
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/story.go73
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting_hooks_test.go317
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/story_conventions_test.go175
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/dependencies.go4
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/doc_test.go1
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/assertion_examples_test.go125
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/bowling_game.go75
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/bowling_game_test.go80
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/doc.go5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/examples.goconvey12
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/simple_example_test.go36
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/goconvey.go280
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/composer.html35
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/favicon.icobin0 -> 15086 bytes
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/index.html487
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/common.css962
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/composer.css65
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/font-awesome.min.css5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark-bigtext.css400
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark.css380
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/light.css328
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/tipsy.css97
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/FontAwesome.otfbin0 -> 75188 bytes
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.eotbin0 -> 72449 bytes
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.svg504
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.ttfbin0 -> 141564 bytes
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.woffbin0 -> 83760 bytes
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/LICENSE.txt202
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Bold.ttfbin0 -> 224592 bytes
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Italic.ttfbin0 -> 212896 bytes
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Light.ttfbin0 -> 222412 bytes
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-LightItalic.ttfbin0 -> 213128 bytes
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Regular.ttfbin0 -> 217360 bytes
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/OFL.txt93
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/Orbitron-Regular.ttfbin0 -> 39484 bytes
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/OFL.txt92
-rwxr-xr-xsrc/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/Oswald-Regular.ttfbin0 -> 50944 bytes
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-buildfail.icobin0 -> 15086 bytes
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-fail.icobin0 -> 15086 bytes
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-ok.icobin0 -> 15086 bytes
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-panic.icobin0 -> 15086 bytes
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/composer.js171
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/config.js15
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/convey.js46
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/goconvey.js1322
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/ansispan.js67
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/diff-match-patch.min.js49
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery-2_1_0.min.js4
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery-ui-1_10_3-custom.min.js5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery.pretty-text-diff.min.js5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery.tipsy.min.js5
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/markup.min.js6
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/moment.min.js6
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/taboverride.min.js3
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/poller.js130
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/api.goconvey2
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/server.go164
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/server_test.go462
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/contracts.go27
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/doc_test.go1
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/result.go120
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/contract.go12
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/coordinator.go71
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor.go84
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor.goconvey2
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor_test.go160
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/tester.go56
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/tester_test.go254
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/messaging/doc_test.go1
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/messaging/messages.go56
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/packageParser.go174
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/package_parser_test.go792
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser.go32
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser.goconvey2
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser_test.go47
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/rules.go43
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/testParser.go174
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/util.go45
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell.go174
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell_integration_test.go33
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell_test.go217
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/system.goconvey3
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/functional_core.go171
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/functional_core_test.go419
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/imperative_shell.go77
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration.go185
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_test.go200
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/doc_test.go1
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/main.go10
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/.gitignore2
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/stuff.go4
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/stuff_test.go17
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/sub.goconvey7
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/util_test.go92
-rw-r--r--src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/watch.goconvey3
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/LICENSE191
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/README.md26
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/bio.go355
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/build.go23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/cert.go407
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/cert_test.go139
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ciphers.go355
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ciphers_test.go307
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/conn.go625
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ctx.go831
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ctx_test.go48
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/dhparam.go65
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/digest.go53
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/engine.go52
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/fips.go22
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/hostname.c367
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/hostname.go127
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/http.go61
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init.go155
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init_posix.go64
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init_windows.go60
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/key.go374
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/key_test.go149
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/net.go134
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/nid.go199
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/oracle_stubs.go162
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/password.c10
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/pem.go32
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha1.go99
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha1_test.go111
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha256.go92
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha256_test.go109
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sni.c23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sni_test.go23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ssl.go167
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ssl_test.go633
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/utils/errors.go50
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/utils/future.go79
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/verify.c31
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/version.go22
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/.travis.yml6
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/LICENSE191
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/README.md19
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture.go67
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture_other.go35
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture_windows.go23
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/collection.go229
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/convenience.go266
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/doc.go39
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/event.go75
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/handler.go53
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/level.go126
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/logger.go61
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output.go178
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output_other.go19
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output_windows.go17
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/setup.go183
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/setup/setup.go80
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/syslog.go63
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/syslog_windows.go26
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates.go69
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates_others.go22
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates_windows.go20
-rw-r--r--src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/text.go80
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/.gitattributes10
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/.gitignore2
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/AUTHORS3
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/CONTRIBUTING.md31
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/CONTRIBUTORS3
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/LICENSE27
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/PATENTS22
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/README3
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/base64.go35
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/bcrypt.go294
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/bcrypt_test.go226
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/block.go159
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/blowfish_test.go274
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/cipher.go91
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/const.go199
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/bn256.go404
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/bn256_test.go304
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/constants.go44
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/curve.go278
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/example_test.go43
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp12.go200
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp2.go219
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp6.go296
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/optate.go395
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/twist.go249
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/cast5/cast5.go526
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/cast5/cast5_test.go106
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/codereview.cfg1
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/const_amd64.s20
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/cswap_amd64.s88
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/curve25519.go841
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/curve25519_test.go29
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/doc.go23
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/freeze_amd64.s94
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/ladderstep_amd64.s1398
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/mont25519_amd64.go240
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/mul_amd64.s191
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/square_amd64.s153
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/example_test.go61
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/hkdf.go75
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/hkdf_test.go370
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4.go118
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4_test.go71
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4block.go89
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/box/box.go85
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/box/box_test.go78
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/secretbox/secretbox.go149
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/secretbox/secretbox_test.go91
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ocsp/ocsp.go673
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ocsp/ocsp_test.go584
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/armor.go219
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/armor_test.go95
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/encode.go160
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/canonical_text.go59
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/canonical_text_test.go52
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/clearsign/clearsign.go372
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go197
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/elgamal/elgamal.go122
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go49
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/errors/errors.go72
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/keys.go633
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/keys_test.go370
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/compressed.go123
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/compressed_test.go41
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/config.go91
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/encrypted_key.go199
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go146
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/literal.go89
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/ocfb.go143
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/ocfb_test.go46
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/one_pass_signature.go73
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/opaque.go162
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/opaque_test.go67
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/packet.go539
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/packet_test.go255
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/private_key.go326
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/private_key_test.go69
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key.go724
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_test.go202
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_v3.go280
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go82
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/reader.go76
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature.go699
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_test.go42
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_v3.go146
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_v3_test.go92
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go155
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go103
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go290
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go123
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userattribute.go91
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userattribute_test.go109
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userid.go160
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userid_test.go87
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/read.go439
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/read_test.go512
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/s2k/s2k.go273
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/s2k/s2k_test.go137
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/write.go378
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/write_test.go259
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/libotr_test_helper.c197
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/otr.go1408
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/otr_test.go470
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/smp.go572
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pbkdf2/pbkdf2.go77
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pbkdf2/pbkdf2_test.go157
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/bmp-string.go50
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/bmp-string_test.go63
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/crypto.go131
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/crypto_test.go125
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/errors.go23
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go27
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go274
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go93
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/mac.go45
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/mac_test.go42
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pbkdf.go170
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pbkdf_test.go34
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pkcs12.go342
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pkcs12_test.go138
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/safebags.go57
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/const_amd64.s45
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305.go32
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_amd64.s497
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_arm.s379
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_test.go86
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_amd64.go24
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_arm.go24
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_ref.go1531
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160.go120
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160_test.go64
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160block.go161
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/hsalsa20.go144
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s902
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa208.go199
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go23
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go234
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa_test.go35
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa20.go54
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa20_test.go139
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/scrypt/scrypt.go243
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/scrypt/scrypt_test.go160
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/doc.go66
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/hashes.go65
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/keccakf.go410
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/register.go18
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/sha3.go193
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/sha3_test.go306
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/shake.go60
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflatebin0 -> 521342 bytes
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor.go16
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor_generic.go28
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor_unaligned.go58
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/client.go615
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/client_test.go287
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/forward.go103
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/keyring.go184
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/keyring_test.go78
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/server.go209
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/server_test.go77
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/testdata_test.go64
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/benchmark_test.go122
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/buffer.go98
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/buffer_test.go87
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/certs.go501
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/certs_test.go216
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/channel.go631
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/cipher.go552
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/cipher_test.go127
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client.go213
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_auth.go441
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_auth_test.go393
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_test.go39
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/common.go354
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/connection.go144
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/doc.go18
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/example_test.go211
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/handshake.go412
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/handshake_test.go415
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/kex.go526
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/kex_test.go50
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/keys.go720
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/keys_test.go437
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mac.go57
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mempipe_test.go110
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/messages.go725
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/messages_test.go254
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mux.go356
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mux_test.go525
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/server.go495
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/session.go605
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/session_test.go774
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/tcpip.go407
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/tcpip_test.go20
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/terminal.go892
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/terminal_test.go269
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util.go128
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_bsd.go12
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_linux.go11
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_windows.go174
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/agent_unix_test.go59
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/cert_test.go47
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/doc.go7
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/forward_unix_test.go160
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/session_test.go340
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/tcpip_test.go46
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/test_unix_test.go261
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/testdata_test.go64
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata/doc.go8
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata/keys.go43
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata_test.go63
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/transport.go332
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/transport_test.go109
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/tea/cipher.go109
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/tea/tea_test.go93
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/twofish/twofish.go342
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/twofish/twofish_test.go129
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/block.go66
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/cipher.go82
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/xtea_test.go229
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/xts/xts.go138
-rw-r--r--src/mongo/gotools/vendor/src/golang.org/x/crypto/xts/xts_test.go85
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/.travis.yml45
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/LICENSE25
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/Makefile5
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/README.md4
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/auth.go467
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/auth_test.go1180
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/LICENSE25
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/bson.go738
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/bson_test.go1832
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decimal.go310
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decimal_test.go4109
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decode.go849
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/encode.go514
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/json.go380
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/json_test.go184
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/specdata/update.sh27
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/specdata_test.go241
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bulk.go351
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bulk_test.go504
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/cluster.go682
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/cluster_test.go2090
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/dbserver.go196
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/dbserver_test.go108
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/export_test.go12
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/doc.go31
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/export_test.go33
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/gridfs.go761
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/gridfs_test.go708
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.crt20
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.key27
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.pem57
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.req17
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.crt22
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.key28
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.pem50
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/.env57
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/.empty0
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/journal/tempLatencyTestbin0 -> 204800 bytes
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/mongod.lock0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/run8
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/run8
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/run9
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/run15
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/run8
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/run12
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/run8
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/run8
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/run8
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/run8
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/run8
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/run8
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/run9
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/run9
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/run9
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/db/.empty0
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/run8
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s1/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s1/run7
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s2/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s2/run7
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s3/log/run3
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s3/run8
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/dropall.js66
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/init.js132
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/wait.js67
-rwxr-xr-xsrc/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/setup.sh96
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/LICENSE27
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/bench_test.go223
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/decode.go1685
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/decode_test.go1512
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/encode.go1256
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/encode_test.go613
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/example_test.go252
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/extension.go95
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/extension_test.go218
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/fold.go143
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/fold_test.go116
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/indent.go141
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/number_test.go133
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/scanner.go697
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/scanner_test.go316
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/stream.go510
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/stream_test.go418
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tagkey_test.go115
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tags.go44
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tags_test.go28
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/testdata/code.json.gzbin0 -> 120432 bytes
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl.c77
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl.go138
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c122
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go142
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h7
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c96
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h70
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/scram/scram.go266
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/scram/scram_test.go67
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/log.go133
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/queue.go91
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/queue_test.go101
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/raceoff.go5
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/raceon.go5
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/saslimpl.go11
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/saslstub.go11
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/server.go463
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/session.go4826
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/session_test.go4216
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/socket.go707
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/stats.go147
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/suite_test.go262
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/syscall_test.go15
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/syscall_windows_test.go11
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/chaos.go68
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/debug.go109
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/dockey_test.go205
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/flusher.go985
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/output.txt0
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/sim_test.go388
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/tarjan.go94
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/tarjan_test.go44
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/txn.go611
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/txn_test.go778
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/LICENSE29
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/README.md4
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/tomb.go223
-rw-r--r--src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/tomb_test.go183
1793 files changed, 257410 insertions, 0 deletions
diff --git a/src/mongo/gotools/.eslintrc.yml b/src/mongo/gotools/.eslintrc.yml
new file mode 100644
index 00000000000..4aea71ed9b2
--- /dev/null
+++ b/src/mongo/gotools/.eslintrc.yml
@@ -0,0 +1,192 @@
+env:
+ es6: true
+ mongo: true
+
+rules:
+ # Rules are documented at http://eslint.org/docs/rules/
+ #
+ # Possible Errors
+ comma-dangle: ["error", "only-multiline"]
+ no-cond-assign: 2
+ no-console: 2
+ no-constant-condition: 2
+ no-control-regex: 2
+ no-debugger: 2
+ no-dupe-args: 2
+ no-dupe-keys: 2
+ no-duplicate-case: 2
+ no-empty: 2
+ no-empty-character-class: 2
+ no-extra-parens: 0
+ no-ex-assign: 2
+ no-extra-boolean-cast: 2
+ no-extra-semi: 2
+ no-func-assign: 2
+ no-inner-declarations: 0
+ no-invalid-regexp: 2
+ no-irregular-whitespace: 2
+ no-negated-in-lhs: 2
+ no-obj-calls: 2
+ no-prototype-builtins: 0
+ no-regex-spaces: 2
+ no-sparse-arrays: 2
+ no-unexpected-multiline: 2
+ no-unreachable: 2
+ no-unsafe-finally: 2
+ use-isnan: 2
+ valid-jsdoc: 0
+ valid-typeof: 2
+ #
+ # Best Practices
+ accessor-pairs: 0
+ array-callback-return: 2
+ block-scoped-var: 0
+ consistent-return: 0
+ curly: ["error", "all"]
+ default-case: 2
+ dot-location: 0
+ dot-notation: 0
+ eqeqeq: 2
+ guard-for-in: 2
+ no-alert: 2
+ no-caller: 2
+ no-case-declarations: 0
+ no-div-regex: 2
+ no-else-return: 2
+ no-empty-function: 2
+ no-empty-pattern: 2
+ no-eq-null: 0
+ no-eval: 2
+ no-extend-native: 0
+ no-extra-bind: 2
+ no-extra-label: 2
+ no-fallthrough: 2
+ no-floating-decimal: 0
+ no-implicit-coercion: 0
+ no-implicit-globals: 0
+ no-implied-eval: 2
+ no-invalid-this: 2
+ no-iterator: 0
+ no-labels: 2
+ no-lone-blocks: 2
+ no-loop-func: 2
+ no-magic-numbers: 0
+ no-multi-spaces: 2
+ no-multi-str: 0
+ no-native-reassign: 2
+ no-new: 0
+ no-new-func: 0
+ no-new-wrappers: 2
+ no-octal: 2
+ no-octal-escape: 2
+ no-param-reassign: 0
+ no-proto: 2
+ no-redeclare: 2
+ no-return-assign: 2
+ no-script-url: 2
+ no-self-assign: 2
+ no-self-compare: 2
+ no-sequences: 2
+ no-throw-literal: 2
+ no-unmodified-loop-condition: 2
+ no-unused-expressions: 2
+ no-unused-labels: 2
+ no-useless-call: 2
+ no-useless-concat: 2
+ no-useless-escape: 2
+ no-void: 2
+ no-warning-comments: 0
+ no-with: 2
+ radix: 0
+ vars-on-top: 0
+ wrap-iife: 2
+ yoda: 2
+ #
+ # Variables
+ init-declarations: 0
+ no-catch-shadow: 2
+ no-delete-var: 0
+ no-label-var: 2
+ no-restricted-globals: 0
+ no-shadow: 0
+ no-shadow-restricted-names: 2
+ no-undef: 0
+ no-undef-init: 0
+ no-undefined: 0
+ no-unused-vars: ["error", {"varsIgnorePattern": "testName"}]
+ no-use-before-define: 2
+ #
+ # Style
+ array-bracket-spacing: ["error", "never"]
+ block-spacing: ["error", "always"]
+ brace-style: ["error", "1tbs"]
+ camelcase: 0
+ comma-spacing: ["error", {"before": false, "after": true}]
+ comma-style: ["error", "last"]
+ computed-property-spacing: ["error", "never"]
+ consistent-this: 0
+ eol-last: 2
+ func-names: 0
+ func-style: 0
+ id-blacklist: 0
+ id-length: 0
+ id-match: 0
+ indent: ["error", 2]
+ jsx-quotes: 0
+ key-spacing: 2
+ keyword-spacing: ["error", {"before": true, "after": true}]
+ linebreak-style: 2
+ lines-around-comment: 0
+ max-depth: 0
+ max-len: 0
+ max-lines: 0
+ max-nested-callbacks: 0
+ max-params: 0
+ max-statements: 0
+ max-statements-per-line: 0
+ new-cap: 0
+ new-parens: 2
+ newline-after-var: 0
+ newline-before-return: 0
+ newline-per-chained-call: 0
+ no-array-constructor: 2
+ no-bitwise: 0
+ no-continue: 0
+ no-inline-comments: 0
+ no-lonely-if: 2
+ no-mixed-operators: 0
+ no-mixed-spaces-and-tabs: 2
+ no-multiple-empty-lines: 2
+ no-negated-condition: 0
+ no-nested-ternary: 0
+ no-new-object: 0
+ no-plusplus: 0
+ no-restricted-syntax: 0
+ no-spaced-func: 2
+ no-ternary: 0
+ no-trailing-spaces: 2
+ no-underscore-dangle: 0
+ no-unneeded-ternary: 2
+ no-whitespace-before-property: 2
+ object-curly-newline: 0
+ object-curly-spacing: 2
+ object-property-newline: 0
+ one-var: 0
+ one-var-declaration-per-line: 0
+ operator-assignment: 0
+ operator-linebreak: 0
+ padded-blocks: 0
+ quote-props: 0
+ quotes: 0
+ require-jsdoc: 0
+ semi: 2
+ semi-spacing: 2
+ sort-vars: 0
+ space-before-blocks: 2
+ space-before-function-paren: 0
+ space-in-parens: 2
+ space-infix-ops: 0
+ space-unary-ops: 2
+ spaced-comment: 2
+ unicode-bom: 0
+ wrap-regex: 0
diff --git a/src/mongo/gotools/.gitattributes b/src/mongo/gotools/.gitattributes
new file mode 100644
index 00000000000..8de0d471626
--- /dev/null
+++ b/src/mongo/gotools/.gitattributes
@@ -0,0 +1,2 @@
+# Force gpm to always have lf endings even on Windows
+gpm text eol=lf
diff --git a/src/mongo/gotools/.gitignore b/src/mongo/gotools/.gitignore
new file mode 100644
index 00000000000..6de5c9d722b
--- /dev/null
+++ b/src/mongo/gotools/.gitignore
@@ -0,0 +1,20 @@
+*.swp
+*.exe
+*.pyc
+
+bin/
+
+vendor/pkg
+
+bsondump/main/bsondump
+mongodump/main/mongodump
+mongoexport/main/mongoexport
+mongofiles/main/mongofiles
+mongoimport/main/mongoimport
+mongooplog/main/mongooplog
+mongorestore/main/mongorestore
+mongotop/main/mongotop
+mongostat/main/mongostat
+
+.gopath
+.godeps
diff --git a/src/mongo/gotools/CONTRIBUTING.md b/src/mongo/gotools/CONTRIBUTING.md
new file mode 100644
index 00000000000..649708999de
--- /dev/null
+++ b/src/mongo/gotools/CONTRIBUTING.md
@@ -0,0 +1,60 @@
+Contributing to the MongoDB Tools Project
+===================================
+
+Pull requests are always welcome, and the MongoDB engineering team appreciates any help the community can give to make the MongoDB tools better.
+
+For any particular improvement you want to make, you can begin a discussion on the
+[MongoDB Developers Forum](https://groups.google.com/forum/?fromgroups#!forum/mongodb-dev). This is the best place to discuss your proposed improvement (and its
+implementation) with the core development team.
+
+If you're interested in contributing, we have a list of suggested tickets that are easy to get started on here:
+https://jira.mongodb.org/issues/?jql=project%20%3D%20TOOLS%20AND%20labels%20%3D%20community%20and%20status%20%3D%20open
+
+Getting Started
+---------------
+
+1. Create a [MongoDB JIRA account](https://jira.mongodb.org/secure/Signup!default.jspa).
+2. Create a [Github account](https://github.com/signup/free).
+3. [Fork](https://help.github.com/articles/fork-a-repo/) the repository on Github at https://github.com/mongodb/mongo-tools.
+4. For more details see http://www.mongodb.org/about/contributors/.
+
+JIRA Tickets
+------------
+
+1. File a JIRA ticket in the [TOOLS project](https://jira.mongodb.org/browse/TOOLS).
+2. All commit messages to the MongoDB Tools repository must be prefaced with the relevant JIRA ticket number, e.g. "TOOLS-XXX: add support for xyz".
+
+When filing JIRA tickets for bugs, please clearly describe the issue you are resolving, including the platforms on which it occurs and clear steps to reproduce it.
+
+For improvements or feature requests, be sure to explain the goal or use case, and the approach
+your solution will take.
+
+Style Guide
+-----------
+
+All commits to the MongoDB Tools repository must pass golint:
+
+```go run vendor/src/github.com/3rf/mongo-lint/golint/golint.go mongo* bson* common/*```
+
+_We use a modified version of [golint](https://github.com/golang/lint)_
+
+Testing
+-------
+
+To run unit and integration tests:
+
+```
+go test -v -test.types=unit,integration
+```
+
+This should be run in each package directory: common, mongorestore, mongoexport, etc.
+
+The `test.types` flag indicates what kinds of tests to run. Integration tests require a `mongod` (running on port 33333), while unit tests do not.
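+
+For example, to run only the unit tests when no `mongod` is available (a sketch assuming the flag also accepts a single type, not just the comma-separated list shown above):
+
+```
+go test -v -test.types=unit
+```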
+
+To run the quality assurance tests, you need to have the latest stable version of the rebuilt tools, `mongod`, `mongos`, and `mongo` in your current working directory.
+
+```
+cd test/qa-tests
+python buildscripts/smoke.py bson export files import oplog restore stat top
+```
+_Some tests require older binaries that are named accordingly (e.g. `mongod-2.4`, `mongod-2.6`, etc.). You can use [setup_multiversion_mongodb.py](test/qa-tests/buildscripts/setup_multiversion_mongodb.py) to download those binaries._
diff --git a/src/mongo/gotools/Godeps b/src/mongo/gotools/Godeps
new file mode 100644
index 00000000000..6ceb3ad9317
--- /dev/null
+++ b/src/mongo/gotools/Godeps
@@ -0,0 +1,13 @@
+gopkg.in/mgo.v2 1e52f6152a9b262873f831bb5a94bcd29ef38c38 github.com/10gen/mgo
+gopkg.in/tomb.v2 14b3d72120e8d10ea6e6b7f87f7175734b1faab8
+github.com/jtolds/gls 8ddce2a84170772b95dd5d576c48d517b22cac63
+github.com/jacobsa/oglematchers 3ecefc49db07722beca986d9bb71ddd026b133f0
+github.com/smartystreets/assertions 287b4346dc4e71a038c346375a9d572453bc469b
+github.com/smartystreets/goconvey bf58a9a1291224109919756b4dcc469c670cc7e4
+github.com/jessevdk/go-flags 97448c91aac742cbca3d020b3e769013a420a06f
+github.com/3rf/mongo-lint 3550fdcf1f43b89aaeabaa4559eaae6dc4407e42
+github.com/spacemonkeygo/openssl 688903e99b30b3f3a54c03f069085a246bf300b1 github.com/10gen/openssl
+github.com/howeyc/gopass 44476384cd4721b68705e72f19e95d1a3a504370
+github.com/nsf/termbox-go 0723e7c3d0a317dea811f0fbe4d6edd81908c971
+github.com/mattn/go-runewidth d6bea18f789704b5f83375793155289da36a3c7f
+golang.org/x/crypto 1f22c0103821b9390939b6776727195525381532
diff --git a/src/mongo/gotools/LICENSE.md b/src/mongo/gotools/LICENSE.md
new file mode 100644
index 00000000000..01b6a37e4a2
--- /dev/null
+++ b/src/mongo/gotools/LICENSE.md
@@ -0,0 +1,13 @@
+Copyright 2014 MongoDB, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/src/mongo/gotools/README.md b/src/mongo/gotools/README.md
new file mode 100644
index 00000000000..c3e6f670c82
--- /dev/null
+++ b/src/mongo/gotools/README.md
@@ -0,0 +1,45 @@
+MongoDB Tools
+===================================
+
+ - **bsondump** - _Display BSON files in a human-readable format_
+ - **mongoimport** - _Convert data from JSON, TSV, or CSV and insert it into a collection_
+ - **mongoexport** - _Write an existing collection to CSV or JSON format_
+ - **mongodump/mongorestore** - _Dump MongoDB backups to disk in .BSON format, or restore them to a live database_
+ - **mongostat** - _Monitor live MongoDB servers, replica sets, or sharded clusters_
+ - **mongofiles** - _Read, write, delete, or update files in [GridFS](http://docs.mongodb.org/manual/core/gridfs/)_
+ - **mongooplog** - _Replay oplog entries between MongoDB servers_
+ - **mongotop** - _Monitor read/write activity on a mongo server_
+
+Report bugs and request improvements or new features at https://jira.mongodb.org/browse/TOOLS
+
+Setup
+---------------
+Clone the repo and run `. ./set_gopath.sh` (`set_gopath.bat` on Windows) to set up your GOPATH:
+
+```
+git clone https://github.com/mongodb/mongo-tools
+cd mongo-tools
+. ./set_gopath.sh
+```
+
+Building Tools
+---------------
+To build the tools, you need Go version 1.3 or later.
+
+An additional flag, `-tags`, can be passed to the `go build` command to build the tools with SSL and/or SASL support. For example:
+
+```
+mkdir bin
+go build -o bin/mongoimport mongoimport/main/mongoimport.go # build mongoimport
+go build -o bin/mongoimport -tags ssl mongoimport/main/mongoimport.go # build mongoimport with SSL support enabled
+go build -o bin/mongoimport -tags "ssl sasl" mongoimport/main/mongoimport.go # build mongoimport with SSL and SASL support enabled
+```
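+
+Every tool follows the same `<tool>/main/<tool>.go` layout (see the paths in the diffstat above), so a small shell loop can build the full set. This is a sketch under that assumption, not an official build script:
+
+```
+mkdir -p bin
+for tool in bsondump mongodump mongoexport mongofiles mongoimport \
+            mongooplog mongorestore mongostat mongotop; do
+  # Pass -tags "ssl sasl" here to enable SSL/SASL support, as above.
+  go build -o "bin/$tool" "$tool/main/$tool.go"
+done
+```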
+
+Contributing
+---------------
+See our [Contributor's Guide](CONTRIBUTING.md).
+
+Documentation
+---------------
+See the MongoDB packages [documentation](http://docs.mongodb.org/master/reference/program/).
+
diff --git a/src/mongo/gotools/THIRD-PARTY-NOTICES b/src/mongo/gotools/THIRD-PARTY-NOTICES
new file mode 100644
index 00000000000..76e6e2520e0
--- /dev/null
+++ b/src/mongo/gotools/THIRD-PARTY-NOTICES
@@ -0,0 +1,743 @@
+MongoDB uses third-party libraries or other resources that may
+be distributed under licenses different than the MongoDB software.
+
+In the event that we accidentally failed to list a required notice,
+please bring it to our attention through any of the ways detailed here:
+
+ mongodb-dev@googlegroups.com
+
+The attached notices are provided for information only.
+
+
+1) License notice for github.com/nsf/gocode
+------------------------------------------------
+
+Copyright (C) 2010 nsf <no.smile.face@gmail.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+2) License notice for github.com/spacemonkeygo/spacelog
+------------------------------------------------
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+3) License notice for gopkg.in/mgo.v2
+------------------------------------------------
+mgo - MongoDB driver for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+4) License notice for gopkg.in/mgo.v2/bson
+------------------------------------------------
+BSON library for Go
+
+Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+5) License notice for github.com/jacobsa/oglematchers
+------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+6) License notice for gopkg.in/tomb.v2
+------------------------------------------------
+tomb - support for clean goroutine termination in Go.
+
+Copyright (c) 2010-2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+7) License notice for github.com/jessevdk/go-flags
+------------------------------------------------
+Copyright (c) 2012 Jesse van den Kieboom. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+8) License notice for github.com/spacemonkeygo/openssl
+------------------------------------------------
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/mongo/gotools/binaryurl.py b/src/mongo/gotools/binaryurl.py
new file mode 100644
index 00000000000..45a5a170542
--- /dev/null
+++ b/src/mongo/gotools/binaryurl.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+"""
+Command line utility returns the URL of the most recent archive file
+satisfying given version, edition, and operating system requirements.
+"""
+
+import argparse
+import json
+import sys
+import urllib2
+
+url_current = "http://downloads.mongodb.org/current.json"
+url_full = "http://downloads.mongodb.org/full.json"
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--edition", help="edition of MongoDB to use (e.g. 'targeted', 'enterprise'); defaults to 'base'")
+parser.add_argument("--target", help="system in use (e.g. 'ubuntu1204', 'windows_x86_64-2008plus-ssl', 'rhel71')")
+parser.add_argument("--version", help="version branch (e.g. '2.6', '3.2.8-rc1', 'latest')")
+opts = parser.parse_args()
+
+if not opts.edition:
+ opts.edition = "base"
+if not opts.target:
+ sys.exit("must specify target")
+if not opts.version:
+ sys.exit("must specify version")
+
+# prior to the 2.6 branch, the enterprise edition was called 'subscription'
+if opts.version == "2.4" and opts.edition == "enterprise":
+ opts.edition = "subscription"
+
+def isCorrectVersion(version):
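+    # Compare only the version components the caller supplied: for example, a
+    # requested version of "2.6" matches "2.6.0", "2.6.1", and so on, while
+    # release candidates are skipped unless "-rc" appears in the request.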
+    if "-rc" not in opts.version and version["release_candidate"]:
+ return False
+ actual = version["version"].split(".")
+ desired = opts.version.split(".")
+ for i in range(len(desired)):
+ if desired[i] and not actual[i] == desired[i]:
+ return False
+ return True
+
+def isCorrectDownload(download):
+ return download["edition"] == opts.edition and download["target"] == opts.target
+
+def locateUrl(specs, override):
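+    # Walk the (newest-first) version entries and return the archive URL of
+    # the first matching download; when an override such as "latest" is given,
+    # substitute it into the URL. Returns None if nothing matches.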
+ versions = specs["versions"]
+ if not override:
+ versions = filter(isCorrectVersion, versions)
+ for item in versions:
+ downloads = filter(isCorrectDownload, item["downloads"])
+        urls = map(lambda download: download["archive"]["url"], downloads)
+ if len(urls) > 0:
+ if override:
+ return urls[0].replace(item["version"], override)
+ return urls[0]
+
+override = "latest" if opts.version == "latest" else None
+
+specs = json.load(urllib2.urlopen(url_current))
+url = locateUrl(specs, override)
+
+if not url:
+ specs = json.load(urllib2.urlopen(url_full))
+ url = locateUrl(specs, override)
+
+if not url:
+ sys.exit("No info for version "+opts.version+" found")
+
+sys.stdout.write(url)
diff --git a/src/mongo/gotools/bsondump/bsondump.go b/src/mongo/gotools/bsondump/bsondump.go
new file mode 100644
index 00000000000..f279ab9b9e7
--- /dev/null
+++ b/src/mongo/gotools/bsondump/bsondump.go
@@ -0,0 +1,211 @@
+// Package bsondump converts BSON files into human-readable formats such as JSON.
+package bsondump
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2/bson"
+ "io"
+ "os"
+ "strings"
+)
+
+// BSONDump is a container for the user-specified options and
+// internal state used for running bsondump.
+type BSONDump struct {
+ // generic mongo tool options
+ ToolOptions *options.ToolOptions
+
+ // BSONDumpOptions defines options used to control how BSON data is displayed
+ BSONDumpOptions *BSONDumpOptions
+
+ // File handle for the output data.
+ Out io.WriteCloser
+
+ BSONSource *db.BSONSource
+}
+
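+// ReadNopCloser wraps an io.Reader with a no-op Close method.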
+type ReadNopCloser struct {
+ io.Reader
+}
+
+func (ReadNopCloser) Close() error { return nil }
+
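+// WriteNopCloser wraps an io.Writer with a no-op Close method.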
+type WriteNopCloser struct {
+ io.Writer
+}
+
+func (WriteNopCloser) Close() error { return nil }
+
+// GetWriter opens and returns an io.WriteCloser for the OutFileName in BSONDumpOptions,
+// or a no-op-Close wrapper around os.Stdout if none is set. The caller is responsible for closing it.
+func (bdo *BSONDumpOptions) GetWriter() (io.WriteCloser, error) {
+ if bdo.OutFileName != "" {
+ file, err := os.Create(util.ToUniversalPath(bdo.OutFileName))
+ if err != nil {
+ return nil, err
+ }
+ return file, nil
+ }
+
+ return WriteNopCloser{os.Stdout}, nil
+}
+
+// GetBSONReader opens and returns an io.ReadCloser for the BSONFileName in BSONDumpOptions,
+// or a no-op-Close wrapper around os.Stdin if none is set. The caller is responsible for closing it.
+func (bdo *BSONDumpOptions) GetBSONReader() (io.ReadCloser, error) {
+ if bdo.BSONFileName != "" {
+ file, err := os.Open(util.ToUniversalPath(bdo.BSONFileName))
+ if err != nil {
+ return nil, fmt.Errorf("couldn't open BSON file: %v", err)
+ }
+ return file, nil
+ }
+ return ReadNopCloser{os.Stdin}, nil
+}
+
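+// printJSON converts a single raw BSON document to extended JSON and writes
+// it to out, indented if pretty is set. For example, a document whose _id is
+// an ObjectId renders as {"_id":{"$oid":"546651e74bf6e4cb017c5312"},...}
+// (see testdata/sample.json).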
+func printJSON(doc *bson.Raw, out io.Writer, pretty bool) error {
+ decodedDoc := bson.D{}
+ err := bson.Unmarshal(doc.Data, &decodedDoc)
+ if err != nil {
+ return err
+ }
+
+ extendedDoc, err := bsonutil.ConvertBSONValueToJSON(decodedDoc)
+ if err != nil {
+ return fmt.Errorf("error converting BSON to extended JSON: %v", err)
+ }
+	jsonBytes, err := json.Marshal(extendedDoc)
+	if err != nil {
+		return fmt.Errorf("error converting doc to JSON: %v", err)
+	}
+	if pretty {
+		var jsonFormatted bytes.Buffer
+		json.Indent(&jsonFormatted, jsonBytes, "", "\t")
+		jsonBytes = jsonFormatted.Bytes()
+	}
+ _, err = out.Write(jsonBytes)
+ return err
+}
+
+// JSON iterates through the BSON file and for each document it finds,
+// recursively descends into objects and arrays and prints the human-readable
+// JSON representation.
+// It returns the number of documents processed and a non-nil error if one is
+// encountered before the end of the file is reached.
+func (bd *BSONDump) JSON() (int, error) {
+ numFound := 0
+
+ if bd.BSONSource == nil {
+ panic("Tried to call JSON() before opening file")
+ }
+
+ decodedStream := db.NewDecodedBSONSource(bd.BSONSource)
+
+ var result bson.Raw
+ for decodedStream.Next(&result) {
+ if err := printJSON(&result, bd.Out, bd.BSONDumpOptions.Pretty); err != nil {
+ log.Logvf(log.Always, "unable to dump document %v: %v", numFound+1, err)
+
+			// If objcheck is turned on, stop now; otherwise keep dumping.
+ if bd.BSONDumpOptions.ObjCheck {
+ return numFound, err
+ }
+ } else {
+ _, err := bd.Out.Write([]byte("\n"))
+ if err != nil {
+ return numFound, err
+ }
+ }
+ numFound++
+ }
+ if err := decodedStream.Err(); err != nil {
+ return numFound, err
+ }
+ return numFound, nil
+}
+
+// Debug iterates through the BSON file and for each document it finds,
+// recursively descends into objects and arrays and prints a human-readable
+// BSON representation containing the type and size of each field.
+// It returns the number of documents processed and a non-nil error if one is
+// encountered before the end of the file is reached.
+func (bd *BSONDump) Debug() (int, error) {
+ numFound := 0
+
+ if bd.BSONSource == nil {
+ panic("Tried to call Debug() before opening file")
+ }
+
+ var result bson.Raw
+ for {
+ doc := bd.BSONSource.LoadNext()
+ if doc == nil {
+ break
+ }
+ result.Data = doc
+
+ if bd.BSONDumpOptions.ObjCheck {
+ validated := bson.M{}
+ err := bson.Unmarshal(result.Data, &validated)
+ if err != nil {
+ // ObjCheck is turned on and we hit an error, so short-circuit now.
+ return numFound, fmt.Errorf("failed to validate bson during objcheck: %v", err)
+ }
+ }
+ err := printBSON(result, 0, bd.Out)
+ if err != nil {
+ log.Logvf(log.Always, "encountered error debugging BSON data: %v", err)
+ }
+ numFound++
+ }
+
+ if err := bd.BSONSource.Err(); err != nil {
+ // This error indicates the BSON document header is corrupted;
+ // either the 4-byte header couldn't be read in full, or
+ // the size in the header would require reading more bytes
+ // than the file has left
+	// than the file has left.
+ }
+ return numFound, nil
+}
+
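+// printBSON writes a debug view of a raw BSON document to out, one line per
+// field, recursing into nested documents and arrays with extra indentation.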
+func printBSON(raw bson.Raw, indentLevel int, out io.Writer) error {
+ indent := strings.Repeat("\t", indentLevel)
+ fmt.Fprintf(out, "%v--- new object ---\n", indent)
+ fmt.Fprintf(out, "%v\tsize : %v\n", indent, len(raw.Data))
+
+	// Unmarshal raw into a bson.RawD so we can iterate over its elements.
+ var rawD bson.RawD
+ err := bson.Unmarshal(raw.Data, &rawD)
+ if err != nil {
+ return err
+ }
+ for _, rawElem := range rawD {
+ fmt.Fprintf(out, "%v\t\t%v\n", indent, rawElem.Name)
+
+ // the size of an element is the combined size of the following:
+ // 1. 1 byte for the BSON type
+ // 2. 'e_name' : the BSON key, which is a null-terminated cstring
+ // 3. The BSON value
+ // So size == 1 [size of type byte] + 1 [null byte for cstring key] + len(bson key) + len(bson value)
+ // see http://bsonspec.org/spec.html for more details
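+		// For example, a double field with key "a" has size
+		// 1 (type byte) + 1 ("a") + 1 (cstring null) + 8 (double) = 11 bytes.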
+ fmt.Fprintf(out, "%v\t\t\ttype: %4v size: %v\n", indent, int8(rawElem.Value.Kind),
+ 2+len(rawElem.Name)+len(rawElem.Value.Data))
+
+		// For nested objects (0x03) or arrays (0x04), recurse.
+ if rawElem.Value.Kind == 0x03 || rawElem.Value.Kind == 0x04 {
+ err = printBSON(rawElem.Value, indentLevel+3, out)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/src/mongo/gotools/bsondump/bsondump_test.go b/src/mongo/gotools/bsondump/bsondump_test.go
new file mode 100644
index 00000000000..77e91c68b3e
--- /dev/null
+++ b/src/mongo/gotools/bsondump/bsondump_test.go
@@ -0,0 +1,156 @@
+package bsondump
+
+import (
+ "bytes"
+ "os"
+ "os/exec"
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+func TestBsondump(t *testing.T) {
+
+ Convey("Test bsondump reading from stdin and writing to stdout", t, func() {
+ cmd := exec.Command("../bin/bsondump")
+
+ // Attach a file to stdin of the command.
+ inFile, err := os.Open("testdata/sample.bson")
+ So(err, ShouldBeNil)
+ cmd.Stdin = inFile
+
+ // Attach a buffer to stdout of the command.
+ cmdOutput := &bytes.Buffer{}
+ cmd.Stdout = cmdOutput
+
+ err = cmd.Run()
+ So(err, ShouldBeNil)
+
+ // Get the correct bsondump result from a file to use as a reference.
+ outReference, err := os.Open("testdata/sample.json")
+ So(err, ShouldBeNil)
+ bufRef := new(bytes.Buffer)
+ bufRef.ReadFrom(outReference)
+ bufRefStr := bufRef.String()
+
+ bufDumpStr := cmdOutput.String()
+ So(bufDumpStr, ShouldEqual, bufRefStr)
+ })
+
+ Convey("Test bsondump reading from stdin and writing to a file", t, func() {
+ cmd := exec.Command("../bin/bsondump", "--outFile", "out.json")
+
+ // Attach a file to stdin of the command.
+ inFile, err := os.Open("testdata/sample.bson")
+ So(err, ShouldBeNil)
+ cmd.Stdin = inFile
+
+ err = cmd.Run()
+ So(err, ShouldBeNil)
+
+ // Get the correct bsondump result from a file to use as a reference.
+ outReference, err := os.Open("testdata/sample.json")
+ So(err, ShouldBeNil)
+ bufRef := new(bytes.Buffer)
+ bufRef.ReadFrom(outReference)
+ bufRefStr := bufRef.String()
+
+ // Get the output from a file.
+ outDump, err := os.Open("out.json")
+ So(err, ShouldBeNil)
+ bufDump := new(bytes.Buffer)
+ bufDump.ReadFrom(outDump)
+ bufDumpStr := bufDump.String()
+
+ So(bufDumpStr, ShouldEqual, bufRefStr)
+ })
+
+ Convey("Test bsondump reading from a file with --bsonFile and writing to stdout", t, func() {
+ cmd := exec.Command("../bin/bsondump", "--bsonFile", "testdata/sample.bson")
+
+ // Attach a buffer to stdout of the command.
+ cmdOutput := &bytes.Buffer{}
+ cmd.Stdout = cmdOutput
+
+ err := cmd.Run()
+ So(err, ShouldBeNil)
+
+ // Get the correct bsondump result from a file to use as a reference.
+ outReference, err := os.Open("testdata/sample.json")
+ So(err, ShouldBeNil)
+ bufRef := new(bytes.Buffer)
+ bufRef.ReadFrom(outReference)
+ bufRefStr := bufRef.String()
+
+ bufDumpStr := cmdOutput.String()
+ So(bufDumpStr, ShouldEqual, bufRefStr)
+ })
+
+ Convey("Test bsondump reading from a file with a positional arg and writing to stdout", t, func() {
+ cmd := exec.Command("../bin/bsondump", "testdata/sample.bson")
+
+		// Attach a buffer to stdout of the command.
+ cmdOutput := &bytes.Buffer{}
+ cmd.Stdout = cmdOutput
+
+ err := cmd.Run()
+ So(err, ShouldBeNil)
+
+ // Get the correct bsondump result from a file to use as a reference.
+ outReference, err := os.Open("testdata/sample.json")
+ So(err, ShouldBeNil)
+ bufRef := new(bytes.Buffer)
+ bufRef.ReadFrom(outReference)
+ bufRefStr := bufRef.String()
+
+ bufDumpStr := cmdOutput.String()
+ So(bufDumpStr, ShouldEqual, bufRefStr)
+ })
+
+ Convey("Test bsondump reading from a file with --bsonFile and writing to a file", t, func() {
+ cmd := exec.Command("../bin/bsondump", "--outFile", "out.json",
+ "--bsonFile", "testdata/sample.bson")
+
+ err := cmd.Run()
+ So(err, ShouldBeNil)
+
+ // Get the correct bsondump result from a file to use as a reference.
+ outReference, err := os.Open("testdata/sample.json")
+ So(err, ShouldBeNil)
+ bufRef := new(bytes.Buffer)
+ bufRef.ReadFrom(outReference)
+ bufRefStr := bufRef.String()
+
+ // Get the output from a file.
+ outDump, err := os.Open("out.json")
+ So(err, ShouldBeNil)
+ bufDump := new(bytes.Buffer)
+ bufDump.ReadFrom(outDump)
+ bufDumpStr := bufDump.String()
+
+ So(bufDumpStr, ShouldEqual, bufRefStr)
+ })
+
+ Convey("Test bsondump reading from a file with a positional arg and writing to a file", t, func() {
+ cmd := exec.Command("../bin/bsondump", "--outFile", "out.json", "testdata/sample.bson")
+
+ err := cmd.Run()
+ So(err, ShouldBeNil)
+
+ // Get the correct bsondump result from a file to use as a reference.
+ outReference, err := os.Open("testdata/sample.json")
+ So(err, ShouldBeNil)
+ bufRef := new(bytes.Buffer)
+ bufRef.ReadFrom(outReference)
+ bufRefStr := bufRef.String()
+
+ // Get the output from a file.
+ outDump, err := os.Open("out.json")
+ So(err, ShouldBeNil)
+ bufDump := new(bytes.Buffer)
+ bufDump.ReadFrom(outDump)
+ bufDumpStr := bufDump.String()
+
+ So(bufDumpStr, ShouldEqual, bufRefStr)
+ })
+}
diff --git a/src/mongo/gotools/bsondump/main/bsondump.go b/src/mongo/gotools/bsondump/main/bsondump.go
new file mode 100644
index 00000000000..2613d3a014a
--- /dev/null
+++ b/src/mongo/gotools/bsondump/main/bsondump.go
@@ -0,0 +1,96 @@
+// Main package for the bsondump tool.
+package main
+
+import (
+ "github.com/mongodb/mongo-tools/bsondump"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/signals"
+ "github.com/mongodb/mongo-tools/common/util"
+ "os"
+)
+
+func main() {
+ // initialize command-line opts
+ opts := options.New("bsondump", bsondump.Usage, options.EnabledOptions{})
+ bsonDumpOpts := &bsondump.BSONDumpOptions{}
+ opts.AddOptions(bsonDumpOpts)
+
+ args, err := opts.Parse()
+ if err != nil {
+ log.Logvf(log.Always, "error parsing command line options: %v", err)
+ log.Logvf(log.Always, "try 'bsondump --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ // print help, if specified
+ if opts.PrintHelp(false) {
+ return
+ }
+
+ // print version, if specified
+ if opts.PrintVersion() {
+ return
+ }
+
+ log.SetVerbosity(opts.Verbosity)
+ signals.Handle()
+
+ if len(args) > 1 {
+ log.Logvf(log.Always, "too many positional arguments: %v", args)
+ log.Logvf(log.Always, "try 'bsondump --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+	// If the user specified a BSON input file as a positional argument
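+	// (e.g. "bsondump sample.bson" behaves like "bsondump --bsonFile sample.bson").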
+ if len(args) == 1 {
+ if bsonDumpOpts.BSONFileName != "" {
+ log.Logvf(log.Always, "Cannot specify both a positional argument and --bsonFile")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ bsonDumpOpts.BSONFileName = args[0]
+ }
+
+ dumper := bsondump.BSONDump{
+ ToolOptions: opts,
+ BSONDumpOptions: bsonDumpOpts,
+ }
+
+ reader, err := bsonDumpOpts.GetBSONReader()
+ if err != nil {
+		log.Logvf(log.Always, "failed to get BSON reader: %v", err)
+ os.Exit(util.ExitError)
+ }
+ dumper.BSONSource = db.NewBSONSource(reader)
+ defer dumper.BSONSource.Close()
+
+ writer, err := bsonDumpOpts.GetWriter()
+ if err != nil {
+		log.Logvf(log.Always, "failed to get output writer: %v", err)
+ os.Exit(util.ExitError)
+ }
+ dumper.Out = writer
+ defer dumper.Out.Close()
+
+ log.Logvf(log.DebugLow, "running bsondump with --objcheck: %v", bsonDumpOpts.ObjCheck)
+
+ if len(bsonDumpOpts.Type) != 0 && bsonDumpOpts.Type != "debug" && bsonDumpOpts.Type != "json" {
+ log.Logvf(log.Always, "Unsupported output type '%v'. Must be either 'debug' or 'json'", bsonDumpOpts.Type)
+ os.Exit(util.ExitBadOptions)
+ }
+
+ var numFound int
+ if bsonDumpOpts.Type == "debug" {
+ numFound, err = dumper.Debug()
+ } else {
+ numFound, err = dumper.JSON()
+ }
+
+ log.Logvf(log.Always, "%v objects found", numFound)
+ if err != nil {
+ log.Logv(log.Always, err.Error())
+ os.Exit(util.ExitError)
+ }
+}
diff --git a/src/mongo/gotools/bsondump/options.go b/src/mongo/gotools/bsondump/options.go
new file mode 100644
index 00000000000..a7eacfcb384
--- /dev/null
+++ b/src/mongo/gotools/bsondump/options.go
@@ -0,0 +1,36 @@
+package bsondump
+
+var Usage = `<options> <file>
+
+View and debug .bson files.
+
+See http://docs.mongodb.org/manual/reference/program/bsondump/ for more information.`
+
+type BSONDumpOptions struct {
+ // Format to display the BSON data file
+ Type string `long:"type" value-name:"<type>" default:"json" default-mask:"-" description:"type of output: debug, json (default 'json')"`
+
+ // Validate each BSON document before displaying
+ ObjCheck bool `long:"objcheck" description:"validate BSON during processing"`
+
+ // Display JSON data with indents
+ Pretty bool `long:"pretty" description:"output JSON formatted to be human-readable"`
+
+ // Path to input BSON file
+ BSONFileName string `long:"bsonFile" description:"path to BSON file to dump to JSON; default is stdin"`
+
+ // Path to output file
+ OutFileName string `long:"outFile" description:"path to output file to dump BSON to; default is stdout"`
+}
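+
+// The struct tags above are read by the vendored go-flags parser; for
+// illustration (hypothetical file names), they yield invocations such as:
+//   bsondump --pretty --objcheck --bsonFile dump.bson --outFile dump.json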
+
+func (_ *BSONDumpOptions) Name() string {
+ return "output"
+}
+
+func (_ *BSONDumpOptions) PostParse() error {
+ return nil
+}
+
+func (_ *BSONDumpOptions) Validate() error {
+ return nil
+}
diff --git a/src/mongo/gotools/bsondump/testdata/sample.bson b/src/mongo/gotools/bsondump/testdata/sample.bson
new file mode 100644
index 00000000000..35b3b843d66
--- /dev/null
+++ b/src/mongo/gotools/bsondump/testdata/sample.bson
Binary files differ
diff --git a/src/mongo/gotools/bsondump/testdata/sample.json b/src/mongo/gotools/bsondump/testdata/sample.json
new file mode 100644
index 00000000000..2087ac196f2
--- /dev/null
+++ b/src/mongo/gotools/bsondump/testdata/sample.json
@@ -0,0 +1,4 @@
+{"_id":{"$oid":"546651e74bf6e4cb017c5312"},"a":1.0,"b":"I am a string","c":{"$timestamp":{"t":1415991783,"i":1}},"d":{"$binary":"VEVTVCBUM1NU","$type":"00"}}
+{"_id":{"$oid":"546651f74bf6e4cb017c5313"},"a":2.5,"b":"I am a string","c":{"$timestamp":{"t":1415991799,"i":1}},"d":{"$binary":"VEVTVCBUM1ND","$type":"00"}}
+{"_id":{"$oid":"546652084bf6e4cb017c5314"},"a":4.0,"b":"string2"}
+{"_id":{"$oid":"546652254bf6e4cb017c5315"},"a":4.01,"b":"string3","c":{"key":"value"}}
diff --git a/src/mongo/gotools/build.bat b/src/mongo/gotools/build.bat
new file mode 100644
index 00000000000..32418513a34
--- /dev/null
+++ b/src/mongo/gotools/build.bat
@@ -0,0 +1,16 @@
+@echo off
+
+REM Unlike build.sh, this script does not handle build tags.
+
+
+if exist "%cd%\vendor\pkg" rd /s /q "%cd%\vendor\pkg"
+
+call set_gopath.bat
+
+if not exist "%cd%\bin" mkdir "%cd%\bin"
+
+for %%i in (bsondump, mongostat, mongofiles, mongoexport, mongoimport, mongorestore, mongodump, mongotop, mongooplog) do (
+ echo Building %%i
+
+ go build -o "%cd%\bin\%%i.exe" "%cd%\%%i\main\%%i.go"
+)
diff --git a/src/mongo/gotools/build.sh b/src/mongo/gotools/build.sh
new file mode 100755
index 00000000000..300100610af
--- /dev/null
+++ b/src/mongo/gotools/build.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+set -o errexit
+tags=""
+if [ -n "$1" ]; then
+    tags="$*"
+fi
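+# For example (tag names illustrative), "./build.sh ssl sasl" passes those
+# build tags through to "go build" below.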
+
+# make sure we're in the directory where the script lives
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+cd "$SCRIPT_DIR"
+
+sed -i.bak -e "s/built-without-version-string/$(git describe)/" \
+ -e "s/built-without-git-spec/$(git rev-parse HEAD)/" \
+ common/options/options.go
+
+# remove stale packages
+rm -rf vendor/pkg
+
+. ./set_gopath.sh
+mkdir -p bin
+
+for i in bsondump mongostat mongofiles mongoexport mongoimport mongorestore mongodump mongotop mongooplog; do
+ echo "Building ${i}..."
+ go build -o "bin/$i" -tags "$tags" "$i/main/$i.go"
+ ./bin/$i --version
+done
+
+mv -f common/options/options.go.bak common/options/options.go
diff --git a/src/mongo/gotools/common.yml b/src/mongo/gotools/common.yml
new file mode 100644
index 00000000000..8bf5b266d89
--- /dev/null
+++ b/src/mongo/gotools/common.yml
@@ -0,0 +1,1535 @@
+#######################################
+# Tools Driver Config for MCI #
+#######################################
+# default command type
+command_type: system
+
+# run the same task in the previous revision if the current task fails
+stepback: true
+disable_cleanup: true
+
+mongo_tools_variables:
+
+## List of tests to run on each buildvariant
+ mongo_tools_task_lists:
+ osx_1010_task_list: &osx_1010_tasks
+ - name: db
+ - name: dist
+ - name: integration
+ - name: integration-auth
+ - name: legacy28
+ - name: legacy26
+ - name: legacy24
+ - name: qa-tests
+ - name: qa-dump-restore-archiving
+ - name: qa-dump-restore-gzip
+ - name: unit
+ osx_1010_ssl_task_list: &osx_1010_ssl_tasks
+ - name: dist
+ - name: qa-tests
+ solaris_task_list: &solaris_tasks
+ - name: db
+ - name: dist
+ - name: integration
+ - name: integration-auth
+ - name: legacy28
+ - name: legacy26
+ - name: legacy24
+ ubuntu1204_task_list: &ubuntu1204_tasks
+ - name: db
+ - name: dist
+ - name: integration
+ - name: integration-auth
+ - name: legacy28
+ - name: legacy28-wt
+ - name: lint-go
+ - name: lint-js
+ - name: qa-tests
+ - name: qa-tests-unstable
+ - name: qa-dump-restore-archiving
+ - name: qa-dump-restore-gzip
+ - name: qa-tests-wt
+ - name: text
+ - name: unit
+ - name: util
+ - name: vet
+ ubuntu1204_ssl_task_list: &ubuntu1204_ssl_tasks
+ - name: dist
+ - name: integration
+ - name: integration-auth
+ - name: legacy28
+ - name: qa-tests
+ - name: qa-tests-unstable
+ ubuntu1204_enterprise_task_list: &ubuntu1204_enterprise_tasks
+ - name: db
+ - name: dist
+ - name: integration
+ - name: integration-auth
+ - name: kerberos
+ - name: legacy28
+ - name: legacy26
+ - name: legacy24
+ - name: qa-tests
+ - name: text
+ - name: util
+ ubuntu1204_race_task_list: &ubuntu1204_race_tasks
+ - name: db
+ - name: dist
+ - name: integration
+ - name: integration-auth
+ - name: legacy28
+ - name: legacy28-wt
+ - name: legacy26
+ - name: legacy24
+ - name: qa-tests
+ - name: qa-tests-wt
+ - name: text
+ - name: unit
+ - name: util
+ windows_32_task_list: &windows_32_tasks
+ - name: db
+ distros:
+ - windows-64-vs2013-test
+ - name: dist
+ - name: integration
+ - name: integration-auth
+ - name: legacy28
+ distros:
+ - windows-64-vs2013-test
+ - name: legacy26
+ distros:
+ - windows-64-vs2013-test
+ - name: legacy24
+ distros:
+ - windows-64-vs2013-test
+ - name: unit
+ windows_64_task_list: &windows_64_tasks
+ - name: db
+ - name: dist
+ - name: integration
+ - name: integration-auth
+ - name: legacy28
+ distros:
+ - windows-64-vs2013-test
+ - name: legacy26
+ distros:
+ - windows-64-vs2013-test
+ - name: legacy24
+ distros:
+ - windows-64-vs2013-test
+ - name: qa-tests
+ distros:
+ - windows-64-vs2013-test
+ - name: qa-dump-restore-archiving
+ distros:
+ - windows-64-vs2013-test
+ - name: qa-dump-restore-gzip
+ distros:
+ - windows-64-vs2013-test
+ - name: unit
+ windows_64_ssl_task_list: &windows_64_ssl_tasks
+ - name: dist
+ - name: integration
+ - name: integration-auth
+ - name: legacy28
+ - name: qa-tests
+ windows_64_enterprise_task_list: &windows_64_enterprise_tasks
+ - name: db
+ - name: dist
+ - name: integration
+ - name: integration-auth
+ - name: kerberos
+ - name: legacy28
+ distros:
+ - windows-64-vs2013-test
+ - name: qa-tests
+ distros:
+ - windows-64-vs2013-test
+ - name: unit
+ rhel71_enterprise_task_list: &rhel71_enterprise_tasks
+ - name: db
+ - name: dist
+ - name: integration
+ - name: integration-auth
+ #- name: kerberos
+ - name: qa-dump-restore-archiving
+ - name: qa-dump-restore-gzip
+ - name: qa-tests
+ - name: qa-tests-unstable
+ - name: qa-tests-wt
+ - name: text
+ - name: util
+
+
+## Common mongodb arguments
+ mongod_arguments:
+ default: &mongod_default_startup_args
+ mongod_args: &mongod_default_startup_args_string "--port 33333"
+ ssl: &mongod_ssl_startup_args
+ mongod_args: "--port 33333 --sslMode requireSSL --sslCAFile common/db/openssl/testdata/ca.pem --sslPEMKeyFile common/db/openssl/testdata/server.pem"
+    # Set the storage engine to mmapv1 for 32-bit variants because WiredTiger requires 64-bit support.
+ win32: &mongod_win32_startup_args
+ mongod_args: "--port 33333 --storageEngine=mmapv1"
+
+ mongo_arguments:
+ default: &mongo_default_startup_args
+ mongo_args: &mongo_default_startup_args_string "--port 33333"
+ ssl: &mongo_ssl_startup_args
+ mongo_args: "--port 33333 --ssl --sslCAFile ./common/db/openssl/testdata/ca.pem --sslPEMKeyFile ./common/db/openssl/testdata/server.pem --sslAllowInvalidCertificates"
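+    # The &anchors above can be reused later in this file via YAML aliases,
+    # e.g. "mongod_args: *mongod_default_startup_args_string".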
+
+functions:
+ "run legacy tests":
+ command: shell.exec
+ type: test
+ params:
+ working_dir: src
+ script: |
+ mv ./mongodb/mongod${extension} .
+ mv ./mongodb/mongo${extension} .
+ mv ${test_path}/* .
+ chmod +x mongo*
+ python buildscripts/smoke.py ${smoke_use_ssl} --nopreallocj --with-cleanbb --mongod ./mongod --mongo ./mongo --report-file report.json --continue-on-failure --buildlogger-builder MCI_${build_variant} --buildlogger-buildnum ${builder_num|} --buildlogger-credentials ./mci.buildlogger --buildlogger-phase ${task_name}_${execution} ${smoke_args} tool
+
+ "run qa-tests":
+ command: shell.exec
+ type: test
+ params:
+ working_dir: src
+ script: |
+ set -o verbose
+ set -e
+ mv ./mongodb/mongod${extension} .
+ mv ./mongodb/mongo${extension} .
+ mv ./mongodb/mongos${extension} .
+ mv test/qa-tests/* .
+ chmod +x mongo*
+ if [ -f bsondump ]; then
+ chmod +x bsondump
+ fi
+ rm -rf /data/install /data/multiversion
+ if [ "${multiversion_override}" != "skip" ]; then
+ python buildscripts/setup_multiversion_mongodb.py /data/install /data/multiversion ${arch} ${multiversion_override|2.6 2.4} --latest ${smoke_use_ssl} --os="${mongo_os}"
+ fi
+ chmod 400 jstests/libs/key*
+
+ PATH=$PATH:/data/multiversion python buildscripts/resmoke.py --suite=${resmoke_suite} --continueOnFailure --log=buildlogger --reportFile=report.json ${resmoke_args}
+
+ "build tool":
+ command: shell.exec
+ type: test
+ params:
+ working_dir: src
+ script: |
+ echo "Building ${tool}..."
+ if [ "Windows_NT" = "$OS" ]; then
+ set -o igncr
+ fi;
+ if [ '${library_path}' != '' ]; then
+ export ${library_path}
+ fi
+
+ # In RHEL 5.5, /usr/bin/ld can't handle --build-id parameters, so
+ # use a wrapper if it's present on the system
+ #
+ if [ -d /opt/ldwrapper/bin ]
+ then
+ export PATH=/opt/ldwrapper/bin:$PATH
+ fi
+
+ sed -i.bak "s/built-without-version-string/$(git describe)/" common/options/options.go
+ sed -i.bak "s/built-without-git-spec/$(git rev-parse HEAD)/" common/options/options.go
+
+ . ./set_gopath.sh
+ ${gorootvars} go build ${args} ${build_tags} -o bin/${tool} ${tool}/main/${tool}.go
+ ./bin/${tool} --version
+
+ "download mongod":
+ command: shell.exec
+ params:
+ working_dir: src
+ script: |
+ set -x
+ mongotarget=$(if [ "${mongo_target}" ]; then echo "${mongo_target}"; else echo "${mongo_os}"; fi)
+ dlurl=$(python binaryurl.py --edition=${mongo_edition} --target=$mongotarget --version=${mongo_version})
+ filename=$(echo $dlurl | sed -e "s_.*/__")
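+ # the sed above strips everything through the final "/", leaving just the file name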
+ mkdir mongodb
+ cd mongodb
+ curl -s $dlurl --output $filename
+ ${decompress} $filename
+ rm $filename
+ chmod +x ./mongodb-*/bin/*
+ if [ "${only_shell}" ]; then
+ mv -f ./mongodb-*/bin/mongo${extension} .
+ else
+ mv -f ./mongodb-*/bin/* .
+ fi
+ rm -rf ./mongodb-*
+
+ "fetch tool" :
+ command: s3.get
+ params:
+ bucket: mciuploads
+ aws_key: ${aws_key}
+ aws_secret: ${aws_secret}
+ local_file: src/${tool}${extension}
+ remote_file: mongo-tools/binaries/${build_id}/${edition|community}/${tool}${extension}
+
+ "generate coverage html + text":
+ command: shell.exec
+ type: test
+ params:
+ working_dir: src
+ script: |
+ set -o verbose
+ if [ "${coverage}" = "true" ]; then
+ if [ "Windows_NT" = "$OS" ]; then
+ set -o igncr
+ fi;
+ . ./set_gopath.sh
+ go tool cover -html=coverage.out -o coverage.html
+ go tool cover -func=coverage.out -o coverage.txt
+ fi;
+
+ "get buildnumber":
+ command: keyval.inc
+ params:
+ key: "${build_variant}_tools"
+ destination: "builder_num"
+
+ "move coverage data":
+ command: shell.exec
+ params:
+ working_dir: src
+ script: |
+ set -o verbose
+ if [ "${coverage}" = "true" ]; then
+ mv ${package}/coverage.out .
+ fi
+
+ "run unit test":
+ command: shell.exec
+ type: test
+ params:
+ working_dir: src
+ script: |
+ if [ "Windows_NT" = "$OS" ]; then
+ set -o igncr
+ fi;
+ . ./set_gopath.sh
+ if [ '${library_path}' != '' ]; then
+ export ${library_path}
+ fi
+ export MONGODB_KERBEROS_PASSWORD=${kerberos_password}
+ export basedir=`pwd`
+ cd ${package}
+ go test ${coverage_args} ${args} ${build_tags} -test.v > unit.suite
+ export exitcode=$?
+ cat unit.suite
+ cp unit.suite $basedir/.
+ exit $exitcode
+
+ "setup integration test":
+ command: shell.exec
+ params:
+ working_dir: src
+ # Set up Kerberos stuff: run kinit if necessary, and add KDC to registry
+ # on Windows (see https://wiki.mongodb.com/display/DH/Testing+Kerberos)
+ script: |
+ if [ '${run_kinit}' = 'true' ]
+ then
+ echo "${kerberos_password}" | kinit -p drivers@LDAPTEST.10GEN.CC;
+ fi;
+ if [ "Windows_NT" = "$OS" ]; then
+ cmd /c "REG ADD HKLM\SYSTEM\ControlSet001\Control\Lsa\Kerberos\Domains\LDAPTEST.10GEN.CC /v KdcNames /d ldaptest.10gen.cc /t REG_MULTI_SZ /f"
+ fi;
+
+ "setup permissions":
+ command: shell.exec
+ params:
+ working_dir: src
+ script: |
+ chmod 700 set_gopath.sh
+
+ "run tool unit tests":
+ command: shell.exec
+ type: test
+ params:
+ working_dir: src
+ script: |
+ if [ "Windows_NT" = "$OS" ]; then
+ set -o igncr
+ fi;
+ . ./set_gopath.sh
+
+ # run unit tests under common package
+ for i in "common/bsonutil" "common/json" "common/archive" "common/log" "common/options" "common/progress" "common/text"; do
+ cd $i
+ COMMON_SUBPKG=$(basename $i)
+ COVERAGE_ARGS=""
+ if [ "${run_coverage}" ]; then
+ COVERAGE_ARGS="-coverprofile=coverage_$COMMON_SUBPKG.out"
+ fi;
+ ${library_path} go test $COVERAGE_ARGS ${args} ${build_tags} -test.v > $COMMON_SUBPKG.suite
+ if [ $? -ne 0 ]; then
+ echo "suite failed, will exit with error on finish"
+ export exitcode=1
+ fi
+ cat $COMMON_SUBPKG.suite
+ cp $COMMON_SUBPKG.suite ../../
+ cd ../..
+ done
+
+ #TODO mongotop needs a test
+ for i in mongoimport mongoexport mongostat mongooplog mongorestore mongodump mongofiles; do
+ cd $i
+ COVERAGE_ARGS=""
+ if [ "${run_coverage}" ]; then
+ COVERAGE_ARGS="-coverprofile=coverage_$i.out"
+ fi;
+ ${library_path} go test $COVERAGE_ARGS ${args} ${build_tags} -test.v > $i.suite
+ if [ $? -ne 0 ]; then
+ echo "suite failed, will exit with error on finish"
+ export exitcode=1
+ fi
+ cat $i.suite
+ cp $i.suite ../.
+ cd ..
+ done
+ exit $exitcode
+
+ "run tool integration tests":
+ command: shell.exec
+ type: test
+ params:
+ working_dir: src
+ script: |
+ if [ "Windows_NT" = "$OS" ]; then
+ set -o igncr
+ export MONGODB_KERBEROS_PASSWORD=${kerberos_password}
+ fi;
+ . ./set_gopath.sh
+ if [ "${library_path}" != "" ]; then
+ export ${library_path}
+ fi;
+
+ if [ "${create_mongod_users_command}" != "" ]; then
+ export AUTH_USERNAME=${auth_username}
+ export AUTH_PASSWORD=${auth_password}
+ echo "${create_mongod_users_command}" | ./mongodb/mongo${extension} ${mongo_args} admin
+ fi;
+
+ for i in mongoimport mongoexport mongostat mongooplog mongorestore mongodump mongofiles; do
+ cd $i
+ COVERAGE_ARGS=""
+ if [ "${run_coverage}" ]; then
+ COVERAGE_ARGS="-coverprofile=coverage_$i.out"
+ fi;
+ ${library_path} go test $COVERAGE_ARGS -test.v ${args} ${build_tags} > $i.suite
+ if [ $? -ne 0 ]; then
+ echo "suite failed, will exit with error on finish"
+ export exitcode=1
+ fi
+ cat $i.suite
+ cp $i.suite ../.
+ cd ..
+ done
+ exit $exitcode
+
+ "create coverage reports":
+ command: shell.exec
+ type: test
+ params:
+ working_dir: src
+ script: |
+ if [ "Windows_NT" = "$OS" ]; then
+ set -o igncr
+ fi;
+
+ . ./set_gopath.sh
+
+ for i in mongoimport mongoexport mongostat mongooplog mongorestore mongodump mongofiles; do
+ cd $i
+ perl -pe 's/.*src/github.com\/mongodb\/mongo-tools/' coverage_$i.out > coverage_"$i"_rewrite.out
+ ${library_path} go tool cover -html=coverage_"$i"_rewrite.out -o coverage_$i.html
+ ${library_path} go tool cover -func=coverage_"$i"_rewrite.out -o coverage_$i.txt
+ cd ..
+ done
+
+ "upload html coverage":
+ command: s3.put
+ params:
+ aws_key: ${aws_key}
+ aws_secret: ${aws_secret}
+ local_file: src/${coverage_pkg}/coverage_${coverage_pkg}.html
+ remote_file: mongo-tools/coverage/${coverage_pkg}/${task_id}.html
+ bucket: mciuploads
+ permissions: public-read
+ content_type: text/html
+ build_variants: ["ubuntu", "windows-64"]
+ display_name: ${coverage_pkg}-html
+
+ "upload text coverage":
+ command: s3.put
+ params:
+ aws_key: ${aws_key}
+ aws_secret: ${aws_secret}
+ local_file: src/${coverage_pkg}/coverage_${coverage_pkg}.txt
+ remote_file: mongo-tools/coverage/${coverage_pkg}/${task_id}.txt
+ bucket: mciuploads
+ permissions: public-read
+ content_type: text/plain
+ build_variants: ["ubuntu", "windows-64"]
+ display_name: ${coverage_pkg}-text
+
+ "setup credentials" :
+ command: shell.exec
+ params:
+ working_dir: src
+ silent: true
+ script: |
+ cat > mci.buildlogger <<END_OF_CREDS
+ slavename='${slave}'
+ passwd='${passwd}'
+ builder='MCI_${build_variant}'
+ build_num=${builder_num}
+ build_phase='${task_name}_${execution}'
+ END_OF_CREDS
+
+ "start mongod":
+ command: shell.exec
+ params:
+ working_dir: src
+ background: true
+ script: |
+ set -o verbose
+ rm -rf mongodb/${db_files_dir|db_files} mongodb/${logfile|run.log};
+ mkdir mongodb/${db_files_dir|db_files};
+ echo "Starting mongod...";
+ ./mongodb/mongod${extension} ${mongod_args} --dbpath mongodb/${db_files_dir|db_files} --setParameter=enableTestCommands=1
+
+ "wait for mongod to be ready":
+ command: shell.exec
+ params:
+ working_dir: src
+ script: |
+ set -o verbose
+ SECONDS=0
+ while true ; do
+ ./mongodb/mongo${extension} ${mongo_args} </dev/null 2>/dev/null >/dev/null
+ if [ "$?" = "0" ]; then
+ echo "mongod ready";
+ exit 0
+ else
+ SECONDS=`expr $SECONDS + 1`
+ if [ $SECONDS -gt 20 ]; then
+ echo "mongod not ready after 20 seconds"
+ exit 1
+ fi
+ echo "waiting for mongod to be ready..." ;
+ sleep 1 ;
+ fi
+ done
+
+ "upload tool":
+ command: s3.put
+ params:
+ aws_key: ${aws_key}
+ aws_secret: ${aws_secret}
+ local_file: src/bin/${tool}
+ remote_file: mongo-tools/binaries/${build_id}/${edition|community}/${tool}${extension}
+ bucket: mciuploads
+ permissions: public-read
+ content_type: application/octet-stream
+ display_name: ${tool}
+
+pre:
+ - command: shell.exec
+ params:
+ silent: true
+ script: |
+ ${killall_mci|pkill -9 mongo; pkill -9 mongodump; pkill -9 mongoexport; pkill -9 mongoimport; pkill -9 mongofiles; pkill -9 mongooplog; pkill -9 mongorestore; pkill -9 mongostat; pkill -9 mongotop; pkill -9 mongod; pkill -9 mongos; pkill -f buildlogger.py; pkill -f smoke.py} >/dev/null 2>&1
+ rm -rf src /data/db/*
+ exit 0
+
+post:
+ - command: attach.results
+ params:
+ file_location: src/report.json
+ - command: shell.exec
+ params:
+ silent: true
+ script: |
+ ${killall_mci|pkill -9 mongo; pkill -9 mongodump; pkill -9 mongoexport; pkill -9 mongoimport; pkill -9 mongofiles; pkill -9 mongooplog; pkill -9 mongorestore; pkill -9 mongostat; pkill -9 mongotop; pkill -9 mongod; pkill -9 mongos; pkill -f buildlogger.py; pkill -f smoke.py} >/dev/null 2>&1
+ exit 0
+ - command: gotest.parse_files
+ params:
+ files: ["src/*.suite"]
+ - command: shell.exec
+ params:
+ script: |
+ rm -rf /data/db/*
+ exit 0
+
+
+tasks:
+- name: db
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - command: expansions.update
+ params:
+ updates:
+ - key: "package"
+ value: "common/db"
+ - key: "args"
+ value: "-test.types=db"
+ - func: "download mongod"
+ vars:
+ mongo_version: "3.2"
+ - func: "start mongod"
+ - func: "wait for mongod to be ready"
+ - func: "setup integration test"
+ - func: "run unit test"
+
+- name: dist
+ depends_on:
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - command: shell.exec
+ params:
+ working_dir: src
+ script: |
+ rm -rf bin/
+ mkdir bin
+ # bsondump
+ - func: "build tool"
+ vars:
+ tool: bsondump
+ - func: "upload tool"
+ vars:
+ tool: bsondump
+ # mongodump
+ - func: "build tool"
+ vars:
+ tool: mongodump
+ - func: "upload tool"
+ vars:
+ tool: mongodump
+ # mongoexport
+ - func: "build tool"
+ vars:
+ tool: mongoexport
+ - func: "upload tool"
+ vars:
+ tool: mongoexport
+ # mongofiles
+ - func: "build tool"
+ vars:
+ tool: mongofiles
+ - func: "upload tool"
+ vars:
+ tool: mongofiles
+ # mongoimport
+ - func: "build tool"
+ vars:
+ tool: mongoimport
+ - func: "upload tool"
+ vars:
+ tool: mongoimport
+ # mongooplog
+ - func: "build tool"
+ vars:
+ tool: mongooplog
+ - func: "upload tool"
+ vars:
+ tool: mongooplog
+ # mongorestore
+ - func: "build tool"
+ vars:
+ tool: mongorestore
+ - func: "upload tool"
+ vars:
+ tool: mongorestore
+ # mongostat
+ - func: "build tool"
+ vars:
+ tool: mongostat
+ - func: "upload tool"
+ vars:
+ tool: mongostat
+ # mongotop
+ - func: "build tool"
+ vars:
+ tool: mongotop
+ - func: "upload tool"
+ vars:
+ tool: mongotop
+
+- name: integration
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - command: expansions.update
+ params:
+ updates:
+ - key: "args"
+ value: "-test.types=${integration_test_args}"
+ - func: "download mongod"
+ vars:
+ mongo_version: "3.2"
+ - func: "start mongod"
+ - func: "wait for mongod to be ready"
+ - func: "run tool integration tests"
+
+- name: integration-auth
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ # Concat auth args
+ - command: expansions.update
+ params:
+ updates:
+ - key: "args"
+ value: "-test.types=${integration_test_args},auth"
+ - key: "mongod_args"
+ concat: " --auth"
+ - key: "auth_username"
+ value: "passwordIsTaco"
+ - key: "auth_password"
+ value: "Taco"
+ - key: "create_mongod_users_command"
+ value: "db.createUser({ user: '${auth_username}', pwd: '${auth_password}', roles: [{ role: '__system', db: 'admin' }] });"
+ - func: "download mongod"
+ vars:
+ mongo_version: "3.2"
+ - func: "start mongod"
+ - func: "wait for mongod to be ready"
+ - func: "run tool integration tests"
+
+- name: kerberos
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ # Explicitly run ONLY Kerberos tests
+ - command: expansions.update
+ params:
+ updates:
+ - key: "args"
+ value: "-test.types=kerberos"
+ - func: "setup integration test"
+ - func: "run tool integration tests"
+
+- name: legacy28
+ depends_on:
+ - name: dist
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - func: "get buildnumber"
+ - func: "setup credentials"
+ - func: "download mongod"
+ vars:
+ mongo_version: "3.0"
+ - func: "fetch tool"
+ vars:
+ tool: mongoimport
+ - func: "fetch tool"
+ vars:
+ tool: mongoexport
+ - func: "fetch tool"
+ vars:
+ tool: mongodump
+ - func: "fetch tool"
+ vars:
+ tool: mongostat
+ - func: "fetch tool"
+ vars:
+ tool: mongorestore
+ - func: "fetch tool"
+ vars:
+ tool: mongooplog
+ - func: "fetch tool"
+ vars:
+ tool: mongofiles
+ - func: "run legacy tests"
+ vars:
+ test_path: "test/legacy28"
+ smoke_args: "--authMechanism SCRAM-SHA-1"
+
+- name: legacy28-wt
+ depends_on:
+ - name: dist
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - func: "get buildnumber"
+ - func: "setup credentials"
+ - func: "download mongod"
+ vars:
+ mongo_version: "3.0"
+ - func: "fetch tool"
+ vars:
+ tool: mongoimport
+ - func: "fetch tool"
+ vars:
+ tool: mongoexport
+ - func: "fetch tool"
+ vars:
+ tool: mongodump
+ - func: "fetch tool"
+ vars:
+ tool: mongostat
+ - func: "fetch tool"
+ vars:
+ tool: mongorestore
+ - func: "fetch tool"
+ vars:
+ tool: mongooplog
+ - func: "fetch tool"
+ vars:
+ tool: mongofiles
+ - func: "run legacy tests"
+ vars:
+ test_path: "test/legacy28"
+ smoke_args: "--authMechanism SCRAM-SHA-1 --storageEngine=wiredTiger"
+
+- name: legacy26
+ depends_on:
+ - name: dist
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - func: "get buildnumber"
+ - func: "setup credentials"
+ - func: "download mongod"
+ vars:
+ mongo_version: "2.6"
+ - func: "fetch tool"
+ vars:
+ tool: mongoimport
+ - func: "fetch tool"
+ vars:
+ tool: mongoexport
+ - func: "fetch tool"
+ vars:
+ tool: mongodump
+ - func: "fetch tool"
+ vars:
+ tool: mongostat
+ - func: "fetch tool"
+ vars:
+ tool: mongorestore
+ - func: "fetch tool"
+ vars:
+ tool: mongooplog
+ - func: "fetch tool"
+ vars:
+ tool: mongofiles
+ - func: "run legacy tests"
+ vars:
+ test_path: "test/legacy26"
+ smoke_use_ssl: ""
+
+- name: legacy24
+ depends_on:
+ - name: dist
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - func: "get buildnumber"
+ - func: "setup credentials"
+ - func: "download mongod"
+ vars:
+ mongo_version: "2.4"
+ - func: "download mongod"
+ vars:
+ mongo_version: "2.6"
+ only_shell: true
+ - func: "fetch tool"
+ vars:
+ tool: mongoimport
+ - func: "fetch tool"
+ vars:
+ tool: mongoexport
+ - func: "fetch tool"
+ vars:
+ tool: mongodump
+ - func: "fetch tool"
+ vars:
+ tool: mongostat
+ - func: "fetch tool"
+ vars:
+ tool: mongorestore
+ - func: "fetch tool"
+ vars:
+ tool: mongooplog
+ - func: "fetch tool"
+ vars:
+ tool: mongofiles
+ - func: "run legacy tests"
+ vars:
+ test_path: "test/legacy24"
+ smoke_use_ssl: ""
+
+- name: lint-go
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - command: shell.exec
+ type: test
+ params:
+ working_dir: src
+ script: |
+ set -o errexit
+ set -o verbose
+ retVal=$(. ./set_gopath.sh && go run vendor/src/github.com/3rf/mongo-lint/golint/golint.go mongo* bson* common/*);
+ if [ "$retVal" = "" ]; then exit 0; else echo $retVal; exit 1; fi;
+
+- name: lint-js
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - command: shell.exec
+ type: test
+ params:
+ working_dir: src
+ script: |
+ set -o verbose
+ /opt/node/bin/npm install eslint@3.2
+ /opt/node/bin/node node_modules/eslint/bin/eslint.js test/qa-tests/jstests/**/*.js
+
+- name: qa-tests-unstable
+ depends_on:
+ - name: dist
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - func: "get buildnumber"
+ - func: "setup credentials"
+ - func: "download mongod"
+ vars:
+ mongo_version: "latest"
+ - func: "fetch tool"
+ vars:
+ tool: mongoimport
+ - func: "fetch tool"
+ vars:
+ tool: mongoexport
+ - func: "fetch tool"
+ vars:
+ tool: mongodump
+ - func: "fetch tool"
+ vars:
+ tool: mongorestore
+ - func: "fetch tool"
+ vars:
+ tool: mongostat
+ - func: "fetch tool"
+ vars:
+ tool: mongotop
+ - func: "fetch tool"
+ vars:
+ tool: mongooplog
+ - func: "fetch tool"
+ vars:
+ tool: mongofiles
+ - func: "fetch tool"
+ vars:
+ tool: bsondump
+ - func: "run qa-tests"
+ vars:
+ resmoke_suite: "core${resmoke_use_ssl}"
+
+- name: qa-tests
+ depends_on:
+ - name: dist
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - func: "get buildnumber"
+ - func: "setup credentials"
+ - func: "download mongod"
+ vars:
+ mongo_version: "3.2"
+ - func: "fetch tool"
+ vars:
+ tool: mongoimport
+ - func: "fetch tool"
+ vars:
+ tool: mongoexport
+ - func: "fetch tool"
+ vars:
+ tool: mongodump
+ - func: "fetch tool"
+ vars:
+ tool: mongorestore
+ - func: "fetch tool"
+ vars:
+ tool: mongostat
+ - func: "fetch tool"
+ vars:
+ tool: mongotop
+ - func: "fetch tool"
+ vars:
+ tool: mongooplog
+ - func: "fetch tool"
+ vars:
+ tool: mongofiles
+ - func: "fetch tool"
+ vars:
+ tool: bsondump
+ - func: "run qa-tests"
+ vars:
+ resmoke_suite: "core${resmoke_use_ssl}"
+
+- name: qa-dump-restore-archiving
+ depends_on:
+ - name: dist
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - func: "get buildnumber"
+ - func: "setup credentials"
+ - func: "download mongod"
+ vars:
+ mongo_version: "3.2"
+ - func: "fetch tool"
+ vars:
+ tool: mongodump
+ - func: "fetch tool"
+ vars:
+ tool: mongorestore
+ - func: "run qa-tests"
+ vars:
+ resmoke_suite: "restore_archive"
+
+- name: qa-dump-restore-gzip
+ depends_on:
+ - name: dist
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - func: "get buildnumber"
+ - func: "setup credentials"
+ - func: "download mongod"
+ vars:
+ mongo_version: "3.2"
+ - func: "fetch tool"
+ vars:
+ tool: mongodump
+ - func: "fetch tool"
+ vars:
+ tool: mongorestore
+ - func: "run qa-tests"
+ vars:
+ resmoke_suite: "restore_gzip"
+
+- name: qa-tests-wt
+ depends_on:
+ - name: dist
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - func: "get buildnumber"
+ - func: "setup credentials"
+ - func: "download mongod"
+ vars:
+ mongo_version: "3.2"
+ - func: "fetch tool"
+ vars:
+ tool: mongoimport
+ - func: "fetch tool"
+ vars:
+ tool: mongoexport
+ - func: "fetch tool"
+ vars:
+ tool: mongodump
+ - func: "fetch tool"
+ vars:
+ tool: mongorestore
+ - func: "fetch tool"
+ vars:
+ tool: mongostat
+ - func: "fetch tool"
+ vars:
+ tool: mongotop
+ - func: "fetch tool"
+ vars:
+ tool: mongooplog
+ - func: "fetch tool"
+ vars:
+ tool: mongofiles
+ - func: "fetch tool"
+ vars:
+ tool: bsondump
+ - func: "run qa-tests"
+ vars:
+ resmoke_suite: "core"
+ resmoke_args: "--storageEngine=wiredTiger ${resmoke_args}"
+
+- name: text
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - command: expansions.update
+ params:
+ updates:
+ - key: "package"
+ value: "common/text"
+ - func: "run unit test"
+
+- name: unit
+ commands:
+ - command: expansions.update
+ params:
+ updates:
+ - key: "run_coverage"
+ value: "true"
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - func: "run tool unit tests"
+ - func: "create coverage reports"
+ - command: expansions.update
+ params:
+ updates:
+ - key: "coverage_pkg"
+ value: "mongoimport"
+ - func: "upload html coverage"
+ - func: "upload text coverage"
+ - command: expansions.update
+ params:
+ updates:
+ - key: "coverage_pkg"
+ value: "mongoexport"
+ - func: "upload html coverage"
+ - func: "upload text coverage"
+ - command: expansions.update
+ params:
+ updates:
+ - key: "coverage_pkg"
+ value: "mongostat"
+ - func: "upload html coverage"
+ - func: "upload text coverage"
+ - command: expansions.update
+ params:
+ updates:
+ - key: "coverage_pkg"
+ value: "mongooplog"
+ - func: "upload html coverage"
+ - func: "upload text coverage"
+ - command: expansions.update
+ params:
+ updates:
+ - key: "coverage_pkg"
+ value: "mongodump"
+ - func: "upload html coverage"
+ - func: "upload text coverage"
+ - command: expansions.update
+ params:
+ updates:
+ - key: "coverage_pkg"
+ value: "mongorestore"
+ - func: "upload html coverage"
+ - func: "upload text coverage"
+
+- name: util
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - command: expansions.update
+ params:
+ updates:
+ - key: "package"
+ value: "common/util"
+ - func: "run unit test"
+
+- name: vet
+ commands:
+ - command: git.get_project
+ params:
+ directory: src
+ - command: git.apply_patch
+ params:
+ directory: src
+ - command: shell.exec
+ type: test
+ params:
+ working_dir: src
+ script: |
+ set -o verbose
+ . ./set_gopath.sh
+ go tool vet bsondump common mongo*
+
+buildvariants:
+#######################################
+# OSX Buildvariant #
+#######################################
+- name: osx-1010
+ display_name: OSX 10.10 64-bit
+ run_on:
+ - osx-1010
+ expansions:
+ <<: *mongod_default_startup_args
+ <<: *mongo_default_startup_args
+ mongo_os: "osx"
+ arch: "osx/x86_64"
+ build_tags: -tags "ssl"
+ resmoke_args: --excludeWithAnyTags=requires_many_files
+ tasks: *osx_1010_tasks
+
+- name: osx-1010-ssl
+ display_name: OSX 10.10 64-bit SSL
+ run_on:
+ - osx-1010
+ expansions:
+ <<: *mongod_ssl_startup_args
+ <<: *mongo_ssl_startup_args
+ mongo_os: "osx"
+ mongo_target: "osx-ssl"
+ arch: "osx/x86_64"
+ build_tags: -tags "ssl"
+ edition: ssl
+ resmoke_args: --excludeWithAnyTags=requires_many_files
+ tasks: *osx_1010_ssl_tasks
+
+#######################################
+# Ubuntu Buildvariants #
+#######################################
+
+- name: ubuntu
+ display_name: Linux 64-bit
+ run_on:
+ - ubuntu1204-test
+ expansions:
+ <<: *mongod_default_startup_args
+ <<: *mongo_default_startup_args
+ mongo_os: "ubuntu1204"
+ mongo_edition: "targeted"
+ build_tags: -tags "ssl"
+ arch: "linux/x86_64"
+ integration_test_args: integration
+ resmoke_args: --jobs $(grep -c ^processor /proc/cpuinfo)
+ tasks: *ubuntu1204_tasks
+
+- name: ubuntu-ssl
+ display_name: Linux 64-bit SSL
+ run_on:
+ - ubuntu1204-test
+ expansions:
+ <<: *mongod_ssl_startup_args
+ <<: *mongo_ssl_startup_args
+ mongo_os: "ubuntu1204"
+ mongo_edition: "enterprise"
+ build_tags: -tags "ssl"
+ edition: ssl
+ arch: "linux/x86_64"
+ smoke_use_ssl: --use-ssl
+ resmoke_use_ssl: _ssl
+ resmoke_args: --jobs $(grep -c ^processor /proc/cpuinfo)
+ integration_test_args: "integration,ssl"
+ tasks: *ubuntu1204_ssl_tasks
+
+- name: ubuntu-enterprise
+ display_name: Linux 64-bit Enterprise
+ run_on:
+ - ubuntu1204-test
+ expansions:
+ <<: *mongod_default_startup_args
+ <<: *mongo_default_startup_args
+ mongo_os: "ubuntu1204"
+ mongo_edition: "enterprise"
+ build_tags: -tags "ssl sasl"
+ smoke_use_ssl: --use-ssl
+ resmoke_use_ssl: _ssl
+ arch: "linux/x86_64"
+ edition: enterprise
+ run_kinit: true
+ integration_test_args: integration
+ resmoke_args: --jobs $(grep -c ^processor /proc/cpuinfo)
+ tasks: *ubuntu1204_enterprise_tasks
+
+- name: rhel71-ppc64le-enterprise
+ display_name: Linux PPC64LE RHEL 7.1 Enterprise
+ run_on:
+ - rhel71-power8-test
+ expansions:
+ <<: *mongod_default_startup_args
+ <<: *mongo_default_startup_args
+ mongo_os: "rhel71"
+ mongo_edition: "enterprise"
+ # Kerberos setup does not work for mongo-tools on RHEL 7.1 PPC64LE machines
+ #build_tags: -gccgoflags "$(pkg-config --libs --cflags libssl libsasl2)" -tags 'sasl ssl'
+ build_tags: -gccgoflags "$(pkg-config --libs --cflags libssl)" -tags 'ssl'
+ resmoke_use_ssl: _ssl
+ library_path: PATH="/opt/mongodbtoolchain/v2/bin/:$PATH"
+ resmoke_args: --excludeWithAnyTags=requires_mmap_available,requires_large_ram,requires_mongo_24,requires_mongo_26,requires_mongo_30 -j 4
+ multiversion_override: "skip"
+ arch: "linux/ppc64le"
+ edition: enterprise
+ run_kinit: true
+ integration_test_args: integration
+ tasks: *rhel71_enterprise_tasks
+
+#######################################
+# Solaris Buildvariant #
+#######################################
+- name: solaris
+ display_name: Solaris 64-bit
+ run_on:
+ - solaris
+ expansions:
+ <<: *mongod_default_startup_args
+ <<: *mongo_default_startup_args
+ mongo_os: "sunos5"
+ library_path: PATH="/opt/mongodbtoolchain/v2/bin/:$PATH"
+ build_tags: -gccgoflags "-lsocket -lnsl"
+ resmoke_args: --excludeWithAnyTags=requires_large_ram -j$(kstat cpu | sort -u | grep -c "^module")
+ tasks: *solaris_tasks
+
+#######################################
+# Windows Buildvariants #
+#######################################
+- name: windows-32
+ display_name: Windows 32-bit
+ run_on:
+ - windows-32
+ expansions:
+ <<: *mongod_win32_startup_args
+ <<: *mongo_default_startup_args
+ mongo_target: "windows_i686"
+ extension: .exe
+ preproc_gpm: "perl -pi -e 's/\\r\\n/\\n/g' "
+ resmoke_args: --excludeWithAnyTags=requires_large_ram
+ integration_test_args: "integration"
+ tasks: *windows_32_tasks
+
+- name: windows-64
+ display_name: Windows 64-bit
+ run_on:
+ - windows-64-vs2013-test
+ expansions:
+ <<: *mongod_default_startup_args
+ <<: *mongo_default_startup_args
+ mongo_os: "windows-64"
+ mongo_target: "windows_x86_64-2008plus"
+ resmoke_args: --excludeWithAnyTags=requires_large_ram
+ extension: .exe
+ arch: "win32/x86_64"
+ preproc_gpm: "perl -pi -e 's/\\r\\n/\\n/g' "
+ integration_test_args: "integration"
+ tasks: *windows_64_tasks
+
+- name: windows-64-ssl
+ display_name: Windows 64-bit SSL
+ run_on:
+ - windows-64-vs2013-compile
+ expansions:
+ <<: *mongod_ssl_startup_args
+ <<: *mongo_ssl_startup_args
+ mongo_os: "windows-64"
+ mongo_target: "windows_x86_64-2008plus-ssl"
+ build_tags: -tags "ssl"
+ edition: ssl
+ smoke_use_ssl: --use-ssl
+ resmoke_use_ssl: _ssl
+ resmoke_args: --excludeWithAnyTags=requires_large_ram,requires_mongo_24
+ multiversion_override: "2.6"
+ extension: .exe
+ arch: "win32/x86_64"
+ library_path: PATH="/cygdrive/c/mingw-w64/x86_64-4.9.1-posix-seh-rt_v3-rev1/mingw64/bin:/cygdrive/c/sasl/:$PATH"
+ preproc_gpm: "perl -pi -e 's/\\r\\n/\\n/g' "
+ integration_test_args: "integration,ssl"
+ tasks: *windows_64_ssl_tasks
+
+- name: windows-64-enterprise
+ display_name: Windows 64-bit Enterprise
+ run_on:
+ - windows-64-vs2013-compile
+ expansions:
+ <<: *mongod_default_startup_args
+ <<: *mongo_default_startup_args
+ mongo_os: "windows-64"
+ mongo_edition: "enterprise"
+ mongo_target: "windows"
+ build_tags: -tags "sasl ssl"
+ smoke_use_ssl: --use-ssl
+ resmoke_use_ssl: _ssl
+ resmoke_args: --excludeWithAnyTags=requires_large_ram,requires_mongo_24
+ multiversion_override: "2.6"
+ edition: enterprise
+ extension: .exe
+ arch: "win32/x86_64"
+ library_path: PATH="/cygdrive/c/mingw-w64/x86_64-4.9.1-posix-seh-rt_v3-rev1/mingw64/bin:/cygdrive/c/sasl/:$PATH"
+ preproc_gpm: "perl -pi -e 's/\\r\\n/\\n/g' "
+ integration_test_args: "integration"
+ tasks: *windows_64_enterprise_tasks
+
+#######################################
+# Experimental Buildvariants #
+#######################################
+
+- name: ubuntu-race
+ stepback: false
+ batchtime: 1440 # daily
+ display_name: z Race Detector Linux 64-bit
+ run_on:
+ - ubuntu1204-test
+ expansions:
+ <<: *mongod_default_startup_args
+ <<: *mongo_default_startup_args
+ mongo_os: "ubuntu1204"
+ mongo_edition: "enterprise"
+ build_tags: -tags "ssl"
+ arch: "linux/x86_64"
+ args: "-race"
+ resmoke_args: --excludeWithAnyTags=requires_large_ram
+ integration_test_args: integration
+ tasks: *ubuntu1204_race_tasks
+
+#######################################
+# Dist only Buildvariants #
+#######################################
+
+- name: suse11
+ display_name: SUSE 11 SSL
+ run_on:
+ - suse11-test
+ expansions:
+ build_tags: -tags "sasl ssl"
+ tasks:
+ - name: dist
+
+- name: suse12
+ display_name: SUSE 12 SSL
+ run_on:
+ - suse12-test
+ expansions:
+ build_tags: -tags "sasl ssl"
+ tasks:
+ - name: dist
+
+- name: rhel55
+ display_name: RHEL 5.5 SSL
+ run_on:
+ - rhel55
+ expansions:
+ gorootvars: GOROOT=/opt/go PATH="/opt/go/bin:$PATH"
+ build_tags: -tags "sasl ssl"
+ tasks:
+ - name: dist
+
+- name: rhel62
+ display_name: RHEL 6.2 SSL
+ run_on:
+ - rhel62-test
+ expansions:
+ gorootvars: GOROOT=/opt/go PATH="/opt/go/bin:$PATH"
+ build_tags: -tags "sasl ssl"
+ tasks:
+ - name: dist
+
+- name: rhel70
+ display_name: RHEL 7.0 SSL
+ run_on:
+ - rhel70
+ expansions:
+ gorootvars: GOROOT=/opt/go PATH="/opt/go/bin:$PATH"
+ build_tags: -tags "sasl ssl"
+ tasks:
+ - name: dist
+
+- name: ubuntu1404
+ display_name: Ubuntu 14.04 SSL
+ run_on:
+ - ubuntu1404-test
+ expansions:
+ build_tags: -tags "sasl ssl"
+ tasks:
+ - name: dist
+
+- name: debian71
+ display_name: Debian 7.1 SSL
+ run_on:
+ - debian71-test
+ expansions:
+ gorootvars: GOROOT=/opt/go PATH="/opt/go/bin:$PATH"
+ build_tags: -tags "sasl ssl"
+ tasks:
+ - name: dist
diff --git a/src/mongo/gotools/common/archive/archive.go b/src/mongo/gotools/common/archive/archive.go
new file mode 100644
index 00000000000..d2a920090f1
--- /dev/null
+++ b/src/mongo/gotools/common/archive/archive.go
@@ -0,0 +1,55 @@
+package archive
+
+import "io"
+
+// NamespaceHeader is a data structure that, as BSON, is found in archives, where it indicates
+// either that the subsequent stream of BSON belongs to this new namespace, or that the
+// indicated namespace will have no more documents (EOF)
+type NamespaceHeader struct {
+ Database string `bson:"db"`
+ Collection string `bson:"collection"`
+ EOF bool `bson:"EOF"`
+ CRC int64 `bson:"CRC"`
+}
+
+// CollectionMetadata is a data structure that, as BSON, is found in the prelude of the archive.
+// There is one CollectionMetadata per collection that will be in the archive.
+type CollectionMetadata struct {
+ Database string `bson:"db"`
+ Collection string `bson:"collection"`
+ Metadata string `bson:"metadata"`
+ Size int `bson:"size"`
+}
+
+// Header is a data structure that, as BSON, is found immediately after the magic
+// number in the archive, before any CollectionMetadatas. It is the home of any archive-level information
+type Header struct {
+ ConcurrentCollections int32 `bson:"concurrent_collections"`
+ FormatVersion string `bson:"version"`
+ ServerVersion string `bson:"server_version"`
+ ToolVersion string `bson:"tool_version"`
+}
+
+const minBSONSize = 4 + 1 // an empty BSON document should be exactly five bytes long
+
+var terminator int32 = -1
+var terminatorBytes = []byte{0xFF, 0xFF, 0xFF, 0xFF} // TODO, rectify this with terminator
+
+// MagicNumber is the four-byte value found at the beginning of the archive, indicating that
+// the byte stream is an archive, as opposed to anything else, including a stream of BSON documents
+const MagicNumber uint32 = 0x8199e26d
+const archiveFormatVersion = "0.1"
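+
+// A rough sketch of the on-disk layout, as implied by the Multiplexer and
+// Demultiplexer in this package (the prelude read/write details live in
+// prelude.go, so treat the prelude portion as an assumption):
+//
+//   MagicNumber (4 bytes)
+//   Header (BSON), then one CollectionMetadata (BSON) per collection, then a terminator
+//   repeated per namespace: NamespaceHeader (BSON), BSON documents..., terminator
+//   and finally, per namespace: NamespaceHeader{EOF: true, CRC: ...} (BSON), terminator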
+
+// Writer is the top level object to contain information about archives in mongodump
+type Writer struct {
+ Out io.WriteCloser
+ Prelude *Prelude
+ Mux *Multiplexer
+}
+
+// Reader is the top level object to contain information about archives in mongorestore
+type Reader struct {
+ In io.ReadCloser
+ Demux *Demultiplexer
+ Prelude *Prelude
+}
diff --git a/src/mongo/gotools/common/archive/demultiplexer.go b/src/mongo/gotools/common/archive/demultiplexer.go
new file mode 100644
index 00000000000..5b0503b0208
--- /dev/null
+++ b/src/mongo/gotools/common/archive/demultiplexer.go
@@ -0,0 +1,439 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "hash"
+ "hash/crc64"
+ "io"
+ "sync"
+ "sync/atomic"
+
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/intents"
+ "github.com/mongodb/mongo-tools/common/log"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// DemuxOut is a Demultiplexer output consumer.
+// Write() and Close() are called from the same goroutine that the Demultiplexer runs in.
+type DemuxOut interface {
+ Write([]byte) (int, error)
+ Close() error
+ Sum64() (uint64, bool)
+}
+
+// Demultiplexer implements Parser.
+type Demultiplexer struct {
+ In io.Reader
+ //TODO wrap up these three into a structure
+ outs map[string]DemuxOut
+ lengths map[string]int64
+ currentNamespace string
+ buf [db.MaxBSONSize]byte
+ NamespaceChan chan string
+ NamespaceErrorChan chan error
+}
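+
+// NamespaceChan and NamespaceErrorChan form a request/response pair: the
+// demultiplexer announces each newly seen namespace on NamespaceChan and then
+// blocks until the consumer (the Prioritizer below) replies on
+// NamespaceErrorChan with nil, an error, or io.EOF to stop further announcements.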
+
+// Run creates and runs a parser with the Demultiplexer as a consumer
+func (demux *Demultiplexer) Run() error {
+ parser := Parser{In: demux.In}
+ err := parser.ReadAllBlocks(demux)
+ if len(demux.outs) > 0 {
+ log.Logvf(log.Always, "demux finishing when there are still outs (%v)", len(demux.outs))
+ }
+ log.Logvf(log.DebugLow, "demux finishing (err:%v)", err)
+ return err
+}
+
+type demuxError struct {
+ Err error
+ Msg string
+}
+
+// Error is part of the Error interface. It formats a demuxError for human readability.
+func (pe *demuxError) Error() string {
+ err := fmt.Sprintf("error demultiplexing archive; %v", pe.Msg)
+ if pe.Err != nil {
+ err = fmt.Sprintf("%v ( %v )", err, pe.Err)
+ }
+ return err
+}
+
+// newError creates a demuxError with just a message
+func newError(msg string) error {
+ return &demuxError{
+ Msg: msg,
+ }
+}
+
+// newWrappedError creates a demuxError with a message as well as an underlying cause error
+func newWrappedError(msg string, err error) error {
+ return &demuxError{
+ Err: err,
+ Msg: msg,
+ }
+}
+
+// HeaderBSON is part of the ParserConsumer interface and receives headers from parser.
+// Its main role is to implement opens and EOFs of the embedded stream.
+func (demux *Demultiplexer) HeaderBSON(buf []byte) error {
+ colHeader := NamespaceHeader{}
+ err := bson.Unmarshal(buf, &colHeader)
+ if err != nil {
+ return newWrappedError("header bson doesn't unmarshal as a collection header", err)
+ }
+ log.Logvf(log.DebugHigh, "demux namespaceHeader: %v", colHeader)
+ if colHeader.Collection == "" {
+ return newError("collection header is missing a Collection")
+ }
+ demux.currentNamespace = colHeader.Database + "." + colHeader.Collection
+ if _, ok := demux.outs[demux.currentNamespace]; !ok {
+ if demux.NamespaceChan != nil {
+ demux.NamespaceChan <- demux.currentNamespace
+ err := <-demux.NamespaceErrorChan
+ if err == io.EOF {
+ // if the Prioritizer sends us back an io.EOF then it's telling us that
+ // it's finishing and doesn't need any more namespace announcements.
+ close(demux.NamespaceChan)
+ demux.NamespaceChan = nil
+ return nil
+ }
+ if err != nil {
+ return newWrappedError("failed arranging a consumer for new namespace", err)
+ }
+ }
+ }
+ if colHeader.EOF {
+ demux.outs[demux.currentNamespace].Close()
+ length := demux.lengths[demux.currentNamespace]
+ crcUInt64, ok := demux.outs[demux.currentNamespace].Sum64()
+ if ok {
+ crc := int64(crcUInt64)
+ if crc != colHeader.CRC {
+ return fmt.Errorf("CRC mismatch for namespace %v, %v!=%v",
+ demux.currentNamespace,
+ crc,
+ colHeader.CRC,
+ )
+ }
+ log.Logvf(log.DebugHigh,
+ "demux checksum for namespace %v is correct (%v), %v bytes",
+ demux.currentNamespace, crc, length)
+ } else {
+ log.Logvf(log.DebugHigh,
+ "demux checksum for namespace %v was not calculated.",
+ demux.currentNamespace)
+ }
+ delete(demux.outs, demux.currentNamespace)
+ delete(demux.lengths, demux.currentNamespace)
+ // in case we get a BodyBSON with this block,
+ // we want to ensure that it causes an error
+ demux.currentNamespace = ""
+ }
+ return nil
+}
+
+// End is part of the ParserConsumer interface and receives the end of archive notification.
+func (demux *Demultiplexer) End() error {
+ log.Logvf(log.DebugHigh, "demux End")
+ if len(demux.outs) != 0 {
+ openNss := []string{}
+ for ns := range demux.outs {
+ openNss = append(openNss, ns)
+ }
+ return newError(fmt.Sprintf("archive finished but contained files were unfinished (%v)", openNss))
+ }
+
+ if demux.NamespaceChan != nil {
+ close(demux.NamespaceChan)
+ }
+ return nil
+}
+
+// BodyBSON is part of the ParserConsumer interface and receives BSON bodies from the parser.
+// Its main role is to dispatch the body to the Read() function of the current DemuxOut.
+func (demux *Demultiplexer) BodyBSON(buf []byte) error {
+ if demux.currentNamespace == "" {
+ return newError("collection data without a collection header")
+ }
+
+ demux.lengths[demux.currentNamespace] += int64(len(buf))
+
+ out, ok := demux.outs[demux.currentNamespace]
+ if !ok {
+ return newError("no demux consumer currently consuming namespace " + demux.currentNamespace)
+ }
+ _, err := out.Write(buf)
+ return err
+}
+
+// Open installs the DemuxOut as the handler for data for the namespace ns
+func (demux *Demultiplexer) Open(ns string, out DemuxOut) {
+ // In the current implementation where this is either called before the demultiplexing is running
+ // or while the demultiplexer is inside of the NamespaceChan/NamespaceErrorChan conversation
+ // I think that we don't need to lock outs, but I suspect that if the implementation changes
+ // we may need to lock when outs is accessed
+ log.Logvf(log.DebugHigh, "demux Open")
+ if demux.outs == nil {
+ demux.outs = make(map[string]DemuxOut)
+ demux.lengths = make(map[string]int64)
+ }
+ demux.outs[ns] = out
+ demux.lengths[ns] = 0
+}
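+
+// Illustrative wiring of the demultiplexer (a hypothetical sketch; the real
+// call sites live in mongorestore, and archiveReader/dst are stand-ins):
+//
+//   demux := &Demultiplexer{In: archiveReader}
+//   rcr := &RegularCollectionReceiver{Origin: "db.coll", Demux: demux}
+//   rcr.Open()         // registers rcr as the DemuxOut for "db.coll"
+//   go demux.Run()     // parses the archive and dispatches BSON bodies
+//   io.Copy(dst, rcr)  // a restore goroutine drains the demuxed BSON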
+
+// RegularCollectionReceiver implements the intents.file interface.
+type RegularCollectionReceiver struct {
+ pos int64 // updated atomically, aligned at the beginning of the struct
+ readLenChan chan int
+ readBufChan chan []byte
+ Intent *intents.Intent
+ Origin string
+ Demux *Demultiplexer
+ partialReadArray []byte
+ partialReadBuf []byte
+ hash hash.Hash64
+ closeOnce sync.Once
+ openOnce sync.Once
+}
+
+func (receiver *RegularCollectionReceiver) Sum64() (uint64, bool) {
+ return receiver.hash.Sum64(), true
+}
+
+// Read() runs in the restoring goroutine
+func (receiver *RegularCollectionReceiver) Read(r []byte) (int, error) {
+ if receiver.partialReadBuf != nil && len(receiver.partialReadBuf) > 0 {
+ wLen := len(receiver.partialReadBuf)
+ copyLen := copy(r, receiver.partialReadBuf)
+ if wLen == copyLen {
+ receiver.partialReadBuf = nil
+ } else {
+ receiver.partialReadBuf = receiver.partialReadBuf[copyLen:]
+ }
+ atomic.AddInt64(&receiver.pos, int64(copyLen))
+ return copyLen, nil
+ }
+ // Since we're the "reader" here, not the "writer", we need to start with a read, in case the chan is closed
+ wLen, ok := <-receiver.readLenChan
+ if !ok {
+ close(receiver.readBufChan)
+ return 0, io.EOF
+ }
+ if wLen > db.MaxBSONSize {
+ return 0, fmt.Errorf("incomming buffer size is too big %v", wLen)
+ }
+ rLen := len(r)
+ if wLen > rLen {
+ // if the incoming write size is larger than the incoming read buffer, then we need to accept
+ // the write in a larger buffer, fill the read buffer, then cache the remainder
+ receiver.partialReadBuf = receiver.partialReadArray[:wLen]
+ receiver.readBufChan <- receiver.partialReadBuf
+ writtenLength := <-receiver.readLenChan
+ if wLen != writtenLength {
+ return 0, fmt.Errorf("regularCollectionReceiver didn't send what it said it would")
+ }
+ receiver.hash.Write(receiver.partialReadBuf)
+ copy(r, receiver.partialReadBuf)
+ receiver.partialReadBuf = receiver.partialReadBuf[rLen:]
+ atomic.AddInt64(&receiver.pos, int64(rLen))
+ return rLen, nil
+ }
+ // Send the read buff to the BodyBSON ParserConsumer to fill
+ receiver.readBufChan <- r
+ // Receive the wLen of data written
+ wLen = <-receiver.readLenChan
+ receiver.hash.Write(r[:wLen])
+ atomic.AddInt64(&receiver.pos, int64(wLen))
+ return wLen, nil
+}
+
+func (receiver *RegularCollectionReceiver) Pos() int64 {
+ return atomic.LoadInt64(&receiver.pos)
+}
+
+// Open is part of the intents.file interface. It creates the chan's in the
+// RegularCollectionReceiver and adds the RegularCollectionReceiver to the set of
+// RegularCollectionReceivers in the demultiplexer
+func (receiver *RegularCollectionReceiver) Open() error {
+ // TODO move this implementation to some non intents.file method, to be called from prioritizer.Get
+ // So that we don't have to enable this double open stuff.
+ // Currently the open needs to finish before the prioritizer.Get finishes, so we open the intents.file
+ // in prioritizer.Get even though it's going to get opened again in DumpIntent.
+ receiver.openOnce.Do(func() {
+ receiver.readLenChan = make(chan int)
+ receiver.readBufChan = make(chan []byte)
+ receiver.hash = crc64.New(crc64.MakeTable(crc64.ECMA))
+ receiver.Demux.Open(receiver.Origin, receiver)
+ })
+ return nil
+}
+
+func (receiver *RegularCollectionReceiver) TakeIOBuffer(ioBuf []byte) {
+ receiver.partialReadArray = ioBuf
+}
+
+func (receiver *RegularCollectionReceiver) ReleaseIOBuffer() {
+ receiver.partialReadArray = nil
+}
+
+// Write is part of the DemuxOut interface.
+func (receiver *RegularCollectionReceiver) Write(buf []byte) (int, error) {
+ // As a writer, we need to write first, so that the reader can properly detect EOF
+ // Additionally, the reader needs to know the write size, so that it can give us a
+ // properly sized buffer. Sending the incoming buffer size fills both of these needs.
+ receiver.readLenChan <- len(buf)
+ // Receive from the reader a buffer to put the bytes into
+ readBuf := <-receiver.readBufChan
+ if len(readBuf) < len(buf) {
+ return 0, fmt.Errorf("readbuf is not large enough for incoming BodyBSON (%v<%v)",
+ len(readBuf), len(buf))
+ }
+ copy(readBuf, buf)
+ // Send back the length of the data copied in to the buffer
+ receiver.readLenChan <- len(buf)
+ return len(buf), nil
+}
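+
+// The Write/Read handshake over readLenChan and readBufChan, step by step
+// (writer = Demultiplexer goroutine, reader = restore goroutine):
+//
+//   writer: readLenChan <- len(buf)   // announce the write size
+//   reader: wLen := <-readLenChan     // pick or allocate a big-enough buffer
+//   reader: readBufChan <- r          // hand that buffer to the writer
+//   writer: copy(readBuf, buf)        // fill it
+//   writer: readLenChan <- len(buf)   // confirm how many bytes were copied
+//   reader: wLen = <-readLenChan      // resume with wLen bytes filled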
+
+// Close is part of the DemuxOut as well as the intents.file interface. It only closes the readLenChan, as that is what will
+// cause the RegularCollectionReceiver.Read() to receive EOF.
+// Close will get called twice, once in the demultiplexer, and again when the restore goroutine is done with its intents.file
+func (receiver *RegularCollectionReceiver) Close() error {
+ receiver.closeOnce.Do(func() {
+ close(receiver.readLenChan)
+ // make sure that we don't return until any reader has finished
+ <-receiver.readBufChan
+ })
+ return nil
+}
+
+// SpecialCollectionCache implements both DemuxOut and intents.file
+type SpecialCollectionCache struct {
+ pos int64 // updated atomically, aligned at the beginning of the struct
+ Intent *intents.Intent
+ Demux *Demultiplexer
+ buf bytes.Buffer
+ hash hash.Hash64
+}
+
+func NewSpecialCollectionCache(intent *intents.Intent, demux *Demultiplexer) *SpecialCollectionCache {
+ return &SpecialCollectionCache{
+ Intent: intent,
+ Demux: demux,
+ hash: crc64.New(crc64.MakeTable(crc64.ECMA)),
+ }
+}
+
+// Open is part of both interfaces, and it does nothing
+func (cache *SpecialCollectionCache) Open() error {
+ return nil
+}
+
+// Close is part of both interfaces; it records the cached size on the intent
+func (cache *SpecialCollectionCache) Close() error {
+ cache.Intent.Size = int64(cache.buf.Len())
+ return nil
+}
+
+func (cache *SpecialCollectionCache) Read(p []byte) (int, error) {
+ n, err := cache.buf.Read(p)
+ atomic.AddInt64(&cache.pos, int64(n))
+ return n, err
+}
+
+func (cache *SpecialCollectionCache) Pos() int64 {
+ return atomic.LoadInt64(&cache.pos)
+}
+
+func (cache *SpecialCollectionCache) Write(b []byte) (int, error) {
+ cache.hash.Write(b)
+ return cache.buf.Write(b)
+}
+
+func (cache *SpecialCollectionCache) Sum64() (uint64, bool) {
+ return cache.hash.Sum64(), true
+}
+
+// MutedCollection implements both DemuxOut as well as intents.file. It serves as a way to
+// let the demultiplexer ignore certain embedded streams
+type MutedCollection struct {
+ Intent *intents.Intent
+ Demux *Demultiplexer
+}
+
+// Read is part of the intents.file interface, and does nothing
+func (*MutedCollection) Read([]byte) (int, error) {
+ return 0, io.EOF
+}
+
+// Write is part of the intents.file interface, and does nothing
+func (*MutedCollection) Write(b []byte) (int, error) {
+ return len(b), nil
+}
+
+// Close is part of the intents.file interface, and does nothing
+func (*MutedCollection) Close() error {
+ return nil
+}
+
+// Open is part of the intents.file interface, and does nothing
+func (*MutedCollection) Open() error {
+ return nil
+}
+
+// Sum64 is part of the DemuxOut interface
+func (*MutedCollection) Sum64() (uint64, bool) {
+ return 0, false
+}
+
+//===== Archive Manager Prioritizer =====
+
+// NewPrioritizer creates a new Prioritizer and hooks up its Namespace channels to the ones in demux
+func (demux *Demultiplexer) NewPrioritizer(mgr *intents.Manager) *Prioritizer {
+ return &Prioritizer{
+ NamespaceChan: demux.NamespaceChan,
+ NamespaceErrorChan: demux.NamespaceErrorChan,
+ mgr: mgr,
+ }
+}
+
+// Prioritizer is a completely reactive prioritizer
+// Intents are handed out as they arrive in the archive
+type Prioritizer struct {
+ NamespaceChan <-chan string
+ NamespaceErrorChan chan<- error
+ mgr *intents.Manager
+}
+
+// Get waits for a new namespace from the NamespaceChan, and returns an Intent found for it
+func (prioritizer *Prioritizer) Get() *intents.Intent {
+ namespace, ok := <-prioritizer.NamespaceChan
+ if !ok {
+ return nil
+ }
+ intent := prioritizer.mgr.IntentForNamespace(namespace)
+ if intent == nil {
+ prioritizer.NamespaceErrorChan <- fmt.Errorf("no intent for namespace %v", namespace)
+ } else {
+ if intent.BSONFile != nil {
+ intent.BSONFile.Open()
+ }
+ if intent.IsOplog() {
+ // once we see the oplog we
+ // cause the RestoreIntents to finish because we don't
+ // want RestoreIntents to restore the oplog
+ prioritizer.NamespaceErrorChan <- io.EOF
+ return nil
+ }
+ prioritizer.NamespaceErrorChan <- nil
+ }
+ return intent
+}
+
+// Finish is part of the IntentPrioritizer interface, and does nothing
+func (prioritizer *Prioritizer) Finish(*intents.Intent) {
+ // no-op
+}
diff --git a/src/mongo/gotools/common/archive/multiplexer.go b/src/mongo/gotools/common/archive/multiplexer.go
new file mode 100644
index 00000000000..fe4502d5a7e
--- /dev/null
+++ b/src/mongo/gotools/common/archive/multiplexer.go
@@ -0,0 +1,315 @@
+package archive
+
+import (
+ "fmt"
+ "hash"
+ "hash/crc64"
+ "io"
+ "reflect"
+
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/intents"
+ "github.com/mongodb/mongo-tools/common/log"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// bufferWrites enables or disables the MuxIn buffering; bufferSize is the size of the write buffer
+// TODO: remove these constants and the non-buffered MuxIn implementations
+const bufferWrites = true
+const bufferSize = db.MaxBSONSize
+
+// Multiplexer is what one uses to create interleaved intents in an archive
+type Multiplexer struct {
+ Out io.WriteCloser
+ Control chan *MuxIn
+ Completed chan error
+ // shutdownInputs allows the mux to tell the intent dumping worker
+ // go routines to shutdown, so that we can shutdown
+ shutdownInputs notifier
+ // ins and selectCases are correlating slices
+ ins []*MuxIn
+ selectCases []reflect.SelectCase
+ currentNamespace string
+}
+
+type notifier interface {
+ Notify()
+}
+
+// NewMultiplexer creates a Multiplexer and populates its Control/Completed chans.
+// It takes a WriteCloser, which is where its inputs will get multiplexed onto,
+// and a notifier, which should allow the multiplexer to ask for the shutdown
+// of the inputs.
+func NewMultiplexer(out io.WriteCloser, shutdownInputs notifier) *Multiplexer {
+ mux := &Multiplexer{
+ Out: out,
+ Control: make(chan *MuxIn),
+ Completed: make(chan error),
+ shutdownInputs: shutdownInputs,
+ ins: []*MuxIn{
+ nil, // There is no MuxIn for the Control case
+ },
+ }
+ mux.selectCases = []reflect.SelectCase{
+ reflect.SelectCase{
+ Dir: reflect.SelectRecv,
+ Chan: reflect.ValueOf(mux.Control),
+ Send: reflect.Value{},
+ },
+ }
+ return mux
+}
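+
+// Illustrative use of the multiplexer (a hypothetical sketch; the real call
+// sites live in mongodump, and archiveWriter/notifier/intent are stand-ins):
+//
+//   mux := NewMultiplexer(archiveWriter, notifier)
+//   go mux.Run()
+//   muxIn := &MuxIn{Intent: intent, Mux: mux}
+//   muxIn.Open()       // announces muxIn on the Control chan
+//   muxIn.Write(doc)   // interleaves doc's BSON into the archive
+//   muxIn.Close()      // triggers the namespace EOF header
+//   close(mux.Control) // after every MuxIn is closed
+//   err := <-mux.Completed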
+
+// Run multiplexes until it receives an EOF on its Control chan.
+func (mux *Multiplexer) Run() {
+ var err, completionErr error
+ for {
+ index, value, notEOF := reflect.Select(mux.selectCases)
+ EOF := !notEOF
+ if index == 0 { //Control index
+ if EOF {
+ log.Logvf(log.DebugLow, "Mux finish")
+ mux.Out.Close()
+ if completionErr != nil {
+ mux.Completed <- completionErr
+ } else if len(mux.selectCases) != 1 {
+ mux.Completed <- fmt.Errorf("Mux ending but selectCases still open %v",
+ len(mux.selectCases))
+ } else {
+ mux.Completed <- nil
+ }
+ return
+ }
+ muxIn, ok := value.Interface().(*MuxIn)
+ if !ok {
+ mux.Completed <- fmt.Errorf("non MuxIn received on Control chan") // one for the MuxIn.Open
+ return
+ }
+ log.Logvf(log.DebugLow, "Mux open namespace %v", muxIn.Intent.Namespace())
+ mux.selectCases = append(mux.selectCases, reflect.SelectCase{
+ Dir: reflect.SelectRecv,
+ Chan: reflect.ValueOf(muxIn.writeChan),
+ Send: reflect.Value{},
+ })
+ mux.ins = append(mux.ins, muxIn)
+ } else {
+ if EOF {
+ // We need to let the MuxIn know that we've
+ // noticed this close. This fixes a race where
+ // the intent processing threads finish, then the main
+ // thread closes the mux's control chan and the mux
+ // processes the close on the control chan before it processes
+ // the close on the MuxIn chan
+ mux.ins[index].writeCloseFinishedChan <- struct{}{}
+
+ err = mux.formatEOF(index, mux.ins[index])
+ if err != nil {
+ mux.shutdownInputs.Notify()
+ mux.Out = &nopCloseNopWriter{}
+ completionErr = err
+ }
+ log.Logvf(log.DebugLow, "Mux close namespace %v", mux.ins[index].Intent.Namespace())
+ mux.currentNamespace = ""
+ mux.selectCases = append(mux.selectCases[:index], mux.selectCases[index+1:]...)
+ mux.ins = append(mux.ins[:index], mux.ins[index+1:]...)
+ } else {
+ bsonBytes, ok := value.Interface().([]byte)
+ if !ok {
+ mux.Completed <- fmt.Errorf("multiplexer received a value that wasn't a []byte")
+ return
+ }
+ err = mux.formatBody(mux.ins[index], bsonBytes)
+ if err != nil {
+ mux.shutdownInputs.Notify()
+ mux.Out = &nopCloseNopWriter{}
+ completionErr = err
+ }
+ }
+ }
+ }
+}
+
+type nopCloseNopWriter struct{}
+
+func (*nopCloseNopWriter) Close() error { return nil }
+func (*nopCloseNopWriter) Write(p []byte) (int, error) { return len(p), nil }
+
+// formatBody writes the BSON in to the archive, potentially writing a new header
+// if the document belongs to a different namespace from the last header.
+func (mux *Multiplexer) formatBody(in *MuxIn, bsonBytes []byte) error {
+ var err error
+ var length int
+ defer func() {
+ in.writeLenChan <- length
+ }()
+ if in.Intent.Namespace() != mux.currentNamespace {
+ // Handle the change of which DB/Collection we're writing docs for
+ // If mux.currentNamespace then we need to terminate the current block
+ if mux.currentNamespace != "" {
+ l, err := mux.Out.Write(terminatorBytes)
+ if err != nil {
+ return err
+ }
+ if l != len(terminatorBytes) {
+ return io.ErrShortWrite
+ }
+ }
+ header, err := bson.Marshal(NamespaceHeader{
+ Database: in.Intent.DB,
+ Collection: in.Intent.C,
+ })
+ if err != nil {
+ return err
+ }
+ l, err := mux.Out.Write(header)
+ if err != nil {
+ return err
+ }
+ if l != len(header) {
+ return io.ErrShortWrite
+ }
+ }
+ mux.currentNamespace = in.Intent.Namespace()
+ length, err = mux.Out.Write(bsonBytes)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// formatEOF writes the EOF header in to the archive
+func (mux *Multiplexer) formatEOF(index int, in *MuxIn) error {
+ var err error
+ if mux.currentNamespace != "" {
+ l, err := mux.Out.Write(terminatorBytes)
+ if err != nil {
+ return err
+ }
+ if l != len(terminatorBytes) {
+ return io.ErrShortWrite
+ }
+ }
+ eofHeader, err := bson.Marshal(NamespaceHeader{
+ Database: in.Intent.DB,
+ Collection: in.Intent.C,
+ EOF: true,
+ CRC: int64(in.hash.Sum64()),
+ })
+ if err != nil {
+ return err
+ }
+ l, err := mux.Out.Write(eofHeader)
+ if err != nil {
+ return err
+ }
+ if l != len(eofHeader) {
+ return io.ErrShortWrite
+ }
+ l, err = mux.Out.Write(terminatorBytes)
+ if err != nil {
+ return err
+ }
+ if l != len(terminatorBytes) {
+ return io.ErrShortWrite
+ }
+ return nil
+}
+
+// MuxIn is an implementation of the intents.file interface.
+// They live in the intents, and are potentially owned by different threads than
+// the thread owning the Multiplexer.
+// They are how the intents write data to the multiplexer
+type MuxIn struct {
+ writeChan chan []byte
+ writeLenChan chan int
+ writeCloseFinishedChan chan struct{}
+ buf []byte
+ hash hash.Hash64
+ Intent *intents.Intent
+ Mux *Multiplexer
+}
+
+// Read does nothing for MuxIns
+func (muxIn *MuxIn) Read([]byte) (int, error) {
+ return 0, nil
+}
+
+func (muxIn *MuxIn) Pos() int64 {
+ return 0
+}
+
+// Close closes the chans in the MuxIn.
+// Ultimately the multiplexer will detect that they are closed and cause a
+// formatEOF to occur.
+func (muxIn *MuxIn) Close() error {
+ // the mux side of this gets closed in the mux when it gets an eof on the read
+ log.Logvf(log.DebugHigh, "MuxIn close %v", muxIn.Intent.Namespace())
+ if bufferWrites {
+ muxIn.writeChan <- muxIn.buf
+ length := <-muxIn.writeLenChan
+ if length != len(muxIn.buf) {
+ return io.ErrShortWrite
+ }
+ muxIn.buf = nil
+ }
+ close(muxIn.writeChan)
+ close(muxIn.writeLenChan)
+ // We need to wait for the close on the writeChan to be processed before proceeding
+ // Otherwise we might assume that all work is finished and exit the program before
+ // the mux finishes writing the end of the archive
+ <-muxIn.writeCloseFinishedChan
+ return nil
+}
+
+// Open is implemented in Mux.open, but in short, it creates chans and a select case
+// and adds the SelectCase and the MuxIn in to the Multiplexer.
+func (muxIn *MuxIn) Open() error {
+ log.Logvf(log.DebugHigh, "MuxIn open %v", muxIn.Intent.Namespace())
+ muxIn.writeChan = make(chan []byte)
+ muxIn.writeLenChan = make(chan int)
+ muxIn.writeCloseFinishedChan = make(chan struct{})
+ muxIn.hash = crc64.New(crc64.MakeTable(crc64.ECMA))
+ if bufferWrites {
+ muxIn.buf = make([]byte, 0, bufferSize)
+ }
+ muxIn.Mux.Control <- muxIn
+ return nil
+}
+
+// Write hands a buffer to the Multiplexer and receives a written length from the multiplexer
+// after the length is received, the buffer is free to be reused.
+func (muxIn *MuxIn) Write(buf []byte) (int, error) {
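+ // decode the little-endian int32 length prefix of the BSON document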
+ size := int(
+ (uint32(buf[0]) << 0) |
+ (uint32(buf[1]) << 8) |
+ (uint32(buf[2]) << 16) |
+ (uint32(buf[3]) << 24),
+ )
+ // TODO remove these checks, they're for debugging
+ if len(buf) < size {
+ panic(fmt.Errorf("corrupt bson in MuxIn.Write (size %v/%v)", size, len(buf)))
+ }
+ if buf[size-1] != 0 {
+		panic(fmt.Errorf("corrupt bson in MuxIn.Write bson has non-zero terminator %v, (size %v/%v)", buf[size-1], size, len(buf)))
+ }
+ if bufferWrites {
+ if len(muxIn.buf)+len(buf) > cap(muxIn.buf) {
+ muxIn.writeChan <- muxIn.buf
+ length := <-muxIn.writeLenChan
+ if length != len(muxIn.buf) {
+ return 0, io.ErrShortWrite
+ }
+ muxIn.buf = muxIn.buf[:0]
+ }
+ muxIn.buf = append(muxIn.buf, buf...)
+ } else {
+ muxIn.writeChan <- buf
+ length := <-muxIn.writeLenChan
+ if length != len(buf) {
+ return 0, io.ErrShortWrite
+ }
+ }
+ muxIn.hash.Write(buf)
+ return len(buf), nil
+}
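+
+// Note: the four byte prefix decoded at the top of Write is the standard
+// little-endian int32 length that begins every BSON document; an equivalent
+// decode (a sketch, assuming "encoding/binary" were imported) is:
+//
+//	size := int(binary.LittleEndian.Uint32(buf[:4]))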
diff --git a/src/mongo/gotools/common/archive/multiplexer_roundtrip_test.go b/src/mongo/gotools/common/archive/multiplexer_roundtrip_test.go
new file mode 100644
index 00000000000..025121b704a
--- /dev/null
+++ b/src/mongo/gotools/common/archive/multiplexer_roundtrip_test.go
@@ -0,0 +1,229 @@
+package archive
+
+import (
+ "bytes"
+ "hash"
+ "hash/crc32"
+ "io"
+ "os"
+ "testing"
+
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/intents"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+)
+
+var testIntents = []*intents.Intent{
+ &intents.Intent{
+ DB: "foo",
+ C: "bar",
+ Location: "foo.bar",
+ },
+ &intents.Intent{
+ DB: "ding",
+ C: "bats",
+ Location: "ding.bats",
+ },
+ &intents.Intent{
+ DB: "flim",
+ C: "flam.fooey",
+ Location: "flim.flam.fooey",
+ },
+ &intents.Intent{
+ DB: "crow",
+ C: "bar",
+ Location: "crow.bar",
+ },
+}
+
+type testDoc struct {
+ Bar int
+ Baz string
+}
+
+type closingBuffer struct {
+ bytes.Buffer
+}
+
+func (*closingBuffer) Close() error {
+ return nil
+}
+
+type testNotifier struct{}
+
+func (n *testNotifier) Notify() {}
+
+func TestBasicMux(t *testing.T) {
+ var err error
+
+	Convey("with 10000 docs in each of four collections", t, func() {
+ buf := &closingBuffer{bytes.Buffer{}}
+
+ mux := NewMultiplexer(buf, new(testNotifier))
+ muxIns := map[string]*MuxIn{}
+
+ inChecksum := map[string]hash.Hash{}
+ inLengths := map[string]*int{}
+ outChecksum := map[string]hash.Hash{}
+ outLengths := map[string]*int{}
+
+ // To confirm that what we multiplex is the same as what we demultiplex, we
+ // create input and output hashes for each namespace. After we finish
+ // multiplexing and demultiplexing we will compare all of the CRCs for each
+ // namespace
+ errChan := make(chan error)
+ makeIns(testIntents, mux, inChecksum, muxIns, inLengths, errChan)
+
+ Convey("each document should be multiplexed", func() {
+ go mux.Run()
+
+ for range testIntents {
+ err := <-errChan
+ So(err, ShouldBeNil)
+ }
+ close(mux.Control)
+ err = <-mux.Completed
+ So(err, ShouldBeNil)
+
+ demux := &Demultiplexer{In: buf}
+ demuxOuts := map[string]*RegularCollectionReceiver{}
+
+ errChan := make(chan error)
+ makeOuts(testIntents, demux, outChecksum, demuxOuts, outLengths, errChan)
+
+ Convey("and demultiplexed successfully", func() {
+			err = demux.Run()
+ So(err, ShouldBeNil)
+
+ for range testIntents {
+ err := <-errChan
+ So(err, ShouldBeNil)
+ }
+ for _, dbc := range testIntents {
+ ns := dbc.Namespace()
+ So(*inLengths[ns], ShouldEqual, *outLengths[ns])
+ inSum := inChecksum[ns].Sum([]byte{})
+ outSum := outChecksum[ns].Sum([]byte{})
+ So(inSum, ShouldResemble, outSum)
+ }
+ })
+ })
+ })
+}
+
+func TestParallelMux(t *testing.T) {
+ Convey("parallel mux/demux over a pipe", t, func() {
+ readPipe, writePipe, err := os.Pipe()
+ So(err, ShouldBeNil)
+
+ mux := NewMultiplexer(writePipe, new(testNotifier))
+ muxIns := map[string]*MuxIn{}
+
+ demux := &Demultiplexer{In: readPipe}
+ demuxOuts := map[string]*RegularCollectionReceiver{}
+
+ inChecksum := map[string]hash.Hash{}
+ inLengths := map[string]*int{}
+
+ outChecksum := map[string]hash.Hash{}
+ outLengths := map[string]*int{}
+
+ writeErrChan := make(chan error)
+ readErrChan := make(chan error)
+
+ makeIns(testIntents, mux, inChecksum, muxIns, inLengths, writeErrChan)
+ makeOuts(testIntents, demux, outChecksum, demuxOuts, outLengths, readErrChan)
+
+ go demux.Run()
+ go mux.Run()
+
+ for range testIntents {
+ err := <-writeErrChan
+ So(err, ShouldBeNil)
+ err = <-readErrChan
+ So(err, ShouldBeNil)
+ }
+ close(mux.Control)
+ muxErr := <-mux.Completed
+ So(muxErr, ShouldBeNil)
+
+ for _, dbc := range testIntents {
+ ns := dbc.Namespace()
+ So(*inLengths[ns], ShouldEqual, *outLengths[ns])
+ inSum := inChecksum[ns].Sum([]byte{})
+ outSum := outChecksum[ns].Sum([]byte{})
+ So(inSum, ShouldResemble, outSum)
+ }
+ })
+}
+
+func makeIns(testIntents []*intents.Intent, mux *Multiplexer, inChecksum map[string]hash.Hash, muxIns map[string]*MuxIn, inLengths map[string]*int, errCh chan<- error) {
+ for index, dbc := range testIntents {
+ ns := dbc.Namespace()
+ sum := crc32.NewIEEE()
+ muxIn := &MuxIn{Intent: dbc, Mux: mux}
+ inLength := 0
+
+ inChecksum[ns] = sum
+ muxIns[ns] = muxIn
+ inLengths[ns] = &inLength
+
+ go func(index int) {
+ err := muxIn.Open()
+ if err != nil {
+ errCh <- err
+ return
+ }
+ staticBSONBuf := make([]byte, db.MaxBSONSize)
+ for i := 0; i < 10000; i++ {
+ bsonBytes, _ := bson.Marshal(testDoc{Bar: index * i, Baz: ns})
+ bsonBuf := staticBSONBuf[:len(bsonBytes)]
+ copy(bsonBuf, bsonBytes)
+ muxIn.Write(bsonBuf)
+ sum.Write(bsonBuf)
+ inLength += len(bsonBuf)
+ }
+ err = muxIn.Close()
+ errCh <- err
+ }(index)
+ }
+}
+
+func makeOuts(testIntents []*intents.Intent, demux *Demultiplexer, outChecksum map[string]hash.Hash, demuxOuts map[string]*RegularCollectionReceiver, outLengths map[string]*int, errCh chan<- error) {
+ for _, dbc := range testIntents {
+ ns := dbc.Namespace()
+ sum := crc32.NewIEEE()
+ muxOut := &RegularCollectionReceiver{
+ Intent: dbc,
+ Demux: demux,
+ Origin: ns,
+ }
+ outLength := 0
+
+ outChecksum[ns] = sum
+ demuxOuts[ns] = muxOut
+ outLengths[ns] = &outLength
+
+ demuxOuts[ns].Open()
+ go func() {
+ bs := make([]byte, db.MaxBSONSize)
+ var err error
+ for {
+ var length int
+ length, err = muxOut.Read(bs)
+ if err != nil {
+ break
+ }
+ sum.Write(bs[:length])
+ outLength += len(bs[:length])
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ errCh <- err
+ }()
+ }
+}
diff --git a/src/mongo/gotools/common/archive/parser.go b/src/mongo/gotools/common/archive/parser.go
new file mode 100644
index 00000000000..2dd75807a8b
--- /dev/null
+++ b/src/mongo/gotools/common/archive/parser.go
@@ -0,0 +1,152 @@
+package archive
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/mongodb/mongo-tools/common/db"
+)
+
+// parser.go implements the parsing of the low-level archive format.
+// The low-level archive format is defined as zero or more blocks,
+// where each block is defined as:
+//   a header BSON document
+//   zero or more body BSON documents
+//   a four byte terminator (0xFFFFFFFF)
+
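+// For illustration, a single block could be assembled like this (an editor's
+// sketch, not code used by the tools; "out" is any io.Writer):
+//
+//	header, _ := bson.Marshal(bson.M{"db": "test", "collection": "foo"})
+//	body, _ := bson.Marshal(bson.M{"_id": 1})
+//	out.Write(header)
+//	out.Write(body)
+//	out.Write([]byte{0xFF, 0xFF, 0xFF, 0xFF}) // the 0xFFFFFFFF terminator
+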
+// ParserConsumer is the interface that one needs to implement to consume data from the Parser
+type ParserConsumer interface {
+ HeaderBSON([]byte) error
+ BodyBSON([]byte) error
+ End() error
+}
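+
+// A minimal consumer might simply count documents (a sketch; testConsumer in
+// parser_test.go is a fuller example):
+//
+//	type countingConsumer struct{ headers, bodies int }
+//
+//	func (c *countingConsumer) HeaderBSON([]byte) error { c.headers++; return nil }
+//	func (c *countingConsumer) BodyBSON([]byte) error   { c.bodies++; return nil }
+//	func (c *countingConsumer) End() error              { return nil }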
+
+// Parser encapsulates the small amount of state that the parser needs to keep
+type Parser struct {
+ In io.Reader
+ buf [db.MaxBSONSize]byte
+ length int
+}
+
+type parserError struct {
+ Err error
+ Msg string
+}
+
+// Error implements the error interface. It formats a parserError for human readability.
+func (pe *parserError) Error() string {
+ err := fmt.Sprintf("corruption found in archive; %v", pe.Msg)
+ if pe.Err != nil {
+ err = fmt.Sprintf("%v ( %v )", err, pe.Err)
+ }
+ return err
+}
+
+// newParserError creates a parserError with just a message
+func newParserError(msg string) error {
+ return &parserError{
+ Msg: msg,
+ }
+}
+
+// newParserWrappedError creates a parserError with a message as well as an underlying cause error
+func newParserWrappedError(msg string, err error) error {
+ return &parserError{
+ Err: err,
+ Msg: msg,
+ }
+}
+
+// readBSONOrTerminator reads at least four bytes and determines
+// whether the first four bytes are a terminator, a BSON length, or something else.
+// If they are a terminator, true, nil is returned. If they are a BSON length,
+// then the remainder of the BSON document is read into the parser; otherwise
+// an error is returned.
+func (parse *Parser) readBSONOrTerminator() (isTerminator bool, err error) {
+ parse.length = 0
+ _, err = io.ReadFull(parse.In, parse.buf[0:4])
+ if err == io.EOF {
+ return false, err
+ }
+ if err != nil {
+ return false, newParserWrappedError("I/O error reading length or terminator", err)
+ }
+ size := int32(
+ (uint32(parse.buf[0]) << 0) |
+ (uint32(parse.buf[1]) << 8) |
+ (uint32(parse.buf[2]) << 16) |
+ (uint32(parse.buf[3]) << 24),
+ )
+ if size == terminator {
+ return true, nil
+ }
+ if size < minBSONSize || size > db.MaxBSONSize {
+		return false, newParserError(fmt.Sprintf("%v is neither a valid bson length nor an archive terminator", size))
+ }
+ // TODO Because we're reusing this same buffer for all of our IO, we are basically guaranteeing that we'll
+ // copy the bytes twice. At some point we should fix this. It's slightly complex, because we'll need consumer
+ // methods closing one buffer and acquiring another
+ _, err = io.ReadFull(parse.In, parse.buf[4:size])
+ if err != nil {
+ // any error, including EOF is an error so we wrap it up
+ return false, newParserWrappedError("read bson", err)
+ }
+ if parse.buf[size-1] != 0x00 {
+ return false, newParserError(fmt.Sprintf("bson (size: %v, byte: %d) doesn't end with a null byte", size, parse.buf[size-1]))
+ }
+ parse.length = int(size)
+ return false, nil
+}
+
+// ReadAllBlocks calls ReadBlock() until it returns an error.
+// If the error is EOF, then nil is returned, otherwise it returns the error
+func (parse *Parser) ReadAllBlocks(consumer ParserConsumer) (err error) {
+ for err == nil {
+ err = parse.ReadBlock(consumer)
+ }
+ if err == io.EOF {
+ return nil
+ }
+ return err
+}
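+
+// Typical usage (a sketch; "archiveReader" stands for any io.Reader positioned
+// just past the archive's magic number):
+//
+//	parser := &Parser{In: archiveReader}
+//	if err := parser.ReadAllBlocks(consumer); err != nil {
+//		// a parserError, or an error returned by one of the consumer methods
+//	}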
+
+// ReadBlock reads one archive block ( header + body* + terminator ),
+// calling consumer.HeaderBSON() on the header, consumer.BodyBSON() on each piece of body,
+// and consumer.End() when EOF is encountered before any data was read.
+// It returns nil if a whole block was read, io.EOF if nothing was read,
+// and a parserError if there was any io error in the middle of the block,
+// if either of the consumer methods returns an error, or if there was any sort of
+// parsing failure.
+func (parse *Parser) ReadBlock(consumer ParserConsumer) (err error) {
+ isTerminator, err := parse.readBSONOrTerminator()
+ if err == io.EOF {
+ handlerErr := consumer.End()
+ if handlerErr != nil {
+ return newParserWrappedError("ParserConsumer.End", handlerErr)
+ }
+ return err
+ }
+ if err != nil {
+ return err
+ }
+ if isTerminator {
+ return newParserError("consecutive terminators / headerless blocks are not allowed")
+ }
+ err = consumer.HeaderBSON(parse.buf[:parse.length])
+ if err != nil {
+ return newParserWrappedError("ParserConsumer.HeaderBSON()", err)
+ }
+ for {
+ isTerminator, err = parse.readBSONOrTerminator()
+		if err != nil { // all errors, including EOF, are errors here
+			return newParserWrappedError("read body bson or terminator", err)
+ }
+ if isTerminator {
+ return nil
+ }
+ err = consumer.BodyBSON(parse.buf[:parse.length])
+ if err != nil {
+ return newParserWrappedError("ParserConsumer.BodyBSON()", err)
+ }
+ }
+}
diff --git a/src/mongo/gotools/common/archive/parser_test.go b/src/mongo/gotools/common/archive/parser_test.go
new file mode 100644
index 00000000000..dba7e1d0263
--- /dev/null
+++ b/src/mongo/gotools/common/archive/parser_test.go
@@ -0,0 +1,150 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+)
+
+type testConsumer struct {
+ headers []string // header data
+ bodies []string // body data
+ eof bool
+}
+
+func (tc *testConsumer) HeaderBSON(b []byte) error {
+ ss := strStruct{}
+ err := bson.Unmarshal(b, &ss)
+ tc.headers = append(tc.headers, ss.Str)
+ return err
+}
+
+func (tc *testConsumer) BodyBSON(b []byte) error {
+ ss := strStruct{}
+ err := bson.Unmarshal(b, &ss)
+ tc.bodies = append(tc.bodies, ss.Str)
+ return err
+}
+
+func (tc *testConsumer) End() (err error) {
+ if tc.eof {
+ err = fmt.Errorf("double end")
+ }
+ tc.eof = true
+ return err
+}
+
+type strStruct struct {
+ Str string
+}
+
+var term = []byte{0xFF, 0xFF, 0xFF, 0xFF}
+var notTerm = []byte{0xFF, 0xFF, 0xFF, 0xFE}
+
+func TestParsing(t *testing.T) {
+
+ Convey("With a parser with a simple parser consumer", t, func() {
+ tc := &testConsumer{}
+ parser := Parser{}
+ Convey("a well formed header and body data parse correctly", func() {
+ buf := bytes.Buffer{}
+ b, _ := bson.Marshal(strStruct{"header"})
+ buf.Write(b)
+ b, _ = bson.Marshal(strStruct{"body"})
+ buf.Write(b)
+ buf.Write(term)
+ parser.In = &buf
+ err := parser.ReadBlock(tc)
+ So(err, ShouldBeNil)
+ So(tc.eof, ShouldBeFalse)
+ So(tc.headers[0], ShouldEqual, "header")
+ So(tc.bodies[0], ShouldEqual, "body")
+
+ err = parser.ReadBlock(tc)
+ So(err, ShouldEqual, io.EOF)
+ So(tc.eof, ShouldBeTrue)
+ })
+ Convey("a well formed header and multiple body datas parse correctly", func() {
+ buf := bytes.Buffer{}
+ b, _ := bson.Marshal(strStruct{"header"})
+ buf.Write(b)
+ b, _ = bson.Marshal(strStruct{"body0"})
+ buf.Write(b)
+ b, _ = bson.Marshal(strStruct{"body1"})
+ buf.Write(b)
+ b, _ = bson.Marshal(strStruct{"body2"})
+ buf.Write(b)
+ buf.Write(term)
+ parser.In = &buf
+ err := parser.ReadBlock(tc)
+ So(err, ShouldBeNil)
+ So(tc.eof, ShouldBeFalse)
+ So(tc.headers[0], ShouldEqual, "header")
+ So(tc.bodies[0], ShouldEqual, "body0")
+ So(tc.bodies[1], ShouldEqual, "body1")
+ So(tc.bodies[2], ShouldEqual, "body2")
+
+ err = parser.ReadBlock(tc)
+ So(err, ShouldEqual, io.EOF)
+ So(tc.eof, ShouldBeTrue)
+ })
+ Convey("an incorrect terminator should cause an error", func() {
+ buf := bytes.Buffer{}
+ b, _ := bson.Marshal(strStruct{"header"})
+ buf.Write(b)
+ b, _ = bson.Marshal(strStruct{"body"})
+ buf.Write(b)
+ buf.Write(notTerm)
+ parser.In = &buf
+ err := parser.ReadBlock(tc)
+ So(err, ShouldNotBeNil)
+ })
+ Convey("an empty block should result in EOF", func() {
+ buf := bytes.Buffer{}
+ parser.In = &buf
+ err := parser.ReadBlock(tc)
+ So(err, ShouldEqual, io.EOF)
+ So(tc.eof, ShouldBeTrue)
+ })
+		Convey("an error coming from the consumer should propagate through the parser", func() {
+ tc.eof = true
+ buf := bytes.Buffer{}
+ parser.In = &buf
+ err := parser.ReadBlock(tc)
+ So(err.Error(), ShouldContainSubstring, "double end")
+ })
+ Convey("a partial block should result in a non-EOF error", func() {
+ buf := bytes.Buffer{}
+ b, _ := bson.Marshal(strStruct{"header"})
+ buf.Write(b)
+ b, _ = bson.Marshal(strStruct{"body"})
+ buf.Write(b)
+ parser.In = &buf
+ err := parser.ReadBlock(tc)
+ So(err, ShouldNotBeNil)
+ So(tc.eof, ShouldBeFalse)
+ So(tc.headers[0], ShouldEqual, "header")
+ So(tc.bodies[0], ShouldEqual, "body")
+ })
+		Convey("a block with a missing terminator should result in a non-EOF error", func() {
+ buf := bytes.Buffer{}
+ b, _ := bson.Marshal(strStruct{"header"})
+ buf.Write(b)
+ b, _ = bson.Marshal(strStruct{"body"})
+ buf.Write(b[:len(b)-1])
+ buf.Write([]byte{0x01})
+ buf.Write(notTerm)
+ parser.In = &buf
+ err := parser.ReadBlock(tc)
+ So(err, ShouldNotBeNil)
+ So(tc.eof, ShouldBeFalse)
+ So(tc.headers[0], ShouldEqual, "header")
+ So(tc.bodies, ShouldBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/archive/prelude.go b/src/mongo/gotools/common/archive/prelude.go
new file mode 100644
index 00000000000..a006924fa5a
--- /dev/null
+++ b/src/mongo/gotools/common/archive/prelude.go
@@ -0,0 +1,385 @@
+package archive
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "path/filepath"
+ "sync/atomic"
+
+ "github.com/mongodb/mongo-tools/common"
+ "github.com/mongodb/mongo-tools/common/intents"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// MetadataFile implements intents.file.
+type MetadataFile struct {
+ pos int64 // updated atomically, aligned at the beginning of the struct
+ *bytes.Buffer
+ Intent *intents.Intent
+}
+
+func (md *MetadataFile) Open() error {
+ return nil
+}
+func (md *MetadataFile) Close() error {
+ return nil
+}
+
+func (md *MetadataFile) Read(p []byte) (int, error) {
+ n, err := md.Buffer.Read(p)
+ atomic.AddInt64(&md.pos, int64(n))
+ return n, err
+}
+
+func (md *MetadataFile) Pos() int64 {
+ return atomic.LoadInt64(&md.pos)
+}
+
+// DirLike represents the set of operations performed on directories and files in dump
+// directories, or in archives, when mongorestore is figuring out what intents to create.
+type DirLike interface {
+ Name() string
+ Path() string
+ Size() int64
+ IsDir() bool
+ Stat() (DirLike, error)
+ ReadDir() ([]DirLike, error)
+ Parent() DirLike
+}
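+
+// A sketch (editor's illustration) of walking a DirLike tree, whether backed
+// by a real dump directory or by a PreludeExplorer:
+//
+//	func walk(d DirLike, visit func(DirLike)) error {
+//		visit(d)
+//		if !d.IsDir() {
+//			return nil
+//		}
+//		children, err := d.ReadDir()
+//		if err != nil {
+//			return err
+//		}
+//		for _, child := range children {
+//			if err := walk(child, visit); err != nil {
+//				return err
+//			}
+//		}
+//		return nil
+//	}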
+
+// Prelude represents the knowledge gleaned from reading the prelude out of the archive.
+type Prelude struct {
+ Header *Header
+ DBS []string
+ NamespaceMetadatas []*CollectionMetadata
+ NamespaceMetadatasByDB map[string][]*CollectionMetadata
+}
+
+// Read consumes and checks the magic number at the beginning of the archive,
+// then it runs the parser with a preludeParserConsumer wrapping the Prelude.
+func (prelude *Prelude) Read(in io.Reader) error {
+ readMagicNumberBuf := make([]byte, 4)
+ _, err := io.ReadAtLeast(in, readMagicNumberBuf, 4)
+ if err != nil {
+ return fmt.Errorf("I/O failure reading beginning of archive: %v", err)
+ }
+ readMagicNumber := uint32(
+ (uint32(readMagicNumberBuf[0]) << 0) |
+ (uint32(readMagicNumberBuf[1]) << 8) |
+ (uint32(readMagicNumberBuf[2]) << 16) |
+ (uint32(readMagicNumberBuf[3]) << 24),
+ )
+
+ if readMagicNumber != MagicNumber {
+ return fmt.Errorf("stream or file does not appear to be a mongodump archive")
+ }
+
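+	// reset the by-database index if this prelude already has one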
+ if prelude.NamespaceMetadatasByDB != nil {
+ prelude.NamespaceMetadatasByDB = make(map[string][]*CollectionMetadata, 0)
+ }
+
+ parser := Parser{In: in}
+ parserConsumer := &preludeParserConsumer{prelude: prelude}
+ return parser.ReadBlock(parserConsumer)
+}
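+
+// A sketch of reading a prelude from an archive file (illustrative only;
+// error handling elided):
+//
+//	f, _ := os.Open("dump.archive")
+//	prelude := &Prelude{}
+//	if err := prelude.Read(f); err != nil {
+//		// not a mongodump archive, or a corrupt prelude
+//	}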
+
+// NewPrelude generates a Prelude using the contents of an intent.Manager.
+func NewPrelude(manager *intents.Manager, concurrentColls int, serverVersion string) (*Prelude, error) {
+ prelude := Prelude{
+ Header: &Header{
+ FormatVersion: archiveFormatVersion,
+ ServerVersion: serverVersion,
+ ToolVersion: options.VersionStr,
+ ConcurrentCollections: int32(concurrentColls),
+ },
+ NamespaceMetadatasByDB: make(map[string][]*CollectionMetadata, 0),
+ }
+ allIntents := manager.Intents()
+ for _, intent := range allIntents {
+ if intent.MetadataFile != nil {
+ archiveMetadata, ok := intent.MetadataFile.(*MetadataFile)
+ if !ok {
+			return nil, fmt.Errorf("MetadataFile is not an archive.MetadataFile")
+ }
+ prelude.AddMetadata(&CollectionMetadata{
+ Database: intent.DB,
+ Collection: intent.C,
+ Metadata: archiveMetadata.Buffer.String(),
+ })
+ } else {
+ prelude.AddMetadata(&CollectionMetadata{
+ Database: intent.DB,
+ Collection: intent.C,
+ })
+ }
+ }
+ return &prelude, nil
+}
+
+// AddMetadata adds a metadata data structure to a prelude and does the required bookkeeping.
+func (prelude *Prelude) AddMetadata(cm *CollectionMetadata) {
+ prelude.NamespaceMetadatas = append(prelude.NamespaceMetadatas, cm)
+ if prelude.NamespaceMetadatasByDB == nil {
+ prelude.NamespaceMetadatasByDB = make(map[string][]*CollectionMetadata)
+ }
+ _, ok := prelude.NamespaceMetadatasByDB[cm.Database]
+ if !ok {
+ prelude.DBS = append(prelude.DBS, cm.Database)
+ }
+ prelude.NamespaceMetadatasByDB[cm.Database] = append(prelude.NamespaceMetadatasByDB[cm.Database], cm)
+ log.Logvf(log.Info, "archive prelude %v.%v", cm.Database, cm.Collection)
+}
+
+// Write writes the archive prelude: the magic number, the header, the collection metadata, and a terminator.
+func (prelude *Prelude) Write(out io.Writer) error {
+ magicNumberBytes := make([]byte, 4)
+ for i := range magicNumberBytes {
+ magicNumberBytes[i] = byte(uint32(MagicNumber) >> uint(i*8))
+ }
+ _, err := out.Write(magicNumberBytes)
+ if err != nil {
+ return err
+ }
+ buf, err := bson.Marshal(prelude.Header)
+ if err != nil {
+ return err
+ }
+ _, err = out.Write(buf)
+ if err != nil {
+ return err
+ }
+ for _, cm := range prelude.NamespaceMetadatas {
+ buf, err = bson.Marshal(cm)
+ if err != nil {
+ return err
+ }
+ _, err = out.Write(buf)
+ if err != nil {
+ return err
+ }
+ }
+ _, err = out.Write(terminatorBytes)
+ if err != nil {
+ return err
+ }
+ return nil
+}
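+
+// The resulting stream layout is therefore:
+//
+//	magic number (4 bytes, little-endian) | header BSON | collection metadata BSON, repeated | terminator (0xFFFFFFFF)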
+
+// preludeParserConsumer wraps a Prelude, and implements ParserConsumer.
+type preludeParserConsumer struct {
+ prelude *Prelude
+}
+
+// HeaderBSON is part of the ParserConsumer interface; it unmarshals archive Headers.
+func (hpc *preludeParserConsumer) HeaderBSON(data []byte) error {
+ hpc.prelude.Header = &Header{}
+ err := bson.Unmarshal(data, hpc.prelude.Header)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// BodyBSON is part of the ParserConsumer interface; it unmarshals CollectionMetadata documents.
+func (hpc *preludeParserConsumer) BodyBSON(data []byte) error {
+ cm := &CollectionMetadata{}
+ err := bson.Unmarshal(data, cm)
+ if err != nil {
+ return err
+ }
+ hpc.prelude.AddMetadata(cm)
+ return nil
+}
+
+// End is part of the ParserConsumer interface.
+func (hpc *preludeParserConsumer) End() error {
+ return nil
+}
+
+// PreludeExplorer implements DirLike. PreludeExplorer represents the databases, collections,
+// and their metadata json files of an archive, in such a way that they can be explored like a filesystem.
+type PreludeExplorer struct {
+ prelude *Prelude
+ database string
+ collection string
+ isMetadata bool
+}
+
+// NewPreludeExplorer creates a PreludeExplorer from a Prelude.
+func (prelude *Prelude) NewPreludeExplorer() (*PreludeExplorer, error) {
+ pe := &PreludeExplorer{
+ prelude: prelude,
+ }
+ return pe, nil
+}
+
+// Name is part of the DirLike interface. It synthesizes a filename for the given "location" in the prelude.
+func (pe *PreludeExplorer) Name() string {
+ if pe.collection == "" {
+ return pe.database
+ }
+ if pe.isMetadata {
+ return pe.collection + ".metadata.json"
+ }
+ return pe.collection + ".bson"
+}
+
+// Path is part of the DirLike interface. It creates the full path for the "location" in the prelude.
+func (pe *PreludeExplorer) Path() string {
+ if pe.collection == "" {
+ return pe.database
+ }
+ if pe.database == "" {
+ return pe.Name()
+ }
+ return filepath.Join(pe.database, pe.Name())
+}
+
+// Size is part of the DirLike interface. It returns the size from the metadata
+// of the prelude, if the "location" is a collection.
+func (pe *PreludeExplorer) Size() int64 {
+ if pe.IsDir() {
+ return 0
+ }
+ for _, ns := range pe.prelude.NamespaceMetadatas {
+ if ns.Database == pe.database && ns.Collection == pe.collection {
+ return int64(ns.Size)
+ }
+ }
+ return 0
+}
+
+// IsDir is part of the DirLike interface. All PreludeExplorers that are not collections are directories.
+func (pe *PreludeExplorer) IsDir() bool {
+ return pe.collection == ""
+}
+
+// Stat is part of the DirLike interface. os.Stat returns a FileInfo, and since
+// DirLike is similar to FileInfo, we just return the pe here.
+func (pe *PreludeExplorer) Stat() (DirLike, error) {
+ return pe, nil
+}
+
+// ReadDir is part of the DirLike interface. ReadDir generates a list of PreludeExplorers
+// whose "locations" are encapsulated by the current pe's "location".
+//
+// "dump/oplog.bson" => &PreludeExplorer{ database: "", collection: "oplog.bson" }
+// "dump/test/" => &PreludeExplorer{ database: "test", collection: "" }
+// "dump/test/foo.bson" => &PreludeExplorer{ database: "test", collection: "foo" }
+// "dump/test/foo.metadata.json" => &PreludeExplorer{ database: "test", collection: "foo", isMetadata: true }
+//
+func (pe *PreludeExplorer) ReadDir() ([]DirLike, error) {
+ if !pe.IsDir() {
+ return nil, fmt.Errorf("not a directory")
+ }
+ pes := []DirLike{}
+ if pe.database == "" {
+		// When reading the top level of the archive, we need to return all of the
+		// collections that are not bound to a database (e.g. the oplog), and then all
+		// of the databases. The prelude stores all top-level collections as collections
+		// in the "" database.
+ topLevelNamespaceMetadatas, ok := pe.prelude.NamespaceMetadatasByDB[""]
+ if ok {
+ for _, topLevelNamespaceMetadata := range topLevelNamespaceMetadatas {
+ pes = append(pes, &PreludeExplorer{
+ prelude: pe.prelude,
+ collection: topLevelNamespaceMetadata.Collection,
+ })
+ if topLevelNamespaceMetadata.Metadata != "" {
+ pes = append(pes, &PreludeExplorer{
+ prelude: pe.prelude,
+ collection: topLevelNamespaceMetadata.Collection,
+ isMetadata: true,
+ })
+ }
+ }
+ }
+ for _, db := range pe.prelude.DBS {
+ pes = append(pes, &PreludeExplorer{
+ prelude: pe.prelude,
+ database: db,
+ })
+ }
+ } else {
+ // when reading the contents of a database directory, we just return all of the bson and
+ // json files for all of the collections bound to that database
+ namespaceMetadatas, ok := pe.prelude.NamespaceMetadatasByDB[pe.database]
+ if !ok {
+ return nil, fmt.Errorf("no such directory") //TODO: replace with real ERRNOs?
+ }
+ for _, namespaceMetadata := range namespaceMetadatas {
+ pes = append(pes, &PreludeExplorer{
+ prelude: pe.prelude,
+ database: pe.database,
+ collection: namespaceMetadata.Collection,
+ })
+ if namespaceMetadata.Metadata != "" {
+ pes = append(pes, &PreludeExplorer{
+ prelude: pe.prelude,
+ database: pe.database,
+ collection: namespaceMetadata.Collection,
+ isMetadata: true,
+ })
+ }
+ }
+ }
+ return pes, nil
+}
+
+// Parent is part of the DirLike interface. It returns a pe without a collection, if there is one,
+// otherwise, without a database.
+func (pe *PreludeExplorer) Parent() DirLike {
+ if pe.collection != "" {
+ return &PreludeExplorer{
+ prelude: pe.prelude,
+ database: pe.database,
+ }
+ }
+ return &PreludeExplorer{
+ prelude: pe.prelude,
+ }
+}
+
+// MetadataPreludeFile implements the intents.file interface. It allows the metadata contained in the prelude to be opened and read.
+type MetadataPreludeFile struct {
+ pos int64 // updated atomically, aligned at the beginning of the struct
+ Intent *intents.Intent
+ Origin string
+ Prelude *Prelude
+ *bytes.Buffer
+}
+
+// Open is part of the intents.file interface; it finds the metadata in the prelude and creates a bytes.Buffer from it.
+func (mpf *MetadataPreludeFile) Open() error {
+ db, c := common.SplitNamespace(mpf.Origin)
+ dbMetadatas, ok := mpf.Prelude.NamespaceMetadatasByDB[db]
+ if !ok {
+ return fmt.Errorf("no metadata found for '%s'", db)
+ }
+ for _, metadata := range dbMetadatas {
+ if metadata.Collection == c {
+ mpf.Buffer = bytes.NewBufferString(metadata.Metadata)
+ return nil
+ }
+ }
+ return fmt.Errorf("no matching metadata found for '%s'", mpf.Origin)
+}
+
+// Close is part of the intents.file interface.
+func (mpf *MetadataPreludeFile) Close() error {
+ mpf.Buffer = nil
+ return nil
+}
+
+func (mpf *MetadataPreludeFile) Read(p []byte) (int, error) {
+ n, err := mpf.Buffer.Read(p)
+ atomic.AddInt64(&mpf.pos, int64(n))
+ return n, err
+}
+
+func (mpf *MetadataPreludeFile) Pos() int64 {
+ return atomic.LoadInt64(&mpf.pos)
+}
diff --git a/src/mongo/gotools/common/archive/prelude_test.go b/src/mongo/gotools/common/archive/prelude_test.go
new file mode 100644
index 00000000000..ba57346d4b8
--- /dev/null
+++ b/src/mongo/gotools/common/archive/prelude_test.go
@@ -0,0 +1,57 @@
+package archive
+
+import (
+ "bytes"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestPrelude(t *testing.T) {
+ var err error
+
+ Convey("WritePrelude/ReadPrelude roundtrip", t, func() {
+
+ cm1 := &CollectionMetadata{
+ Database: "db1",
+ Collection: "c1",
+ Metadata: "m1",
+ }
+ cm2 := &CollectionMetadata{
+ Database: "db1",
+ Collection: "c2",
+ Metadata: "m2",
+ }
+ cm3 := &CollectionMetadata{
+ Database: "db2",
+ Collection: "c3",
+ Metadata: "m3",
+ }
+ cm4 := &CollectionMetadata{
+ Database: "db3",
+ Collection: "c4",
+ Metadata: "m4",
+ }
+
+ archivePrelude := &Prelude{
+ Header: &Header{
+ FormatVersion: "version-foo",
+ },
+ NamespaceMetadatas: []*CollectionMetadata{cm1, cm2, cm3, cm4},
+ DBS: []string{"db1", "db2", "db3"},
+ NamespaceMetadatasByDB: map[string][]*CollectionMetadata{
+ "db1": []*CollectionMetadata{cm1, cm2},
+ "db2": []*CollectionMetadata{cm3},
+ "db3": []*CollectionMetadata{cm4},
+ },
+ }
+ buf := &bytes.Buffer{}
+ err = archivePrelude.Write(buf)
+ So(err, ShouldBeNil)
+ archivePrelude2 := &Prelude{}
+ err := archivePrelude2.Read(buf)
+ So(err, ShouldBeNil)
+ So(archivePrelude2, ShouldResemble, archivePrelude)
+ })
+}
diff --git a/src/mongo/gotools/common/auth/auth_info.go b/src/mongo/gotools/common/auth/auth_info.go
new file mode 100644
index 00000000000..a420770ebf2
--- /dev/null
+++ b/src/mongo/gotools/common/auth/auth_info.go
@@ -0,0 +1,63 @@
+// Package auth provides utilities for performing tasks related to authentication.
+package auth
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/db"
+ "gopkg.in/mgo.v2/bson"
+ "strings"
+)
+
+// GetAuthVersion gets the authentication schema version of the connected server
+// and returns that value as an integer along with any error that occurred.
+func GetAuthVersion(commander db.CommandRunner) (int, error) {
+ results := bson.M{}
+ err := commander.Run(
+ bson.D{
+ {"getParameter", 1},
+ {"authSchemaVersion", 1},
+ },
+ &results,
+ "admin",
+ )
+
+ if err != nil {
+ errMessage := err.Error()
+		// as a necessary hack, if the error message takes a certain form,
+		// we can infer version 1. This is because early versions of mongodb
+		// had no concept of an "auth schema version", so asking for the
+		// authSchemaVersion value will return a "no option found" or "no such cmd" error
+ if errMessage == "no option found to get" ||
+ strings.HasPrefix(errMessage, "no such cmd") {
+ return 1, nil
+ }
+ // otherwise it's a connection error, so bubble it up
+ return 0, err
+ }
+
+ version, ok := results["authSchemaVersion"].(int)
+ if !ok {
+ // very unlikely this will ever happen
+ return 0, fmt.Errorf(
+ "getParameter command returned non-numeric result: %v",
+ results["authSchemaVersion"])
+ }
+ return version, nil
+}
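+
+// Typical usage (a sketch; any db.CommandRunner can serve as the commander):
+//
+//	version, err := GetAuthVersion(commander)
+//	if err == nil {
+//		// act on the auth schema version
+//	}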
+
+// VerifySystemAuthVersion returns an error if authentication is not set up for
+// the given server.
+func VerifySystemAuthVersion(sessionProvider *db.SessionProvider) error {
+ session, err := sessionProvider.GetSession()
+ if err != nil {
+ return fmt.Errorf("error getting session from server: %v", err)
+ }
+ defer session.Close()
+ versionEntries := session.DB("admin").C("system.version")
+ if count, err := versionEntries.Count(); err != nil {
+		return fmt.Errorf("error checking presence of auth version: %v", err)
+ } else if count == 0 {
+ return fmt.Errorf("found no auth version")
+ }
+ return nil
+}
diff --git a/src/mongo/gotools/common/bsonutil/bsonutil.go b/src/mongo/gotools/common/bsonutil/bsonutil.go
new file mode 100644
index 00000000000..2bda211d547
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/bsonutil.go
@@ -0,0 +1,416 @@
+// Package bsonutil provides utilities for processing BSON data.
+package bsonutil
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "strconv"
+ "time"
+)
+
+var ErrNoSuchField = errors.New("no such field")
+
+// ConvertJSONDocumentToBSON iterates through the document map and converts JSON
+// values to their corresponding BSON values. It also replaces any extended JSON
+// type value (e.g. $date) with the corresponding BSON type.
+func ConvertJSONDocumentToBSON(doc map[string]interface{}) error {
+ for key, jsonValue := range doc {
+ var bsonValue interface{}
+ var err error
+
+ switch v := jsonValue.(type) {
+ case map[string]interface{}, bson.D: // subdocument
+ bsonValue, err = ParseSpecialKeys(v)
+ default:
+ bsonValue, err = ConvertJSONValueToBSON(v)
+ }
+ if err != nil {
+ return err
+ }
+
+ doc[key] = bsonValue
+ }
+ return nil
+}
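+
+// For example (a sketch; these are two of the extended JSON forms handled by
+// ParseSpecialKeys below):
+//
+//	doc := map[string]interface{}{
+//		"_id":  map[string]interface{}{"$oid": "0123456789abcdef01234567"},
+//		"size": map[string]interface{}{"$numberLong": "123"},
+//	}
+//	err := ConvertJSONDocumentToBSON(doc)
+//	// doc["_id"] is now a bson.ObjectId and doc["size"] an int64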
+
+// GetExtendedBsonD iterates through the document and returns a bson.D that adds type
+// information for each key in the document.
+func GetExtendedBsonD(doc bson.D) (bson.D, error) {
+ var err error
+ var bsonDoc bson.D
+ for _, docElem := range doc {
+ var bsonValue interface{}
+ switch v := docElem.Value.(type) {
+ case map[string]interface{}, bson.D: // subdocument
+ bsonValue, err = ParseSpecialKeys(v)
+ default:
+ bsonValue, err = ConvertJSONValueToBSON(v)
+ }
+ if err != nil {
+ return nil, err
+ }
+ bsonDoc = append(bsonDoc, bson.DocElem{
+ Name: docElem.Name,
+ Value: bsonValue,
+ })
+ }
+ return bsonDoc, nil
+}
+
+// FindValueByKey returns the value of keyName in document. If keyName is not found
+// in the top-level of the document, ErrNoSuchField is returned as the error.
+func FindValueByKey(keyName string, document *bson.D) (interface{}, error) {
+ for _, key := range *document {
+ if key.Name == keyName {
+ return key.Value, nil
+ }
+ }
+ return nil, ErrNoSuchField
+}
+
+// ParseSpecialKeys takes a JSON document and inspects it for any extended JSON
+// type (e.g. $numberLong) and replaces any such values with the corresponding
+// BSON type.
+func ParseSpecialKeys(special interface{}) (interface{}, error) {
+ // first ensure we are using a correct document type
+ var doc map[string]interface{}
+ switch v := special.(type) {
+ case bson.D:
+ doc = v.Map()
+ case map[string]interface{}:
+ doc = v
+ default:
+ return nil, fmt.Errorf("%v (type %T) is not valid input to ParseSpecialKeys", special, special)
+ }
+ // check document to see if it is special
+ switch len(doc) {
+ case 1: // document has a single field
+ if jsonValue, ok := doc["$date"]; ok {
+ switch v := jsonValue.(type) {
+ case string:
+ return util.FormatDate(v)
+ case bson.D:
+ asMap := v.Map()
+ if jsonValue, ok := asMap["$numberLong"]; ok {
+ n, err := parseNumberLongField(jsonValue)
+ if err != nil {
+ return nil, err
+ }
+ return time.Unix(n/1e3, n%1e3*1e6), err
+ }
+ return nil, errors.New("expected $numberLong field in $date")
+ case map[string]interface{}:
+ if jsonValue, ok := v["$numberLong"]; ok {
+ n, err := parseNumberLongField(jsonValue)
+ if err != nil {
+ return nil, err
+ }
+ return time.Unix(n/1e3, n%1e3*1e6), err
+ }
+ return nil, errors.New("expected $numberLong field in $date")
+
+ case json.Number:
+ n, err := v.Int64()
+ return time.Unix(n/1e3, n%1e3*1e6), err
+ case float64:
+ n := int64(v)
+ return time.Unix(n/1e3, n%1e3*1e6), nil
+ case int32:
+ n := int64(v)
+ return time.Unix(n/1e3, n%1e3*1e6), nil
+ case int64:
+ return time.Unix(v/1e3, v%1e3*1e6), nil
+
+ case json.ISODate:
+ return v, nil
+
+ default:
+ return nil, errors.New("invalid type for $date field")
+ }
+ }
+
+ if jsonValue, ok := doc["$code"]; ok {
+ switch v := jsonValue.(type) {
+ case string:
+ return bson.JavaScript{Code: v}, nil
+ default:
+ return nil, errors.New("expected $code field to have string value")
+ }
+ }
+
+ if jsonValue, ok := doc["$oid"]; ok {
+ switch v := jsonValue.(type) {
+ case string:
+ if !bson.IsObjectIdHex(v) {
+				return nil, errors.New("expected $oid field to contain 24 hexadecimal characters")
+ }
+ return bson.ObjectIdHex(v), nil
+
+ default:
+ return nil, errors.New("expected $oid field to have string value")
+ }
+ }
+
+ if jsonValue, ok := doc["$numberLong"]; ok {
+ return parseNumberLongField(jsonValue)
+ }
+
+ if jsonValue, ok := doc["$numberInt"]; ok {
+ switch v := jsonValue.(type) {
+ case string:
+ // all of decimal, hex, and octal are supported here
+ n, err := strconv.ParseInt(v, 0, 32)
+ return int32(n), err
+
+ default:
+ return nil, errors.New("expected $numberInt field to have string value")
+ }
+ }
+
+ if jsonValue, ok := doc["$timestamp"]; ok {
+ ts := json.Timestamp{}
+
+ var tsDoc map[string]interface{}
+ switch internalDoc := jsonValue.(type) {
+ case map[string]interface{}:
+ tsDoc = internalDoc
+ case bson.D:
+ tsDoc = internalDoc.Map()
+ default:
+ return nil, errors.New("expected $timestamp key to have internal document")
+ }
+
+ if seconds, ok := tsDoc["t"]; ok {
+ if asUint32, err := util.ToUInt32(seconds); err == nil {
+ ts.Seconds = asUint32
+ } else {
+ return nil, errors.New("expected $timestamp 't' field to be a numeric type")
+ }
+ } else {
+ return nil, errors.New("expected $timestamp to have 't' field")
+ }
+ if inc, ok := tsDoc["i"]; ok {
+ if asUint32, err := util.ToUInt32(inc); err == nil {
+ ts.Increment = asUint32
+ } else {
+ return nil, errors.New("expected $timestamp 'i' field to be a numeric type")
+ }
+ } else {
+ return nil, errors.New("expected $timestamp to have 'i' field")
+ }
+ // see BSON spec for details on the bit fiddling here
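+		// e.g. {t: 803434343, i: 9} packs to (803434343 << 32) | 9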
+ return bson.MongoTimestamp(int64(ts.Seconds)<<32 | int64(ts.Increment)), nil
+ }
+
+ if jsonValue, ok := doc["$numberDecimal"]; ok {
+ switch v := jsonValue.(type) {
+ case string:
+ return bson.ParseDecimal128(v)
+ default:
+ return nil, errors.New("expected $numberDecimal field to have string value")
+ }
+ }
+
+ if _, ok := doc["$undefined"]; ok {
+ return bson.Undefined, nil
+ }
+
+ if _, ok := doc["$maxKey"]; ok {
+ return bson.MaxKey, nil
+ }
+
+ if _, ok := doc["$minKey"]; ok {
+ return bson.MinKey, nil
+ }
+
+ case 2: // document has two fields
+ if jsonValue, ok := doc["$code"]; ok {
+ code := bson.JavaScript{}
+ switch v := jsonValue.(type) {
+ case string:
+ code.Code = v
+ default:
+ return nil, errors.New("expected $code field to have string value")
+ }
+
+ if jsonValue, ok = doc["$scope"]; ok {
+ switch v2 := jsonValue.(type) {
+ case map[string]interface{}, bson.D:
+ x, err := ParseSpecialKeys(v2)
+ if err != nil {
+ return nil, err
+ }
+ code.Scope = x
+ return code, nil
+ default:
+ return nil, errors.New("expected $scope field to contain map")
+ }
+ } else {
+ return nil, errors.New("expected $scope field with $code field")
+ }
+ }
+
+ if jsonValue, ok := doc["$regex"]; ok {
+ regex := bson.RegEx{}
+
+ switch pattern := jsonValue.(type) {
+ case string:
+ regex.Pattern = pattern
+
+ default:
+ return nil, errors.New("expected $regex field to have string value")
+ }
+ if jsonValue, ok = doc["$options"]; !ok {
+ return nil, errors.New("expected $options field with $regex field")
+ }
+
+ switch options := jsonValue.(type) {
+ case string:
+ regex.Options = options
+
+ default:
+ return nil, errors.New("expected $options field to have string value")
+ }
+
+ // Validate regular expression options
+ for i := range regex.Options {
+ switch o := regex.Options[i]; o {
+ default:
+ return nil, fmt.Errorf("invalid regular expression option '%v'", o)
+
+ case 'g', 'i', 'm', 's': // allowed
+ }
+ }
+ return regex, nil
+ }
+
+ if jsonValue, ok := doc["$binary"]; ok {
+ binary := bson.Binary{}
+
+ switch data := jsonValue.(type) {
+ case string:
+ bytes, err := base64.StdEncoding.DecodeString(data)
+ if err != nil {
+ return nil, err
+ }
+ binary.Data = bytes
+
+ default:
+ return nil, errors.New("expected $binary field to have string value")
+ }
+ if jsonValue, ok = doc["$type"]; !ok {
+ return nil, errors.New("expected $type field with $binary field")
+ }
+
+ switch typ := jsonValue.(type) {
+ case string:
+ kind, err := hex.DecodeString(typ)
+ if err != nil {
+ return nil, err
+ } else if len(kind) != 1 {
+ return nil, errors.New("expected single byte (as hexadecimal string) for $type field")
+ }
+ binary.Kind = kind[0]
+
+ default:
+ return nil, errors.New("expected $type field to have string value")
+ }
+ return binary, nil
+ }
+
+ if jsonValue, ok := doc["$ref"]; ok {
+ dbRef := mgo.DBRef{}
+
+ switch data := jsonValue.(type) {
+ case string:
+ dbRef.Collection = data
+ default:
+ return nil, errors.New("expected string for $ref field")
+ }
+ if jsonValue, ok = doc["$id"]; ok {
+ switch v2 := jsonValue.(type) {
+ case map[string]interface{}, bson.D:
+ x, err := ParseSpecialKeys(v2)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing $id field: %v", err)
+ }
+ dbRef.Id = x
+ default:
+ dbRef.Id = v2
+ }
+ return dbRef, nil
+ }
+ }
+ case 3:
+ if jsonValue, ok := doc["$ref"]; ok {
+ dbRef := mgo.DBRef{}
+
+ switch data := jsonValue.(type) {
+ case string:
+ dbRef.Collection = data
+ default:
+ return nil, errors.New("expected string for $ref field")
+ }
+ if jsonValue, ok = doc["$id"]; ok {
+ switch v2 := jsonValue.(type) {
+ case map[string]interface{}, bson.D:
+ x, err := ParseSpecialKeys(v2)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing $id field: %v", err)
+ }
+ dbRef.Id = x
+ default:
+ dbRef.Id = v2
+ }
+ if dbValue, ok := doc["$db"]; ok {
+ switch v3 := dbValue.(type) {
+ case string:
+ dbRef.Database = v3
+ default:
+ return nil, errors.New("expected string for $db field")
+ }
+ return dbRef, nil
+ }
+ }
+ }
+ }
+
+ // nothing matched, so we recurse deeper
+ switch v := special.(type) {
+ case bson.D:
+ return GetExtendedBsonD(v)
+ case map[string]interface{}:
+ return ConvertJSONValueToBSON(v)
+ default:
+ return nil, fmt.Errorf("%v (type %T) is not valid input to ParseSpecialKeys", special, special)
+ }
+}
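+
+// For instance (a sketch):
+//
+//	v, err := ParseSpecialKeys(map[string]interface{}{"$numberLong": "123"})
+//	// v is int64(123), err is nil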
+
+// ParseJSONValue takes any value generated by the json package and returns a
+// BSON version of that value.
+func ParseJSONValue(jsonValue interface{}) (interface{}, error) {
+ switch v := jsonValue.(type) {
+ case map[string]interface{}, bson.D: // subdocument
+ return ParseSpecialKeys(v)
+
+ default:
+ return ConvertJSONValueToBSON(v)
+ }
+}
+
+func parseNumberLongField(jsonValue interface{}) (int64, error) {
+ switch v := jsonValue.(type) {
+ case string:
+ // all of decimal, hex, and octal are supported here
+ return strconv.ParseInt(v, 0, 64)
+
+ default:
+ return 0, errors.New("expected $numberLong field to have string value")
+ }
+}
diff --git a/src/mongo/gotools/common/bsonutil/converter.go b/src/mongo/gotools/common/bsonutil/converter.go
new file mode 100644
index 00000000000..02d091e21a4
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/converter.go
@@ -0,0 +1,388 @@
+package bsonutil
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "time"
+)
+
+// ConvertJSONValueToBSON walks through a document or an array and
+// replaces any extended JSON value with its corresponding BSON type.
+func ConvertJSONValueToBSON(x interface{}) (interface{}, error) {
+ switch v := x.(type) {
+ case nil:
+ return nil, nil
+ case bool:
+ return v, nil
+ case map[string]interface{}: // document
+ for key, jsonValue := range v {
+ bsonValue, err := ParseJSONValue(jsonValue)
+ if err != nil {
+ return nil, err
+ }
+ v[key] = bsonValue
+ }
+ return v, nil
+ case bson.D:
+ for i := range v {
+ var err error
+ v[i].Value, err = ParseJSONValue(v[i].Value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return v, nil
+
+ case []interface{}: // array
+ for i, jsonValue := range v {
+ bsonValue, err := ParseJSONValue(jsonValue)
+ if err != nil {
+ return nil, err
+ }
+ v[i] = bsonValue
+ }
+ return v, nil
+
+ case string, float64, int32, int64:
+ return v, nil // require no conversion
+
+ case json.ObjectId: // ObjectId
+ s := string(v)
+ if !bson.IsObjectIdHex(s) {
+ return nil, errors.New("expected ObjectId to contain 24 hexadecimal characters")
+ }
+ return bson.ObjectIdHex(s), nil
+
+ case json.Decimal128:
+ return v.Decimal128, nil
+
+ case json.Date: // Date
+ n := int64(v)
+ return time.Unix(n/1e3, n%1e3*1e6), nil
+
+ case json.ISODate: // ISODate
+ n := string(v)
+ return util.FormatDate(n)
+
+ case json.NumberLong: // NumberLong
+ return int64(v), nil
+
+ case json.NumberInt: // NumberInt
+ return int32(v), nil
+
+ case json.NumberFloat: // NumberFloat
+ return float64(v), nil
+ case json.BinData: // BinData
+ data, err := base64.StdEncoding.DecodeString(v.Base64)
+ if err != nil {
+ return nil, err
+ }
+ return bson.Binary{v.Type, data}, nil
+
+ case json.DBRef: // DBRef
+ var err error
+ v.Id, err = ParseJSONValue(v.Id)
+ if err != nil {
+ return nil, err
+ }
+ return mgo.DBRef{v.Collection, v.Id, v.Database}, nil
+
+ case json.DBPointer: // DBPointer, for backwards compatibility
+ return bson.DBPointer{v.Namespace, v.Id}, nil
+
+ case json.RegExp: // RegExp
+ return bson.RegEx{v.Pattern, v.Options}, nil
+
+ case json.Timestamp: // Timestamp
+ ts := (int64(v.Seconds) << 32) | int64(v.Increment)
+ return bson.MongoTimestamp(ts), nil
+
+ case json.JavaScript: // Javascript
+ return bson.JavaScript{v.Code, v.Scope}, nil
+
+ case json.MinKey: // MinKey
+ return bson.MinKey, nil
+
+ case json.MaxKey: // MaxKey
+ return bson.MaxKey, nil
+
+ case json.Undefined: // undefined
+ return bson.Undefined, nil
+
+ default:
+ return nil, fmt.Errorf("conversion of JSON value '%v' of type '%T' not supported", v, v)
+ }
+}
+
+func convertKeys(v bson.M) (bson.M, error) {
+ for key, value := range v {
+ jsonValue, err := ConvertBSONValueToJSON(value)
+ if err != nil {
+ return nil, err
+ }
+ v[key] = jsonValue
+ }
+ return v, nil
+}
+
+func getConvertedKeys(v bson.M) (bson.M, error) {
+ out := bson.M{}
+ for key, value := range v {
+ jsonValue, err := GetBSONValueAsJSON(value)
+ if err != nil {
+ return nil, err
+ }
+ out[key] = jsonValue
+ }
+ return out, nil
+}
+
+// ConvertBSONValueToJSON walks through a document or an array and
+// converts any BSON value to its corresponding extended JSON type.
+// It returns the converted JSON document and any error encountered.
+func ConvertBSONValueToJSON(x interface{}) (interface{}, error) {
+ switch v := x.(type) {
+ case nil:
+ return nil, nil
+ case bool:
+ return v, nil
+
+ case *bson.M: // document
+ doc, err := convertKeys(*v)
+ if err != nil {
+ return nil, err
+ }
+ return doc, err
+ case bson.M: // document
+ return convertKeys(v)
+ case map[string]interface{}:
+ return convertKeys(v)
+ case bson.D:
+ for i, value := range v {
+ jsonValue, err := ConvertBSONValueToJSON(value.Value)
+ if err != nil {
+ return nil, err
+ }
+ v[i].Value = jsonValue
+ }
+ return MarshalD(v), nil
+ case MarshalD:
+ return v, nil
+ case []interface{}: // array
+ for i, value := range v {
+ jsonValue, err := ConvertBSONValueToJSON(value)
+ if err != nil {
+ return nil, err
+ }
+ v[i] = jsonValue
+ }
+ return v, nil
+
+ case string:
+ return v, nil // require no conversion
+
+ case int:
+ return json.NumberInt(v), nil
+
+ case bson.ObjectId: // ObjectId
+ return json.ObjectId(v.Hex()), nil
+
+ case bson.Decimal128:
+ return json.Decimal128{v}, nil
+
+ case time.Time: // Date
+ return json.Date(v.Unix()*1000 + int64(v.Nanosecond()/1e6)), nil
+
+ case int64: // NumberLong
+ return json.NumberLong(v), nil
+
+ case int32: // NumberInt
+ return json.NumberInt(v), nil
+
+ case float64:
+ return json.NumberFloat(v), nil
+
+ case float32:
+ return json.NumberFloat(float64(v)), nil
+
+ case []byte: // BinData (with generic type)
+ data := base64.StdEncoding.EncodeToString(v)
+ return json.BinData{0x00, data}, nil
+
+ case bson.Binary: // BinData
+ data := base64.StdEncoding.EncodeToString(v.Data)
+ return json.BinData{v.Kind, data}, nil
+
+ case mgo.DBRef: // DBRef
+ return json.DBRef{v.Collection, v.Id, v.Database}, nil
+
+ case bson.DBPointer: // DBPointer
+ return json.DBPointer{v.Namespace, v.Id}, nil
+
+ case bson.RegEx: // RegExp
+ return json.RegExp{v.Pattern, v.Options}, nil
+
+ case bson.MongoTimestamp: // Timestamp
+ timestamp := int64(v)
+ return json.Timestamp{
+ Seconds: uint32(timestamp >> 32),
+ Increment: uint32(timestamp),
+ }, nil
+
+ case bson.JavaScript: // JavaScript
+ var scope interface{}
+ var err error
+ if v.Scope != nil {
+ scope, err = ConvertBSONValueToJSON(v.Scope)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return json.JavaScript{v.Code, scope}, nil
+
+ default:
+ switch x {
+ case bson.MinKey: // MinKey
+ return json.MinKey{}, nil
+
+ case bson.MaxKey: // MaxKey
+ return json.MaxKey{}, nil
+
+ case bson.Undefined: // undefined
+ return json.Undefined{}, nil
+ }
+ }
+
+ return nil, fmt.Errorf("conversion of BSON value '%v' of type '%T' not supported", x, x)
+}
+
+// GetBSONValueAsJSON is equivalent to ConvertBSONValueToJSON, but does not mutate its argument.
+func GetBSONValueAsJSON(x interface{}) (interface{}, error) {
+ switch v := x.(type) {
+ case nil:
+ return nil, nil
+ case bool:
+ return v, nil
+
+ case *bson.M: // document
+ doc, err := getConvertedKeys(*v)
+ if err != nil {
+ return nil, err
+ }
+ return doc, err
+ case bson.M: // document
+ return getConvertedKeys(v)
+ case map[string]interface{}:
+ return getConvertedKeys(v)
+ case bson.D:
+ out := bson.D{}
+ for _, value := range v {
+ jsonValue, err := GetBSONValueAsJSON(value.Value)
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, bson.DocElem{
+ Name: value.Name,
+ Value: jsonValue,
+ })
+ }
+ return MarshalD(out), nil
+ case MarshalD:
+ out, err := GetBSONValueAsJSON(bson.D(v))
+ if err != nil {
+ return nil, err
+ }
+ return MarshalD(out.(bson.D)), nil
+ case []interface{}: // array
+ out := []interface{}{}
+ for _, value := range v {
+ jsonValue, err := GetBSONValueAsJSON(value)
+ if err != nil {
+ return nil, err
+ }
+ out = append(out, jsonValue)
+ }
+ return out, nil
+
+ case string:
+ return v, nil // require no conversion
+
+ case int:
+ return json.NumberInt(v), nil
+
+ case bson.ObjectId: // ObjectId
+ return json.ObjectId(v.Hex()), nil
+
+ case bson.Decimal128:
+ return json.Decimal128{v}, nil
+
+ case time.Time: // Date
+ return json.Date(v.Unix()*1000 + int64(v.Nanosecond()/1e6)), nil
+
+ case int64: // NumberLong
+ return json.NumberLong(v), nil
+
+ case int32: // NumberInt
+ return json.NumberInt(v), nil
+
+ case float64:
+ return json.NumberFloat(v), nil
+
+ case float32:
+ return json.NumberFloat(float64(v)), nil
+
+ case []byte: // BinData (with generic type)
+ data := base64.StdEncoding.EncodeToString(v)
+ return json.BinData{0x00, data}, nil
+
+ case bson.Binary: // BinData
+ data := base64.StdEncoding.EncodeToString(v.Data)
+ return json.BinData{v.Kind, data}, nil
+
+ case mgo.DBRef: // DBRef
+ return json.DBRef{v.Collection, v.Id, v.Database}, nil
+
+ case bson.DBPointer: // DBPointer
+ return json.DBPointer{v.Namespace, v.Id}, nil
+
+ case bson.RegEx: // RegExp
+ return json.RegExp{v.Pattern, v.Options}, nil
+
+ case bson.MongoTimestamp: // Timestamp
+ timestamp := int64(v)
+ return json.Timestamp{
+ Seconds: uint32(timestamp >> 32),
+ Increment: uint32(timestamp),
+ }, nil
+
+ case bson.JavaScript: // JavaScript
+ var scope interface{}
+ var err error
+ if v.Scope != nil {
+ scope, err = GetBSONValueAsJSON(v.Scope)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return json.JavaScript{v.Code, scope}, nil
+
+ default:
+ switch x {
+ case bson.MinKey: // MinKey
+ return json.MinKey{}, nil
+
+ case bson.MaxKey: // MaxKey
+ return json.MaxKey{}, nil
+
+ case bson.Undefined: // undefined
+ return json.Undefined{}, nil
+ }
+ }
+
+ return nil, fmt.Errorf("conversion of BSON value '%v' of type '%T' not supported", x, x)
+}
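+
+// The practical difference (a sketch): ConvertBSONValueToJSON rewrites values
+// inside the document it is given, while GetBSONValueAsJSON builds a copy:
+//
+//	doc := bson.M{"n": int64(5)}
+//	out, _ := GetBSONValueAsJSON(doc)  // doc["n"] is still int64(5)
+//	_, _ = ConvertBSONValueToJSON(doc) // doc["n"] is now json.NumberLong(5)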
diff --git a/src/mongo/gotools/common/bsonutil/converter_test.go b/src/mongo/gotools/common/bsonutil/converter_test.go
new file mode 100644
index 00000000000..8d329057f52
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/converter_test.go
@@ -0,0 +1,345 @@
+package bsonutil
+
+import (
+ "encoding/base64"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+ "time"
+)
+
+func TestObjectIdBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting a BSON ObjectId", t, func() {
+ Convey("that is valid to JSON should produce a json.ObjectId", func() {
+ bsonObjId := bson.NewObjectId()
+ jsonObjId := json.ObjectId(bsonObjId.Hex())
+
+ _jObjId, err := ConvertBSONValueToJSON(bsonObjId)
+ So(err, ShouldBeNil)
+ jObjId, ok := _jObjId.(json.ObjectId)
+ So(ok, ShouldBeTrue)
+
+ So(jObjId, ShouldNotEqual, bsonObjId)
+ So(jObjId, ShouldEqual, jsonObjId)
+ })
+ })
+}
+
+func TestArraysBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting BSON arrays to JSON arrays", t, func() {
+ Convey("should work for empty arrays", func() {
+ jArr, err := ConvertBSONValueToJSON([]interface{}{})
+ So(err, ShouldBeNil)
+
+ So(jArr, ShouldResemble, []interface{}{})
+ })
+
+ Convey("should work for one-level deep arrays", func() {
+ objId := bson.NewObjectId()
+ bsonArr := []interface{}{objId, 28, 0.999, "plain"}
+ _jArr, err := ConvertBSONValueToJSON(bsonArr)
+ So(err, ShouldBeNil)
+ jArr, ok := _jArr.([]interface{})
+ So(ok, ShouldBeTrue)
+
+ So(len(jArr), ShouldEqual, 4)
+ So(jArr[0], ShouldEqual, json.ObjectId(objId.Hex()))
+ So(jArr[1], ShouldEqual, 28)
+ So(jArr[2], ShouldEqual, 0.999)
+ So(jArr[3], ShouldEqual, "plain")
+ })
+
+ Convey("should work for arrays with embedded objects", func() {
+ bsonObj := []interface{}{
+ 80,
+ bson.M{
+ "a": int64(20),
+ "b": bson.M{
+ "c": bson.RegEx{Pattern: "hi", Options: "i"},
+ },
+ },
+ }
+
+ __jObj, err := ConvertBSONValueToJSON(bsonObj)
+ So(err, ShouldBeNil)
+ _jObj, ok := __jObj.([]interface{})
+ So(ok, ShouldBeTrue)
+ jObj, ok := _jObj[1].(bson.M)
+ So(ok, ShouldBeTrue)
+ So(len(jObj), ShouldEqual, 2)
+ So(jObj["a"], ShouldEqual, json.NumberLong(20))
+ jjObj, ok := jObj["b"].(bson.M)
+ So(ok, ShouldBeTrue)
+
+ So(jjObj["c"], ShouldResemble, json.RegExp{"hi", "i"})
+ So(jjObj["c"], ShouldNotResemble, json.RegExp{"i", "hi"})
+ })
+
+ })
+}
+
+func TestDateBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ timeNow := time.Now()
+ secs := int64(timeNow.Unix())
+ nanosecs := timeNow.Nanosecond()
+ millis := int64(nanosecs / 1e6)
+
+ timeNowSecs := time.Unix(secs, int64(0))
+ timeNowMillis := time.Unix(secs, int64(millis*1e6))
+
+	Convey("Converting BSON time.Time dates to JSON", t, func() {
+ // json.Date is stored as an int64 representing the number of milliseconds since the epoch
+ Convey(fmt.Sprintf("should work with second granularity: %v", timeNowSecs), func() {
+ _jObj, err := ConvertBSONValueToJSON(timeNowSecs)
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.Date)
+ So(ok, ShouldBeTrue)
+
+ So(int64(jObj), ShouldEqual, secs*1e3)
+ })
+
+ Convey(fmt.Sprintf("should work with millisecond granularity: %v", timeNowMillis), func() {
+ _jObj, err := ConvertBSONValueToJSON(timeNowMillis)
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.Date)
+ So(ok, ShouldBeTrue)
+
+ So(int64(jObj), ShouldEqual, secs*1e3+millis)
+ })
+
+ Convey(fmt.Sprintf("should work with nanosecond granularity: %v", timeNow), func() {
+ _jObj, err := ConvertBSONValueToJSON(timeNow)
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.Date)
+ So(ok, ShouldBeTrue)
+
+ // we lose nanosecond precision
+ So(int64(jObj), ShouldEqual, secs*1e3+millis)
+ })
+
+ })
+}
+
+func TestMaxKeyBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+	Convey("Converting a BSON MaxKey to JSON", t, func() {
+ Convey("should produce a json.MaxKey", func() {
+ _jObj, err := ConvertBSONValueToJSON(bson.MaxKey)
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.MaxKey)
+ So(ok, ShouldBeTrue)
+
+ So(jObj, ShouldResemble, json.MaxKey{})
+ })
+ })
+}
+
+func TestMinKeyBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+	Convey("Converting a BSON MinKey to JSON", t, func() {
+ Convey("should produce a json.MinKey", func() {
+ _jObj, err := ConvertBSONValueToJSON(bson.MinKey)
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.MinKey)
+ So(ok, ShouldBeTrue)
+
+ So(jObj, ShouldResemble, json.MinKey{})
+ })
+ })
+}
+
+func Test64BitIntBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting a BSON int64 to JSON", t, func() {
+ Convey("should produce a json.NumberLong", func() {
+			_jObj, err := ConvertBSONValueToJSON(int64(888234334343))
+			So(err, ShouldBeNil)
+			jObj, ok := _jObj.(json.NumberLong)
+			So(ok, ShouldBeTrue)
+
+			So(jObj, ShouldEqual, json.NumberLong(888234334343))
+ })
+ })
+
+}
+
+func Test32BitIntBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting a BSON int32 integer to JSON", t, func() {
+ Convey("should produce a json.NumberInt", func() {
+			_jObj, err := ConvertBSONValueToJSON(int32(243))
+			So(err, ShouldBeNil)
+			jObj, ok := _jObj.(json.NumberInt)
+			So(ok, ShouldBeTrue)
+
+			So(jObj, ShouldEqual, json.NumberInt(243))
+ })
+ })
+
+}
+
+func TestRegExBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting a BSON Regular Expression (= /decision/gi) to JSON", t, func() {
+ Convey("should produce a json.RegExp", func() {
+ _jObj, err := ConvertBSONValueToJSON(bson.RegEx{"decision", "gi"})
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.RegExp)
+ So(ok, ShouldBeTrue)
+
+ So(jObj, ShouldResemble, json.RegExp{"decision", "gi"})
+ })
+ })
+
+}
+
+func TestUndefinedValueBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting a BSON Undefined type to JSON", t, func() {
+ Convey("should produce a json.Undefined", func() {
+ _jObj, err := ConvertBSONValueToJSON(bson.Undefined)
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.Undefined)
+ So(ok, ShouldBeTrue)
+
+ So(jObj, ShouldResemble, json.Undefined{})
+ })
+ })
+}
+
+func TestDBRefBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting BSON DBRef to JSON", t, func() {
+ Convey("should produce a json.DBRef", func() {
+ _jObj, err := ConvertBSONValueToJSON(mgo.DBRef{"coll1", "some_id", "test"})
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.DBRef)
+ So(ok, ShouldBeTrue)
+
+ So(jObj, ShouldResemble, json.DBRef{"coll1", "some_id", "test"})
+ So(jObj, ShouldNotResemble, json.DBRef{"coll1", "test", "some_id"})
+ })
+ })
+}
+
+func TestTimestampBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting a BSON Timestamp to JSON", t, func() {
+ Convey("should produce a json.Timestamp", func() {
+ // {t:803434343, i:9} == bson.MongoTimestamp(803434343*2**32 + 9)
+ _jObj, err := ConvertBSONValueToJSON(bson.MongoTimestamp(uint64(803434343<<32) | uint64(9)))
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.Timestamp)
+ So(ok, ShouldBeTrue)
+
+ So(jObj, ShouldResemble, json.Timestamp{Seconds: 803434343, Increment: 9})
+ So(jObj, ShouldNotResemble, json.Timestamp{Seconds: 803434343, Increment: 8})
+ })
+ })
+}
+
+func TestBinaryBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting BSON Binary data to JSON", t, func() {
+ Convey("should produce a json.BinData", func() {
+ _jObj, err := ConvertBSONValueToJSON(bson.Binary{'\x01', []byte("\x05\x20\x02\xae\xf7")})
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.BinData)
+ So(ok, ShouldBeTrue)
+
+ base64data1 := base64.StdEncoding.EncodeToString([]byte("\x05\x20\x02\xae\xf7"))
+ base64data2 := base64.StdEncoding.EncodeToString([]byte("\x05\x20\x02\xaf\xf7"))
+ So(jObj, ShouldResemble, json.BinData{'\x01', base64data1})
+ So(jObj, ShouldNotResemble, json.BinData{'\x01', base64data2})
+ })
+ })
+}
+
+func TestGenericBytesBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting Go bytes to JSON", t, func() {
+ Convey("should produce a json.BinData with Type=0x00 (Generic)", func() {
+ _jObj, err := ConvertBSONValueToJSON([]byte("this is something that's cool"))
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.BinData)
+ So(ok, ShouldBeTrue)
+
+ base64data := base64.StdEncoding.EncodeToString([]byte("this is something that's cool"))
+ So(jObj, ShouldResemble, json.BinData{0x00, base64data})
+ So(jObj, ShouldNotResemble, json.BinData{0x01, base64data})
+ })
+ })
+}
+
+func TestUnknownBSONTypeToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting an unknown BSON type to JSON", t, func() {
+ Convey("should produce an error", func() {
+ _, err := ConvertBSONValueToJSON(func() {})
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
+
+func TestDBPointerBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting BSON DBPointer to JSON", t, func() {
+ Convey("should produce a json.DBPointer", func() {
+ objId := bson.NewObjectId()
+ _jObj, err := ConvertBSONValueToJSON(bson.DBPointer{"dbrefnamespace", objId})
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.DBPointer)
+ So(ok, ShouldBeTrue)
+
+ So(jObj, ShouldResemble, json.DBPointer{"dbrefnamespace", objId})
+ })
+ })
+}
+
+func TestJSCodeBSONToJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Converting BSON Javascript code to JSON", t, func() {
+ Convey("should produce a json.Javascript", func() {
+ Convey("without scope if the scope for the BSON Javascript code is nil", func() {
+ _jObj, err := ConvertBSONValueToJSON(bson.JavaScript{"function() { return null; }", nil})
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.JavaScript)
+ So(ok, ShouldBeTrue)
+
+ So(jObj, ShouldResemble, json.JavaScript{"function() { return null; }", nil})
+ })
+
+ Convey("with scope if the scope for the BSON Javascript code is non-nil", func() {
+ _jObj, err := ConvertBSONValueToJSON(bson.JavaScript{"function() { return x; }", bson.M{"x": 2}})
+ So(err, ShouldBeNil)
+ jObj, ok := _jObj.(json.JavaScript)
+ So(ok, ShouldBeTrue)
+ So(jObj.Scope.(bson.M)["x"], ShouldEqual, 2)
+ So(jObj.Code, ShouldEqual, "function() { return x; }")
+ })
+ })
+ })
+}
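
For readers skimming these tests, here is a minimal standalone sketch of how `ConvertBSONValueToJSON` might be called outside a test harness. The program below is illustrative only and not part of this diff; it relies on the same conversion behavior the timestamp test above asserts.

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-tools/common/bsonutil"
	"github.com/mongodb/mongo-tools/common/json"
	"gopkg.in/mgo.v2/bson"
)

func main() {
	// A BSON timestamp packs seconds into the high 32 bits and the
	// increment into the low 32 bits.
	val, err := bsonutil.ConvertBSONValueToJSON(bson.MongoTimestamp(1<<32 | 1))
	if err != nil {
		panic(err)
	}
	ts := val.(json.Timestamp)
	fmt.Printf("%+v\n", ts) // {Seconds:1 Increment:1}
}
```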
diff --git a/src/mongo/gotools/common/bsonutil/date_test.go b/src/mongo/gotools/common/bsonutil/date_test.go
new file mode 100644
index 00000000000..a2553219379
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/date_test.go
@@ -0,0 +1,169 @@
+package bsonutil
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/json"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+ "time"
+)
+
+func TestDateValue(t *testing.T) {
+
+ Convey("When converting JSON with Date values", t, func() {
+
+ Convey("works for Date object", func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: json.Date(100),
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(time.Time)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Equal(time.Unix(0, int64(100*time.Millisecond))), ShouldBeTrue)
+ })
+
+ Convey("works for Date document", func() {
+
+ dates := []string{
+ "2006-01-02T15:04:05.000Z",
+ "2006-01-02T15:04:05.000-0700",
+ "2006-01-02T15:04:05Z",
+ "2006-01-02T15:04:05-0700",
+ "2006-01-02T15:04Z",
+ "2006-01-02T15:04-0700",
+ }
+
+ for _, dateString := range dates {
+ example := fmt.Sprintf(`{ "$date": "%v" }`, dateString)
+ Convey(fmt.Sprintf("of string ('%v')", example), func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$date": dateString,
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+
+ // each dateString is also a valid time layout, so it can be parsed against itself
+ date, err := time.Parse(dateString, dateString)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(time.Time)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Equal(date), ShouldBeTrue)
+ })
+ }
+
+ date := time.Unix(0, int64(time.Duration(1136214245000)*time.Millisecond))
+
+ Convey(`of $numberLong ('{ "$date": { "$numberLong": "1136214245000" } }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$date": map[string]interface{}{
+ "$numberLong": "1136214245000",
+ },
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(time.Time)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Equal(date), ShouldBeTrue)
+ })
+
+ Convey(`of json.Number ('{ "$date": 1136214245000 }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$date": json.Number("1136214245000"),
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(time.Time)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Equal(date), ShouldBeTrue)
+ })
+
+ Convey(`of numeric int64 ('{ "$date": 1136214245000 }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$date": int64(1136214245000),
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(time.Time)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Equal(date), ShouldBeTrue)
+ })
+
+ Convey(`of numeric float64 ('{ "$date": 1136214245000 }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$date": float64(1136214245000),
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(time.Time)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Equal(date), ShouldBeTrue)
+ })
+ Convey(`of numeric int32 ('{ "$date": 2136800000 }')`, func() {
+ key := "key"
+
+ date = time.Unix(0, int64(time.Duration(2136800000)*time.Millisecond))
+
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$date": int32(2136800000),
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(time.Time)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Equal(date), ShouldBeTrue)
+ })
+
+ Convey(`of negative numeric int32 ('{ "$date": -2136800000 }')`, func() {
+ key := "key"
+
+ date = time.Unix(0, int64(time.Duration(-2136800000)*time.Millisecond))
+
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$date": int32(-2136800000),
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(time.Time)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Equal(date), ShouldBeTrue)
+ })
+ })
+ })
+}
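
As a companion to these tests, a minimal sketch of the same conversion driven from plain code (illustrative only; the field name `created` is made up):

```go
package main

import (
	"fmt"
	"time"

	"github.com/mongodb/mongo-tools/common/bsonutil"
)

func main() {
	// An extended-JSON document with a $date string, as mongoimport might see it.
	doc := map[string]interface{}{
		"created": map[string]interface{}{"$date": "2006-01-02T15:04:05.000Z"},
	}
	// ConvertJSONDocumentToBSON rewrites the map in place.
	if err := bsonutil.ConvertJSONDocumentToBSON(doc); err != nil {
		panic(err)
	}
	created := doc["created"].(time.Time)
	fmt.Println(created.UTC()) // 2006-01-02 15:04:05 +0000 UTC
}
```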
diff --git a/src/mongo/gotools/common/bsonutil/marshal_d.go b/src/mongo/gotools/common/bsonutil/marshal_d.go
new file mode 100644
index 00000000000..e47eea9c220
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/marshal_d.go
@@ -0,0 +1,59 @@
+package bsonutil
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// MarshalD is a wrapper for bson.D that allows unmarshalling
+// of bson.D with preserved order. Necessary for printing
+// certain database commands.
+type MarshalD bson.D
+
+// MarshalJSON makes the MarshalD type usable by
+// the encoding/json package.
+func (md MarshalD) MarshalJSON() ([]byte, error) {
+ var buff bytes.Buffer
+ buff.WriteString("{")
+ for i, item := range md {
+ key, err := json.Marshal(item.Name)
+ if err != nil {
+ return nil, fmt.Errorf("cannot marshal key %v: %v", item.Name, err)
+ }
+ val, err := json.Marshal(item.Value)
+ if err != nil {
+ return nil, fmt.Errorf("cannot marshal value %v: %v", item.Value, err)
+ }
+ buff.Write(key)
+ buff.WriteString(":")
+ buff.Write(val)
+ if i != len(md)-1 {
+ buff.WriteString(",")
+ }
+ }
+ buff.WriteString("}")
+ return buff.Bytes(), nil
+}
+
+// MakeSortString takes a bson.D object and converts it to a slice of strings
+// that can be used as the input args to mgo's .Sort(...) function.
+// For example:
+// {a:1, b:-1} -> ["+a", "-b"]
+func MakeSortString(sortObj bson.D) ([]string, error) {
+ sortStrs := make([]string, 0, len(sortObj))
+ for _, docElem := range sortObj {
+ valueAsNumber, err := util.ToFloat64(docElem.Value)
+ if err != nil {
+ return nil, err
+ }
+ prefix := "+"
+ if valueAsNumber < 0 {
+ prefix = "-"
+ }
+ sortStrs = append(sortStrs, fmt.Sprintf("%v%v", prefix, docElem.Name))
+ }
+ return sortStrs, nil
+}
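
A short sketch of how `MarshalD` and `MakeSortString` might be used together when printing a command; the command document and field names below are made up for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mongodb/mongo-tools/common/bsonutil"
	"gopkg.in/mgo.v2/bson"
)

func main() {
	// Wrapping a bson.D in MarshalD preserves key order in the JSON output,
	// which matters for commands like {"find": ..., "limit": ...}.
	cmd := bson.D{{"find", "users"}, {"limit", 5}}
	out, err := json.Marshal(bsonutil.MarshalD(cmd))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"find":"users","limit":5}

	// MakeSortString converts a sort specification into mgo-style sort args.
	sortFields, err := bsonutil.MakeSortString(bson.D{{"a", 1}, {"b", -1}})
	if err != nil {
		panic(err)
	}
	fmt.Println(sortFields) // [+a -b]
}
```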
diff --git a/src/mongo/gotools/common/bsonutil/marshal_d_test.go b/src/mongo/gotools/common/bsonutil/marshal_d_test.go
new file mode 100644
index 00000000000..dcc3a53415e
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/marshal_d_test.go
@@ -0,0 +1,124 @@
+package bsonutil
+
+import (
+ "encoding/json"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "strings"
+ "testing"
+)
+
+func TestMarshalDMarshalJSON(t *testing.T) {
+
+ Convey("With a valid bson.D", t, func() {
+ testD := bson.D{
+ {"cool", "rad"},
+ {"aaa", 543.2},
+ {"I", 0},
+ {"E", 0},
+ {"map", bson.M{"1": 1, "2": "two"}},
+ }
+
+ Convey("wrapping with MarshalD should allow json.Marshal to work", func() {
+ asJSON, err := json.Marshal(MarshalD(testD))
+ So(err, ShouldBeNil)
+ strJSON := string(asJSON)
+
+ Convey("with order preserved", func() {
+ So(strings.Index(strJSON, "cool"), ShouldBeLessThan, strings.Index(strJSON, "aaa"))
+ So(strings.Index(strJSON, "aaa"), ShouldBeLessThan, strings.Index(strJSON, "I"))
+ So(strings.Index(strJSON, "I"), ShouldBeLessThan, strings.Index(strJSON, "E"))
+ So(strings.Index(strJSON, "E"), ShouldBeLessThan, strings.Index(strJSON, "map"))
+ So(strings.Count(strJSON, ","), ShouldEqual, 5) // 4 + 1 from internal map
+ })
+
+ Convey("but still usable by the json parser", func() {
+ var asMap bson.M
+ err := json.Unmarshal(asJSON, &asMap)
+ So(err, ShouldBeNil)
+
+ Convey("with types & values preserved", func() {
+ So(asMap["cool"], ShouldEqual, "rad")
+ So(asMap["aaa"], ShouldEqual, 543.2)
+ So(asMap["I"], ShouldEqual, 0)
+ So(asMap["E"], ShouldEqual, 0)
+ So(asMap["map"].(map[string]interface{})["1"], ShouldEqual, 1)
+ So(asMap["map"].(map[string]interface{})["2"], ShouldEqual, "two")
+ })
+ })
+
+ Convey("putting it inside another map should still be usable by json.Marshal", func() {
+ _, err := json.Marshal(bson.M{"x": 0, "y": MarshalD(testD)})
+ So(err, ShouldBeNil)
+ })
+ })
+ })
+
+ Convey("With en empty bson.D", t, func() {
+ testD := bson.D{}
+
+ Convey("wrapping with MarshalD should allow json.Marshal to work", func() {
+ asJSON, err := json.Marshal(MarshalD(testD))
+ So(err, ShouldBeNil)
+ strJSON := string(asJSON)
+ So(strJSON, ShouldEqual, "{}")
+
+ Convey("but still usable by the json parser", func() {
+ var asInterface interface{}
+ err := json.Unmarshal(asJSON, &asInterface)
+ So(err, ShouldBeNil)
+ asMap, ok := asInterface.(map[string]interface{})
+ So(ok, ShouldBeTrue)
+ So(len(asMap), ShouldEqual, 0)
+ })
+ })
+ })
+}
+
+func TestFindValueByKey(t *testing.T) {
+ Convey("Given a bson.D document and a specific key", t, func() {
+ subDocument := &bson.D{
+ bson.DocElem{Name: "field4", Value: "c"},
+ }
+ document := &bson.D{
+ bson.DocElem{Name: "field1", Value: "a"},
+ bson.DocElem{Name: "field2", Value: "b"},
+ bson.DocElem{Name: "field3", Value: subDocument},
+ }
+ Convey("the corresponding value top-level keys should be returned", func() {
+ value, err := FindValueByKey("field1", document)
+ So(value, ShouldEqual, "a")
+ So(err, ShouldBeNil)
+ })
+ Convey("the corresponding value top-level keys with sub-document values should be returned", func() {
+ value, err := FindValueByKey("field3", document)
+ So(value, ShouldEqual, subDocument)
+ So(err, ShouldBeNil)
+ })
+ Convey("for non-existent keys nil and an error should be returned", func() {
+ value, err := FindValueByKey("field4", document)
+ So(value, ShouldBeNil)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
+
+func TestEscapedKey(t *testing.T) {
+ Convey("Given a bson.D document with a key that requires escaping", t, func() {
+ document := bson.D{
+ bson.DocElem{Name: `foo"bar`, Value: "a"},
+ }
+ Convey("it can be marshaled without error", func() {
+ asJSON, err := json.Marshal(MarshalD(document))
+ So(err, ShouldBeNil)
+ Convey("and subsequently unmarshaled without error", func() {
+ var asMap bson.M
+ err := json.Unmarshal(asJSON, &asMap)
+ So(err, ShouldBeNil)
+ Convey("with the original value being correctly found with the unescaped key", func() {
+ So(asMap[`foo"bar`], ShouldEqual, "a")
+ })
+ })
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/bsonutil/maxkey_test.go b/src/mongo/gotools/common/bsonutil/maxkey_test.go
new file mode 100644
index 00000000000..8676c449e32
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/maxkey_test.go
@@ -0,0 +1,38 @@
+package bsonutil
+
+import (
+ "github.com/mongodb/mongo-tools/common/json"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+)
+
+func TestMaxKeyValue(t *testing.T) {
+
+ Convey("When converting JSON with MaxKey values", t, func() {
+
+ Convey("works for MaxKey literal", func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: json.MaxKey{},
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldResemble, bson.MaxKey)
+ })
+
+ Convey(`works for MaxKey document ('{ "$maxKey": 1 }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$maxKey": 1,
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldResemble, bson.MaxKey)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/bsonutil/minkey_test.go b/src/mongo/gotools/common/bsonutil/minkey_test.go
new file mode 100644
index 00000000000..149bcd42796
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/minkey_test.go
@@ -0,0 +1,38 @@
+package bsonutil
+
+import (
+ "github.com/mongodb/mongo-tools/common/json"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+)
+
+func TestMinKeyValue(t *testing.T) {
+
+ Convey("When converting JSON with MinKey values", t, func() {
+
+ Convey("works for MinKey literal", func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: json.MinKey{},
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldResemble, bson.MinKey)
+ })
+
+ Convey(`works for MinKey document ('{ "$minKey": 1 }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$minKey": 1,
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldResemble, bson.MinKey)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/bsonutil/number.go b/src/mongo/gotools/common/bsonutil/number.go
new file mode 100644
index 00000000000..044edbc5274
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/number.go
@@ -0,0 +1,18 @@
+package bsonutil
+
+import (
+ "fmt"
+ "reflect"
+)
+
+var floatType = reflect.TypeOf(float64(0))
+
+func getFloat(unk interface{}) (float64, error) {
+ v := reflect.ValueOf(unk)
+ v = reflect.Indirect(v)
+ if !v.Type().ConvertibleTo(floatType) {
+ return 0, fmt.Errorf("cannot convert %v to float64", v.Type())
+ }
+ fv := v.Convert(floatType)
+ return fv.Float(), nil
+}
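
Since `getFloat` is unexported, any usage necessarily lives inside package bsonutil; the helper below is an illustrative sketch (not part of the diff) of how the reflection-based conversion behaves:

```go
package bsonutil

import "fmt"

// demoGetFloat (illustrative only) shows how getFloat normalizes
// assorted numeric types through reflection.
func demoGetFloat() {
	for _, v := range []interface{}{int32(7), int64(8), 3.5} {
		f, err := getFloat(v)
		fmt.Println(f, err) // 7 <nil>, 8 <nil>, 3.5 <nil>
	}
	// Non-numeric types are rejected rather than coerced.
	if _, err := getFloat("nope"); err != nil {
		fmt.Println(err) // cannot convert string to float64
	}
}
```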
diff --git a/src/mongo/gotools/common/bsonutil/numberint_test.go b/src/mongo/gotools/common/bsonutil/numberint_test.go
new file mode 100644
index 00000000000..8dc368b6668
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/numberint_test.go
@@ -0,0 +1,37 @@
+package bsonutil
+
+import (
+ "github.com/mongodb/mongo-tools/common/json"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestNumberIntValue(t *testing.T) {
+
+ Convey("When converting JSON with NumberInt values", t, func() {
+
+ Convey("works for NumberInt constructor", func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: json.NumberInt(42),
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldEqual, int32(42))
+ })
+
+ Convey(`works for NumberInt document ('{ "$numberInt": "42" }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$numberInt": "42",
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldEqual, int32(42))
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/bsonutil/numberlong_test.go b/src/mongo/gotools/common/bsonutil/numberlong_test.go
new file mode 100644
index 00000000000..d2706b61847
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/numberlong_test.go
@@ -0,0 +1,37 @@
+package bsonutil
+
+import (
+ "github.com/mongodb/mongo-tools/common/json"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestNumberLongValue(t *testing.T) {
+
+ Convey("When converting JSON with NumberLong values", t, func() {
+
+ Convey("works for NumberLong constructor", func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: json.NumberLong(42),
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldEqual, int64(42))
+ })
+
+ Convey(`works for NumberLong document ('{ "$numberLong": "42" }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$numberLong": "42",
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldEqual, int64(42))
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/bsonutil/objectid_test.go b/src/mongo/gotools/common/bsonutil/objectid_test.go
new file mode 100644
index 00000000000..bc1df9d6b4a
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/objectid_test.go
@@ -0,0 +1,38 @@
+package bsonutil
+
+import (
+ "github.com/mongodb/mongo-tools/common/json"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+)
+
+func TestObjectIdValue(t *testing.T) {
+
+ Convey("When converting JSON with ObjectId values", t, func() {
+
+ Convey("works for ObjectId constructor", func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: json.ObjectId("0123456789abcdef01234567"),
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldEqual, bson.ObjectIdHex("0123456789abcdef01234567"))
+ })
+
+ Convey(`works for ObjectId document ('{ "$oid": "0123456789abcdef01234567" }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$oid": "0123456789abcdef01234567",
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldEqual, bson.ObjectIdHex("0123456789abcdef01234567"))
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/bsonutil/regexp_test.go b/src/mongo/gotools/common/bsonutil/regexp_test.go
new file mode 100644
index 00000000000..fe4fd350323
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/regexp_test.go
@@ -0,0 +1,66 @@
+package bsonutil
+
+import (
+ "github.com/mongodb/mongo-tools/common/json"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+)
+
+func TestRegExpValue(t *testing.T) {
+
+ Convey("When converting JSON with RegExp values", t, func() {
+
+ Convey("works for RegExp constructor", func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: json.RegExp{"foo", "i"},
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldResemble, bson.RegEx{"foo", "i"})
+ })
+
+ Convey(`works for RegExp document ('{ "$regex": "foo", "$options": "i" }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$regex": "foo",
+ "$options": "i",
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldResemble, bson.RegEx{"foo", "i"})
+ })
+
+ Convey(`can use multiple options ('{ "$regex": "bar", "$options": "gims" }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$regex": "bar",
+ "$options": "gims",
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldResemble, bson.RegEx{"bar", "gims"})
+ })
+
+ Convey(`fails for an invalid option ('{ "$regex": "baz", "$options": "y" }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$regex": "baz",
+ "$options": "y",
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/bsonutil/timestamp_test.go b/src/mongo/gotools/common/bsonutil/timestamp_test.go
new file mode 100644
index 00000000000..05899febc2d
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/timestamp_test.go
@@ -0,0 +1,43 @@
+package bsonutil
+
+import (
+ "github.com/mongodb/mongo-tools/common/json"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+)
+
+func TestTimestampValue(t *testing.T) {
+
+ Convey("When converting JSON with Timestamp values", t, func() {
+ testTS := bson.MongoTimestamp(123456<<32 | 55)
+
+ Convey("works for Timestamp literal", func() {
+
+ jsonMap := map[string]interface{}{
+ "ts": json.Timestamp{123456, 55},
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap["ts"], ShouldEqual, testTS)
+ })
+
+ Convey(`works for Timestamp document`, func() {
+ Convey(`{"ts":{"$timestamp":{"t":123456, "i":55}}}`, func() {
+ jsonMap := map[string]interface{}{
+ "ts": map[string]interface{}{
+ "$timestamp": map[string]interface{}{
+ "t": 123456.0,
+ "i": 55.0,
+ },
+ },
+ }
+
+ bsonMap, err := ConvertJSONValueToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(bsonMap.(map[string]interface{})["ts"], ShouldEqual, testTS)
+ })
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/bsonutil/undefined_test.go b/src/mongo/gotools/common/bsonutil/undefined_test.go
new file mode 100644
index 00000000000..0126e426ebc
--- /dev/null
+++ b/src/mongo/gotools/common/bsonutil/undefined_test.go
@@ -0,0 +1,38 @@
+package bsonutil
+
+import (
+ "github.com/mongodb/mongo-tools/common/json"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+)
+
+func TestUndefinedValue(t *testing.T) {
+
+ Convey("When converting JSON with undefined values", t, func() {
+
+ Convey("works for undefined literal", func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: json.Undefined{},
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldResemble, bson.Undefined)
+ })
+
+ Convey(`works for undefined document ('{ "$undefined": true }')`, func() {
+ key := "key"
+ jsonMap := map[string]interface{}{
+ key: map[string]interface{}{
+ "$undefined": true,
+ },
+ }
+
+ err := ConvertJSONDocumentToBSON(jsonMap)
+ So(err, ShouldBeNil)
+ So(jsonMap[key], ShouldResemble, bson.Undefined)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/common.go b/src/mongo/gotools/common/common.go
new file mode 100644
index 00000000000..f2922d806d8
--- /dev/null
+++ b/src/mongo/gotools/common/common.go
@@ -0,0 +1,16 @@
+// Package common contains subpackages that are shared amongst the mongo
+// tools.
+package common
+
+import (
+ "strings"
+)
+
+// SplitNamespace returns the db and collection from a single namespace string.
+func SplitNamespace(ns string) (string, string) {
+ i := strings.Index(ns, ".")
+ if i != -1 {
+ return ns[:i], ns[i+1:]
+ }
+ return "", ns
+}
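
A minimal usage sketch (the namespaces are illustrative):

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-tools/common"
)

func main() {
	// A full namespace splits at the first dot.
	db, coll := common.SplitNamespace("test.users")
	fmt.Println(db, coll) // test users

	// A namespace without a dot yields an empty db name.
	db, coll = common.SplitNamespace("users")
	fmt.Println(db == "", coll) // true users
}
```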
diff --git a/src/mongo/gotools/common/db/bson_stream.go b/src/mongo/gotools/common/db/bson_stream.go
new file mode 100644
index 00000000000..780d76cca19
--- /dev/null
+++ b/src/mongo/gotools/common/db/bson_stream.go
@@ -0,0 +1,140 @@
+package db
+
+import (
+ "fmt"
+ "gopkg.in/mgo.v2/bson"
+ "io"
+)
+
+// BSONSource reads raw documents from the underlying io.ReadCloser, Stream,
+// which wraps a stream of BSON documents.
+type BSONSource struct {
+ reusableBuf []byte
+ Stream io.ReadCloser
+ err error
+}
+
+// DecodedBSONSource reads documents from the wrapped RawDocSource and
+// unmarshals them from BSON.
+type DecodedBSONSource struct {
+ RawDocSource
+ err error
+}
+
+// RawDocSource wraps basic functions for reading a BSON source file.
+type RawDocSource interface {
+ LoadNext() []byte
+ Close() error
+ Err() error
+}
+
+// NewBSONSource creates a BSONSource with a reusable I/O buffer
+func NewBSONSource(in io.ReadCloser) *BSONSource {
+ return &BSONSource{make([]byte, MaxBSONSize), in, nil}
+}
+
+// NewBufferlessBSONSource creates a BSONSource without a reusable I/O buffer
+func NewBufferlessBSONSource(in io.ReadCloser) *BSONSource {
+ return &BSONSource{nil, in, nil}
+}
+
+// Close closes the BSONSource, rendering it unusable for I/O.
+// It returns an error, if any.
+func (bs *BSONSource) Close() error {
+ return bs.Stream.Close()
+}
+
+// NewDecodedBSONSource creates a DecodedBSONSource around the given RawDocSource.
+func NewDecodedBSONSource(ds RawDocSource) *DecodedBSONSource {
+ return &DecodedBSONSource{ds, nil}
+}
+
+// Err returns any error in the DecodedBSONSource or its RawDocSource.
+func (dbs *DecodedBSONSource) Err() error {
+ if dbs.err != nil {
+ return dbs.err
+ }
+ return dbs.RawDocSource.Err()
+}
+
+// Next unmarshals the next BSON document into result. Returns true if no errors
+// are encountered and false otherwise.
+func (dbs *DecodedBSONSource) Next(result interface{}) bool {
+ doc := dbs.LoadNext()
+ if doc == nil {
+ return false
+ }
+ if err := bson.Unmarshal(doc, result); err != nil {
+ dbs.err = err
+ return false
+ }
+ dbs.err = nil
+ return true
+}
+
+// LoadNext reads and returns the next BSON document in the stream. If the
+// BSONSource was created with NewBSONSource then each returned []byte will be
+// a slice of a single reused I/O buffer. If the BSONSource was created with
+// NewBufferlessBSONSource then each returned []byte will be individually
+// allocated.
+func (bs *BSONSource) LoadNext() []byte {
+ var into []byte
+ if bs.reusableBuf == nil {
+ into = make([]byte, 4)
+ } else {
+ into = bs.reusableBuf
+ }
+ // read the bson object size (a 4 byte integer)
+ _, err := io.ReadAtLeast(bs.Stream, into[0:4], 4)
+ if err != nil {
+ if err != io.EOF {
+ bs.err = err
+ return nil
+ }
+ // we hit EOF right away, so we're at the end of the stream.
+ bs.err = nil
+ return nil
+ }
+
+ bsonSize := int32(
+ (uint32(into[0]) << 0) |
+ (uint32(into[1]) << 8) |
+ (uint32(into[2]) << 16) |
+ (uint32(into[3]) << 24),
+ )
+
+ // Verify that the size of the BSON object we are about to read is sane:
+ // a valid document is at least 5 bytes and no larger than MaxBSONSize.
+ // Anything outside that range means the stream is corrupt or not BSON.
+ if bsonSize > MaxBSONSize || bsonSize < 5 {
+ bs.err = fmt.Errorf("invalid BSONSize: %v bytes", bsonSize)
+ return nil
+ }
+ if int(bsonSize) > cap(into) {
+ bigInto := make([]byte, bsonSize)
+ copy(bigInto, into)
+ into = bigInto
+ if bs.reusableBuf != nil {
+ bs.reusableBuf = bigInto
+ }
+ }
+ into = into[:int(bsonSize)]
+ _, err = io.ReadAtLeast(bs.Stream, into[4:], int(bsonSize-4))
+ if err != nil {
+ if err != io.EOF {
+ bs.err = err
+ return nil
+ }
+ // this case means we hit EOF but read a partial document,
+ // so there's a broken doc in the stream. Treat this as error.
+ bs.err = fmt.Errorf("invalid bson: %v", err)
+ return nil
+ }
+
+ bs.err = nil
+ return into
+}
+
+// Err returns any error encountered while reading the stream.
+func (bs *BSONSource) Err() error {
+ return bs.err
+}
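
A hedged sketch of reading a dump file with these types; it assumes a file of concatenated BSON documents such as mongodump produces, and the path `dump.bson` is made up:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mongodb/mongo-tools/common/db"
	"gopkg.in/mgo.v2/bson"
)

func main() {
	file, err := os.Open("dump.bson") // illustrative path
	if err != nil {
		panic(err)
	}
	// NewBSONSource reuses one I/O buffer across documents; decode each
	// into a fresh map so earlier fields cannot leak into later documents.
	source := db.NewDecodedBSONSource(db.NewBSONSource(file))
	defer source.Close()

	for {
		doc := bson.M{}
		if !source.Next(&doc) {
			break
		}
		fmt.Println(doc)
	}
	if err := source.Err(); err != nil {
		panic(err)
	}
}
```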
diff --git a/src/mongo/gotools/common/db/bson_stream_test.go b/src/mongo/gotools/common/db/bson_stream_test.go
new file mode 100644
index 00000000000..657c038e0ea
--- /dev/null
+++ b/src/mongo/gotools/common/db/bson_stream_test.go
@@ -0,0 +1,41 @@
+package db
+
+import (
+ "bytes"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "io/ioutil"
+ "testing"
+)
+
+func TestBufferlessBSONSource(t *testing.T) {
+ var testValues = []bson.M{
+ {"_": bson.Binary{Kind: 0x80, Data: []byte("apples")}},
+ {"_": bson.Binary{Kind: 0x80, Data: []byte("bananas")}},
+ {"_": bson.Binary{Kind: 0x80, Data: []byte("cherries")}},
+ }
+ Convey("with a buffer containing several bson documents with binary fields", t, func() {
+ writeBuf := bytes.NewBuffer(make([]byte, 0, 1024))
+ for _, tv := range testValues {
+ data, err := bson.Marshal(&tv)
+ So(err, ShouldBeNil)
+ _, err = writeBuf.Write(data)
+ So(err, ShouldBeNil)
+ }
+ Convey("that we parse correctly with a BufferlessBSONSource", func() {
+ bsonSource := NewDecodedBSONSource(
+ NewBufferlessBSONSource(ioutil.NopCloser(writeBuf)))
+ docs := []bson.M{}
+ count := 0
+ doc := &bson.M{}
+ for bsonSource.Next(doc) {
+ count++
+ docs = append(docs, *doc)
+ doc = &bson.M{}
+ }
+ So(bsonSource.Err(), ShouldBeNil)
+ So(count, ShouldEqual, len(testValues))
+ So(docs, ShouldResemble, testValues)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/db/buffered_bulk.go b/src/mongo/gotools/common/db/buffered_bulk.go
new file mode 100644
index 00000000000..be2673b5876
--- /dev/null
+++ b/src/mongo/gotools/common/db/buffered_bulk.go
@@ -0,0 +1,79 @@
+package db
+
+import (
+ "fmt"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// BufferedBulkInserter implements a bufio.Writer-like design for queuing up
+// documents and inserting them in bulk when the given doc limit (or max
+// message size) is reached. Must be flushed at the end to ensure that all
+// documents are written.
+type BufferedBulkInserter struct {
+ bulk *mgo.Bulk
+ collection *mgo.Collection
+ continueOnError bool
+ docLimit int
+ byteCount int
+ docCount int
+ unordered bool
+}
+
+// NewBufferedBulkInserter returns an initialized BufferedBulkInserter
+// for writing.
+func NewBufferedBulkInserter(collection *mgo.Collection, docLimit int,
+ continueOnError bool) *BufferedBulkInserter {
+ bb := &BufferedBulkInserter{
+ collection: collection,
+ continueOnError: continueOnError,
+ docLimit: docLimit,
+ }
+ bb.resetBulk()
+ return bb
+}
+
+// Unordered marks the bulk inserter as unordered, so that inserts continue
+// past individual write errors.
+func (bb *BufferedBulkInserter) Unordered() {
+ bb.unordered = true
+ bb.bulk.Unordered()
+}
+
+// throw away the old bulk and init a new one
+func (bb *BufferedBulkInserter) resetBulk() {
+ bb.bulk = bb.collection.Bulk()
+ if bb.continueOnError || bb.unordered {
+ bb.bulk.Unordered()
+ }
+ bb.byteCount = 0
+ bb.docCount = 0
+}
+
+// Insert adds a document to the buffer for bulk insertion. If the buffer is
+// full, the bulk insert is made, returning any error that occurs.
+func (bb *BufferedBulkInserter) Insert(doc interface{}) error {
+ rawBytes, err := bson.Marshal(doc)
+ if err != nil {
+ return fmt.Errorf("bson encoding error: %v", err)
+ }
+ // flush if we are full
+ if bb.docCount >= bb.docLimit || bb.byteCount+len(rawBytes) > MaxBSONSize {
+ err = bb.Flush()
+ }
+ // buffer the document
+ bb.docCount++
+ bb.byteCount += len(rawBytes)
+ bb.bulk.Insert(bson.Raw{Data: rawBytes})
+ return err
+}
+
+// Flush writes all buffered documents in one bulk insert then resets the buffer.
+func (bb *BufferedBulkInserter) Flush() error {
+ if bb.docCount == 0 {
+ return nil
+ }
+ defer bb.resetBulk()
+ if _, err := bb.bulk.Run(); err != nil {
+ return err
+ }
+ return nil
+}
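
A sketch of typical usage; the address, database, and collection names are illustrative:

```go
package main

import (
	"log"

	"github.com/mongodb/mongo-tools/common/db"
	"gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

func main() {
	session, err := mgo.Dial("localhost:27017") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	coll := session.DB("tools-test").C("bulk-demo")
	// Buffer up to 1000 documents per bulk run; stop on the first error.
	inserter := db.NewBufferedBulkInserter(coll, 1000, false)
	for i := 0; i < 10000; i++ {
		if err := inserter.Insert(bson.M{"_id": i}); err != nil {
			log.Fatal(err)
		}
	}
	// Flush whatever is still buffered below the doc limit.
	if err := inserter.Flush(); err != nil {
		log.Fatal(err)
	}
}
```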
diff --git a/src/mongo/gotools/common/db/buffered_bulk_test.go b/src/mongo/gotools/common/db/buffered_bulk_test.go
new file mode 100644
index 00000000000..d4664dadd20
--- /dev/null
+++ b/src/mongo/gotools/common/db/buffered_bulk_test.go
@@ -0,0 +1,108 @@
+package db
+
+import (
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+)
+
+func TestBufferedBulkInserterInserts(t *testing.T) {
+ var bufBulk *BufferedBulkInserter
+
+ testutil.VerifyTestType(t, "db")
+
+ Convey("With a valid session", t, func() {
+ opts := options.ToolOptions{
+ Connection: &options.Connection{
+ Port: DefaultTestPort,
+ },
+ SSL: &options.SSL{},
+ Auth: &options.Auth{},
+ }
+ provider, err := NewSessionProvider(opts)
+ So(err, ShouldBeNil)
+ session, err := provider.GetSession()
+ So(session, ShouldNotBeNil)
+ So(err, ShouldBeNil)
+
+ Convey("using a test collection and a doc limit of 3", func() {
+ testCol := session.DB("tools-test").C("bulk1")
+ bufBulk = NewBufferedBulkInserter(testCol, 3, false)
+ So(bufBulk, ShouldNotBeNil)
+
+ Convey("inserting 10 documents into the BufferedBulkInserter", func() {
+ flushCount := 0
+ for i := 0; i < 10; i++ {
+ So(bufBulk.Insert(bson.D{}), ShouldBeNil)
+ if bufBulk.docCount%3 == 0 {
+ flushCount++
+ }
+ }
+
+ Convey("should have flushed 3 times with one doc still buffered", func() {
+ So(flushCount, ShouldEqual, 3)
+ So(bufBulk.byteCount, ShouldBeGreaterThan, 0)
+ So(bufBulk.docCount, ShouldEqual, 1)
+ })
+ })
+ })
+
+ Convey("using a test collection and a doc limit of 1", func() {
+ testCol := session.DB("tools-test").C("bulk2")
+ bufBulk = NewBufferedBulkInserter(testCol, 1, false)
+ So(bufBulk, ShouldNotBeNil)
+
+ Convey("inserting 10 documents into the BufferedBulkInserter and flushing", func() {
+ for i := 0; i < 10; i++ {
+ So(bufBulk.Insert(bson.D{}), ShouldBeNil)
+ }
+ So(bufBulk.Flush(), ShouldBeNil)
+
+ Convey("should have no docs buffered", func() {
+ So(bufBulk.docCount, ShouldEqual, 0)
+ So(bufBulk.byteCount, ShouldEqual, 0)
+ })
+ })
+ })
+
+ Convey("using a test collection and a doc limit of 1000", func() {
+ testCol := session.DB("tools-test").C("bulk3")
+ bufBulk = NewBufferedBulkInserter(testCol, 100, false)
+ So(bufBulk, ShouldNotBeNil)
+
+ Convey("inserting 1,000,000 documents into the BufferedBulkInserter and flushing", func() {
+ session.SetSocketTimeout(0)
+
+ for i := 0; i < 1000000; i++ {
+ bufBulk.Insert(bson.M{"_id": i})
+ }
+ So(bufBulk.Flush(), ShouldBeNil)
+
+ Convey("should have inserted all of the documents", func() {
+ count, err := testCol.Count()
+ So(err, ShouldBeNil)
+ So(count, ShouldEqual, 1000000)
+
+ // test values
+ testDoc := bson.M{}
+ err = testCol.Find(bson.M{"_id": 477232}).One(&testDoc)
+ So(err, ShouldBeNil)
+ So(testDoc["_id"], ShouldEqual, 477232)
+ err = testCol.Find(bson.M{"_id": 999999}).One(&testDoc)
+ So(err, ShouldBeNil)
+ So(testDoc["_id"], ShouldEqual, 999999)
+ err = testCol.Find(bson.M{"_id": 1}).One(&testDoc)
+ So(err, ShouldBeNil)
+ So(testDoc["_id"], ShouldEqual, 1)
+
+ })
+ })
+ })
+
+ Reset(func() {
+ session.DB("tools-test").DropDatabase()
+ })
+ })
+
+}
diff --git a/src/mongo/gotools/common/db/command.go b/src/mongo/gotools/common/db/command.go
new file mode 100644
index 00000000000..6016d25f5fb
--- /dev/null
+++ b/src/mongo/gotools/common/db/command.go
@@ -0,0 +1,210 @@
+package db
+
+import (
+ "fmt"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "strings"
+)
+
+// Query flags
+const (
+ Snapshot = 1 << iota
+ LogReplay
+ Prefetch
+)
+
+type NodeType string
+
+const (
+ Mongos NodeType = "mongos"
+ Standalone NodeType = "standalone"
+ ReplSet NodeType = "replset"
+ Unknown NodeType = "unknown"
+)
+
+// CommandRunner exposes functions that can be run against a server
+type CommandRunner interface {
+ Run(command interface{}, out interface{}, database string) error
+ FindOne(db, collection string, skip int, query interface{}, sort []string, into interface{}, opts int) error
+ Remove(db, collection string, query interface{}) error
+ DatabaseNames() ([]string, error)
+ CollectionNames(db string) ([]string, error)
+}
+
+// Remove removes all documents matched by query q in the db database and c collection.
+func (sp *SessionProvider) Remove(db, c string, q interface{}) error {
+ session, err := sp.GetSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+ _, err = session.DB(db).C(c).RemoveAll(q)
+ return err
+}
+
+// Run issues the provided command on the db database and unmarshals its result
+// into out.
+func (sp *SessionProvider) Run(command interface{}, out interface{}, db string) error {
+ session, err := sp.GetSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+ return session.DB(db).Run(command, out)
+}
+
+// DatabaseNames returns a slice containing the names of all the databases on the
+// connected server.
+func (sp *SessionProvider) DatabaseNames() ([]string, error) {
+ session, err := sp.GetSession()
+ if err != nil {
+ return nil, err
+ }
+ session.SetSocketTimeout(0)
+ defer session.Close()
+ return session.DatabaseNames()
+}
+
+// CollectionNames returns the names of all the collections in the dbName database.
+func (sp *SessionProvider) CollectionNames(dbName string) ([]string, error) {
+ session, err := sp.GetSession()
+ if err != nil {
+ return nil, err
+ }
+ defer session.Close()
+ session.SetSocketTimeout(0)
+ return session.DB(dbName).CollectionNames()
+}
+
+// GetNodeType checks if the connected SessionProvider is a mongos, standalone, or replset,
+// by looking at the result of calling isMaster.
+func (sp *SessionProvider) GetNodeType() (NodeType, error) {
+ session, err := sp.GetSession()
+ if err != nil {
+ return Unknown, err
+ }
+ session.SetSocketTimeout(0)
+ defer session.Close()
+ masterDoc := struct {
+ SetName interface{} `bson:"setName"`
+ Hosts interface{} `bson:"hosts"`
+ Msg string `bson:"msg"`
+ }{}
+ err = session.Run("isMaster", &masterDoc)
+ if err != nil {
+ return Unknown, err
+ }
+
+ if masterDoc.SetName != nil || masterDoc.Hosts != nil {
+ return ReplSet, nil
+ } else if masterDoc.Msg == "isdbgrid" {
+ // isdbgrid is always the msg value when calling isMaster on a mongos
+ // see http://docs.mongodb.org/manual/core/sharded-cluster-query-router/
+ return Mongos, nil
+ }
+ return Standalone, nil
+}
+
+// IsReplicaSet returns a boolean which is true if the connected server is part
+// of a replica set.
+func (sp *SessionProvider) IsReplicaSet() (bool, error) {
+ nodeType, err := sp.GetNodeType()
+ if err != nil {
+ return false, err
+ }
+ return nodeType == ReplSet, nil
+}
+
+// IsMongos returns true if the connected server is a mongos.
+func (sp *SessionProvider) IsMongos() (bool, error) {
+ nodeType, err := sp.GetNodeType()
+ if err != nil {
+ return false, err
+ }
+ return nodeType == Mongos, nil
+}
+
+// SupportsRepairCursor takes in an example db and collection name and
+// returns true if the connected server supports the repairCursor command.
+// It returns false and the error that occurred if it is not supported.
+func (sp *SessionProvider) SupportsRepairCursor(db, collection string) (bool, error) {
+ session, err := sp.GetSession()
+ if err != nil {
+ return false, err
+ }
+ session.SetSocketTimeout(0)
+ defer session.Close()
+
+ // This check is slightly hacky, but necessary to allow users to run repair without
+ // permissions to all collections. There are multiple reasons a repair command could fail,
+ // but we are only interested in the ones that imply that the repair command is not
+ // usable by the connected server. If we do not get one of these specific error messages,
+ // we will let the error happen again later.
+ repairIter := session.DB(db).C(collection).Repair()
+ repairIter.Next(bson.D{})
+ err = repairIter.Err()
+ if err == nil {
+ return true, nil
+ }
+ if strings.Index(err.Error(), "no such cmd: repairCursor") > -1 {
+ // return a helpful error message for early server versions
+ return false, fmt.Errorf("--repair flag cannot be used on mongodb versions before 2.7.8")
+ }
+ if strings.Index(err.Error(), "repair iterator not supported") > -1 {
+ // helpful error message if the storage engine does not support repair (WiredTiger)
+ return false, fmt.Errorf("--repair is not supported by the connected storage engine")
+ }
+
+ return true, nil
+}
+
+// SupportsWriteCommands returns true if the connected server supports write
+// commands, returns false otherwise.
+func (sp *SessionProvider) SupportsWriteCommands() (bool, error) {
+ session, err := sp.GetSession()
+ if err != nil {
+ return false, err
+ }
+ session.SetSocketTimeout(0)
+ defer session.Close()
+ masterDoc := struct {
+ Ok int `bson:"ok"`
+ MaxWire int `bson:"maxWireVersion"`
+ }{}
+ err = session.Run("isMaster", &masterDoc)
+ if err != nil {
+ return false, err
+ }
+ // the connected server supports write commands if it reports ok and a
+ // maxWireVersion of at least 2
+ return (masterDoc.Ok == 1 && masterDoc.MaxWire >= 2), nil
+}
+
+// FindOne returns the first document in the collection and database that matches
+// the query after skip, sort and query flags are applied.
+func (sp *SessionProvider) FindOne(db, collection string, skip int, query interface{}, sort []string, into interface{}, flags int) error {
+ session, err := sp.GetSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+
+ q := session.DB(db).C(collection).Find(query).Sort(sort...).Skip(skip)
+ q = ApplyFlags(q, session, flags)
+ return q.One(into)
+}
+
+// ApplyFlags applies flags to the given query session.
+func ApplyFlags(q *mgo.Query, session *mgo.Session, flags int) *mgo.Query {
+ if flags&Snapshot > 0 {
+ q = q.Snapshot()
+ }
+ if flags&LogReplay > 0 {
+ q = q.LogReplay()
+ }
+ if flags&Prefetch > 0 {
+ session.SetPrefetch(1.0)
+ }
+ return q
+}
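
A sketch showing FindOne with one of the query flags above; it assumes a replica-set member is reachable so that `local.oplog.rs` exists, and the connection details are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mongodb/mongo-tools/common/db"
	"github.com/mongodb/mongo-tools/common/options"
	"gopkg.in/mgo.v2/bson"
)

func main() {
	opts := options.ToolOptions{
		Connection: &options.Connection{Host: "localhost", Port: "27017"},
		SSL:        &options.SSL{},
		Auth:       &options.Auth{},
	}
	provider, err := db.NewSessionProvider(opts)
	if err != nil {
		log.Fatal(err)
	}
	// Read the first oplog entry in natural order with the LogReplay flag set.
	var entry db.Oplog
	err = provider.FindOne("local", "oplog.rs", 0, bson.M{}, []string{"$natural"}, &entry, db.LogReplay)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(entry.Namespace, entry.Operation)
}
```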
diff --git a/src/mongo/gotools/common/db/connector.go b/src/mongo/gotools/common/db/connector.go
new file mode 100644
index 00000000000..85e8d6e653c
--- /dev/null
+++ b/src/mongo/gotools/common/db/connector.go
@@ -0,0 +1,52 @@
+package db
+
+import (
+ "time"
+
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+)
+
+// DBConnector is an interface for connecting to the database.
+type DBConnector interface {
+ // configure, based on the options passed in
+ Configure(options.ToolOptions) error
+
+ // dial the database and get a fresh new session
+ GetNewSession() (*mgo.Session, error)
+}
+
+// VanillaDBConnector is a basic connector for dialing the database, without
+// SSL or Kerberos.
+type VanillaDBConnector struct {
+ dialInfo *mgo.DialInfo
+}
+
+// Configure sets up the db connector using the options in opts. It parses the
+// connection string and then sets up the dial information using the default
+// dial timeout.
+func (self *VanillaDBConnector) Configure(opts options.ToolOptions) error {
+ // create the addresses to be used to connect
+ connectionAddrs := util.CreateConnectionAddrs(opts.Host, opts.Port)
+
+ timeout := time.Duration(opts.Timeout) * time.Second
+
+ // set up the dial info
+ self.dialInfo = &mgo.DialInfo{
+ Addrs: connectionAddrs,
+ Timeout: timeout,
+ Direct: opts.Direct,
+ ReplicaSetName: opts.ReplicaSetName,
+ Username: opts.Auth.Username,
+ Password: opts.Auth.Password,
+ Source: opts.GetAuthenticationDatabase(),
+ Mechanism: opts.Auth.Mechanism,
+ }
+ return nil
+}
+
+// GetNewSession connects to the server and returns the established session and any
+// error encountered.
+func (self *VanillaDBConnector) GetNewSession() (*mgo.Session, error) {
+ return mgo.DialWithInfo(self.dialInfo)
+}
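
A minimal sketch of driving VanillaDBConnector directly (host and port are illustrative; most tools go through SessionProvider instead):

```go
package main

import (
	"log"

	"github.com/mongodb/mongo-tools/common/db"
	"github.com/mongodb/mongo-tools/common/options"
)

func main() {
	opts := options.ToolOptions{
		Connection: &options.Connection{Host: "localhost", Port: "27017"},
		Auth:       &options.Auth{},
	}
	connector := &db.VanillaDBConnector{}
	// Configure parses the host list and builds the mgo.DialInfo.
	if err := connector.Configure(opts); err != nil {
		log.Fatal(err)
	}
	session, err := connector.GetNewSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()
}
```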
diff --git a/src/mongo/gotools/common/db/connector_sasl_test.go b/src/mongo/gotools/common/db/connector_sasl_test.go
new file mode 100644
index 00000000000..c585c92f842
--- /dev/null
+++ b/src/mongo/gotools/common/db/connector_sasl_test.go
@@ -0,0 +1,60 @@
+// +build sasl
+
+package db
+
+// This file runs Kerberos tests if built with the sasl tag enabled
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/db/kerberos"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "os"
+ "runtime"
+ "testing"
+)
+
+var (
+ KERBEROS_HOST = "ldaptest.10gen.cc"
+ KERBEROS_USER = "drivers@LDAPTEST.10GEN.CC"
+)
+
+func TestKerberosDBConnector(t *testing.T) {
+ Convey("should be able to successfully connect", t, func() {
+ connector := &kerberos.KerberosDBConnector{}
+
+ opts := options.ToolOptions{
+ Connection: &options.Connection{
+ Host: KERBEROS_HOST,
+ Port: "27017",
+ },
+ Auth: &options.Auth{
+ Username: KERBEROS_USER,
+ },
+ Kerberos: &options.Kerberos{
+ Service: "mongodb",
+ ServiceHost: KERBEROS_HOST,
+ },
+ }
+
+ if runtime.GOOS == "windows" {
+ opts.Auth.Password = os.Getenv(testutil.WinKerberosPwdEnv)
+ if opts.Auth.Password == "" {
+ panic(fmt.Sprintf("Need to set %v environment variable to run kerberos tests on windows",
+ testutil.WinKerberosPwdEnv))
+ }
+ }
+
+ So(connector.Configure(opts), ShouldBeNil)
+
+ session, err := connector.GetNewSession()
+ So(err, ShouldBeNil)
+ So(session, ShouldNotBeNil)
+
+ n, err := session.DB("kerberos").C("test").Find(bson.M{}).Count()
+ So(err, ShouldBeNil)
+ So(n, ShouldEqual, 1)
+ })
+}
diff --git a/src/mongo/gotools/common/db/connector_test.go b/src/mongo/gotools/common/db/connector_test.go
new file mode 100644
index 00000000000..a04cd9cfb4e
--- /dev/null
+++ b/src/mongo/gotools/common/db/connector_test.go
@@ -0,0 +1,134 @@
+package db
+
+import (
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2"
+ "testing"
+ "time"
+)
+
+func TestVanillaDBConnector(t *testing.T) {
+
+ testutil.VerifyTestType(t, "db")
+
+ Convey("With a vanilla db connector", t, func() {
+
+ var connector *VanillaDBConnector
+
+ Convey("calling Configure should populate the addrs and dial timeout"+
+ " appropriately with no error", func() {
+
+ connector = &VanillaDBConnector{}
+
+ opts := options.ToolOptions{
+ Connection: &options.Connection{
+ Host: "host1,host2",
+ Port: "20000",
+ },
+ Auth: &options.Auth{},
+ }
+ So(connector.Configure(opts), ShouldBeNil)
+ So(connector.dialInfo.Addrs, ShouldResemble,
+ []string{"host1:20000", "host2:20000"})
+ So(connector.dialInfo.Timeout, ShouldResemble, time.Duration(opts.Timeout)*time.Second)
+
+ })
+
+ Convey("calling GetNewSession with a running mongod should connect"+
+ " successfully", func() {
+
+ connector = &VanillaDBConnector{}
+
+ opts := options.ToolOptions{
+ Connection: &options.Connection{
+ Host: "localhost",
+ Port: DefaultTestPort,
+ },
+ Auth: &options.Auth{},
+ }
+ So(connector.Configure(opts), ShouldBeNil)
+
+ session, err := connector.GetNewSession()
+ So(err, ShouldBeNil)
+ So(session, ShouldNotBeNil)
+ session.Close()
+
+ })
+
+ })
+
+}
+
+func TestVanillaDBConnectorWithAuth(t *testing.T) {
+ testutil.VerifyTestType(t, "auth")
+ session, err := mgo.Dial("localhost:33333")
+ if err != nil {
+ t.Fatalf("error dialing server: %v", err)
+ }
+
+ err = testutil.CreateUserAdmin(session)
+ if err != nil {
+ t.Fatalf("error creating user admin: %v", err)
+ }
+ err = testutil.CreateUserWithRole(session, "cAdmin", "password",
+ mgo.RoleClusterAdmin, true)
+ if err != nil {
+ t.Fatalf("error creating cluster admin user: %v", err)
+ }
+ session.Close()
+
+ Convey("With a vanilla db connector and a mongod running with"+
+ " auth", t, func() {
+
+ var connector *VanillaDBConnector
+
+ Convey("connecting without authentication should not be able"+
+ " to run commands", func() {
+
+ connector = &VanillaDBConnector{}
+
+ opts := options.ToolOptions{
+ Connection: &options.Connection{
+ Host: "localhost",
+ Port: DefaultTestPort,
+ },
+ Auth: &options.Auth{},
+ }
+ So(connector.Configure(opts), ShouldBeNil)
+
+ session, err := connector.GetNewSession()
+ So(err, ShouldBeNil)
+ So(session, ShouldNotBeNil)
+
+ So(session.DB("admin").Run("top", &struct{}{}), ShouldNotBeNil)
+ session.Close()
+
+ })
+
+ Convey("connecting with authentication should succeed and"+
+ " authenticate properly", func() {
+
+ connector = &VanillaDBConnector{}
+
+ opts := options.ToolOptions{
+ Connection: &options.Connection{
+ Host: "localhost",
+ Port: DefaultTestPort,
+ },
+ Auth: &options.Auth{
+ Username: "cAdmin",
+ Password: "password",
+ },
+ }
+ So(connector.Configure(opts), ShouldBeNil)
+
+ session, err := connector.GetNewSession()
+ So(err, ShouldBeNil)
+ So(session, ShouldNotBeNil)
+
+ So(session.DB("admin").Run("top", &struct{}{}), ShouldBeNil)
+ session.Close()
+
+ })
+
+ })
+
+}
diff --git a/src/mongo/gotools/common/db/db.go b/src/mongo/gotools/common/db/db.go
new file mode 100644
index 00000000000..a3207c4b467
--- /dev/null
+++ b/src/mongo/gotools/common/db/db.go
@@ -0,0 +1,243 @@
+// Package db implements generic connection to MongoDB, and contains
+// subpackages for specific methods of connection.
+package db
+
+import (
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/password"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+)
+
+type (
+ sessionFlag uint32
+ // GetConnectorFunc is used to get the appropriate DBConnector based on opts
+ GetConnectorFunc func(opts options.ToolOptions) DBConnector
+)
+
+// Session flags.
+const (
+ None sessionFlag = 0
+ Monotonic sessionFlag = 1 << iota
+ DisableSocketTimeout
+)
+
+// MongoDB enforced limits.
+const (
+ MaxBSONSize = 16 * 1024 * 1024 // 16MB - maximum BSON document size
+)
+
+// Default port for integration tests
+const (
+ DefaultTestPort = "33333"
+)
+
+const (
+ ErrLostConnection = "lost connection to server"
+ ErrNoReachableServers = "no reachable servers"
+ ErrNsNotFound = "ns not found"
+ // replication errors list the replset name if we are talking to a mongos,
+ // so we can only check for this universal prefix
+ ErrReplTimeoutPrefix = "waiting for replication timed out"
+ ErrCouldNotContactPrimaryPrefix = "could not contact primary for replica set"
+ ErrCouldNotFindPrimaryPrefix = `could not find host matching read preference { mode: "primary"`
+ ErrUnableToTargetPrefix = "unable to target"
+ ErrNotMaster = "not master"
+ ErrConnectionRefusedSuffix = "Connection refused"
+)
+
+var (
+ GetConnectorFuncs = []GetConnectorFunc{}
+)
+
+// SessionProvider is used to manage database sessions
+type SessionProvider struct {
+
+ // For connecting to the database
+ connector DBConnector
+
+ // used to avoid a race condition around creating the master session
+ masterSessionLock sync.Mutex
+
+ // the master session to use for connection pooling
+ masterSession *mgo.Session
+
+ // flags for generating the master session
+ bypassDocumentValidation bool
+ flags sessionFlag
+ readPreference mgo.Mode
+ tags bson.D
+}
+
+// ApplyOpsResponse represents the response from an 'applyOps' command.
+type ApplyOpsResponse struct {
+ Ok bool `bson:"ok"`
+ ErrMsg string `bson:"errmsg"`
+}
+
+// Oplog represents a MongoDB oplog document.
+type Oplog struct {
+ Timestamp bson.MongoTimestamp `bson:"ts"`
+ HistoryID int64 `bson:"h"`
+ Version int `bson:"v"`
+ Operation string `bson:"op"`
+ Namespace string `bson:"ns"`
+ Object bson.D `bson:"o"`
+ Query bson.D `bson:"o2"`
+}
+
+// GetSession returns a session connected to the database server for which the
+// session provider is configured.
+func (self *SessionProvider) GetSession() (*mgo.Session, error) {
+ self.masterSessionLock.Lock()
+ defer self.masterSessionLock.Unlock()
+
+ // The master session is initialized
+ if self.masterSession != nil {
+ return self.masterSession.Copy(), nil
+ }
+
+ // initialize the provider's master session
+ var err error
+ self.masterSession, err = self.connector.GetNewSession()
+ if err != nil {
+ return nil, fmt.Errorf("error connecting to db server: %v", err)
+ }
+
+ // update masterSession based on flags
+ self.refresh()
+
+ // copy the provider's master session, for connection pooling
+ return self.masterSession.Copy(), nil
+}
+
+// refresh is a helper for modifying the session based on the
+// session provider flags passed in with SetFlags.
+// This helper assumes a lock is already taken.
+func (self *SessionProvider) refresh() {
+ // handle bypassDocumentValidation
+ self.masterSession.SetBypassValidation(self.bypassDocumentValidation)
+
+ // handle readPreference
+ self.masterSession.SetMode(self.readPreference, true)
+
+ // disable timeouts
+ if (self.flags & DisableSocketTimeout) > 0 {
+ self.masterSession.SetSocketTimeout(0)
+ }
+ if self.tags != nil {
+ self.masterSession.SelectServers(self.tags)
+ }
+}
+
+// SetFlags allows certain modifications to the masterSession after initial creation.
+func (self *SessionProvider) SetFlags(flagBits sessionFlag) {
+ self.masterSessionLock.Lock()
+ defer self.masterSessionLock.Unlock()
+
+ self.flags = flagBits
+
+ // make sure we update the master session if one already exists
+ if self.masterSession != nil {
+ self.refresh()
+ }
+}
+
+// SetReadPreference sets the read preference mode in the SessionProvider
+// and eventually in the masterSession
+func (self *SessionProvider) SetReadPreference(pref mgo.Mode) {
+ self.masterSessionLock.Lock()
+ defer self.masterSessionLock.Unlock()
+
+ self.readPreference = pref
+
+ if self.masterSession != nil {
+ self.refresh()
+ }
+}
+
+// SetBypassDocumentValidation sets whether to bypass document validation in the SessionProvider
+// and eventually in the masterSession
+func (self *SessionProvider) SetBypassDocumentValidation(bypassDocumentValidation bool) {
+ self.masterSessionLock.Lock()
+ defer self.masterSessionLock.Unlock()
+
+ self.bypassDocumentValidation = bypassDocumentValidation
+
+ if self.masterSession != nil {
+ self.refresh()
+ }
+}
+
+// SetTags sets the server selection tags in the SessionProvider
+// and eventually in the masterSession
+func (self *SessionProvider) SetTags(tags bson.D) {
+ self.masterSessionLock.Lock()
+ defer self.masterSessionLock.Unlock()
+
+ self.tags = tags
+
+ if self.masterSession != nil {
+ self.refresh()
+ }
+}
+
+// NewSessionProvider constructs a session provider but does not attempt to
+// create the initial session.
+func NewSessionProvider(opts options.ToolOptions) (*SessionProvider, error) {
+ // create the provider
+ provider := &SessionProvider{
+ readPreference: mgo.Primary,
+ bypassDocumentValidation: false,
+ }
+
+ // finalize auth options, filling in missing passwords
+ if opts.Auth.ShouldAskForPassword() {
+ opts.Auth.Password = password.Prompt()
+ }
+
+ // create the connector for dialing the database
+ provider.connector = getConnector(opts)
+
+ // configure the connector
+ err := provider.connector.Configure(opts)
+ if err != nil {
+ return nil, fmt.Errorf("error configuring the connector: %v", err)
+ }
+ return provider, nil
+}
+
+// IsConnectionError returns a boolean indicating if a given error is due to
+// an error in an underlying DB connection (as opposed to some other write
+// failure such as a duplicate key error)
+func IsConnectionError(err error) bool {
+ if err == nil {
+ return false
+ }
+ if err.Error() == ErrNoReachableServers ||
+ err.Error() == io.EOF.Error() ||
+ strings.HasPrefix(err.Error(), ErrReplTimeoutPrefix) ||
+ strings.HasPrefix(err.Error(), ErrCouldNotContactPrimaryPrefix) ||
+ strings.HasPrefix(err.Error(), ErrCouldNotFindPrimaryPrefix) ||
+ strings.HasPrefix(err.Error(), ErrUnableToTargetPrefix) ||
+ err.Error() == ErrNotMaster ||
+ strings.HasSuffix(err.Error(), ErrConnectionRefusedSuffix) {
+ return true
+ }
+ return false
+}
+
+// Get the right type of connector, based on the options
+func getConnector(opts options.ToolOptions) DBConnector {
+ for _, getConnectorFunc := range GetConnectorFuncs {
+ if connector := getConnectorFunc(opts); connector != nil {
+ return connector
+ }
+ }
+ return &VanillaDBConnector{}
+}
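+
+// Example (illustrative sketch): build-tagged files register alternative
+// connectors by appending to GetConnectorFuncs from init(), as db_ssl.go and
+// db_gssapi.go below do. A hypothetical mechanism would follow the same shape:
+//
+//	func init() {
+//		GetConnectorFuncs = append(GetConnectorFuncs, func(opts options.ToolOptions) DBConnector {
+//			if opts.Auth.Mechanism == "MY-MECHANISM" { // hypothetical mechanism name
+//				return &myConnector{}
+//			}
+//			return nil
+//		})
+//	}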
diff --git a/src/mongo/gotools/common/db/db_gssapi.go b/src/mongo/gotools/common/db/db_gssapi.go
new file mode 100644
index 00000000000..656e81987a9
--- /dev/null
+++ b/src/mongo/gotools/common/db/db_gssapi.go
@@ -0,0 +1,20 @@
+// +build sasl
+
+package db
+
+import (
+ "github.com/mongodb/mongo-tools/common/db/kerberos"
+ "github.com/mongodb/mongo-tools/common/options"
+)
+
+func init() {
+ GetConnectorFuncs = append(GetConnectorFuncs, getGSSAPIConnector)
+}
+
+// return the Kerberos DB connector if using GSSAPI authentication, otherwise return nil.
+func getGSSAPIConnector(opts options.ToolOptions) DBConnector {
+ if opts.Auth.Mechanism == "GSSAPI" {
+ return &kerberos.KerberosDBConnector{}
+ }
+ return nil
+}
diff --git a/src/mongo/gotools/common/db/db_ssl.go b/src/mongo/gotools/common/db/db_ssl.go
new file mode 100644
index 00000000000..68d3850b525
--- /dev/null
+++ b/src/mongo/gotools/common/db/db_ssl.go
@@ -0,0 +1,20 @@
+// +build ssl
+
+package db
+
+import (
+ "github.com/mongodb/mongo-tools/common/db/openssl"
+ "github.com/mongodb/mongo-tools/common/options"
+)
+
+func init() {
+ GetConnectorFuncs = append(GetConnectorFuncs, getSSLConnector)
+}
+
+// return the SSL DB connector if using SSL, otherwise return nil.
+func getSSLConnector(opts options.ToolOptions) DBConnector {
+ if opts.SSL.UseSSL {
+ return &openssl.SSLDBConnector{}
+ }
+ return nil
+}
diff --git a/src/mongo/gotools/common/db/db_test.go b/src/mongo/gotools/common/db/db_test.go
new file mode 100644
index 00000000000..59d1c53b929
--- /dev/null
+++ b/src/mongo/gotools/common/db/db_test.go
@@ -0,0 +1,63 @@
+package db
+
+import (
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "reflect"
+ "testing"
+)
+
+func TestNewSessionProvider(t *testing.T) {
+
+ testutil.VerifyTestType(t, "db")
+
+ Convey("When initializing a session provider", t, func() {
+
+ Convey("with the standard options, a provider with a standard"+
+ " connector should be returned", func() {
+ opts := options.ToolOptions{
+ Connection: &options.Connection{
+ Port: DefaultTestPort,
+ },
+ SSL: &options.SSL{},
+ Auth: &options.Auth{},
+ }
+ provider, err := NewSessionProvider(opts)
+ So(err, ShouldBeNil)
+ So(reflect.TypeOf(provider.connector), ShouldEqual,
+ reflect.TypeOf(&VanillaDBConnector{}))
+
+ })
+
+ Convey("the master session should be successfully "+
+ " initialized", func() {
+ opts := options.ToolOptions{
+ Connection: &options.Connection{
+ Port: DefaultTestPort,
+ },
+ SSL: &options.SSL{},
+ Auth: &options.Auth{},
+ }
+ provider, err := NewSessionProvider(opts)
+ So(err, ShouldBeNil)
+ So(provider.masterSession, ShouldBeNil)
+ session, err := provider.GetSession()
+ So(err, ShouldBeNil)
+ So(session, ShouldNotBeNil)
+ So(provider.masterSession, ShouldNotBeNil)
+
+ })
+
+ })
+
+}
+
+type listDatabasesCommand struct {
+ Databases []map[string]interface{} `json:"databases"`
+ Ok bool `json:"ok"`
+}
+
+func (self *listDatabasesCommand) AsRunnable() interface{} {
+ return "listDatabases"
+}
diff --git a/src/mongo/gotools/common/db/kerberos/gssapi.go b/src/mongo/gotools/common/db/kerberos/gssapi.go
new file mode 100644
index 00000000000..e9827b04109
--- /dev/null
+++ b/src/mongo/gotools/common/db/kerberos/gssapi.go
@@ -0,0 +1,58 @@
+// Package kerberos implements connection to MongoDB using kerberos.
+package kerberos
+
+// #cgo windows CFLAGS: -Ic:/sasl/include
+// #cgo windows LDFLAGS: -Lc:/sasl/lib
+
+import (
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+ "time"
+)
+
+const (
+ KERBEROS_AUTHENTICATION_MECHANISM = "GSSAPI"
+)
+
+type KerberosDBConnector struct {
+ dialInfo *mgo.DialInfo
+}
+
+// Configure the db connector. Parses the connection string and sets up
+// the dial info with the default dial timeout.
+func (self *KerberosDBConnector) Configure(opts options.ToolOptions) error {
+
+ // create the addresses to be used to connect
+ connectionAddrs := util.CreateConnectionAddrs(opts.Host, opts.Port)
+
+ timeout := time.Duration(opts.Timeout) * time.Second
+
+ // set up the dial info
+ self.dialInfo = &mgo.DialInfo{
+ Addrs: connectionAddrs,
+ Timeout: timeout,
+ Direct: opts.Direct,
+ ReplicaSetName: opts.ReplicaSetName,
+
+ // Kerberos principal
+ Username: opts.Auth.Username,
+ // Note: Password is only used on Windows. SASL doesn't allow you to specify
+ // a password, so this field is ignored on Linux and OSX. Run the kinit
+ // command to get a ticket first.
+ Password: opts.Auth.Password,
+ // This should always be '$external', but legacy tools still allow you to
+ // specify a source DB
+ Source: opts.Auth.Source,
+ Service: opts.Kerberos.Service,
+ ServiceHost: opts.Kerberos.ServiceHost,
+ Mechanism: KERBEROS_AUTHENTICATION_MECHANISM,
+ }
+
+ return nil
+}
+
+// Dial the database.
+func (self *KerberosDBConnector) GetNewSession() (*mgo.Session, error) {
+ return mgo.DialWithInfo(self.dialInfo)
+}
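+
+// Example (illustrative sketch): dialing with the Kerberos connector. On Linux
+// and OSX a ticket must already have been acquired with kinit; the options
+// value is hypothetical.
+//
+//	connector := &KerberosDBConnector{}
+//	if err := connector.Configure(opts); err != nil {
+//		return err
+//	}
+//	session, err := connector.GetNewSession()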
diff --git a/src/mongo/gotools/common/db/namespaces.go b/src/mongo/gotools/common/db/namespaces.go
new file mode 100644
index 00000000000..149400543ef
--- /dev/null
+++ b/src/mongo/gotools/common/db/namespaces.go
@@ -0,0 +1,159 @@
+package db
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/log"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "strings"
+)
+
+// IsNoCmd returns true if err indicates that a command is not supported;
+// otherwise it returns false.
+func IsNoCmd(err error) bool {
+ e, ok := err.(*mgo.QueryError)
+ return ok && strings.HasPrefix(e.Message, "no such cmd:")
+}
+
+// IsNoCollection returns true if err indicates a query resulted in a
+// "no collection" error; otherwise it returns false.
+func IsNoCollection(err error) bool {
+ e, ok := err.(*mgo.QueryError)
+ return ok && e.Message == "no collection"
+}
+
+// buildBsonArray takes a cursor iterator and returns an array of
+// all of its documents as bson.D objects.
+func buildBsonArray(iter *mgo.Iter) ([]bson.D, error) {
+ ret := make([]bson.D, 0, 0)
+ index := new(bson.D)
+ for iter.Next(index) {
+ ret = append(ret, *index)
+ index = new(bson.D)
+ }
+
+ if iter.Err() != nil {
+ return nil, iter.Err()
+ }
+ return ret, nil
+}
+
+// GetIndexes returns an iterator to the raw index info for a collection by
+// using the listIndexes command if available, or by falling back to querying
+// against system.indexes (pre-3.0 systems). nil is returned if the collection
+// does not exist.
+func GetIndexes(coll *mgo.Collection) (*mgo.Iter, error) {
+ var cmdResult struct {
+ Cursor struct {
+ FirstBatch []bson.Raw `bson:"firstBatch"`
+ NS string
+ Id int64
+ }
+ }
+
+ err := coll.Database.Run(bson.D{{"listIndexes", coll.Name}, {"cursor", bson.M{}}}, &cmdResult)
+ switch {
+ case err == nil:
+ ns := strings.SplitN(cmdResult.Cursor.NS, ".", 2)
+ if len(ns) < 2 {
+ return nil, fmt.Errorf("server returned invalid cursor.ns `%v` on listIndexes for `%v`: %v",
+ cmdResult.Cursor.NS, coll.FullName, err)
+ }
+
+ ses := coll.Database.Session
+ return ses.DB(ns[0]).C(ns[1]).NewIter(ses, cmdResult.Cursor.FirstBatch, cmdResult.Cursor.Id, nil), nil
+ case IsNoCmd(err):
+ log.Logvf(log.DebugLow, "No support for listIndexes command, falling back to querying system.indexes")
+ return getIndexesPre28(coll)
+ case IsNoCollection(err):
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("error running `listIndexes`. Collection: `%v` Err: %v", coll.FullName, err)
+ }
+}
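+
+// Example (illustrative sketch): draining the iterator returned by GetIndexes;
+// the database and collection names are hypothetical.
+//
+//	iter, err := GetIndexes(session.DB("test").C("foo"))
+//	if err != nil {
+//		return err
+//	}
+//	if iter == nil {
+//		return nil // collection does not exist
+//	}
+//	index := new(bson.D)
+//	for iter.Next(index) {
+//		log.Logvf(log.Info, "found index: %v", *index)
+//		index = new(bson.D)
+//	}
+//	return iter.Err()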
+
+func getIndexesPre28(coll *mgo.Collection) (*mgo.Iter, error) {
+ indexColl := coll.Database.C("system.indexes")
+ iter := indexColl.Find(&bson.M{"ns": coll.FullName}).Iter()
+ return iter, nil
+}
+
+// GetCollections returns an iterator to the raw collection info for a
+// database, using listCollections when available and falling back to querying
+// system.namespaces on pre-2.8 servers. The returned bool is true when the
+// legacy fallback is used, meaning the documents' "name" fields hold full
+// namespaces rather than bare collection names.
+func GetCollections(database *mgo.Database, name string) (*mgo.Iter, bool, error) {
+ var cmdResult struct {
+ Cursor struct {
+ FirstBatch []bson.Raw `bson:"firstBatch"`
+ NS string
+ Id int64
+ }
+ }
+
+ command := bson.D{{"listCollections", 1}, {"cursor", bson.M{}}}
+ if len(name) > 0 {
+ command = bson.D{{"listCollections", 1}, {"filter", bson.M{"name": name}}, {"cursor", bson.M{}}}
+ }
+
+ err := database.Run(command, &cmdResult)
+ switch {
+ case err == nil:
+ ns := strings.SplitN(cmdResult.Cursor.NS, ".", 2)
+ if len(ns) < 2 {
+ return nil, false, fmt.Errorf("server returned invalid cursor.ns `%v` on listCollections for `%v`: %v",
+ cmdResult.Cursor.NS, database.Name, err)
+ }
+
+ return database.Session.DB(ns[0]).C(ns[1]).NewIter(database.Session, cmdResult.Cursor.FirstBatch, cmdResult.Cursor.Id, nil), false, nil
+ case IsNoCmd(err):
+ log.Logvf(log.DebugLow, "No support for listCollections command, falling back to querying system.namespaces")
+ iter, err := getCollectionsPre28(database, name)
+ return iter, true, err
+ default:
+ return nil, false, fmt.Errorf("error running `listCollections`. Database: `%v` Err: %v",
+ database.Name, err)
+ }
+}
+
+func getCollectionsPre28(database *mgo.Database, name string) (*mgo.Iter, error) {
+ indexColl := database.C("system.namespaces")
+ selector := bson.M{}
+ if len(name) > 0 {
+ selector["name"] = database.Name + "." + name
+ }
+ iter := indexColl.Find(selector).Iter()
+ return iter, nil
+}
+
+// GetCollectionOptions returns the collection info document for coll, or
+// (nil, nil) if the collection does not exist.
+func GetCollectionOptions(coll *mgo.Collection) (*bson.D, error) {
+ iter, useFullName, err := GetCollections(coll.Database, coll.Name)
+ if err != nil {
+ return nil, err
+ }
+ comparisonName := coll.Name
+ if useFullName {
+ comparisonName = coll.FullName
+ }
+ collInfo := &bson.D{}
+ for iter.Next(collInfo) {
+ name, err := bsonutil.FindValueByKey("name", collInfo)
+ if err != nil {
+ collInfo = nil
+ continue
+ }
+ if nameStr, ok := name.(string); ok {
+ if nameStr == comparisonName {
+ // we've found the collection we're looking for
+ return collInfo, nil
+ }
+ } else {
+ collInfo = nil
+ continue
+ }
+ }
+ err = iter.Err()
+ if err != nil {
+ return nil, err
+ }
+ // The given collection was not found, but no error encountered.
+ return nil, nil
+}
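+
+// Example (illustrative sketch): fetching the options for a collection while
+// tolerating the (nil, nil) not-found case; the namespace is hypothetical and
+// the "options" sub-document key is assumed from listCollections output.
+//
+//	info, err := GetCollectionOptions(session.DB("test").C("events"))
+//	if err != nil {
+//		return err
+//	}
+//	if info == nil {
+//		return nil // collection does not exist
+//	}
+//	collOpts, err := bsonutil.FindValueByKey("options", info)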
diff --git a/src/mongo/gotools/common/db/openssl/openssl.go b/src/mongo/gotools/common/db/openssl/openssl.go
new file mode 100644
index 00000000000..9b3c50c0e90
--- /dev/null
+++ b/src/mongo/gotools/common/db/openssl/openssl.go
@@ -0,0 +1,168 @@
+// Package openssl implements connection to MongoDB over ssl.
+package openssl
+
+import (
+ "fmt"
+ "net"
+ "time"
+
+ "gopkg.in/mgo.v2"
+
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/util"
+ "github.com/spacemonkeygo/openssl"
+)
+
+// For connecting to the database over ssl
+type SSLDBConnector struct {
+ dialInfo *mgo.DialInfo
+ dialError error
+ ctx *openssl.Ctx
+}
+
+// Configure the connector to connect to the server over ssl. Parses the
+// connection string, and sets up the correct function to dial the server
+// based on the ssl options passed in.
+func (self *SSLDBConnector) Configure(opts options.ToolOptions) error {
+
+ // create the addresses to be used to connect
+ connectionAddrs := util.CreateConnectionAddrs(opts.Host, opts.Port)
+
+ var err error
+ self.ctx, err = setupCtx(opts)
+ if err != nil {
+ return fmt.Errorf("openssl configuration: %v", err)
+ }
+
+ var flags openssl.DialFlags
+ flags = 0
+ if opts.SSLAllowInvalidCert || opts.SSLAllowInvalidHost || opts.SSLCAFile == "" {
+ flags = openssl.InsecureSkipHostVerification
+ }
+ // create the dialer func that will be used to connect
+ dialer := func(addr *mgo.ServerAddr) (net.Conn, error) {
+ conn, err := openssl.Dial("tcp", addr.String(), self.ctx, flags)
+ self.dialError = err
+ return conn, err
+ }
+
+ timeout := time.Duration(opts.Timeout) * time.Second
+
+ // set up the dial info
+ self.dialInfo = &mgo.DialInfo{
+ Addrs: connectionAddrs,
+ Timeout: timeout,
+ Direct: opts.Direct,
+ ReplicaSetName: opts.ReplicaSetName,
+ DialServer: dialer,
+ Username: opts.Auth.Username,
+ Password: opts.Auth.Password,
+ Source: opts.GetAuthenticationDatabase(),
+ Mechanism: opts.Auth.Mechanism,
+ }
+
+ return nil
+}
+
+// Dial the server.
+func (self *SSLDBConnector) GetNewSession() (*mgo.Session, error) {
+ session, err := mgo.DialWithInfo(self.dialInfo)
+ if err != nil && self.dialError != nil {
+ return nil, fmt.Errorf("%v, openssl error: %v", err, self.dialError)
+ }
+ return session, err
+}
+
+// To be handed to mgo.DialInfo for connecting to the server.
+type dialerFunc func(addr *mgo.ServerAddr) (net.Conn, error)
+
+// Handle optionally compiled SSL initialization functions (fips mode set)
+type sslInitializationFunction func(options.ToolOptions) error
+
+var sslInitializationFunctions []sslInitializationFunction
+
+// Creates and configures an openssl.Ctx
+func setupCtx(opts options.ToolOptions) (*openssl.Ctx, error) {
+ var ctx *openssl.Ctx
+ var err error
+
+ for _, sslInitFunc := range sslInitializationFunctions {
+ sslInitFunc(opts)
+ }
+
+ if ctx, err = openssl.NewCtxWithVersion(openssl.AnyVersion); err != nil {
+ return nil, fmt.Errorf("failure creating new openssl context with "+
+ "NewCtxWithVersion(AnyVersion): %v", err)
+ }
+
+ // OpAll - Activate all bug workaround options, to support buggy client SSL's.
+ // NoSSLv2 - Disable SSL v2 support
+ ctx.SetOptions(openssl.OpAll | openssl.NoSSLv2)
+
+ // HIGH - Enable strong ciphers
+ // !EXPORT - Disable export ciphers (40/56 bit)
+ // !aNULL - Disable anonymous auth ciphers
+ // @STRENGTH - Sort ciphers based on strength
+ ctx.SetCipherList("HIGH:!EXPORT:!aNULL@STRENGTH")
+
+ // add the PEM key file with the cert and private key, if specified
+ if opts.SSLPEMKeyFile != "" {
+ if err = ctx.UseCertificateChainFile(opts.SSLPEMKeyFile); err != nil {
+ return nil, fmt.Errorf("UseCertificateChainFile: %v", err)
+ }
+ if opts.SSLPEMKeyPassword != "" {
+ if err = ctx.UsePrivateKeyFileWithPassword(
+ opts.SSLPEMKeyFile, openssl.FiletypePEM, opts.SSLPEMKeyPassword); err != nil {
+ return nil, fmt.Errorf("UsePrivateKeyFile: %v", err)
+ }
+ } else {
+ if err = ctx.UsePrivateKeyFile(opts.SSLPEMKeyFile, openssl.FiletypePEM); err != nil {
+ return nil, fmt.Errorf("UsePrivateKeyFile: %v", err)
+ }
+ }
+ // Verify that the certificate and the key go together.
+ if err = ctx.CheckPrivateKey(); err != nil {
+ return nil, fmt.Errorf("CheckPrivateKey: %v", err)
+ }
+ }
+
+ // If renegotiation is needed, don't return from recv() or send() until it's successful.
+ // Note: this is for blocking sockets only.
+ ctx.SetMode(openssl.AutoRetry)
+
+ // Disable session caching (see SERVER-10261)
+ ctx.SetSessionCacheMode(openssl.SessionCacheOff)
+
+ if opts.SSLCAFile != "" {
+ calist, err := openssl.LoadClientCAFile(opts.SSLCAFile)
+ if err != nil {
+ return nil, fmt.Errorf("LoadClientCAFile: %v", err)
+ }
+ ctx.SetClientCAList(calist)
+
+ if err = ctx.LoadVerifyLocations(opts.SSLCAFile, ""); err != nil {
+ return nil, fmt.Errorf("LoadVerifyLocations: %v", err)
+ }
+
+ var verifyOption openssl.VerifyOptions
+ if opts.SSLAllowInvalidCert {
+ verifyOption = openssl.VerifyNone
+ } else {
+ verifyOption = openssl.VerifyPeer
+ }
+ ctx.SetVerify(verifyOption, nil)
+ }
+
+ if opts.SSLCRLFile != "" {
+ store := ctx.GetCertificateStore()
+ store.SetFlags(openssl.CRLCheck)
+ lookup, err := store.AddLookup(openssl.X509LookupFile())
+ if err != nil {
+ return nil, fmt.Errorf("AddLookup(X509LookupFile()): %v", err)
+ }
+ lookup.LoadCRLFile(opts.SSLCRLFile)
+ }
+
+ return ctx, nil
+}
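+
+// Example (illustrative sketch): driving the connector directly rather than
+// through GetConnectorFuncs; the file paths are hypothetical.
+//
+//	opts.SSL.UseSSL = true
+//	opts.SSL.SSLCAFile = "testdata/ca.pem"
+//	opts.SSL.SSLPEMKeyFile = "testdata/server.pem"
+//	connector := &SSLDBConnector{}
+//	if err := connector.Configure(opts); err != nil {
+//		return err
+//	}
+//	session, err := connector.GetNewSession()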
diff --git a/src/mongo/gotools/common/db/openssl/openssl_fips.go b/src/mongo/gotools/common/db/openssl/openssl_fips.go
new file mode 100644
index 00000000000..2c4705e23ff
--- /dev/null
+++ b/src/mongo/gotools/common/db/openssl/openssl_fips.go
@@ -0,0 +1,15 @@
+// +build ssl
+// +build !darwin
+
+package openssl
+
+import (
+	"fmt"
+
+	"github.com/mongodb/mongo-tools/common/options"
+	"github.com/spacemonkeygo/openssl"
+)
+
+func init() { sslInitializationFunctions = append(sslInitializationFunctions, SetUpFIPSMode) }
+
+// SetUpFIPSMode sets OpenSSL FIPS mode according to the ssl FIPS mode option.
+func SetUpFIPSMode(opts options.ToolOptions) error {
+	if err := openssl.FIPSModeSet(opts.SSLFipsMode); err != nil {
+		return fmt.Errorf("couldn't set FIPS mode to %v: %v", opts.SSLFipsMode, err)
+	}
+	return nil
+}
diff --git a/src/mongo/gotools/common/db/openssl/testdata/ca.pem b/src/mongo/gotools/common/db/openssl/testdata/ca.pem
new file mode 100644
index 00000000000..b1b6f2628da
--- /dev/null
+++ b/src/mongo/gotools/common/db/openssl/testdata/ca.pem
@@ -0,0 +1,34 @@
+-----BEGIN PRIVATE KEY-----
+MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBAMbN8D5Au+xWdY+s
+GpUuSFSbHGzYfHmw0yajA9J8PiwDePRMl71OMMsByNsykjzXEr0BBOn4PNO6KW7K
+HdDicRavuC/iFucVpILUiJoLOUCPKb/EyAHUk0r2fdr3Ypd2ZXkD1EXmM9WTQnyW
+PEWqr1T7MmM9PhsD0r8ZbQVu8R49AgMBAAECgYBbC+mguQjXfektOUabV6zsgnUM
+LEElgiPRqAqSFTBr+9MjHwjHO84Ayvpv2MM8dcsxIAxeEr/Yv4NGJ+5rwajESir6
+/7UzqzhXmj6ylqTfbMRJCRsqnwvSfNwpsxtMSYieCxtdYqTLaJLAItBjuZPAYL8W
+9Tf/NMc4AjLLHx7PyQJBAOyOcIS/i23td6ZX+QtppXL1fF/JMiKooE9m/npAT5K/
+hQEaAatdLyQ669id181KY9F0JR1TEbzb0A1yo73soRsCQQDXJSG4ID8lfR9SXnEE
+y/RqYv0eKneER+V7e1Cy7bYHvJxZK0sWXYzIZhTl8PABh3PCoLdxjY0IM7UNWlwU
+dAuHAkAOUaTv9CQ9eDVY5VRW44M3TTLFHYmiXXCuvb5Dqibm7B7h7TASrmZPHB3w
+k8VfUNRv9kbU2pVlSCz0026j7XHnAkEAk/qZP8EGTe3K3mfRCsCSA57EhLwm6phd
+ElrWPcvc2WN0kqyBgAembqwwEZxwKE0XZTYQFw2KhKq0DFQrY3IR/wJAIAnLtabL
+aF819WI/VYlMmwb3GAO2w5KQilGhYl7tv1BghH+Qmg7HZEcIRmSwPKEQveT3YpCH
+nCu38jgPXhhqdg==
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIC3DCCAkWgAwIBAgIJAKwksc/otf2iMA0GCSqGSIb3DQEBCwUAMIGGMQswCQYD
+VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp
+dHkxHTAbBgNVBAoMFE1vbmdvREIgS2VybmVsIFRvb2xzMRkwFwYDVQQLDBBUb29s
+cyBUZXN0aW5nIENBMRIwEAYDVQQDDAlsb2NhbGhvc3QwHhcNMTUwNjA1MTU1MTQ1
+WhcNMzUwNjA0MTU1MTQ1WjCBhjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZ
+b3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MR0wGwYDVQQKDBRNb25nb0RCIEtl
+cm5lbCBUb29sczEZMBcGA1UECwwQVG9vbHMgVGVzdGluZyBDQTESMBAGA1UEAwwJ
+bG9jYWxob3N0MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDGzfA+QLvsVnWP
+rBqVLkhUmxxs2Hx5sNMmowPSfD4sA3j0TJe9TjDLAcjbMpI81xK9AQTp+DzTuilu
+yh3Q4nEWr7gv4hbnFaSC1IiaCzlAjym/xMgB1JNK9n3a92KXdmV5A9RF5jPVk0J8
+ljxFqq9U+zJjPT4bA9K/GW0FbvEePQIDAQABo1AwTjAdBgNVHQ4EFgQU+QOiCHTF
+8At8aMOBvHF6wWZpcZUwHwYDVR0jBBgwFoAU+QOiCHTF8At8aMOBvHF6wWZpcZUw
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOBgQCbbIAjdV+M8RR3ZF1WMBYD
+8aMr55kgtnCWn4mTCDdombCYgtbaPq5sy8Hb/2wLQ9Zl4UuFL5wKWcx3kOLo3cw/
+boj8jnUDnwrsBd2nN7sYdjF+M7FLp6U1AxrE5ejijtg2KCl+p4b7jJgJBSFIQD45
+7CAJVjIrajY4LlJj3x+caQ==
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/common/db/openssl/testdata/server.pem b/src/mongo/gotools/common/db/openssl/testdata/server.pem
new file mode 100644
index 00000000000..d2aaa930ff5
--- /dev/null
+++ b/src/mongo/gotools/common/db/openssl/testdata/server.pem
@@ -0,0 +1,32 @@
+-----BEGIN PRIVATE KEY-----
+MIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBALOkdwU9Qx4FRn+z
+coBkeYYpVRg0pknPMDo4Q50TqZPfVhroTynx2Or+cjl5csd5hMKxWQpdzGq8JzH9
+2BCLcDz/51vG3tPrpLIB50ABqa0wRGGDOO+XN0h+VkdqJvKReWOsNRoMT3s0Lh78
+BqvRUomYXnbc1RBaxwWa+UoLCFgnAgMBAAECgYBd9XmjLeW6//tds5gB+4tsVpYB
+cRhAprOM3/zNXYlmpHu+2x78y1gvoSJRWWplVvPPeT8fIuxWL0844JJwJN5wyCwN
+nnrA28l6+Tcde+NlzCxwED+QDjAH20BRxCs0BLvnx3WAXRDmUbWAjOl/qPn9H6m1
+nmUQ7H/f6dxZ0vVMQQJBAOl3xeVLyZZ828P/p3PvYkaeIxxVK1QDGOWi/3vC0DrY
+WK8xAoopjj0RHHZ1fL5bG31G3OR9Vc/rfk4a5XPIlRECQQDE+teCTiwV5Wwzdpg3
+r440qOLCmpMXwJr/Jlh+C4c8ebnIQ9P5sSe4wQNHyeEZ2t7SGvPfjr7glpPhAkXy
+JTm3AkEAvNPgvVoUy6Bk5xuJRl2hMNiKMUo5ZxOyOVkiJeklHdMJt3h+Q1zk7ENA
+sBbKM/PgQezkj/FHTIl9eJKMbp8W4QJBAL4aXHyslw12wisUrKkpa7PUviwT5BvL
+TYsrZcIXvCeYTr1BAMX8vBopZNIWuoEqY1sgmfZKnFrB1+wTNpAQbxcCQQCHbtvQ
+1U2p5Pz5XYyaoK2OEZhPMuLnOBMpzjSxRLxKyhb4k+ssIA0IeAiT4RIECtHJ8DJX
+4aZK/qg9WmBH+zbO
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIICbzCCAdgCAQEwDQYJKoZIhvcNAQEFBQAwgYYxCzAJBgNVBAYTAlVTMREwDwYD
+VQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEdMBsGA1UECgwU
+TW9uZ29EQiBLZXJuZWwgVG9vbHMxGTAXBgNVBAsMEFRvb2xzIFRlc3RpbmcgQ0Ex
+EjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0xNTA2MDUxNTUxNDVaFw0zNTA2MDQxNTUx
+NDVaMHkxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN
+TmV3IFlvcmsgQ2l0eTEUMBIGA1UECgwLTW9uZ29EQiBJbmMxFTATBgNVBAsMDEtl
+cm5lbCBUb29sczESMBAGA1UEAwwJbG9jYWxob3N0MIGfMA0GCSqGSIb3DQEBAQUA
+A4GNADCBiQKBgQCzpHcFPUMeBUZ/s3KAZHmGKVUYNKZJzzA6OEOdE6mT31Ya6E8p
+8djq/nI5eXLHeYTCsVkKXcxqvCcx/dgQi3A8/+dbxt7T66SyAedAAamtMERhgzjv
+lzdIflZHaibykXljrDUaDE97NC4e/Aar0VKJmF523NUQWscFmvlKCwhYJwIDAQAB
+MA0GCSqGSIb3DQEBBQUAA4GBACJiTnC3nksZsmMyD88+DuV8IA1DHSby4X/qtDYT
+eSuNbxRKnihXkm2KE+MGn7YeKg4a7FaYiH3ejk0ZBlY3TZXK3I1uh/zIhC9aMnSL
+z0z4OLcqp46F8PpYF7ARtXXWQuOEWe6k+VKy5XP1NX60sEJ0KwGBQjUw3Ys41JE8
+iigw
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/common/db/read_preferences.go b/src/mongo/gotools/common/db/read_preferences.go
new file mode 100644
index 00000000000..9dec319ca48
--- /dev/null
+++ b/src/mongo/gotools/common/db/read_preferences.go
@@ -0,0 +1,51 @@
+package db
+
+import (
+ "fmt"
+
+ "github.com/mongodb/mongo-tools/common/json"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+)
+
+type readPrefDoc struct {
+ Mode string
+ Tags bson.D
+}
+
+const (
+ WarningNonPrimaryMongosConnection = "Warning: using a non-primary readPreference with a " +
+ "connection to mongos may produce inconsistent duplicates or miss some documents."
+)
+
+func ParseReadPreference(rp string) (mgo.Mode, bson.D, error) {
+ var mode string
+ var tags bson.D
+ if rp == "" {
+ return mgo.Nearest, nil, nil
+ }
+ if rp[0] != '{' {
+ mode = rp
+ } else {
+ var doc readPrefDoc
+ err := json.Unmarshal([]byte(rp), &doc)
+ if err != nil {
+			return 0, nil, fmt.Errorf("invalid --readPreference JSON object: %v", err)
+ }
+ tags = doc.Tags
+ mode = doc.Mode
+ }
+ switch mode {
+ case "primary":
+ return mgo.Primary, tags, nil
+ case "primaryPreferred":
+ return mgo.PrimaryPreferred, tags, nil
+ case "secondary":
+ return mgo.Secondary, tags, nil
+ case "secondaryPreferred":
+ return mgo.SecondaryPreferred, tags, nil
+ case "nearest":
+ return mgo.Nearest, tags, nil
+ }
+ return 0, nil, fmt.Errorf("invalid readPreference mode '%v'", mode)
+}
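+
+// Example (illustrative sketch): both the bare-mode and JSON-document forms
+// are accepted; the tag set shown is hypothetical.
+//
+//	mode, tags, err := ParseReadPreference(`{mode: "nearest", tags: {dc: "east"}}`)
+//	if err != nil {
+//		return err
+//	}
+//	session.SetMode(mode, true)
+//	if tags != nil {
+//		session.SelectServers(tags)
+//	}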
diff --git a/src/mongo/gotools/common/db/testdata/testdata.bson b/src/mongo/gotools/common/db/testdata/testdata.bson
new file mode 100644
index 00000000000..5157dc1158f
--- /dev/null
+++ b/src/mongo/gotools/common/db/testdata/testdata.bson
Binary files differ
diff --git a/src/mongo/gotools/common/db/write_concern.go b/src/mongo/gotools/common/db/write_concern.go
new file mode 100644
index 00000000000..0a9a16214c8
--- /dev/null
+++ b/src/mongo/gotools/common/db/write_concern.go
@@ -0,0 +1,123 @@
+package db
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+ "strconv"
+)
+
+// write concern fields
+const (
+ j = "j"
+ w = "w"
+ fSync = "fsync"
+ wTimeout = "wtimeout"
+)
+
+// constructWCObject takes in a write concern and attempts to construct an
+// mgo.Safe object from it. It returns an error if it is unable to parse the
+// string or if a parsed write concern field value is invalid.
+func constructWCObject(writeConcern string) (sessionSafety *mgo.Safe, err error) {
+ sessionSafety = &mgo.Safe{}
+ defer func() {
+		// If the user passes a w value of 0, we set the session to use the
+		// unacknowledged write concern, but only if journal commit acknowledgment
+		// is not required. If commit acknowledgment is required, it prevails,
+		// and the server will require that mongod acknowledge the write operation.
+ if sessionSafety.WMode == "" && sessionSafety.W == 0 && !sessionSafety.J {
+ sessionSafety = nil
+ }
+ }()
+ jsonWriteConcern := map[string]interface{}{}
+
+ if err = json.Unmarshal([]byte(writeConcern), &jsonWriteConcern); err != nil {
+		// if the writeConcern string cannot be unmarshaled into JSON, fall
+		// back to the old behavior wherein the entire argument is assigned
+		// to the 'w' field - thus allowing users to pass a write concern
+		// that looks like: "majority", 0, "4", etc.
+ wValue, err := strconv.Atoi(writeConcern)
+ if err != nil {
+ sessionSafety.WMode = writeConcern
+ } else {
+ sessionSafety.W = wValue
+ if wValue < 0 {
+ return sessionSafety, fmt.Errorf("invalid '%v' argument: %v", w, wValue)
+ }
+ }
+ return sessionSafety, nil
+ }
+
+ if jVal, ok := jsonWriteConcern[j]; ok && util.IsTruthy(jVal) {
+ sessionSafety.J = true
+ }
+
+ if fsyncVal, ok := jsonWriteConcern[fSync]; ok && util.IsTruthy(fsyncVal) {
+ sessionSafety.FSync = true
+ }
+
+ if wtimeout, ok := jsonWriteConcern[wTimeout]; ok {
+ wtimeoutValue, err := util.ToInt(wtimeout)
+ if err != nil {
+ return sessionSafety, fmt.Errorf("invalid '%v' argument: %v", wTimeout, wtimeout)
+ }
+ sessionSafety.WTimeout = wtimeoutValue
+ }
+
+ if wInterface, ok := jsonWriteConcern[w]; ok {
+ wValue, err := util.ToInt(wInterface)
+ if err != nil {
+ // if the argument is neither a string nor int, error out
+ wStrVal, ok := wInterface.(string)
+ if !ok {
+ return sessionSafety, fmt.Errorf("invalid '%v' argument: %v", w, wInterface)
+ }
+ sessionSafety.WMode = wStrVal
+ } else {
+ sessionSafety.W = wValue
+ if wValue < 0 {
+ return sessionSafety, fmt.Errorf("invalid '%v' argument: %v", w, wValue)
+ }
+ }
+ }
+ return sessionSafety, nil
+}
+
+// BuildWriteConcern takes a string and a NodeType indicating the type of node the write concern
+// is intended to be used against, and converts the write concern string argument into an
+// mgo.Safe object that's usable on sessions for that node type.
+func BuildWriteConcern(writeConcern string, nodeType NodeType) (*mgo.Safe, error) {
+ sessionSafety, err := constructWCObject(writeConcern)
+ if err != nil {
+ return nil, err
+ }
+
+ if sessionSafety == nil {
+ log.Logvf(log.DebugLow, "using unacknowledged write concern")
+ return nil, nil
+ }
+
+ // for standalone mongods, set the default write concern to 1
+ if nodeType == Standalone {
+ log.Logvf(log.DebugLow, "standalone server: setting write concern %v to 1", w)
+ sessionSafety.W = 1
+ sessionSafety.WMode = ""
+ }
+
+ var writeConcernStr interface{}
+
+ if sessionSafety.WMode != "" {
+ writeConcernStr = sessionSafety.WMode
+ } else {
+ writeConcernStr = sessionSafety.W
+ }
+ log.Logvf(log.Info, "using write concern: %v='%v', %v=%v, %v=%v, %v=%v",
+ w, writeConcernStr,
+ j, sessionSafety.J,
+ fSync, sessionSafety.FSync,
+ wTimeout, sessionSafety.WTimeout,
+ )
+ return sessionSafety, nil
+}
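+
+// Example (illustrative sketch): converting a user-supplied write concern
+// string into session safety settings; a nil result means unacknowledged
+// writes, which mgo expresses as SetSafe(nil).
+//
+//	safety, err := BuildWriteConcern(`{w: "majority", wtimeout: 5000}`, ReplSet)
+//	if err != nil {
+//		return err
+//	}
+//	session.SetSafe(safety)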
diff --git a/src/mongo/gotools/common/db/write_concern_test.go b/src/mongo/gotools/common/db/write_concern_test.go
new file mode 100644
index 00000000000..96bd8e0ed89
--- /dev/null
+++ b/src/mongo/gotools/common/db/write_concern_test.go
@@ -0,0 +1,166 @@
+package db
+
+import (
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestBuildWriteConcern(t *testing.T) {
+ Convey("Given a write concern string value, and a boolean indicating if the "+
+ "write concern is to be used on a replica set, on calling BuildWriteConcern...", t, func() {
+ Convey("no error should be returned if the write concern is valid", func() {
+ writeConcern, err := BuildWriteConcern(`{w:34}`, ReplSet)
+ So(err, ShouldBeNil)
+ So(writeConcern.W, ShouldEqual, 34)
+ writeConcern, err = BuildWriteConcern(`{w:"majority"}`, ReplSet)
+ So(err, ShouldBeNil)
+ So(writeConcern.WMode, ShouldEqual, "majority")
+ writeConcern, err = BuildWriteConcern(`majority`, ReplSet)
+ So(err, ShouldBeNil)
+ So(writeConcern.WMode, ShouldEqual, "majority")
+ writeConcern, err = BuildWriteConcern(`tagset`, ReplSet)
+ So(err, ShouldBeNil)
+ So(writeConcern.WMode, ShouldEqual, "tagset")
+ })
+ Convey("on replica sets, only a write concern of 1 or 0 should be returned", func() {
+ writeConcern, err := BuildWriteConcern(`{w:34}`, Standalone)
+ So(err, ShouldBeNil)
+ So(writeConcern.W, ShouldEqual, 1)
+ writeConcern, err = BuildWriteConcern(`{w:"majority"}`, Standalone)
+ So(err, ShouldBeNil)
+ So(writeConcern.W, ShouldEqual, 1)
+ writeConcern, err = BuildWriteConcern(`tagset`, Standalone)
+ So(err, ShouldBeNil)
+ So(writeConcern.W, ShouldEqual, 1)
+ })
+ Convey("with a w value of 0, without j set, a nil write concern should be returned", func() {
+ writeConcern, err := BuildWriteConcern(`{w:0}`, Standalone)
+ So(err, ShouldBeNil)
+ So(writeConcern, ShouldBeNil)
+ })
+ Convey("with a negative w value, an error should be returned", func() {
+ _, err := BuildWriteConcern(`{w:-1}`, ReplSet)
+ So(err, ShouldNotBeNil)
+ _, err = BuildWriteConcern(`{w:-2}`, ReplSet)
+ So(err, ShouldNotBeNil)
+ })
+ Convey("with a w value of 0, with j set, a non-nil write concern should be returned", func() {
+ writeConcern, err := BuildWriteConcern(`{w:0, j:true}`, Standalone)
+ So(err, ShouldBeNil)
+ So(writeConcern.J, ShouldBeTrue)
+ })
+ })
+}
+
+func TestConstructWCObject(t *testing.T) {
+ Convey("Given a write concern string value, on calling constructWCObject...", t, func() {
+
+ Convey("non-JSON string values should be assigned to the 'WMode' "+
+ "field in their entirety", func() {
+ writeConcernString := "majority"
+ writeConcern, err := constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.WMode, ShouldEqual, writeConcernString)
+ })
+
+ Convey("non-JSON int values should be assigned to the 'w' field "+
+ "in their entirety", func() {
+ writeConcernString := `{w: 4}`
+ writeConcern, err := constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.W, ShouldEqual, 4)
+ })
+
+ Convey("JSON strings with valid j, wtimeout, fsync and w, should be "+
+ "assigned accordingly", func() {
+ writeConcernString := `{w: 3, j: true, fsync: false, wtimeout: 43}`
+ expectedW := 3
+ expectedWTimeout := 43
+ writeConcern, err := constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.W, ShouldEqual, expectedW)
+ So(writeConcern.J, ShouldBeTrue)
+ So(writeConcern.FSync, ShouldBeFalse)
+ So(writeConcern.WTimeout, ShouldEqual, expectedWTimeout)
+ })
+
+ Convey("JSON strings with an argument for j that is not false should set j true", func() {
+ writeConcernString := `{w: 3, j: "rue"}`
+ writeConcern, err := constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.W, ShouldEqual, 3)
+ So(writeConcern.J, ShouldBeTrue)
+ })
+
+ Convey("JSON strings with an argument for fsync that is not false should set fsync true", func() {
+ writeConcernString := `{w: 3, fsync: "rue"}`
+ writeConcern, err := constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.W, ShouldEqual, 3)
+ So(writeConcern.FSync, ShouldBeTrue)
+ })
+
+ Convey("JSON strings with an invalid wtimeout argument should error out", func() {
+ writeConcernString := `{w: 3, wtimeout: "rue"}`
+ _, err := constructWCObject(writeConcernString)
+ So(err, ShouldNotBeNil)
+ writeConcernString = `{w: 3, wtimeout: "43"}`
+ _, err = constructWCObject(writeConcernString)
+ So(err, ShouldNotBeNil)
+ })
+
+ Convey("JSON strings with any non-false j argument should not error out", func() {
+ writeConcernString := `{w: 3, j: "t"}`
+ writeConcern, err := constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.J, ShouldBeTrue)
+ writeConcernString = `{w: 3, j: "f"}`
+ writeConcern, err = constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.J, ShouldBeTrue)
+ writeConcernString = `{w: 3, j: false}`
+ writeConcern, err = constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.J, ShouldBeFalse)
+ writeConcernString = `{w: 3, j: 0}`
+ writeConcern, err = constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.J, ShouldBeFalse)
+ })
+
+ Convey("JSON strings with a shorthand fsync argument should not error out", func() {
+ writeConcernString := `{w: 3, fsync: "t"}`
+ writeConcern, err := constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.FSync, ShouldBeTrue)
+ writeConcernString = `{w: "3", fsync: "f"}`
+ writeConcern, err = constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.FSync, ShouldBeTrue)
+ writeConcernString = `{w: "3", fsync: false}`
+ writeConcern, err = constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.FSync, ShouldBeFalse)
+ writeConcernString = `{w: "3", fsync: 0}`
+ writeConcern, err = constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern.FSync, ShouldBeFalse)
+ })
+
+ Convey("Unacknowledge write concern strings should return a nil object "+
+ "if journaling is not required", func() {
+ writeConcernString := `{w: 0}`
+ writeConcern, err := constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern, ShouldBeNil)
+ writeConcernString = `{w: 0}`
+ writeConcern, err = constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern, ShouldBeNil)
+ writeConcernString = `0`
+ writeConcern, err = constructWCObject(writeConcernString)
+ So(err, ShouldBeNil)
+ So(writeConcern, ShouldBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/intents/intent.go b/src/mongo/gotools/common/intents/intent.go
new file mode 100644
index 00000000000..42999806744
--- /dev/null
+++ b/src/mongo/gotools/common/intents/intent.go
@@ -0,0 +1,466 @@
+// Package intents provides utilities for performing dump/restore operations.
+package intents
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/mongodb/mongo-tools/common"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2/bson"
+)
+
+type file interface {
+ io.ReadWriteCloser
+ Open() error
+ Pos() int64
+}
+
+// DestinationConflictError occurs when multiple namespaces map to the same
+// destination.
+type DestinationConflictError struct {
+ Src, Dst string
+}
+
+func (e DestinationConflictError) Error() string {
+ return fmt.Sprintf("destination conflict: %s (src) => %s (dst)", e.Src, e.Dst)
+}
+
+// FileNeedsIOBuffer is an interface that denotes that a struct needs
+// an IO buffer that is managed by an outside control. This interface
+// is used to both hand off a buffer to a struct and signal that it should
+// release its buffer. Added to reduce memory usage as outlined in TOOLS-1088.
+type FileNeedsIOBuffer interface {
+ TakeIOBuffer([]byte)
+ ReleaseIOBuffer()
+}
+
+// mongorestore first scans the dump directory to generate a list of all files
+// to restore and what they map to. An Intent records one such mapping: the
+// source files for a collection and the destination namespace they restore to.
+type Intent struct {
+ // Destination namespace info
+ DB string
+ C string
+
+ // File locations as absolute paths
+ BSONFile file
+ BSONSize int64
+ MetadataFile file
+
+ // Indicates where the intent will be read from or written to
+ Location string
+ MetadataLocation string
+
+ // Collection options
+ Options *bson.D
+
+ // File/collection size, for some prioritizer implementations.
+ // Units don't matter as long as they are consistent for a given use case.
+ Size int64
+}
+
+func (it *Intent) Namespace() string {
+ return it.DB + "." + it.C
+}
+
+func (it *Intent) IsOplog() bool {
+ if it.DB == "" && it.C == "oplog" {
+ return true
+ }
+ return it.DB == "local" && (it.C == "oplog.rs" || it.C == "oplog.$main")
+}
+
+func (it *Intent) IsUsers() bool {
+ if it.C == "$admin.system.users" {
+ return true
+ }
+ if it.DB == "admin" && it.C == "system.users" {
+ return true
+ }
+ return false
+}
+
+func (it *Intent) IsRoles() bool {
+ if it.C == "$admin.system.roles" {
+ return true
+ }
+ if it.DB == "admin" && it.C == "system.roles" {
+ return true
+ }
+ return false
+}
+
+func (it *Intent) IsAuthVersion() bool {
+ if it.C == "$admin.system.version" {
+ return true
+ }
+ if it.DB == "admin" && it.C == "system.version" {
+ return true
+ }
+ return false
+}
+
+func (it *Intent) IsSystemIndexes() bool {
+ return it.C == "system.indexes"
+}
+
+func (intent *Intent) IsSpecialCollection() bool {
+ return intent.IsSystemIndexes() || intent.IsUsers() || intent.IsRoles() || intent.IsAuthVersion()
+}
+
+func (existing *Intent) MergeIntent(intent *Intent) {
+ // merge new intent into old intent
+ if existing.BSONFile == nil {
+ existing.BSONFile = intent.BSONFile
+ }
+ if existing.Size == 0 {
+ existing.Size = intent.Size
+ }
+ if existing.Location == "" {
+ existing.Location = intent.Location
+ }
+ if existing.MetadataFile == nil {
+ existing.MetadataFile = intent.MetadataFile
+ }
+ if existing.MetadataLocation == "" {
+ existing.MetadataLocation = intent.MetadataLocation
+ }
+}
+
+type Manager struct {
+ // intents are for all of the regular user created collections
+ intents map[string]*Intent
+ // special intents are for all of the collections that are created by mongod
+ // and require special handling
+ specialIntents map[string]*Intent
+
+ // legacy mongorestore works in the order that paths are discovered,
+ // so we need an ordered data structure to preserve this behavior.
+ intentsByDiscoveryOrder []*Intent
+
+ // we need different scheduling order depending on the target
+ // mongod/mongos and whether or not we are multi threading;
+ // the IntentPrioritizer interface encapsulates this.
+ prioritizer IntentPrioritizer
+
+ // special cases that should be saved but not be part of the queue.
+ // used to deal with oplog and user/roles restoration, which are
+ // handled outside of the basic logic of the tool
+ oplogIntent *Intent
+ usersIntent *Intent
+ rolesIntent *Intent
+ versionIntent *Intent
+ indexIntents map[string]*Intent
+
+ // Tells the manager if it should choose a single oplog when multiple are provided.
+ smartPickOplog bool
+
+	// Indicates if the manager has seen two conflicting oplogs.
+ oplogConflict bool
+
+ // prevent conflicting destinations by checking which sources map to the
+ // same namespace
+ destinations map[string][]string
+}
+
+func NewIntentManager() *Manager {
+ return &Manager{
+ intents: map[string]*Intent{},
+ specialIntents: map[string]*Intent{},
+ intentsByDiscoveryOrder: []*Intent{},
+ indexIntents: map[string]*Intent{},
+ smartPickOplog: false,
+ oplogConflict: false,
+ destinations: map[string][]string{},
+ }
+}
+
+func (mgr *Manager) SetSmartPickOplog(smartPick bool) {
+ mgr.smartPickOplog = smartPick
+}
+
+// HasConfigDBIntent returns a bool indicating if any of the intents refer to the "config" database.
+// This can be used to check for possible unwanted conflicts before restoring to a sharded system.
+func (mgr *Manager) HasConfigDBIntent() bool {
+ for _, intent := range mgr.intentsByDiscoveryOrder {
+ if intent.DB == "config" {
+ return true
+ }
+ }
+ return false
+}
+
+// PutOplogIntent takes an intent for an oplog and stores it in the intent manager with the
+// provided key. If the manager has smartPickOplog enabled, then it uses a priority system
+// to determine which oplog intent to maintain as the actual oplog.
+func (manager *Manager) PutOplogIntent(intent *Intent, managerKey string) {
+ if manager.smartPickOplog {
+ if existing := manager.specialIntents[managerKey]; existing != nil {
+ existing.MergeIntent(intent)
+ return
+ }
+ if manager.oplogIntent == nil {
+ // If there is no oplog intent, make this one the oplog.
+ manager.oplogIntent = intent
+ manager.specialIntents[managerKey] = intent
+ } else if intent.DB == "" {
+ // We already have an oplog and this is a top priority oplog.
+ if manager.oplogIntent.DB == "" {
+ // If the manager's current oplog is also top priority, we have a
+ // conflict and ignore this oplog.
+ manager.oplogConflict = true
+ } else {
+ // If the manager's current oplog is lower priority, replace it and
+ // move that one to be a normal intent.
+ manager.putNormalIntent(manager.oplogIntent)
+ delete(manager.specialIntents, manager.oplogIntent.Namespace())
+ manager.oplogIntent = intent
+ manager.specialIntents[managerKey] = intent
+ }
+ } else {
+ // We already have an oplog and this is a low priority oplog.
+ if manager.oplogIntent.DB != "" {
+ // If the manager's current oplog is also low priority, set a conflict.
+ manager.oplogConflict = true
+ }
+ // No matter what, set this lower priority oplog to be a normal intent.
+ manager.putNormalIntent(intent)
+ }
+ } else {
+ if intent.DB == "" && intent.C == "oplog" {
+ // If this is a normal oplog, then add it as an oplog intent.
+ if existing := manager.specialIntents[managerKey]; existing != nil {
+ existing.MergeIntent(intent)
+ return
+ }
+ manager.oplogIntent = intent
+ manager.specialIntents[managerKey] = intent
+ } else {
+ manager.putNormalIntent(intent)
+ }
+ }
+}
+
+func (manager *Manager) putNormalIntent(intent *Intent) {
+ manager.putNormalIntentWithNamespace(intent.Namespace(), intent)
+}
+
+func (manager *Manager) putNormalIntentWithNamespace(ns string, intent *Intent) {
+ // BSON and metadata files for the same collection are merged
+ // into the same intent. This is done to allow for simple
+ // pairing of BSON + metadata without keeping track of the
+ // state of the filepath walker
+ if existing := manager.intents[ns]; existing != nil {
+ if existing.Namespace() != intent.Namespace() {
+ // remove old destination, add new one
+ dst := existing.Namespace()
+ dsts := manager.destinations[dst]
+ i := util.StringSliceIndex(dsts, ns)
+ manager.destinations[dst] = append(dsts[:i], dsts[i+1:]...)
+
+ dsts = manager.destinations[intent.Namespace()]
+ manager.destinations[intent.Namespace()] = append(dsts, ns)
+ }
+ existing.MergeIntent(intent)
+ return
+ }
+
+ // if key doesn't already exist, add it to the manager
+ manager.intents[ns] = intent
+ manager.intentsByDiscoveryOrder = append(manager.intentsByDiscoveryOrder, intent)
+
+ manager.destinations[intent.Namespace()] = append(manager.destinations[intent.Namespace()], ns)
+}
+
+// Put inserts an intent into the manager with the same source namespace as
+// its destinations.
+func (manager *Manager) Put(intent *Intent) {
+ manager.PutWithNamespace(intent.Namespace(), intent)
+}
+
+// PutWithNamespace inserts an intent into the manager with the source set
+// to the provided namespace. Intents for the same collection are merged
+// together, so that BSON and metadata files for the same collection are
+// returned in the same intent.
+func (manager *Manager) PutWithNamespace(ns string, intent *Intent) {
+ if intent == nil {
+ panic("cannot insert nil *Intent into IntentManager")
+ }
+ db, _ := common.SplitNamespace(ns)
+
+ // bucket special-case collections
+ if intent.IsOplog() {
+ manager.PutOplogIntent(intent, intent.Namespace())
+ return
+ }
+ if intent.IsSystemIndexes() {
+ if intent.BSONFile != nil {
+ manager.indexIntents[db] = intent
+ manager.specialIntents[ns] = intent
+ }
+ return
+ }
+ if intent.IsUsers() {
+ if intent.BSONFile != nil {
+ manager.usersIntent = intent
+ manager.specialIntents[ns] = intent
+ }
+ return
+ }
+ if intent.IsRoles() {
+ if intent.BSONFile != nil {
+ manager.rolesIntent = intent
+ manager.specialIntents[ns] = intent
+ }
+ return
+ }
+ if intent.IsAuthVersion() {
+ if intent.BSONFile != nil {
+ manager.versionIntent = intent
+ manager.specialIntents[ns] = intent
+ }
+ return
+ }
+
+ manager.putNormalIntentWithNamespace(ns, intent)
+}
+
+func (manager *Manager) GetOplogConflict() bool {
+ return manager.oplogConflict
+}
+
+func (manager *Manager) GetDestinationConflicts() (errs []DestinationConflictError) {
+ for dst, srcs := range manager.destinations {
+ if len(srcs) <= 1 {
+ continue
+ }
+ for _, src := range srcs {
+ errs = append(errs, DestinationConflictError{Dst: dst, Src: src})
+ }
+ }
+ return
+}
+
+// Intents returns a slice containing all of the intents in the manager.
+// Intents is not thread safe
+func (manager *Manager) Intents() []*Intent {
+ allIntents := []*Intent{}
+ for _, intent := range manager.intents {
+ allIntents = append(allIntents, intent)
+ }
+ for _, intent := range manager.indexIntents {
+ allIntents = append(allIntents, intent)
+ }
+ if manager.oplogIntent != nil {
+ allIntents = append(allIntents, manager.oplogIntent)
+ }
+ if manager.usersIntent != nil {
+ allIntents = append(allIntents, manager.usersIntent)
+ }
+ if manager.rolesIntent != nil {
+ allIntents = append(allIntents, manager.rolesIntent)
+ }
+ if manager.versionIntent != nil {
+ allIntents = append(allIntents, manager.versionIntent)
+ }
+ return allIntents
+}
+
+func (manager *Manager) IntentForNamespace(ns string) *Intent {
+ intent := manager.intents[ns]
+ if intent != nil {
+ return intent
+ }
+ intent = manager.specialIntents[ns]
+ return intent
+}
+
+// Pop returns the next available intent from the manager. If the manager is
+// empty, it returns nil. Pop is thread safe.
+func (manager *Manager) Pop() *Intent {
+ return manager.prioritizer.Get()
+}
+
+// Peek returns a copy of a stored intent from the manager without removing
+// the intent. This method is useful for edge cases that need to look ahead
+// at what collections are in the manager before they are scheduled.
+//
+// NOTE: There are no guarantees that peek will return a usable
+// intent after Finalize() is called.
+func (manager *Manager) Peek() *Intent {
+ if len(manager.intentsByDiscoveryOrder) == 0 {
+ return nil
+ }
+ intentCopy := *manager.intentsByDiscoveryOrder[0]
+ return &intentCopy
+}
+
+// Finish tells the prioritizer that mongorestore is done restoring
+// the given collection intent.
+func (manager *Manager) Finish(intent *Intent) {
+ manager.prioritizer.Finish(intent)
+}
+
+// Oplog returns the intent representing the oplog, which isn't
+// stored with the other intents, because it is dumped and restored in
+// a very different way from other collections.
+func (manager *Manager) Oplog() *Intent {
+ return manager.oplogIntent
+}
+
+// SystemIndexes returns the system.indexes bson for a database
+func (manager *Manager) SystemIndexes(dbName string) *Intent {
+ return manager.indexIntents[dbName]
+}
+
+// SystemIndexDBs returns the databases for which there are system.indexes
+func (manager *Manager) SystemIndexDBs() []string {
+ databases := []string{}
+ for dbname := range manager.indexIntents {
+ databases = append(databases, dbname)
+ }
+ return databases
+}
+
+// Users returns the intent of the users collection to restore, a special case
+func (manager *Manager) Users() *Intent {
+ return manager.usersIntent
+}
+
+// Roles returns the intent of the user roles collection to restore, a special case
+func (manager *Manager) Roles() *Intent {
+ return manager.rolesIntent
+}
+
+// AuthVersion returns the intent of the version collection to restore, a special case
+func (manager *Manager) AuthVersion() *Intent {
+ return manager.versionIntent
+}
+
+// Finalize processes the intents for prioritization. Currently only two
+// kinds of prioritizers are supported. No more "Put" operations may be done
+// after finalize is called.
+func (manager *Manager) Finalize(pType PriorityType) {
+ switch pType {
+ case Legacy:
+ log.Logv(log.DebugHigh, "finalizing intent manager with legacy prioritizer")
+ manager.prioritizer = NewLegacyPrioritizer(manager.intentsByDiscoveryOrder)
+ case LongestTaskFirst:
+ log.Logv(log.DebugHigh, "finalizing intent manager with longest task first prioritizer")
+ manager.prioritizer = NewLongestTaskFirstPrioritizer(manager.intentsByDiscoveryOrder)
+ case MultiDatabaseLTF:
+ log.Logv(log.DebugHigh, "finalizing intent manager with multi-database longest task first prioritizer")
+ manager.prioritizer = NewMultiDatabaseLTFPrioritizer(manager.intentsByDiscoveryOrder)
+ default:
+ panic("cannot initialize IntentPrioritizer with unknown type")
+ }
+ // release these for the garbage collector and to ensure code correctness
+ manager.intents = nil
+ manager.intentsByDiscoveryOrder = nil
+}
+
+func (manager *Manager) UsePrioritizer(prioritizer IntentPrioritizer) {
+ manager.prioritizer = prioritizer
+}
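+
+// Example (illustrative sketch): the usual manager lifecycle; the namespaces
+// and sizes are hypothetical.
+//
+//	manager := NewIntentManager()
+//	manager.Put(&Intent{DB: "test", C: "users", Size: 1024})
+//	manager.Put(&Intent{DB: "test", C: "events", Size: 4096})
+//	manager.Finalize(LongestTaskFirst)
+//	for intent := manager.Pop(); intent != nil; intent = manager.Pop() {
+//		// ... restore the collection for this intent ...
+//		manager.Finish(intent)
+//	}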
diff --git a/src/mongo/gotools/common/intents/intent_prioritizer.go b/src/mongo/gotools/common/intents/intent_prioritizer.go
new file mode 100644
index 00000000000..290a7c83d1e
--- /dev/null
+++ b/src/mongo/gotools/common/intents/intent_prioritizer.go
@@ -0,0 +1,241 @@
+package intents
+
+import (
+ "container/heap"
+ "sort"
+ "sync"
+)
+
+type PriorityType int
+
+const (
+ Legacy PriorityType = iota
+ LongestTaskFirst
+ MultiDatabaseLTF
+)
+
+// IntentPrioritizer encapsulates the logic of scheduling intents
+// for restoration. It can know about which intents are in the
+// process of being restored through the "Finish" hook.
+//
+// Oplog entries and auth entries are not handled by the prioritizer,
+// as these are special cases handled by the regular mongorestore code.
+type IntentPrioritizer interface {
+ Get() *Intent
+ Finish(*Intent)
+}
+
+//===== Legacy =====
+
+// legacyPrioritizer processes the intents in the order they were read off the
+// file system, keeping with legacy mongorestore behavior.
+type legacyPrioritizer struct {
+ sync.Mutex
+ queue []*Intent
+}
+
+func NewLegacyPrioritizer(intentList []*Intent) *legacyPrioritizer {
+ return &legacyPrioritizer{queue: intentList}
+}
+
+func (legacy *legacyPrioritizer) Get() *Intent {
+ legacy.Lock()
+ defer legacy.Unlock()
+
+ if len(legacy.queue) == 0 {
+ return nil
+ }
+
+ var intent *Intent
+ intent, legacy.queue = legacy.queue[0], legacy.queue[1:]
+ return intent
+}
+
+func (legacy *legacyPrioritizer) Finish(*Intent) {
+ // no-op
+ return
+}
+
+//===== Longest Task First =====
+
+// longestTaskFirstPrioritizer returns intents in the order of largest -> smallest,
+// which is better at minimizing total runtime in parallel environments than
+// other simple orderings.
+type longestTaskFirstPrioritizer struct {
+ sync.Mutex
+ queue []*Intent
+}
+
+// NewLongestTaskFirstPrioritizer returns an initialized LTP prioritizer
+func NewLongestTaskFirstPrioritizer(intents []*Intent) *longestTaskFirstPrioritizer {
+ sort.Sort(BySize(intents))
+ return &longestTaskFirstPrioritizer{
+ queue: intents,
+ }
+}
+
+func (ltf *longestTaskFirstPrioritizer) Get() *Intent {
+ ltf.Lock()
+ defer ltf.Unlock()
+
+ if len(ltf.queue) == 0 {
+ return nil
+ }
+
+ var intent *Intent
+ intent, ltf.queue = ltf.queue[0], ltf.queue[1:]
+ return intent
+}
+
+func (ltf *longestTaskFirstPrioritizer) Finish(*Intent) {
+ // no-op
+ return
+}
+
+// For sorting intents from largest to smallest size
+type BySize []*Intent
+
+func (s BySize) Len() int { return len(s) }
+func (s BySize) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s BySize) Less(i, j int) bool { return s[i].Size > s[j].Size }
+
+//===== Multi Database Longest Task First =====
+
+// multiDatabaseLTF is designed to properly schedule intents with two constraints:
+// 1. it is optimized to run in a multi-processor environment
+// 2. it is optimized for parallelism against 2.6's db-level write lock
+// These goals result in a design that attempts to have as many different
+// database's intents being restored as possible and attempts to restore the
+// largest collections first.
+//
+// If we can have a minimum number of collections in flight for a given db,
+// we avoid lock contention in an optimal way on 2.6 systems. That is,
+// it is better to have two restore jobs where
+// job1 = "test.mycollection"
+// job2 = "mydb2.othercollection"
+// so that these collections do not compete for the db-level write lock.
+//
+// We also schedule the largest jobs first, in a greedy fashion, in order
+// to minimize total restoration time. Each database's intents are sorted
+// by decreasing file size at initialization, so that the largest jobs are
+// run first. Admittedly, .bson file size is not a direct predictor of restore
+// time, but there is certainly a strong correlation. Note that this attribute
+// is secondary to the multi-db scheduling laid out above, since multi-db will
+// get us bigger wins in terms of parallelism.
+type multiDatabaseLTFPrioritizer struct {
+ sync.Mutex
+ dbHeap heap.Interface
+ counterMap map[string]*dbCounter
+}
+
+// NewMultiDatabaseLTFPrioritizer takes in a list of intents and returns an
+// initialized prioritizer.
+func NewMultiDatabaseLTFPrioritizer(intents []*Intent) *multiDatabaseLTFPrioritizer {
+ prioritizer := &multiDatabaseLTFPrioritizer{
+ counterMap: map[string]*dbCounter{},
+ dbHeap: &DBHeap{},
+ }
+ heap.Init(prioritizer.dbHeap)
+ // first, create all database counters
+ for _, intent := range intents {
+ counter, exists := prioritizer.counterMap[intent.DB]
+ if !exists {
+ // initialize a new counter if one doesn't exist for DB
+ counter = &dbCounter{}
+ prioritizer.counterMap[intent.DB] = counter
+ }
+ counter.collections = append(counter.collections, intent)
+ }
+ // then ensure that all the dbCounters have sorted intents
+ for _, counter := range prioritizer.counterMap {
+ counter.SortCollectionsBySize()
+ heap.Push(prioritizer.dbHeap, counter)
+ }
+ return prioritizer
+}
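+
+// Example (illustrative sketch): because Get and Finish lock internally, a
+// pool of restore workers can share one prioritizer; restore() is hypothetical.
+//
+//	prioritizer := NewMultiDatabaseLTFPrioritizer(intents)
+//	for i := 0; i < numWorkers; i++ {
+//		go func() {
+//			for intent := prioritizer.Get(); intent != nil; intent = prioritizer.Get() {
+//				restore(intent)
+//				prioritizer.Finish(intent)
+//			}
+//		}()
+//	}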
+
+// Get returns the next prioritized intent and updates the count of active
+// restores for the returned intent's DB. Get takes the prioritizer's own
+// lock, so callers do not need to synchronize around it.
+func (mdb *multiDatabaseLTFPrioritizer) Get() *Intent {
+ mdb.Lock()
+ defer mdb.Unlock()
+
+ if mdb.dbHeap.Len() == 0 {
+ // we're out of things to return
+ return nil
+ }
+ optimalDB := heap.Pop(mdb.dbHeap).(*dbCounter)
+ optimalDB.active++
+ nextIntent := optimalDB.PopIntent()
+ // only release the db counter if it's out of collections
+ if len(optimalDB.collections) > 0 {
+ heap.Push(mdb.dbHeap, optimalDB)
+ }
+ return nextIntent
+}
+
+// Finish decreases the number of active restore jobs for the given intent's
+// database, and reshuffles the heap accordingly. Finish takes the prioritizer's
+// own lock, so callers do not need to synchronize around it.
+func (mdb *multiDatabaseLTFPrioritizer) Finish(intent *Intent) {
+ mdb.Lock()
+ defer mdb.Unlock()
+
+ counter := mdb.counterMap[intent.DB]
+ counter.active--
+ // only fix up the heap if the counter is still in the heap
+ if len(counter.collections) > 0 {
+ // This is an O(n) operation on the heap. We could make all heap
+ // operations O(log(n)) if we set up dbCounters to track their own
+ // position in the heap, but in practice this overhead is likely negligible.
+ heap.Init(mdb.dbHeap)
+ }
+}
+
+type dbCounter struct {
+ active int
+ collections []*Intent
+}
+
+func (dbc *dbCounter) SortCollectionsBySize() {
+ sort.Sort(BySize(dbc.collections))
+}
+
+// PopIntent returns the largest intent remaining for the database
+func (dbc *dbCounter) PopIntent() *Intent {
+ var intent *Intent
+ if len(dbc.collections) > 0 {
+ intent, dbc.collections = dbc.collections[0], dbc.collections[1:]
+ }
+ return intent
+}
+
+// DBHeap orders dbCounters so that the database with the fewest active
+// restores is on top, breaking ties by largest remaining bson file. It
+// implements the container/heap interface; none of its methods are meant to
+// be called directly.
+type DBHeap []*dbCounter
+
+func (dbh DBHeap) Len() int { return len(dbh) }
+func (dbh DBHeap) Swap(i, j int) { dbh[i], dbh[j] = dbh[j], dbh[i] }
+func (dbh DBHeap) Less(i, j int) bool {
+ if dbh[i].active == dbh[j].active {
+ // prioritize the largest bson file if dbs have the same number
+ // of restorations in progress
+ return dbh[i].collections[0].Size > dbh[j].collections[0].Size
+ }
+ return dbh[i].active < dbh[j].active
+}
+
+func (dbh *DBHeap) Push(x interface{}) {
+ *dbh = append(*dbh, x.(*dbCounter))
+}
+
+func (dbh *DBHeap) Pop() interface{} {
+ // for container/heap package: removes the top entry and resizes the heap array
+ old := *dbh
+ n := len(old)
+ toPop := old[n-1]
+ *dbh = old[0 : n-1]
+ return toPop
+}
diff --git a/src/mongo/gotools/common/intents/intent_prioritizer_test.go b/src/mongo/gotools/common/intents/intent_prioritizer_test.go
new file mode 100644
index 00000000000..2abd79e5641
--- /dev/null
+++ b/src/mongo/gotools/common/intents/intent_prioritizer_test.go
@@ -0,0 +1,174 @@
+package intents
+
+import (
+ "container/heap"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestLegacyPrioritizer(t *testing.T) {
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a legacyPrioritizer initialized with an ordered intent list", t, func() {
+ testList := []*Intent{
+ &Intent{DB: "1"},
+ &Intent{DB: "2"},
+ &Intent{DB: "3"},
+ }
+ legacy := NewLegacyPrioritizer(testList)
+ So(legacy, ShouldNotBeNil)
+
+ Convey("the priority should be defined by 'first-in-first-out'", func() {
+ it0 := legacy.Get()
+ it1 := legacy.Get()
+ it2 := legacy.Get()
+ it3 := legacy.Get()
+ So(it3, ShouldBeNil)
+ So(it0.DB, ShouldBeLessThan, it1.DB)
+ So(it1.DB, ShouldBeLessThan, it2.DB)
+ })
+ })
+}
+
+func TestBasicDBHeapBehavior(t *testing.T) {
+ var dbheap heap.Interface
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With an empty dbHeap", t, func() {
+ dbheap = &DBHeap{}
+ heap.Init(dbheap)
+
+ Convey("when inserting unordered dbCounters with different active counts", func() {
+ heap.Push(dbheap, &dbCounter{75, nil})
+ heap.Push(dbheap, &dbCounter{121, nil})
+ heap.Push(dbheap, &dbCounter{76, nil})
+ heap.Push(dbheap, &dbCounter{51, nil})
+ heap.Push(dbheap, &dbCounter{82, nil})
+ heap.Push(dbheap, &dbCounter{117, nil})
+ heap.Push(dbheap, &dbCounter{49, nil})
+ heap.Push(dbheap, &dbCounter{101, nil})
+ heap.Push(dbheap, &dbCounter{122, nil})
+ heap.Push(dbheap, &dbCounter{33, nil})
+ heap.Push(dbheap, &dbCounter{0, nil})
+
+ Convey("they should pop in active order, least to greatest", func() {
+ prev := -1
+ for dbheap.Len() > 0 {
+ popped := heap.Pop(dbheap).(*dbCounter)
+ So(popped.active, ShouldBeGreaterThan, prev)
+ prev = popped.active
+ }
+ })
+ })
+
+ Convey("when inserting unordered dbCounters with different bson sizes", func() {
+ heap.Push(dbheap, &dbCounter{0, []*Intent{&Intent{Size: 70}}})
+ heap.Push(dbheap, &dbCounter{0, []*Intent{&Intent{Size: 1024}}})
+ heap.Push(dbheap, &dbCounter{0, []*Intent{&Intent{Size: 97}}})
+ heap.Push(dbheap, &dbCounter{0, []*Intent{&Intent{Size: 3}}})
+ heap.Push(dbheap, &dbCounter{0, []*Intent{&Intent{Size: 1024 * 1024}}})
+
+ Convey("they should pop in bson size order, greatest to least", func() {
+ prev := int64(1024*1024 + 1) // Maximum
+ for dbheap.Len() > 0 {
+ popped := heap.Pop(dbheap).(*dbCounter)
+ So(popped.collections[0].Size, ShouldBeLessThan, prev)
+ prev = popped.collections[0].Size
+ }
+ })
+ })
+ })
+}
+
+func TestDBCounterCollectionSorting(t *testing.T) {
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a dbCounter and an unordered collection of intents", t, func() {
+ dbc := &dbCounter{
+ collections: []*Intent{
+ &Intent{Size: 100},
+ &Intent{Size: 1000},
+ &Intent{Size: 1},
+ &Intent{Size: 10},
+ },
+ }
+
+ Convey("popping the sorted intents should return in decreasing BSONSize", func() {
+ dbc.SortCollectionsBySize()
+ So(dbc.PopIntent().Size, ShouldEqual, 1000)
+ So(dbc.PopIntent().Size, ShouldEqual, 100)
+ So(dbc.PopIntent().Size, ShouldEqual, 10)
+ So(dbc.PopIntent().Size, ShouldEqual, 1)
+ So(dbc.PopIntent(), ShouldBeNil)
+ So(dbc.PopIntent(), ShouldBeNil)
+ })
+ })
+}
+
+func TestSimulatedMultiDBJob(t *testing.T) {
+ var prioritizer IntentPrioritizer
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a prioritizer initialized with a set of intents", t, func() {
+ intents := []*Intent{
+ &Intent{C: "small", DB: "db2", Size: 32},
+ &Intent{C: "medium", DB: "db2", Size: 128},
+ &Intent{C: "giant", DB: "db1", Size: 1024},
+ &Intent{C: "tiny", DB: "db1", Size: 2},
+ }
+ prioritizer = NewMultiDatabaseLTFPrioritizer(intents)
+ So(prioritizer, ShouldNotBeNil)
+
+ Convey("and a running simulation of two jobs threads:", func() {
+ Convey("the first two intents should be of different dbs", func() {
+ i0 := prioritizer.Get()
+ So(i0, ShouldNotBeNil)
+ i1 := prioritizer.Get()
+ So(i1, ShouldNotBeNil)
+
+ Convey("the first intent should be the largest bson file", func() {
+ So(i0.C, ShouldEqual, "giant")
+ So(i0.DB, ShouldEqual, "db1")
+ })
+
+ Convey("the second intent should be the largest bson file of db2", func() {
+ So(i1.C, ShouldEqual, "medium")
+ So(i1.DB, ShouldEqual, "db2")
+ })
+
+ Convey("with the second job finishing the smaller intents", func() {
+ prioritizer.Finish(i1)
+ i2 := prioritizer.Get()
+ So(i2, ShouldNotBeNil)
+ prioritizer.Finish(i2)
+ i3 := prioritizer.Get()
+ So(i3, ShouldNotBeNil)
+
+ Convey("the next job should be from db2", func() {
+ So(i2.C, ShouldEqual, "small")
+ So(i2.DB, ShouldEqual, "db2")
+ })
+
+ Convey("the final job should be from db1", func() {
+ So(i3.C, ShouldEqual, "tiny")
+ So(i3.DB, ShouldEqual, "db1")
+
+ Convey("which means that there should be two active db1 jobs", func() {
+ counter := prioritizer.(*multiDatabaseLTFPrioritizer).counterMap["db1"]
+ So(counter.active, ShouldEqual, 2)
+ })
+ })
+
+ Convey("the heap should now be empty", func() {
+ So(prioritizer.(*multiDatabaseLTFPrioritizer).dbHeap.Len(), ShouldEqual, 0)
+ })
+ })
+ })
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/intents/intent_test.go b/src/mongo/gotools/common/intents/intent_test.go
new file mode 100644
index 00000000000..15b99f1af30
--- /dev/null
+++ b/src/mongo/gotools/common/intents/intent_test.go
@@ -0,0 +1,81 @@
+package intents
+
+import (
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestIntentManager(t *testing.T) {
+ var manager *Manager
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With an empty IntentManager", t, func() {
+ manager = NewIntentManager()
+ So(manager, ShouldNotBeNil)
+
+ Convey("putting a series of added bson intents", func() {
+ manager.Put(&Intent{DB: "1", C: "1", Location: "/b1/"})
+ manager.Put(&Intent{DB: "1", C: "2", Location: "/b2/"})
+ manager.Put(&Intent{DB: "1", C: "3", Location: "/b3/"})
+ manager.Put(&Intent{DB: "2", C: "1", Location: "/b4/"})
+ So(len(manager.intentsByDiscoveryOrder), ShouldEqual, 4)
+ So(len(manager.intents), ShouldEqual, 4)
+
+ Convey("and then some matching metadata intents", func() {
+ manager.Put(&Intent{DB: "2", C: "1", MetadataLocation: "/4m/"})
+ manager.Put(&Intent{DB: "1", C: "3", MetadataLocation: "/3m/"})
+ manager.Put(&Intent{DB: "1", C: "1", MetadataLocation: "/1m/"})
+ manager.Put(&Intent{DB: "1", C: "2", MetadataLocation: "/2m/"})
+
+ Convey("the size of the queue should be unchanged", func() {
+ So(len(manager.intentsByDiscoveryOrder), ShouldEqual, 4)
+ So(len(manager.intents), ShouldEqual, 4)
+ })
+
+ Convey("popping them from the IntentManager", func() {
+ manager.Finalize(Legacy)
+ it0 := manager.Pop()
+ it1 := manager.Pop()
+ it2 := manager.Pop()
+ it3 := manager.Pop()
+ it4 := manager.Pop()
+ So(it4, ShouldBeNil)
+
+ Convey("should return them in insert order", func() {
+ So(*it0, ShouldResemble,
+ Intent{DB: "1", C: "1", Location: "/b1/", MetadataLocation: "/1m/"})
+ So(*it1, ShouldResemble,
+ Intent{DB: "1", C: "2", Location: "/b2/", MetadataLocation: "/2m/"})
+ So(*it2, ShouldResemble,
+ Intent{DB: "1", C: "3", Location: "/b3/", MetadataLocation: "/3m/"})
+ So(*it3, ShouldResemble,
+ Intent{DB: "2", C: "1", Location: "/b4/", MetadataLocation: "/4m/"})
+ })
+ })
+ })
+
+ Convey("but adding non-matching intents", func() {
+ manager.Put(&Intent{DB: "7", C: "49", MetadataLocation: "/5/"})
+ manager.Put(&Intent{DB: "27", C: "B", MetadataLocation: "/6/"})
+
+ Convey("should increase the size, because they are not merged in", func() {
+ So(len(manager.intentsByDiscoveryOrder), ShouldEqual, 6)
+ So(len(manager.intents), ShouldEqual, 6)
+ })
+ })
+
+ Convey("using the Peek() method", func() {
+ peeked := manager.Peek()
+ So(peeked, ShouldNotBeNil)
+ So(peeked, ShouldResemble, manager.intentsByDiscoveryOrder[0])
+
+ Convey("modifying the returned copy should not modify the original", func() {
+ peeked.DB = "SHINY NEW VALUE"
+ So(peeked, ShouldNotResemble, manager.intentsByDiscoveryOrder[0])
+ })
+ })
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/bench_test.go b/src/mongo/gotools/common/json/bench_test.go
new file mode 100644
index 00000000000..29dbc26d417
--- /dev/null
+++ b/src/mongo/gotools/common/json/bench_test.go
@@ -0,0 +1,189 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Large data benchmark.
+// The JSON data is a summary of agl's changes in the
+// go, webkit, and chromium open source projects.
+// We benchmark converting between the JSON form
+// and in-memory data structures.
+
+package json
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io/ioutil"
+ "os"
+ "testing"
+)
+
+type codeResponse struct {
+ Tree *codeNode `json:"tree"`
+ Username string `json:"username"`
+}
+
+type codeNode struct {
+ Name string `json:"name"`
+ Kids []*codeNode `json:"kids"`
+ CLWeight float64 `json:"cl_weight"`
+ Touches int `json:"touches"`
+ MinT int64 `json:"min_t"`
+ MaxT int64 `json:"max_t"`
+ MeanT int64 `json:"mean_t"`
+}
+
+var codeJSON []byte
+var codeStruct codeResponse
+
+func codeInit() {
+ f, err := os.Open("testdata/code.json.gz")
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+ gz, err := gzip.NewReader(f)
+ if err != nil {
+ panic(err)
+ }
+ data, err := ioutil.ReadAll(gz)
+ if err != nil {
+ panic(err)
+ }
+
+ codeJSON = data
+
+ if err := Unmarshal(codeJSON, &codeStruct); err != nil {
+ panic("unmarshal code.json: " + err.Error())
+ }
+
+ if data, err = Marshal(&codeStruct); err != nil {
+ panic("marshal code.json: " + err.Error())
+ }
+
+ if !bytes.Equal(data, codeJSON) {
+ println("different lengths", len(data), len(codeJSON))
+ for i := 0; i < len(data) && i < len(codeJSON); i++ {
+ if data[i] != codeJSON[i] {
+ println("re-marshal: changed at byte", i)
+ println("orig: ", string(codeJSON[i-10:i+10]))
+ println("new: ", string(data[i-10:i+10]))
+ break
+ }
+ }
+ panic("re-marshal code.json: different result")
+ }
+}
+
+func BenchmarkCodeEncoder(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ enc := NewEncoder(ioutil.Discard)
+ for i := 0; i < b.N; i++ {
+ if err := enc.Encode(&codeStruct); err != nil {
+ b.Fatal("Encode:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeMarshal(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ for i := 0; i < b.N; i++ {
+ if _, err := Marshal(&codeStruct); err != nil {
+ b.Fatal("Marshal:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeDecoder(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ var buf bytes.Buffer
+ dec := NewDecoder(&buf)
+ var r codeResponse
+ for i := 0; i < b.N; i++ {
+ buf.Write(codeJSON)
+ // hide EOF
+ buf.WriteByte('\n')
+ buf.WriteByte('\n')
+ buf.WriteByte('\n')
+ if err := dec.Decode(&r); err != nil {
+ b.Fatal("Decode:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeUnmarshal(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ for i := 0; i < b.N; i++ {
+ var r codeResponse
+ if err := Unmarshal(codeJSON, &r); err != nil {
+ b.Fatal("Unmmarshal:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeUnmarshalReuse(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ var r codeResponse
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(codeJSON, &r); err != nil {
+ b.Fatal("Unmmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalString(b *testing.B) {
+ data := []byte(`"hello, world"`)
+ var s string
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &s); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalFloat64(b *testing.B) {
+ var f float64
+ data := []byte(`3.14`)
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &f); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalInt64(b *testing.B) {
+ var x int64
+ data := []byte(`3`)
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &x); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
diff --git a/src/mongo/gotools/common/json/bindata.go b/src/mongo/gotools/common/json/bindata.go
new file mode 100644
index 00000000000..a7357675ba8
--- /dev/null
+++ b/src/mongo/gotools/common/json/bindata.go
@@ -0,0 +1,67 @@
+package json
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Transition functions for recognizing BinData.
+// Adapted from encoding/json/scanner.go.
+
+// stateBi is the state after reading `Bi`.
+func stateBi(s *scanner, c int) int {
+ if c == 'n' {
+ s.step = generateState("BinData", []byte("Data"), stateConstructor)
+ return scanContinue
+ }
+ return s.error(c, "in literal BinData (expecting 'n')")
+}
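+
+// generateState is a shared scanner helper defined alongside the scanner in
+// this package; it is assumed to return a step function that matches the
+// remaining literal bytes ("Data" here, completing "BinData") one character at
+// a time and then hands control to the given state (stateConstructor),
+// reporting a scan error on any mismatch.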
+
+// Decodes a BinData literal stored in the underlying byte data into v.
+func (d *decodeState) storeBinData(v reflect.Value) {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args, err := d.ctor("BinData", []reflect.Type{byteType, stringType})
+ if err != nil {
+ d.error(err)
+ }
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ arg0 := byte(args[0].Uint())
+ arg1 := args[1].String()
+ v.Set(reflect.ValueOf(BinData{arg0, arg1}))
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", binDataType, kind))
+ }
+}
+
+// Returns a BinData literal from the underlying byte data.
+func (d *decodeState) getBinData() interface{} {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ // Prevent d.convertNumber() from parsing the argument as a float64.
+ useNumber := d.useNumber
+ d.useNumber = true
+
+ args := d.ctorInterface()
+ if err := ctorNumArgsMismatch("BinData", 2, len(args)); err != nil {
+ d.error(err)
+ }
+ arg0, err := args[0].(Number).Uint8()
+ if err != nil {
+ d.error(fmt.Errorf("expected byte for first argument of BinData constructor"))
+ }
+ arg1, ok := args[1].(string)
+ if !ok {
+ d.error(fmt.Errorf("expected string for second argument of BinData constructor"))
+ }
+
+ d.useNumber = useNumber
+ return BinData{arg0, arg1}
+}
diff --git a/src/mongo/gotools/common/json/bindata_test.go b/src/mongo/gotools/common/json/bindata_test.go
new file mode 100644
index 00000000000..97d3dddee24
--- /dev/null
+++ b/src/mongo/gotools/common/json/bindata_test.go
@@ -0,0 +1,89 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestBinDataValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with BinData values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `BinData(1, "xyz")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(BinData)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, BinData{1, "xyz"})
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := `BinData(1, "abc")`,
+ `BinData(2, "def")`, `BinData(3, "ghi")`
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(BinData)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldResemble, BinData{1, "abc"})
+
+ jsonValue2, ok := jsonMap[key2].(BinData)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldResemble, BinData{2, "def"})
+
+ jsonValue3, ok := jsonMap[key3].(BinData)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldResemble, BinData{3, "ghi"})
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `BinData(42, "10")`
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(BinData)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, BinData{42, "10"})
+ }
+ })
+
+ Convey("can specify type argument using hexadecimal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `BinData(0x5f, "xyz")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(BinData)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, BinData{0x5f, "xyz"})
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/boolean.go b/src/mongo/gotools/common/json/boolean.go
new file mode 100644
index 00000000000..b40ba5204da
--- /dev/null
+++ b/src/mongo/gotools/common/json/boolean.go
@@ -0,0 +1,73 @@
+package json
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Transition functions for recognizing Boolean.
+// Adapted from encoding/json/scanner.go.
+
+// stateBo is the state after reading `Bo`.
+func stateBo(s *scanner, c int) int {
+ if c == 'o' {
+ s.step = generateState("Boolean", []byte("lean"), stateConstructor)
+ return scanContinue
+ }
+ return s.error(c, "in literal Boolean (expecting 'o')")
+}
+
+// Decodes a Boolean literal stored in the underlying byte data into v.
+func (d *decodeState) storeBoolean(v reflect.Value) {
+ res := d.getBoolean()
+ switch kind := v.Kind(); kind {
+ case reflect.Interface, reflect.Bool:
+ v.Set(reflect.ValueOf(res))
+ default:
+ d.error(fmt.Errorf("cannot store bool value into %v type", kind))
+ }
+}
+
+// Returns a Boolean literal from the underlying byte data.
+func (d *decodeState) getBoolean() interface{} {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ // Prevent d.convertNumber() from parsing the argument as a float64.
+ useNumber := d.useNumber
+ d.useNumber = true
+
+ args := d.ctorInterface()
+ if len(args) == 0 {
+ return false
+ }
+
+ // Ignore all but the first argument.
+ switch v := args[0].(type) {
+ case bool:
+ return v
+ case Number:
+ d.useNumber = useNumber
+
+ // First try Int64 so hex numbers work, then if that fails try Float64.
+ num, err := v.Int64()
+ if err == nil {
+ return (num != 0)
+ }
+
+ numF, err := v.Float64()
+ if err != nil {
+ d.error(fmt.Errorf("expected float64 for numeric argument of Boolean constructor, got err: %v", err))
+ }
+ return (numF != 0)
+ case string:
+ return (v != "")
+ case Undefined, nil:
+ return false
+ // Parameter values of any other types should yield true.
+ default:
+ return true
+ }
+}
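+
+// The rules above mirror JavaScript truthiness. For example:
+//
+//  Boolean(0x5f)      -> true  (non-zero number; Int64 handles the hex form)
+//  Boolean("")        -> false (empty string)
+//  Boolean(undefined) -> false
+//  Boolean([])        -> true  (any other type)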
diff --git a/src/mongo/gotools/common/json/boolean_test.go b/src/mongo/gotools/common/json/boolean_test.go
new file mode 100644
index 00000000000..5b5cae6e2ed
--- /dev/null
+++ b/src/mongo/gotools/common/json/boolean_test.go
@@ -0,0 +1,368 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestBooleanValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with Boolean values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean(123)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+ })
+
+ Convey("works for no args", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean()"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, false)
+ })
+
+ Convey("works for a struct of a specific type", func() {
+ type TestStruct struct {
+ A bool
+ b int
+ }
+ var jsonStruct TestStruct
+
+ key := "A"
+ value := "Boolean(123)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonStruct)
+ So(err, ShouldBeNil)
+ So(jsonStruct.A, ShouldEqual, true)
+
+ key = "A"
+ value = "Boolean(0)"
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonStruct)
+ So(err, ShouldBeNil)
+ So(jsonStruct.A, ShouldEqual, false)
+ })
+
+ Convey("works for bool", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean(true)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+
+ value = "Boolean(false)"
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, false)
+ })
+
+ Convey("works for numbers", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean(1)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+
+ value = "Boolean(0)"
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, false)
+
+ value = "Boolean(0.0)"
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, false)
+
+ value = "Boolean(2.0)"
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+
+ value = "Boolean(-15.4)"
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+ })
+
+ Convey("works for strings", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean('hello')"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+
+ value = "Boolean('')"
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, false)
+ })
+
+ Convey("works for undefined", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean(undefined)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, false)
+ })
+
+ Convey("works for null", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean(null)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, false)
+ })
+
+ Convey("works when given too many args", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean(true, false)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+
+ key = "key"
+ value = "Boolean(false, true)"
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, false)
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "Boolean(123)", "Boolean(0)", "Boolean(true)"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldEqual, true)
+
+ jsonValue2, ok := jsonMap[key2].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldEqual, false)
+
+ jsonValue3, ok := jsonMap[key3].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldEqual, true)
+ })
+
+ Convey("works for other types", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean(new Date (0))"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+
+ value = "Boolean(ObjectId('56609335028bd7dc5c36cb9f'))"
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+
+ value = "Boolean([])"
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+ })
+
+ Convey("works for nested booleans", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean(Boolean(5))"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+
+ value = "Boolean(Boolean(Boolean(0)))"
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, false)
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value1 := "Boolean(42)"
+ value2 := "Boolean(0)"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value1, value2, value1)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ jsonValue, ok := jsonArray[0].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+
+ jsonValue, ok = jsonArray[1].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, false)
+
+ jsonValue, ok = jsonArray[2].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+ })
+
+ Convey("can specify argument in hexadecimal (true)", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean(0x5f)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+ })
+
+ Convey("can specify argument in hexadecimal (false)", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Boolean(0x0)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, false)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/constructor.go b/src/mongo/gotools/common/json/constructor.go
new file mode 100644
index 00000000000..b2bb91165fc
--- /dev/null
+++ b/src/mongo/gotools/common/json/constructor.go
@@ -0,0 +1,117 @@
+package json
+
+import (
+ "fmt"
+ "reflect"
+)
+
+const CtorNumArgsErrorf = "expected %v argument%v to %v constructor, but %v received"
+
+// Transition functions for recognizing object constructors.
+// Adapted from encoding/json/scanner.go.
+
+// stateConstructor is the state after reading a constructor name.
+func stateConstructor(s *scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return scanSkipSpace
+ }
+ if c == '(' {
+ s.step = stateBeginCtorOrEmpty
+ s.pushParseState(parseCtorArg)
+ return scanBeginCtor
+ }
+ return s.error(c, "expected '('")
+}
+
+// stateBeginCtorOrEmpty is the state after reading `(`.
+func stateBeginCtorOrEmpty(s *scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return scanSkipSpace
+ }
+ if c == ')' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// ctor consumes a constructor from d.data[d.off-1:], given a type specification t.
+// The first byte of the constructor ('(') has been read already.
+func (d *decodeState) ctor(name string, t []reflect.Type) ([]reflect.Value, error) {
+ result := make([]reflect.Value, 0, len(t))
+
+ i := 0
+ for {
+ // Look ahead for ) - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndCtor {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ if i < len(t) {
+ v := reflect.New(t[i]).Elem()
+
+ // Get argument of constructor
+ d.value(v)
+
+ result = append(result, v)
+ i++
+ }
+
+ // Next token must be , or ).
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndCtor {
+ break
+ }
+ if op != scanCtorArg {
+ d.error(errPhase)
+ }
+ }
+
+ return result, ctorNumArgsMismatch(name, len(t), i)
+}
+
+// ctorInterface is like ctor but returns []interface{}.
+func (d *decodeState) ctorInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ) - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndCtor {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface(false))
+
+ // Next token must be , or ).
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndCtor {
+ break
+ }
+ if op != scanCtorArg {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// ctorNumArgsMismatch returns a descriptive error if the number of arguments
+// given to the constructor does not match what is expected.
+func ctorNumArgsMismatch(name string, expected, actual int) error {
+ if expected == actual {
+ return nil
+ }
+
+ quantifier := ""
+ if expected > 1 {
+ quantifier = "s"
+ }
+ return fmt.Errorf(CtorNumArgsErrorf, expected, quantifier, name, actual)
+}
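+
+// For example, ctorNumArgsMismatch("Date", 1, 2) returns the error
+// "expected 1 argument to Date constructor, but 2 received", while a matching
+// count returns nil.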
diff --git a/src/mongo/gotools/common/json/consts.go b/src/mongo/gotools/common/json/consts.go
new file mode 100644
index 00000000000..ae7f7e6d31a
--- /dev/null
+++ b/src/mongo/gotools/common/json/consts.go
@@ -0,0 +1,7 @@
+package json
+
+const (
+ ArrayStart = '['
+ ArraySep = ','
+ ArrayEnd = ']'
+)
diff --git a/src/mongo/gotools/common/json/csv_format.go b/src/mongo/gotools/common/json/csv_format.go
new file mode 100644
index 00000000000..2228d227393
--- /dev/null
+++ b/src/mongo/gotools/common/json/csv_format.go
@@ -0,0 +1,84 @@
+package json
+
+import (
+ "encoding/base64"
+ "fmt"
+ "time"
+)
+
+const CSV_DATE_FORMAT = "2006-01-02T15:04:05.000Z"
+
+func (b BinData) String() string {
+ data, err := base64.StdEncoding.DecodeString(b.Base64)
+ if err != nil {
+ return "" // XXX: panic?
+ }
+ if b.Type == 0x02 {
+ data = data[4:] // skip the 4-byte length header used by the old 0x02 binary subtype
+ }
+ return fmt.Sprintf("%X", data) // use uppercase hexadecimal
+}
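+
+// For example, assuming well-formed base64 input:
+//
+//  BinData{Type: 0x00, Base64: "aGk="}.String() == "6869" // "hi" as uppercase hex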
+
+func (js JavaScript) String() string {
+ return js.Code
+}
+
+func (d Date) String() string {
+ if d.isFormatable() {
+ n := int64(d)
+ t := time.Unix(n/1e3, n%1e3*1e6)
+ return t.UTC().Format(JSON_DATE_FORMAT)
+ }
+ // date.MarshalJSON always returns a nil err.
+ data, _ := d.MarshalJSON()
+ return string(data)
+}
+
+func (d DBRef) String() string {
+ return fmt.Sprintf(`{ "$ref": "%v", "$id": %v, "$db": "%v" }`,
+ d.Collection, d.Id, d.Database)
+}
+
+func (d DBPointer) String() string {
+ return fmt.Sprintf(`{ "$ref": "%v", "$id": %v }`,
+ d.Namespace, d.Id)
+}
+
+func (f Float) String() string {
+ return fmt.Sprintf("%v", float64(f))
+}
+
+func (_ MinKey) String() string {
+ return "$MinKey"
+}
+
+func (_ MaxKey) String() string {
+ return "$MaxKey"
+}
+
+func (n NumberInt) String() string {
+ return fmt.Sprintf("%v", int32(n))
+}
+
+func (n NumberLong) String() string {
+ return fmt.Sprintf("%v", int64(n))
+}
+
+// Assumes that o represents a valid ObjectId
+// (composed of 24 hexadecimal characters).
+func (o ObjectId) String() string {
+ return fmt.Sprintf("ObjectId(%v)", string(o))
+}
+
+func (r RegExp) String() string {
+ return fmt.Sprintf("/%v/%v", r.Pattern, r.Options)
+}
+
+func (t Timestamp) String() string {
+ return fmt.Sprintf(`{ "$timestamp": { "t": %v, "i": %v } }`,
+ t.Seconds, t.Increment)
+}
+
+func (_ Undefined) String() string {
+ return `{ "$undefined": true }`
+}
diff --git a/src/mongo/gotools/common/json/date.go b/src/mongo/gotools/common/json/date.go
new file mode 100644
index 00000000000..dd310a675ee
--- /dev/null
+++ b/src/mongo/gotools/common/json/date.go
@@ -0,0 +1,80 @@
+package json
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/util"
+ "reflect"
+)
+
+// Transition functions for recognizing Date.
+// Adapted from encoding/json/scanner.go.
+
+// stateDa is the state after reading `Da`.
+func stateDa(s *scanner, c int) int {
+ if c == 't' {
+ s.step = stateDat
+ return scanContinue
+ }
+ return s.error(c, "in literal Date (expecting 't')")
+}
+
+// stateDat is the state after reading `Dat`.
+func stateDat(s *scanner, c int) int {
+ if c == 'e' {
+ s.step = stateConstructor
+ return scanContinue
+ }
+ return s.error(c, "in literal Date (expecting 'e')")
+}
+
+// Decodes a Date literal stored in the underlying byte data into v.
+func (d *decodeState) storeDate(v reflect.Value) {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+ args, err := d.ctor("Date", []reflect.Type{dateType})
+ if err != nil {
+ d.error(err)
+ }
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ v.Set(args[0])
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", dateType, kind))
+ }
+}
+
+// Returns a Date literal from the underlying byte data.
+func (d *decodeState) getDate() interface{} {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ // Prevent d.convertNumber() from parsing the argument as a float64.
+ useNumber := d.useNumber
+ d.useNumber = true
+
+ args := d.ctorInterface()
+ if err := ctorNumArgsMismatch("Date", 1, len(args)); err != nil {
+ d.error(err)
+ }
+ arg0num, isNumber := args[0].(Number)
+ if !isNumber {
+ // validate the date format of the string
+ _, err := util.FormatDate(args[0].(string))
+ if err != nil {
+ d.error(fmt.Errorf("unexpected ISODate format"))
+ }
+ d.useNumber = useNumber
+ return ISODate(args[0].(string))
+ }
+ arg0, err := arg0num.Int64()
+ if err != nil {
+ d.error(fmt.Errorf("expected int64 for first argument of Date constructor"))
+ }
+
+ d.useNumber = useNumber
+ return Date(arg0)
+}
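+
+// Both argument forms therefore decode; a sketch, assuming the string form
+// matches a layout accepted by util.FormatDate:
+//
+//  Date(1136214245000)          -> Date(1136214245000)
+//  Date("2006-01-02T15:04:05Z") -> ISODate("2006-01-02T15:04:05Z")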
diff --git a/src/mongo/gotools/common/json/date_test.go b/src/mongo/gotools/common/json/date_test.go
new file mode 100644
index 00000000000..b606fb62f43
--- /dev/null
+++ b/src/mongo/gotools/common/json/date_test.go
@@ -0,0 +1,99 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestDateValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with Date values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Date(123)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(Date)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, Date(123))
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "Date(123)", "Date(456)", "Date(789)"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(Date)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldEqual, Date(123))
+
+ jsonValue2, ok := jsonMap[key2].(Date)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldEqual, Date(456))
+
+ jsonValue3, ok := jsonMap[key3].(Date)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldEqual, Date(789))
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Date(42)"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(Date)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, Date(42))
+ }
+ })
+
+ Convey("cannot use string as argument", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `Date("123")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+
+ Convey("can specify argument in hexadecimal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Date(0x5f)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(Date)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, Date(0x5f))
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/dbpointer.go b/src/mongo/gotools/common/json/dbpointer.go
new file mode 100644
index 00000000000..24582038576
--- /dev/null
+++ b/src/mongo/gotools/common/json/dbpointer.go
@@ -0,0 +1,71 @@
+package json
+
+import (
+ "fmt"
+ "gopkg.in/mgo.v2/bson"
+ "reflect"
+)
+
+// Transition functions for recognizing DBPointer.
+// Adapted from encoding/json/scanner.go.
+
+// stateDBP is the state after reading `DBP`.
+func stateDBP(s *scanner, c int) int {
+ if c == 'o' {
+ s.step = generateState("DBPointer", []byte("inter"), stateConstructor)
+ return scanContinue
+ }
+ return s.error(c, "in literal DBPointer (expecting 'o')")
+}
+
+// Decodes a DBPointer literal stored in the underlying byte data into v.
+func (d *decodeState) storeDBPointer(v reflect.Value) {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args := d.ctorInterface()
+ if len(args) != 2 {
+ d.error(fmt.Errorf("expected 2 arguments to DBPointer constructor, but %v received", len(args)))
+ }
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ arg0, ok := args[0].(string)
+ if !ok {
+ d.error(fmt.Errorf("expected first argument to DBPointer to be of type string"))
+ }
+ arg1, ok := args[1].(ObjectId)
+ if !ok {
+ d.error(fmt.Errorf("expected second argument to DBPointer to be of type ObjectId, but ended up being %t", args[1]))
+ }
+ id := bson.ObjectIdHex(string(arg1))
+ v.Set(reflect.ValueOf(DBPointer{arg0, id}))
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", dbPointerType, kind))
+ }
+}
+
+// Returns a DBPointer literal from the underlying byte data.
+func (d *decodeState) getDBPointer() interface{} {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args := d.ctorInterface()
+ if err := ctorNumArgsMismatch("DBPointer", 2, len(args)); err != nil {
+ d.error(err)
+ }
+ arg0, ok := args[0].(string)
+ if !ok {
+ d.error(fmt.Errorf("expected string for first argument of DBPointer constructor"))
+ }
+ arg1, ok := args[1].(ObjectId)
+ if !ok {
+ d.error(fmt.Errorf("expected ObjectId for second argument of DBPointer constructor"))
+ }
+ id := bson.ObjectIdHex(string(arg1))
+
+ return DBPointer{arg0, id}
+}
diff --git a/src/mongo/gotools/common/json/dbpointer_test.go b/src/mongo/gotools/common/json/dbpointer_test.go
new file mode 100644
index 00000000000..9831f7028ac
--- /dev/null
+++ b/src/mongo/gotools/common/json/dbpointer_test.go
@@ -0,0 +1,84 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+)
+
+func TestDBPointerValue(t *testing.T) {
+
+ Convey("Unmarshalling JSON with DBPointer values", t, func() {
+ key := "key"
+ value := `DBPointer("ref", ObjectId("552ffe9f5739878e73d116a9"))`
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBPointer)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBPointer{"ref", bson.ObjectIdHex("552ffe9f5739878e73d116a9")})
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value2 := `DBPointer("ref2", ObjectId("552ffed95739878e73d116aa"))`
+ value3 := `DBPointer("ref3", ObjectId("552fff215739878e73d116ab"))`
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(DBPointer)
+ So(ok, ShouldBeTrue)
+
+ So(jsonValue1, ShouldResemble, DBPointer{"ref", bson.ObjectIdHex("552ffe9f5739878e73d116a9")})
+
+ jsonValue2, ok := jsonMap[key2].(DBPointer)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldResemble, DBPointer{"ref2", bson.ObjectIdHex("552ffed95739878e73d116aa")})
+
+ jsonValue3, ok := jsonMap[key3].(DBPointer)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldResemble, DBPointer{"ref3", bson.ObjectIdHex("552fff215739878e73d116ab")})
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(DBPointer)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBPointer{"ref", bson.ObjectIdHex("552ffe9f5739878e73d116a9")})
+ }
+ })
+
+ Convey("will not accept an $id type that is not an ObjectId", func() {
+ value := `DBPointer("ref", 4)`
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+
+ })
+}
diff --git a/src/mongo/gotools/common/json/dbref.go b/src/mongo/gotools/common/json/dbref.go
new file mode 100644
index 00000000000..23ee1eb2d50
--- /dev/null
+++ b/src/mongo/gotools/common/json/dbref.go
@@ -0,0 +1,69 @@
+package json
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Transition functions for recognizing DBRef and Dbref.
+// Adapted from encoding/json/scanner.go.
+
+// stateDBR is the state after reading `DBR`.
+func stateDBR(s *scanner, c int) int {
+ if c == 'e' {
+ s.step = generateState("DBRef", []byte("f"), stateConstructor)
+ return scanContinue
+ }
+ return s.error(c, "in literal DBRef (expecting 'e')")
+}
+
+// stateDb is the state after reading `Db`.
+func stateDb(s *scanner, c int) int {
+ if c == 'r' {
+ s.step = generateState("Dbref", []byte("ef"), stateConstructor)
+ return scanContinue
+ }
+ return s.error(c, "in literal Dbref (expecting 'r')")
+}
+
+// Decodes a DBRef literal stored in the underlying byte data into v.
+func (d *decodeState) storeDBRef(v reflect.Value) {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args := d.ctorInterface()
+ if len(args) != 2 {
+ d.error(fmt.Errorf("expected 2 arguments to DBRef constructor, but %v received", len(args)))
+ }
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ arg0, ok := args[0].(string)
+ if !ok {
+ d.error(fmt.Errorf("expected first argument to DBRef to be of type string"))
+ }
+ arg1 := args[1]
+ v.Set(reflect.ValueOf(DBRef{arg0, arg1, ""}))
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", dbRefType, kind))
+ }
+}
+
+// Returns a DBRef literal from the underlying byte data.
+func (d *decodeState) getDBRef() interface{} {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args := d.ctorInterface()
+ if err := ctorNumArgsMismatch("DBRef", 2, len(args)); err != nil {
+ d.error(err)
+ }
+ arg0, ok := args[0].(string)
+ if !ok {
+ d.error(fmt.Errorf("expected string for first argument of DBRef constructor"))
+ }
+ return DBRef{arg0, args[1], ""}
+}
diff --git a/src/mongo/gotools/common/json/dbref_test.go b/src/mongo/gotools/common/json/dbref_test.go
new file mode 100644
index 00000000000..e1f4c6bb287
--- /dev/null
+++ b/src/mongo/gotools/common/json/dbref_test.go
@@ -0,0 +1,347 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "math"
+ "testing"
+)
+
+func TestDBRefValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with DBRef values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", "123")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", "123", ""})
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := `DBRef("ref1", "123")`,
+ `DBRef("ref2", "456")`, `DBRef("ref3", "789")`
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldResemble, DBRef{"ref1", "123", ""})
+
+ jsonValue2, ok := jsonMap[key2].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldResemble, DBRef{"ref2", "456", ""})
+
+ jsonValue3, ok := jsonMap[key3].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldResemble, DBRef{"ref3", "789", ""})
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", "42")`
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", "42", ""})
+ }
+ })
+
+ Convey("can use alternative capitalization ('Dbref')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `Dbref("ref", "123")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", "123", ""})
+ })
+
+ Convey("can have any extended JSON value for id parameter", func() {
+
+ Convey("a null literal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", null)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", nil, ""})
+ })
+
+ Convey("a true literal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", true)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", true, ""})
+ })
+
+ Convey("a false literal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", false)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", false, ""})
+ })
+
+ Convey("an undefined literal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", undefined)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", Undefined{}, ""})
+ })
+
+ Convey("a NaN literal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", NaN)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Collection, ShouldEqual, "ref")
+
+ id, ok := jsonValue.Id.(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsNaN(id), ShouldBeTrue)
+
+ })
+
+ Convey("an Infinity literal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", Infinity)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Collection, ShouldEqual, "ref")
+
+ id, ok := jsonValue.Id.(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsInf(id, 1), ShouldBeTrue)
+
+ })
+
+ Convey("a MinKey literal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", MinKey)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", MinKey{}, ""})
+ })
+
+ Convey("a MaxKey literal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", MaxKey)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", MaxKey{}, ""})
+ })
+
+ Convey("an ObjectId object", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", ObjectId("123"))`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", ObjectId("123"), ""})
+ })
+
+ Convey("a NumberInt object", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", NumberInt(123))`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", NumberInt(123), ""})
+ })
+
+ Convey("a NumberLong object", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", NumberLong(123))`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", NumberLong(123), ""})
+ })
+
+ Convey("a RegExp object", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", RegExp("xyz", "i"))`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", RegExp{"xyz", "i"}, ""})
+ })
+
+ Convey("a regular expression literal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", /xyz/i)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", RegExp{"xyz", "i"}, ""})
+ })
+
+ Convey("a Timestamp object", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", Timestamp(123, 321))`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", Timestamp{123, 321}, ""})
+ })
+
+ Convey("a string literal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", "xyz")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, DBRef{"ref", "xyz", ""})
+ })
+
+ Convey("a numeric literal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `DBRef("ref", 123)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Collection, ShouldEqual, "ref")
+
+ id, ok := jsonValue.Id.(int32)
+ So(ok, ShouldBeTrue)
+ So(id, ShouldAlmostEqual, 123)
+ })
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/decode.go b/src/mongo/gotools/common/json/decode.go
new file mode 100644
index 00000000000..e2e6418ed5f
--- /dev/null
+++ b/src/mongo/gotools/common/json/decode.go
@@ -0,0 +1,1273 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "gopkg.in/mgo.v2/bson"
+ "math"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshalling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
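+
+// A minimal usage sketch: when the target is an interface{} value, extended
+// JSON constructors decode into this package's wrapper types.
+//
+//  var doc map[string]interface{}
+//  err := Unmarshal([]byte(`{"n": NumberLong(5)}`), &doc)
+//  // on success, doc["n"] holds NumberLong(5)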
+
+func UnmarshalMap(data []byte) (map[string]interface{}, error) {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return nil, err
+ }
+
+ d.init(data)
+ return d.unmarshalMap()
+}
+
+func UnmarshalBsonD(data []byte) (bson.D, error) {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return nil, err
+ }
+
+ d.init(data)
+ return d.unmarshalBsonD()
+}
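+
+// UnmarshalBsonD is useful when document key order must be preserved, e.g. for
+// index or sort specifications:
+//
+//  d, err := UnmarshalBsonD([]byte(`{"a": 1, "b": -1}`))
+//  // on success, d[0].Name == "a" and d[1].Name == "b"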
+
+// Unmarshaler is the interface implemented by objects
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
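+
+// Each of the following misuses of Unmarshal returns an
+// *InvalidUnmarshalError (inputs are illustrative):
+//
+//	var v struct{}
+//	Unmarshal([]byte(`{}`), v)                // "json: Unmarshal(non-pointer struct {})"
+//	Unmarshal([]byte(`{}`), nil)              // "json: Unmarshal(nil)"
+//	Unmarshal([]byte(`{}`), (*struct{})(nil)) // "json: Unmarshal(nil *struct {})"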
+
+func (d *decodeState) unmarshalMap() (out map[string]interface{}, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ d.scan.reset()
+	// Decode the top-level document directly into a map.
+ out = d.document()
+ return out, d.savedError
+}
+
+func (d *decodeState) unmarshalBsonD() (out bson.D, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ d.scan.reset()
+	// Decode the top-level document directly into a bson.D.
+ out = d.bsonDocument()
+ return out, d.savedError
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int32 returns the number as an int32.
+func (n Number) Int32() (int32, error) {
+ x, err := n.Int64()
+ return int32(x), err
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ base := 10
+ if isHexPrefix(string(n)) {
+ base = 0 // strconv.ParseInt will infer base 16
+ }
+ return strconv.ParseInt(string(n), base, 64)
+}
+
+// Uint8 returns the number as a uint8.
+func (n Number) Uint8() (uint8, error) {
+ x, err := n.Uint64()
+ return uint8(x), err
+}
+
+// Uint32 returns the number as a uint32.
+func (n Number) Uint32() (uint32, error) {
+ x, err := n.Uint64()
+ return uint32(x), err
+}
+
+// Uint64 returns the number as a uint64.
+func (n Number) Uint64() (uint64, error) {
+ base := 10
+ if isHexPrefix(string(n)) {
+ base = 0 // strconv.ParseUint will infer base 16
+ }
+ return strconv.ParseUint(string(n), base, 64)
+}
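+
+// A small sketch of the integer accessors with the hexadecimal extension
+// (assuming isHexPrefix matches a leading 0x/0X; values are illustrative):
+//
+//	i, _ := Number("0x10").Int64()  // i == 16
+//	f, _ := Number("1.5").Float64() // f == 1.5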
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error
+ tempstr string // scratch space to avoid some allocations
+ useNumber bool
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+	// Invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else {
+ d.scan.step(&d.scan, ']')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := int(d.data[d.off])
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+func (d *decodeState) document() map[string]interface{} {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+ return nil
+ case scanBeginObject:
+ return d.objectInterface()
+ }
+}
+
+func (d *decodeState) bsonDocument() bson.D {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+ return nil
+ case scanBeginObject:
+ return d.bsonDInterface()
+ }
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// It updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.step = stateBeginValue
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.step(&d.scan, ':')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+ }
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// If it encounters an Unmarshaler, indirect stops and returns that.
+// If decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// The first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"array", v.Type()})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface(false)))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type()})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// The first byte of the object ('{') has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type()})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[string]T
+ switch v.Kind() {
+ case reflect.Map:
+ // map must have string kind
+ t := v.Type()
+ if t.Key().Kind() != reflect.String {
+ d.saveError(&UnmarshalTypeError{"object", v.Type()})
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+
+ case reflect.Struct:
+ // do nothing
+
+ case reflect.Slice:
+ // this is only a valid case if the output type is a bson.D
+ t := v.Type()
+ if t == orderedBSONType {
+ v.Set(reflect.ValueOf(d.bsonDInterface()))
+ return
+ }
+ fallthrough // can't unmarshal into a regular slice, goto "default" error case
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type()})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := maybeUnquoteBytes(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, key) {
+ f = ff
+ break
+ }
+ if f == nil && ff.equalFold(ff.nameBytes, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+ // Read value.
+ if destring {
+ d.value(reflect.ValueOf(&d.tempstr))
+ d.literalStore([]byte(d.tempstr), subv, true)
+ d.tempstr = "" // Zero scratch space for successive values.
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kv := reflect.ValueOf(key).Convert(v.Type().Key())
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to an int32, an int64, a
+// float64, or a Number, depending on the setting of d.useNumber and whether
+// the string is specified in hexadecimal. It first parses the string as an
+// integer; if that fails, the value is treated as a float. An integer that
+// fits within the bounds of an int32 is returned as an int32.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ if d.useNumber {
+ return Number(s), nil
+ }
+ parsedInteger, err := strconv.ParseInt(s, 0, 64)
+ if err != nil {
+ parsedFloat, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0)}
+ }
+ return parsedFloat, nil
+ }
+
+ if parsedInteger <= math.MaxInt32 && parsedInteger >= math.MinInt32 {
+ return int32(parsedInteger), nil
+ }
+	return int64(parsedInteger), nil
+}
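+
+// Illustrative convertNumber results with d.useNumber unset:
+//
+//	d.convertNumber("1")          // int32(1): fits in an int32
+//	d.convertNumber("5000000000") // int64(5000000000): exceeds int32 bounds
+//	d.convertNumber("1.5")        // float64(1.5): integer parse fails
+//
+// With d.useNumber set, each would be returned as a Number instead.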
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+ // Empty string given
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := isNull(item) // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type()})
+ }
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; {
+ case isNull(item): // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case c == 't', c == 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type()})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type()})
+ }
+ }
+
+ case c == '"', c == '\'': // string
+
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type()})
+ case reflect.Slice:
+ if v.Type() != byteSliceType {
+ d.saveError(&UnmarshalTypeError{"string", v.Type()})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.Set(reflect.ValueOf(b[0:n]))
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type()})
+ }
+ }
+
+ case isNumber(item):
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type()})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type()})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ base := 10
+ if isHexPrefix(s) {
+ base = 0 // strconv.ParseInt will infer base 16
+ }
+ n, err := strconv.ParseInt(s, base, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ base := 10
+ if isHexPrefix(s) {
+ base = 0 // strconv.ParseUint will infer base 16
+ }
+ n, err := strconv.ParseUint(s, base, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type()})
+ break
+ }
+ v.SetFloat(n)
+ }
+
+ default:
+ if ok := d.storeExtendedLiteral(item, v, fromQuoted); !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}. It takes a boolean
+// parameter denoting whether the value is being unmarshalled within a
+// bson.D, so that bson.D is used as the default object type for documents
+// nested inside other bson.D documents.
+func (d *decodeState) valueInterface(insideBSOND bool) interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case scanBeginArray:
+ return d.arrayInterface(insideBSOND)
+ case scanBeginObject:
+ if insideBSOND {
+ return d.bsonDInterface()
+ }
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ }
+}
+
+// arrayInterface is like array but returns []interface{}. It takes a boolean
+// parameter denoting whether the value is being unmarshalled within a
+// bson.D, so that bson.D is used as the default object type for documents
+// nested inside other bson.D documents.
+func (d *decodeState) arrayInterface(insideBSOND bool) []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface(insideBSOND))
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// bsonDInterface is like objectInterface but returns a bson.D, preserving key order.
+func (d *decodeState) bsonDInterface() bson.D {
+ m := bson.D{}
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := maybeUnquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m = append(m, bson.DocElem{Name: key, Value: d.valueInterface(true)})
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ key, ok := maybeUnquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface(false)
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; {
+ case isNull(item): // null
+ return nil
+
+ case c == 't', c == 'f': // true, false
+ return c == 't'
+
+ case c == '"', c == '\'': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ case isNumber(item): // number
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+
+ default:
+ if value, ok := d.getExtendedLiteral(item); ok {
+ return value
+ }
+ d.error(errPhase)
+ panic("unreachable")
+ }
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
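+// For example, getu4([]byte(`\u0041`)) == 'A'.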
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different than for Go, so cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
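+
+// A short sketch of unquote with the single-quote extension (inputs are
+// illustrative):
+//
+//	s, ok := unquote([]byte(`"a\u0062c"`)) // s == "abc", ok == true
+//	s, ok = unquote([]byte("'ab'"))        // s == "ab", ok == true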
+
+// maybeUnquote is like unquote but also accepts input that is not quoted
+// (such as an unquoted object key), returning it unchanged.
+func maybeUnquote(s []byte) (t string, ok bool) {
+ s, ok = maybeUnquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' && s[0] != '\'' || s[len(s)-1] != '"' && s[len(s)-1] != '\'' {
+ return
+ }
+ singleQuoted := s[0] == '\''
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c == '\'' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+		// An unescaped double quote (when double-quoted) or any control character is invalid.
+ case !singleQuoted && c == '"', c < ' ':
+ return
+
+		// An unescaped single quote (when single-quoted) is invalid.
+ case singleQuoted && c == '\'':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
diff --git a/src/mongo/gotools/common/json/decode_d_test.go b/src/mongo/gotools/common/json/decode_d_test.go
new file mode 100644
index 00000000000..84a95424e59
--- /dev/null
+++ b/src/mongo/gotools/common/json/decode_d_test.go
@@ -0,0 +1,118 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+)
+
+func TestDecodeBsonD(t *testing.T) {
+ Convey("When unmarshalling JSON into a bson.D", t, func() {
+ Convey("a document should be stored with keys in the same order", func() {
+ data := `{"a":1, "b":2, "c":3, "d":4, "e":5, "f":6}`
+ out := bson.D{}
+ err := Unmarshal([]byte(data), &out)
+ So(err, ShouldBeNil)
+ So(len(out), ShouldEqual, 6)
+ So(out[0].Name, ShouldEqual, "a")
+ So(out[1].Name, ShouldEqual, "b")
+ So(out[2].Name, ShouldEqual, "c")
+ So(out[3].Name, ShouldEqual, "d")
+ So(out[4].Name, ShouldEqual, "e")
+ So(out[5].Name, ShouldEqual, "f")
+
+ })
+
+ Convey("a nested bson.D should be parsed", func() {
+ data := `{"a": 17, "b":{"foo":"bar", "baz":"boo"}, c:"wow" }`
+ out := struct {
+ A int `json:"a"`
+ B bson.D `json:"b"`
+ C string `json:"c"`
+ }{}
+ err := Unmarshal([]byte(data), &out)
+ So(err, ShouldBeNil)
+ So(out.A, ShouldEqual, 17)
+ So(out.C, ShouldEqual, "wow")
+ So(len(out.B), ShouldEqual, 2)
+ So(out.B[0].Name, ShouldEqual, "foo")
+ So(out.B[0].Value, ShouldEqual, "bar")
+ So(out.B[1].Name, ShouldEqual, "baz")
+ So(out.B[1].Value, ShouldEqual, "boo")
+ })
+
+ Convey("objects nested within DocElems should still be parsed", func() {
+ data := `{"a":["x", "y","z"], "b":{"foo":"bar", "baz":"boo"}}`
+ out := bson.D{}
+ err := Unmarshal([]byte(data), &out)
+ So(err, ShouldBeNil)
+ So(len(out), ShouldEqual, 2)
+ So(out[0].Name, ShouldEqual, "a")
+ So(out[1].Name, ShouldEqual, "b")
+ So(out[0].Value, ShouldResemble, []interface{}{"x", "y", "z"})
+ So(out[1].Value, ShouldResemble, bson.D{{"foo", "bar"}, {"baz", "boo"}})
+ })
+
+ Convey("only subdocuments inside a bson.D should be parsed into a bson.D", func() {
+ data := `{subA: {a:{b:{c:9}}}, subB:{a:{b:{c:9}}}}`
+ out := struct {
+ A interface{} `json:"subA"`
+ B bson.D `json:"subB"`
+ }{}
+ err := Unmarshal([]byte(data), &out)
+ So(err, ShouldBeNil)
+ aMap := out.A.(map[string]interface{})
+ So(len(aMap), ShouldEqual, 1)
+ aMapSub := aMap["a"].(map[string]interface{})
+ So(len(aMapSub), ShouldEqual, 1)
+ aMapSubSub := aMapSub["b"].(map[string]interface{})
+ So(aMapSubSub["c"], ShouldEqual, 9)
+ So(len(out.B), ShouldEqual, 1)
+ // using string comparison for simplicity
+ c := bson.D{{Name: "c", Value: 9}}
+ b := bson.D{{Name: "b", Value: c}}
+ a := bson.D{{Name: "a", Value: b}}
+ So(fmt.Sprintf("%v", out.B), ShouldEqual, fmt.Sprintf("%v", a))
+ })
+
+ Convey("subdocuments inside arrays inside bson.D should be parsed into a bson.D", func() {
+ data := `{"a":[1,2,{b:"inner"}]}`
+ out := bson.D{}
+ err := Unmarshal([]byte(data), &out)
+ So(err, ShouldBeNil)
+ So(len(out), ShouldEqual, 1)
+ So(out[0].Value, ShouldHaveSameTypeAs, []interface{}{})
+ innerArray := out[0].Value.([]interface{})
+ So(len(innerArray), ShouldEqual, 3)
+ So(innerArray[0], ShouldEqual, 1)
+ So(innerArray[1], ShouldEqual, 2)
+ So(innerArray[2], ShouldHaveSameTypeAs, bson.D{})
+ innerD := innerArray[2].(bson.D)
+ So(len(innerD), ShouldEqual, 1)
+ So(innerD[0].Name, ShouldEqual, "b")
+ So(innerD[0].Value, ShouldEqual, "inner")
+ })
+
+ Convey("null should be a valid value", func() {
+ data := `{"a":true, "b":null, "c": 5}`
+ out := bson.D{}
+ err := Unmarshal([]byte(data), &out)
+ So(err, ShouldBeNil)
+ So(len(out), ShouldEqual, 3)
+ So(out[0].Name, ShouldEqual, "a")
+ So(out[0].Value, ShouldEqual, true)
+ So(out[1].Name, ShouldEqual, "b")
+ So(out[1].Value, ShouldBeNil)
+ So(out[2].Name, ShouldEqual, "c")
+ So(out[2].Value, ShouldEqual, 5)
+ })
+
+ })
+	Convey("Unmarshalling to a non-bson.D slice type should fail", t, func() {
+ data := `{"a":["x", "y","z"], "b":{"foo":"bar", "baz":"boo"}}`
+ out := []interface{}{}
+ err := Unmarshal([]byte(data), &out)
+ So(err, ShouldNotBeNil)
+ })
+}
diff --git a/src/mongo/gotools/common/json/decode_test.go b/src/mongo/gotools/common/json/decode_test.go
new file mode 100644
index 00000000000..d32f00b4952
--- /dev/null
+++ b/src/mongo/gotools/common/json/decode_test.go
@@ -0,0 +1,1364 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "image"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+type T struct {
+ X string
+ Y int
+ Z int `json:"-"`
+}
+
+type U struct {
+ Alphabet string `json:"alpha"`
+}
+
+type V struct {
+ F1 interface{}
+ F2 int32
+ F3 Number
+}
+
+// ifaceNumAsFloat64/ifaceNumAsNumber are used to test unmarshaling with and
+// without UseNumber
+var ifaceNumAsFloat64 = map[string]interface{}{
+ "k1": float64(1),
+ "k2": "s",
+ "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)},
+ "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)},
+}
+
+// ifaceNumAsMixedTypes is used to test unmarshalling with extended JSON
+var ifaceNumAsMixedTypes = map[string]interface{}{
+ "k1": int32(1),
+ "k2": "s",
+ "k3": []interface{}{int32(1), int32(2), float64(3e-3)},
+ "k4": map[string]interface{}{"kk1": "s", "kk2": int32(2)},
+}
+
+var ifaceNumAsNumber = map[string]interface{}{
+ "k1": Number("1"),
+ "k2": "s",
+ "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")},
+ "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")},
+}
+
+type tx struct {
+ x int
+}
+
+// A type that can unmarshal itself.
+
+type unmarshaler struct {
+ T bool
+}
+
+func (u *unmarshaler) UnmarshalJSON(b []byte) error {
+ *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called.
+ return nil
+}
+
+type ustruct struct {
+ M unmarshaler
+}
+
+type unmarshalerText struct {
+ T bool
+}
+
+// needed for re-marshaling tests
+func (u *unmarshalerText) MarshalText() ([]byte, error) {
+ return []byte(""), nil
+}
+
+func (u *unmarshalerText) UnmarshalText(b []byte) error {
+ *u = unmarshalerText{true} // All we need to see that UnmarshalText is called.
+ return nil
+}
+
+var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil)
+
+type ustructText struct {
+ M unmarshalerText
+}
+
+var (
+	um0, um1 unmarshaler // targets of unmarshaling
+ ump = &um1
+ umtrue = unmarshaler{true}
+ umslice = []unmarshaler{{true}}
+ umslicep = new([]unmarshaler)
+ umstruct = ustruct{unmarshaler{true}}
+
+	um0T, um1T unmarshalerText // targets of unmarshaling
+ umpT = &um1T
+ umtrueT = unmarshalerText{true}
+ umsliceT = []unmarshalerText{{true}}
+ umslicepT = new([]unmarshalerText)
+ umstructT = ustructText{unmarshalerText{true}}
+)
+
+// Test data structures for anonymous fields.
+
+type Point struct {
+ Z int
+}
+
+type Top struct {
+ Level0 int
+ Embed0
+ *Embed0a
+ *Embed0b `json:"e,omitempty"` // treated as named
+ Embed0c `json:"-"` // ignored
+ Loop
+ Embed0p // has Point with X, Y, used
+ Embed0q // has Point with Z, used
+}
+
+type Embed0 struct {
+ Level1a int // overridden by Embed0a's Level1a with json tag
+ Level1b int // used because Embed0a's Level1b is renamed
+ Level1c int // used because Embed0a's Level1c is ignored
+ Level1d int // annihilated by Embed0a's Level1d
+ Level1e int `json:"x"` // annihilated by Embed0a.Level1e
+}
+
+type Embed0a struct {
+ Level1a int `json:"Level1a,omitempty"`
+ Level1b int `json:"LEVEL1B,omitempty"`
+ Level1c int `json:"-"`
+ Level1d int // annihilated by Embed0's Level1d
+ Level1f int `json:"x"` // annihilated by Embed0's Level1e
+}
+
+type Embed0b Embed0
+
+type Embed0c Embed0
+
+type Embed0p struct {
+ image.Point
+}
+
+type Embed0q struct {
+ Point
+}
+
+type Loop struct {
+ Loop1 int `json:",omitempty"`
+ Loop2 int `json:",omitempty"`
+ *Loop
+}
+
+// From reflect test:
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+ S6
+ S7
+ S8
+}
+
+type S6 struct {
+ X int
+}
+
+type S7 S6
+
+type S8 struct {
+ S9
+}
+
+type S9 struct {
+ X int
+ Y int
+}
+
+// From reflect test:
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+ S11
+ S12
+ S13
+}
+
+type S11 struct {
+ S6
+}
+
+type S12 struct {
+ S6
+}
+
+type S13 struct {
+ S8
+}
+
+type unmarshalTest struct {
+ in string
+ ptr interface{}
+ out interface{}
+ err error
+ useNumber bool
+}
+
+type Ambig struct {
+ // Given "hello", the first match should win.
+ First int `json:"HELLO"`
+ Second int `json:"Hello"`
+}
+
+type XYZ struct {
+ X interface{}
+ Y interface{}
+ Z interface{}
+}
+
+var unmarshalTests = []unmarshalTest{
+ // basic types
+ {in: `true`, ptr: new(bool), out: true},
+ {in: `1`, ptr: new(int), out: 1},
+ {in: `1.2`, ptr: new(float64), out: 1.2},
+ {in: `-5`, ptr: new(int16), out: int16(-5)},
+ {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true},
+ {in: `2`, ptr: new(Number), out: Number("2")},
+ {in: `2`, ptr: new(interface{}), out: int32(2)},
+ {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true},
+ {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"},
+ {in: `"http:\/\/"`, ptr: new(string), out: "http://"},
+ {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"},
+ {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"},
+ {in: "null", ptr: new(interface{}), out: nil},
+ {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf("")}},
+ {in: `{"x": 1}`, ptr: new(tx), out: tx{}},
+ {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: int32(1), F2: int32(2), F3: Number("3")}},
+ {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true},
+ {in: `{"k1":1,"k2":"s","k3":[1,2,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsMixedTypes},
+ {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true},
+
+ // raw values with whitespace
+ {in: "\n true ", ptr: new(bool), out: true},
+ {in: "\t 1 ", ptr: new(int), out: 1},
+ {in: "\r 1.2 ", ptr: new(float64), out: 1.2},
+ {in: "\t -5 \n", ptr: new(int16), out: int16(-5)},
+ {in: "\t \"a\\u1234\" \n", ptr: new(string), out: "a\u1234"},
+
+ // Z has a "-" tag.
+ {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}},
+
+ {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}},
+ {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}},
+ {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}},
+
+ // syntax errors
+ {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}},
+ {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}},
+ {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true},
+
+ // raw value errors
+ {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}},
+ {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}},
+ {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}},
+ {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}},
+
+ // array tests
+ {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}},
+ {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}},
+ {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}},
+
+ // empty array to interface test
+ {in: `[]`, ptr: new([]interface{}), out: []interface{}{}},
+ {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)},
+ {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}},
+ {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}},
+
+ // composite tests
+ {in: allValueIndent, ptr: new(All), out: allValue},
+ {in: allValueCompact, ptr: new(All), out: allValue},
+ {in: allValueIndent, ptr: new(*All), out: &allValue},
+ {in: allValueCompact, ptr: new(*All), out: &allValue},
+ {in: pallValueIndent, ptr: new(All), out: pallValue},
+ {in: pallValueCompact, ptr: new(All), out: pallValue},
+ {in: pallValueIndent, ptr: new(*All), out: &pallValue},
+ {in: pallValueCompact, ptr: new(*All), out: &pallValue},
+
+ // unmarshal interface test
+ {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called
+ {in: `{"T":false}`, ptr: &ump, out: &umtrue},
+ {in: `[{"T":false}]`, ptr: &umslice, out: umslice},
+ {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice},
+ {in: `{"M":{"T":false}}`, ptr: &umstruct, out: umstruct},
+
+ // UnmarshalText interface test
+	{in: `"X"`, ptr: &um0T, out: umtrueT}, // input does not set T, so test fails if the custom text unmarshaler is not called
+ {in: `"X"`, ptr: &umpT, out: &umtrueT},
+ {in: `["X"]`, ptr: &umsliceT, out: umsliceT},
+ {in: `["X"]`, ptr: &umslicepT, out: &umsliceT},
+ {in: `{"M":"X"}`, ptr: &umstructT, out: umstructT},
+
+ {
+ in: `{
+ "Level0": 1,
+ "Level1b": 2,
+ "Level1c": 3,
+ "x": 4,
+ "Level1a": 5,
+ "LEVEL1B": 6,
+ "e": {
+ "Level1a": 8,
+ "Level1b": 9,
+ "Level1c": 10,
+ "Level1d": 11,
+ "x": 12
+ },
+ "Loop1": 13,
+ "Loop2": 14,
+ "X": 15,
+ "Y": 16,
+ "Z": 17
+ }`,
+ ptr: new(Top),
+ out: Top{
+ Level0: 1,
+ Embed0: Embed0{
+ Level1b: 2,
+ Level1c: 3,
+ },
+ Embed0a: &Embed0a{
+ Level1a: 5,
+ Level1b: 6,
+ },
+ Embed0b: &Embed0b{
+ Level1a: 8,
+ Level1b: 9,
+ Level1c: 10,
+ Level1d: 11,
+ Level1e: 12,
+ },
+ Loop: Loop{
+ Loop1: 13,
+ Loop2: 14,
+ },
+ Embed0p: Embed0p{
+ Point: image.Point{X: 15, Y: 16},
+ },
+ Embed0q: Embed0q{
+ Point: Point{Z: 17},
+ },
+ },
+ },
+ {
+ in: `{"hello": 1}`,
+ ptr: new(Ambig),
+ out: Ambig{First: 1},
+ },
+
+ {
+ in: `{"X": 1,"Y":2}`,
+ ptr: new(S5),
+ out: S5{S8: S8{S9: S9{Y: 2}}},
+ },
+ {
+ in: `{"X": 1,"Y":2}`,
+ ptr: new(S10),
+ out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}},
+ },
+
+ // invalid UTF-8 is coerced to valid UTF-8.
+ {
+ in: "\"hello\xffworld\"",
+ ptr: new(string),
+ out: "hello\ufffdworld",
+ },
+ {
+ in: "\"hello\xc2\xc2world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\xc2\xffworld\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld",
+ },
+}
+
+func TestMarshal(t *testing.T) {
+ b, err := Marshal(allValue)
+ if err != nil {
+ t.Fatalf("Marshal allValue: %v", err)
+ }
+ if string(b) != allValueCompact {
+ t.Errorf("Marshal allValueCompact")
+ diff(t, b, []byte(allValueCompact))
+ return
+ }
+
+ b, err = Marshal(pallValue)
+ if err != nil {
+ t.Fatalf("Marshal pallValue: %v", err)
+ }
+ if string(b) != pallValueCompact {
+ t.Errorf("Marshal pallValueCompact")
+ diff(t, b, []byte(pallValueCompact))
+ return
+ }
+}
+
+var badUTF8 = []struct {
+ in, out string
+}{
+ {"hello\xffworld", `"hello\ufffdworld"`},
+ {"", `""`},
+ {"\xff", `"\ufffd"`},
+ {"\xff\xff", `"\ufffd\ufffd"`},
+ {"a\xffb", `"a\ufffdb"`},
+ {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`},
+}
+
+func TestMarshalBadUTF8(t *testing.T) {
+ for _, tt := range badUTF8 {
+ b, err := Marshal(tt.in)
+ if string(b) != tt.out || err != nil {
+ t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out)
+ }
+ }
+}
+
+func TestMarshalNumberZeroVal(t *testing.T) {
+ var n Number
+ out, err := Marshal(n)
+ if err != nil {
+ t.Fatal(err)
+ }
+ outStr := string(out)
+ if outStr != "0" {
+ t.Fatalf("Invalid zero val for Number: %q", outStr)
+ }
+}
+
+func TestMarshalEmbeds(t *testing.T) {
+ top := &Top{
+ Level0: 1,
+ Embed0: Embed0{
+ Level1b: 2,
+ Level1c: 3,
+ },
+ Embed0a: &Embed0a{
+ Level1a: 5,
+ Level1b: 6,
+ },
+ Embed0b: &Embed0b{
+ Level1a: 8,
+ Level1b: 9,
+ Level1c: 10,
+ Level1d: 11,
+ Level1e: 12,
+ },
+ Loop: Loop{
+ Loop1: 13,
+ Loop2: 14,
+ },
+ Embed0p: Embed0p{
+ Point: image.Point{X: 15, Y: 16},
+ },
+ Embed0q: Embed0q{
+ Point: Point{Z: 17},
+ },
+ }
+ b, err := Marshal(top)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17}"
+ if string(b) != want {
+ t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want)
+ }
+}
+
+func TestUnmarshal(t *testing.T) {
+ for i, tt := range unmarshalTests {
+ var scan scanner
+ in := []byte(tt.in)
+ if err := checkValid(in, &scan); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: checkValid: %#v", i, err)
+ continue
+ }
+ }
+ if tt.ptr == nil {
+ continue
+ }
+ // v = new(right-type)
+ v := reflect.New(reflect.TypeOf(tt.ptr).Elem())
+ dec := NewDecoder(bytes.NewReader(in))
+ if tt.useNumber {
+ dec.UseNumber()
+ }
+ if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: %v want %v", i, err, tt.err)
+ continue
+ }
+ if !reflect.DeepEqual(v.Elem().Interface(), tt.out) {
+ t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out)
+ data, _ := Marshal(v.Elem().Interface())
+ println(string(data))
+ data, _ = Marshal(tt.out)
+ println(string(data))
+ continue
+ }
+
+ // Check round trip.
+ if tt.err == nil {
+ enc, err := Marshal(v.Interface())
+ if err != nil {
+ t.Errorf("#%d: error re-marshaling: %v", i, err)
+ continue
+ }
+ vv := reflect.New(reflect.TypeOf(tt.ptr).Elem())
+ dec = NewDecoder(bytes.NewReader(enc))
+ if tt.useNumber {
+ dec.UseNumber()
+ }
+ if err := dec.Decode(vv.Interface()); err != nil {
+ t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err)
+ continue
+ }
+ if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) {
+ t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface())
+ t.Errorf(" In: %q", strings.Map(noSpace, string(in)))
+ t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc)))
+ continue
+ }
+ }
+ }
+}
+
+func TestUnmarshalMarshal(t *testing.T) {
+ initBig()
+ var v interface{}
+ if err := Unmarshal(jsonBig, &v); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if !bytes.Equal(jsonBig, b) {
+ t.Errorf("Marshal jsonBig")
+ diff(t, b, jsonBig)
+ return
+ }
+}
+
+var numberTests = []struct {
+ in string
+ i int64
+ intErr string
+ f float64
+ floatErr string
+}{
+ {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1},
+ {in: "-12", i: -12, f: -12.0},
+ {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"},
+}
+
+// Independent of Decode, basic coverage of the accessors in Number
+func TestNumberAccessors(t *testing.T) {
+ for _, tt := range numberTests {
+ n := Number(tt.in)
+ if s := n.String(); s != tt.in {
+ t.Errorf("Number(%q).String() is %q", tt.in, s)
+ }
+ if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i {
+ t.Errorf("Number(%q).Int64() is %d", tt.in, i)
+ } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) {
+ t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, err)
+ }
+ if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f {
+ t.Errorf("Number(%q).Float64() is %g", tt.in, f)
+ } else if (err == nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) {
+ t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err)
+ }
+ }
+}
+
+func TestLargeByteSlice(t *testing.T) {
+ s0 := make([]byte, 2000)
+ for i := range s0 {
+ s0[i] = byte(i)
+ }
+ b, err := Marshal(s0)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ var s1 []byte
+ if err := Unmarshal(b, &s1); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !bytes.Equal(s0, s1) {
+ t.Errorf("Marshal large byte slice")
+ diff(t, s0, s1)
+ }
+}
+
+type Xint struct {
+ X int
+}
+
+func TestUnmarshalInterface(t *testing.T) {
+ var xint Xint
+ var i interface{} = &xint
+ if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if xint.X != 1 {
+ t.Fatalf("Did not write to xint")
+ }
+}
+
+func TestUnmarshalPtrPtr(t *testing.T) {
+ var xint Xint
+ pxint := &xint
+ if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if xint.X != 1 {
+ t.Fatalf("Did not write to xint")
+ }
+}
+
+func TestEscape(t *testing.T) {
+ const input = `"foobar"<html>` + " [\u2028 \u2029]"
+ const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"`
+ b, err := Marshal(input)
+ if err != nil {
+ t.Fatalf("Marshal error: %v", err)
+ }
+ if s := string(b); s != expected {
+ t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected)
+ }
+}
+
+// WrongString is a struct that's misusing the ,string modifier.
+type WrongString struct {
+ Message string `json:"result,string"`
+}
+
+type wrongStringTest struct {
+ in, err string
+}
+
+var wrongStringTests = []wrongStringTest{
+ {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`},
+ {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`},
+ {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`},
+}
+
+// If people misuse the ,string modifier, the error message should be
+// helpful, telling the user that they're doing it wrong.
+func TestErrorMessageFromMisusedString(t *testing.T) {
+ for n, tt := range wrongStringTests {
+ r := strings.NewReader(tt.in)
+ var s WrongString
+ err := NewDecoder(r).Decode(&s)
+ got := fmt.Sprintf("%v", err)
+ if got != tt.err {
+ t.Errorf("%d. got err = %q, want %q", n, got, tt.err)
+ }
+ }
+}
+
+func noSpace(c rune) rune {
+ if isSpace(c) {
+ return -1
+ }
+ return c
+}
+
+type All struct {
+ Bool bool
+ Int int
+ Int8 int8
+ Int16 int16
+ Int32 int32
+ Int64 int64
+ Uint uint
+ Uint8 uint8
+ Uint16 uint16
+ Uint32 uint32
+ Uint64 uint64
+ Uintptr uintptr
+ Float32 float32
+ Float64 float64
+
+ Foo string `json:"bar"`
+ Foo2 string `json:"bar2,dummyopt"`
+
+ IntStr int64 `json:",string"`
+
+ PBool *bool
+ PInt *int
+ PInt8 *int8
+ PInt16 *int16
+ PInt32 *int32
+ PInt64 *int64
+ PUint *uint
+ PUint8 *uint8
+ PUint16 *uint16
+ PUint32 *uint32
+ PUint64 *uint64
+ PUintptr *uintptr
+ PFloat32 *float32
+ PFloat64 *float64
+
+ String string
+ PString *string
+
+ Map map[string]Small
+ MapP map[string]*Small
+ PMap *map[string]Small
+ PMapP *map[string]*Small
+
+ EmptyMap map[string]Small
+ NilMap map[string]Small
+
+ Slice []Small
+ SliceP []*Small
+ PSlice *[]Small
+ PSliceP *[]*Small
+
+ EmptySlice []Small
+ NilSlice []Small
+
+ StringSlice []string
+ ByteSlice []byte
+
+ Small Small
+ PSmall *Small
+ PPSmall **Small
+
+ Interface interface{}
+ PInterface *interface{}
+
+ unexported int
+}
+
+type Small struct {
+ Tag string
+}
+
+var allValue = All{
+ Bool: true,
+ Int: 2,
+ Int8: 3,
+ Int16: 4,
+ Int32: 5,
+ Int64: 6,
+ Uint: 7,
+ Uint8: 8,
+ Uint16: 9,
+ Uint32: 10,
+ Uint64: 11,
+ Uintptr: 12,
+ Float32: 14.1,
+ Float64: 15.1,
+ Foo: "foo",
+ Foo2: "foo2",
+ IntStr: 42,
+ String: "16",
+ Map: map[string]Small{
+ "17": {Tag: "tag17"},
+ "18": {Tag: "tag18"},
+ },
+ MapP: map[string]*Small{
+ "19": {Tag: "tag19"},
+ "20": nil,
+ },
+ EmptyMap: map[string]Small{},
+ Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}},
+ SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}},
+ EmptySlice: []Small{},
+ StringSlice: []string{"str24", "str25", "str26"},
+ ByteSlice: []byte{27, 28, 29},
+ Small: Small{Tag: "tag30"},
+ PSmall: &Small{Tag: "tag31"},
+ Interface: 5.2,
+}
+
+var pallValue = All{
+ PBool: &allValue.Bool,
+ PInt: &allValue.Int,
+ PInt8: &allValue.Int8,
+ PInt16: &allValue.Int16,
+ PInt32: &allValue.Int32,
+ PInt64: &allValue.Int64,
+ PUint: &allValue.Uint,
+ PUint8: &allValue.Uint8,
+ PUint16: &allValue.Uint16,
+ PUint32: &allValue.Uint32,
+ PUint64: &allValue.Uint64,
+ PUintptr: &allValue.Uintptr,
+ PFloat32: &allValue.Float32,
+ PFloat64: &allValue.Float64,
+ PString: &allValue.String,
+ PMap: &allValue.Map,
+ PMapP: &allValue.MapP,
+ PSlice: &allValue.Slice,
+ PSliceP: &allValue.SliceP,
+ PPSmall: &allValue.PSmall,
+ PInterface: &allValue.Interface,
+}
+
+var allValueIndent = `{
+ "Bool": true,
+ "Int": 2,
+ "Int8": 3,
+ "Int16": 4,
+ "Int32": 5,
+ "Int64": 6,
+ "Uint": 7,
+ "Uint8": 8,
+ "Uint16": 9,
+ "Uint32": 10,
+ "Uint64": 11,
+ "Uintptr": 12,
+ "Float32": 14.1,
+ "Float64": 15.1,
+ "bar": "foo",
+ "bar2": "foo2",
+ "IntStr": "42",
+ "PBool": null,
+ "PInt": null,
+ "PInt8": null,
+ "PInt16": null,
+ "PInt32": null,
+ "PInt64": null,
+ "PUint": null,
+ "PUint8": null,
+ "PUint16": null,
+ "PUint32": null,
+ "PUint64": null,
+ "PUintptr": null,
+ "PFloat32": null,
+ "PFloat64": null,
+ "String": "16",
+ "PString": null,
+ "Map": {
+ "17": {
+ "Tag": "tag17"
+ },
+ "18": {
+ "Tag": "tag18"
+ }
+ },
+ "MapP": {
+ "19": {
+ "Tag": "tag19"
+ },
+ "20": null
+ },
+ "PMap": null,
+ "PMapP": null,
+ "EmptyMap": {},
+ "NilMap": null,
+ "Slice": [
+ {
+ "Tag": "tag20"
+ },
+ {
+ "Tag": "tag21"
+ }
+ ],
+ "SliceP": [
+ {
+ "Tag": "tag22"
+ },
+ null,
+ {
+ "Tag": "tag23"
+ }
+ ],
+ "PSlice": null,
+ "PSliceP": null,
+ "EmptySlice": [],
+ "NilSlice": null,
+ "StringSlice": [
+ "str24",
+ "str25",
+ "str26"
+ ],
+ "ByteSlice": "Gxwd",
+ "Small": {
+ "Tag": "tag30"
+ },
+ "PSmall": {
+ "Tag": "tag31"
+ },
+ "PPSmall": null,
+ "Interface": 5.2,
+ "PInterface": null
+}`
+
+var allValueCompact = strings.Map(noSpace, allValueIndent)
+
+var pallValueIndent = `{
+ "Bool": false,
+ "Int": 0,
+ "Int8": 0,
+ "Int16": 0,
+ "Int32": 0,
+ "Int64": 0,
+ "Uint": 0,
+ "Uint8": 0,
+ "Uint16": 0,
+ "Uint32": 0,
+ "Uint64": 0,
+ "Uintptr": 0,
+ "Float32": 0,
+ "Float64": 0,
+ "bar": "",
+ "bar2": "",
+ "IntStr": "0",
+ "PBool": true,
+ "PInt": 2,
+ "PInt8": 3,
+ "PInt16": 4,
+ "PInt32": 5,
+ "PInt64": 6,
+ "PUint": 7,
+ "PUint8": 8,
+ "PUint16": 9,
+ "PUint32": 10,
+ "PUint64": 11,
+ "PUintptr": 12,
+ "PFloat32": 14.1,
+ "PFloat64": 15.1,
+ "String": "",
+ "PString": "16",
+ "Map": null,
+ "MapP": null,
+ "PMap": {
+ "17": {
+ "Tag": "tag17"
+ },
+ "18": {
+ "Tag": "tag18"
+ }
+ },
+ "PMapP": {
+ "19": {
+ "Tag": "tag19"
+ },
+ "20": null
+ },
+ "EmptyMap": null,
+ "NilMap": null,
+ "Slice": null,
+ "SliceP": null,
+ "PSlice": [
+ {
+ "Tag": "tag20"
+ },
+ {
+ "Tag": "tag21"
+ }
+ ],
+ "PSliceP": [
+ {
+ "Tag": "tag22"
+ },
+ null,
+ {
+ "Tag": "tag23"
+ }
+ ],
+ "EmptySlice": null,
+ "NilSlice": null,
+ "StringSlice": null,
+ "ByteSlice": null,
+ "Small": {
+ "Tag": ""
+ },
+ "PSmall": null,
+ "PPSmall": {
+ "Tag": "tag31"
+ },
+ "Interface": null,
+ "PInterface": 5.2
+}`
+
+var pallValueCompact = strings.Map(noSpace, pallValueIndent)
+
+func TestRefUnmarshal(t *testing.T) {
+ type S struct {
+ // Ref is defined in encode_test.go.
+ R0 Ref
+ R1 *Ref
+ R2 RefText
+ R3 *RefText
+ }
+ want := S{
+ R0: 12,
+ R1: new(Ref),
+ R2: 13,
+ R3: new(RefText),
+ }
+ *want.R1 = 12
+ *want.R3 = 13
+
+ var got S
+ if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("got %+v, want %+v", got, want)
+ }
+}
+
+// Test that the empty string doesn't panic decoding when ,string is specified
+// Issue 3450
+func TestEmptyString(t *testing.T) {
+ type T2 struct {
+ Number1 int `json:",string"`
+ Number2 int `json:",string"`
+ }
+ data := `{"Number1":"1", "Number2":""}`
+ dec := NewDecoder(strings.NewReader(data))
+ var t2 T2
+ err := dec.Decode(&t2)
+ if err == nil {
+ t.Fatal("Decode: did not return error")
+ }
+ if t2.Number1 != 1 {
+ t.Fatal("Decode: did not set Number1")
+ }
+}
+
+// Test that the returned error is non-nil when trying to unmarshal null string into int, for successive ,string option
+// Issue 7046
+func TestNullString(t *testing.T) {
+ type T struct {
+ A int `json:",string"`
+ B int `json:",string"`
+ }
+ data := []byte(`{"A": "1", "B": null}`)
+ var s T
+ err := Unmarshal(data, &s)
+ if err == nil {
+ t.Fatalf("expected error; got %v", s)
+ }
+}
+
+func intp(x int) *int {
+ p := new(int)
+ *p = x
+ return p
+}
+
+func intpp(x *int) **int {
+ pp := new(*int)
+ *pp = x
+ return pp
+}
+
+var interfaceSetTests = []struct {
+ pre interface{}
+ json string
+ post interface{}
+}{
+ {"foo", `"bar"`, "bar"},
+ {"foo", `2`, int32(2)},
+ {"foo", `true`, true},
+ {"foo", `null`, nil},
+
+ {nil, `null`, nil},
+ {new(int), `null`, nil},
+ {(*int)(nil), `null`, nil},
+ {new(*int), `null`, new(*int)},
+ {(**int)(nil), `null`, nil},
+ {intp(1), `null`, nil},
+ {intpp(nil), `null`, intpp(nil)},
+ {intpp(intp(1)), `null`, intpp(nil)},
+}
+
+func TestInterfaceSet(t *testing.T) {
+ for _, tt := range interfaceSetTests {
+ b := struct{ X interface{} }{tt.pre}
+ blob := `{"X":` + tt.json + `}`
+ if err := Unmarshal([]byte(blob), &b); err != nil {
+ t.Errorf("Unmarshal %#q: %v", blob, err)
+ continue
+ }
+ if !reflect.DeepEqual(b.X, tt.post) {
+ t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post)
+ }
+ }
+}
+
+// JSON null values should be ignored for primitives and string values instead of resulting in an error.
+// Issue 2540
+func TestUnmarshalNulls(t *testing.T) {
+ jsonData := []byte(`{
+ "Bool" : null,
+ "Int" : null,
+ "Int8" : null,
+ "Int16" : null,
+ "Int32" : null,
+ "Int64" : null,
+ "Uint" : null,
+ "Uint8" : null,
+ "Uint16" : null,
+ "Uint32" : null,
+ "Uint64" : null,
+ "Float32" : null,
+ "Float64" : null,
+ "String" : null}`)
+
+ nulls := All{
+ Bool: true,
+ Int: 2,
+ Int8: 3,
+ Int16: 4,
+ Int32: 5,
+ Int64: 6,
+ Uint: 7,
+ Uint8: 8,
+ Uint16: 9,
+ Uint32: 10,
+ Uint64: 11,
+ Float32: 12.1,
+ Float64: 13.1,
+ String: "14"}
+
+ err := Unmarshal(jsonData, &nulls)
+ if err != nil {
+ t.Errorf("Unmarshal of null values failed: %v", err)
+ }
+ if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 ||
+ nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 ||
+ nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" {
+
+ t.Errorf("Unmarshal of null values affected primitives")
+ }
+}
+
+func TestStringKind(t *testing.T) {
+ type stringKind string
+
+ var m1, m2 map[stringKind]int
+ m1 = map[stringKind]int{
+ "foo": 42,
+ }
+
+ data, err := Marshal(m1)
+ if err != nil {
+ t.Errorf("Unexpected error marshalling: %v", err)
+ }
+
+ err = Unmarshal(data, &m2)
+ if err != nil {
+ t.Errorf("Unexpected error unmarshalling: %v", err)
+ }
+
+ if !reflect.DeepEqual(m1, m2) {
+ t.Error("Items should be equal after encoding and then decoding")
+ }
+}
+
+var decodeTypeErrorTests = []struct {
+ dest interface{}
+ src string
+}{
+ {new(string), `{"user": "name"}`}, // issue 4628.
+ {new(error), `{}`}, // issue 4222
+ {new(error), `[]`},
+ {new(error), `""`},
+ {new(error), `123`},
+ {new(error), `true`},
+}
+
+func TestUnmarshalTypeError(t *testing.T) {
+ for _, item := range decodeTypeErrorTests {
+ err := Unmarshal([]byte(item.src), item.dest)
+ if _, ok := err.(*UnmarshalTypeError); !ok {
+ t.Errorf("expected type error for Unmarshal(%q, type %T): got %T",
+ item.src, item.dest, err)
+ }
+ }
+}
+
+var unmarshalSyntaxTests = []string{
+ "tru",
+ "fals",
+ "nul",
+ "123e",
+ `"hello`,
+ `[1,2,3`,
+ `{"key":1`,
+ `{"key":1,`,
+}
+
+func TestUnmarshalSyntax(t *testing.T) {
+ var x interface{}
+ for _, src := range unmarshalSyntaxTests {
+ err := Unmarshal([]byte(src), &x)
+ if _, ok := err.(*SyntaxError); !ok {
+ t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err)
+ }
+ }
+}
+
+// Test handling of unexported fields that should be ignored.
+// Issue 4660
+type unexportedFields struct {
+ Name string
+ m map[string]interface{}
+ m2 map[string]interface{}
+}
+
+func TestUnmarshalUnexported(t *testing.T) {
+ input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}`
+ want := &unexportedFields{Name: "Bob"}
+
+ out := &unexportedFields{}
+ err := Unmarshal([]byte(input), out)
+ if err != nil {
+ t.Errorf("got error %v, expected nil", err)
+ }
+ if !reflect.DeepEqual(out, want) {
+ t.Errorf("got %q, want %q", out, want)
+ }
+}
+
+// Time3339 is a time.Time which encodes to and from JSON
+// as an RFC 3339 time in UTC.
+type Time3339 time.Time
+
+func (t *Time3339) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b)
+ }
+ tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1]))
+ if err != nil {
+ return err
+ }
+ *t = Time3339(tm)
+ return nil
+}
+
+func TestUnmarshalJSONLiteralError(t *testing.T) {
+ var t3 Time3339
+ err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3)
+ if err == nil {
+ t.Fatalf("expected error; got time %v", time.Time(t3))
+ }
+ if !strings.Contains(err.Error(), "range") {
+ t.Errorf("got err = %v; want out of range error", err)
+ }
+}
+
+// Test that extra object elements in an array do not result in a
+// "data changing underfoot" error.
+// Issue 3717
+func TestSkipArrayObjects(t *testing.T) {
+ json := `[{}]`
+ var dest [0]interface{}
+
+ err := Unmarshal([]byte(json), &dest)
+ if err != nil {
+ t.Errorf("got error %q, want nil", err)
+ }
+}
+
+// Test semantics of pre-filled struct fields and pre-filled map fields.
+// Issue 4900.
+func TestPrefilled(t *testing.T) {
+ ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m }
+
+	// Unmarshal mutates these values, so the table cannot be reused across runs.
+ var prefillTests = []struct {
+ in string
+ ptr interface{}
+ out interface{}
+ }{
+ {
+ in: `{"X": 1, "Y": 2}`,
+ ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5},
+ out: &XYZ{X: int32(1), Y: int32(2), Z: 1.5},
+ },
+ {
+ in: `{"X": 1, "Y": 2}`,
+ ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}),
+ out: ptrToMap(map[string]interface{}{"X": int32(1), "Y": int32(2), "Z": 1.5}),
+ },
+ }
+
+ for _, tt := range prefillTests {
+ ptrstr := fmt.Sprintf("%v", tt.ptr)
+ err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here
+ if err != nil {
+ t.Errorf("Unmarshal: %v", err)
+ }
+ if !reflect.DeepEqual(tt.ptr, tt.out) {
+ t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out)
+ }
+ }
+}
+
+var invalidUnmarshalTests = []struct {
+ v interface{}
+ want string
+}{
+ {nil, "json: Unmarshal(nil)"},
+ {struct{}{}, "json: Unmarshal(non-pointer struct {})"},
+ {(*int)(nil), "json: Unmarshal(nil *int)"},
+}
+
+func TestInvalidUnmarshal(t *testing.T) {
+ buf := []byte(`{"a":"1"}`)
+ for _, tt := range invalidUnmarshalTests {
+ err := Unmarshal(buf, tt.v)
+ if err == nil {
+ t.Errorf("Unmarshal expecting error, got nil")
+ continue
+ }
+ if got := err.Error(); got != tt.want {
+ t.Errorf("Unmarshal = %q; want %q", got, tt.want)
+ }
+ }
+}
diff --git a/src/mongo/gotools/common/json/encode.go b/src/mongo/gotools/common/json/encode.go
new file mode 100644
index 00000000000..ee0fd021294
--- /dev/null
+++ b/src/mongo/gotools/common/json/encode.go
@@ -0,0 +1,1186 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON objects as defined in
+// RFC 4627. The mapping between JSON objects and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// http://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings. InvalidUTF8Error will be returned
+// if an invalid UTF-8 sequence is encountered.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON object.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// or integer types. This extra level of encoding is sometimes used when
+// communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects.
+// The map's key type must be string; the object keys are used directly
+// as map keys.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON object.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON object.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+ // The characters can only appear in string literals,
+ // so just scan the string one byte at a time.
+ start := 0
+ for i, c := range src {
+ if c == '<' || c == '>' || c == '&' {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+}
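+
+// For instance (an illustrative sketch; the expected bytes mirror
+// TestHTMLEscape added in this change):
+//
+//	var b bytes.Buffer
+//	HTMLEscape(&b, []byte(`{"M":"<html>foo &</html>"}`))
+//	// b.String() == `{"M":"\u003chtml\u003efoo \u0026\u003c/html\u003e"}`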
+
+// Marshaler is the interface implemented by objects that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "json: unsupported type: " + e.Type.String()
+}
+
+type UnsupportedValueError struct {
+ Value reflect.Value
+ Str string
+}
+
+func (e *UnsupportedValueError) Error() string {
+ return "json: unsupported value: " + e.Str
+}
+
+// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+// This error is no longer generated but is kept for backwards compatibility
+// with programs that might mention it.
+type InvalidUTF8Error struct {
+ S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+ return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
+type MarshalerError struct {
+ Type reflect.Type
+ Err error
+}
+
+func (e *MarshalerError) Error() string {
+ return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+var hex = "0123456789abcdef"
+
+// An encodeState encodes JSON into a bytes.Buffer.
+type encodeState struct {
+ bytes.Buffer // accumulated output
+ scratch [64]byte
+}
+
+var encodeStatePool sync.Pool
+
+func newEncodeState() *encodeState {
+ if v := encodeStatePool.Get(); v != nil {
+ e := v.(*encodeState)
+ e.Reset()
+ return e
+ }
+ return new(encodeState)
+}
+
+func (e *encodeState) marshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ if s, ok := r.(string); ok {
+ panic(s)
+ }
+ err = r.(error)
+ }
+ }()
+ e.reflectValue(reflect.ValueOf(v))
+ return nil
+}
+
+func (e *encodeState) error(err error) {
+ panic(err)
+}
+
+var byteSliceType = reflect.TypeOf([]byte(nil))
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func (e *encodeState) reflectValue(v reflect.Value) {
+ valueEncoder(v)(e, v, false)
+}
+
+type encoderFunc func(e *encodeState, v reflect.Value, quoted bool)
+
+var encoderCache struct {
+ sync.RWMutex
+ m map[reflect.Type]encoderFunc
+}
+
+func valueEncoder(v reflect.Value) encoderFunc {
+ if !v.IsValid() {
+ return invalidValueEncoder
+ }
+ return typeEncoder(v.Type())
+}
+
+func typeEncoder(t reflect.Type) encoderFunc {
+ encoderCache.RLock()
+ f := encoderCache.m[t]
+ encoderCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+	// To deal with recursive types, populate the map with an
+	// indirect func before we build the real one. This wrapper
+	// waits for the real func (f) to be ready and then calls it.
+	// The indirection is only needed for recursive types.
+ encoderCache.Lock()
+ if encoderCache.m == nil {
+ encoderCache.m = make(map[reflect.Type]encoderFunc)
+ }
+ var wg sync.WaitGroup
+ wg.Add(1)
+ encoderCache.m[t] = func(e *encodeState, v reflect.Value, quoted bool) {
+ wg.Wait()
+ f(e, v, quoted)
+ }
+ encoderCache.Unlock()
+
+	// Compute the real encoder without holding the lock.
+	// Might duplicate effort but won't hold other computations back.
+ f = newTypeEncoder(t, true)
+ wg.Done()
+ encoderCache.Lock()
+ encoderCache.m[t] = f
+ encoderCache.Unlock()
+ return f
+}
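+
+// The indirect func above matters for self-referential types. A hypothetical
+// example: building the encoder for the node type below re-enters typeEncoder
+// for node through the Next pointer, and must find the placeholder rather
+// than recurse forever.
+//
+//	type node struct {
+//		Value int
+//		Next  *node
+//	}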
+
+var (
+ marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+ textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+)
+
+// newTypeEncoder constructs an encoderFunc for a type.
+// The returned encoder only checks CanAddr when allowAddr is true.
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
+ if t.Implements(marshalerType) {
+ return marshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ if t.Implements(textMarshalerType) {
+ return textMarshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(textMarshalerType) {
+ return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return boolEncoder
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return intEncoder
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return uintEncoder
+ case reflect.Float32:
+ return float32Encoder
+ case reflect.Float64:
+ return float64Encoder
+ case reflect.String:
+ return stringEncoder
+ case reflect.Interface:
+ return interfaceEncoder
+ case reflect.Struct:
+ return newStructEncoder(t)
+ case reflect.Map:
+ return newMapEncoder(t)
+ case reflect.Slice:
+ return newSliceEncoder(t)
+ case reflect.Array:
+ return newArrayEncoder(t)
+ case reflect.Ptr:
+ return newPtrEncoder(t)
+ default:
+ return unsupportedTypeEncoder
+ }
+}
+
+func invalidValueEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ e.WriteString("null")
+}
+
+func marshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, true)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, true)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func textMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err == nil {
+ _, err = e.stringBytes(b)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err == nil {
+ _, err = e.stringBytes(b)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func boolEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if quoted {
+ e.WriteByte('"')
+ }
+ if v.Bool() {
+ e.WriteString("true")
+ } else {
+ e.WriteString("false")
+ }
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+func intEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+func uintEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
+
+type floatEncoder int // number of bits
+
+func (bits floatEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ f := v.Float()
+	if math.IsInf(f, 0) {
+		if math.IsInf(f, 1) { // positive infinity
+			e.WriteString("+Infinity")
+		} else { // negative infinity
+			e.WriteString("-Infinity")
+		}
+		return
+	}
+	if math.IsNaN(f) {
+		// Unlike the standard library, which would return
+		// &UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))}
+		// as an error here, this package emits a bare NaN token.
+		e.WriteString("NaN")
+		return
+	}
+ b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
+ if quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if quoted {
+ e.WriteByte('"')
+ }
+}
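+
+// A consequence of the branches above (sketch, not part of the original
+// change): this package round-trips IEEE special values that encoding/json
+// rejects.
+//
+//	b, _ := Marshal(math.Inf(1)) // b == []byte("+Infinity")
+//	b, _ = Marshal(math.NaN())   // b == []byte("NaN")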
+
+var (
+ float32Encoder = (floatEncoder(32)).encode
+ float64Encoder = (floatEncoder(64)).encode
+)
+
+func stringEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.Type() == numberType {
+ numStr := v.String()
+ if numStr == "" {
+ numStr = "0" // Number's zero-val
+ }
+ e.WriteString(numStr)
+ return
+ }
+ if quoted {
+ sb, err := Marshal(v.String())
+ if err != nil {
+ e.error(err)
+ }
+ e.string(string(sb))
+ } else {
+ e.string(v.String())
+ }
+}
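+
+// Note (sketch): because of the zero-value branch above, Marshal(Number(""))
+// emits 0 rather than an empty, invalid JSON token.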
+
+func interfaceEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.reflectValue(v.Elem())
+}
+
+func unsupportedTypeEncoder(e *encodeState, v reflect.Value, quoted bool) {
+ e.error(&UnsupportedTypeError{v.Type()})
+}
+
+type structEncoder struct {
+ fields []field
+ fieldEncs []encoderFunc
+}
+
+func (se *structEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ e.WriteByte('{')
+ first := true
+ for i, f := range se.fields {
+ fv := fieldByIndex(v, f.index)
+ if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ e.WriteByte(',')
+ }
+ e.string(f.name)
+ e.WriteByte(':')
+ se.fieldEncs[i](e, fv, f.quoted)
+ }
+ e.WriteByte('}')
+}
+
+func newStructEncoder(t reflect.Type) encoderFunc {
+ fields := cachedTypeFields(t)
+ se := &structEncoder{
+ fields: fields,
+ fieldEncs: make([]encoderFunc, len(fields)),
+ }
+ for i, f := range fields {
+ se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
+ }
+ return se.encode
+}
+
+type mapEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (me *mapEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.WriteByte('{')
+ var sv stringValues
+ sv = v.MapKeys()
+ sort.Sort(sv)
+ for i, k := range sv {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ e.string(k.String())
+ e.WriteByte(':')
+ me.elemEnc(e, v.MapIndex(k), false)
+ }
+ e.WriteByte('}')
+}
+
+func newMapEncoder(t reflect.Type) encoderFunc {
+ if t.Key().Kind() != reflect.String {
+ return unsupportedTypeEncoder
+ }
+ me := &mapEncoder{typeEncoder(t.Elem())}
+ return me.encode
+}
+
+func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ s := v.Bytes()
+ e.WriteByte('"')
+ if len(s) < 1024 {
+ // for small buffers, using Encode directly is much faster.
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+ base64.StdEncoding.Encode(dst, s)
+ e.Write(dst)
+ } else {
+ // for large buffers, avoid unnecessary extra temporary
+ // buffer space.
+ enc := base64.NewEncoder(base64.StdEncoding, e)
+ enc.Write(s)
+ enc.Close()
+ }
+ e.WriteByte('"')
+}
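+
+// For example (sketch; the expected output matches
+// TestEncodeRenamedByteSlice in this change):
+//
+//	b, _ := Marshal([]byte("abc")) // b == []byte(`"YWJj"`)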
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+ arrayEnc encoderFunc
+}
+
+func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ se.arrayEnc(e, v, false)
+}
+
+func newSliceEncoder(t reflect.Type) encoderFunc {
+ // Byte slices get special treatment; arrays don't.
+ if t.Elem().Kind() == reflect.Uint8 {
+ return encodeByteSlice
+ }
+ enc := &sliceEncoder{newArrayEncoder(t)}
+ return enc.encode
+}
+
+type arrayEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ e.WriteByte('[')
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ ae.elemEnc(e, v.Index(i), false)
+ }
+ e.WriteByte(']')
+}
+
+func newArrayEncoder(t reflect.Type) encoderFunc {
+ enc := &arrayEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type ptrEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, _ bool) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ pe.elemEnc(e, v.Elem(), false)
+}
+
+func newPtrEncoder(t reflect.Type) encoderFunc {
+ enc := &ptrEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type condAddrEncoder struct {
+ canAddrEnc, elseEnc encoderFunc
+}
+
+func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, quoted bool) {
+ if v.CanAddr() {
+ ce.canAddrEnc(e, v, quoted)
+ } else {
+ ce.elseEnc(e, v, quoted)
+ }
+}
+
+// newCondAddrEncoder returns an encoder that checks whether its value
+// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
+ enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+ return enc.encode
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
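+
+// A few concrete cases of the rule above (illustrative sketch):
+//
+//	isValidTag("my-name") // true: letters plus allowed punctuation
+//	isValidTag("a.b/c")   // true: '.' and '/' are in the allowed set
+//	isValidTag(`a"b`)     // false: quote characters are reserved
+//	isValidTag("")        // false: empty names are rejected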
+
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
+ for _, i := range index {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ }
+ v = v.Field(i)
+ }
+ return v
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+ for _, i := range index {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ t = t.Field(i).Type
+ }
+ return t
+}
+
+// stringValues is a slice of reflect.Value holding string-kinded values.
+// It implements sort.Interface to sort by string value.
+type stringValues []reflect.Value
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) }
+func (sv stringValues) get(i int) string { return sv[i].String() }
+
+// NOTE: keep in sync with stringBytes below.
+func (e *encodeState) string(s string) (int, error) {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ default:
+ // This encodes bytes < 0x20 except for \n and \r,
+ // as well as <, > and &. The latter are escaped because they
+ // can lead to security holes when user-controlled strings
+ // are rendered into JSON and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.WriteString(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0, nil
+}
+
+// NOTE: keep in sync with string above.
+func (e *encodeState) stringBytes(s []byte) (int, error) {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ e.Write(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ default:
+				// This encodes bytes < 0x20 except for \n and \r,
+				// as well as <, > and &. The latter are escaped because they
+				// can lead to security holes when user-controlled strings
+				// are rendered into JSON and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRune(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.Write(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0, nil
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts fields by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts fields by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: opts.Contains("string"),
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
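+
+// A sketch of the dominance rule, using hypothetical types analogous to the
+// BugY case in encode_test.go: the tagged field wins over the untagged one
+// at the same depth.
+//
+//	type Inner struct{ S string }
+//	type Tagged struct {
+//		X string `json:"S"`
+//	}
+//	type Outer struct {
+//		Inner  // untagged S
+//		Tagged // tagged S dominates: Marshal(Outer{}) emits {"S":""}
+//	}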
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
diff --git a/src/mongo/gotools/common/json/encode_test.go b/src/mongo/gotools/common/json/encode_test.go
new file mode 100644
index 00000000000..8226a3df71a
--- /dev/null
+++ b/src/mongo/gotools/common/json/encode_test.go
@@ -0,0 +1,453 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "math"
+ "reflect"
+ "testing"
+ "unicode"
+)
+
+type Optionals struct {
+ Sr string `json:"sr"`
+ So string `json:"so,omitempty"`
+ Sw string `json:"-"`
+
+ Ir int `json:"omitempty"` // actually named omitempty, not an option
+ Io int `json:"io,omitempty"`
+
+ Slr []string `json:"slr,random"`
+ Slo []string `json:"slo,omitempty"`
+
+ Mr map[string]interface{} `json:"mr"`
+ Mo map[string]interface{} `json:",omitempty"`
+
+ Fr float64 `json:"fr"`
+ Fo float64 `json:"fo,omitempty"`
+
+ Br bool `json:"br"`
+ Bo bool `json:"bo,omitempty"`
+
+ Ur uint `json:"ur"`
+ Uo uint `json:"uo,omitempty"`
+
+ Str struct{} `json:"str"`
+ Sto struct{} `json:"sto,omitempty"`
+}
+
+var optionalsExpected = `{
+ "sr": "",
+ "omitempty": 0,
+ "slr": null,
+ "mr": {},
+ "fr": 0,
+ "br": false,
+ "ur": 0,
+ "str": {},
+ "sto": {}
+}`
+
+func TestOmitEmpty(t *testing.T) {
+ var o Optionals
+ o.Sw = "something"
+ o.Mr = map[string]interface{}{}
+ o.Mo = map[string]interface{}{}
+
+ got, err := MarshalIndent(&o, "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got := string(got); got != optionalsExpected {
+ t.Errorf(" got: %s\nwant: %s\n", got, optionalsExpected)
+ }
+}
+
+type StringTag struct {
+ BoolStr bool `json:",string"`
+ IntStr int64 `json:",string"`
+ StrStr string `json:",string"`
+}
+
+var stringTagExpected = `{
+ "BoolStr": "true",
+ "IntStr": "42",
+ "StrStr": "\"xzbit\""
+}`
+
+func TestStringTag(t *testing.T) {
+ var s StringTag
+ s.BoolStr = true
+ s.IntStr = 42
+ s.StrStr = "xzbit"
+ got, err := MarshalIndent(&s, "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got := string(got); got != stringTagExpected {
+ t.Fatalf(" got: %s\nwant: %s\n", got, stringTagExpected)
+ }
+
+ // Verify that it round-trips.
+ var s2 StringTag
+ err = NewDecoder(bytes.NewReader(got)).Decode(&s2)
+ if err != nil {
+ t.Fatalf("Decode: %v", err)
+ }
+ if !reflect.DeepEqual(s, s2) {
+ t.Fatalf("decode didn't match.\nsource: %#v\nEncoded as:\n%s\ndecode: %#v", s, string(got), s2)
+ }
+}
+
+// byte slices are special even if they're renamed types.
+type renamedByte byte
+type renamedByteSlice []byte
+type renamedRenamedByteSlice []renamedByte
+
+func TestEncodeRenamedByteSlice(t *testing.T) {
+ s := renamedByteSlice("abc")
+ result, err := Marshal(s)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expect := `"YWJj"`
+ if string(result) != expect {
+ t.Errorf(" got %s want %s", result, expect)
+ }
+ r := renamedRenamedByteSlice("abc")
+ result, err = Marshal(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(result) != expect {
+ t.Errorf(" got %s want %s", result, expect)
+ }
+}
+
+func TestFloatSpecialValues(t *testing.T) {
+ _, err := Marshal(math.NaN())
+ if err != nil {
+ t.Errorf("Got error for NaN: %v", err)
+ }
+
+ _, err = Marshal(math.Inf(-1))
+ if err != nil {
+ t.Errorf("Got error for -Inf: %v", err)
+ }
+
+ _, err = Marshal(math.Inf(1))
+ if err != nil {
+ t.Errorf("Got error for +Inf: %v", err)
+ }
+}
+
+// Ref has Marshaler and Unmarshaler methods with pointer receiver.
+type Ref int
+
+func (*Ref) MarshalJSON() ([]byte, error) {
+ return []byte(`"ref"`), nil
+}
+
+func (r *Ref) UnmarshalJSON([]byte) error {
+ *r = 12
+ return nil
+}
+
+// Val has Marshaler methods with value receiver.
+type Val int
+
+func (Val) MarshalJSON() ([]byte, error) {
+ return []byte(`"val"`), nil
+}
+
+// RefText has Marshaler and Unmarshaler methods with pointer receiver.
+type RefText int
+
+func (*RefText) MarshalText() ([]byte, error) {
+ return []byte(`"ref"`), nil
+}
+
+func (r *RefText) UnmarshalText([]byte) error {
+ *r = 13
+ return nil
+}
+
+// ValText has Marshaler methods with value receiver.
+type ValText int
+
+func (ValText) MarshalText() ([]byte, error) {
+ return []byte(`"val"`), nil
+}
+
+func TestRefValMarshal(t *testing.T) {
+ var s = struct {
+ R0 Ref
+ R1 *Ref
+ R2 RefText
+ R3 *RefText
+ V0 Val
+ V1 *Val
+ V2 ValText
+ V3 *ValText
+ }{
+ R0: 12,
+ R1: new(Ref),
+ R2: 14,
+ R3: new(RefText),
+ V0: 13,
+ V1: new(Val),
+ V2: 15,
+ V3: new(ValText),
+ }
+ const want = `{"R0":"ref","R1":"ref","R2":"\"ref\"","R3":"\"ref\"","V0":"val","V1":"val","V2":"\"val\"","V3":"\"val\""}`
+ b, err := Marshal(&s)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+// C1 implements Marshaler and returns unescaped JSON.
+type C1 int
+
+func (C1) MarshalJSON() ([]byte, error) {
+ return []byte(`"<&>"`), nil
+}
+
+// CText implements Marshaler and returns unescaped text.
+type CText int
+
+func (CText) MarshalText() ([]byte, error) {
+ return []byte(`"<&>"`), nil
+}
+
+func TestMarshalerEscaping(t *testing.T) {
+ var c C1
+ want := `"\u003c\u0026\u003e"`
+ b, err := Marshal(c)
+ if err != nil {
+ t.Fatalf("Marshal(c1): %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("Marshal(c1) = %#q, want %#q", got, want)
+ }
+
+ var ct CText
+ want = `"\"\u003c\u0026\u003e\""`
+ b, err = Marshal(ct)
+ if err != nil {
+ t.Fatalf("Marshal(ct): %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("Marshal(ct) = %#q, want %#q", got, want)
+ }
+}
+
+type IntType int
+
+type MyStruct struct {
+ IntType
+}
+
+func TestAnonymousNonstruct(t *testing.T) {
+ var i IntType = 11
+ a := MyStruct{i}
+ const want = `{"IntType":11}`
+
+ b, err := Marshal(a)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+type BugA struct {
+ S string
+}
+
+type BugB struct {
+ BugA
+ S string
+}
+
+type BugC struct {
+ S string
+}
+
+// Legal Go: We never use the repeated embedded field (S).
+type BugX struct {
+ A int
+ BugA
+ BugB
+}
+
+// Issue 5245.
+func TestEmbeddedBug(t *testing.T) {
+ v := BugB{
+ BugA{"A"},
+ "B",
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want := `{"S":"B"}`
+ got := string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+ // Now check that the duplicate field, S, does not appear.
+ x := BugX{
+ A: 23,
+ }
+ b, err = Marshal(x)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want = `{"A":23}`
+ got = string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+}
+
+type BugD struct { // Same as BugA after tagging.
+ XXX string `json:"S"`
+}
+
+// BugD's tagged S field should dominate BugA's.
+type BugY struct {
+ BugA
+ BugD
+}
+
+// Test that a field with a tag dominates untagged fields.
+func TestTaggedFieldDominates(t *testing.T) {
+ v := BugY{
+ BugA{"BugA"},
+ BugD{"BugD"},
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want := `{"S":"BugD"}`
+ got := string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+}
+
+// There are no tags here, so S should not appear.
+type BugZ struct {
+ BugA
+ BugC
+ BugY // Contains a tagged S field through BugD; should not dominate.
+}
+
+func TestDuplicatedFieldDisappears(t *testing.T) {
+ v := BugZ{
+ BugA{"BugA"},
+ BugC{"BugC"},
+ BugY{
+ BugA{"nested BugA"},
+ BugD{"nested BugD"},
+ },
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want := `{}`
+ got := string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+}
+
+func TestStringBytes(t *testing.T) {
+ // Test that encodeState.stringBytes and encodeState.string use the same encoding.
+ es := &encodeState{}
+ var r []rune
+ for i := '\u0000'; i <= unicode.MaxRune; i++ {
+ r = append(r, i)
+ }
+ s := string(r) + "\xff\xff\xffhello" // some invalid UTF-8 too
+ _, err := es.string(s)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ esBytes := &encodeState{}
+ _, err = esBytes.stringBytes([]byte(s))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ enc := es.Buffer.String()
+ encBytes := esBytes.Buffer.String()
+ if enc != encBytes {
+ i := 0
+ for i < len(enc) && i < len(encBytes) && enc[i] == encBytes[i] {
+ i++
+ }
+ enc = enc[i:]
+ encBytes = encBytes[i:]
+ i = 0
+ for i < len(enc) && i < len(encBytes) && enc[len(enc)-i-1] == encBytes[len(encBytes)-i-1] {
+ i++
+ }
+ enc = enc[:len(enc)-i]
+ encBytes = encBytes[:len(encBytes)-i]
+
+ if len(enc) > 20 {
+ enc = enc[:20] + "..."
+ }
+ if len(encBytes) > 20 {
+ encBytes = encBytes[:20] + "..."
+ }
+
+ t.Errorf("encodings differ at %#q vs %#q", enc, encBytes)
+ }
+}
+
+func TestIssue6458(t *testing.T) {
+ type Foo struct {
+ M RawMessage
+ }
+ x := Foo{RawMessage(`"foo"`)}
+
+ b, err := Marshal(&x)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want := `{"M":"foo"}`; string(b) != want {
+ t.Errorf("Marshal(&x) = %#q; want %#q", b, want)
+ }
+
+ b, err = Marshal(x)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want := `{"M":"ImZvbyI="}`; string(b) != want {
+ t.Errorf("Marshal(x) = %#q; want %#q", b, want)
+ }
+}
+
+func TestHTMLEscape(t *testing.T) {
+ var b, want bytes.Buffer
+ m := `{"M":"<html>foo &` + "\xe2\x80\xa8 \xe2\x80\xa9" + `</html>"}`
+ want.Write([]byte(`{"M":"\u003chtml\u003efoo \u0026\u2028 \u2029\u003c/html\u003e"}`))
+ HTMLEscape(&b, []byte(m))
+ if !bytes.Equal(b.Bytes(), want.Bytes()) {
+ t.Errorf("HTMLEscape(&b, []byte(m)) = %s; want %s", b.Bytes(), want.Bytes())
+ }
+}
diff --git a/src/mongo/gotools/common/json/example_test.go b/src/mongo/gotools/common/json/example_test.go
new file mode 100644
index 00000000000..ca4e5ae68d4
--- /dev/null
+++ b/src/mongo/gotools/common/json/example_test.go
@@ -0,0 +1,161 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "strings"
+)
+
+func ExampleMarshal() {
+ type ColorGroup struct {
+ ID int
+ Name string
+ Colors []string
+ }
+ group := ColorGroup{
+ ID: 1,
+ Name: "Reds",
+ Colors: []string{"Crimson", "Red", "Ruby", "Maroon"},
+ }
+ b, err := json.Marshal(group)
+ if err != nil {
+ fmt.Println("error:", err)
+ }
+ os.Stdout.Write(b)
+ // Output:
+ // {"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]}
+}
+
+func ExampleUnmarshal() {
+ var jsonBlob = []byte(`[
+ {"Name": "Platypus", "Order": "Monotremata"},
+ {"Name": "Quoll", "Order": "Dasyuromorphia"}
+ ]`)
+ type Animal struct {
+ Name string
+ Order string
+ }
+ var animals []Animal
+ err := json.Unmarshal(jsonBlob, &animals)
+ if err != nil {
+ fmt.Println("error:", err)
+ }
+ fmt.Printf("%+v", animals)
+ // Output:
+ // [{Name:Platypus Order:Monotremata} {Name:Quoll Order:Dasyuromorphia}]
+}
+
+// This example uses a Decoder to decode a stream of distinct JSON values.
+func ExampleDecoder() {
+ const jsonStream = `
+ {"Name": "Ed", "Text": "Knock knock."}
+ {"Name": "Sam", "Text": "Who's there?"}
+ {"Name": "Ed", "Text": "Go fmt."}
+ {"Name": "Sam", "Text": "Go fmt who?"}
+ {"Name": "Ed", "Text": "Go fmt yourself!"}
+ `
+ type Message struct {
+ Name, Text string
+ }
+ dec := json.NewDecoder(strings.NewReader(jsonStream))
+ for {
+ var m Message
+ if err := dec.Decode(&m); err == io.EOF {
+ break
+ } else if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("%s: %s\n", m.Name, m.Text)
+ }
+ // Output:
+ // Ed: Knock knock.
+ // Sam: Who's there?
+ // Ed: Go fmt.
+ // Sam: Go fmt who?
+ // Ed: Go fmt yourself!
+}
+
+// This example uses RawMessage to delay parsing part of a JSON message.
+func ExampleRawMessage() {
+ type Color struct {
+ Space string
+ Point json.RawMessage // delay parsing until we know the color space
+ }
+ type RGB struct {
+ R uint8
+ G uint8
+ B uint8
+ }
+ type YCbCr struct {
+ Y uint8
+ Cb int8
+ Cr int8
+ }
+
+ var j = []byte(`[
+ {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
+ {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
+ ]`)
+ var colors []Color
+ err := json.Unmarshal(j, &colors)
+ if err != nil {
+ log.Fatalln("error:", err)
+ }
+
+ for _, c := range colors {
+ var dst interface{}
+ switch c.Space {
+ case "RGB":
+ dst = new(RGB)
+ case "YCbCr":
+ dst = new(YCbCr)
+ }
+ err := json.Unmarshal(c.Point, dst)
+ if err != nil {
+ log.Fatalln("error:", err)
+ }
+ fmt.Println(c.Space, dst)
+ }
+ // Output:
+ // YCbCr &{255 0 -10}
+ // RGB &{98 218 255}
+}
+
+func ExampleIndent() {
+ type Road struct {
+ Name string
+ Number int
+ }
+ roads := []Road{
+ {"Diamond Fork", 29},
+ {"Sheep Creek", 51},
+ }
+
+ b, err := json.Marshal(roads)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var out bytes.Buffer
+ json.Indent(&out, b, "=", "\t")
+ out.WriteTo(os.Stdout)
+ // Output:
+ // [
+ // = {
+ // = "Name": "Diamond Fork",
+ // = "Number": 29
+ // = },
+ // = {
+ // = "Name": "Sheep Creek",
+ // = "Number": 51
+ // = }
+ // =]
+}
diff --git a/src/mongo/gotools/common/json/float_test.go b/src/mongo/gotools/common/json/float_test.go
new file mode 100644
index 00000000000..32b9a6315d6
--- /dev/null
+++ b/src/mongo/gotools/common/json/float_test.go
@@ -0,0 +1,93 @@
+package json
+
+import (
+	"fmt"
+	"testing"
+
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func TestNumberFloatValue(t *testing.T) {
+
+ Convey("When unmarshaling JSON with float values", t, func() {
+
+ Convey("converts to a JSON NumberFloat value", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "5.5"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(float64)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberFloat(5.5))
+
+ })
+ })
+
+ Convey("When unmarshaling and marshaling NumberFloat values", t, func() {
+ key := "key"
+
+ Convey("maintains decimal point with trailing zero", func() {
+ var jsonMap map[string]interface{}
+
+ value := "5.0"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(float64)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberFloat(5.0))
+
+ numFloat := NumberFloat(jsonValue)
+ byteValue, err := numFloat.MarshalJSON()
+ So(err, ShouldBeNil)
+ So(string(byteValue), ShouldEqual, "5.0")
+
+ })
+
+ Convey("maintains precision with large decimals", func() {
+ var jsonMap map[string]interface{}
+
+ value := "5.52342123"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(float64)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberFloat(5.52342123))
+
+ numFloat := NumberFloat(jsonValue)
+ byteValue, err := numFloat.MarshalJSON()
+ So(err, ShouldBeNil)
+ So(string(byteValue), ShouldEqual, "5.52342123")
+
+ })
+
+ Convey("maintains exponent values", func() {
+ var jsonMap map[string]interface{}
+
+ value := "5e+32"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(float64)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberFloat(5e32))
+
+ numFloat := NumberFloat(jsonValue)
+ byteValue, err := numFloat.MarshalJSON()
+ So(err, ShouldBeNil)
+ So(string(byteValue), ShouldEqual, "5e+32")
+
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/fold.go b/src/mongo/gotools/common/json/fold.go
new file mode 100644
index 00000000000..d6f77c93e57
--- /dev/null
+++ b/src/mongo/gotools/common/json/fold.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "unicode/utf8"
+)
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
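+
+// For instance (a sketch of the selection rule above):
+//
+//	foldFunc([]byte("Name"))  // simpleLetterEqualFold: ASCII letters only
+//	foldFunc([]byte("_id"))   // asciiEqualFold: '_' is a non-letter
+//	foldFunc([]byte("Kind"))  // equalFoldRight: 'K' folds specially
+//	foldFunc([]byte("naïve")) // bytes.EqualFold: contains non-ASCII UTF-8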
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
diff --git a/src/mongo/gotools/common/json/fold_test.go b/src/mongo/gotools/common/json/fold_test.go
new file mode 100644
index 00000000000..9fb94646a85
--- /dev/null
+++ b/src/mongo/gotools/common/json/fold_test.go
@@ -0,0 +1,116 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+ "unicode/utf8"
+)
+
+var foldTests = []struct {
+ fn func(s, t []byte) bool
+ s, t string
+ want bool
+}{
+ {equalFoldRight, "", "", true},
+ {equalFoldRight, "a", "a", true},
+ {equalFoldRight, "", "a", false},
+ {equalFoldRight, "a", "", false},
+ {equalFoldRight, "a", "A", true},
+ {equalFoldRight, "AB", "ab", true},
+ {equalFoldRight, "AB", "ac", false},
+ {equalFoldRight, "sbkKc", "ſbKKc", true},
+ {equalFoldRight, "SbKkc", "ſbKKc", true},
+ {equalFoldRight, "SbKkc", "ſbKK", false},
+ {equalFoldRight, "e", "é", false},
+ {equalFoldRight, "s", "S", true},
+
+ {simpleLetterEqualFold, "", "", true},
+ {simpleLetterEqualFold, "abc", "abc", true},
+ {simpleLetterEqualFold, "abc", "ABC", true},
+ {simpleLetterEqualFold, "abc", "ABCD", false},
+ {simpleLetterEqualFold, "abc", "xxx", false},
+
+ {asciiEqualFold, "a_B", "A_b", true},
+ {asciiEqualFold, "aa@", "aa`", false}, // verify 0x40 and 0x60 aren't case-equivalent
+}
+
+func TestFold(t *testing.T) {
+ for i, tt := range foldTests {
+ if got := tt.fn([]byte(tt.s), []byte(tt.t)); got != tt.want {
+ t.Errorf("%d. %q, %q = %v; want %v", i, tt.s, tt.t, got, tt.want)
+ }
+ truth := strings.EqualFold(tt.s, tt.t)
+ if truth != tt.want {
+ t.Errorf("strings.EqualFold doesn't agree with case %d", i)
+ }
+ }
+}
+
+func TestFoldAgainstUnicode(t *testing.T) {
+ const bufSize = 5
+ buf1 := make([]byte, 0, bufSize)
+ buf2 := make([]byte, 0, bufSize)
+ var runes []rune
+ for i := 0x20; i <= 0x7f; i++ {
+ runes = append(runes, rune(i))
+ }
+ runes = append(runes, kelvin, smallLongEss)
+
+ funcs := []struct {
+ name string
+ fold func(s, t []byte) bool
+ letter bool // must be ASCII letter
+ simple bool // must be simple ASCII letter (not 'S' or 'K')
+ }{
+ {
+ name: "equalFoldRight",
+ fold: equalFoldRight,
+ },
+ {
+ name: "asciiEqualFold",
+ fold: asciiEqualFold,
+ simple: true,
+ },
+ {
+ name: "simpleLetterEqualFold",
+ fold: simpleLetterEqualFold,
+ simple: true,
+ letter: true,
+ },
+ }
+
+ for _, ff := range funcs {
+ for _, r := range runes {
+ if r >= utf8.RuneSelf {
+ continue
+ }
+ if ff.letter && !isASCIILetter(byte(r)) {
+ continue
+ }
+ if ff.simple && (r == 's' || r == 'S' || r == 'k' || r == 'K') {
+ continue
+ }
+ for _, r2 := range runes {
+ buf1 := append(buf1[:0], 'x')
+ buf2 := append(buf2[:0], 'x')
+ buf1 = buf1[:1+utf8.EncodeRune(buf1[1:bufSize], r)]
+ buf2 = buf2[:1+utf8.EncodeRune(buf2[1:bufSize], r2)]
+ buf1 = append(buf1, 'x')
+ buf2 = append(buf2, 'x')
+ want := bytes.EqualFold(buf1, buf2)
+ if got := ff.fold(buf1, buf2); got != want {
+ t.Errorf("%s(%q, %q) = %v; want %v", ff.name, buf1, buf2, got, want)
+ }
+ }
+ }
+ }
+}
+
+func isASCIILetter(b byte) bool {
+ return ('A' <= b && b <= 'Z') || ('a' <= b && b <= 'z')
+}
diff --git a/src/mongo/gotools/common/json/frac_test.go b/src/mongo/gotools/common/json/frac_test.go
new file mode 100644
index 00000000000..33d5058cad9
--- /dev/null
+++ b/src/mongo/gotools/common/json/frac_test.go
@@ -0,0 +1,98 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestFractionalNumber(t *testing.T) {
+
+ Convey("When unmarshalling JSON with fractional numeric values "+
+ "without a leading zero", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := ".123"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(float64)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldAlmostEqual, 0.123)
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := ".123", ".456", ".789"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(float64)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldAlmostEqual, 0.123)
+
+ jsonValue2, ok := jsonMap[key2].(float64)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldAlmostEqual, 0.456)
+
+ jsonValue3, ok := jsonMap[key3].(float64)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldAlmostEqual, 0.789)
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := ".42"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(float64)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldAlmostEqual, 0.42)
+ }
+ })
+
+ Convey("can have a sign ('+' or '-')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := ".106"
+ data := fmt.Sprintf(`{"%v":+%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(float64)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldAlmostEqual, 0.106)
+
+ data = fmt.Sprintf(`{"%v":-%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(float64)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldAlmostEqual, -0.106)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/helpers.go b/src/mongo/gotools/common/json/helpers.go
new file mode 100644
index 00000000000..09b7e3ca5b9
--- /dev/null
+++ b/src/mongo/gotools/common/json/helpers.go
@@ -0,0 +1,76 @@
+package json
+
+import "fmt"
+
+// Returns true if the byte array represents the null literal,
+// and false otherwise. Assumes that checking the first two bytes
+// (`nu`) is sufficient to distinguish null from the other literals
+// the scanner can produce here.
+func isNull(s []byte) bool {
+ return len(s) > 1 && s[0] == 'n' && s[1] == 'u'
+}
+
+// Returns true if the byte array represents some kind of number literal,
+// e.g. +123, -0.456, NaN, or Infinity, and false otherwise. Assumes that
+// the first character is sufficient to distinguish between these cases,
+// except for `N` (NaN) and `I` (Infinity), where the second character
+// must also be checked.
+func isNumber(s []byte) bool {
+ if len(s) == 0 {
+ return false
+ }
+ if len(s) > 1 && ((s[0] == 'N' && s[1] == 'a') || (s[0] == 'I' && s[1] == 'n')) { // NaN or Infinity
+ return true
+ }
+ return s[0] == '+' || s[0] == '-' || s[0] == '.' || (s[0] >= '0' && s[0] <= '9')
+}
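+
+// Examples (added commentary):
+//
+//	isNumber([]byte("+123"))     // true
+//	isNumber([]byte(".5"))       // true
+//	isNumber([]byte("NaN"))      // true
+//	isNumber([]byte("Infinity")) // true
+//	isNumber([]byte("null"))     // false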
+
+// Returns true if the string represents the start of a hexadecimal
+// literal, e.g. 0X123, -0x456, +0x789. Returns false for the empty string.
+func isHexPrefix(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+ if len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') {
+ return true
+ }
+ return (s[0] == '+' || s[0] == '-') && isHexPrefix(s[1:])
+}
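+
+// Examples (added commentary):
+//
+//	isHexPrefix("0x123")  // true
+//	isHexPrefix("-0X456") // true
+//	isHexPrefix("123")    // false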
+
+// Returns the accept state (a transition function) if x is empty.
+// Otherwise returns a function that, upon matching the first byte
+// of x, installs another function to match the second, and so on,
+// finally installing accept once no bytes remain.
+func generateState(name string, x []byte, accept func(*scanner, int) int) func(*scanner, int) int {
+ if len(x) == 0 {
+ return accept
+ }
+
+ return func(s *scanner, c int) int {
+ if c == int(x[0]) {
+ s.step = generateState(name, x[1:], accept)
+ return scanContinue
+ }
+ return s.error(c, fmt.Sprintf("in literal %v (expecting '%v')", name, string(x[0])))
+ }
+}
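+
+// Illustrative sketch (added commentary): the scanner consumes one byte
+// per step, so a state that has just matched 't' could finish the literal
+// "true" with
+//
+//	s.step = generateState("true", []byte("rue"), stateEndValue)
+//
+// which accepts exactly "rue" and then installs stateEndValue.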
+
+// stateOptionalConstructor is the state after a literal that may optionally be followed by an empty constructor, e.g. MinKey vs. MinKey().
+func stateOptionalConstructor(s *scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return scanContinue
+ }
+ if c == '(' {
+ s.step = stateInParen
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateInParen is the state when inside a `(`, waiting for a `)`.
+func stateInParen(s *scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return scanContinue
+ }
+ if c == ')' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "expecting ')' as next character")
+}
diff --git a/src/mongo/gotools/common/json/hex.go b/src/mongo/gotools/common/json/hex.go
new file mode 100644
index 00000000000..625dcab6493
--- /dev/null
+++ b/src/mongo/gotools/common/json/hex.go
@@ -0,0 +1,13 @@
+package json
+
+// Transition function for recognizing hexadecimal numbers.
+// Adapted from encoding/json/scanner.go.
+
+// stateHex is the state after reading `0x` or `0X`.
+func stateHex(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateHex
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
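+
+// For example (added commentary): while scanning 0x1A3 the scanner stays
+// in stateHex for '1', 'A', and '3', then defers to stateEndValue at the
+// first non-hex character.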
diff --git a/src/mongo/gotools/common/json/hex_test.go b/src/mongo/gotools/common/json/hex_test.go
new file mode 100644
index 00000000000..3067d07d34f
--- /dev/null
+++ b/src/mongo/gotools/common/json/hex_test.go
@@ -0,0 +1,117 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestHexadecimalNumber(t *testing.T) {
+ value := "0x123"
+ intValue := 0x123
+
+ Convey("When unmarshalling JSON with hexadecimal numeric values", t, func() {
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+ key := "key"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+ jsonValue, ok := jsonMap[key].(int32)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, intValue)
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "0x100", "0x101", "0x102"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(int32)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldEqual, 0x100)
+
+ jsonValue2, ok := jsonMap[key2].(int32)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldEqual, 0x101)
+
+ jsonValue3, ok := jsonMap[key3].(int32)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldEqual, 0x102)
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(int32)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, intValue)
+ }
+ })
+
+ Convey("can have a sign ('+' or '-')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ data := fmt.Sprintf(`{"%v":+%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(int32)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, intValue)
+
+ data = fmt.Sprintf(`{"%v":-%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(int32)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, -intValue)
+ })
+
+ Convey("can use '0x' or '0X' prefix", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "123"
+ data := fmt.Sprintf(`{"%v":0x%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(int32)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, intValue)
+
+ data = fmt.Sprintf(`{"%v":0X%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(int32)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, intValue)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/indent.go b/src/mongo/gotools/common/json/indent.go
new file mode 100644
index 00000000000..e1bacafd6b8
--- /dev/null
+++ b/src/mongo/gotools/common/json/indent.go
@@ -0,0 +1,137 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "bytes"
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+ return compact(dst, src, false)
+}
+
+func compact(dst *bytes.Buffer, src []byte, escape bool) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ start := 0
+ for i, c := range src {
+ if escape && (c == '<' || c == '>' || c == '&') {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ v := scan.step(&scan, int(c))
+ if v >= scanSkipSpace {
+ if v == scanError {
+ break
+ }
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ start = i + 1
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+ return nil
+}
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+ dst.WriteByte('\n')
+ dst.WriteString(prefix)
+ for i := 0; i < depth; i++ {
+ dst.WriteString(indent)
+ }
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, and has no trailing newline, to make it
+// easier to embed inside other formatted JSON data.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ needIndent := false
+ depth := 0
+ for _, c := range src {
+ scan.bytes++
+ v := scan.step(&scan, int(c))
+ if v == scanSkipSpace {
+ continue
+ }
+ if v == scanError {
+ break
+ }
+ if needIndent && v != scanEndObject && v != scanEndArray {
+ needIndent = false
+ depth++
+ newline(dst, prefix, indent, depth)
+ }
+
+ // Emit semantically uninteresting bytes
+ // (in particular, punctuation in strings) unmodified.
+ if v == scanContinue {
+ dst.WriteByte(c)
+ continue
+ }
+
+ // Add spacing around real punctuation.
+ switch c {
+ case '{', '[':
+ // delay indent so that empty object and array are formatted as {} and [].
+ needIndent = true
+ dst.WriteByte(c)
+
+ case ',':
+ dst.WriteByte(c)
+ newline(dst, prefix, indent, depth)
+
+ case ':':
+ dst.WriteByte(c)
+ dst.WriteByte(' ')
+
+ case '}', ']':
+ if needIndent {
+ // suppress indent in empty object/array
+ needIndent = false
+ } else {
+ depth--
+ newline(dst, prefix, indent, depth)
+ }
+ dst.WriteByte(c)
+
+ default:
+ dst.WriteByte(c)
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ return nil
+}
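+
+// Usage sketch (added commentary, mirroring encoding/json):
+//
+//	var buf bytes.Buffer
+//	if err := Indent(&buf, []byte(`{"a":[1,2]}`), "", "  "); err == nil {
+//		// buf now holds the object split across lines with two-space indents
+//	}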
diff --git a/src/mongo/gotools/common/json/infinity.go b/src/mongo/gotools/common/json/infinity.go
new file mode 100644
index 00000000000..d5504ce0a44
--- /dev/null
+++ b/src/mongo/gotools/common/json/infinity.go
@@ -0,0 +1,13 @@
+package json
+
+// Transition functions for recognizing Infinity.
+// Adapted from encoding/json/scanner.go.
+
+// stateIn is the state after reading `In`.
+func stateIn(s *scanner, c int) int {
+ if c == 'f' {
+ s.step = generateState("Infinity", []byte("inity"), stateEndValue)
+ return scanContinue
+ }
+ return s.error(c, "in literal Infinity (expecting 'f')")
+}
diff --git a/src/mongo/gotools/common/json/infinity_test.go b/src/mongo/gotools/common/json/infinity_test.go
new file mode 100644
index 00000000000..d02eb0fcbf5
--- /dev/null
+++ b/src/mongo/gotools/common/json/infinity_test.go
@@ -0,0 +1,98 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "math"
+ "testing"
+)
+
+func TestInfinityValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with Infinity values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Infinity"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsInf(jsonValue, 1), ShouldBeTrue)
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value := "Infinity"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value, key2, value, key3, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsInf(jsonValue1, 1), ShouldBeTrue)
+
+ jsonValue2, ok := jsonMap[key2].(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsInf(jsonValue2, 1), ShouldBeTrue)
+
+ jsonValue3, ok := jsonMap[key3].(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsInf(jsonValue3, 1), ShouldBeTrue)
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Infinity"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsInf(jsonValue, 1), ShouldBeTrue)
+ }
+ })
+
+ Convey("can have a sign ('+' or '-')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Infinity"
+ data := fmt.Sprintf(`{"%v":+%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsInf(jsonValue, 1), ShouldBeTrue)
+
+ data = fmt.Sprintf(`{"%v":-%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsInf(jsonValue, -1), ShouldBeTrue)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/iso_date.go b/src/mongo/gotools/common/json/iso_date.go
new file mode 100644
index 00000000000..1144257edbf
--- /dev/null
+++ b/src/mongo/gotools/common/json/iso_date.go
@@ -0,0 +1,45 @@
+package json
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Transition functions for recognizing ISODate.
+// Adapted from encoding/json/scanner.go.
+
+// stateIS is the state after reading `IS`.
+func stateIS(s *scanner, c int) int {
+ if c == 'O' {
+ s.step = stateISO
+ return scanContinue
+ }
+ return s.error(c, "in literal ISODate (expecting 'O')")
+}
+
+// stateISO is the state after reading `ISO`.
+func stateISO(s *scanner, c int) int {
+ if c == 'D' {
+ s.step = stateD // reuse the Date path to match the remaining "ate(...)"
+ return scanContinue
+ }
+ return s.error(c, "in literal ISODate (expecting 'D')")
+}
+
+// Decodes an ISODate literal stored in the underlying byte data into v.
+func (d *decodeState) storeISODate(v reflect.Value) {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+ args, err := d.ctor("ISODate", []reflect.Type{isoDateType})
+ if err != nil {
+ d.error(err)
+ }
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ v.Set(args[0])
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", isoDateType, kind))
+ }
+}
diff --git a/src/mongo/gotools/common/json/iso_date_test.go b/src/mongo/gotools/common/json/iso_date_test.go
new file mode 100644
index 00000000000..2a34435d3a1
--- /dev/null
+++ b/src/mongo/gotools/common/json/iso_date_test.go
@@ -0,0 +1,124 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestISODateValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with ISODate values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "ISODate(\"2006-01-02T15:04-0700\")"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(ISODate)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, ISODate("2006-01-02T15:04-0700"))
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "ISODate(\"2006-01-02T15:04Z0700\")", "ISODate(\"2013-01-02T15:04Z0700\")", "ISODate(\"2014-02-02T15:04Z0700\")"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(ISODate)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldEqual, ISODate("2006-01-02T15:04Z0700"))
+
+ jsonValue2, ok := jsonMap[key2].(ISODate)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldEqual, ISODate("2013-01-02T15:04Z0700"))
+
+ jsonValue3, ok := jsonMap[key3].(ISODate)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldEqual, ISODate("2014-02-02T15:04Z0700"))
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "ISODate(\"2006-01-02T15:04-0700\")"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(ISODate)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, ISODate("2006-01-02T15:04-0700"))
+ }
+ })
+
+ Convey("will take valid format 2006-01-02T15:04:05.000-0700", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "ISODate(\"2006-01-02T15:04:05.000-0700\")"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(ISODate)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, ISODate("2006-01-02T15:04:05.000-0700"))
+ })
+
+ Convey("will take valid format 2006-01-02T15:04:05Z", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "ISODate(\"2014-01-02T15:04:05Z\")"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(ISODate)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, ISODate("2014-01-02T15:04:05Z"))
+ })
+
+ Convey("will take valid format 2006-01-02T15:04-0700", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "ISODate(\"2006-01-02T15:04-0700\")"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(ISODate)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, ISODate("2006-01-02T15:04-0700"))
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/json_format.go b/src/mongo/gotools/common/json/json_format.go
new file mode 100644
index 00000000000..f9243a4dcab
--- /dev/null
+++ b/src/mongo/gotools/common/json/json_format.go
@@ -0,0 +1,163 @@
+package json
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const JSON_DATE_FORMAT = "2006-01-02T15:04:05.000Z"
+
+func (b BinData) MarshalJSON() ([]byte, error) {
+ data := fmt.Sprintf(`{ "$binary": "%v", "$type": "%0x" }`,
+ b.Base64, []byte{b.Type})
+ return []byte(data), nil
+}
+
+func (d128 Decimal128) MarshalJSON() ([]byte, error) {
+ s := d128.Decimal128.String()
+ return []byte(fmt.Sprintf(`{ "$numberDecimal" : "%s" }`, s)), nil
+}
+
+func (js JavaScript) MarshalJSON() ([]byte, error) {
+ data := []byte(fmt.Sprintf(`{ "$code": %q`, js.Code))
+
+ scopeChunk := []byte{}
+ var err error
+ if js.Scope != nil {
+ scopeChunk, err = Marshal(js.Scope)
+ if err != nil {
+ return nil, err
+ }
+ scopeChunk = []byte(fmt.Sprintf(`, "$scope": %v `, string(scopeChunk)))
+ }
+ scopeChunk = append(scopeChunk, '}')
+
+ data = append(data, scopeChunk...)
+ return data, nil
+}
+
+func (d Date) MarshalJSON() ([]byte, error) {
+ var data string
+ n := int64(d)
+ if d.isFormatable() {
+ t := time.Unix(n/1e3, n%1e3*1e6)
+ data = fmt.Sprintf(`{ "$date": "%v" }`, t.UTC().Format(JSON_DATE_FORMAT))
+ } else {
+ data = fmt.Sprintf(`{ "$date": { "$numberLong" : "%v" }}`, n)
+ }
+
+ return []byte(data), nil
+}
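+
+// For example (added commentary): Date(1136214245000) marshals as
+// { "$date": "2006-01-02T15:04:05.000Z" }, while dates past the
+// formatable bound marshal as { "$date": { "$numberLong" : "..." }}.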
+
+func (d DBRef) MarshalJSON() ([]byte, error) {
+ // Convert the $id field to JSON
+ idChunk, err := Marshal(d.Id)
+ if err != nil {
+ return nil, err
+ }
+
+ // Need to form JSON like { "$ref": "REF", "$id": ID, "$db": "DB" }
+ // so we piece the chunks together, since the $id field is only available as marshaled bytes.
+ refChunk := []byte(fmt.Sprintf(`{ "$ref": "%v", "$id": `, d.Collection))
+
+ dbChunk := []byte{}
+ if d.Database != "" {
+ dbChunk = []byte(fmt.Sprintf(`, "$db": "%v" `, d.Database))
+ }
+ dbChunk = append(dbChunk, '}')
+
+ data := make([]byte, len(refChunk)+len(idChunk)+len(dbChunk))
+ copy(data, refChunk)
+ copy(data[len(refChunk):], idChunk)
+ copy(data[len(refChunk)+len(idChunk):], dbChunk)
+
+ return data, nil
+}
+
+func (d DBPointer) MarshalJSON() ([]byte, error) {
+ buffer := bytes.Buffer{}
+ // Convert the $id field to JSON
+ idChunk, err := Marshal(d.Id)
+ if err != nil {
+ return nil, err
+ }
+ buffer.Write([]byte(fmt.Sprintf(`{ "$ref": "%v", "$id": { "$oid" : `, d.Namespace)))
+ buffer.Write(idChunk)
+ buffer.Write([]byte("}}"))
+ return buffer.Bytes(), nil
+}
+
+func (_ MinKey) MarshalJSON() ([]byte, error) {
+ data := `{ "$minKey": 1 }`
+ return []byte(data), nil
+}
+
+func (_ MaxKey) MarshalJSON() ([]byte, error) {
+ data := `{ "$maxKey": 1 }`
+ return []byte(data), nil
+}
+
+func (n NumberInt) MarshalJSON() ([]byte, error) {
+ return []byte(strconv.FormatInt(int64(n), 10)), nil
+}
+
+func (n NumberLong) MarshalJSON() ([]byte, error) {
+ data := fmt.Sprintf(`{ "$numberLong": "%v" }`, int64(n))
+ return []byte(data), nil
+}
+
+func (n NumberFloat) MarshalJSON() ([]byte, error) {
+
+ // check floats for infinity and return +Infinity or -Infinity if so
+ if math.IsInf(float64(n), 1) {
+ return []byte("+Infinity"), nil
+ }
+ if math.IsInf(float64(n), -1) {
+ return []byte("-Infinity"), nil
+ }
+
+ floatString := strconv.FormatFloat(float64(n), 'g', -1, 64)
+
+ // determine if the float has a decimal point and if not
+ // add one to maintain consistency when importing.
+ if _, d := math.Modf(float64(n)); d == 0 {
+ // check for 'e' to determine if the float's formatted in scientific notation
+ if strings.IndexByte(floatString, 'e') == -1 {
+ return []byte(floatString + ".0"), nil
+ }
+ }
+ return []byte(floatString), nil
+}
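+
+// Examples (added commentary):
+//
+//	NumberFloat(5)     // "5.0"    (decimal point added for round-tripping)
+//	NumberFloat(0.5)   // "0.5"
+//	NumberFloat(1e100) // "1e+100" (scientific notation left untouched)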
+
+// Assumes that o represents a valid ObjectId
+// (composed of 24 hexadecimal characters).
+func (o ObjectId) MarshalJSON() ([]byte, error) {
+ data := fmt.Sprintf(`{ "$oid": "%v" }`, string(o))
+ return []byte(data), nil
+}
+
+func (r RegExp) MarshalJSON() ([]byte, error) {
+ pattern, err := Marshal(r.Pattern)
+ if err != nil {
+ return nil, err
+ }
+ data := fmt.Sprintf(`{ "$regex": %v, "$options": "%v" }`,
+ string(pattern), r.Options)
+ return []byte(data), nil
+}
+
+func (t Timestamp) MarshalJSON() ([]byte, error) {
+ data := fmt.Sprintf(`{ "$timestamp": { "t": %v, "i": %v } }`,
+ t.Seconds, t.Increment)
+ return []byte(data), nil
+}
+
+func (_ Undefined) MarshalJSON() ([]byte, error) {
+ data := `{ "$undefined": true }`
+ return []byte(data), nil
+}
diff --git a/src/mongo/gotools/common/json/maxkey.go b/src/mongo/gotools/common/json/maxkey.go
new file mode 100644
index 00000000000..6451fd9ae63
--- /dev/null
+++ b/src/mongo/gotools/common/json/maxkey.go
@@ -0,0 +1,13 @@
+package json
+
+// Transition functions for recognizing MaxKey.
+// Adapted from encoding/json/scanner.go.
+
+// stateUpperMa is the state after reading `Ma`.
+func stateUpperMa(s *scanner, c int) int {
+ if c == 'x' {
+ s.step = generateState("MaxKey", []byte("Key"), stateOptionalConstructor)
+ return scanContinue
+ }
+ return s.error(c, "in literal MaxKey (expecting 'x')")
+}
diff --git a/src/mongo/gotools/common/json/maxkey_test.go b/src/mongo/gotools/common/json/maxkey_test.go
new file mode 100644
index 00000000000..4e6557b72b9
--- /dev/null
+++ b/src/mongo/gotools/common/json/maxkey_test.go
@@ -0,0 +1,180 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestMaxKeyValue(t *testing.T) {
+
+ key := "key"
+
+ Convey("Unmarshalling JSON with MaxKey values", t, func() {
+ value := "MaxKey"
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(MaxKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, MaxKey{})
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value, key2, value, key3, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(MaxKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldResemble, MaxKey{})
+
+ jsonValue2, ok := jsonMap[key2].(MaxKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldResemble, MaxKey{})
+
+ jsonValue3, ok := jsonMap[key3].(MaxKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldResemble, MaxKey{})
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(MaxKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, MaxKey{})
+ }
+ })
+
+ Convey("cannot have a sign ('+' or '-')", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":+%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+
+ data = fmt.Sprintf(`{"%v":-%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("Unmarshalling JSON with MaxKey() values", t, func() {
+ value := "MaxKey()"
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(MaxKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, MaxKey{})
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value, key2, value, key3, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(MaxKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldResemble, MaxKey{})
+
+ jsonValue2, ok := jsonMap[key2].(MaxKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldResemble, MaxKey{})
+
+ jsonValue3, ok := jsonMap[key3].(MaxKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldResemble, MaxKey{})
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(MaxKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, MaxKey{})
+ }
+ })
+
+ Convey("cannot have a sign ('+' or '-')", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":+%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+
+ data = fmt.Sprintf(`{"%v":-%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+
+ Convey("can have whitespace inside or around()", func() {
+ var jsonMap map[string]interface{}
+
+ value = "MaxKey ( )"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(MaxKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, MaxKey{})
+ })
+
+ Convey("cannot have any value other than whitespace inside ()", func() {
+ var jsonMap map[string]interface{}
+ value = "MaxKey(5)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/minkey.go b/src/mongo/gotools/common/json/minkey.go
new file mode 100644
index 00000000000..e87897d68dc
--- /dev/null
+++ b/src/mongo/gotools/common/json/minkey.go
@@ -0,0 +1,13 @@
+package json
+
+// Transition functions for recognizing MinKey.
+// Adapted from encoding/json/scanner.go.
+
+// stateUpperMi is the state after reading `Mi`.
+func stateUpperMi(s *scanner, c int) int {
+ if c == 'n' {
+ s.step = generateState("MinKey", []byte("Key"), stateOptionalConstructor)
+ return scanContinue
+ }
+ return s.error(c, "in literal MinKey (expecting 'n')")
+}
diff --git a/src/mongo/gotools/common/json/minkey_test.go b/src/mongo/gotools/common/json/minkey_test.go
new file mode 100644
index 00000000000..ccbf86aff64
--- /dev/null
+++ b/src/mongo/gotools/common/json/minkey_test.go
@@ -0,0 +1,184 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestMinKeyValue(t *testing.T) {
+ key := "key"
+ Convey("Unmarshalling JSON with MinKey values", t, func() {
+
+ value := "MinKey"
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(MinKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, MinKey{})
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value, key2, value, key3, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(MinKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldResemble, MinKey{})
+
+ jsonValue2, ok := jsonMap[key2].(MinKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldResemble, MinKey{})
+
+ jsonValue3, ok := jsonMap[key3].(MinKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldResemble, MinKey{})
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(MinKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, MinKey{})
+ }
+ })
+
+ Convey("cannot have a sign ('+' or '-')", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":+%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+
+ data = fmt.Sprintf(`{"%v":-%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("Unmarshalling JSON with MinKey() values", t, func() {
+
+ value := "MinKey()"
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(MinKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, MinKey{})
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value, key2, value, key3, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(MinKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldResemble, MinKey{})
+
+ jsonValue2, ok := jsonMap[key2].(MinKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldResemble, MinKey{})
+
+ jsonValue3, ok := jsonMap[key3].(MinKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldResemble, MinKey{})
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(MinKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, MinKey{})
+ }
+ })
+
+ Convey("cannot have a sign ('+' or '-')", func() {
+ var jsonMap map[string]interface{}
+
+ value := "MinKey()"
+ data := fmt.Sprintf(`{"%v":+%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+
+ data = fmt.Sprintf(`{"%v":-%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+
+ Convey("can have whitespace inside or around()", func() {
+ var jsonMap map[string]interface{}
+
+ value = "MinKey ( )"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(MinKey)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, MinKey{})
+ })
+
+ Convey("cannot have any value other than whitespace inside ()", func() {
+ var jsonMap map[string]interface{}
+
+ value = "MinKey(5)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+}
diff --git a/src/mongo/gotools/common/json/mongo_extjson.go b/src/mongo/gotools/common/json/mongo_extjson.go
new file mode 100644
index 00000000000..606b1368b3c
--- /dev/null
+++ b/src/mongo/gotools/common/json/mongo_extjson.go
@@ -0,0 +1,392 @@
+package json
+
+import (
+ "fmt"
+ "gopkg.in/mgo.v2/bson"
+ "reflect"
+)
+
+// Represents base64-encoded binary data.
+type BinData struct {
+ Type byte
+ Base64 string
+}
+
+// Represents the number of milliseconds since the Unix epoch.
+type Date int64
+
+type ISODate string
+
+type ObjectId string
+
+// Represents a reference to another document.
+type DBRef struct {
+ Collection string
+ Id interface{}
+ Database string // optional
+}
+
+// Refers to a document in another namespace, wrapping the namespace
+// string together with the ObjectId that is the _id of the referenced document.
+type DBPointer struct {
+ Namespace string
+ Id bson.ObjectId
+}
+
+// Represents the literal MinKey.
+type MinKey struct{}
+
+// Represents the literal MaxKey.
+type MaxKey struct{}
+
+// Represents a signed 32-bit integer.
+type NumberInt int32
+
+// Represents a signed 64-bit integer.
+type NumberLong int64
+
+// Represents a 64-bit IEEE 754 floating-point number.
+type NumberFloat float64
+
+type Decimal128 struct {
+ bson.Decimal128
+}
+
+// Represents a regular expression.
+type RegExp struct {
+ Pattern string
+ Options string
+}
+
+// Represents a timestamp value.
+type Timestamp struct {
+ Seconds uint32
+ Increment uint32
+}
+
+type JavaScript struct {
+ Code string
+ Scope interface{}
+}
+
+type Float float64
+
+// Represents the literal undefined.
+type Undefined struct{}
+
+var (
+ // primitive types
+ byteType = reflect.TypeOf(byte(0))
+ stringType = reflect.TypeOf(string(""))
+ uint32Type = reflect.TypeOf(uint32(0))
+
+ // object types
+ binDataType = reflect.TypeOf(BinData{})
+ dateType = reflect.TypeOf(Date(0))
+ isoDateType = reflect.TypeOf(ISODate(""))
+ dbRefType = reflect.TypeOf(DBRef{})
+ dbPointerType = reflect.TypeOf(DBPointer{})
+ maxKeyType = reflect.TypeOf(MaxKey{})
+ minKeyType = reflect.TypeOf(MinKey{})
+ numberIntType = reflect.TypeOf(NumberInt(0))
+ numberLongType = reflect.TypeOf(NumberLong(0))
+ numberFloatType = reflect.TypeOf(NumberFloat(0))
+ objectIdType = reflect.TypeOf(ObjectId(""))
+ regexpType = reflect.TypeOf(RegExp{})
+ timestampType = reflect.TypeOf(Timestamp{})
+ undefinedType = reflect.TypeOf(Undefined{})
+ orderedBSONType = reflect.TypeOf(bson.D{})
+ interfaceType = reflect.TypeOf((*interface{})(nil))
+)
+
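+// isFormatable reports whether the date fits the ISO-8601 layout used by
+// Date.MarshalJSON. (Added commentary: 32535215999000 ms is roughly the
+// end of the year 3000; later dates use the $numberLong form instead.)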
+func (d Date) isFormatable() bool {
+ return int64(d) < int64(32535215999000)
+}
+
+func stateBeginExtendedValue(s *scanner, c int) int {
+ switch c {
+ case 'u': // beginning of undefined
+ s.step = stateU
+ case 'B': // beginning of BinData or Boolean
+ s.step = stateB
+ case 'D': // beginning of Date
+ s.step = stateD
+ case 'I': // beginning of Infinity or ISODate
+ s.step = stateI
+ case 'M': // beginning of MinKey or MaxKey
+ s.step = stateM
+ case 'N': // beginning of NaN, NumberInt, or NumberLong
+ s.step = stateUpperN
+ case 'O': // beginning of ObjectId
+ s.step = stateO
+ case 'R': // beginning of RegExp
+ s.step = stateR
+ case 'T': // beginning of Timestamp
+ s.step = stateUpperT
+ case '/': // beginning of /foo/i
+ s.step = stateInRegexpPattern
+ default:
+ return s.error(c, "looking for beginning of value")
+ }
+
+ return scanBeginLiteral
+}
+
+// stateB is the state after reading `B`.
+func stateB(s *scanner, c int) int {
+ if c == 'i' {
+ s.step = stateBi
+ return scanContinue
+ }
+ if c == 'o' {
+ s.step = stateBo
+ return scanContinue
+ }
+ return s.error(c, "in literal BinData or Boolean (expecting 'i' or 'o')")
+}
+
+// stateUpperN is the state after reading `N`.
+func stateUpperN(s *scanner, c int) int {
+ if c == 'a' {
+ s.step = stateUpperNa
+ return scanContinue
+ }
+ if c == 'u' {
+ s.step = stateUpperNu
+ return scanContinue
+ }
+ return s.error(c, "in literal NaN or Number (expecting 'a' or 'u')")
+}
+
+// stateM is the state after reading `M`.
+func stateM(s *scanner, c int) int {
+ if c == 'a' {
+ s.step = stateUpperMa
+ return scanContinue
+ }
+ if c == 'i' {
+ s.step = stateUpperMi
+ return scanContinue
+ }
+ return s.error(c, "in literal MaxKey or MinKey (expecting 'a' or 'i')")
+}
+
+// stateD is the state after reading `D`.
+func stateD(s *scanner, c int) int {
+ switch c {
+ case 'a':
+ s.step = stateDa
+ case 'B':
+ s.step = stateDB
+ case 'b':
+ s.step = stateDb
+ default:
+ return s.error(c, "in literal Date or DBRef (expecting 'a' or 'B')")
+ }
+ return scanContinue
+}
+
+// stateDB is the state after reading `DB`.
+func stateDB(s *scanner, c int) int {
+ if c == 'R' {
+ s.step = stateDBR
+ return scanContinue
+ }
+ if c == 'P' {
+ s.step = stateDBP
+ return scanContinue
+ }
+ return s.error(c, "in state DB (expecting 'R or P')")
+}
+
+// stateI is the state after reading `I`.
+func stateI(s *scanner, c int) int {
+ switch c {
+ case 'n':
+ s.step = stateIn
+ case 'S':
+ s.step = stateIS
+ default:
+ return s.error(c, "in literal Infinity or ISO (expecting 'n' or 'S')")
+ }
+ return scanContinue
+}
+
+// Decodes a literal stored in item into v.
+func (d *decodeState) storeExtendedLiteral(item []byte, v reflect.Value, fromQuoted bool) bool {
+ switch c := item[0]; c {
+ case 'n':
+ d.storeNewLiteral(v, fromQuoted)
+
+ case 'u': // undefined
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(Undefined{}))
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", undefinedType, kind))
+ }
+
+ case 'B': // BinData or Boolean
+ switch item[1] {
+ case 'i': // BinData
+ d.storeBinData(v)
+ case 'o': // Boolean
+ d.storeBoolean(v)
+ }
+ case 'D': // Date, DBRef, DBPointer, or Dbref
+ switch item[1] {
+ case 'a': // Date
+ d.storeDate(v)
+ case 'b': // Dbref
+ d.storeDBRef(v)
+ case 'B': // DBRef or DBPointer
+ switch item[2] {
+ case 'R': //DBRef
+ d.storeDBRef(v)
+ case 'P': //DBPointer
+ d.storeDBPointer(v)
+ }
+ }
+ case 'I':
+ switch item[1] {
+ case 'S': // ISODate
+ d.storeISODate(v)
+ }
+
+ case 'M': // MinKey or MaxKey
+ switch item[1] {
+ case 'i': // MinKey
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(MinKey{}))
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", minKeyType, kind))
+ }
+ case 'a': // MaxKey
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(MaxKey{}))
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", maxKeyType, kind))
+ }
+ }
+
+ case 'O': // ObjectId
+ d.storeObjectId(v)
+
+ case 'N': // NumberInt or NumberLong
+ switch item[6] { // first letter after "Number": 'I' or 'L'
+ case 'I': // NumberInt
+ d.storeNumberInt(v)
+ case 'L': // NumberLong
+ d.storeNumberLong(v)
+ }
+
+ case 'R': // RegExp constructor
+ d.storeRegexp(v)
+
+ case 'T': // Timestamp
+ d.storeTimestamp(v)
+
+ case '/': // regular expression literal
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanRegexpPattern {
+ d.error(fmt.Errorf("expected beginning of regular expression pattern"))
+ }
+
+ pattern, options, err := d.regexp()
+ if err != nil {
+ d.error(err)
+ }
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ v.Set(reflect.ValueOf(RegExp{pattern, options}))
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", regexpType, kind))
+ }
+
+ default:
+ return false
+ }
+
+ return true
+}
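+
+// Illustrative note (added commentary): dispatch is on the leading bytes of
+// the scanned literal, e.g. an item beginning "NumberL" reaches the 'N'
+// case above and item[6] == 'L' then selects storeNumberLong.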
+
+// Returns a literal from the underlying byte data.
+func (d *decodeState) getExtendedLiteral(item []byte) (interface{}, bool) {
+ switch c := item[0]; c {
+ case 'n':
+ return d.getNewLiteral(), true
+
+ case 'u': // undefined
+ return Undefined{}, true
+
+ case 'B': // BinData or Boolean
+ switch item[1] {
+ case 'i': // BinData
+ return d.getBinData(), true
+ case 'o': // Boolean
+ return d.getBoolean(), true
+ }
+
+ case 'D': // Date, DBRef, DBPointer, or Dbref
+ switch item[1] {
+ case 'a': // Date
+ return d.getDate(), true
+ case 'b': // Dbref
+ return d.getDBRef(), true
+ case 'B': // DBRef or DBPointer
+ switch item[2] {
+ case 'R': // DBRef
+ return d.getDBRef(), true
+ case 'P': // DBPointer
+ return d.getDBPointer(), true
+ }
+ }
+
+ case 'M': // MinKey or MaxKey
+ switch item[1] {
+ case 'i': // MinKey
+ return MinKey{}, true
+ case 'a': // MaxKey
+ return MaxKey{}, true
+ }
+
+ case 'O': // ObjectId
+ return d.getObjectId(), true
+
+ case 'N': // NumberInt or NumberLong
+ switch item[6] { // first letter after "Number": 'I' or 'L'
+ case 'I': // NumberInt
+ return d.getNumberInt(), true
+ case 'L': // NumberLong
+ return d.getNumberLong(), true
+ }
+
+ case 'R': // RegExp constructor
+ return d.getRegexp(), true
+
+ case 'T': // Timestamp
+ return d.getTimestamp(), true
+
+ case 'I': // ISO Date
+ switch item[1] {
+ case 'S': // ISODate
+ return d.getDate(), true
+ }
+
+ case '/': // regular expression literal
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanRegexpPattern {
+ d.error(fmt.Errorf("expected beginning of regular expression pattern"))
+ }
+
+ pattern, options, err := d.regexp()
+ if err != nil {
+ d.error(err)
+ }
+ return RegExp{pattern, options}, true
+ }
+
+ return nil, false
+}
diff --git a/src/mongo/gotools/common/json/nan.go b/src/mongo/gotools/common/json/nan.go
new file mode 100644
index 00000000000..23219906ce3
--- /dev/null
+++ b/src/mongo/gotools/common/json/nan.go
@@ -0,0 +1,13 @@
+package json
+
+// Transition functions for recognizing NaN.
+// Adapted from encoding/json/scanner.go.
+
+// stateUpperNa is the state after reading `Na`.
+func stateUpperNa(s *scanner, c int) int {
+ if c == 'N' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal NaN (expecting 'N')")
+}
diff --git a/src/mongo/gotools/common/json/nan_test.go b/src/mongo/gotools/common/json/nan_test.go
new file mode 100644
index 00000000000..6653d8c6d5d
--- /dev/null
+++ b/src/mongo/gotools/common/json/nan_test.go
@@ -0,0 +1,90 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "math"
+ "testing"
+)
+
+func TestNaNValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with NaN values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "NaN"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsNaN(jsonValue), ShouldBeTrue)
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value := "NaN"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value, key2, value, key3, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsNaN(jsonValue1), ShouldBeTrue)
+
+ jsonValue2, ok := jsonMap[key2].(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsNaN(jsonValue2), ShouldBeTrue)
+
+ jsonValue3, ok := jsonMap[key3].(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsNaN(jsonValue3), ShouldBeTrue)
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "NaN"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(float64)
+ So(ok, ShouldBeTrue)
+ So(math.IsNaN(jsonValue), ShouldBeTrue)
+ }
+ })
+
+ Convey("cannot have a sign ('+' or '-')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "NaN"
+ data := fmt.Sprintf(`{"%v":+%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+
+ data = fmt.Sprintf(`{"%v":-%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/new.go b/src/mongo/gotools/common/json/new.go
new file mode 100644
index 00000000000..71c9bc4b7ef
--- /dev/null
+++ b/src/mongo/gotools/common/json/new.go
@@ -0,0 +1,92 @@
+package json
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Transition functions for recognizing new.
+// Adapted from encoding/json/scanner.go.
+
+// stateNe is the state after reading `ne`.
+func stateNe(s *scanner, c int) int {
+ if c == 'w' {
+ s.step = stateNew
+ return scanContinue
+ }
+ return s.error(c, "in literal new (expecting 'w')")
+}
+
+// stateNew is the state after reading `new`.
+// Ensures that there is a space after the new keyword.
+func stateNew(s *scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ s.step = stateBeginObjectValue
+ return scanContinue
+ }
+ return s.error(c, "expected space after new keyword")
+}
+
+// stateBeginObjectValue is the state after reading `new` and the required space, looking for the start of a constructor name.
+func stateBeginObjectValue(s *scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return scanSkipSpace
+ }
+ switch c {
+ case 'B': // beginning of BinData or Boolean
+ s.step = stateB
+ case 'D': // beginning of Date
+ s.step = stateD
+ case 'N': // beginning of NumberInt or NumberLong
+ s.step = stateNumberUpperN
+ case 'O': // beginning of ObjectId
+ s.step = stateO
+ case 'R': // beginning of RegExp
+ s.step = stateR
+ case 'T': // beginning of Timestamp
+ s.step = stateUpperT
+ default:
+ return s.error(c, "looking for beginning of value")
+ }
+
+ return scanBeginLiteral
+}
+
+// stateNumberUpperN is the state after reading `N`.
+func stateNumberUpperN(s *scanner, c int) int {
+ if c == 'u' {
+ s.step = stateUpperNu
+ return scanContinue
+ }
+ return s.error(c, "in literal NumberInt or NumberLong (expecting 'u')")
+}
+
+// Decodes a literal stored in the underlying byte data into v.
+func (d *decodeState) storeNewLiteral(v reflect.Value, fromQuoted bool) {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginLiteral {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ // Read constructor identifier
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ // Back up so d.ctor can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off-1], v, fromQuoted)
+}
+
+// Returns a literal from the underlying byte data.
+func (d *decodeState) getNewLiteral() interface{} {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginLiteral {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+ return d.literalInterface()
+}
diff --git a/src/mongo/gotools/common/json/new_test.go b/src/mongo/gotools/common/json/new_test.go
new file mode 100644
index 00000000000..3d91ed9dcfb
--- /dev/null
+++ b/src/mongo/gotools/common/json/new_test.go
@@ -0,0 +1,197 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestNewKeyword(t *testing.T) {
+
+ Convey("When unmarshalling JSON using the new keyword", t, func() {
+
+ Convey("can be used with BinData constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `new BinData(1, "xyz")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(BinData)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, BinData{1, "xyz"})
+ })
+
+ Convey("can be used with Boolean constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `new Boolean(1)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, true)
+
+ key = "key"
+ value = `new Boolean(0)`
+ data = fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok = jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, false)
+ })
+
+ Convey("can be used with Date constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "new Date(123)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(Date)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, Date(123))
+ })
+
+ Convey("can be used with DBRef constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `new BinData(1, "xyz")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(BinData)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, BinData{1, "xyz"})
+ })
+
+ Convey("can be used with NumberInt constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "new NumberInt(123)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(NumberInt)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberInt(123))
+ })
+
+ Convey("can be used with NumberLong constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "new NumberLong(123)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(NumberLong)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberLong(123))
+ })
+
+ Convey("can be used with ObjectId constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `new ObjectId("123")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(ObjectId)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, ObjectId("123"))
+ })
+
+ Convey("can be used with RegExp constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `new RegExp("foo", "i")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, RegExp{"foo", "i"})
+ })
+
+ Convey("can be used with Timestamp constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "new Timestamp(123, 321)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(Timestamp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, Timestamp{123, 321})
+ })
+
+ Convey("cannot be used with literals", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ literals := []string{"null", "true", "false", "undefined",
+ "NaN", "Infinity", "MinKey", "MaxKey"}
+
+ for _, value := range literals {
+ data := fmt.Sprintf(`{"%v":new %v}`, key, value)
+ Convey(value, func() {
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ }
+ })
+
+ Convey("must be followed by a space", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "newDate(123)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+
+ Convey("cannot be chained togther (`new new ...`)", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "new new Date(123)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
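The tests above pin down the `new` keyword's contract: it is optional sugar before a constructor, it must be followed by a space, and it cannot be stacked. A minimal usage sketch under those rules, assuming the gotools import path and this package's exported Date type (the program shape and error handling are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-tools/common/json" // assumed import path
)

func main() {
	var doc map[string]interface{}

	// `new` is optional sugar: both spellings should decode to the same Date.
	for _, in := range []string{`{"ts":new Date(123)}`, `{"ts":Date(123)}`} {
		if err := json.Unmarshal([]byte(in), &doc); err != nil {
			fmt.Println("unexpected error:", err)
			continue
		}
		fmt.Printf("%s -> %v\n", in, doc["ts"].(json.Date))
	}

	// Without the separating space the token is not recognized.
	if err := json.Unmarshal([]byte(`{"ts":newDate(123)}`), &doc); err != nil {
		fmt.Println("rejected as expected:", err)
	}
}
```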
diff --git a/src/mongo/gotools/common/json/number.go b/src/mongo/gotools/common/json/number.go
new file mode 100644
index 00000000000..3222b5e6ef6
--- /dev/null
+++ b/src/mongo/gotools/common/json/number.go
@@ -0,0 +1,136 @@
+package json
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Transition functions for recognizing NumberInt and NumberLong.
+// Adapted from encoding/json/scanner.go.
+
+// stateUpperNu is the state after reading `Nu`.
+func stateUpperNu(s *scanner, c int) int {
+ if c == 'm' {
+ s.step = generateState("Number", []byte("ber"), stateUpperNumber)
+ return scanContinue
+ }
+ return s.error(c, "in literal Number (expecting 'm')")
+}
+
+// stateUpperNumber is the state after reading `Number`.
+func stateUpperNumber(s *scanner, c int) int {
+ if c == 'I' {
+ s.step = generateState("NumberInt", []byte("nt"), stateConstructor)
+ return scanContinue
+ }
+ if c == 'L' {
+ s.step = generateState("NumberLong", []byte("ong"), stateConstructor)
+ return scanContinue
+ }
+ return s.error(c, "in literal NumberInt or NumberLong (expecting 'I' or 'L')")
+}
+
+// Decodes a NumberInt literal stored in the underlying byte data into v.
+func (d *decodeState) storeNumberInt(v reflect.Value) {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args, err := d.ctor("NumberInt", []reflect.Type{numberIntType})
+ if err != nil {
+ d.error(err)
+ }
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ v.Set(args[0])
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", numberIntType, kind))
+ }
+}
+
+// Returns a NumberInt literal from the underlying byte data.
+func (d *decodeState) getNumberInt() interface{} {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ // Prevent d.convertNumber() from parsing the argument as a float64.
+ useNumber := d.useNumber
+ d.useNumber = true
+
+ args := d.ctorInterface()
+ if err := ctorNumArgsMismatch("NumberInt", 1, len(args)); err != nil {
+ d.error(err)
+ }
+ var number Number
+ switch v := args[0].(type) {
+ case Number:
+ number = v
+ case string:
+ number = Number(v)
+ default:
+ d.error(fmt.Errorf("expected int32 for first argument of NumberInt constructor, got %T (value was %v)", v, v))
+ }
+
+ d.useNumber = useNumber
+ arg0, err := number.Int32()
+ if err != nil {
+ d.error(fmt.Errorf("expected int32 for first argument of NumberInt constructor, got %T (value was %v)", number, number))
+ }
+ return NumberInt(arg0)
+}
+
+// Decodes a NumberLong literal stored in the underlying byte data into v.
+func (d *decodeState) storeNumberLong(v reflect.Value) {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args, err := d.ctor("NumberLong", []reflect.Type{numberLongType})
+ if err != nil {
+ d.error(err)
+ }
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ v.Set(args[0])
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", numberLongType, kind))
+ }
+}
+
+// Returns a NumberLong literal from the underlying byte data.
+func (d *decodeState) getNumberLong() interface{} {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ // Prevent d.convertNumber() from parsing the argument as a float64.
+ useNumber := d.useNumber
+ d.useNumber = true
+
+ args := d.ctorInterface()
+ if err := ctorNumArgsMismatch("NumberLong", 1, len(args)); err != nil {
+ d.error(err)
+ }
+ var number Number
+ switch v := args[0].(type) {
+ case Number:
+ number = v
+ case string:
+ number = Number(v)
+
+ default:
+ d.error(fmt.Errorf("expected int64 for first argument of NumberLong constructor, got %T (value was %v)", v, v))
+ }
+
+ d.useNumber = useNumber
+ arg0, err := number.Int64()
+ if err != nil {
+ d.error(fmt.Errorf("expected int64 for first argument of NumberLong constructor, got %T (value was %v)", number, number))
+ }
+ return NumberLong(arg0)
+}
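Both getters above temporarily set d.useNumber so the constructor argument survives as a Number instead of being collapsed to float64, then restore the old value by hand. A hypothetical helper (withUseNumber is not part of this package) sketching the same pattern with defer, which also restores the flag on the panic-style d.error path:

```go
// withUseNumber is a hypothetical refactoring of the pattern used by
// getNumberInt and getNumberLong: force Number parsing for the duration of
// fn, then restore the caller's setting even if fn exits via d.error.
func (d *decodeState) withUseNumber(fn func() interface{}) interface{} {
	saved := d.useNumber
	d.useNumber = true
	defer func() { d.useNumber = saved }()
	return fn()
}
```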
diff --git a/src/mongo/gotools/common/json/number_test.go b/src/mongo/gotools/common/json/number_test.go
new file mode 100644
index 00000000000..88ce8de9a34
--- /dev/null
+++ b/src/mongo/gotools/common/json/number_test.go
@@ -0,0 +1,191 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestNumberIntValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with NumberInt values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "NumberInt(123)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(NumberInt)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberInt(123))
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "NumberInt(123)", "NumberInt(456)", "NumberInt(789)"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(NumberInt)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldEqual, NumberInt(123))
+
+ jsonValue2, ok := jsonMap[key2].(NumberInt)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldEqual, NumberInt(456))
+
+ jsonValue3, ok := jsonMap[key3].(NumberInt)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldEqual, NumberInt(789))
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "NumberInt(42)"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(NumberInt)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberInt(42))
+ }
+ })
+
+ Convey("can use string as argument", func() {
+ key := "key"
+ value := `NumberInt("123")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ jsonValue, err := UnmarshalBsonD([]byte(data))
+
+ So(err, ShouldBeNil)
+ So(jsonValue[0].Value, ShouldEqual, NumberInt(123))
+ })
+
+ Convey("can specify argument in hexadecimal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "NumberInt(0x5f)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(NumberInt)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberInt(0x5f))
+ })
+ })
+}
+
+func TestNumberLongValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with NumberLong values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "NumberLong(123)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(NumberLong)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberLong(123))
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "NumberLong(123)", "NumberLong(456)", "NumberLong(789)"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(NumberLong)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldEqual, NumberLong(123))
+
+ jsonValue2, ok := jsonMap[key2].(NumberLong)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldEqual, NumberLong(456))
+
+ jsonValue3, ok := jsonMap[key3].(NumberLong)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldEqual, NumberLong(789))
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "NumberLong(42)"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(NumberLong)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberLong(42))
+ }
+ })
+
+ Convey("can use string as argument", func() {
+ key := "key"
+ value := `NumberLong("123")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ jsonValue, err := UnmarshalBsonD([]byte(data))
+
+ So(err, ShouldBeNil)
+ So(jsonValue[0].Value, ShouldEqual, NumberLong(123))
+ })
+
+ Convey("can specify argument in hexadecimal", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "NumberLong(0x5f)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(NumberLong)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, NumberLong(0x5f))
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/objectid.go b/src/mongo/gotools/common/json/objectid.go
new file mode 100644
index 00000000000..c38f4df5225
--- /dev/null
+++ b/src/mongo/gotools/common/json/objectid.go
@@ -0,0 +1,55 @@
+package json
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Transition functions for recognizing ObjectId.
+// Adapted from encoding/json/scanner.go.
+
+// stateO is the state after reading `O`.
+func stateO(s *scanner, c int) int {
+ if c == 'b' {
+ s.step = generateState("ObjectId", []byte("jectId"), stateConstructor)
+ return scanContinue
+ }
+ return s.error(c, "in literal ObjectId (expecting 'b')")
+}
+
+// Decodes an ObjectId literal stored in the underlying byte data into v.
+func (d *decodeState) storeObjectId(v reflect.Value) {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args, err := d.ctor("ObjectId", []reflect.Type{objectIdType})
+ if err != nil {
+ d.error(err)
+ }
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ v.Set(args[0])
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", objectIdType, kind))
+ }
+}
+
+// Returns an ObjectId literal from the underlying byte data.
+func (d *decodeState) getObjectId() interface{} {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args := d.ctorInterface()
+ if err := ctorNumArgsMismatch("ObjectId", 1, len(args)); err != nil {
+ d.error(err)
+ }
+ arg0, ok := args[0].(string)
+ if !ok {
+ d.error(fmt.Errorf("expected string for first argument of ObjectId constructor"))
+ }
+ return ObjectId(arg0)
+}
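getObjectId accepts only a string argument, so numeric input is rejected during decoding. A small in-package sketch of both paths (the test name is illustrative, not part of this change):

```go
package json

import "testing"

// TestObjectIdArgumentTypes is an illustrative sketch, not part of the
// original change set: ObjectId takes exactly one string argument.
func TestObjectIdArgumentTypes(t *testing.T) {
	var doc map[string]interface{}

	if err := Unmarshal([]byte(`{"_id":ObjectId("563b6e748babd47f3e4b64ba")}`), &doc); err != nil {
		t.Fatalf("string argument should decode: %v", err)
	}
	if _, ok := doc["_id"].(ObjectId); !ok {
		t.Fatalf("expected ObjectId, got %T", doc["_id"])
	}

	if err := Unmarshal([]byte(`{"_id":ObjectId(123)}`), &doc); err == nil {
		t.Fatal("numeric argument should have been rejected")
	}
}
```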
diff --git a/src/mongo/gotools/common/json/objectid_test.go b/src/mongo/gotools/common/json/objectid_test.go
new file mode 100644
index 00000000000..2bb25ba7d7c
--- /dev/null
+++ b/src/mongo/gotools/common/json/objectid_test.go
@@ -0,0 +1,84 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestObjectIdValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with ObjectId values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `ObjectId("123")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(ObjectId)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, ObjectId("123"))
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := `ObjectId("123")`, `ObjectId("456")`, `ObjectId("789")`
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(ObjectId)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldEqual, ObjectId("123"))
+
+ jsonValue2, ok := jsonMap[key2].(ObjectId)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldEqual, ObjectId("456"))
+
+ jsonValue3, ok := jsonMap[key3].(ObjectId)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldEqual, ObjectId("789"))
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `ObjectId("000")`
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(ObjectId)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, ObjectId("000"))
+ }
+ })
+
+ Convey("cannot use number as argument", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `ObjectId(123)`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/regexp.go b/src/mongo/gotools/common/json/regexp.go
new file mode 100644
index 00000000000..a9d06fe1d48
--- /dev/null
+++ b/src/mongo/gotools/common/json/regexp.go
@@ -0,0 +1,275 @@
+package json
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Transition functions for recognizing RegExp.
+// Adapted from encoding/json/scanner.go.
+
+// stateR is the state after reading `R`.
+func stateR(s *scanner, c int) int {
+ if c == 'e' {
+ s.step = generateState("RegExp", []byte("gExp"), stateConstructor)
+ return scanContinue
+ }
+ return s.error(c, "in literal RegExp (expecting 'e')")
+}
+
+// stateInRegexpPattern is the state after reading `/`.
+func stateInRegexpPattern(s *scanner, c int) int {
+ if c == '/' {
+ s.step = stateInRegexpOptions
+ return scanRegexpOptions
+ }
+ if c == '\\' {
+ s.step = stateInRegexpPatternEsc
+ return scanRegexpPattern
+ }
+ if c < 0x20 {
+ return s.error(c, "in regular expression literal")
+ }
+ return scanRegexpPattern
+}
+
+// stateInRegexpPatternEsc is the state after reading `/\` during a regex pattern.
+func stateInRegexpPatternEsc(s *scanner, c int) int {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '\'':
+ s.step = stateInRegexpPattern
+ return scanRegexpPattern
+ }
+ if c == 'u' {
+ s.step = stateInRegexpPatternEscU
+ return scanRegexpPattern
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInRegexpPatternEscU is the state after reading `/\u` during a regex pattern.
+func stateInRegexpPatternEscU(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInRegexpPatternEscU1
+ return scanRegexpPattern
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInRegexpPatternEscU1 is the state after reading `/\u1` during a regex pattern.
+func stateInRegexpPatternEscU1(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInRegexpPatternEscU12
+ return scanRegexpPattern
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInRegexpPatternEscU12 is the state after reading `/\u12` during a regex pattern.
+func stateInRegexpPatternEscU12(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInRegexpPatternEscU123
+ return scanRegexpPattern
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInRegexpPatternEscU123 is the state after reading `/\u123` during a regex pattern.
+func stateInRegexpPatternEscU123(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInRegexpPattern
+ return scanRegexpPattern
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInRegexpOptions is the state after reading `/foo/`.
+func stateInRegexpOptions(s *scanner, c int) int {
+ switch c {
+ case 'g', 'i', 'm', 's':
+ return scanRegexpOptions
+ }
+ return stateEndValue(s, c)
+}
+
+// Decodes a RegExp literal stored in the underlying byte data into v.
+func (d *decodeState) storeRegexp(v reflect.Value) {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args, err := d.ctor("RegExp", []reflect.Type{stringType, stringType})
+ if err != nil {
+ d.error(err)
+ }
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ arg0 := args[0].String()
+ arg1 := args[1].String()
+ v.Set(reflect.ValueOf(RegExp{arg0, arg1}))
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", regexpType, kind))
+ }
+}
+
+// Returns a RegExp literal from the underlying byte data.
+func (d *decodeState) getRegexp() interface{} {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args := d.ctorInterface()
+ if err := ctorNumArgsMismatch("RegExp", 2, len(args)); err != nil {
+ d.error(err)
+ }
+ arg0, ok := args[0].(string)
+ if !ok {
+ d.error(fmt.Errorf("expected string for first argument of RegExp constructor"))
+ }
+ arg1, ok := args[1].(string)
+ if !ok {
+ d.error(fmt.Errorf("expected string for second argument of RegExp constructor"))
+ }
+ return RegExp{arg0, arg1}
+}
+
+// Decoder function that breaks a regular expression literal into its pattern and options.
+// Adapted from encoding/json/decode.go.
+
+// regexp consumes a regular expression from d.data[d.off-1:].
+// the two bytes of the regexp ("/a") have been read already.
+func (d *decodeState) regexp() (string, string, error) {
+ start := d.off - 1
+
+ // Look ahead for /.
+ op := d.scanWhile(scanRegexpPattern)
+ if op != scanRegexpOptions {
+ return "", "", fmt.Errorf("expected beginning of regular expression options")
+ }
+ pattern := d.data[start : d.off-1]
+
+ start = d.off
+ op = d.scanWhile(scanRegexpOptions)
+
+ // Back up so caller can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ options := d.data[start:d.off]
+
+ // Check for unusual characters. If there are none,
+ // then no copying is needed, so return string of the
+ // original bytes.
+ r := 0
+ for r < len(pattern) {
+ c := pattern[r]
+ if c == '\\' || c == '/' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(pattern[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(pattern) {
+ return string(pattern), string(options), nil
+ }
+
+ b := make([]byte, len(pattern)+2*utf8.UTFMax)
+ w := copy(b, pattern[0:r])
+ for r < len(pattern) {
+ // Out of room? Can only happen if pattern is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := pattern[r]; {
+ case c == '\\':
+ r++
+ if r >= len(pattern) {
+ return "", "", errPhase
+ }
+ switch pattern[r] {
+ default:
+ return "", "", fmt.Errorf("invalid escape character")
+ case '"', '\\', '/', '\'':
+ b[w] = pattern[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(pattern[r:])
+ if rr < 0 {
+ return "", "", fmt.Errorf("non-hexadecimal character found")
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(pattern[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Forward slash, control characters are invalid.
+ case c == '/', c < ' ':
+ d.error(fmt.Errorf("regular expression pattern cannot contain unescaped '/'"))
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(pattern[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return string(b[0:w]), string(options), nil
+}
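The splitter above also rewrites escape sequences inside /.../ literals, so a \uXXXX escape reaches the caller as the decoded rune. An in-package sketch of that behavior, assuming Unmarshal and the RegExp type used by the surrounding tests (the test name is illustrative):

```go
package json

import "testing"

// TestRegexpLiteralUnicodeEscape is an illustrative sketch, not part of the
// original change set: `\u0066` is the rune 'f', so the decoded pattern is
// plain "foo".
func TestRegexpLiteralUnicodeEscape(t *testing.T) {
	var doc map[string]interface{}

	if err := Unmarshal([]byte(`{"k":/\u0066oo/i}`), &doc); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	re, ok := doc["k"].(RegExp)
	if !ok || re.Pattern != "foo" || re.Options != "i" {
		t.Fatalf("got %#v", doc["k"])
	}
}
```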
diff --git a/src/mongo/gotools/common/json/regexp_test.go b/src/mongo/gotools/common/json/regexp_test.go
new file mode 100644
index 00000000000..a1f611b6c05
--- /dev/null
+++ b/src/mongo/gotools/common/json/regexp_test.go
@@ -0,0 +1,243 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestRegExpValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with RegExp values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `RegExp("foo", "i")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, RegExp{"foo", "i"})
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := `RegExp("foo", "i")`,
+ `RegExp("bar", "i")`, `RegExp("baz", "i")`
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldResemble, RegExp{"foo", "i"})
+
+ jsonValue2, ok := jsonMap[key2].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldResemble, RegExp{"bar", "i"})
+
+ jsonValue3, ok := jsonMap[key3].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldResemble, RegExp{"baz", "i"})
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `RegExp("xyz", "i")`
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, RegExp{"xyz", "i"})
+ }
+ })
+
+ Convey("can use options 'g', 'i', 'm', and 's'", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ options := []string{"g", "i", "m", "s"}
+
+ for _, option := range options {
+ data := fmt.Sprintf(`{"%v":RegExp("xyz", "%v")}`, key, option)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, RegExp{"xyz", option})
+ }
+ })
+
+ Convey("can use multiple options", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `RegExp("foo", "gims")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, RegExp{"foo", "gims"})
+ })
+ })
+}
+
+func TestRegexpLiteral(t *testing.T) {
+
+ Convey("When unmarshalling JSON with regular expression literals", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "/foo/i"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, RegExp{"foo", "i"})
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "/foo/i", "/bar/i", "/baz/i"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldResemble, RegExp{"foo", "i"})
+
+ jsonValue2, ok := jsonMap[key2].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldResemble, RegExp{"bar", "i"})
+
+ jsonValue3, ok := jsonMap[key3].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldResemble, RegExp{"baz", "i"})
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "/xyz/i"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, RegExp{"xyz", "i"})
+ }
+ })
+
+ Convey("can use options 'g', 'i', 'm', and 's'", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ options := []string{"g", "i", "m", "s"}
+
+ for _, option := range options {
+ data := fmt.Sprintf(`{"%v":/xyz/%v}`, key, option)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, RegExp{"xyz", option})
+ }
+ })
+
+ Convey("can use multiple options", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "/foo/gims"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, RegExp{"foo", "gims"})
+ })
+
+ Convey("can contain unescaped quotes (`'` and `\"`)", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `/f'o"o/i`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, RegExp{`f'o"o`, "i"})
+ })
+
+ Convey("cannot contain unescaped forward slashes ('/')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "/f/o/o/i"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+
+ Convey("cannot contain invalid escape sequences", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `/f\o\o/`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/scanner.go b/src/mongo/gotools/common/json/scanner.go
new file mode 100644
index 00000000000..4317432d761
--- /dev/null
+++ b/src/mongo/gotools/common/json/scanner.go
@@ -0,0 +1,669 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, nextValue, etc.).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import "strconv"
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+ scan.reset()
+ for _, c := range data {
+ scan.bytes++
+ if scan.step(scan, int(c)) == scanError {
+ return scan.err
+ }
+ }
+ if scan.eof() == scanError {
+ return scan.err
+ }
+ return nil
+}
+
+// nextValue splits data after the next whole JSON value,
+// returning that value and the bytes that follow it as separate slices.
+// scan is passed in for use by nextValue to avoid an allocation.
+func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
+ scan.reset()
+ for i, c := range data {
+ v := scan.step(scan, int(c))
+ if v >= scanEnd {
+ switch v {
+ case scanError:
+ return nil, nil, scan.err
+ case scanEnd:
+ return data[0:i], data[i:], nil
+ }
+ }
+ }
+ if scan.eof() == scanError {
+ return nil, nil, scan.err
+ }
+ return data, nil, nil
+}
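+
+// For example (an illustrative note, not in the original change):
+// nextValue([]byte(`{"a":1} tail`), &scan) returns `{"a":1}` as the value
+// and ` tail` (including the leading space) as the rest.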
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+ msg string // description of error
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A scanner is a JSON scanning state machine.
+// Callers call scan.reset() and then pass bytes in one at a time
+// by calling scan.step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value scanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in. (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
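+//
+// For example (an illustrative note, not in the original change): feeding
+// the bytes of `{"a":9}` one at a time yields scanBeginObject for '{',
+// scanBeginLiteral for '"' and for '9', scanObjectKey for ':',
+// scanEndObject for '}', and scanContinue for the uninteresting bytes.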
+type scanner struct {
+ // The step is a func to be called to execute the next transition.
+ // Also tried using an integer constant and a single func
+ // with a switch, but using the func directly was 10% faster
+ // on a 64-bit Mac Mini, and it's nicer to read.
+ step func(*scanner, int) int
+
+ // Reached end of top-level value.
+ endTop bool
+
+ // Stack of what we're in the middle of - array values, object keys, object values.
+ parseState []int
+
+ // Error that happened, if any.
+ err error
+
+ // 1-byte redo (see undo method)
+ redo bool
+ redoCode int
+ redoState func(*scanner, int) int
+
+ // total bytes consumed, updated by decoder.Decode
+ bytes int64
+}
+
+// These values are returned by the state transition functions
+// assigned to scanner.step and the method scanner.eof.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to scanner.step: if one call returns scanError,
+// every subsequent call will return scanError too.
+const (
+ // Continue.
+ scanContinue = iota // uninteresting byte
+ scanBeginLiteral // end implied by next result != scanContinue
+ scanBeginObject // begin object
+ scanObjectKey // just finished object key (string)
+ scanObjectValue // just finished non-last object value
+ scanEndObject // end object (implies scanObjectValue if possible)
+ scanBeginArray // begin array
+ scanArrayValue // just finished array value
+ scanEndArray // end array (implies scanArrayValue if possible)
+ scanBeginCtor // begin constructor
+ scanCtorArg // just finished constructor argument
+ scanEndCtor // end constructor (implies scanCtorArg if possible)
+ scanRegexpPattern // inside regular expression pattern
+ scanRegexpOptions // inside regular expression options
+ scanSkipSpace // space byte; can skip; known to be last "continue" result
+
+ // Stop.
+ scanEnd // top-level value ended *before* this byte; known to be first "stop" result
+ scanError // hit an error, scanner.err.
+)
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+ parseObjectKey = iota // parsing object key (before colon)
+ parseObjectValue // parsing object value (after colon)
+ parseArrayValue // parsing array value
+ parseCtorArg // parsing constructor argument
+)
+
+// reset prepares the scanner for use.
+// It must be called before calling s.step.
+func (s *scanner) reset() {
+ s.step = stateBeginValue
+ s.parseState = s.parseState[0:0]
+ s.err = nil
+ s.redo = false
+ s.endTop = false
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() int {
+ if s.err != nil {
+ return scanError
+ }
+ if s.endTop {
+ return scanEnd
+ }
+ s.step(s, ' ')
+ if s.endTop {
+ return scanEnd
+ }
+ if s.err == nil {
+ s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+ }
+ return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+func (s *scanner) pushParseState(p int) {
+ s.parseState = append(s.parseState, p)
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+ n := len(s.parseState) - 1
+ s.parseState = s.parseState[0:n]
+ s.redo = false
+ if n == 0 {
+ s.step = stateEndTop
+ s.endTop = true
+ } else {
+ s.step = stateEndValue
+ }
+}
+
+func isSpace(c rune) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return scanSkipSpace
+ }
+ if c == ']' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return scanSkipSpace
+ }
+ switch c {
+ case '{':
+ s.step = stateBeginStringOrEmpty
+ s.pushParseState(parseObjectKey)
+ return scanBeginObject
+ case '[':
+ s.step = stateBeginValueOrEmpty
+ s.pushParseState(parseArrayValue)
+ return scanBeginArray
+ case '"':
+ s.step = stateInString
+ return scanBeginLiteral
+ case '\'':
+ s.step = stateInSingleQuotedString
+ return scanBeginLiteral
+ case '+', '-':
+ s.step = stateSign
+ return scanBeginLiteral
+ case '0': // beginning of 0.123
+ s.step = state0
+ return scanBeginLiteral
+ case '.': // beginning of .123
+ s.step = stateDot
+ return scanBeginLiteral
+ case 't': // beginning of true
+ s.step = stateT
+ return scanBeginLiteral
+ case 'f': // beginning of false
+ s.step = stateF
+ return scanBeginLiteral
+ case 'n': // beginning of null
+ s.step = stateN
+ return scanBeginLiteral
+ }
+ if '1' <= c && c <= '9' { // beginning of 1234.5
+ s.step = state1
+ return scanBeginLiteral
+ }
+ return stateBeginExtendedValue(s, c)
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return scanSkipSpace
+ }
+ if c == '}' {
+ n := len(s.parseState)
+ s.parseState[n-1] = parseObjectValue
+ return stateEndValue(s, c)
+ }
+ return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c int) int {
+ if c <= ' ' && isSpace(rune(c)) {
+ return scanSkipSpace
+ }
+ if c == '"' {
+ s.step = stateInString
+ return scanBeginLiteral
+ }
+ if c == '\'' {
+ s.step = stateInSingleQuotedString
+ return scanBeginLiteral
+ }
+ if isBeginUnquotedString(c) {
+ s.step = stateInUnquotedString
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c int) int {
+ n := len(s.parseState)
+ if n == 0 {
+ // Completed top-level before the current byte.
+ s.step = stateEndTop
+ s.endTop = true
+ return stateEndTop(s, c)
+ }
+ if c <= ' ' && isSpace(rune(c)) {
+ s.step = stateEndValue
+ return scanSkipSpace
+ }
+ ps := s.parseState[n-1]
+ switch ps {
+ case parseObjectKey:
+ if c == ':' {
+ s.parseState[n-1] = parseObjectValue
+ s.step = stateBeginValue
+ return scanObjectKey
+ }
+ return s.error(c, "after object key")
+ case parseObjectValue:
+ if c == ',' {
+ s.parseState[n-1] = parseObjectKey
+ s.step = stateBeginString
+ return scanObjectValue
+ }
+ if c == '}' {
+ s.popParseState()
+ return scanEndObject
+ }
+ return s.error(c, "after object key:value pair")
+ case parseArrayValue:
+ if c == ',' {
+ s.step = stateBeginValue
+ return scanArrayValue
+ }
+ if c == ']' {
+ s.popParseState()
+ return scanEndArray
+ }
+ return s.error(c, "after array element")
+ case parseCtorArg:
+ if c == ',' {
+ s.step = stateBeginValue
+ return scanCtorArg
+ }
+ if c == ')' {
+ s.popParseState()
+ return scanEndCtor
+ }
+ return s.error(c, "after constructor argument")
+ }
+ return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c int) int {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ // Complain about non-space byte on next call.
+ s.error(c, "after top-level value")
+ }
+ return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c int) int {
+ if c == '"' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ if c == '\\' {
+ s.step = stateInStringEsc
+ return scanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c int) int {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+ s.step = stateInString
+ return scanContinue
+ }
+ if c == 'u' {
+ s.step = stateInStringEscU
+ return scanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU1
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU12
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU123
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInString
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateSign is the state after reading `+` or `-` during a number.
+func stateSign(s *scanner, c int) int {
+ if c == '0' {
+ s.step = state0
+ return scanContinue
+ }
+ if c == '.' {
+ s.step = stateDot
+ return scanContinue
+ }
+ if '1' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ if c == 'I' {
+ s.step = stateI
+ return scanContinue
+ }
+ return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c int) int {
+ if c == '.' {
+ s.step = stateDot
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ if c == 'x' || c == 'X' {
+ s.step = stateHex
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c int) int {
+ if c == '+' {
+ s.step = stateESign
+ return scanContinue
+ }
+ if c == '-' {
+ s.step = stateESign
+ return scanContinue
+ }
+ return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c int) int {
+ if c == 'r' {
+ s.step = stateTr
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c int) int {
+ if c == 'u' {
+ s.step = stateTru
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c int) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c int) int {
+ if c == 'a' {
+ s.step = stateFa
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c int) int {
+ if c == 'l' {
+ s.step = stateFal
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c int) int {
+ if c == 's' {
+ s.step = stateFals
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c int) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c int) int {
+ if c == 'e' {
+ s.step = stateNe
+ return scanContinue
+ }
+ if c == 'u' {
+ s.step = stateNu
+ return scanContinue
+ }
+ return s.error(c, "in literal new or null (expecting 'e' or 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c int) int {
+ if c == 'l' {
+ s.step = stateNul
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c int) int {
+ if c == 'l' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c int) int {
+ return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c int, context string) int {
+ s.step = stateError
+ s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+ return scanError
+}
+
+// quoteChar formats c as a quoted character literal
+func quoteChar(c int) string {
+ // special cases - different from quoted strings
+ if c == '\'' {
+ return `'\''`
+ }
+ if c == '"' {
+ return `'"'`
+ }
+
+ // use quoted string with different quotation marks
+ s := strconv.Quote(string(c))
+ return "'" + s[1:len(s)-1] + "'"
+}
+
+// undo causes the scanner to return scanCode from the next state transition.
+// This gives callers a simple 1-byte undo mechanism.
+func (s *scanner) undo(scanCode int) {
+ if s.redo {
+ panic("json: invalid use of scanner")
+ }
+ s.redoCode = scanCode
+ s.redoState = s.step
+ s.step = stateRedo
+ s.redo = true
+}
+
+// stateRedo helps implement the scanner's 1-byte undo.
+func stateRedo(s *scanner, c int) int {
+ s.redo = false
+ s.step = s.redoState
+ return s.redoCode
+}
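One subtlety in this file is worth a concrete illustration: scanEnd is only reported on the byte after a top-level value, which is why eof() pushes a synthetic space through the machine to flush a trailing number. A sketch written as an in-package test, since the scanner type is unexported (the test name is illustrative, not part of this change):

```go
package json

import "testing"

// TestScannerDelayedEnd is an illustrative sketch, not part of the original
// change set: "123" alone never yields scanEnd, because the scanner cannot
// know the number is complete until a byte that cannot extend it arrives.
func TestScannerDelayedEnd(t *testing.T) {
	var s scanner
	s.reset()

	for _, c := range []byte("123") {
		if op := s.step(&s, int(c)); op == scanEnd || op == scanError {
			t.Fatalf("unexpected opcode %d before end of input", op)
		}
	}

	// eof() feeds a synthetic space so the pending number can be flushed.
	if op := s.eof(); op != scanEnd {
		t.Fatalf("expected scanEnd at EOF, got %d (err: %v)", op, s.err)
	}
}
```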
diff --git a/src/mongo/gotools/common/json/scanner_test.go b/src/mongo/gotools/common/json/scanner_test.go
new file mode 100644
index 00000000000..93dc25a5f1b
--- /dev/null
+++ b/src/mongo/gotools/common/json/scanner_test.go
@@ -0,0 +1,315 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "math"
+ "math/rand"
+ "reflect"
+ "testing"
+)
+
+// Tests of simple examples.
+
+type example struct {
+ compact string
+ indent string
+}
+
+var examples = []example{
+ {`1`, `1`},
+ {`{}`, `{}`},
+ {`[]`, `[]`},
+ {`{"":2}`, "{\n\t\"\": 2\n}"},
+ {`[3]`, "[\n\t3\n]"},
+ {`[1,2,3]`, "[\n\t1,\n\t2,\n\t3\n]"},
+ {`{"x":1}`, "{\n\t\"x\": 1\n}"},
+ {ex1, ex1i},
+}
+
+var ex1 = `[true,false,null,"x",1,1.5,0,-5e+2]`
+
+var ex1i = `[
+ true,
+ false,
+ null,
+ "x",
+ 1,
+ 1.5,
+ 0,
+ -5e+2
+]`
+
+func TestCompact(t *testing.T) {
+ var buf bytes.Buffer
+ for _, tt := range examples {
+ buf.Reset()
+ if err := Compact(&buf, []byte(tt.compact)); err != nil {
+ t.Errorf("Compact(%#q): %v", tt.compact, err)
+ } else if s := buf.String(); s != tt.compact {
+ t.Errorf("Compact(%#q) = %#q, want original", tt.compact, s)
+ }
+
+ buf.Reset()
+ if err := Compact(&buf, []byte(tt.indent)); err != nil {
+ t.Errorf("Compact(%#q): %v", tt.indent, err)
+ continue
+ } else if s := buf.String(); s != tt.compact {
+ t.Errorf("Compact(%#q) = %#q, want %#q", tt.indent, s, tt.compact)
+ }
+ }
+}
+
+func TestCompactSeparators(t *testing.T) {
+ // U+2028 and U+2029 should be escaped inside strings.
+ // They should not appear outside strings.
+ tests := []struct {
+ in, compact string
+ }{
+ {"{\"\u2028\": 1}", `{"\u2028":1}`},
+ {"{\"\u2029\" :2}", `{"\u2029":2}`},
+ }
+ for _, tt := range tests {
+ var buf bytes.Buffer
+ if err := Compact(&buf, []byte(tt.in)); err != nil {
+ t.Errorf("Compact(%q): %v", tt.in, err)
+ } else if s := buf.String(); s != tt.compact {
+ t.Errorf("Compact(%q) = %q, want %q", tt.in, s, tt.compact)
+ }
+ }
+}
+
+func TestIndent(t *testing.T) {
+ var buf bytes.Buffer
+ for _, tt := range examples {
+ buf.Reset()
+ if err := Indent(&buf, []byte(tt.indent), "", "\t"); err != nil {
+ t.Errorf("Indent(%#q): %v", tt.indent, err)
+ } else if s := buf.String(); s != tt.indent {
+ t.Errorf("Indent(%#q) = %#q, want original", tt.indent, s)
+ }
+
+ buf.Reset()
+ if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
+ t.Errorf("Indent(%#q): %v", tt.compact, err)
+ continue
+ } else if s := buf.String(); s != tt.indent {
+ t.Errorf("Indent(%#q) = %#q, want %#q", tt.compact, s, tt.indent)
+ }
+ }
+}
+
+// Tests of a large random structure.
+
+func TestCompactBig(t *testing.T) {
+ initBig()
+ var buf bytes.Buffer
+ if err := Compact(&buf, jsonBig); err != nil {
+ t.Fatalf("Compact: %v", err)
+ }
+ b := buf.Bytes()
+ if !bytes.Equal(b, jsonBig) {
+ t.Error("Compact(jsonBig) != jsonBig")
+ diff(t, b, jsonBig)
+ return
+ }
+}
+
+func TestIndentBig(t *testing.T) {
+ initBig()
+ var buf bytes.Buffer
+ if err := Indent(&buf, jsonBig, "", "\t"); err != nil {
+ t.Fatalf("Indent1: %v", err)
+ }
+ b := buf.Bytes()
+ if len(b) == len(jsonBig) {
+ // jsonBig is compact (no unnecessary spaces);
+ // indenting should make it bigger
+ t.Fatalf("Indent(jsonBig) did not get bigger")
+ }
+
+ // should be idempotent
+ var buf1 bytes.Buffer
+ if err := Indent(&buf1, b, "", "\t"); err != nil {
+ t.Fatalf("Indent2: %v", err)
+ }
+ b1 := buf1.Bytes()
+ if !bytes.Equal(b1, b) {
+ t.Error("Indent(Indent(jsonBig)) != Indent(jsonBig)")
+ diff(t, b1, b)
+ return
+ }
+
+ // should get back to original
+ buf1.Reset()
+ if err := Compact(&buf1, b); err != nil {
+ t.Fatalf("Compact: %v", err)
+ }
+ b1 = buf1.Bytes()
+ if !bytes.Equal(b1, jsonBig) {
+ t.Error("Compact(Indent(jsonBig)) != jsonBig")
+ diff(t, b1, jsonBig)
+ return
+ }
+}
+
+type indentErrorTest struct {
+ in string
+ err error
+}
+
+var indentErrorTests = []indentErrorTest{
+ {`{"X": "foo", "Y"}`, &SyntaxError{"invalid character '}' after object key", 17}},
+ {`{"X": "foo" "Y": "bar"}`, &SyntaxError{"invalid character '\"' after object key:value pair", 13}},
+}
+
+func TestIndentErrors(t *testing.T) {
+ for i, tt := range indentErrorTests {
+ var slice []uint8
+ buf := bytes.NewBuffer(slice)
+ if err := Indent(buf, []uint8(tt.in), "", ""); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: Indent: %#v", i, err)
+ continue
+ }
+ }
+ }
+}
+
+func TestNextValueBig(t *testing.T) {
+ initBig()
+ var scan scanner
+ item, rest, err := nextValue(jsonBig, &scan)
+ if err != nil {
+ t.Fatalf("nextValue: %s", err)
+ }
+ if len(item) != len(jsonBig) || &item[0] != &jsonBig[0] {
+ t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
+ }
+ if len(rest) != 0 {
+ t.Errorf("invalid rest: %d", len(rest))
+ }
+
+ item, rest, err = nextValue(append(jsonBig, "HELLO WORLD"...), &scan)
+ if err != nil {
+ t.Fatalf("nextValue extra: %s", err)
+ }
+ if len(item) != len(jsonBig) {
+ t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
+ }
+ if string(rest) != "HELLO WORLD" {
+ t.Errorf("invalid rest: %d", len(rest))
+ }
+}
+
+var benchScan scanner
+
+func BenchmarkSkipValue(b *testing.B) {
+ initBig()
+ for i := 0; i < b.N; i++ {
+ nextValue(jsonBig, &benchScan)
+ }
+ b.SetBytes(int64(len(jsonBig)))
+}
+
+func diff(t *testing.T, a, b []byte) {
+ for i := 0; ; i++ {
+ if i >= len(a) || i >= len(b) || a[i] != b[i] {
+ j := i - 10
+ if j < 0 {
+ j = 0
+ }
+ t.Errorf("diverge at %d: «%s» vs «%s»", i, trim(a[j:]), trim(b[j:]))
+ return
+ }
+ }
+}
+
+func trim(b []byte) []byte {
+ if len(b) > 20 {
+ return b[0:20]
+ }
+ return b
+}
+
+// Generate a random JSON object.
+
+var jsonBig []byte
+
+func initBig() {
+ n := 10000
+ if testing.Short() {
+ n = 100
+ }
+ b, err := Marshal(genValue(n))
+ if err != nil {
+ panic(err)
+ }
+ jsonBig = b
+}
+
+func genValue(n int) interface{} {
+ if n > 1 {
+ switch rand.Intn(2) {
+ case 0:
+ return genArray(n)
+ case 1:
+ return genMap(n)
+ }
+ }
+ switch rand.Intn(3) {
+ case 0:
+ return rand.Intn(2) == 0
+ case 1:
+ return rand.NormFloat64()
+ case 2:
+ return genString(30)
+ }
+ panic("unreachable")
+}
+
+func genString(stddev float64) string {
+ n := int(math.Abs(rand.NormFloat64()*stddev + stddev/2))
+ c := make([]rune, n)
+ for i := range c {
+ f := math.Abs(rand.NormFloat64()*64 + 32)
+ if f > 0x10ffff {
+ f = 0x10ffff
+ }
+ c[i] = rune(f)
+ }
+ return string(c)
+}
+
+func genArray(n int) []interface{} {
+ f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
+ if f > n {
+ f = n
+ }
+ if f < 1 {
+ f = 1
+ }
+ x := make([]interface{}, f)
+ for i := range x {
+ x[i] = genValue(((i+1)*n)/f - (i*n)/f)
+ }
+ return x
+}
+
+func genMap(n int) map[string]interface{} {
+ f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
+ if f > n {
+ f = n
+ }
+ if n > 0 && f == 0 {
+ f = 1
+ }
+ x := make(map[string]interface{})
+ for i := 0; i < f; i++ {
+ x[genString(10)] = genValue(((i+1)*n)/f - (i*n)/f)
+ }
+ return x
+}
diff --git a/src/mongo/gotools/common/json/single_quoted.go b/src/mongo/gotools/common/json/single_quoted.go
new file mode 100644
index 00000000000..ca465ee04f2
--- /dev/null
+++ b/src/mongo/gotools/common/json/single_quoted.go
@@ -0,0 +1,74 @@
+package json
+
+// Transition functions for recognizing single-quoted strings.
+// Adapted from encoding/json/scanner.go.
+
+// stateInSingleQuotedString is the state after reading `'`.
+func stateInSingleQuotedString(s *scanner, c int) int {
+ if c == '\'' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ if c == '\\' {
+ s.step = stateInSingleQuotedStringEsc
+ return scanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return scanContinue
+}
+
+// stateInSingleQuotedStringEsc is the state after reading `'\` during a quoted string.
+func stateInSingleQuotedStringEsc(s *scanner, c int) int {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '\'':
+ s.step = stateInSingleQuotedString
+ return scanContinue
+ }
+ if c == 'u' {
+ s.step = stateInSingleQuotedStringEscU
+ return scanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInSingleQuotedStringEscU is the state after reading `'\u` during a quoted string.
+func stateInSingleQuotedStringEscU(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInSingleQuotedStringEscU1
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInSingleQuotedStringEscU1 is the state after reading `'\u1` during a quoted string.
+func stateInSingleQuotedStringEscU1(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInSingleQuotedStringEscU12
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInSingleQuotedStringEscU12 is the state after reading `'\u12` during a quoted string.
+func stateInSingleQuotedStringEscU12(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInSingleQuotedStringEscU123
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInSingleQuotedStringEscU123 is the state after reading `'\u123` during a quoted string.
+func stateInSingleQuotedStringEscU123(s *scanner, c int) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInSingleQuotedString
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
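These states accept the same escape set as double-quoted strings, plus an escaped single quote. A sketch of the intended round trip, assuming the decoder unescapes exactly what these scanner states accept (the tests below only cover unescaped strings, so this is an assumption):

```go
package json

import "testing"

// TestSingleQuotedEscapes is an illustrative sketch, not part of the original
// change set. It assumes the decoder unescapes \' and \uXXXX inside
// single-quoted strings, mirroring the scanner states above.
func TestSingleQuotedEscapes(t *testing.T) {
	var doc map[string]interface{}

	if err := Unmarshal([]byte(`{'k':'it\'s \u0041'}`), &doc); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got := doc["k"]; got != "it's A" {
		t.Fatalf("got %q", got)
	}
}
```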
diff --git a/src/mongo/gotools/common/json/single_quoted_test.go b/src/mongo/gotools/common/json/single_quoted_test.go
new file mode 100644
index 00000000000..6b9849109c4
--- /dev/null
+++ b/src/mongo/gotools/common/json/single_quoted_test.go
@@ -0,0 +1,156 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestSingleQuotedKeys(t *testing.T) {
+
+ Convey("When unmarshalling JSON with single quotes around its keys", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "value"
+ data := fmt.Sprintf(`{'%v':"%v"}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ So(jsonMap[key], ShouldEqual, value)
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "value1", "value2", "value3"
+ data := fmt.Sprintf(`{'%v':"%v",'%v':"%v",'%v':"%v"}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ So(jsonMap[key1], ShouldEqual, value1)
+ So(jsonMap[key2], ShouldEqual, value2)
+ So(jsonMap[key3], ShouldEqual, value3)
+ })
+ })
+}
+
+func TestSingleQuotedValues(t *testing.T) {
+
+ Convey("When unmarshalling JSON with single quotes around its values", t, func() {
+
+ Convey("works for a single value", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "value"
+ data := fmt.Sprintf(`{"%v":'%v'}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ So(jsonMap[key], ShouldEqual, value)
+ })
+
+ Convey("works for multiple values", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "value1", "value2", "value3"
+ data := fmt.Sprintf(`{"%v":'%v',"%v":'%v',"%v":'%v'}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ So(jsonMap[key1], ShouldEqual, value1)
+ So(jsonMap[key2], ShouldEqual, value2)
+ So(jsonMap[key3], ShouldEqual, value3)
+ })
+
+ Convey("can be used within BinData constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "bindata"
+ value := "BinData(1, 'xyz')"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(BinData)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Type, ShouldEqual, 1)
+ So(jsonValue.Base64, ShouldEqual, "xyz")
+ })
+
+ Convey("can be used within Boolean constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "boolean"
+ value := "Boolean('xyz')"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(bool)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, true)
+ })
+
+ Convey("can be used within DBRef constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "dbref"
+ value := "DBRef('examples', 'xyz')"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(DBRef)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Collection, ShouldEqual, "examples")
+ So(jsonValue.Id, ShouldEqual, "xyz")
+ So(jsonValue.Database, ShouldBeEmpty)
+ })
+
+ Convey("can be used within ObjectId constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "_id"
+ value := "ObjectId('xyz')"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(ObjectId)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldEqual, ObjectId("xyz"))
+ })
+
+ Convey("can be used within RegExp constructor", func() {
+ var jsonMap map[string]interface{}
+
+ key := "regex"
+ value := "RegExp('xyz', 'i')"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(RegExp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue.Pattern, ShouldEqual, "xyz")
+ So(jsonValue.Options, ShouldEqual, "i")
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/stream.go b/src/mongo/gotools/common/json/stream.go
new file mode 100644
index 00000000000..58443f211ec
--- /dev/null
+++ b/src/mongo/gotools/common/json/stream.go
@@ -0,0 +1,243 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "errors"
+ "io"
+)
+
+// A Decoder reads and decodes JSON objects from an input stream.
+type Decoder struct {
+ R io.Reader
+ Buf []byte
+ d decodeState
+ scan scanner
+ err error
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may
+// read data from r beyond the JSON values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{R: r}
+}
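+
+// A minimal usage sketch (the reader source and loop shape are illustrative,
+// not part of this package's API):
+//
+//	dec := NewDecoder(strings.NewReader(`{"a": 1} {"b": 2}`))
+//	var v interface{}
+//	for dec.Decode(&v) == nil {
+//		fmt.Println(v)
+//	}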
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
+
+// DecodeMap reads the next JSON-encoded value from its
+// input and stores it in a map[string]interface{}.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (dec *Decoder) DecodeMap() (map[string]interface{}, error) {
+ if dec.err != nil {
+ return nil, dec.err
+ }
+
+ n, err := dec.readValue()
+ if err != nil {
+ return nil, err
+ }
+
+ // Don't save err from unmarshal into dec.err:
+ // the connection is still usable since we read a complete JSON
+ // object from it before the error happened.
+ dec.d.init(dec.Buf[0:n])
+ out, err := dec.d.unmarshalMap()
+
+ // Slide rest of data down.
+ rest := copy(dec.Buf, dec.Buf[n:])
+ dec.Buf = dec.Buf[0:rest]
+
+ return out, err
+}
+
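+// ScanObject reads the next JSON value from the input and returns a copy of
+// its raw, undecoded bytes, leaving any remaining input buffered for the
+// next call.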
+func (dec *Decoder) ScanObject() ([]byte, error) {
+ if dec.err != nil {
+ return nil, dec.err
+ }
+
+ n, err := dec.readValue()
+ if err != nil {
+ return nil, err
+ }
+
+ outbuf := make([]byte, n)
+ copy(outbuf, dec.Buf[0:n])
+ // Slide rest of data down.
+ rest := copy(dec.Buf, dec.Buf[n:])
+ dec.Buf = dec.Buf[0:rest]
+ return outbuf, nil
+}
+
+func (dec *Decoder) Decode(v interface{}) error {
+ if dec.err != nil {
+ return dec.err
+ }
+
+ n, err := dec.readValue()
+ if err != nil {
+ return err
+ }
+
+ // Don't save err from unmarshal into dec.err:
+ // the connection is still usable since we read a complete JSON
+ // object from it before the error happened.
+ dec.d.init(dec.Buf[0:n])
+ err = dec.d.unmarshal(v)
+
+ // Slide rest of data down.
+ rest := copy(dec.Buf, dec.Buf[n:])
+ dec.Buf = dec.Buf[0:rest]
+
+ return err
+}
+
+// Buffered returns a reader of the data remaining in the Decoder's
+// buffer. The reader is valid until the next call to Decode.
+func (dec *Decoder) Buffered() io.Reader {
+ return bytes.NewReader(dec.Buf)
+}
+
+// readValue reads a JSON value into dec.Buf.
+// It returns the length of the encoding.
+func (dec *Decoder) readValue() (int, error) {
+ dec.scan.reset()
+
+ scanp := 0
+ var err error
+Input:
+ for {
+ // Look in the buffer for a new value.
+ for i, c := range dec.Buf[scanp:] {
+ dec.scan.bytes++
+ v := dec.scan.step(&dec.scan, int(c))
+ if v == scanEnd {
+ scanp += i
+ break Input
+ }
+ // scanEnd is delayed one byte.
+ // We might block trying to get that byte from src,
+ // so instead invent a space byte.
+ if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
+ scanp += i + 1
+ break Input
+ }
+ if v == scanError {
+ dec.err = dec.scan.err
+ return 0, dec.scan.err
+ }
+ }
+ scanp = len(dec.Buf)
+
+ // Did the last read have an error?
+ // Delayed until now to allow buffer scan.
+ if err != nil {
+ if err == io.EOF {
+ if dec.scan.step(&dec.scan, ' ') == scanEnd {
+ break Input
+ }
+ if nonSpace(dec.Buf) {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ dec.err = err
+ return 0, err
+ }
+
+ // Make room to read more into the buffer.
+ const minRead = 512
+ if cap(dec.Buf)-len(dec.Buf) < minRead {
+ newBuf := make([]byte, len(dec.Buf), 2*cap(dec.Buf)+minRead)
+ copy(newBuf, dec.Buf)
+ dec.Buf = newBuf
+ }
+
+ // Read. Delay error for next iteration (after scan).
+ var n int
+ n, err = dec.R.Read(dec.Buf[len(dec.Buf):cap(dec.Buf)])
+ dec.Buf = dec.Buf[0 : len(dec.Buf)+n]
+ }
+ return scanp, nil
+}
+
+func nonSpace(b []byte) bool {
+ for _, c := range b {
+ if !isSpace(rune(c)) {
+ return true
+ }
+ }
+ return false
+}
+
+// An Encoder writes JSON objects to an output stream.
+type Encoder struct {
+ w io.Writer
+ e encodeState
+ err error
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w}
+}
+
+// Encode writes the JSON encoding of v to the stream,
+// followed by a newline character.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values to JSON.
+func (enc *Encoder) Encode(v interface{}) error {
+ if enc.err != nil {
+ return enc.err
+ }
+ e := newEncodeState()
+ err := e.marshal(v)
+ if err != nil {
+ return err
+ }
+
+ // Terminate each value with a newline.
+ // This makes the output look a little nicer
+ // when debugging, and some kind of space
+ // is required if the encoded value was a number,
+ // so that the reader knows there aren't more
+ // digits coming.
+ e.WriteByte('\n')
+
+ if _, err = enc.w.Write(e.Bytes()); err != nil {
+ enc.err = err
+ }
+ encodeStatePool.Put(e)
+ return err
+}
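+
+// A usage sketch (the writer and value are illustrative):
+//
+//	enc := NewEncoder(os.Stdout)
+//	if err := enc.Encode(map[string]int{"n": 1}); err != nil {
+//		// handle err
+//	}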
+
+// RawMessage is a raw encoded JSON object.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage []byte
+
+// MarshalJSON returns *m as the JSON encoding of m.
+func (m *RawMessage) MarshalJSON() ([]byte, error) {
+ return *m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+ if m == nil {
+ return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
+ }
+ *m = append((*m)[0:0], data...)
+ return nil
+}
+
+var _ Marshaler = (*RawMessage)(nil)
+var _ Unmarshaler = (*RawMessage)(nil)
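+
+// A delayed-decoding sketch for RawMessage (the struct shape is illustrative):
+//
+//	var env struct {
+//		Type string
+//		Data RawMessage // left encoded until Type is inspected
+//	}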
diff --git a/src/mongo/gotools/common/json/stream_test.go b/src/mongo/gotools/common/json/stream_test.go
new file mode 100644
index 00000000000..b562e87690d
--- /dev/null
+++ b/src/mongo/gotools/common/json/stream_test.go
@@ -0,0 +1,206 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+// Test values for the stream test.
+// One of each JSON kind.
+var streamTest = []interface{}{
+ 0.1,
+ "hello",
+ nil,
+ true,
+ false,
+ []interface{}{"a", "b", "c"},
+ map[string]interface{}{"K": "Kelvin", "ß": "long s"},
+ 3.14, // another value to make sure something can follow map
+}
+
+var streamEncoded = `0.1
+"hello"
+null
+true
+false
+["a","b","c"]
+{"ß":"long s","K":"Kelvin"}
+3.14
+`
+
+func TestEncoder(t *testing.T) {
+ for i := 0; i <= len(streamTest); i++ {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ for j, v := range streamTest[0:i] {
+ if err := enc.Encode(v); err != nil {
+ t.Fatalf("encode #%d: %v", j, err)
+ }
+ }
+ if have, want := buf.String(), nlines(streamEncoded, i); have != want {
+ t.Errorf("encoding %d items: mismatch", i)
+ diff(t, []byte(have), []byte(want))
+ break
+ }
+ }
+}
+
+func TestDecoder(t *testing.T) {
+ for i := 0; i <= len(streamTest); i++ {
+ // Use stream without newlines as input,
+ // just to stress the decoder even more.
+ // Our test input does not include back-to-back numbers.
+ // Otherwise stripping the newlines would
+ // merge two adjacent JSON values.
+ var buf bytes.Buffer
+ for _, c := range nlines(streamEncoded, i) {
+ if c != '\n' {
+ buf.WriteRune(c)
+ }
+ }
+ out := make([]interface{}, i)
+ dec := NewDecoder(&buf)
+ for j := range out {
+ if err := dec.Decode(&out[j]); err != nil {
+ t.Fatalf("decode #%d/%d: %v", j, i, err)
+ }
+ }
+ if !reflect.DeepEqual(out, streamTest[0:i]) {
+ t.Errorf("decoding %d items: mismatch", i)
+ for j := range out {
+ if !reflect.DeepEqual(out[j], streamTest[j]) {
+ t.Errorf("#%d: have %v want %v", j, out[j], streamTest[j])
+ }
+ }
+ break
+ }
+ }
+}
+
+func TestDecoderBuffered(t *testing.T) {
+ r := strings.NewReader(`{"Name": "Gopher"} extra `)
+ var m struct {
+ Name string
+ }
+ d := NewDecoder(r)
+ err := d.Decode(&m)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if m.Name != "Gopher" {
+ t.Errorf("Name = %q; want Gopher", m.Name)
+ }
+ rest, err := ioutil.ReadAll(d.Buffered())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if g, w := string(rest), " extra "; g != w {
+ t.Errorf("Remaining = %q; want %q", g, w)
+ }
+}
+
+func nlines(s string, n int) string {
+ if n <= 0 {
+ return ""
+ }
+ for i, c := range s {
+ if c == '\n' {
+ if n--; n == 0 {
+ return s[0 : i+1]
+ }
+ }
+ }
+ return s
+}
+
+func TestRawMessage(t *testing.T) {
+ // TODO(rsc): Should not need the * in *RawMessage
+ var data struct {
+ X float64
+ Id *RawMessage
+ Y float32
+ }
+ const raw = `["\u0056",null]`
+ const msg = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}`
+ err := Unmarshal([]byte(msg), &data)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if string([]byte(*data.Id)) != raw {
+ t.Fatalf("Raw mismatch: have %#q want %#q", []byte(*data.Id), raw)
+ }
+ b, err := Marshal(&data)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if string(b) != msg {
+ t.Fatalf("Marshal: have %#q want %#q", b, msg)
+ }
+}
+
+func TestNullRawMessage(t *testing.T) {
+ // TODO(rsc): Should not need the * in *RawMessage
+ var data struct {
+ X float64
+ Id *RawMessage
+ Y float32
+ }
+ data.Id = new(RawMessage)
+ const msg = `{"X":0.1,"Id":null,"Y":0.2}`
+ err := Unmarshal([]byte(msg), &data)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if data.Id != nil {
+ t.Fatalf("Raw mismatch: have non-nil, want nil")
+ }
+ b, err := Marshal(&data)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if string(b) != msg {
+ t.Fatalf("Marshal: have %#q want %#q", b, msg)
+ }
+}
+
+var blockingTests = []string{
+ `{"x": 1}`,
+ `[1, 2, 3]`,
+}
+
+func TestBlocking(t *testing.T) {
+ for _, enc := range blockingTests {
+ r, w := net.Pipe()
+ go w.Write([]byte(enc))
+ var val interface{}
+
+ // If Decode reads beyond what w.Write writes above,
+ // it will block, and the test will deadlock.
+ if err := NewDecoder(r).Decode(&val); err != nil {
+ t.Errorf("decoding %s: %v", enc, err)
+ }
+ r.Close()
+ w.Close()
+ }
+}
+
+func BenchmarkEncoderEncode(b *testing.B) {
+ b.ReportAllocs()
+ type T struct {
+ X, Y string
+ }
+ v := &T{"foo", "bar"}
+ for i := 0; i < b.N; i++ {
+ if err := NewEncoder(ioutil.Discard).Encode(v); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/src/mongo/gotools/common/json/tagkey_test.go b/src/mongo/gotools/common/json/tagkey_test.go
new file mode 100644
index 00000000000..1c42a903fd2
--- /dev/null
+++ b/src/mongo/gotools/common/json/tagkey_test.go
@@ -0,0 +1,110 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "testing"
+)
+
+type basicLatin2xTag struct {
+ V string `json:"$%-/"`
+}
+
+type basicLatin3xTag struct {
+ V string `json:"0123456789"`
+}
+
+type basicLatin4xTag struct {
+ V string `json:"ABCDEFGHIJKLMO"`
+}
+
+type basicLatin5xTag struct {
+ V string `json:"PQRSTUVWXYZ_"`
+}
+
+type basicLatin6xTag struct {
+ V string `json:"abcdefghijklmno"`
+}
+
+type basicLatin7xTag struct {
+ V string `json:"pqrstuvwxyz"`
+}
+
+type miscPlaneTag struct {
+ V string `json:"色は匂へど"`
+}
+
+type percentSlashTag struct {
+ V string `json:"text/html%"` // http://golang.org/issue/2718
+}
+
+type punctuationTag struct {
+ V string `json:"!#$%&()*+-./:<=>?@[]^_{|}~"` // http://golang.org/issue/3546
+}
+
+type emptyTag struct {
+ W string
+}
+
+type misnamedTag struct {
+ X string `jsom:"Misnamed"`
+}
+
+type badCodeTag struct {
+ Z string `json:" !\"#&'()*+,."`
+}
+
+type spaceTag struct {
+ Q string `json:"With space"`
+}
+
+type unicodeTag struct {
+ W string `json:"Ελλάδα"`
+}
+
+var structTagObjectKeyTests = []struct {
+ raw interface{}
+ value string
+ key string
+}{
+ {basicLatin2xTag{"2x"}, "2x", "$%-/"},
+ {basicLatin3xTag{"3x"}, "3x", "0123456789"},
+ {basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"},
+ {basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"},
+ {basicLatin6xTag{"6x"}, "6x", "abcdefghijklmno"},
+ {basicLatin7xTag{"7x"}, "7x", "pqrstuvwxyz"},
+ {miscPlaneTag{"いろはにほへと"}, "いろはにほへと", "色は匂へど"},
+ {emptyTag{"Pour Moi"}, "Pour Moi", "W"},
+ {misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"},
+ {badCodeTag{"Reliable Man"}, "Reliable Man", "Z"},
+ {percentSlashTag{"brut"}, "brut", "text/html%"},
+ {punctuationTag{"Union Rags"}, "Union Rags", "!#$%&()*+-./:<=>?@[]^_{|}~"},
+ {spaceTag{"Perreddu"}, "Perreddu", "With space"},
+ {unicodeTag{"Loukanikos"}, "Loukanikos", "Ελλάδα"},
+}
+
+func TestStructTagObjectKey(t *testing.T) {
+ for _, tt := range structTagObjectKeyTests {
+ b, err := Marshal(tt.raw)
+ if err != nil {
+ t.Fatalf("Marshal(%#q) failed: %v", tt.raw, err)
+ }
+ var f interface{}
+ err = Unmarshal(b, &f)
+ if err != nil {
+ t.Fatalf("Unmarshal(%#q) failed: %v", b, err)
+ }
+ for i, v := range f.(map[string]interface{}) {
+ switch i {
+ case tt.key:
+ if s, ok := v.(string); !ok || s != tt.value {
+ t.Fatalf("Unexpected value: %#q, want %v", s, tt.value)
+ }
+ default:
+ t.Fatalf("Unexpected key: %#q, from %#q", i, b)
+ }
+ }
+ }
+}
diff --git a/src/mongo/gotools/common/json/tags.go b/src/mongo/gotools/common/json/tags.go
new file mode 100644
index 00000000000..c38fd5102f6
--- /dev/null
+++ b/src/mongo/gotools/common/json/tags.go
@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
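+
+// For example, parseTag("name,omitempty,string") yields the name "name" and
+// the options "omitempty,string" (illustrative values).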
+
+// Contains reports whether the comma-separated list of options
+// contains the given optionName. The name must be bounded by the
+// start or end of the string, or by commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
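+
+// For example, tagOptions("omitempty,string").Contains("string") is true,
+// while Contains("str") is false (illustrative values).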
diff --git a/src/mongo/gotools/common/json/tags_test.go b/src/mongo/gotools/common/json/tags_test.go
new file mode 100644
index 00000000000..91fb18831e2
--- /dev/null
+++ b/src/mongo/gotools/common/json/tags_test.go
@@ -0,0 +1,28 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "testing"
+)
+
+func TestTagParsing(t *testing.T) {
+ name, opts := parseTag("field,foobar,foo")
+ if name != "field" {
+ t.Fatalf("name = %q, want field", name)
+ }
+ for _, tt := range []struct {
+ opt string
+ want bool
+ }{
+ {"foobar", true},
+ {"foo", true},
+ {"bar", false},
+ } {
+ if opts.Contains(tt.opt) != tt.want {
+ t.Errorf("Contains(%q) = %v", tt.opt, !tt.want)
+ }
+ }
+}
diff --git a/src/mongo/gotools/common/json/testdata/code.json.gz b/src/mongo/gotools/common/json/testdata/code.json.gz
new file mode 100644
index 00000000000..0e2895b53ac
--- /dev/null
+++ b/src/mongo/gotools/common/json/testdata/code.json.gz
Binary files differ
diff --git a/src/mongo/gotools/common/json/timestamp.go b/src/mongo/gotools/common/json/timestamp.go
new file mode 100644
index 00000000000..dbc38ef56e6
--- /dev/null
+++ b/src/mongo/gotools/common/json/timestamp.go
@@ -0,0 +1,67 @@
+package json
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Transition functions for recognizing Timestamp.
+// Adapted from encoding/json/scanner.go.
+
+// stateUpperT is the state after reading `T`.
+func stateUpperT(s *scanner, c int) int {
+ if c == 'i' {
+ s.step = generateState("Timestamp", []byte("mestamp"), stateConstructor)
+ return scanContinue
+ }
+ return s.error(c, "in literal Timestamp (expecting 'i')")
+}
+
+// storeTimestamp decodes a Timestamp literal stored in the underlying byte data into v.
+func (d *decodeState) storeTimestamp(v reflect.Value) {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ args, err := d.ctor("Timestamp", []reflect.Type{uint32Type, uint32Type})
+ if err != nil {
+ d.error(err)
+ }
+ switch kind := v.Kind(); kind {
+ case reflect.Interface:
+ arg0 := uint32(args[0].Uint())
+ arg1 := uint32(args[1].Uint())
+ v.Set(reflect.ValueOf(Timestamp{arg0, arg1}))
+ default:
+ d.error(fmt.Errorf("cannot store %v value into %v type", timestampType, kind))
+ }
+}
+
+// getTimestamp returns a Timestamp literal parsed from the underlying byte data.
+func (d *decodeState) getTimestamp() interface{} {
+ op := d.scanWhile(scanSkipSpace)
+ if op != scanBeginCtor {
+ d.error(fmt.Errorf("expected beginning of constructor"))
+ }
+
+ // Prevent d.convertNumber() from parsing the arguments as float64s.
+ useNumber := d.useNumber
+ d.useNumber = true
+
+ args := d.ctorInterface()
+ if err := ctorNumArgsMismatch("Timestamp", 2, len(args)); err != nil {
+ d.error(err)
+ }
+ arg0, err := args[0].(Number).Uint32()
+ if err != nil {
+ d.error(fmt.Errorf("expected uint32 for first argument of Timestamp constructor"))
+ }
+ arg1, err := args[1].(Number).Uint32()
+ if err != nil {
+ d.error(fmt.Errorf("expected uint32 for second argument of Timestamp constructor"))
+ }
+
+ d.useNumber = useNumber
+ return Timestamp{arg0, arg1}
+}
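+
+// As a behavioral sketch, an input such as {"ts": Timestamp(123, 321)}
+// unmarshals the field into Timestamp{123, 321}; timestamp_test.go has the
+// authoritative cases.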
diff --git a/src/mongo/gotools/common/json/timestamp_test.go b/src/mongo/gotools/common/json/timestamp_test.go
new file mode 100644
index 00000000000..027d9b4cb30
--- /dev/null
+++ b/src/mongo/gotools/common/json/timestamp_test.go
@@ -0,0 +1,85 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestTimestampValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with Timestamp values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Timestamp(123, 321)"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(Timestamp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, Timestamp{123, 321})
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "Timestamp(123, 321)",
+ "Timestamp(456, 654)", "Timestamp(789, 987)"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(Timestamp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldResemble, Timestamp{123, 321})
+
+ jsonValue2, ok := jsonMap[key2].(Timestamp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldResemble, Timestamp{456, 654})
+
+ jsonValue3, ok := jsonMap[key3].(Timestamp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldResemble, Timestamp{789, 987})
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "Timestamp(42, 10)"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(Timestamp)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, Timestamp{42, 10})
+ }
+ })
+
+ Convey("cannot use string as argument", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := `Timestamp("123", "321")`
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/undefined.go b/src/mongo/gotools/common/json/undefined.go
new file mode 100644
index 00000000000..20cb3e9a8b3
--- /dev/null
+++ b/src/mongo/gotools/common/json/undefined.go
@@ -0,0 +1,13 @@
+package json
+
+// Transition functions for recognizing undefined.
+// Adapted from encoding/json/scanner.go.
+
+// stateU is the state after reading `u`.
+func stateU(s *scanner, c int) int {
+ if c == 'n' {
+ s.step = generateState("undefined", []byte("defined"), stateEndValue)
+ return scanContinue
+ }
+ return s.error(c, "in literal undefined (expecting 'n')")
+}
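+
+// As a behavioral sketch, an input such as {"key": undefined} unmarshals the
+// field into Undefined{}; undefined_test.go has the authoritative cases.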
diff --git a/src/mongo/gotools/common/json/undefined_test.go b/src/mongo/gotools/common/json/undefined_test.go
new file mode 100644
index 00000000000..367220ff8cd
--- /dev/null
+++ b/src/mongo/gotools/common/json/undefined_test.go
@@ -0,0 +1,89 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestUndefinedValue(t *testing.T) {
+
+ Convey("When unmarshalling JSON with undefined values", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "undefined"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue, ok := jsonMap[key].(Undefined)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, Undefined{})
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value := "undefined"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value, key2, value, key3, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonValue1, ok := jsonMap[key1].(Undefined)
+ So(ok, ShouldBeTrue)
+ So(jsonValue1, ShouldResemble, Undefined{})
+
+ jsonValue2, ok := jsonMap[key2].(Undefined)
+ So(ok, ShouldBeTrue)
+ So(jsonValue2, ShouldResemble, Undefined{})
+
+ jsonValue3, ok := jsonMap[key3].(Undefined)
+ So(ok, ShouldBeTrue)
+ So(jsonValue3, ShouldResemble, Undefined{})
+ })
+
+ Convey("works in an array", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "undefined"
+ data := fmt.Sprintf(`{"%v":[%v,%v,%v]}`,
+ key, value, value, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ jsonArray, ok := jsonMap[key].([]interface{})
+ So(ok, ShouldBeTrue)
+
+ for _, _jsonValue := range jsonArray {
+ jsonValue, ok := _jsonValue.(Undefined)
+ So(ok, ShouldBeTrue)
+ So(jsonValue, ShouldResemble, Undefined{})
+ }
+ })
+
+ Convey("cannot have a sign ('+' or '-')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "undefined"
+ data := fmt.Sprintf(`{"%v":+%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+
+ data = fmt.Sprintf(`{"%v":-%v}`, key, value)
+
+ err = Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/json/unquoted.go b/src/mongo/gotools/common/json/unquoted.go
new file mode 100644
index 00000000000..91aa2485907
--- /dev/null
+++ b/src/mongo/gotools/common/json/unquoted.go
@@ -0,0 +1,31 @@
+package json
+
+// Transition function for recognizing unquoted strings.
+// Adapted from encoding/json/scanner.go.
+
+func isBeginUnquotedString(c int) bool {
+ return c == '$' || c == '_' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z'
+}
+
+func isInUnquotedString(c int) bool {
+ return isBeginUnquotedString(c) || '0' <= c && c <= '9'
+}
+
+func stateInUnquotedString(s *scanner, c int) int {
+ if isInUnquotedString(c) {
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// maybeUnquoteBytes returns s unchanged when it is not wrapped in single or
+// double quotes; otherwise it unquotes it.
+// Adapted from encoding/json/decode.go.
+func maybeUnquoteBytes(s []byte) ([]byte, bool) {
+ if len(s) == 0 {
+ return nil, false
+ }
+ if s[0] != '"' && s[len(s)-1] != '"' && s[0] != '\'' && s[len(s)-1] != '\'' {
+ return s, true
+ }
+ return unquoteBytes(s)
+}
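+
+// As a behavioral sketch, these helpers let Unmarshal accept unquoted object
+// keys such as {_id: "x"} or {$gt: 5}, while unquoted string values still
+// fail to parse; unquoted_test.go has the authoritative cases.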
diff --git a/src/mongo/gotools/common/json/unquoted_test.go b/src/mongo/gotools/common/json/unquoted_test.go
new file mode 100644
index 00000000000..4bf9a47638b
--- /dev/null
+++ b/src/mongo/gotools/common/json/unquoted_test.go
@@ -0,0 +1,129 @@
+package json
+
+import (
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestUnquotedKeys(t *testing.T) {
+
+ Convey("When unmarshalling JSON without quotes around its keys", t, func() {
+
+ Convey("works for a single key", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "value"
+ data := fmt.Sprintf(`{%v:"%v"}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ So(jsonMap[key], ShouldEqual, value)
+ })
+
+ Convey("works for multiple keys", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "value1", "value2", "value3"
+ data := fmt.Sprintf(`{%v:"%v",%v:"%v",%v:"%v"}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ So(jsonMap[key1], ShouldEqual, value1)
+ So(jsonMap[key2], ShouldEqual, value2)
+ So(jsonMap[key3], ShouldEqual, value3)
+ })
+
+ Convey("can start with a dollar sign ('$')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "$dollar"
+ value := "money"
+ data := fmt.Sprintf(`{%v:"%v"}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ So(jsonMap[key], ShouldEqual, value)
+ })
+
+ Convey("can start with an underscore ('_')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "_id"
+ value := "unique"
+ data := fmt.Sprintf(`{%v:"%v"}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ So(jsonMap[key], ShouldEqual, value)
+ })
+
+ Convey("cannot start with a number ('[0-9]')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "073"
+ value := "octal"
+ data := fmt.Sprintf(`{%v:"%v"}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+
+ Convey("can contain numbers ('[0-9]')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "b16"
+ value := "little"
+ data := fmt.Sprintf(`{%v:"%v"}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldBeNil)
+
+ So(jsonMap[key], ShouldEqual, value)
+ })
+
+ Convey("cannot contain a period ('.')", func() {
+ var jsonMap map[string]interface{}
+
+ key := "horse.horse"
+ value := "horse"
+ data := fmt.Sprintf(`{%v:"%v"}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("When unmarshalling JSON without quotes around its values", t, func() {
+
+ Convey("fails for a single value", func() {
+ var jsonMap map[string]interface{}
+
+ key := "key"
+ value := "value"
+ data := fmt.Sprintf(`{"%v":%v}`, key, value)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+
+ Convey("fails for multiple values", func() {
+ var jsonMap map[string]interface{}
+
+ key1, key2, key3 := "key1", "key2", "key3"
+ value1, value2, value3 := "value1", "value2", "value3"
+ data := fmt.Sprintf(`{"%v":%v,"%v":%v,"%v":%v}`,
+ key1, value1, key2, value2, key3, value3)
+
+ err := Unmarshal([]byte(data), &jsonMap)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/log/tool_logger.go b/src/mongo/gotools/common/log/tool_logger.go
new file mode 100644
index 00000000000..c93378d796b
--- /dev/null
+++ b/src/mongo/gotools/common/log/tool_logger.go
@@ -0,0 +1,156 @@
+// Package log provides a utility to log timestamped messages to an io.Writer.
+package log
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "sync"
+ "time"
+)
+
+// Tool Logger verbosity constants
+const (
+ Always = iota
+ Info
+ DebugLow
+ DebugHigh
+)
+
+const (
+ ToolTimeFormat = "2006-01-02T15:04:05.000-0700"
+)
+
+//// Tool Logger Definition
+
+type ToolLogger struct {
+ mutex *sync.Mutex
+ writer io.Writer
+ format string
+ verbosity int
+}
+
+type VerbosityLevel interface {
+ Level() int
+ IsQuiet() bool
+}
+
+func (tl *ToolLogger) SetVerbosity(level VerbosityLevel) {
+ if level == nil {
+ tl.verbosity = 0
+ return
+ }
+
+ if level.IsQuiet() {
+ tl.verbosity = -1
+ } else {
+ tl.verbosity = level.Level()
+ }
+}
+
+func (tl *ToolLogger) SetWriter(writer io.Writer) {
+ tl.writer = writer
+}
+
+func (tl *ToolLogger) SetDateFormat(dateFormat string) {
+ tl.format = dateFormat
+}
+
+func (tl *ToolLogger) Logvf(minVerb int, format string, a ...interface{}) {
+ if minVerb < 0 {
+ panic("cannot set a minimum log verbosity that is less than 0")
+ }
+
+ if minVerb <= tl.verbosity {
+ tl.mutex.Lock()
+ defer tl.mutex.Unlock()
+ tl.log(fmt.Sprintf(format, a...))
+ }
+}
+
+func (tl *ToolLogger) Logv(minVerb int, msg string) {
+ if minVerb < 0 {
+ panic("cannot set a minimum log verbosity that is less than 0")
+ }
+
+ if minVerb <= tl.verbosity {
+ tl.mutex.Lock()
+ defer tl.mutex.Unlock()
+ tl.log(msg)
+ }
+}
+
+func (tl *ToolLogger) log(msg string) {
+ fmt.Fprintf(tl.writer, "%v\t%v\n", time.Now().Format(tl.format), msg)
+}
+
+func NewToolLogger(verbosity VerbosityLevel) *ToolLogger {
+ tl := &ToolLogger{
+ mutex: &sync.Mutex{},
+ writer: os.Stderr, // default to stderr
+ format: ToolTimeFormat,
+ }
+ tl.SetVerbosity(verbosity)
+ return tl
+}
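+
+// A usage sketch (the verbosity value is illustrative; any VerbosityLevel
+// implementation works):
+//
+//	tl := NewToolLogger(verbosity)
+//	tl.SetWriter(os.Stderr)
+//	tl.Logvf(Info, "connected to %v", host)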
+
+//// Log Writer Interface
+
+// toolLogWriter is an io.Writer wrapping a tool logger. It is a private
+// type meant for creation with the ToolLogger.Writer(...) method.
+type toolLogWriter struct {
+ logger *ToolLogger
+ minVerbosity int
+}
+
+func (tlw *toolLogWriter) Write(message []byte) (int, error) {
+ tlw.logger.Logv(tlw.minVerbosity, string(message))
+ return len(message), nil
+}
+
+// Writer returns an io.Writer that writes to the logger with
+// the given verbosity.
+func (tl *ToolLogger) Writer(minVerb int) io.Writer {
+ return &toolLogWriter{tl, minVerb}
+}
+
+//// Global Logging
+
+var globalToolLogger *ToolLogger
+
+func init() {
+ if globalToolLogger == nil {
+ // initialize tool logger with verbosity level = 0
+ globalToolLogger = NewToolLogger(nil)
+ }
+}
+
+// IsInVerbosity returns true if the current verbosity level setting is
+// greater than or equal to the given level.
+func IsInVerbosity(minVerb int) bool {
+ return minVerb <= globalToolLogger.verbosity
+}
+
+func Logvf(minVerb int, format string, a ...interface{}) {
+ globalToolLogger.Logvf(minVerb, format, a...)
+}
+
+func Logv(minVerb int, msg string) {
+ globalToolLogger.Logv(minVerb, msg)
+}
+
+func SetVerbosity(verbosity VerbosityLevel) {
+ globalToolLogger.SetVerbosity(verbosity)
+}
+
+func SetWriter(writer io.Writer) {
+ globalToolLogger.SetWriter(writer)
+}
+
+func SetDateFormat(dateFormat string) {
+ globalToolLogger.SetDateFormat(dateFormat)
+}
+
+func Writer(minVerb int) io.Writer {
+ return globalToolLogger.Writer(minVerb)
+}
diff --git a/src/mongo/gotools/common/log/tool_logger_test.go b/src/mongo/gotools/common/log/tool_logger_test.go
new file mode 100644
index 00000000000..5e6fea316b6
--- /dev/null
+++ b/src/mongo/gotools/common/log/tool_logger_test.go
@@ -0,0 +1,125 @@
+package log
+
+import (
+ "bytes"
+ "github.com/mongodb/mongo-tools/common/options"
+ . "github.com/smartystreets/goconvey/convey"
+ "os"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestBasicToolLoggerFunctionality(t *testing.T) {
+ var tl *ToolLogger
+
+ oldTime := time.Now()
+ // sleep to avoid failures due to low timestamp resolution
+ time.Sleep(time.Millisecond)
+
+ Convey("With a new ToolLogger", t, func() {
+ v1 := &options.Verbosity{
+ Quiet: false,
+ SetVerbosity: nil,
+ VLevel: 3,
+ }
+ tl = NewToolLogger(v1)
+ So(tl, ShouldNotBeNil)
+ So(tl.writer, ShouldNotBeNil)
+ So(tl.verbosity, ShouldEqual, 3)
+
+ Convey("writing a negative verbosity should panic", func() {
+ So(func() { tl.Logvf(-1, "nope") }, ShouldPanic)
+ })
+
+ Convey("writing the output to a buffer", func() {
+			buf := bytes.NewBuffer(make([]byte, 0, 1024)) // zero length, 1KB capacity
+ tl.SetWriter(buf)
+
+ Convey("with Logfs of various verbosity levels", func() {
+ tl.Logvf(0, "test this string")
+ tl.Logvf(5, "this log level is too high and will not log")
+ tl.Logvf(1, "====!%v!====", 12.5)
+
+ Convey("only messages of low enough verbosity should be written", func() {
+ l1, _ := buf.ReadString('\n')
+ So(l1, ShouldContainSubstring, ":")
+ So(l1, ShouldContainSubstring, ".")
+ So(l1, ShouldContainSubstring, "test this string")
+ l2, _ := buf.ReadString('\n')
+ So(l2, ShouldContainSubstring, "====!12.5!====")
+
+ Convey("and contain a proper timestamp", func() {
+ So(l2, ShouldContainSubstring, "\t")
+ timestamp := l2[:strings.Index(l2, "\t")]
+ So(len(timestamp), ShouldBeGreaterThan, 1)
+ parsedTime, err := time.Parse(ToolTimeFormat, timestamp)
+ So(err, ShouldBeNil)
+ So(parsedTime, ShouldHappenOnOrAfter, oldTime)
+ })
+ })
+ })
+ })
+ })
+}
+
+func TestGlobalToolLoggerFunctionality(t *testing.T) {
+ globalToolLogger = nil // just to be sure
+
+ Convey("With an initialized global ToolLogger", t, func() {
+ globalToolLogger = NewToolLogger(&options.Verbosity{
+ Quiet: false,
+ SetVerbosity: nil,
+ VLevel: 3,
+ })
+ So(globalToolLogger, ShouldNotBeNil)
+
+ Convey("actions shouldn't panic", func() {
+ So(func() { SetVerbosity(&options.Verbosity{Quiet: true}) }, ShouldNotPanic)
+ So(func() { Logvf(0, "woooo") }, ShouldNotPanic)
+ So(func() { SetDateFormat("ahaha") }, ShouldNotPanic)
+ So(func() { SetWriter(os.Stdout) }, ShouldNotPanic)
+ })
+ })
+}
+
+func TestToolLoggerWriter(t *testing.T) {
+ Convey("With a tool logger that writes to a buffer", t, func() {
+		buff := bytes.NewBuffer(make([]byte, 0, 1024)) // zero length, 1KB capacity
+ v1 := &options.Verbosity{
+ Quiet: false,
+ SetVerbosity: nil,
+ VLevel: 3,
+ }
+ tl := NewToolLogger(v1)
+ tl.SetWriter(buff)
+
+ Convey("writing using a ToolLogWriter", func() {
+ tlw := tl.Writer(0)
+ _, err := tlw.Write([]byte("One"))
+ So(err, ShouldBeNil)
+ _, err = tlw.Write([]byte("Two"))
+ So(err, ShouldBeNil)
+ _, err = tlw.Write([]byte("Three"))
+ So(err, ShouldBeNil)
+
+ Convey("the messages should appear in the buffer", func() {
+ results := buff.String()
+ So(results, ShouldContainSubstring, "One")
+ So(results, ShouldContainSubstring, "Two")
+ So(results, ShouldContainSubstring, "Three")
+ })
+ })
+
+ Convey("but with a log writer of too high verbosity", func() {
+ tlw2 := tl.Writer(1776)
+ _, err := tlw2.Write([]byte("nothing to see here"))
+ So(err, ShouldBeNil)
+
+ Convey("nothing should be written", func() {
+ results := buff.String()
+ So(results, ShouldNotContainSubstring, "nothing")
+ })
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/options/options.go b/src/mongo/gotools/common/options/options.go
new file mode 100644
index 00000000000..8962be7f8d7
--- /dev/null
+++ b/src/mongo/gotools/common/options/options.go
@@ -0,0 +1,346 @@
+// Package options implements command-line options that are used by all of
+// the mongo tools.
+package options
+
+import (
+ "github.com/jessevdk/go-flags"
+ "github.com/mongodb/mongo-tools/common/log"
+
+ "fmt"
+ "os"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// VersionStr and Gitspec identify the build; both are meant to be set at build time using -ldflags.
+var (
+ VersionStr = "built-without-version-string"
+ Gitspec = "built-without-git-spec"
+)
+
+// Struct encompassing all of the options that are reused across tools: "help",
+// "version", verbosity settings, ssl settings, etc.
+type ToolOptions struct {
+
+ // The name of the tool
+ AppName string
+
+ // The version of the tool
+ VersionStr string
+
+ // Sub-option types
+ *General
+ *Verbosity
+ *Connection
+ *SSL
+ *Auth
+ *Kerberos
+ *Namespace
+
+	// Force direct connection to the server and disable the
+	// driver's automatic replica set discovery logic.
+ Direct bool
+
+ // ReplicaSetName, if specified, will prevent the obtained session from
+ // communicating with any server which is not part of a replica set
+ // with the given name. The default is to communicate with any server
+ // specified or discovered via the servers contacted.
+ ReplicaSetName string
+
+ // for caching the parser
+ parser *flags.Parser
+}
+
+type Namespace struct {
+ // Specified database and collection
+ DB string `short:"d" long:"db" value-name:"<database-name>" description:"database to use"`
+ Collection string `short:"c" long:"collection" value-name:"<collection-name>" description:"collection to use"`
+}
+
+// Struct holding generic options
+type General struct {
+ Help bool `long:"help" description:"print usage"`
+ Version bool `long:"version" description:"print the tool version and exit"`
+
+ MaxProcs int `long:"numThreads" default:"0" hidden:"true"`
+}
+
+// Struct holding verbosity-related options
+type Verbosity struct {
+ SetVerbosity func(string) `short:"v" long:"verbose" value-name:"<level>" description:"more detailed log output (include multiple times for more verbosity, e.g. -vvvvv, or specify a numeric value, e.g. --verbose=N)" optional:"true" optional-value:""`
+ Quiet bool `long:"quiet" description:"hide all log output"`
+ VLevel int `no-flag:"true"`
+}
+
+func (v Verbosity) Level() int {
+ return v.VLevel
+}
+
+func (v Verbosity) IsQuiet() bool {
+ return v.Quiet
+}
+
+// Struct holding connection-related options
+type Connection struct {
+ Host string `short:"h" long:"host" value-name:"<hostname>" description:"mongodb host to connect to (setname/host1,host2 for replica sets)"`
+ Port string `long:"port" value-name:"<port>" description:"server port (can also use --host hostname:port)"`
+
+ Timeout int `long:"dialTimeout" default:"3" hidden:"true" description:"dial timeout in seconds"`
+}
+
+// Struct holding ssl-related options
+type SSL struct {
+ UseSSL bool `long:"ssl" description:"connect to a mongod or mongos that has ssl enabled"`
+ SSLCAFile string `long:"sslCAFile" value-name:"<filename>" description:"the .pem file containing the root certificate chain from the certificate authority"`
+ SSLPEMKeyFile string `long:"sslPEMKeyFile" value-name:"<filename>" description:"the .pem file containing the certificate and key"`
+ SSLPEMKeyPassword string `long:"sslPEMKeyPassword" value-name:"<password>" description:"the password to decrypt the sslPEMKeyFile, if necessary"`
+ SSLCRLFile string `long:"sslCRLFile" value-name:"<filename>" description:"the .pem file containing the certificate revocation list"`
+ SSLAllowInvalidCert bool `long:"sslAllowInvalidCertificates" description:"bypass the validation for server certificates"`
+ SSLAllowInvalidHost bool `long:"sslAllowInvalidHostnames" description:"bypass the validation for server name"`
+ SSLFipsMode bool `long:"sslFIPSMode" description:"use FIPS mode of the installed openssl library"`
+}
+
+// Struct holding auth-related options
+type Auth struct {
+ Username string `short:"u" value-name:"<username>" long:"username" description:"username for authentication"`
+ Password string `short:"p" value-name:"<password>" long:"password" description:"password for authentication"`
+ Source string `long:"authenticationDatabase" value-name:"<database-name>" description:"database that holds the user's credentials"`
+ Mechanism string `long:"authenticationMechanism" value-name:"<mechanism>" description:"authentication mechanism to use"`
+}
+
+// Struct for Kerberos/GSSAPI-specific options
+type Kerberos struct {
+ Service string `long:"gssapiServiceName" value-name:"<service-name>" description:"service name to use when authenticating using GSSAPI/Kerberos ('mongodb' by default)"`
+ ServiceHost string `long:"gssapiHostName" value-name:"<host-name>" description:"hostname to use when authenticating using GSSAPI/Kerberos (remote server's address by default)"`
+}
+
+type OptionRegistrationFunction func(o *ToolOptions) error
+
+var ConnectionOptFunctions []OptionRegistrationFunction
+
+type EnabledOptions struct {
+ Auth bool
+ Connection bool
+ Namespace bool
+}
+
+func parseVal(val string) int {
+ idx := strings.Index(val, "=")
+ ret, err := strconv.Atoi(val[idx+1:])
+ if err != nil {
+ panic(fmt.Errorf("value was not a valid integer: %v", err))
+ }
+ return ret
+}
+
+// New returns a new ToolOptions instance for the given app name, usage
+// string, and set of enabled option groups.
+func New(appName, usageStr string, enabled EnabledOptions) *ToolOptions {
+ opts := &ToolOptions{
+ AppName: appName,
+ VersionStr: VersionStr,
+
+ General: &General{},
+ Verbosity: &Verbosity{},
+ Connection: &Connection{},
+ SSL: &SSL{},
+ Auth: &Auth{},
+ Namespace: &Namespace{},
+ Kerberos: &Kerberos{},
+ parser: flags.NewNamedParser(
+ fmt.Sprintf("%v %v", appName, usageStr), flags.None),
+ }
+
+ // Called when -v or --verbose is parsed
+ opts.SetVerbosity = func(val string) {
+ if i, err := strconv.Atoi(val); err == nil {
+ opts.VLevel = opts.VLevel + i // -v=N or --verbose=N
+ } else if matched, _ := regexp.MatchString(`^v+$`, val); matched {
+ opts.VLevel = opts.VLevel + len(val) + 1 // Handles the -vvv cases
+ } else if matched, _ := regexp.MatchString(`^v+=[0-9]$`, val); matched {
+ opts.VLevel = parseVal(val) // I.e. -vv=3
+ } else if val == "" {
+ opts.VLevel = opts.VLevel + 1 // Increment for every occurrence of flag
+ } else {
+ log.Logvf(log.Always, "Invalid verbosity value given")
+ os.Exit(-1)
+ }
+ }
+
+ opts.parser.UnknownOptionHandler = opts.handleUnknownOption
+
+ if _, err := opts.parser.AddGroup("general options", "", opts.General); err != nil {
+ panic(fmt.Errorf("couldn't register general options: %v", err))
+ }
+ if _, err := opts.parser.AddGroup("verbosity options", "", opts.Verbosity); err != nil {
+ panic(fmt.Errorf("couldn't register verbosity options: %v", err))
+ }
+
+ if enabled.Connection {
+ if _, err := opts.parser.AddGroup("connection options", "", opts.Connection); err != nil {
+ panic(fmt.Errorf("couldn't register connection options: %v", err))
+ }
+
+ // Register options that were enabled at compile time with build tags (ssl, sasl)
+ for _, optionRegistrationFunction := range ConnectionOptFunctions {
+ if err := optionRegistrationFunction(opts); err != nil {
+ panic(fmt.Errorf("couldn't register command-line options: %v", err))
+ }
+ }
+ }
+
+ if enabled.Auth {
+ if _, err := opts.parser.AddGroup("authentication options", "", opts.Auth); err != nil {
+			panic(fmt.Errorf("couldn't register auth options: %v", err))
+ }
+ }
+ if enabled.Namespace {
+ if _, err := opts.parser.AddGroup("namespace options", "", opts.Namespace); err != nil {
+			panic(fmt.Errorf("couldn't register namespace options: %v", err))
+ }
+ }
+
+ if opts.MaxProcs <= 0 {
+ opts.MaxProcs = runtime.NumCPU()
+ }
+ log.Logvf(log.Info, "Setting num cpus to %v", opts.MaxProcs)
+ runtime.GOMAXPROCS(opts.MaxProcs)
+ return opts
+}
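+
+// A usage sketch (the tool name and usage string are illustrative):
+//
+//	opts := New("mongofoo", "<options>", EnabledOptions{Auth: true, Connection: true, Namespace: true})
+//	extraArgs, err := opts.Parse()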
+
+// UseReadOnlyHostDescription changes the help description of the --host arg to
+// not mention the shard/host:port format used in the data-mutating tools
+func (o *ToolOptions) UseReadOnlyHostDescription() {
+ hostOpt := o.parser.FindOptionByLongName("host")
+ hostOpt.Description = "mongodb host(s) to connect to (use commas to delimit hosts)"
+}
+
+// FindOptionByLongName finds an option in any of the added option groups by
+// matching its long name; useful for modifying the attributes (e.g. description
+// or name) of an option
+func (o *ToolOptions) FindOptionByLongName(name string) *flags.Option {
+ return o.parser.FindOptionByLongName(name)
+}
+
+// Print the usage message for the tool to stdout. Returns whether or not the
+// help flag is specified.
+func (o *ToolOptions) PrintHelp(force bool) bool {
+ if o.Help || force {
+ o.parser.WriteHelp(os.Stdout)
+ }
+ return o.Help
+}
+
+type versionInfo struct {
+ key, value string
+}
+
+var versionInfos []versionInfo
+
+// Print the tool version to stdout. Returns whether or not the version flag
+// is specified.
+func (o *ToolOptions) PrintVersion() bool {
+ if o.Version {
+ fmt.Printf("%v version: %v\n", o.AppName, o.VersionStr)
+ fmt.Printf("git version: %v\n", Gitspec)
+ fmt.Printf("Go version: %v\n", runtime.Version())
+ fmt.Printf(" os: %v\n", runtime.GOOS)
+ fmt.Printf(" arch: %v\n", runtime.GOARCH)
+ fmt.Printf(" compiler: %v\n", runtime.Compiler)
+ for _, info := range versionInfos {
+ fmt.Printf("%s: %s\n", info.key, info.value)
+ }
+ }
+ return o.Version
+}
+
+// Interface for extra options that need to be used by specific tools
+type ExtraOptions interface {
+ // Name specifying what type of options these are
+ Name() string
+}
+
+func (auth *Auth) RequiresExternalDB() bool {
+ return auth.Mechanism == "GSSAPI" || auth.Mechanism == "PLAIN" || auth.Mechanism == "MONGODB-X509"
+}
+
+// ShouldAskForPassword returns true if the user specifies a username flag
+// but no password, and the authentication mechanism requires a password.
+func (auth *Auth) ShouldAskForPassword() bool {
+ return auth.Username != "" && auth.Password == "" &&
+ !(auth.Mechanism == "MONGODB-X509" || auth.Mechanism == "GSSAPI")
+}
+
+// GetAuthenticationDatabase returns the authentication database to use: the
+// value of --authenticationDatabase if provided, "$external" for mechanisms
+// that require it, otherwise the database specified in the tool's --db arg.
+func (o *ToolOptions) GetAuthenticationDatabase() string {
+ if o.Auth.Source != "" {
+ return o.Auth.Source
+ } else if o.Auth.RequiresExternalDB() {
+ return "$external"
+ } else if o.Namespace != nil && o.Namespace.DB != "" {
+ return o.Namespace.DB
+ }
+ return ""
+}
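+
+// For example (illustrative): --authenticationDatabase admin yields "admin",
+// --authenticationMechanism GSSAPI with no source yields "$external", and
+// otherwise the --db value is used.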
+
+// AddOptions registers an additional options group to this instance
+func (o *ToolOptions) AddOptions(opts ExtraOptions) {
+ _, err := o.parser.AddGroup(opts.Name()+" options", "", opts)
+ if err != nil {
+ panic(fmt.Sprintf("error setting command line options for %v: %v",
+ opts.Name(), err))
+ }
+}
+
+// Parse the command line args. Returns any extra args not accounted for by
+// parsing, as well as an error if the parsing returns an error.
+func (o *ToolOptions) Parse() ([]string, error) {
+ return o.parser.Parse()
+}
+
+func (opts *ToolOptions) handleUnknownOption(option string, arg flags.SplitArgument, args []string) ([]string, error) {
+ if option == "dbpath" || option == "directoryperdb" || option == "journal" {
+ return args, fmt.Errorf("--dbpath and related flags are not supported in 3.0 tools.\n" +
+ "See http://dochub.mongodb.org/core/tools-dbpath-deprecated for more information")
+ }
+
+ return args, fmt.Errorf(`unknown option "%v"`, option)
+}
+
+// getIntArg returns three values: the parsed int value, a bool set to true if
+// a value was consumed from the incoming args array during parsing, and an
+// error value if parsing failed.
+func getIntArg(arg flags.SplitArgument, args []string) (int, bool, error) {
+ var rawVal string
+ consumeValue := false
+ rawVal, hasVal := arg.Value()
+ if !hasVal {
+ if len(args) == 0 {
+ return 0, false, fmt.Errorf("no value specified")
+ }
+ rawVal = args[0]
+ consumeValue = true
+ }
+ val, err := strconv.Atoi(rawVal)
+ if err != nil {
+ return val, consumeValue, fmt.Errorf("expected an integer value but got '%v'", rawVal)
+ }
+ return val, consumeValue, nil
+}
+
+// getStringArg returns three values: the parsed string value, a bool set to
+// true if a value was consumed from the incoming args array during parsing,
+// and an error value if parsing failed.
+func getStringArg(arg flags.SplitArgument, args []string) (string, bool, error) {
+ value, hasVal := arg.Value()
+ if hasVal {
+ return value, false, nil
+ }
+ if len(args) == 0 {
+ return "", false, fmt.Errorf("no value specified")
+ }
+ return args[0], true, nil
+}
diff --git a/src/mongo/gotools/common/options/options_gssapi.go b/src/mongo/gotools/common/options/options_gssapi.go
new file mode 100644
index 00000000000..f10aa3a2b3b
--- /dev/null
+++ b/src/mongo/gotools/common/options/options_gssapi.go
@@ -0,0 +1,12 @@
+// +build sasl
+
+package options
+
+func init() {
+ ConnectionOptFunctions = append(ConnectionOptFunctions, registerGSSAPIOptions)
+}
+
+func registerGSSAPIOptions(self *ToolOptions) error {
+ _, err := self.parser.AddGroup("kerberos options", "", self.Kerberos)
+ return err
+}
diff --git a/src/mongo/gotools/common/options/options_ssl.go b/src/mongo/gotools/common/options/options_ssl.go
new file mode 100644
index 00000000000..e8b8f27171f
--- /dev/null
+++ b/src/mongo/gotools/common/options/options_ssl.go
@@ -0,0 +1,18 @@
+// +build ssl
+
+package options
+
+import "github.com/spacemonkeygo/openssl"
+
+func init() {
+ ConnectionOptFunctions = append(ConnectionOptFunctions, registerSSLOptions)
+ versionInfos = append(versionInfos, versionInfo{
+ key: "OpenSSL version",
+ value: openssl.Version,
+ })
+}
+
+func registerSSLOptions(self *ToolOptions) error {
+ _, err := self.parser.AddGroup("ssl options", "", self.SSL)
+ return err
+}
diff --git a/src/mongo/gotools/common/options/options_test.go b/src/mongo/gotools/common/options/options_test.go
new file mode 100644
index 00000000000..cc18af21506
--- /dev/null
+++ b/src/mongo/gotools/common/options/options_test.go
@@ -0,0 +1,75 @@
+package options
+
+import (
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestVerbosityFlag(t *testing.T) {
+ Convey("With a new ToolOptions", t, func() {
+ enabled := EnabledOptions{false, false, false}
+ optPtr := New("", "", enabled)
+ So(optPtr, ShouldNotBeNil)
+ So(optPtr.parser, ShouldNotBeNil)
+
+ Convey("no verbosity flags, Level should be 0", func() {
+ _, err := optPtr.parser.ParseArgs([]string{})
+ So(err, ShouldBeNil)
+ So(optPtr.Level(), ShouldEqual, 0)
+ })
+
+ Convey("one short verbosity flag, Level should be 1", func() {
+ _, err := optPtr.parser.ParseArgs([]string{"-v"})
+ So(err, ShouldBeNil)
+ So(optPtr.Level(), ShouldEqual, 1)
+ })
+
+ Convey("three short verbosity flags (consecutive), Level should be 3", func() {
+ _, err := optPtr.parser.ParseArgs([]string{"-vvv"})
+ So(err, ShouldBeNil)
+ So(optPtr.Level(), ShouldEqual, 3)
+ })
+
+ Convey("three short verbosity flags (dispersed), Level should be 3", func() {
+ _, err := optPtr.parser.ParseArgs([]string{"-v", "-v", "-v"})
+ So(err, ShouldBeNil)
+ So(optPtr.Level(), ShouldEqual, 3)
+ })
+
+ Convey("short verbosity flag assigned to 3, Level should be 3", func() {
+ _, err := optPtr.parser.ParseArgs([]string{"-v=3"})
+ So(err, ShouldBeNil)
+ So(optPtr.Level(), ShouldEqual, 3)
+ })
+
+ Convey("consecutive short flags with assignment, only assignment holds", func() {
+ _, err := optPtr.parser.ParseArgs([]string{"-vv=3"})
+ So(err, ShouldBeNil)
+ So(optPtr.Level(), ShouldEqual, 3)
+ })
+
+ Convey("one long verbose flag, Level should be 1", func() {
+ _, err := optPtr.parser.ParseArgs([]string{"--verbose"})
+ So(err, ShouldBeNil)
+ So(optPtr.Level(), ShouldEqual, 1)
+ })
+
+ Convey("three long verbosity flags, Level should be 3", func() {
+ _, err := optPtr.parser.ParseArgs([]string{"--verbose", "--verbose", "--verbose"})
+ So(err, ShouldBeNil)
+ So(optPtr.Level(), ShouldEqual, 3)
+ })
+
+ Convey("long verbosity flag assigned to 3, Level should be 3", func() {
+ _, err := optPtr.parser.ParseArgs([]string{"--verbose=3"})
+ So(err, ShouldBeNil)
+ So(optPtr.Level(), ShouldEqual, 3)
+ })
+
+ Convey("mixed assignment and bare flag, total is sum", func() {
+ _, err := optPtr.parser.ParseArgs([]string{"--verbose", "--verbose=3"})
+ So(err, ShouldBeNil)
+ So(optPtr.Level(), ShouldEqual, 4)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/password/pass_util.go b/src/mongo/gotools/common/password/pass_util.go
new file mode 100644
index 00000000000..438195ed62b
--- /dev/null
+++ b/src/mongo/gotools/common/password/pass_util.go
@@ -0,0 +1,22 @@
+// +build !solaris
+
+package password
+
+import (
+ "github.com/howeyc/gopass"
+ "golang.org/x/crypto/ssh/terminal"
+ "syscall"
+)
+
+// This file contains all the calls needed to properly
+// handle password input from stdin/terminal on all
+// operating systems that aren't solaris
+
+func IsTerminal() bool {
+ return terminal.IsTerminal(int(syscall.Stdin))
+}
+
+func GetPass() string {
+ pass, _ := gopass.GetPasswd()
+ return string(pass)
+}
diff --git a/src/mongo/gotools/common/password/pass_util_solaris.go b/src/mongo/gotools/common/password/pass_util_solaris.go
new file mode 100644
index 00000000000..f5f22dc83f8
--- /dev/null
+++ b/src/mongo/gotools/common/password/pass_util_solaris.go
@@ -0,0 +1,107 @@
+package password
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+// This file is a mess based primarily on
+// "github.com/howeyc/gopass"
+// "golang.org/x/crypto/ssh/terminal"
+// with extra unistd.h ripped from solaris on amd64
+//
+// TODO: get some of these changes merged into the above two packages
+
+// ioctl constants -- not defined in solaris syscall pkg
+const (
+ SYS_IOCTL = 54
+ TCGETS = 21517
+ TCSETS = 21518
+	ttyfd     = 0 // STDIN
+)
+
+// getTermios reads the current termios settings into the
+// given termios struct.
+func getTermios(term *syscall.Termios) error {
+ _, _, errno := syscall.Syscall(SYS_IOCTL,
+ uintptr(ttyfd), uintptr(TCGETS),
+ uintptr(unsafe.Pointer(term)))
+ if errno != 0 {
+ return os.NewSyscallError("SYS_IOCTL", errno)
+ }
+ return nil
+}
+
+// setTermios applies the supplied termios settings
+func setTermios(term *syscall.Termios) error {
+ _, _, errno := syscall.Syscall(SYS_IOCTL,
+ uintptr(ttyfd), uintptr(TCSETS),
+ uintptr(unsafe.Pointer(term)))
+ if errno != 0 {
+ return os.NewSyscallError("SYS_IOCTL", errno)
+ }
+ return nil
+}
+
+// setRaw puts the terminal into "raw" mode, which takes
+// in all key presses and does not echo them.
+func setRaw(term syscall.Termios) error {
+ termCopy := term
+ termCopy.Iflag &^= syscall.ISTRIP | syscall.INLCR |
+ syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF
+ termCopy.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG
+ return setTermios(&termCopy)
+}
+
+// IsTerminal checks if we are reading from a terminal (instead of a pipe).
+func IsTerminal() bool {
+ var termios syscall.Termios
+ _, _, errno := syscall.Syscall(SYS_IOCTL,
+ uintptr(ttyfd), TCGETS,
+ uintptr(unsafe.Pointer(&termios)))
+ return errno == 0
+}
+
+// readChar reads one byte from stdin while the terminal is in raw mode,
+// restoring the original terminal settings before returning
+func readChar() byte {
+ var originalTerm syscall.Termios
+ if err := getTermios(&originalTerm); err != nil {
+ panic(err) // should not happen on amd64 solaris (untested on sparc)
+ }
+ if err := setRaw(originalTerm); err != nil {
+ panic(err)
+ }
+ defer func() {
+ // make sure we return the termios back to normal
+ if err := setTermios(&originalTerm); err != nil {
+ panic(err)
+ }
+ }()
+
+ // read a single byte then reset the terminal state
+ var singleChar [1]byte
+ if n, err := syscall.Read(ttyfd, singleChar[:]); n == 0 || err != nil {
+ panic(err)
+ }
+ return singleChar[0]
+}
+
+// GetPass reads a password from the terminal, handling backspace/delete
+// and stopping at a newline, carriage return, EOT, or EOF
+func GetPass() string {
+ // keep reading in characters until we hit a stopping point
+ pass := []byte{}
+ for {
+ ch := readChar()
+ if ch == backspaceKey || ch == deleteKey {
+ if len(pass) > 0 {
+ pass = pass[:len(pass)-1]
+ }
+ } else if ch == carriageReturnKey || ch == newLineKey || ch == eotKey || ch == eofKey {
+ break
+ } else if ch != 0 {
+ pass = append(pass, ch)
+ }
+ }
+ return string(pass)
+}
diff --git a/src/mongo/gotools/common/password/password.go b/src/mongo/gotools/common/password/password.go
new file mode 100644
index 00000000000..b89f2b91d17
--- /dev/null
+++ b/src/mongo/gotools/common/password/password.go
@@ -0,0 +1,63 @@
+// Package password handles cleanly reading in a user's password from
+// the command line. This varies heavily between operating systems.
+package password
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/log"
+ "os"
+)
+
+// key constants
+const (
+ backspaceKey = 8
+ deleteKey = 127
+ eotKey = 3
+ eofKey = 4
+ newLineKey = 10
+ carriageReturnKey = 13
+)
+
+// Prompt displays a prompt asking for the password and returns the
+// password the user enters as a string.
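+//
+// A typical call site (sketch):
+//	pass := password.Prompt()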
+func Prompt() string {
+ var pass string
+ if IsTerminal() {
+ log.Logv(log.DebugLow, "standard input is a terminal; reading password from terminal")
+ fmt.Fprintf(os.Stderr, "Enter password:")
+ pass = GetPass()
+ } else {
+ log.Logv(log.Always, "reading password from standard input")
+ fmt.Fprintf(os.Stderr, "Enter password:")
+ pass = readPassFromStdin()
+ }
+ fmt.Fprintln(os.Stderr)
+ return pass
+}
+
+// readPassFromStdin reads a password from stdin when standard
+// input is not attached to a terminal
+func readPassFromStdin() string {
+ pass := []byte{}
+ for {
+ var chBuf [1]byte
+ n, err := os.Stdin.Read(chBuf[:])
+ if err != nil {
+ panic(err)
+ }
+ if n == 0 {
+ break
+ }
+ ch := chBuf[0]
+ if ch == backspaceKey || ch == deleteKey {
+ if len(pass) > 0 {
+ pass = pass[:len(pass)-1]
+ }
+ } else if ch == carriageReturnKey || ch == newLineKey || ch == eotKey || ch == eofKey {
+ break
+ } else if ch != 0 {
+ pass = append(pass, ch)
+ }
+ }
+ return string(pass)
+}
diff --git a/src/mongo/gotools/common/progress/manager.go b/src/mongo/gotools/common/progress/manager.go
new file mode 100644
index 00000000000..e1c5f0db5f7
--- /dev/null
+++ b/src/mongo/gotools/common/progress/manager.go
@@ -0,0 +1,138 @@
+package progress
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/text"
+ "io"
+ "sync"
+ "time"
+)
+
+const GridPadding = 2
+
+// Manager handles thread-safe synchronized progress bar writing, so that all
+// given progress bars are written in a group at a given interval.
+// The current implementation maintains insert order when printing,
+// such that new bars appear at the bottom of the group.
+type Manager struct {
+ waitTime time.Duration
+ writer io.Writer
+ bars []*Bar
+ barsLock *sync.Mutex
+ stopChan chan struct{}
+}
+
+// NewProgressBarManager returns an initialized Manager with the given
+// time.Duration to wait between writes.
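+//
+// Typical lifecycle (sketch):
+//	manager := NewProgressBarManager(os.Stderr, time.Second)
+//	manager.Start()
+//	defer manager.Stop()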
+func NewProgressBarManager(w io.Writer, waitTime time.Duration) *Manager {
+ return &Manager{
+ waitTime: waitTime,
+ writer: w,
+ barsLock: &sync.Mutex{},
+ stopChan: make(chan struct{}),
+ }
+}
+
+// Attach registers the given progress bar with the manager. Should be used as
+// myManager.Attach(myBar)
+// defer myManager.Detach(myBar)
+func (manager *Manager) Attach(pb *Bar) {
+ // first some quick error checks
+ if pb.Name == "" {
+ panic("cannot attach a nameless bar to a progress bar manager")
+ }
+ pb.validate()
+
+ manager.barsLock.Lock()
+ defer manager.barsLock.Unlock()
+
+ // make sure we are not adding the same bar again
+ for _, bar := range manager.bars {
+ if bar.Name == pb.Name {
+ panic(fmt.Sprintf("progress bar with name '%v' already exists in manager", pb.Name))
+ }
+ }
+
+ manager.bars = append(manager.bars, pb)
+}
+
+// Detach removes the given progress bar from the manager.
+// Insert order is maintained for consistent ordering of the printed bars.
+// Note: the manager removes progress bars by "Name" not by memory location
+func (manager *Manager) Detach(pb *Bar) {
+ if pb.Name == "" {
+ panic("cannot detach a nameless bar from a progress bar manager")
+ }
+
+ manager.barsLock.Lock()
+ defer manager.barsLock.Unlock()
+
+ grid := &text.GridWriter{
+ ColumnPadding: GridPadding,
+ }
+ if pb.hasRendered {
+ // if we've rendered this bar at least once, render it one last time
+ pb.renderToGridRow(grid)
+ }
+ grid.FlushRows(manager.writer)
+
+	updatedBars := make([]*Bar, 0, len(manager.bars))
+ for _, bar := range manager.bars {
+ // move all bars to the updated list except for the bar we want to detach
+ if bar.Name != pb.Name {
+ updatedBars = append(updatedBars, bar)
+ }
+ }
+
+ manager.bars = updatedBars
+}
+
+// helper to render all bars in order
+func (manager *Manager) renderAllBars() {
+ manager.barsLock.Lock()
+ defer manager.barsLock.Unlock()
+ grid := &text.GridWriter{
+ ColumnPadding: GridPadding,
+ }
+ for _, bar := range manager.bars {
+ bar.renderToGridRow(grid)
+ }
+ grid.FlushRows(manager.writer)
+ // add padding of one row if we have more than one active bar
+ if len(manager.bars) > 1 {
+		// we just write an empty slice here, since a write call of any
+		// length to our log.Writer will trigger a new logline.
+ manager.writer.Write([]byte{})
+ }
+}
+
+// Start kicks off the timed batch writing of progress bars.
+func (manager *Manager) Start() {
+ if manager.writer == nil {
+ panic("Cannot use a progress.Manager with an unset Writer")
+ }
+ go manager.start()
+}
+
+func (manager *Manager) start() {
+ if manager.waitTime <= 0 {
+ manager.waitTime = DefaultWaitTime
+ }
+ ticker := time.NewTicker(manager.waitTime)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-manager.stopChan:
+ return
+ case <-ticker.C:
+ manager.renderAllBars()
+ }
+ }
+}
+
+// Stop ends the main manager goroutine, stopping the manager's bars
+// from being rendered.
+func (manager *Manager) Stop() {
+ manager.stopChan <- struct{}{}
+}
diff --git a/src/mongo/gotools/common/progress/manager_test.go b/src/mongo/gotools/common/progress/manager_test.go
new file mode 100644
index 00000000000..e997404e886
--- /dev/null
+++ b/src/mongo/gotools/common/progress/manager_test.go
@@ -0,0 +1,218 @@
+package progress
+
+import (
+ "bytes"
+ . "github.com/smartystreets/goconvey/convey"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+type safeBuffer struct {
+ sync.Mutex
+ bytes.Buffer
+}
+
+func (b *safeBuffer) Write(p []byte) (n int, err error) {
+ b.Lock()
+ defer b.Unlock()
+ return b.Buffer.Write(p)
+}
+
+func (b *safeBuffer) String() string {
+ b.Lock()
+ defer b.Unlock()
+ return b.Buffer.String()
+}
+
+func (b *safeBuffer) Reset() {
+ b.Lock()
+ defer b.Unlock()
+ b.Buffer.Reset()
+}
+
+func TestManagerAttachAndDetach(t *testing.T) {
+ writeBuffer := new(safeBuffer)
+ var manager *Manager
+
+ Convey("With an empty progress.Manager", t, func() {
+ manager = NewProgressBarManager(writeBuffer, time.Second)
+ So(manager, ShouldNotBeNil)
+
+ Convey("adding 3 bars", func() {
+ watching := NewCounter(10)
+ watching.Inc(5)
+ pbar1 := &Bar{
+ Name: "\nTEST1",
+ Watching: watching,
+ BarLength: 10,
+ }
+ manager.Attach(pbar1)
+ pbar2 := &Bar{
+ Name: "\nTEST2",
+ Watching: watching,
+ BarLength: 10,
+ }
+ manager.Attach(pbar2)
+ pbar3 := &Bar{
+ Name: "\nTEST3",
+ Watching: watching,
+ BarLength: 10,
+ }
+ manager.Attach(pbar3)
+
+ So(len(manager.bars), ShouldEqual, 3)
+
+			Convey("should write all three bars at once", func() {
+ manager.renderAllBars()
+ writtenString := writeBuffer.String()
+ So(writtenString, ShouldContainSubstring, "TEST1")
+ So(writtenString, ShouldContainSubstring, "TEST2")
+ So(writtenString, ShouldContainSubstring, "TEST3")
+ })
+
+ Convey("detaching the second bar", func() {
+ manager.Detach(pbar2)
+ So(len(manager.bars), ShouldEqual, 2)
+
+ Convey("should print 1,3", func() {
+ manager.renderAllBars()
+ writtenString := writeBuffer.String()
+ So(writtenString, ShouldContainSubstring, "TEST1")
+ So(writtenString, ShouldNotContainSubstring, "TEST2")
+ So(writtenString, ShouldContainSubstring, "TEST3")
+ So(
+ strings.Index(writtenString, "TEST1"),
+ ShouldBeLessThan,
+ strings.Index(writtenString, "TEST3"),
+ )
+ })
+
+				Convey("but adding a new bar should print 1,3,4", func() {
+ watching := NewCounter(10)
+ pbar4 := &Bar{
+ Name: "\nTEST4",
+ Watching: watching,
+ BarLength: 10,
+ }
+ manager.Attach(pbar4)
+
+ So(len(manager.bars), ShouldEqual, 3)
+ manager.renderAllBars()
+ writtenString := writeBuffer.String()
+ So(writtenString, ShouldContainSubstring, "TEST1")
+ So(writtenString, ShouldNotContainSubstring, "TEST2")
+ So(writtenString, ShouldContainSubstring, "TEST3")
+ So(writtenString, ShouldContainSubstring, "TEST4")
+ So(
+ strings.Index(writtenString, "TEST1"),
+ ShouldBeLessThan,
+ strings.Index(writtenString, "TEST3"),
+ )
+ So(
+ strings.Index(writtenString, "TEST3"),
+ ShouldBeLessThan,
+ strings.Index(writtenString, "TEST4"),
+ )
+ })
+ Reset(func() { writeBuffer.Reset() })
+
+ })
+ Reset(func() { writeBuffer.Reset() })
+ })
+ })
+}
+
+func TestManagerStartAndStop(t *testing.T) {
+ writeBuffer := new(safeBuffer)
+ var manager *Manager
+
+ Convey("With a progress.Manager with a waitTime of 10 ms and one bar", t, func() {
+ manager = NewProgressBarManager(writeBuffer, time.Millisecond*10)
+ So(manager, ShouldNotBeNil)
+ watching := NewCounter(10)
+ watching.Inc(5)
+ pbar := &Bar{
+ Name: "\nTEST",
+ Watching: watching,
+ BarLength: 10,
+ }
+ manager.Attach(pbar)
+
+ So(manager.waitTime, ShouldEqual, time.Millisecond*10)
+ So(len(manager.bars), ShouldEqual, 1)
+
+ Convey("running the manager for 45 ms and stopping", func() {
+ manager.Start()
+ time.Sleep(time.Millisecond * 45) // enough time for the manager to write 4 times
+ manager.Stop()
+
+ Convey("should generate 4 writes of the bar", func() {
+ output := writeBuffer.String()
+ So(strings.Count(output, "TEST"), ShouldEqual, 4)
+ })
+
+ Convey("starting and stopping the manager again should not panic", func() {
+ So(manager.Start, ShouldNotPanic)
+ So(manager.Stop, ShouldNotPanic)
+ })
+ })
+ })
+}
+
+func TestNumberOfWrites(t *testing.T) {
+ var cw *CountWriter
+ var manager *Manager
+ Convey("With a test manager and counting writer", t, func() {
+ cw = new(CountWriter)
+ manager = NewProgressBarManager(cw, time.Millisecond*10)
+ So(manager, ShouldNotBeNil)
+
+ manager.Attach(&Bar{Name: "1", Watching: NewCounter(10), BarLength: 10})
+
+ Convey("with one attached bar", func() {
+ So(len(manager.bars), ShouldEqual, 1)
+
+ Convey("only one write should be made per render", func() {
+ manager.renderAllBars()
+ So(cw.Count(), ShouldEqual, 1)
+ })
+ })
+
+ Convey("with two bars attached", func() {
+ manager.Attach(&Bar{Name: "2", Watching: NewCounter(10), BarLength: 10})
+ So(len(manager.bars), ShouldEqual, 2)
+
+ Convey("three writes should be made per render, since an empty write is added", func() {
+ manager.renderAllBars()
+ So(cw.Count(), ShouldEqual, 3)
+ })
+ })
+
+ Convey("with 57 bars attached", func() {
+ for i := 2; i <= 57; i++ {
+ manager.Attach(&Bar{Name: strconv.Itoa(i), Watching: NewCounter(10), BarLength: 10})
+ }
+ So(len(manager.bars), ShouldEqual, 57)
+
+ Convey("58 writes should be made per render, since an empty write is added", func() {
+ manager.renderAllBars()
+ So(cw.Count(), ShouldEqual, 58)
+ })
+ })
+ })
+}
+
+// helper type for counting calls to a writer
+type CountWriter int
+
+func (cw CountWriter) Count() int {
+ return int(cw)
+}
+
+func (cw *CountWriter) Write(b []byte) (int, error) {
+ *cw++
+ return len(b), nil
+}
diff --git a/src/mongo/gotools/common/progress/progress_bar.go b/src/mongo/gotools/common/progress/progress_bar.go
new file mode 100644
index 00000000000..3b196b7f471
--- /dev/null
+++ b/src/mongo/gotools/common/progress/progress_bar.go
@@ -0,0 +1,236 @@
+// Package progress exposes utilities to asynchronously monitor and display processing progress.
+package progress
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/text"
+ "io"
+ "sync/atomic"
+ "time"
+)
+
+const (
+ DefaultWaitTime = 3 * time.Second
+ BarFilling = "#"
+ BarEmpty = "."
+ BarLeft = "["
+ BarRight = "]"
+)
+
+// countProgressor is an implementation of Progressor that uses an
+// atomic counter to track progress.
+ max int64
+ current int64
+}
+
+func (c *countProgressor) Progress() (int64, int64) {
+ current := atomic.LoadInt64(&c.current)
+ return c.max, current
+}
+
+func (c *countProgressor) Inc(amount int64) {
+ atomic.AddInt64(&c.current, amount)
+}
+
+func (c *countProgressor) Set(amount int64) {
+ atomic.StoreInt64(&c.current, amount)
+}
+
+func NewCounter(max int64) *countProgressor {
+ return &countProgressor{max, 0}
+}
+
+// Progressor can be implemented to allow an object to hook up to a progress.Bar.
+type Progressor interface {
+ // Progress returns a pair of integers: the total amount to reach 100%, and
+ // the amount completed. This method is called by progress.Bar to
+ // determine what percentage to display.
+ Progress() (int64, int64)
+}
+
+// Updateable is a Progressor which also exposes the ability for the
+// progressing value to be incremented or reset.
+type Updateable interface {
+ // Inc increments the current progress counter by the given amount.
+ Inc(amount int64)
+
+ // Set resets the progress counter to the given amount.
+ Set(amount int64)
+
+ Progressor
+}
+
+// Bar is a tool for concurrently monitoring the progress
+// of a task with a simple linear ASCII visualization
+type Bar struct {
+ // Name is an identifier printed along with the bar
+ Name string
+ // BarLength is the number of characters used to print the bar
+ BarLength int
+
+ // IsBytes denotes whether byte-specific formatting (kB, MB, GB) should
+ // be applied to the numeric output
+ IsBytes bool
+
+ // Watching is the object that implements the Progressor to expose the
+ // values necessary for calculation
+ Watching Progressor
+
+ // Writer is where the Bar is written out to
+ Writer io.Writer
+ // WaitTime is the time to wait between writing the bar
+ WaitTime time.Duration
+
+ stopChan chan struct{}
+ stopChanSync chan struct{}
+
+	// hasRendered indicates that the bar has been rendered at least once,
+	// and implies that it should be rendered one more time when detaching
+ hasRendered bool
+}
+
+// Start starts the Bar goroutine. Once Start is called, a bar will
+// be written to the given Writer at regular intervals. The goroutine
+// can only be stopped manually using the Stop() method. The Bar
+// must be set up before calling this. Panics if Start has already been called.
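+//
+// A minimal setup (sketch; the field values are illustrative):
+//	bar := &Bar{Name: "copy", Watching: NewCounter(100), Writer: os.Stderr, BarLength: 24}
+//	bar.Start()
+//	defer bar.Stop()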
+func (pb *Bar) Start() {
+ pb.validate()
+ // we only check for the writer if we're using a single bar without a manager
+ if pb.Writer == nil {
+ panic("Cannot use a Bar with an unset Writer")
+ }
+ pb.stopChan = make(chan struct{})
+ pb.stopChanSync = make(chan struct{})
+
+ go pb.start()
+}
+
+// validate does a set of sanity checks against the progress bar, and panics
+// if the bar is unfit for use
+func (pb *Bar) validate() {
+ if pb.Watching == nil {
+ panic("Cannot use a Bar with a nil Watching")
+ }
+ if pb.stopChan != nil {
+ panic("Cannot start a Bar more than once")
+ }
+}
+
+// Stop kills the Bar goroutine, stopping it from writing.
+// Generally called as
+//  myBar.Start()
+//  defer myBar.Stop()
+// to prevent a goroutine leak.
+// Stop is synchronous: it does not return until the final
+// render has completed.
+func (pb *Bar) Stop() {
+ close(pb.stopChan)
+ <-pb.stopChanSync
+}
+
+func (pb *Bar) formatCounts() (string, string) {
+ maxCount, currentCount := pb.Watching.Progress()
+ if pb.IsBytes {
+ return text.FormatByteAmount(maxCount), text.FormatByteAmount(currentCount)
+ }
+ return fmt.Sprintf("%v", maxCount), fmt.Sprintf("%v", currentCount)
+}
+
+// renderToWriter computes all necessary values and renders them to
+// the bar's Writer
+func (pb *Bar) renderToWriter() {
+ pb.hasRendered = true
+ maxCount, currentCount := pb.Watching.Progress()
+ maxStr, currentStr := pb.formatCounts()
+ if maxCount == 0 {
+ // if we have no max amount, just print a count
+ fmt.Fprintf(pb.Writer, "%v\t%v", pb.Name, currentStr)
+ return
+ }
+ // otherwise, print a bar and percents
+ percent := float64(currentCount) / float64(maxCount)
+ fmt.Fprintf(pb.Writer, "%v %v\t%s/%s (%2.1f%%)",
+ drawBar(pb.BarLength, percent),
+ pb.Name,
+ currentStr,
+ maxStr,
+ percent*100,
+ )
+}
+
+func (pb *Bar) renderToGridRow(grid *text.GridWriter) {
+ pb.hasRendered = true
+ maxCount, currentCount := pb.Watching.Progress()
+ maxStr, currentStr := pb.formatCounts()
+ if maxCount == 0 {
+ // if we have no max amount, just print a count
+ grid.WriteCells(pb.Name, currentStr)
+ } else {
+ percent := float64(currentCount) / float64(maxCount)
+ grid.WriteCells(
+ drawBar(pb.BarLength, percent),
+ pb.Name,
+ fmt.Sprintf("%s/%s", currentStr, maxStr),
+ fmt.Sprintf("(%2.1f%%)", percent*100),
+ )
+ }
+ grid.EndRow()
+}
+
+// the main concurrent loop
+func (pb *Bar) start() {
+ if pb.WaitTime <= 0 {
+ pb.WaitTime = DefaultWaitTime
+ }
+ ticker := time.NewTicker(pb.WaitTime)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-pb.stopChan:
+ if pb.hasRendered {
+ // if we've rendered this bar at least once, render it one last time
+ pb.renderToWriter()
+ }
+ close(pb.stopChanSync)
+ return
+ case <-ticker.C:
+ pb.renderToWriter()
+ }
+ }
+}
+
+// drawBar returns a drawn progress bar of a given width and percentage
+// as a string. Examples:
+// [........................]
+// [###########.............]
+// [########################]
+func drawBar(spaces int, percent float64) string {
+ if spaces <= 0 {
+ return ""
+ }
+ var strBuffer bytes.Buffer
+ strBuffer.WriteString(BarLeft)
+
+ // the number of "#" to draw
+ fullSpaces := int(percent * float64(spaces))
+
+ // some bounds for ensuring a constant width, even with weird inputs
+ if fullSpaces > spaces {
+ fullSpaces = spaces
+ }
+ if fullSpaces < 0 {
+ fullSpaces = 0
+ }
+
+ // write the "#"s for the current percentage
+ for i := 0; i < fullSpaces; i++ {
+ strBuffer.WriteString(BarFilling)
+ }
+ // fill out the remainder of the bar
+ for i := 0; i < spaces-fullSpaces; i++ {
+ strBuffer.WriteString(BarEmpty)
+ }
+ strBuffer.WriteString(BarRight)
+ return strBuffer.String()
+}
diff --git a/src/mongo/gotools/common/progress/progress_bar_test.go b/src/mongo/gotools/common/progress/progress_bar_test.go
new file mode 100644
index 00000000000..3eb030d5c60
--- /dev/null
+++ b/src/mongo/gotools/common/progress/progress_bar_test.go
@@ -0,0 +1,186 @@
+// +build !race
+
+// Disable race detector since these tests are inherently racy
+package progress
+
+import (
+ "bytes"
+ . "github.com/smartystreets/goconvey/convey"
+ "strings"
+ "testing"
+ "time"
+)
+
+func TestBasicProgressBar(t *testing.T) {
+
+ writeBuffer := &bytes.Buffer{}
+
+ Convey("With a simple ProgressBar", t, func() {
+ watching := NewCounter(10)
+ pbar := &Bar{
+ Name: "\nTEST",
+ Watching: watching,
+ WaitTime: 3 * time.Millisecond,
+ Writer: writeBuffer,
+ BarLength: 10,
+ }
+
+ Convey("running it while incrementing its counter", func() {
+ pbar.Start()
+ // TODO make this test non-racy and reliable
+ time.Sleep(10 * time.Millisecond)
+		// iterate through each value 1-10, sleeping to make sure it is written
+ for localCounter := 0; localCounter < 10; localCounter++ {
+ watching.Inc(1)
+ time.Sleep(5 * time.Millisecond)
+ }
+ pbar.Stop()
+
+ Convey("the messages written in the buffer should cover all states", func() {
+ results := writeBuffer.String()
+ So(results, ShouldContainSubstring, "TEST")
+ So(results, ShouldContainSubstring, BarLeft)
+ So(results, ShouldContainSubstring, BarRight)
+ So(results, ShouldContainSubstring, BarFilling)
+ So(results, ShouldContainSubstring, BarEmpty)
+ So(results, ShouldContainSubstring, "0/10")
+ So(results, ShouldContainSubstring, "1/10")
+ So(results, ShouldContainSubstring, "2/10")
+ So(results, ShouldContainSubstring, "3/10")
+ So(results, ShouldContainSubstring, "4/10")
+ So(results, ShouldContainSubstring, "5/10")
+ So(results, ShouldContainSubstring, "6/10")
+ So(results, ShouldContainSubstring, "7/10")
+ So(results, ShouldContainSubstring, "8/10")
+ So(results, ShouldContainSubstring, "9/10")
+ So(results, ShouldContainSubstring, "10.0%")
+ })
+ })
+ })
+}
+
+func TestProgressBarWithNoMax(t *testing.T) {
+ writeBuffer := &bytes.Buffer{}
+
+ Convey("With a simple ProgressBar with no max value", t, func() {
+ watching := NewCounter(0)
+ watching.Inc(5)
+ pbar := &Bar{
+ Name: "test",
+ Watching: watching,
+ Writer: writeBuffer,
+ }
+ Convey("rendering the progress should still work, but not draw a bar", func() {
+ pbar.renderToWriter()
+ So(writeBuffer.String(), ShouldContainSubstring, "5")
+ So(writeBuffer.String(), ShouldContainSubstring, "test")
+ So(writeBuffer.String(), ShouldNotContainSubstring, "[")
+ So(writeBuffer.String(), ShouldNotContainSubstring, "]")
+ })
+ })
+}
+
+func TestBarConcurrency(t *testing.T) {
+ writeBuffer := &bytes.Buffer{}
+
+ Convey("With a simple ProgressBar", t, func() {
+ watching := NewCounter(1000)
+ watching.Inc(777)
+ pbar := &Bar{
+ Name: "\nTEST",
+ Watching: watching,
+ WaitTime: 10 * time.Millisecond,
+ Writer: writeBuffer,
+ }
+
+ Convey("if it rendered only once", func() {
+ pbar.Start()
+ time.Sleep(15 * time.Millisecond)
+ watching.Inc(1)
+ results := writeBuffer.String()
+ So(results, ShouldContainSubstring, "777")
+ So(results, ShouldNotContainSubstring, "778")
+
+ Convey("it will render a second time on stop", func() {
+ pbar.Stop()
+ results := writeBuffer.String()
+ So(results, ShouldContainSubstring, "777")
+ So(results, ShouldContainSubstring, "778")
+
+ Convey("and trying to start or stop the bar again should panic", func() {
+ So(func() { pbar.Start() }, ShouldPanic)
+ So(func() { pbar.Stop() }, ShouldPanic)
+ })
+ })
+ })
+ })
+}
+
+func TestBarDrawing(t *testing.T) {
+ Convey("Drawing some test bars and checking their character counts", t, func() {
+ Convey("20 wide @ 50%", func() {
+ b := drawBar(20, .5)
+ So(strings.Count(b, BarFilling), ShouldEqual, 10)
+ So(strings.Count(b, BarEmpty), ShouldEqual, 10)
+ So(b, ShouldContainSubstring, BarLeft)
+ So(b, ShouldContainSubstring, BarRight)
+ })
+ Convey("100 wide @ 50%", func() {
+ b := drawBar(100, .5)
+ So(strings.Count(b, BarFilling), ShouldEqual, 50)
+ So(strings.Count(b, BarEmpty), ShouldEqual, 50)
+ })
+ Convey("100 wide @ 99.9999%", func() {
+ b := drawBar(100, .999999)
+ So(strings.Count(b, BarFilling), ShouldEqual, 99)
+ So(strings.Count(b, BarEmpty), ShouldEqual, 1)
+ })
+ Convey("9 wide @ 72%", func() {
+ b := drawBar(9, .72)
+ So(strings.Count(b, BarFilling), ShouldEqual, 6)
+ So(strings.Count(b, BarEmpty), ShouldEqual, 3)
+ })
+ Convey("10 wide @ 0%", func() {
+ b := drawBar(10, 0)
+ So(strings.Count(b, BarFilling), ShouldEqual, 0)
+ So(strings.Count(b, BarEmpty), ShouldEqual, 10)
+ })
+ Convey("10 wide @ 100%", func() {
+ b := drawBar(10, 1)
+ So(strings.Count(b, BarFilling), ShouldEqual, 10)
+ So(strings.Count(b, BarEmpty), ShouldEqual, 0)
+ })
+ Convey("10 wide @ -60%", func() {
+ b := drawBar(10, -0.6)
+ So(strings.Count(b, BarFilling), ShouldEqual, 0)
+ So(strings.Count(b, BarEmpty), ShouldEqual, 10)
+ })
+ Convey("10 wide @ 160%", func() {
+ b := drawBar(10, 1.6)
+ So(strings.Count(b, BarFilling), ShouldEqual, 10)
+ So(strings.Count(b, BarEmpty), ShouldEqual, 0)
+ })
+ })
+}
+
+func TestBarUnits(t *testing.T) {
+ writeBuffer := &bytes.Buffer{}
+
+ Convey("With a simple ProgressBar with IsBytes==true", t, func() {
+ watching := NewCounter(1024 * 1024)
+ watching.Inc(777)
+ pbar := &Bar{
+ Name: "\nTEST",
+ Watching: watching,
+ WaitTime: 10 * time.Millisecond,
+ Writer: writeBuffer,
+ IsBytes: true,
+ }
+
+ Convey("the written output should contain units", func() {
+ pbar.renderToWriter()
+ So(writeBuffer.String(), ShouldContainSubstring, "B")
+ So(writeBuffer.String(), ShouldContainSubstring, "MB")
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/signals/signals.go b/src/mongo/gotools/common/signals/signals.go
new file mode 100644
index 00000000000..b7ce20b1bea
--- /dev/null
+++ b/src/mongo/gotools/common/signals/signals.go
@@ -0,0 +1,51 @@
+package signals
+
+import (
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/util"
+
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+// Handle is like HandleWithInterrupt but it doesn't take a finalizer and will
+// exit immediately after the first signal is received.
+func Handle() chan struct{} {
+ return HandleWithInterrupt(nil)
+}
+
+// HandleWithInterrupt starts a goroutine which listens for SIGTERM, SIGINT,
+// SIGKILL, and SIGPIPE. It calls the finalizer function when the first signal
+// is received and forcibly terminates the program after the second. If a nil
+// function is provided, the program will exit after the first signal.
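+//
+// Typical usage (sketch; cleanup is the caller's finalizer):
+//	finishedChan := HandleWithInterrupt(cleanup)
+//	defer close(finishedChan)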
+func HandleWithInterrupt(finalizer func()) chan struct{} {
+ finishedChan := make(chan struct{})
+ go handleSignals(finalizer, finishedChan)
+ return finishedChan
+}
+
+func handleSignals(finalizer func(), finishedChan chan struct{}) {
+ log.Logv(log.DebugLow, "will listen for SIGTERM, SIGINT, SIGKILL, and SIGPIPE")
+ sigChan := make(chan os.Signal, 2)
+ signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT, syscall.SIGKILL, syscall.SIGPIPE)
+ defer signal.Stop(sigChan)
+ if finalizer != nil {
+ select {
+ case sig := <-sigChan:
+			// on the first signal, use the finalizer to terminate cleanly
+ log.Logvf(log.Always, "signal '%s' received; attempting to shut down", sig)
+ finalizer()
+ case <-finishedChan:
+ return
+ }
+ }
+ select {
+ case sig := <-sigChan:
+		// a second signal terminates the program immediately
+ log.Logvf(log.Always, "signal '%s' received; forcefully terminating", sig)
+ os.Exit(util.ExitKill)
+ case <-finishedChan:
+ return
+ }
+}
diff --git a/src/mongo/gotools/common/testutil/auth.go b/src/mongo/gotools/common/testutil/auth.go
new file mode 100644
index 00000000000..3d62606bf5e
--- /dev/null
+++ b/src/mongo/gotools/common/testutil/auth.go
@@ -0,0 +1,76 @@
+package testutil
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/options"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "os"
+)
+
+var (
+ UserAdmin = "uAdmin"
+ UserAdminPassword = "password"
+ CreatedUserNameEnv = "AUTH_USERNAME"
+ CreatedUserPasswordEnv = "AUTH_PASSWORD"
+)
+
+// CreateUserAdmin initializes a user admin, using the already-connected
+// session passed in. Assumes that there are no existing users; otherwise
+// it will fail with a permissions issue.
+func CreateUserAdmin(session *mgo.Session) error {
+ err := CreateUserWithRole(session, UserAdmin, UserAdminPassword,
+ mgo.RoleUserAdminAny, false)
+ return err
+}
+
+// CreateUserWithRole creates a user with the specified password and role,
+// using the already-connected session passed in. If needsLogin is true, the
+// default user admin and password will be used to log in to the admin
+// db before creating the user.
+func CreateUserWithRole(session *mgo.Session, user,
+ password string, role mgo.Role, needsLogin bool) error {
+
+ adminDB := session.DB("admin")
+ if needsLogin {
+ err := adminDB.Login(
+ UserAdmin,
+ UserAdminPassword,
+ )
+ if err != nil {
+ return fmt.Errorf("error logging in: %v", err)
+ }
+ }
+
+ err := adminDB.Run(
+ bson.D{
+ {"createUser", user},
+ {"pwd", password},
+ {"roles", []bson.M{
+ bson.M{
+ "role": role,
+ "db": "admin",
+ },
+ }},
+ },
+ &bson.M{},
+ )
+
+ if err != nil {
+ return fmt.Errorf("error adding user %v with role %v: %v", user, role, err)
+ }
+
+ return nil
+}
+
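+// GetAuthOptions returns auth options read from the test environment
+// variables when auth testing is enabled, and an empty options.Auth
+// otherwise.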
+func GetAuthOptions() options.Auth {
+ if HasTestType(AuthTestType) {
+ return options.Auth{
+ Username: os.Getenv(CreatedUserNameEnv),
+ Password: os.Getenv(CreatedUserPasswordEnv),
+ Source: "admin",
+ }
+ }
+
+ return options.Auth{}
+}
diff --git a/src/mongo/gotools/common/testutil/kerberos.go b/src/mongo/gotools/common/testutil/kerberos.go
new file mode 100644
index 00000000000..c6e3f302b17
--- /dev/null
+++ b/src/mongo/gotools/common/testutil/kerberos.go
@@ -0,0 +1,42 @@
+package testutil
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/options"
+ "os"
+ "runtime"
+)
+
+var (
+ WinKerberosPwdEnv = "MONGODB_KERBEROS_PASSWORD"
+)
+
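+// GetKerberosOptions returns ToolOptions for connecting to the drivers
+// Kerberos testing cluster. On Windows the password is read from the
+// MONGODB_KERBEROS_PASSWORD environment variable.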
+func GetKerberosOptions() (*options.ToolOptions, error) {
+ opts := &options.ToolOptions{
+ Namespace: &options.Namespace{
+ DB: "kerberos",
+ Collection: "test",
+ },
+ SSL: &options.SSL{},
+ Auth: &options.Auth{
+ Username: "drivers@LDAPTEST.10GEN.CC",
+ Source: "$external",
+ Mechanism: "GSSAPI",
+ },
+ Kerberos: &options.Kerberos{},
+ Connection: &options.Connection{
+ Host: "ldaptest.10gen.cc",
+ Port: "27017",
+ },
+ }
+
+ if runtime.GOOS == "windows" {
+ opts.Auth.Password = os.Getenv(WinKerberosPwdEnv)
+ if opts.Auth.Password == "" {
+ return nil, fmt.Errorf("Need to set %v environment variable to run "+
+ "kerberos tests on windows", WinKerberosPwdEnv)
+ }
+ }
+
+ return opts, nil
+}
diff --git a/src/mongo/gotools/common/testutil/ssl_integration.go b/src/mongo/gotools/common/testutil/ssl_integration.go
new file mode 100644
index 00000000000..ef49ecc6af5
--- /dev/null
+++ b/src/mongo/gotools/common/testutil/ssl_integration.go
@@ -0,0 +1,19 @@
+package testutil
+
+import (
+ commonOpts "github.com/mongodb/mongo-tools/common/options"
+)
+
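+// GetSSLOptions returns SSL options pointing at the test CA and server
+// certificates when SSL testing is enabled, and UseSSL=false otherwise.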
+func GetSSLOptions() commonOpts.SSL {
+ if HasTestType(SSLTestType) {
+ return commonOpts.SSL{
+ UseSSL: true,
+ SSLCAFile: "../common/db/openssl/testdata/ca.pem",
+ SSLPEMKeyFile: "../common/db/openssl/testdata/server.pem",
+ }
+ }
+
+ return commonOpts.SSL{
+ UseSSL: false,
+ }
+}
diff --git a/src/mongo/gotools/common/testutil/testutil.go b/src/mongo/gotools/common/testutil/testutil.go
new file mode 100644
index 00000000000..9923634e5be
--- /dev/null
+++ b/src/mongo/gotools/common/testutil/testutil.go
@@ -0,0 +1,2 @@
+// Package testutil implements functions for filtering and configuring tests.
+package testutil
diff --git a/src/mongo/gotools/common/testutil/types.go b/src/mongo/gotools/common/testutil/types.go
new file mode 100644
index 00000000000..27dca9ed60e
--- /dev/null
+++ b/src/mongo/gotools/common/testutil/types.go
@@ -0,0 +1,57 @@
+package testutil
+
+import (
+ "flag"
+ "strings"
+ "testing"
+)
+
+const (
+ // Integration tests require a mongod running on localhost:33333. If your
+ // mongod uses SSL you need to specify the "ssl" type below, and ditto for
+ // if your mongod requires auth.
+ IntegrationTestType = "integration"
+
+ // Unit tests don't require a real mongod. They may still do file I/O.
+ UnitTestType = "unit"
+
+ // Kerberos tests are a special type of integration test that test tools
+ // with Kerberos authentication against the drivers Kerberos testing cluster
+ // because setting up a KDC every time is too brittle and expensive.
+ // (See https://wiki.mongodb.com/display/DH/Testing+Kerberos)
+ KerberosTestType = "kerberos"
+
+ // "ssl" and "auth" are used to configure integration tests to run against
+ // different mongod configurations. "ssl" will configure the integration tests
+ // to expect an SSL-enabled mongod on localhost:33333. "auth" will do the same
+ // for an auth-enabled mongod on localhost:33333.
+ SSLTestType = "ssl"
+ AuthTestType = "auth"
+)
+
+var (
+ // the types of tests that should be run
+ testTypes = flag.String("test.types", UnitTestType, "Comma-separated list of the"+
+ " types of tests to be run")
+)
+
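+// HasTestType returns true if the given type is in the comma-separated
+// -test.types flag, e.g. "go test -test.types=unit,integration".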
+func HasTestType(testType string) bool {
+ if !flag.Parsed() {
+ flag.Parse()
+ }
+
+	// check whether the passed-in type is among the types being run
+ for _, typ := range strings.Split(*testTypes, ",") {
+ if typ == testType {
+ return true
+ }
+ }
+ return false
+}
+
+// VerifyTestType skips the test if the specified type is not being run.
+func VerifyTestType(t *testing.T, testType string) {
+ if !HasTestType(testType) {
+ t.SkipNow()
+ }
+}
diff --git a/src/mongo/gotools/common/text/grid.go b/src/mongo/gotools/common/text/grid.go
new file mode 100644
index 00000000000..f269b69912a
--- /dev/null
+++ b/src/mongo/gotools/common/text/grid.go
@@ -0,0 +1,165 @@
+// Package text provides utilities for formatting text data.
+package text
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+type Cell struct {
+ contents string
+ feed bool
+}
+
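+// GridWriter builds up a grid of cells and writes them out with the
+// columns padded to a consistent width.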
+type GridWriter struct {
+ ColumnPadding int
+ MinWidth int
+ Grid [][]Cell
+ CurrentRow int
+ colWidths []int
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// init makes the initial row if this is the first time any data is
+// being written; otherwise it is a no-op.
+func (gw *GridWriter) init() {
+ if len(gw.Grid) <= gw.CurrentRow {
+ gw.Grid = append(gw.Grid, []Cell{})
+ }
+}
+
+// WriteCell writes the given string into the next cell in the current row.
+func (gw *GridWriter) WriteCell(data string) {
+ gw.init()
+ gw.Grid[gw.CurrentRow] = append(gw.Grid[gw.CurrentRow], Cell{data, false})
+}
+
+// WriteCells writes multiple cells by calling WriteCell for each argument.
+func (gw *GridWriter) WriteCells(data ...string) {
+ for _, s := range data {
+ gw.WriteCell(s)
+ }
+}
+
+// Feed writes the given string into the current cell, allowing the cell
+// contents to extend past the width of the current column, and ends the row.
+func (gw *GridWriter) Feed(data string) {
+ gw.init()
+ gw.Grid[gw.CurrentRow] = append(gw.Grid[gw.CurrentRow], Cell{data, true})
+ gw.EndRow()
+}
+
+// EndRow terminates the row of cells and begins a new row in the grid.
+func (gw *GridWriter) EndRow() {
+ gw.CurrentRow++
+ if len(gw.Grid) <= gw.CurrentRow {
+ gw.Grid = append(gw.Grid, []Cell{})
+ }
+}
+
+// Reset discards any grid data and resets the current row.
+func (gw *GridWriter) Reset() {
+ gw.CurrentRow = 0
+ gw.Grid = [][]Cell{}
+}
+
+// updateWidths updates the cached column widths. For each column, the
+// cached width is replaced if it is less than the new width.
+func (gw *GridWriter) updateWidths(colWidths []int) {
+ if gw.colWidths == nil {
+ gw.colWidths = make([]int, len(colWidths))
+ copy(gw.colWidths, colWidths)
+ }
+ for i, cw := range colWidths {
+ if gw.colWidths[i] < cw {
+ gw.colWidths[i] = cw
+ }
+ }
+}
+
+// calculateWidths returns a slice containing the correct padded size for
+// each column in the grid.
+func (gw *GridWriter) calculateWidths() []int {
+ colWidths := []int{}
+
+ // Loop over each column
+ for j := 0; ; j++ {
+ found := false
+
+ // Examine all the rows at column 'j'
+ for i := range gw.Grid {
+ if len(gw.Grid[i]) <= j {
+ continue
+ }
+ found = true
+
+ if len(colWidths) <= j {
+ colWidths = append(colWidths, 0)
+ }
+
+ if gw.Grid[i][j].feed {
+ // we're at a row-terminating cell - skip over the rest of this row
+ continue
+ }
+ // Set the size for the row to be the largest
+ // of all the cells in the column
+ newMin := max(gw.MinWidth, len(gw.Grid[i][j].contents))
+ if newMin > colWidths[j] {
+ colWidths[j] = newMin
+ }
+ }
+ // This column did not have any data in it at all, so we've hit the
+ // end of the grid - stop.
+ if !found {
+ break
+ }
+ }
+ return colWidths
+}
+
+// Flush writes the fully-formatted grid to the given io.Writer.
+func (gw *GridWriter) Flush(w io.Writer) {
+ colWidths := gw.calculateWidths()
+
+ // invalidate all cached widths if new cells are added/removed
+ if len(gw.colWidths) != len(colWidths) {
+ gw.colWidths = make([]int, len(colWidths))
+ copy(gw.colWidths, colWidths)
+ } else {
+ gw.updateWidths(colWidths)
+ }
+
+ for i, row := range gw.Grid {
+ lastRow := i == (len(gw.Grid) - 1)
+ for j, cell := range row {
+ lastCol := (j == len(row)-1)
+ fmt.Fprintf(w, fmt.Sprintf("%%%vs", gw.colWidths[j]), cell.contents)
+ if gw.ColumnPadding > 0 && !lastCol {
+ fmt.Fprint(w, strings.Repeat(" ", gw.ColumnPadding))
+ }
+ }
+ if !lastRow {
+ fmt.Fprint(w, "\n")
+ }
+ }
+}
+
+// FlushRows writes the fully-formatted grid to the given io.Writer, but
+// gives each row its own Write() call instead of using newlines.
+func (gw *GridWriter) FlushRows(w io.Writer) {
+ gridBuff := &bytes.Buffer{}
+ gw.Flush(gridBuff)
+ lineScanner := bufio.NewScanner(gridBuff)
+ for lineScanner.Scan() {
+ w.Write(lineScanner.Bytes())
+ }
+}
diff --git a/src/mongo/gotools/common/text/grid_test.go b/src/mongo/gotools/common/text/grid_test.go
new file mode 100644
index 00000000000..6919786a9d9
--- /dev/null
+++ b/src/mongo/gotools/common/text/grid_test.go
@@ -0,0 +1,104 @@
+package text
+
+import (
+ "bytes"
+ "fmt"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestUpdateWidths(t *testing.T) {
+ Convey("Using a grid writer, the cached column width", t, func() {
+ gw := GridWriter{}
+ defaultWidths := []int{1, 2, 3, 4}
+ Convey("should be updated when one or more new cell widths are greater", func() {
+ // the first time, the grid's widths are nil
+ So(gw.colWidths, ShouldBeNil)
+ gw.updateWidths(defaultWidths)
+ So(gw.colWidths, ShouldResemble, defaultWidths)
+
+ // the grid's widths should not be updated if all the new cell widths are less than or equal
+ newWidths := []int{1, 2, 1, 2}
+ So(gw.colWidths, ShouldNotBeNil)
+ gw.updateWidths(newWidths)
+ So(gw.colWidths, ShouldResemble, defaultWidths)
+ So(gw.colWidths, ShouldNotResemble, newWidths)
+
+ // the grid's widths should be updated if any of the new cell widths are greater
+ newWidths = []int{1, 2, 3, 5}
+ So(gw.colWidths, ShouldNotBeNil)
+ gw.updateWidths(newWidths)
+ So(gw.colWidths, ShouldResemble, newWidths)
+ So(gw.colWidths, ShouldNotResemble, defaultWidths)
+ })
+ })
+}
+
+func writeData(gw *GridWriter) {
+ gw.Reset()
+ for i := 0; i < 3; i++ {
+ for j := 0; j < 3; j++ {
+ gw.WriteCell(fmt.Sprintf("(%v,%v)", i, j))
+ }
+ gw.EndRow()
+ }
+}
+
+func TestWriteGrid(t *testing.T) {
+ Convey("Test grid writer with no min width", t, func() {
+ gw := GridWriter{}
+ writeData(&gw)
+ buf := bytes.Buffer{}
+ gw.Flush(&buf)
+ So(buf.String(), ShouldEqual,
+ "(0,0)(0,1)(0,2)\n(1,0)(1,1)(1,2)\n(2,0)(2,1)(2,2)\n")
+
+ writeData(&gw)
+ gw.MinWidth = 7
+ buf = bytes.Buffer{}
+ gw.Flush(&buf)
+ So(buf.String(), ShouldStartWith,
+ " (0,0) (0,1) (0,2)\n (1,0) (1,1)")
+
+ writeData(&gw)
+ gw.colWidths = []int{}
+ gw.MinWidth = 0
+ gw.ColumnPadding = 1
+ buf = bytes.Buffer{}
+ gw.Flush(&buf)
+ So(buf.String(), ShouldStartWith,
+ "(0,0) (0,1) (0,2)\n(1,0) (1,1)")
+
+ writeData(&gw)
+ buf = bytes.Buffer{}
+ gw.FlushRows(&buf)
+ So(buf.String(), ShouldStartWith,
+ "(0,0) (0,1) (0,2)(1,0) (1,1)")
+ })
+
+ Convey("Test grid writer width calculation", t, func() {
+ gw := GridWriter{}
+ gw.WriteCell("bbbb")
+ gw.WriteCell("aa")
+ gw.WriteCell("c")
+ gw.EndRow()
+ gw.WriteCell("bb")
+ gw.WriteCell("a")
+ gw.WriteCell("")
+ gw.EndRow()
+ So(gw.calculateWidths(), ShouldResemble, []int{4, 2, 1})
+
+ gw.WriteCell("bbbbbbb")
+ gw.WriteCell("a")
+ gw.WriteCell("cccc")
+ gw.EndRow()
+ So(gw.calculateWidths(), ShouldResemble, []int{7, 2, 4})
+
+ gw.WriteCell("bbbbbbb")
+ gw.WriteCell("a")
+ gw.WriteCell("cccc")
+ gw.WriteCell("ddddddddd")
+ gw.EndRow()
+ So(gw.calculateWidths(), ShouldResemble, []int{7, 2, 4, 9})
+ })
+}
diff --git a/src/mongo/gotools/common/text/units.go b/src/mongo/gotools/common/text/units.go
new file mode 100644
index 00000000000..43c2720dcc4
--- /dev/null
+++ b/src/mongo/gotools/common/text/units.go
@@ -0,0 +1,68 @@
+package text
+
+import (
+ "fmt"
+ "math"
+)
+
+const (
+ decimal = 1000
+ binary = 1024
+)
+
+var (
+ longByteUnits = []string{"B", "KB", "MB", "GB"}
+ shortByteUnits = []string{"B", "K", "M", "G"}
+ shortBitUnits = []string{"b", "k", "m", "g"}
+)
+
+// FormatByteAmount takes an int64 representing a size in bytes and
+// returns a formatted string with a minimum number of significant figures,
+// e.g. 12.4GB, 0B, 124.5KB
+func FormatByteAmount(size int64) string {
+ return formatUnitAmount(binary, size, 3, longByteUnits)
+}
+
+// FormatMegabyteAmount is equivalent to FormatByteAmount, but expects
+// an amount of MB instead of bytes and formats with short units (K, M, G).
+func FormatMegabyteAmount(size int64) string {
+ return formatUnitAmount(binary, size*1024*1024, 3, shortByteUnits)
+}
+
+// FormatBits takes in a bit (not byte) count and returns a formatted string
+// including units with three total digits (except if it is less than 1k)
+// e.g. 12.0g, 0b, 124k
+func FormatBits(size int64) string {
+ return formatUnitAmount(decimal, size, 3, shortBitUnits)
+}
+
+// formatUnitAmount formats the size using the given units and at least
+// minDigits significant digits, unless the value is less than the base,
+// in which case no decimals are shown.
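+// e.g. formatUnitAmount(1024, 2500, 3, longByteUnits) == "2.44KB"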
+func formatUnitAmount(base, size int64, minDigits int, units []string) string {
+ result := float64(size)
+ divisor := float64(base)
+ var shifts int
+ // keep dividing by base and incrementing our unit until
+ // we hit the right unit or run out of unit strings
+ for ; result >= divisor && shifts < len(units)-1; shifts++ {
+ result /= divisor
+ }
+ result = round(result, minDigits)
+
+	var precision int // Number of digits to show after the decimal
+	digits := 1 + int(math.Log10(result)) // Number of pre-decimal digits in result
+	if shifts != 0 && digits < minDigits {
+		// Add as many decimal digits as we can
+		precision = minDigits - digits
+	}
+ format := fmt.Sprintf("%%.%df%%s", precision)
+ return fmt.Sprintf(format, result, units[shifts])
+}
+
+// round applies the grade-school method (round half up) to round
+// result to precision-1 decimal places
+func round(result float64, precision int) float64 {
+	divisor := math.Pow(10.0, float64(precision-1))
+	// round(x) == floor(x + 0.5)
+	return math.Floor(result*divisor+0.5) / divisor
+}
diff --git a/src/mongo/gotools/common/text/units_test.go b/src/mongo/gotools/common/text/units_test.go
new file mode 100644
index 00000000000..78f6fa94e1c
--- /dev/null
+++ b/src/mongo/gotools/common/text/units_test.go
@@ -0,0 +1,102 @@
+package text
+
+import (
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+func TestFormatByteCount(t *testing.T) {
+ Convey("With some sample byte amounts", t, func() {
+ Convey("0 Bytes -> 0B", func() {
+ So(FormatByteAmount(0), ShouldEqual, "0B")
+ })
+ Convey("1024 Bytes -> 1.00KB", func() {
+ So(FormatByteAmount(1024), ShouldEqual, "1.00KB")
+ })
+ Convey("2500 Bytes -> 2.44KB", func() {
+ So(FormatByteAmount(2500), ShouldEqual, "2.44KB")
+ })
+ Convey("2*1024*1024 Bytes -> 2.00MB", func() {
+ So(FormatByteAmount(2*1024*1024), ShouldEqual, "2.00MB")
+ })
+ Convey("5*1024*1024*1024 Bytes -> 5.00GB", func() {
+ So(FormatByteAmount(5*1024*1024*1024), ShouldEqual, "5.00GB")
+ })
+ Convey("5*1024*1024*1024*1024 Bytes -> 5120GB", func() {
+ So(FormatByteAmount(5*1024*1024*1024*1024), ShouldEqual, "5120GB")
+ })
+ })
+}
+
+func TestOtherByteFormats(t *testing.T) {
+ Convey("With some sample byte amounts", t, func() {
+ Convey("with '10'", func() {
+ Convey("FormatMegabyteAmount -> 10.0M", func() {
+ So(FormatMegabyteAmount(10), ShouldEqual, "10.0M")
+ })
+ Convey("FormatByteAmount -> 10B", func() {
+ So(FormatByteAmount(10), ShouldEqual, "10B")
+ })
+ Convey("FormatBitsWithLowPrecision -> 10b", func() {
+ So(FormatBits(10), ShouldEqual, "10b")
+ })
+ })
+ Convey("with '1024 * 2.5'", func() {
+ val := int64(2.5 * 1024)
+ Convey("FormatMegabyteAmount -> 2.50G", func() {
+ So(FormatMegabyteAmount(val), ShouldEqual, "2.50G")
+ })
+ Convey("FormatByteAmount -> 2.50KB", func() {
+ So(FormatByteAmount(val), ShouldEqual, "2.50KB")
+ })
+ Convey("FormatBits -> 2.56k", func() {
+ So(FormatBits(val), ShouldEqual, "2.56k")
+ })
+ })
+ })
+}
+
+func TestBitFormatPrecision(t *testing.T) {
+ Convey("With values less than 1k", t, func() {
+ Convey("with '999'", func() {
+ Convey("FormatBits -> 999b", func() {
+ So(FormatBits(999), ShouldEqual, "999b")
+ })
+ })
+ Convey("with '99'", func() {
+ Convey("FormatBits -> 99b", func() {
+ So(FormatBits(99), ShouldEqual, "99b")
+ })
+ })
+ Convey("with '9'", func() {
+ Convey("FormatBits -> 9b", func() {
+ So(FormatBits(9), ShouldEqual, "9b")
+ })
+ })
+ })
+ Convey("With values less than 1m", t, func() {
+ Convey("with '9999'", func() {
+ Convey("FormatBits -> 10.0k", func() {
+ So(FormatBits(9999), ShouldEqual, "10.0k")
+ })
+ })
+ Convey("with '9990'", func() {
+ Convey("FormatBits -> 9.99k", func() {
+ So(FormatBits(9990), ShouldEqual, "9.99k")
+ })
+ })
+ })
+ Convey("With big numbers", t, func() {
+ Convey("with '999000000'", func() {
+ Convey("FormatBits -> 999m", func() {
+ So(FormatBits(999000000), ShouldEqual, "999m")
+ })
+ })
+ Convey("with '9990000000'", func() {
+ Convey("FormatBits -> 9.99g", func() {
+ So(FormatBits(9990000000), ShouldEqual, "9.99g")
+ })
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/util/bool.go b/src/mongo/gotools/common/util/bool.go
new file mode 100644
index 00000000000..d49a8483b9e
--- /dev/null
+++ b/src/mongo/gotools/common/util/bool.go
@@ -0,0 +1,32 @@
+package util
+
+import (
+ "gopkg.in/mgo.v2/bson"
+ "reflect"
+)
+
+// IsTruthy returns true for values the server will interpret as "true".
+// True values include {}, [], "", true, and any numbers != 0
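+// e.g. IsTruthy(0) == false, IsTruthy("") == true, IsTruthy(nil) == false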
+func IsTruthy(val interface{}) bool {
+ if val == nil {
+ return false
+ }
+ if val == bson.Undefined {
+ return false
+ }
+
+ v := reflect.ValueOf(val)
+ switch v.Kind() {
+ case reflect.Map, reflect.Slice, reflect.Array, reflect.String, reflect.Struct:
+ return true
+ default:
+ z := reflect.Zero(v.Type())
+ return v.Interface() != z.Interface()
+ }
+}
+
+// IsFalsy returns true for values the server will interpret as "false".
+// False values include numbers == 0, false, and nil
+func IsFalsy(val interface{}) bool {
+ return !IsTruthy(val)
+}
diff --git a/src/mongo/gotools/common/util/bool_test.go b/src/mongo/gotools/common/util/bool_test.go
new file mode 100644
index 00000000000..d3ed46d87f8
--- /dev/null
+++ b/src/mongo/gotools/common/util/bool_test.go
@@ -0,0 +1,58 @@
+package util
+
+import (
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "math"
+ "testing"
+)
+
+func TestJSTruthyValues(t *testing.T) {
+ Convey("With some sample values", t, func() {
+ Convey("known server code edge cases are correct", func() {
+ Convey("true -> true", func() {
+ So(IsTruthy(true), ShouldBeTrue)
+ })
+ Convey("{} -> true", func() {
+ var myMap map[string]interface{}
+ So(IsTruthy(myMap), ShouldBeTrue)
+ myMap = map[string]interface{}{"a": 1}
+ So(IsTruthy(myMap), ShouldBeTrue)
+ })
+ Convey("[] -> true", func() {
+ var mySlice []byte
+ So(IsTruthy(mySlice), ShouldBeTrue)
+ mySlice = []byte{21, 12}
+ So(IsTruthy(mySlice), ShouldBeTrue)
+ })
+ Convey(`"" -> true`, func() {
+ So(IsTruthy(""), ShouldBeTrue)
+ })
+ Convey("false -> false", func() {
+ So(IsTruthy(false), ShouldBeFalse)
+ })
+ Convey("0 -> false", func() {
+ So(IsTruthy(0), ShouldBeFalse)
+ })
+ Convey("0.0 -> false", func() {
+ So(IsTruthy(float64(0)), ShouldBeFalse)
+ })
+ Convey("nil -> false", func() {
+ So(IsTruthy(nil), ShouldBeFalse)
+ })
+ Convey("undefined -> false", func() {
+ So(IsTruthy(bson.Undefined), ShouldBeFalse)
+ })
+ })
+
+ Convey("and an assortment of non-edge cases are correct", func() {
+ So(IsTruthy([]int{1, 2, 3}), ShouldBeTrue)
+ So(IsTruthy("true"), ShouldBeTrue)
+ So(IsTruthy("false"), ShouldBeTrue)
+ So(IsTruthy(25), ShouldBeTrue)
+ So(IsTruthy(math.NaN()), ShouldBeTrue)
+ So(IsTruthy(25.1), ShouldBeTrue)
+ So(IsTruthy(struct{ A int }{A: 12}), ShouldBeTrue)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/util/exit_code.go b/src/mongo/gotools/common/util/exit_code.go
new file mode 100644
index 00000000000..b2f916263f9
--- /dev/null
+++ b/src/mongo/gotools/common/util/exit_code.go
@@ -0,0 +1,17 @@
+package util
+
+import (
+ "errors"
+)
+
+const (
+ ExitError int = 1
+ ExitClean int = 0
+ ExitBadOptions int = 3
+ ExitKill int = 4
+ // Go reserves exit code 2 for its own use
+)
+
+var (
+ ErrTerminated = errors.New("received termination signal")
+)
diff --git a/src/mongo/gotools/common/util/file.go b/src/mongo/gotools/common/util/file.go
new file mode 100644
index 00000000000..ce5ad160fd6
--- /dev/null
+++ b/src/mongo/gotools/common/util/file.go
@@ -0,0 +1,34 @@
+package util
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+)
+
+// GetFieldsFromFile reads the file at "path" and returns a slice
+// containing each of its lines, one field per line
+func GetFieldsFromFile(path string) ([]string, error) {
+ fieldFileReader, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer fieldFileReader.Close()
+
+ var fields []string
+ fieldScanner := bufio.NewScanner(fieldFileReader)
+ for fieldScanner.Scan() {
+ fields = append(fields, fieldScanner.Text())
+ }
+ if err := fieldScanner.Err(); err != nil {
+ return nil, err
+ }
+ return fields, nil
+}
+
+// ToUniversalPath returns the result of replacing each slash ('/') character
+// in "path" with an OS-specific separator character. Multiple slashes are
+// replaced by multiple separators.
+func ToUniversalPath(path string) string {
+ return filepath.FromSlash(path)
+}
diff --git a/src/mongo/gotools/common/util/format_date.go b/src/mongo/gotools/common/util/format_date.go
new file mode 100644
index 00000000000..6dba1e2b2c8
--- /dev/null
+++ b/src/mongo/gotools/common/util/format_date.go
@@ -0,0 +1,30 @@
+package util
+
+import (
+ "time"
+)
+
+var (
+ acceptedDateFormats = []string{
+ "2006-01-02T15:04:05.000Z",
+ "2006-01-02T15:04:05Z",
+ "2006-01-02T15:04Z",
+ "2006-01-02T15:04:05.000-0700",
+ "2006-01-02T15:04:05-0700",
+ "2006-01-02T15:04-0700",
+ "2006-01-02T15:04:05Z07:00",
+ }
+)
+
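+// FormatDate parses a date string against each accepted format in turn,
+// returning the first successful parse,
+// e.g. FormatDate("2014-01-02T15:04:05.000Z").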
+func FormatDate(v string) (interface{}, error) {
+ var date interface{}
+ var err error
+
+ for _, format := range acceptedDateFormats {
+ date, err = time.Parse(format, v)
+ if err == nil {
+ return date, nil
+ }
+ }
+ return date, err
+}
diff --git a/src/mongo/gotools/common/util/format_date_test.go b/src/mongo/gotools/common/util/format_date_test.go
new file mode 100644
index 00000000000..c1de40f4c61
--- /dev/null
+++ b/src/mongo/gotools/common/util/format_date_test.go
@@ -0,0 +1,49 @@
+package util
+
+import (
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestFormatDate(t *testing.T) {
+
+ Convey("will take valid format 2006-01-02T15:04:05.000Z", t, func() {
+ _, err := FormatDate("2014-01-02T15:04:05.000Z")
+ So(err, ShouldBeNil)
+ })
+
+ Convey("will take valid format 2006-01-02T15:04:05Z", t, func() {
+ _, err := FormatDate("2014-03-02T15:05:05Z")
+ So(err, ShouldBeNil)
+ })
+
+ Convey("will take valid format 2006-01-02T15:04Z", t, func() {
+ _, err := FormatDate("2014-04-02T15:04Z")
+ So(err, ShouldBeNil)
+ })
+
+ Convey("will take valid format 2006-01-02T15:04-0700", t, func() {
+ _, err := FormatDate("2014-04-02T15:04-0800")
+ So(err, ShouldBeNil)
+ })
+
+ Convey("will take valid format 2006-01-02T15:04:05.000-0700", t, func() {
+ _, err := FormatDate("2014-04-02T15:04:05.000-0600")
+ So(err, ShouldBeNil)
+ })
+
+ Convey("will take valid format 2006-01-02T15:04:05-0700", t, func() {
+ _, err := FormatDate("2014-04-02T15:04:05-0500")
+ So(err, ShouldBeNil)
+ })
+
+ Convey("will return an error for an invalid format", t, func() {
+ _, err := FormatDate("invalid string format")
+ So(err, ShouldNotBeNil)
+ })
+}
diff --git a/src/mongo/gotools/common/util/math.go b/src/mongo/gotools/common/util/math.go
new file mode 100644
index 00000000000..00b9c39aca7
--- /dev/null
+++ b/src/mongo/gotools/common/util/math.go
@@ -0,0 +1,78 @@
+package util
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// MaxInt returns the larger of two ints
+func MaxInt(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// Numeric Conversion Tools
+
+type converterFunc func(interface{}) (interface{}, error)
+
+// newNumberConverter makes it simple to generate new numeric converters.
+// Converters should be assigned at package level rather than created
+// dynamically inside a function, to avoid paying the construction cost
+// on every call.
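+// e.g. var float32Converter = newNumberConverter(reflect.TypeOf(float32(0)))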
+func newNumberConverter(targetType reflect.Type) converterFunc {
+ return func(number interface{}) (interface{}, error) {
+ // to avoid panics on nil values
+ if number == nil {
+ return nil, fmt.Errorf("cannot convert nil value")
+ }
+ v := reflect.ValueOf(number)
+ if !v.Type().ConvertibleTo(targetType) {
+ return nil, fmt.Errorf("cannot convert %v to %v", v.Type(), targetType)
+ }
+ converted := v.Convert(targetType)
+ return converted.Interface(), nil
+ }
+}
+
+// making this package level so it is only evaluated once
+var uint32Converter = newNumberConverter(reflect.TypeOf(uint32(0)))
+
+// ToUInt32 is a function for converting any numeric type
+// into a uint32. This can easily result in a loss of information
+// due to truncation, so be careful.
+func ToUInt32(number interface{}) (uint32, error) {
+ asInterface, err := uint32Converter(number)
+ if err != nil {
+ return 0, err
+ }
+ // no check for "ok" here, since we know it will work
+ return asInterface.(uint32), nil
+}
+
+var intConverter = newNumberConverter(reflect.TypeOf(int(0)))
+
+// ToInt is a function for converting any numeric type
+// into an int. This can easily result in a loss of information
+// due to truncation of floats.
+func ToInt(number interface{}) (int, error) {
+ asInterface, err := intConverter(number)
+ if err != nil {
+ return 0, err
+ }
+ // no check for "ok" here, since we know it will work
+ return asInterface.(int), nil
+}
+
+var float64Converter = newNumberConverter(reflect.TypeOf(float64(0)))
+
+// ToFloat64 is a function for converting any numeric type
+// into a float64.
+func ToFloat64(number interface{}) (float64, error) {
+ asInterface, err := float64Converter(number)
+ if err != nil {
+ return 0, err
+ }
+ // no check for "ok" here, since we know it will work
+ return asInterface.(float64), nil
+}
diff --git a/src/mongo/gotools/common/util/math_test.go b/src/mongo/gotools/common/util/math_test.go
new file mode 100644
index 00000000000..7295991508a
--- /dev/null
+++ b/src/mongo/gotools/common/util/math_test.go
@@ -0,0 +1,89 @@
+package util
+
+import (
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "reflect"
+ "testing"
+)
+
+func TestMaxInt(t *testing.T) {
+
+ testutil.VerifyTestType(t, "unit")
+
+ Convey("When finding the maximum of two ints", t, func() {
+
+ Convey("the larger int should be returned", func() {
+
+ So(MaxInt(1, 2), ShouldEqual, 2)
+ So(MaxInt(2, 1), ShouldEqual, 2)
+
+ })
+
+ })
+}
+
+func TestNumberConverter(t *testing.T) {
+
+ testutil.VerifyTestType(t, "unit")
+
+ Convey("With a number converter for float32", t, func() {
+ floatConverter := newNumberConverter(reflect.TypeOf(float32(0)))
+
+ Convey("numeric values should be convertable", func() {
+ out, err := floatConverter(21)
+ So(err, ShouldEqual, nil)
+ So(out, ShouldEqual, 21.0)
+ out, err = floatConverter(uint64(21))
+ So(err, ShouldEqual, nil)
+ So(out, ShouldEqual, 21.0)
+ out, err = floatConverter(float64(27.52))
+ So(err, ShouldEqual, nil)
+ So(out, ShouldEqual, 27.52)
+ })
+
+ Convey("non-numeric values should fail", func() {
+ _, err := floatConverter("I AM A STRING")
+ So(err, ShouldNotBeNil)
+ _, err = floatConverter(struct{ int }{12})
+ So(err, ShouldNotBeNil)
+ _, err = floatConverter(nil)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
+
+func TestUInt32Converter(t *testing.T) {
+
+ testutil.VerifyTestType(t, "unit")
+
+ Convey("With a series of test values, conversions should pass", t, func() {
+ out, err := ToUInt32(int64(99))
+ So(err, ShouldEqual, nil)
+ So(out, ShouldEqual, uint32(99))
+ out, err = ToUInt32(int32(99))
+ So(err, ShouldEqual, nil)
+ So(out, ShouldEqual, uint32(99))
+ out, err = ToUInt32(float32(99))
+ So(err, ShouldEqual, nil)
+ So(out, ShouldEqual, uint32(99))
+ out, err = ToUInt32(float64(99))
+ So(err, ShouldEqual, nil)
+ So(out, ShouldEqual, uint32(99))
+ out, err = ToUInt32(uint64(99))
+ So(err, ShouldEqual, nil)
+ So(out, ShouldEqual, uint32(99))
+ out, err = ToUInt32(uint32(99))
+ So(err, ShouldEqual, nil)
+ So(out, ShouldEqual, uint32(99))
+
+ Convey("but non-numeric inputs will fail", func() {
+ _, err = ToUInt32(nil)
+ So(err, ShouldNotBeNil)
+ _, err = ToUInt32("string")
+ So(err, ShouldNotBeNil)
+ _, err = ToUInt32([]byte{1, 2, 3, 4})
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/common/util/mongo.go b/src/mongo/gotools/common/util/mongo.go
new file mode 100644
index 00000000000..628bbc1d7f4
--- /dev/null
+++ b/src/mongo/gotools/common/util/mongo.go
@@ -0,0 +1,187 @@
+package util
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ InvalidDBChars = "/\\. \"\x00$"
+ InvalidCollectionChars = "$\x00"
+ DefaultHost = "localhost"
+ DefaultPort = "27017"
+)
+
+// ParseConnectionString extracts the replica set name and the list of hosts
+// from the connection string.
+func ParseConnectionString(connString string) ([]string, string) {
+
+ // strip off the replica set name from the beginning
+ slashIndex := strings.Index(connString, "/")
+ setName := ""
+ if slashIndex != -1 {
+ setName = connString[:slashIndex]
+ if slashIndex == len(connString)-1 {
+ return []string{""}, setName
+ }
+ connString = connString[slashIndex+1:]
+ }
+
+ // split the hosts, and return them and the set name
+ return strings.Split(connString, ","), setName
+}
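+
+// For example (illustrative): ParseConnectionString("rs0/h1:27017,h2:27017")
+// returns []string{"h1:27017", "h2:27017"} and the set name "rs0".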
+
+// CreateConnectionAddrs splits the host string into the individual nodes to
+// connect to, appending the port if necessary.
+func CreateConnectionAddrs(host, port string) []string {
+
+ // set to the defaults, if necessary
+ if host == "" {
+ host = DefaultHost
+ if port == "" {
+ host += fmt.Sprintf(":%v", DefaultPort)
+ }
+ }
+
+ // parse the host string into the individual hosts
+ addrs, _ := ParseConnectionString(host)
+
+ // if a port is specified, append it to all the hosts
+ if port != "" {
+ for idx, addr := range addrs {
+ addrs[idx] = fmt.Sprintf("%v:%v", addr, port)
+ }
+ }
+
+ return addrs
+}
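+
+// For example (illustrative): CreateConnectionAddrs("h1,h2", "27018") returns
+// []string{"h1:27018", "h2:27018"}, and CreateConnectionAddrs("", "") falls
+// back to []string{"localhost:27017"}.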
+
+// SplitAndValidateNamespace splits a namespace path into a database and collection,
+// returned in that order. An error is returned if the namespace is invalid.
+func SplitAndValidateNamespace(namespace string) (string, string, error) {
+
+ // first, run validation checks
+ if err := ValidateFullNamespace(namespace); err != nil {
+ return "", "", fmt.Errorf("namespace '%v' is not valid: %v",
+ namespace, err)
+ }
+
+ // find the first instance of "." in the namespace
+ firstDotIndex := strings.Index(namespace, ".")
+
+ // split the namespace, if applicable
+ var database string
+ var collection string
+ if firstDotIndex != -1 {
+ database = namespace[:firstDotIndex]
+ collection = namespace[firstDotIndex+1:]
+ } else {
+ database = namespace
+ }
+
+ return database, collection, nil
+}
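+
+// For example (illustrative): "db.a.b" splits at the first dot into database
+// "db" and collection "a.b", while a bare "db" yields an empty collection.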
+
+// ValidateFullNamespace validates a full mongodb namespace (database +
+// collection), returning an error if it is invalid.
+func ValidateFullNamespace(namespace string) error {
+
+ // the namespace must be shorter than 123 bytes
+ if len([]byte(namespace)) > 122 {
+ return fmt.Errorf("namespace %v is too long (>= 123 bytes)", namespace)
+ }
+
+ // find the first instance of "." in the namespace
+ firstDotIndex := strings.Index(namespace, ".")
+
+ // the namespace cannot begin with a dot
+ if firstDotIndex == 0 {
+ return fmt.Errorf("namespace %v begins with a '.'", namespace)
+ }
+
+ // the namespace cannot end with a dot
+ if firstDotIndex == len(namespace)-1 {
+ return fmt.Errorf("namespace %v ends with a '.'", namespace)
+ }
+
+ // split the namespace, if applicable
+ var database string
+ var collection string
+ if firstDotIndex != -1 {
+ database = namespace[:firstDotIndex]
+ collection = namespace[firstDotIndex+1:]
+ } else {
+ database = namespace
+ }
+
+ // validate the database name
+ dbValidationErr := ValidateDBName(database)
+ if dbValidationErr != nil {
+ return fmt.Errorf("database name is invalid: %v", dbValidationErr)
+ }
+
+ // validate the collection name, if necessary
+ if collection != "" {
+ collValidationErr := ValidateCollectionName(collection)
+ if collValidationErr != nil {
+ return fmt.Errorf("collection name is invalid: %v",
+ collValidationErr)
+ }
+ }
+
+ // the namespace is valid
+ return nil
+}
+
+// ValidateDBName validates that a string is a valid name for a mongodb
+// database. An error is returned if it is not valid.
+func ValidateDBName(database string) error {
+
+ // must be < 64 characters
+ if len([]byte(database)) > 63 {
+ return fmt.Errorf("db name '%v' is longer than 63 characters", database)
+ }
+
+ // check for illegal characters
+ for _, illegalRune := range InvalidDBChars {
+ if strings.ContainsRune(database, illegalRune) {
+ return fmt.Errorf("illegal character '%c' found in '%v'", illegalRune, database)
+ }
+ }
+
+ // db name is valid
+ return nil
+}
+
+// ValidateCollectionName validates that a string is a valid name for a mongodb
+// collection. An error is returned if it is not valid.
+func ValidateCollectionName(collection string) error {
+ // collection names cannot begin with 'system.'
+ if strings.HasPrefix(collection, "system.") {
+ return fmt.Errorf("collection name '%v' is not allowed to begin with"+
+ " 'system.'", collection)
+ }
+
+ return ValidateCollectionGrammar(collection)
+}
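+
+// For example (illustrative): ValidateCollectionName("system.users") returns
+// an error, while ValidateCollectionGrammar("system.users") below does not.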
+
+// ValidateCollectionGrammar validates a collection name for character and
+// length errors without rejecting system collections. It is intended for
+// validating functionality that manipulates system collections.
+func ValidateCollectionGrammar(collection string) error {
+
+ // collection names cannot be empty
+ if len(collection) == 0 {
+ return fmt.Errorf("collection name cannot be an empty string")
+ }
+
+ // check for illegal characters
+ for _, illegalRune := range InvalidCollectionChars {
+ if strings.ContainsRune(collection, illegalRune) {
+ return fmt.Errorf("illegal character '%c' found in '%v'", illegalRune, collection)
+ }
+ }
+
+ // collection name is valid
+ return nil
+}
diff --git a/src/mongo/gotools/common/util/mongo_test.go b/src/mongo/gotools/common/util/mongo_test.go
new file mode 100644
index 00000000000..58001698524
--- /dev/null
+++ b/src/mongo/gotools/common/util/mongo_test.go
@@ -0,0 +1,110 @@
+package util
+
+import (
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestParseConnectionString(t *testing.T) {
+
+ testutil.VerifyTestType(t, "unit")
+
+ Convey("When extracting the replica set and hosts from a connection"+
+ " url", t, func() {
+
+ Convey("an empty url should lead to an empty replica set name"+
+ " and hosts slice", func() {
+ hosts, setName := ParseConnectionString("")
+ So(hosts, ShouldResemble, []string{""})
+ So(setName, ShouldEqual, "")
+ })
+
+ Convey("a url not specifying a replica set name should lead to"+
+ " an empty replica set name", func() {
+ hosts, setName := ParseConnectionString("host1,host2")
+ So(hosts, ShouldResemble, []string{"host1", "host2"})
+ So(setName, ShouldEqual, "")
+ })
+
+ Convey("a url specifying a replica set name should lead to that name"+
+ " being returned", func() {
+ hosts, setName := ParseConnectionString("foo/host1,host2")
+ So(hosts, ShouldResemble, []string{"host1", "host2"})
+ So(setName, ShouldEqual, "foo")
+ })
+
+ })
+
+}
+
+func TestCreateConnectionAddrs(t *testing.T) {
+
+ testutil.VerifyTestType(t, "unit")
+
+ Convey("When creating the slice of connection addresses", t, func() {
+
+ Convey("if no port is specified, the addresses should all appear"+
+ " unmodified in the result", func() {
+
+ addrs := CreateConnectionAddrs("host1,host2", "")
+ So(addrs, ShouldResemble, []string{"host1", "host2"})
+
+ })
+
+ Convey("if a port is specified, it should be appended to each host"+
+ " from the host connection string", func() {
+
+ addrs := CreateConnectionAddrs("host1,host2", "20000")
+ So(addrs, ShouldResemble, []string{"host1:20000", "host2:20000"})
+
+ })
+
+ })
+
+}
+
+func TestInvalidNames(t *testing.T) {
+
+ Convey("Checking some invalid collection names, ", t, func() {
+ Convey("test.col$ is invalid", func() {
+ So(ValidateDBName("test"), ShouldBeNil)
+ So(ValidateCollectionName("col$"), ShouldNotBeNil)
+ So(ValidateFullNamespace("test.col$"), ShouldNotBeNil)
+ })
+ Convey("db/aaa.col is invalid", func() {
+ So(ValidateDBName("db/aaa"), ShouldNotBeNil)
+ So(ValidateCollectionName("col"), ShouldBeNil)
+ So(ValidateFullNamespace("db/aaa.col"), ShouldNotBeNil)
+ })
+ Convey("db. is invalid", func() {
+ So(ValidateDBName("db"), ShouldBeNil)
+ So(ValidateCollectionName(""), ShouldNotBeNil)
+ So(ValidateFullNamespace("db."), ShouldNotBeNil)
+ })
+ Convey("db space.col is invalid", func() {
+ So(ValidateDBName("db space"), ShouldNotBeNil)
+ So(ValidateCollectionName("col"), ShouldBeNil)
+ So(ValidateFullNamespace("db space.col"), ShouldNotBeNil)
+ })
+ Convey("db x$x is invalid", func() {
+ So(ValidateDBName("x$x"), ShouldNotBeNil)
+ So(ValidateFullNamespace("x$x.y"), ShouldNotBeNil)
+ })
+ Convey("[null].[null] is invalid", func() {
+ So(ValidateDBName("\x00"), ShouldNotBeNil)
+ So(ValidateCollectionName("\x00"), ShouldNotBeNil)
+ So(ValidateFullNamespace("\x00.\x00"), ShouldNotBeNil)
+ })
+ Convey("[empty] is invalid", func() {
+ So(ValidateFullNamespace(""), ShouldNotBeNil)
+ })
+ Convey("db.col is valid", func() {
+ So(ValidateDBName("db"), ShouldBeNil)
+ So(ValidateCollectionName("col"), ShouldBeNil)
+ So(ValidateFullNamespace("db.col"), ShouldBeNil)
+ })
+
+ })
+
+}
diff --git a/src/mongo/gotools/common/util/slice.go b/src/mongo/gotools/common/util/slice.go
new file mode 100644
index 00000000000..7c98c00ce44
--- /dev/null
+++ b/src/mongo/gotools/common/util/slice.go
@@ -0,0 +1,60 @@
+package util
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// SliceContains is a generic function that returns true if elt is in slice.
+// It panics if slice is not of Kind reflect.Slice.
+func SliceContains(slice, elt interface{}) bool {
+ if slice == nil {
+ return false
+ }
+ v := reflect.ValueOf(slice)
+ if v.Kind() != reflect.Slice {
+ panic(fmt.Sprintf("Cannot call SliceContains on a non-slice %#v of "+
+ "kind %#v", slice, v.Kind().String()))
+ }
+ for i := 0; i < v.Len(); i++ {
+ if reflect.DeepEqual(v.Index(i).Interface(), elt) {
+ return true
+ }
+ }
+ return false
+}
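+
+// For example (illustrative): SliceContains([]int{1, 2, 3}, 2) is true; since
+// comparison uses reflect.DeepEqual, element types need not be comparable.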
+
+// StringSliceContains reports whether str is in the slice.
+func StringSliceContains(slice []string, str string) bool {
+ return StringSliceIndex(slice, str) != -1
+}
+
+// StringSliceIndex returns the first index at which the given element
+// can be found in the slice, or -1 if it is not present.
+func StringSliceIndex(slice []string, str string) int {
+ i := -1
+ for j, v := range slice {
+ if v == str {
+ i = j
+ break
+ }
+ }
+ return i
+}
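+
+// For example (illustrative): StringSliceIndex([]string{"a", "b"}, "b") == 1
+// and StringSliceIndex([]string{"a", "b"}, "z") == -1.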
+
+// SliceCount is a generic function that returns the number of instances of
+// 'elt' in 'slice'. It panics if slice is not of Kind reflect.Slice.
+func SliceCount(slice, elt interface{}) int {
+ v := reflect.ValueOf(slice)
+ if v.Kind() != reflect.Slice {
+ panic(fmt.Sprintf("Cannot call SliceCount on a non-slice %#v of kind "+
+ "%#v", slice, v.Kind().String()))
+ }
+ counter := 0
+ for i := 0; i < v.Len(); i++ {
+ if reflect.DeepEqual(v.Index(i).Interface(), elt) {
+ counter++
+ }
+ }
+ return counter
+}
diff --git a/src/mongo/gotools/common/util/strings.go b/src/mongo/gotools/common/util/strings.go
new file mode 100644
index 00000000000..80aba93a63b
--- /dev/null
+++ b/src/mongo/gotools/common/util/strings.go
@@ -0,0 +1,12 @@
+package util
+
+// Pluralize takes an amount and two strings denoting the singular
+// and plural noun the amount represents. If the amount is singular,
+// the singular form is returned; otherwise plural is returned. E.g.
+// Pluralize(X, "mouse", "mice") -> 0 mice, 1 mouse, 2 mice, ...
+func Pluralize(amount int, singular, plural string) string {
+ if amount == 1 {
+ return singular
+ }
+ return plural
+}
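+
+// A typical use (illustrative), mirroring docPlural in mongodump:
+//
+//  msg := fmt.Sprintf("%d %s", n, Pluralize(n, "document", "documents"))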
diff --git a/src/mongo/gotools/common/util/util.go b/src/mongo/gotools/common/util/util.go
new file mode 100644
index 00000000000..715289a1bc3
--- /dev/null
+++ b/src/mongo/gotools/common/util/util.go
@@ -0,0 +1,2 @@
+// Package util provides commonly used utility functions.
+package util
diff --git a/src/mongo/gotools/mongodump/main/mongodump.go b/src/mongo/gotools/mongodump/main/mongodump.go
new file mode 100644
index 00000000000..e8a7c4c617b
--- /dev/null
+++ b/src/mongo/gotools/mongodump/main/mongodump.go
@@ -0,0 +1,75 @@
+// Main package for the mongodump tool.
+package main
+
+import (
+ "os"
+
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/signals"
+ "github.com/mongodb/mongo-tools/common/util"
+ "github.com/mongodb/mongo-tools/mongodump"
+)
+
+func main() {
+ // initialize command-line opts
+ opts := options.New("mongodump", mongodump.Usage, options.EnabledOptions{true, true, true})
+
+ inputOpts := &mongodump.InputOptions{}
+ opts.AddOptions(inputOpts)
+ outputOpts := &mongodump.OutputOptions{}
+ opts.AddOptions(outputOpts)
+
+ args, err := opts.Parse()
+ if err != nil {
+ log.Logvf(log.Always, "error parsing command line options: %v", err)
+ log.Logvf(log.Always, "try 'mongodump --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ if len(args) > 0 {
+ log.Logvf(log.Always, "positional arguments not allowed: %v", args)
+ log.Logvf(log.Always, "try 'mongodump --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ // print help, if specified
+ if opts.PrintHelp(false) {
+ return
+ }
+
+ // print version, if specified
+ if opts.PrintVersion() {
+ return
+ }
+
+ // init logger
+ log.SetVerbosity(opts.Verbosity)
+
+ // connect directly, unless a replica set name is explicitly specified
+ _, setName := util.ParseConnectionString(opts.Host)
+ opts.Direct = (setName == "")
+ opts.ReplicaSetName = setName
+
+ dump := mongodump.MongoDump{
+ ToolOptions: opts,
+ OutputOptions: outputOpts,
+ InputOptions: inputOpts,
+ }
+
+ finishedChan := signals.HandleWithInterrupt(dump.HandleInterrupt)
+ defer close(finishedChan)
+
+ if err = dump.Init(); err != nil {
+ log.Logvf(log.Always, "Failed: %v", err)
+ os.Exit(util.ExitError)
+ }
+
+ if err = dump.Dump(); err != nil {
+ log.Logvf(log.Always, "Failed: %v", err)
+ if err == util.ErrTerminated {
+ os.Exit(util.ExitKill)
+ }
+ os.Exit(util.ExitError)
+ }
+}
diff --git a/src/mongo/gotools/mongodump/metadata_dump.go b/src/mongo/gotools/mongodump/metadata_dump.go
new file mode 100644
index 00000000000..5a4382b6d2e
--- /dev/null
+++ b/src/mongo/gotools/mongodump/metadata_dump.go
@@ -0,0 +1,107 @@
+package mongodump
+
+import (
+ "bufio"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/intents"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/log"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// Metadata holds information about a collection's options and indexes.
+type Metadata struct {
+ Options interface{} `json:"options,omitempty"`
+ Indexes []interface{} `json:"indexes"`
+}
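+
+// A dumped *.metadata.json file therefore looks roughly like this
+// (illustrative values):
+//
+//  {"options":{"capped":true,"size":4096},"indexes":[{"v":1,"key":{"_id":1},"name":"_id_"}]}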
+
+// IndexDocumentFromDB is used internally to preserve key ordering.
+type IndexDocumentFromDB struct {
+ Options bson.M `bson:",inline"`
+ Key bson.D `bson:"key"`
+}
+
+// dumpMetadata gets the metadata for a collection and writes it
+// in readable JSON format.
+func (dump *MongoDump) dumpMetadata(intent *intents.Intent) error {
+ var err error
+
+ nsID := fmt.Sprintf("%v.%v", intent.DB, intent.C)
+ meta := Metadata{
+ // We have to initialize Indexes to an empty slice, not nil, so that an empty
+ // array is marshalled into json instead of null. That is, {indexes:[]} is okay
+ // but {indexes:null} will cause assertions in our legacy C++ mongotools
+ Indexes: []interface{}{},
+ }
+
+ // The collection options were already gathered while building the list of intents.
+ // We convert them to JSON so that they can be written to the metadata json file as text.
+ if intent.Options != nil {
+ if meta.Options, err = bsonutil.ConvertBSONValueToJSON(*intent.Options); err != nil {
+ return fmt.Errorf("error converting collection options to JSON: %v", err)
+ }
+ } else {
+ meta.Options = nil
+ }
+
+ // Second, we read the collection's index information by either calling
+ // listIndexes (on servers that support it) or querying system.indexes
+ // (on older servers).
+ // We keep a running list of all the indexes
+ // for the current collection as we iterate over the cursor, and include
+ // that list as the "indexes" field of the metadata document.
+ log.Logvf(log.DebugHigh, "\treading indexes for `%v`", nsID)
+
+ session, err := dump.sessionProvider.GetSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+
+ // get the indexes
+ indexesIter, err := db.GetIndexes(session.DB(intent.DB).C(intent.C))
+ if err != nil {
+ return err
+ }
+ if indexesIter == nil {
+ log.Logvf(log.Always, "the collection %v appears to have been dropped after the dump started", intent.Namespace())
+ return nil
+ }
+
+ indexOpts := &bson.D{}
+ for indexesIter.Next(indexOpts) {
+ convertedIndex, err := bsonutil.ConvertBSONValueToJSON(*indexOpts)
+ if err != nil {
+ return fmt.Errorf("error converting index (%#v): %v", convertedIndex, err)
+ }
+ meta.Indexes = append(meta.Indexes, convertedIndex)
+ }
+
+ if err := indexesIter.Err(); err != nil {
+ return fmt.Errorf("error getting indexes for collection `%v`: %v", nsID, err)
+ }
+
+ // Finally, we send the results to the writer as JSON bytes
+ jsonBytes, err := json.Marshal(meta)
+ if err != nil {
+ return fmt.Errorf("error marshalling metadata json for collection `%v`: %v", nsID, err)
+ }
+
+ err = intent.MetadataFile.Open()
+ if err != nil {
+ return err
+ }
+ defer intent.MetadataFile.Close()
+ // make a buffered writer for nicer disk i/o
+ w := bufio.NewWriter(intent.MetadataFile)
+ _, err = w.Write(jsonBytes)
+ if err != nil {
+ return fmt.Errorf("error writing metadata for collection `%v` to disk: %v", nsID, err)
+ }
+ err = w.Flush()
+ if err != nil {
+ return fmt.Errorf("error writing metadata for collection `%v` to disk: %v", nsID, err)
+ }
+ return nil
+}
diff --git a/src/mongo/gotools/mongodump/mongodump.go b/src/mongo/gotools/mongodump/mongodump.go
new file mode 100644
index 00000000000..a2dfcc40102
--- /dev/null
+++ b/src/mongo/gotools/mongodump/mongodump.go
@@ -0,0 +1,803 @@
+// Package mongodump creates BSON data from the contents of a MongoDB instance.
+package mongodump
+
+import (
+ "github.com/mongodb/mongo-tools/common/archive"
+ "github.com/mongodb/mongo-tools/common/auth"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/intents"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/progress"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+
+ "compress/gzip"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+)
+
+const (
+ progressBarLength = 24
+ progressBarWaitTime = time.Second * 3
+ defaultPermissions = 0755
+)
+
+// MongoDump is a container for the user-specified options and
+// internal state used for running mongodump.
+type MongoDump struct {
+ // basic mongo tool options
+ ToolOptions *options.ToolOptions
+ InputOptions *InputOptions
+ OutputOptions *OutputOptions
+
+ // useful internals that we don't directly expose as options
+ sessionProvider *db.SessionProvider
+ manager *intents.Manager
+ query bson.M
+ oplogCollection string
+ oplogStart bson.MongoTimestamp
+ isMongos bool
+ authVersion int
+ archive *archive.Writer
+ progressManager *progress.Manager
+ // shutdownIntentsNotifier is provided to the multiplexer
+ // as well as the signal handler, and allows them to notify
+ // the intent dumpers that they should shutdown
+ shutdownIntentsNotifier *notifier
+ // the value of stdout gets initizlied to os.Stdout if it's unset
+ stdout io.Writer
+ readPrefMode mgo.Mode
+ readPrefTags []bson.D
+}
+
+type notifier struct {
+ notified chan struct{}
+ once sync.Once
+}
+
+func (n *notifier) Notify() { n.once.Do(func() { close(n.notified) }) }
+
+func newNotifier() *notifier { return &notifier{notified: make(chan struct{})} }
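+
+// Receivers simply select on n.notified (see dumpIterToWriter below). Notify
+// is safe to call multiple times from any goroutine: the sync.Once ensures the
+// channel is closed exactly once, where a bare second close would panic.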
+
+// ValidateOptions checks for any incompatible sets of options.
+func (dump *MongoDump) ValidateOptions() error {
+ switch {
+ case dump.OutputOptions.Out == "-" && dump.ToolOptions.Namespace.Collection == "":
+ return fmt.Errorf("can only dump a single collection to stdout")
+ case dump.ToolOptions.Namespace.DB == "" && dump.ToolOptions.Namespace.Collection != "":
+ return fmt.Errorf("cannot dump a collection without a specified database")
+ case dump.InputOptions.Query != "" && dump.ToolOptions.Namespace.Collection == "":
+ return fmt.Errorf("cannot dump using a query without a specified collection")
+ case dump.InputOptions.QueryFile != "" && dump.ToolOptions.Namespace.Collection == "":
+ return fmt.Errorf("cannot dump using a queryFile without a specified collection")
+ case dump.InputOptions.Query != "" && dump.InputOptions.QueryFile != "":
+ return fmt.Errorf("either query or queryFile can be specified as a query option, not both")
+ case dump.InputOptions.Query != "" && dump.InputOptions.TableScan:
+ return fmt.Errorf("cannot use --forceTableScan when specifying --query")
+ case dump.OutputOptions.DumpDBUsersAndRoles && dump.ToolOptions.Namespace.DB == "":
+ return fmt.Errorf("must specify a database when running with dumpDbUsersAndRoles")
+ case dump.OutputOptions.DumpDBUsersAndRoles && dump.ToolOptions.Namespace.Collection != "":
+ return fmt.Errorf("cannot specify a collection when running with dumpDbUsersAndRoles")
+ case dump.OutputOptions.Oplog && dump.ToolOptions.Namespace.DB != "":
+ return fmt.Errorf("--oplog mode only supported on full dumps")
+ case len(dump.OutputOptions.ExcludedCollections) > 0 && dump.ToolOptions.Namespace.Collection != "":
+ return fmt.Errorf("--collection is not allowed when --excludeCollection is specified")
+ case len(dump.OutputOptions.ExcludedCollectionPrefixes) > 0 && dump.ToolOptions.Namespace.Collection != "":
+ return fmt.Errorf("--collection is not allowed when --excludeCollectionsWithPrefix is specified")
+ case len(dump.OutputOptions.ExcludedCollections) > 0 && dump.ToolOptions.Namespace.DB == "":
+ return fmt.Errorf("--db is required when --excludeCollection is specified")
+ case len(dump.OutputOptions.ExcludedCollectionPrefixes) > 0 && dump.ToolOptions.Namespace.DB == "":
+ return fmt.Errorf("--db is required when --excludeCollectionsWithPrefix is specified")
+ case dump.OutputOptions.Repair && dump.InputOptions.Query != "":
+ return fmt.Errorf("cannot run a query with --repair enabled")
+ case dump.OutputOptions.Repair && dump.InputOptions.QueryFile != "":
+ return fmt.Errorf("cannot run a queryFile with --repair enabled")
+ case dump.OutputOptions.Out != "" && dump.OutputOptions.Archive != "":
+ return fmt.Errorf("--out not allowed when --archive is specified")
+ case dump.OutputOptions.Out == "-" && dump.OutputOptions.Gzip:
+ return fmt.Errorf("compression can't be used when dumping a single collection to standard output")
+ case dump.OutputOptions.NumParallelCollections <= 0:
+ return fmt.Errorf("numParallelCollections must be positive")
+ }
+ return nil
+}
+
+// Init performs preliminary setup operations for MongoDump.
+func (dump *MongoDump) Init() error {
+ err := dump.ValidateOptions()
+ if err != nil {
+ return fmt.Errorf("bad option: %v", err)
+ }
+ if dump.stdout == nil {
+ dump.stdout = os.Stdout
+ }
+ dump.sessionProvider, err = db.NewSessionProvider(*dump.ToolOptions)
+ if err != nil {
+ return fmt.Errorf("can't create session: %v", err)
+ }
+
+ // temporarily allow secondary reads for the isMongos check
+ dump.sessionProvider.SetReadPreference(mgo.Nearest)
+ dump.isMongos, err = dump.sessionProvider.IsMongos()
+ if err != nil {
+ return err
+ }
+
+ if dump.isMongos && dump.OutputOptions.Oplog {
+ return fmt.Errorf("can't use --oplog option when dumping from a mongos")
+ }
+
+ var mode mgo.Mode
+ if dump.ToolOptions.ReplicaSetName != "" || dump.isMongos {
+ mode = mgo.Primary
+ } else {
+ mode = mgo.Nearest
+ }
+ var tags bson.D
+
+ if dump.InputOptions.ReadPreference != "" {
+ mode, tags, err = db.ParseReadPreference(dump.InputOptions.ReadPreference)
+ if err != nil {
+ return fmt.Errorf("error parsing --readPreference : %v", err)
+ }
+ if len(tags) > 0 {
+ dump.sessionProvider.SetTags(tags)
+ }
+ }
+
+ // warn if we are trying to dump from a secondary in a sharded cluster
+ if dump.isMongos && mode != mgo.Primary {
+ log.Logvf(log.Always, db.WarningNonPrimaryMongosConnection)
+ }
+
+ dump.sessionProvider.SetReadPreference(mode)
+ dump.sessionProvider.SetTags(tags)
+ dump.sessionProvider.SetFlags(db.DisableSocketTimeout)
+
+ // return a helpful error message for mongos --repair
+ if dump.OutputOptions.Repair && dump.isMongos {
+ return fmt.Errorf("--repair flag cannot be used on a mongos")
+ }
+
+ dump.manager = intents.NewIntentManager()
+ dump.progressManager = progress.NewProgressBarManager(log.Writer(0), progressBarWaitTime)
+ return nil
+}
+
+// Dump handles some final options checking and executes MongoDump.
+func (dump *MongoDump) Dump() (err error) {
+ dump.shutdownIntentsNotifier = newNotifier()
+
+ if dump.InputOptions.HasQuery() {
+ // parse JSON then convert extended JSON values
+ var asJSON interface{}
+ content, err := dump.InputOptions.GetQuery()
+ if err != nil {
+ return err
+ }
+ err = json.Unmarshal(content, &asJSON)
+ if err != nil {
+ return fmt.Errorf("error parsing query as json: %v", err)
+ }
+ convertedJSON, err := bsonutil.ConvertJSONValueToBSON(asJSON)
+ if err != nil {
+ return fmt.Errorf("error converting query to bson: %v", err)
+ }
+ asMap, ok := convertedJSON.(map[string]interface{})
+ if !ok {
+ // unlikely to be reached
+ return fmt.Errorf("query is not in proper format")
+ }
+ dump.query = bson.M(asMap)
+ }
+
+ if dump.OutputOptions.DumpDBUsersAndRoles {
+ // first make sure this is possible with the connected database
+ dump.authVersion, err = auth.GetAuthVersion(dump.sessionProvider)
+ if err == nil {
+ err = auth.VerifySystemAuthVersion(dump.sessionProvider)
+ }
+ if err != nil {
+ return fmt.Errorf("error getting auth schema version for dumpDbUsersAndRoles: %v", err)
+ }
+ log.Logvf(log.DebugLow, "using auth schema version %v", dump.authVersion)
+ if dump.authVersion < 3 {
+ return fmt.Errorf("backing up users and roles is only supported for "+
+ "deployments with auth schema versions >= 3, found: %v", dump.authVersion)
+ }
+ }
+
+ if dump.OutputOptions.Archive != "" {
+ // getArchiveOut gives us a WriteCloser to which we should write the archive
+ var archiveOut io.WriteCloser
+ archiveOut, err = dump.getArchiveOut()
+ if err != nil {
+ return err
+ }
+ dump.archive = &archive.Writer{
+ // The archive.Writer needs its own copy of archiveOut because things
+ // like the prelude are not written by the multiplexer.
+ Out: archiveOut,
+ Mux: archive.NewMultiplexer(archiveOut, dump.shutdownIntentsNotifier),
+ }
+ go dump.archive.Mux.Run()
+ defer func() {
+ // The Mux runs until its Control is closed
+ close(dump.archive.Mux.Control)
+ muxErr := <-dump.archive.Mux.Completed
+ archiveOut.Close()
+ if muxErr != nil {
+ if err != nil {
+ err = fmt.Errorf("archive writer: %v / %v", err, muxErr)
+ } else {
+ err = fmt.Errorf("archive writer: %v", muxErr)
+ }
+ log.Logvf(log.DebugLow, "%v", err)
+ } else {
+ log.Logvf(log.DebugLow, "mux completed successfully")
+ }
+ }()
+ }
+
+ // switch on what kind of execution to do
+ switch {
+ case dump.ToolOptions.DB == "" && dump.ToolOptions.Collection == "":
+ err = dump.CreateAllIntents()
+ case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection == "":
+ err = dump.CreateIntentsForDatabase(dump.ToolOptions.DB)
+ case dump.ToolOptions.DB != "" && dump.ToolOptions.Collection != "":
+ err = dump.CreateCollectionIntent(dump.ToolOptions.DB, dump.ToolOptions.Collection)
+ }
+ if err != nil {
+ return err
+ }
+
+ if dump.OutputOptions.Oplog {
+ err = dump.CreateOplogIntents()
+ if err != nil {
+ return err
+ }
+ }
+
+ if dump.OutputOptions.DumpDBUsersAndRoles && dump.ToolOptions.DB != "admin" {
+ err = dump.CreateUsersRolesVersionIntentsForDB(dump.ToolOptions.DB)
+ if err != nil {
+ return err
+ }
+ }
+
+ // verify we can use repair cursors
+ if dump.OutputOptions.Repair {
+ log.Logv(log.DebugLow, "verifying that the connected server supports repairCursor")
+ if dump.isMongos {
+ return fmt.Errorf("cannot use --repair on mongos")
+ }
+ exampleIntent := dump.manager.Peek()
+ if exampleIntent != nil {
+ supported, err := dump.sessionProvider.SupportsRepairCursor(
+ exampleIntent.DB, exampleIntent.C)
+ if !supported {
+ return err // no extra context needed
+ }
+ }
+ }
+
+ // IO Phase I
+ // metadata, users, roles, and versions
+
+ // TODO, either remove this debug or improve the language
+ log.Logvf(log.DebugHigh, "dump phase I: metadata, indexes, users, roles, version")
+
+ err = dump.DumpMetadata()
+ if err != nil {
+ return fmt.Errorf("error dumping metadata: %v", err)
+ }
+
+ if dump.OutputOptions.Archive != "" {
+ session, err := dump.sessionProvider.GetSession()
+ if err != nil {
+ return err
+ }
+ buildInfo, err := session.BuildInfo()
+ var serverVersion string
+ if err != nil {
+ log.Logvf(log.Always, "warning, couldn't get version information from server: %v", err)
+ serverVersion = "unknown"
+ } else {
+ serverVersion = buildInfo.Version
+ }
+ dump.archive.Prelude, err = archive.NewPrelude(dump.manager, dump.OutputOptions.NumParallelCollections, serverVersion)
+ if err != nil {
+ return fmt.Errorf("creating archive prelude: %v", err)
+ }
+ err = dump.archive.Prelude.Write(dump.archive.Out)
+ if err != nil {
+ return fmt.Errorf("error writing metadata into archive: %v", err)
+ }
+ }
+
+ err = dump.DumpSystemIndexes()
+ if err != nil {
+ return fmt.Errorf("error dumping system indexes: %v", err)
+ }
+
+ if dump.ToolOptions.DB == "admin" || dump.ToolOptions.DB == "" {
+ err = dump.DumpUsersAndRoles()
+ if err != nil {
+ return fmt.Errorf("error dumping users and roles: %v", err)
+ }
+ }
+ if dump.OutputOptions.DumpDBUsersAndRoles {
+ log.Logvf(log.Always, "dumping users and roles for %v", dump.ToolOptions.DB)
+ if dump.ToolOptions.DB == "admin" {
+ log.Logvf(log.Always, "skipping users/roles dump, already dumped admin database")
+ } else {
+ err = dump.DumpUsersAndRolesForDB(dump.ToolOptions.DB)
+ if err != nil {
+ return fmt.Errorf("error dumping users and roles for db: %v", err)
+ }
+ }
+ }
+
+ // If oplog capturing is enabled, we first check the most recent
+ // oplog entry and save its timestamp; this will let us later
+ // copy all oplog entries that occurred while dumping, creating
+ // what is effectively a point-in-time snapshot.
+ if dump.OutputOptions.Oplog {
+ err := dump.determineOplogCollectionName()
+ if err != nil {
+ return fmt.Errorf("error finding oplog: %v", err)
+ }
+ log.Logvf(log.Info, "getting most recent oplog timestamp")
+ dump.oplogStart, err = dump.getOplogStartTime()
+ if err != nil {
+ return fmt.Errorf("error getting oplog start: %v", err)
+ }
+ }
+
+ // IO Phase II
+ // regular collections
+
+ // TODO, either remove this debug or improve the language
+ log.Logvf(log.DebugHigh, "dump phase II: regular collections")
+
+ // kick off the progress bar manager and begin dumping intents
+ dump.progressManager.Start()
+ defer dump.progressManager.Stop()
+
+ if err := dump.DumpIntents(); err != nil {
+ return err
+ }
+
+ // IO Phase III
+ // oplog
+
+ // TODO, either remove this debug or improve the language
+ log.Logvf(log.DebugLow, "dump phase III: the oplog")
+
+ // If we are capturing the oplog, we dump all oplog entries that occurred
+ // while dumping the database. Before and after dumping the oplog,
+ // we check to see if the oplog has rolled over (i.e. that the most recent
+ // entry from when we started still exists, so we know we haven't lost data).
+ if dump.OutputOptions.Oplog {
+ log.Logvf(log.DebugLow, "checking if oplog entry %v still exists", dump.oplogStart)
+ exists, err := dump.checkOplogTimestampExists(dump.oplogStart)
+ if !exists {
+ return fmt.Errorf(
+ "oplog overflow: mongodump was unable to capture all new oplog entries during execution")
+ }
+ if err != nil {
+ return fmt.Errorf("unable to check oplog for overflow: %v", err)
+ }
+ log.Logvf(log.DebugHigh, "oplog entry %v still exists", dump.oplogStart)
+
+ log.Logvf(log.Always, "writing captured oplog to %v", dump.manager.Oplog().Location)
+ err = dump.DumpOplogAfterTimestamp(dump.oplogStart)
+ if err != nil {
+ return fmt.Errorf("error dumping oplog: %v", err)
+ }
+
+ // check the oplog for a rollover one last time, to avoid a race condition
+ // wherein the oplog rolls over in the time after our first check, but before
+ // we copy it.
+ log.Logvf(log.DebugLow, "checking again if oplog entry %v still exists", dump.oplogStart)
+ exists, err = dump.checkOplogTimestampExists(dump.oplogStart)
+ if !exists {
+ return fmt.Errorf(
+ "oplog overflow: mongodump was unable to capture all new oplog entries during execution")
+ }
+ if err != nil {
+ return fmt.Errorf("unable to check oplog for overflow: %v", err)
+ }
+ log.Logvf(log.DebugHigh, "oplog entry %v still exists", dump.oplogStart)
+ }
+
+ log.Logvf(log.DebugLow, "finishing dump")
+
+ return err
+}
+
+// DumpIntents iterates through the previously-created intents and
+// dumps all of the found collections.
+func (dump *MongoDump) DumpIntents() error {
+ resultChan := make(chan error)
+
+ jobs := dump.OutputOptions.NumParallelCollections
+ if numIntents := len(dump.manager.Intents()); jobs > numIntents {
+ jobs = numIntents
+ }
+
+ if jobs > 1 {
+ dump.manager.Finalize(intents.LongestTaskFirst)
+ } else {
+ dump.manager.Finalize(intents.Legacy)
+ }
+
+ log.Logvf(log.Info, "dumping up to %v collections in parallel", jobs)
+
+ // start a goroutine for each job thread
+ for i := 0; i < jobs; i++ {
+ go func(id int) {
+ log.Logvf(log.DebugHigh, "starting dump routine with id=%v", id)
+ for {
+ intent := dump.manager.Pop()
+ if intent == nil {
+ log.Logvf(log.DebugHigh, "ending dump routine with id=%v, no more work to do", id)
+ resultChan <- nil
+ return
+ }
+ err := dump.DumpIntent(intent)
+ if err != nil {
+ resultChan <- err
+ return
+ }
+ dump.manager.Finish(intent)
+ }
+ }(i)
+ }
+
+ // wait until all goroutines are done or one of them errors out
+ for i := 0; i < jobs; i++ {
+ if err := <-resultChan; err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// DumpIntent dumps the specified database's collection.
+func (dump *MongoDump) DumpIntent(intent *intents.Intent) error {
+ session, err := dump.sessionProvider.GetSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+ // in mgo, setting prefetch = 1.0 causes the driver to make requests for
+ // more results as soon as results are returned. This effectively
+ // duplicates the behavior of an exhaust cursor.
+ session.SetPrefetch(1.0)
+
+ err = intent.BSONFile.Open()
+ if err != nil {
+ return err
+ }
+ defer intent.BSONFile.Close()
+
+ var findQuery *mgo.Query
+ switch {
+ case len(dump.query) > 0:
+ findQuery = session.DB(intent.DB).C(intent.C).Find(dump.query)
+ case dump.InputOptions.TableScan:
+ // --forceTableScan runs the query without snapshot enabled
+ findQuery = session.DB(intent.DB).C(intent.C).Find(nil)
+ default:
+ findQuery = session.DB(intent.DB).C(intent.C).Find(nil).Snapshot()
+ }
+
+ var dumpCount int64
+
+ if dump.OutputOptions.Out == "-" {
+ log.Logvf(log.Always, "writing %v to stdout", intent.Namespace())
+ dumpCount, err = dump.dumpQueryToWriter(findQuery, intent)
+ if err == nil {
+ // on success, print the document count
+ log.Logvf(log.Always, "dumped %v %v", dumpCount, docPlural(dumpCount))
+ }
+ return err
+ }
+
+ // set where the intent will be written to
+ if dump.OutputOptions.Archive != "" {
+ if dump.OutputOptions.Archive == "-" {
+ intent.Location = "archive on stdout"
+ } else {
+ intent.Location = fmt.Sprintf("archive '%v'", dump.OutputOptions.Archive)
+ }
+ }
+
+ if !dump.OutputOptions.Repair {
+ log.Logvf(log.Always, "writing %v to %v", intent.Namespace(), intent.Location)
+ if dumpCount, err = dump.dumpQueryToWriter(findQuery, intent); err != nil {
+ return err
+ }
+ } else {
+ // handle repairs as a special case, since we cannot count them
+ log.Logvf(log.Always, "writing repair of %v to %v", intent.Namespace(), intent.Location)
+ repairIter := session.DB(intent.DB).C(intent.C).Repair()
+ repairCounter := progress.NewCounter(1) // this counter is ignored
+ if err := dump.dumpIterToWriter(repairIter, intent.BSONFile, repairCounter); err != nil {
+ return fmt.Errorf("repair error: %v", err)
+ }
+ _, repairCount := repairCounter.Progress()
+ log.Logvf(log.Always, "\trepair cursor found %v %v in %v",
+ repairCount, docPlural(repairCount), intent.Namespace())
+ }
+
+ log.Logvf(log.Always, "done dumping %v (%v %v)", intent.Namespace(), dumpCount, docPlural(dumpCount))
+ return nil
+}
+
+// dumpQueryToWriter takes an mgo Query and its intent, performs the query,
+// and writes the raw bson results to the intent's BSONFile. Returns a final
+// count of documents dumped, and any errors that occurred.
+func (dump *MongoDump) dumpQueryToWriter(
+ query *mgo.Query, intent *intents.Intent) (int64, error) {
+ var total int
+ var err error
+ if len(dump.query) == 0 {
+ total, err = query.Count()
+ if err != nil {
+ return int64(0), fmt.Errorf("error reading from db: %v", err)
+ }
+ log.Logvf(log.DebugLow, "counted %v %v in %v", total, docPlural(int64(total)), intent.Namespace())
+ } else {
+ log.Logvf(log.DebugLow, "not counting query on %v", intent.Namespace())
+ }
+
+ dumpProgressor := progress.NewCounter(int64(total))
+ bar := &progress.Bar{
+ Name: intent.Namespace(),
+ Watching: dumpProgressor,
+ BarLength: progressBarLength,
+ }
+ dump.progressManager.Attach(bar)
+ defer dump.progressManager.Detach(bar)
+
+ err = dump.dumpIterToWriter(query.Iter(), intent.BSONFile, dumpProgressor)
+ _, dumpCount := dumpProgressor.Progress()
+
+ return dumpCount, err
+}
+
+// dumpIterToWriter takes an mgo iterator, a writer, and a progress counter,
+// and dumps the iterator's contents to the writer.
+func (dump *MongoDump) dumpIterToWriter(
+ iter *mgo.Iter, writer io.Writer, progressCount progress.Updateable) error {
+ var termErr error
+
+ // We run the result iteration in its own goroutine; this allows disk i/o
+ // not to block reads from the db, which gives a slight speedup on benchmarks.
+ buffChan := make(chan []byte)
+ go func() {
+ for {
+ select {
+ case <-dump.shutdownIntentsNotifier.notified:
+ log.Logvf(log.DebugHigh, "terminating writes")
+ termErr = util.ErrTerminated
+ close(buffChan)
+ return
+ default:
+ raw := &bson.Raw{}
+ next := iter.Next(raw)
+ if !next {
+ // we check the iterator for errors below
+ close(buffChan)
+ return
+ }
+ nextCopy := make([]byte, len(raw.Data))
+ copy(nextCopy, raw.Data)
+ buffChan <- nextCopy
+ }
+ }
+ }()
+
+ // while there are still results in the database,
+ // grab results from the goroutine and write them to filesystem
+ for {
+ buff, alive := <-buffChan
+ if !alive {
+ if iter.Err() != nil {
+ return fmt.Errorf("error reading collection: %v", iter.Err())
+ }
+ break
+ }
+ _, err := writer.Write(buff)
+ if err != nil {
+ return fmt.Errorf("error writing to file: %v", err)
+ }
+ progressCount.Inc(1)
+ }
+ return termErr
+}
+
+// DumpUsersAndRolesForDB queries and dumps the users and roles tied to the given
+// database. Only works with an authentication schema version >= 3.
+func (dump *MongoDump) DumpUsersAndRolesForDB(db string) error {
+ session, err := dump.sessionProvider.GetSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+
+ dbQuery := bson.M{"db": db}
+ usersQuery := session.DB("admin").C("system.users").Find(dbQuery)
+ intent := dump.manager.Users()
+ err = intent.BSONFile.Open()
+ if err != nil {
+ return fmt.Errorf("error opening output stream for dumping Users: %v", err)
+ }
+ defer intent.BSONFile.Close()
+ _, err = dump.dumpQueryToWriter(usersQuery, intent)
+ if err != nil {
+ return fmt.Errorf("error dumping db users: %v", err)
+ }
+
+ rolesQuery := session.DB("admin").C("system.roles").Find(dbQuery)
+ intent = dump.manager.Roles()
+ err = intent.BSONFile.Open()
+ if err != nil {
+ return fmt.Errorf("error opening output stream for dumping Roles: %v", err)
+ }
+ defer intent.BSONFile.Close()
+ _, err = dump.dumpQueryToWriter(rolesQuery, intent)
+ if err != nil {
+ return fmt.Errorf("error dumping db roles: %v", err)
+ }
+
+ versionQuery := session.DB("admin").C("system.version").Find(nil)
+ intent = dump.manager.AuthVersion()
+ err = intent.BSONFile.Open()
+ if err != nil {
+ return fmt.Errorf("error opening output stream for dumping AuthVersion: %v", err)
+ }
+ defer intent.BSONFile.Close()
+ _, err = dump.dumpQueryToWriter(versionQuery, intent)
+ if err != nil {
+ return fmt.Errorf("error dumping db auth version: %v", err)
+ }
+
+ return nil
+}
+
+// DumpUsersAndRoles dumps all of the users, roles, and auth version intents.
+// TODO: This and DumpUsersAndRolesForDB should be merged correctly.
+func (dump *MongoDump) DumpUsersAndRoles() error {
+ var err error
+ if dump.manager.Users() != nil {
+ err = dump.DumpIntent(dump.manager.Users())
+ if err != nil {
+ return err
+ }
+ }
+ if dump.manager.Roles() != nil {
+ err = dump.DumpIntent(dump.manager.Roles())
+ if err != nil {
+ return err
+ }
+ }
+ if dump.manager.AuthVersion() != nil {
+ err = dump.DumpIntent(dump.manager.AuthVersion())
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// DumpSystemIndexes dumps all of the system.indexes collections.
+func (dump *MongoDump) DumpSystemIndexes() error {
+ for _, dbName := range dump.manager.SystemIndexDBs() {
+ err := dump.DumpIntent(dump.manager.SystemIndexes(dbName))
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// DumpMetadata dumps the metadata for each intent in the manager
+// that has metadata
+func (dump *MongoDump) DumpMetadata() error {
+ allIntents := dump.manager.Intents()
+ for _, intent := range allIntents {
+ if intent.MetadataFile != nil {
+ err := dump.dumpMetadata(intent)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// nopCloseWriter implements io.WriteCloser. It wraps up an io.Writer, and adds a no-op Close
+type nopCloseWriter struct {
+ io.Writer
+}
+
+// Close does nothing on nopCloseWriters
+func (*nopCloseWriter) Close() error {
+ return nil
+}
+
+// wrappedWriteCloser implements io.WriteCloser. It wraps up two WriteClosers. The Write method
+// of the io.WriteCloser is implemented by the embedded io.WriteCloser
+type wrappedWriteCloser struct {
+ io.WriteCloser
+ inner io.WriteCloser
+}
+
+// Close is part of the io.WriteCloser interface. Close closes both the embedded io.WriteCloser as
+// well as the inner io.WriteCloser
+func (wwc *wrappedWriteCloser) Close() error {
+ err := wwc.WriteCloser.Close()
+ if err != nil {
+ return err
+ }
+ return wwc.inner.Close()
+}
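+
+// This pairing matters for gzip below: closing only the gzip.Writer would
+// flush its footer but leave the underlying file open, so getArchiveOut wraps
+// the two together.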
+
+func (dump *MongoDump) getArchiveOut() (out io.WriteCloser, err error) {
+ if dump.OutputOptions.Archive == "-" {
+ out = &nopCloseWriter{dump.stdout}
+ } else {
+ targetStat, err := os.Stat(dump.OutputOptions.Archive)
+ if err == nil && targetStat.IsDir() {
+ defaultArchiveFilePath :=
+ filepath.Join(dump.OutputOptions.Archive, "archive")
+ if dump.OutputOptions.Gzip {
+ defaultArchiveFilePath = defaultArchiveFilePath + ".gz"
+ }
+ out, err = os.Create(defaultArchiveFilePath)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ out, err = os.Create(dump.OutputOptions.Archive)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ if dump.OutputOptions.Gzip {
+ return &wrappedWriteCloser{
+ WriteCloser: gzip.NewWriter(out),
+ inner: out,
+ }, nil
+ }
+ return out, nil
+}
+
+// docPlural returns "document" or "documents" depending on the
+// count of documents passed in.
+func docPlural(count int64) string {
+ return util.Pluralize(int(count), "document", "documents")
+}
+
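+// HandleInterrupt is called by the signal handler (see main/mongodump.go); it
+// notifies the intent dumpers that they should shut down.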
+func (dump *MongoDump) HandleInterrupt() {
+ if dump.shutdownIntentsNotifier != nil {
+ dump.shutdownIntentsNotifier.Notify()
+ }
+}
diff --git a/src/mongo/gotools/mongodump/mongodump_test.go b/src/mongo/gotools/mongodump/mongodump_test.go
new file mode 100644
index 00000000000..92a209ca5f8
--- /dev/null
+++ b/src/mongo/gotools/mongodump/mongodump_test.go
@@ -0,0 +1,616 @@
+package mongodump
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ "github.com/mongodb/mongo-tools/common/util"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+var (
+ // database with test data
+ testDB = "mongodump_test_db"
+ // temp database used for restoring a DB
+ testRestoreDB = "temp_mongodump_restore_test_db"
+ testCollectionNames = []string{"coll1", "coll2", "coll3"}
+ testServer = "localhost"
+ testPort = db.DefaultTestPort
+)
+
+const (
+ KerberosDumpDirectory = "dump-kerberos"
+)
+
+func simpleMongoDumpInstance() *MongoDump {
+ ssl := testutil.GetSSLOptions()
+ auth := testutil.GetAuthOptions()
+ namespace := &options.Namespace{
+ DB: testDB,
+ }
+ connection := &options.Connection{
+ Host: testServer,
+ Port: testPort,
+ }
+ toolOptions := &options.ToolOptions{
+ SSL: &ssl,
+ Namespace: namespace,
+ Connection: connection,
+ Auth: &auth,
+ Verbosity: &options.Verbosity{},
+ }
+ outputOptions := &OutputOptions{
+ NumParallelCollections: 1,
+ }
+ inputOptions := &InputOptions{}
+
+ log.SetVerbosity(toolOptions.Verbosity)
+
+ return &MongoDump{
+ ToolOptions: toolOptions,
+ InputOptions: inputOptions,
+ OutputOptions: outputOptions,
+ }
+}
+
+func getBareSession() (*mgo.Session, error) {
+ ssl := testutil.GetSSLOptions()
+ auth := testutil.GetAuthOptions()
+ sessionProvider, err := db.NewSessionProvider(options.ToolOptions{
+ Connection: &options.Connection{
+ Host: testServer,
+ Port: testPort,
+ },
+ Auth: &auth,
+ SSL: &ssl,
+ })
+ if err != nil {
+ return nil, err
+ }
+ session, err := sessionProvider.GetSession()
+ if err != nil {
+ return nil, err
+ }
+ return session, nil
+}
+
+// returns the number of .bson files in a directory
+// excluding system.indexes.bson
+func countNonIndexBSONFiles(dir string) (int, error) {
+ matchingFiles, err := getMatchingFiles(dir, ".*\\.bson")
+ if err != nil {
+ return 0, err
+ }
+ count := 0
+ for _, fileName := range matchingFiles {
+ if fileName != "system.indexes.bson" {
+ count++
+ }
+ }
+ return count, nil
+}
+
+// returns count of metadata files
+func countMetaDataFiles(dir string) (int, error) {
+ matchingFiles, err := getMatchingFiles(dir, ".*\\.metadata\\.json")
+ if err != nil {
+ return 0, err
+ }
+ return len(matchingFiles), nil
+}
+
+// returns filenames that match the given pattern
+func getMatchingFiles(dir, pattern string) ([]string, error) {
+ fileInfos, err := ioutil.ReadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ matchingFiles := []string{}
+ var matched bool
+ for _, fileInfo := range fileInfos {
+ fileName := fileInfo.Name()
+ if matched, err = regexp.MatchString(pattern, fileName); matched {
+ matchingFiles = append(matchingFiles, fileName)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ return matchingFiles, nil
+}
+
+// readBSONIntoDatabase reads all the database bson documents from dir and puts
+// them into another DB; indexes are ignored for now.
+func readBSONIntoDatabase(dir, restoreDBName string) error {
+ if ok := fileDirExists(dir); !ok {
+ return fmt.Errorf("error finding '%v' on local FS", dir)
+ }
+
+ session, err := getBareSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+
+ fileInfos, err := ioutil.ReadDir(dir)
+ if err != nil {
+ return err
+ }
+
+ for _, fileInfo := range fileInfos {
+ fileName := fileInfo.Name()
+ if !strings.HasSuffix(fileName, ".bson") || fileName == "system.indexes.bson" {
+ continue
+ }
+
+ collectionName := fileName[:strings.LastIndex(fileName, ".bson")]
+ collection := session.DB(restoreDBName).C(collectionName)
+
+ file, err := os.Open(fmt.Sprintf("%s/%s", dir, fileName))
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ bsonSource := db.NewDecodedBSONSource(db.NewBSONSource(file))
+ defer bsonSource.Close()
+
+ var result bson.M
+ for bsonSource.Next(&result) {
+ err = collection.Insert(result)
+ if err != nil {
+ return err
+ }
+ }
+ if err = bsonSource.Err(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func setUpMongoDumpTestData() error {
+ session, err := getBareSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+
+ for i, collectionName := range testCollectionNames {
+ coll := session.DB(testDB).C(collectionName)
+
+ for j := 0; j < 10*(i+1); j++ {
+ err = coll.Insert(bson.M{"collectionName": collectionName, "age": j})
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func tearDownMongoDumpTestData() error {
+ session, err := getBareSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+
+ err = session.DB(testDB).DropDatabase()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func fileDirExists(name string) bool {
+ if _, err := os.Stat(name); err != nil {
+ if os.IsNotExist(err) {
+ return false
+ }
+ }
+ return true
+}
+
+func testQuery(md *MongoDump, session *mgo.Session) string {
+ origDB := session.DB(testDB)
+ restoredDB := session.DB(testRestoreDB)
+
+ // query to test --query* flags
+ bsonQuery := bson.M{"age": bson.M{"$lt": 10}}
+
+ // we can only dump using query per collection
+ for _, testCollName := range testCollectionNames {
+ md.ToolOptions.Namespace.Collection = testCollName
+
+ err := md.Init()
+ So(err, ShouldBeNil)
+
+ err = md.Dump()
+ So(err, ShouldBeNil)
+ }
+
+ path, err := os.Getwd()
+ So(err, ShouldBeNil)
+
+ dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
+ dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, testDB))
+ So(fileDirExists(dumpDir), ShouldBeTrue)
+ So(fileDirExists(dumpDBDir), ShouldBeTrue)
+
+ err = readBSONIntoDatabase(dumpDBDir, testRestoreDB)
+ So(err, ShouldBeNil)
+
+ for _, testCollName := range testCollectionNames {
+ // count filtered docs
+ numDocs1, err := origDB.C(testCollName).Find(bsonQuery).Count()
+ So(err, ShouldBeNil)
+
+ // count number of all restored documents
+ numDocs2, err := restoredDB.C(testCollName).Find(nil).Count()
+ So(err, ShouldBeNil)
+
+ So(numDocs1, ShouldEqual, numDocs2)
+ }
+ return dumpDir
+}
+
+func TestMongoDumpValidateOptions(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a MongoDump instance", t, func() {
+ md := simpleMongoDumpInstance()
+
+ Convey("we cannot dump a collection when a database specified", func() {
+ md.ToolOptions.Namespace.Collection = "some_collection"
+ md.ToolOptions.Namespace.DB = ""
+
+ err := md.Init()
+ So(err, ShouldNotBeNil)
+ So(err.Error(), ShouldContainSubstring, "cannot dump a collection without a specified database")
+ })
+
+ Convey("we have to specify a collection name if using a query", func() {
+ md.ToolOptions.Namespace.Collection = ""
+ md.OutputOptions.Out = ""
+ md.InputOptions.Query = "{_id:\"\"}"
+
+ err := md.Init()
+ So(err, ShouldNotBeNil)
+ So(err.Error(), ShouldContainSubstring, "cannot dump using a query without a specified collection")
+ })
+
+ })
+}
+
+func TestMongoDumpKerberos(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.KerberosTestType)
+
+ Convey("Should be able to run mongodump with Kerberos auth", t, func() {
+ opts, err := testutil.GetKerberosOptions()
+
+ So(err, ShouldBeNil)
+
+ mongoDump := MongoDump{
+ ToolOptions: opts,
+ InputOptions: &InputOptions{},
+ OutputOptions: &OutputOptions{
+ NumParallelCollections: 1,
+ },
+ }
+
+ mongoDump.OutputOptions.Out = KerberosDumpDirectory
+
+ err = mongoDump.Init()
+ So(err, ShouldBeNil)
+ err = mongoDump.Dump()
+ So(err, ShouldBeNil)
+ path, err := os.Getwd()
+ So(err, ShouldBeNil)
+
+ dumpDir := util.ToUniversalPath(filepath.Join(path, KerberosDumpDirectory))
+ dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, opts.Namespace.DB))
+ So(fileDirExists(dumpDir), ShouldBeTrue)
+ So(fileDirExists(dumpDBDir), ShouldBeTrue)
+
+ dumpCollectionFile := util.ToUniversalPath(filepath.Join(dumpDBDir, opts.Namespace.Collection+".bson"))
+ So(fileDirExists(dumpCollectionFile), ShouldBeTrue)
+
+ countColls, err := countNonIndexBSONFiles(dumpDBDir)
+ So(err, ShouldBeNil)
+ So(countColls, ShouldEqual, 1)
+ })
+}
+
+func TestMongoDumpBSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.IntegrationTestType)
+ log.SetWriter(ioutil.Discard)
+
+ Convey("With a MongoDump instance", t, func() {
+ err := setUpMongoDumpTestData()
+ So(err, ShouldBeNil)
+
+ Convey("testing that using MongoDump WITHOUT giving a query dumps everything in the database and/or collection", func() {
+ md := simpleMongoDumpInstance()
+ md.InputOptions.Query = ""
+
+ Convey("and that for a particular collection", func() {
+ md.ToolOptions.Namespace.Collection = testCollectionNames[0]
+ err = md.Init()
+ So(err, ShouldBeNil)
+
+ Convey("it dumps to the default output directory", func() {
+ // we don't have to set this manually if parsing options via command line
+ md.OutputOptions.Out = "dump"
+ err = md.Dump()
+ So(err, ShouldBeNil)
+ path, err := os.Getwd()
+ So(err, ShouldBeNil)
+
+ dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
+ dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, testDB))
+ So(fileDirExists(dumpDir), ShouldBeTrue)
+ So(fileDirExists(dumpDBDir), ShouldBeTrue)
+
+ err = readBSONIntoDatabase(dumpDBDir, testRestoreDB)
+ So(err, ShouldBeNil)
+
+ session, err := getBareSession()
+ So(err, ShouldBeNil)
+
+ countColls, err := countNonIndexBSONFiles(dumpDBDir)
+ So(err, ShouldBeNil)
+ So(countColls, ShouldEqual, 1)
+
+ collOriginal := session.DB(testDB).C(testCollectionNames[0])
+ collRestore := session.DB(testRestoreDB).C(testCollectionNames[0])
+
+ Convey("with the correct number of documents", func() {
+ numDocsOrig, err := collOriginal.Count()
+ So(err, ShouldBeNil)
+
+ numDocsRestore, err := collRestore.Count()
+ So(err, ShouldBeNil)
+
+ So(numDocsOrig, ShouldEqual, numDocsRestore)
+ })
+
+ Convey("that are the same as the documents in the test database", func() {
+ iter := collOriginal.Find(nil).Iter()
+
+ var result bson.M
+ for iter.Next(&result) {
+ restoredCount, err := collRestore.Find(result).Count()
+ So(err, ShouldBeNil)
+ So(restoredCount, ShouldNotEqual, 0)
+ }
+ So(iter.Close(), ShouldBeNil)
+ })
+
+ Reset(func() {
+ So(session.DB(testRestoreDB).DropDatabase(), ShouldBeNil)
+ So(os.RemoveAll(dumpDir), ShouldBeNil)
+ })
+ })
+
+ Convey("it dumps to a user-specified output directory", func() {
+ md.OutputOptions.Out = "dump_user"
+ err = md.Dump()
+ So(err, ShouldBeNil)
+ path, err := os.Getwd()
+ So(err, ShouldBeNil)
+
+ dumpDir := util.ToUniversalPath(filepath.Join(path, "dump_user"))
+ dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, testDB))
+ So(fileDirExists(dumpDir), ShouldBeTrue)
+ So(fileDirExists(dumpDBDir), ShouldBeTrue)
+
+ countColls, err := countNonIndexBSONFiles(dumpDBDir)
+ So(err, ShouldBeNil)
+ So(countColls, ShouldEqual, 1)
+
+ Reset(func() {
+ So(os.RemoveAll(dumpDir), ShouldBeNil)
+ })
+
+ })
+
+ Convey("it dumps to standard output", func() {
+ md.OutputOptions.Out = "-"
+ stdoutBuf := &bytes.Buffer{}
+ md.stdout = stdoutBuf
+ err = md.Dump()
+ So(err, ShouldBeNil)
+ var count int
+ bsonSource := db.NewDecodedBSONSource(db.NewBSONSource(ioutil.NopCloser(stdoutBuf)))
+ defer bsonSource.Close()
+
+ var result bson.Raw
+ for bsonSource.Next(&result) {
+ count++
+ }
+ So(bsonSource.Err(), ShouldBeNil)
+ So(count, ShouldEqual, 10) // the 0th collection has 10 documents
+
+ Reset(func() {
+ })
+
+ })
+
+ })
+
+ Convey("for an entire database", func() {
+ md.ToolOptions.Namespace.Collection = ""
+ err = md.Init()
+ So(err, ShouldBeNil)
+
+ Convey("that exists. The dumped directory should contain the necessary bson files", func() {
+ md.OutputOptions.Out = "dump"
+ err = md.Dump()
+ So(err, ShouldBeNil)
+ path, err := os.Getwd()
+ So(err, ShouldBeNil)
+
+ dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
+ dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, testDB))
+ So(fileDirExists(dumpDir), ShouldBeTrue)
+ So(fileDirExists(dumpDBDir), ShouldBeTrue)
+
+ countColls, err := countNonIndexBSONFiles(dumpDBDir)
+ So(err, ShouldBeNil)
+ So(countColls, ShouldEqual, len(testCollectionNames))
+
+ Reset(func() {
+ So(os.RemoveAll(dumpDir), ShouldBeNil)
+ })
+
+ })
+
+ Convey("that does not exist. The dumped directory shouldn't be created", func() {
+ md.OutputOptions.Out = "dump"
+ md.ToolOptions.Namespace.DB = "nottestdb"
+ err = md.Dump()
+ So(err, ShouldBeNil)
+
+ path, err := os.Getwd()
+ So(err, ShouldBeNil)
+
+ dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
+ dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, "nottestdb"))
+
+ So(fileDirExists(dumpDir), ShouldBeFalse)
+ So(fileDirExists(dumpDBDir), ShouldBeFalse)
+ })
+
+ })
+ })
+
+ Convey("testing that using MongoDump WITH a query dumps a subset of documents in a database and/or collection", func() {
+ session, err := getBareSession()
+ So(err, ShouldBeNil)
+ md := simpleMongoDumpInstance()
+
+ // expect 10 documents per collection
+ bsonQuery := bson.M{"age": bson.M{"$lt": 10}}
+ jsonQuery, err := bsonutil.ConvertBSONValueToJSON(bsonQuery)
+ So(err, ShouldBeNil)
+ jsonQueryBytes, err := json.Marshal(jsonQuery)
+ So(err, ShouldBeNil)
+
+ Convey("using --query for all the collections in the database", func() {
+ md.InputOptions.Query = string(jsonQueryBytes)
+ md.ToolOptions.Namespace.DB = testDB
+ md.OutputOptions.Out = "dump"
+ dumpDir := testQuery(md, session)
+
+ Reset(func() {
+ So(session.DB(testRestoreDB).DropDatabase(), ShouldBeNil)
+ So(os.RemoveAll(dumpDir), ShouldBeNil)
+ })
+
+ })
+
+ Convey("using --queryFile for all the collections in the database", func() {
+ So(ioutil.WriteFile("example.json", jsonQueryBytes, 0777), ShouldBeNil)
+ md.InputOptions.QueryFile = "example.json"
+ md.ToolOptions.Namespace.DB = testDB
+ md.OutputOptions.Out = "dump"
+ dumpDir := testQuery(md, session)
+
+ Reset(func() {
+ So(session.DB(testRestoreDB).DropDatabase(), ShouldBeNil)
+ So(os.RemoveAll(dumpDir), ShouldBeNil)
+ So(os.Remove("example.json"), ShouldBeNil)
+ })
+
+ })
+ })
+
+ Reset(func() {
+ So(tearDownMongoDumpTestData(), ShouldBeNil)
+ })
+ })
+}
+
+func TestMongoDumpMetaData(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.IntegrationTestType)
+ log.SetWriter(ioutil.Discard)
+
+ Convey("With a MongoDump instance", t, func() {
+ err := setUpMongoDumpTestData()
+ So(err, ShouldBeNil)
+
+ Convey("testing that the dumped directory contains information about indexes", func() {
+ md := simpleMongoDumpInstance()
+ md.OutputOptions.Out = "dump"
+ err = md.Init()
+ So(err, ShouldBeNil)
+
+ err = md.Dump()
+ So(err, ShouldBeNil)
+
+ path, err := os.Getwd()
+ So(err, ShouldBeNil)
+ dumpDir := util.ToUniversalPath(filepath.Join(path, "dump"))
+ dumpDBDir := util.ToUniversalPath(filepath.Join(dumpDir, testDB))
+ So(fileDirExists(dumpDir), ShouldBeTrue)
+ So(fileDirExists(dumpDBDir), ShouldBeTrue)
+
+ Convey("having one metadata file per collection", func() {
+ c1, err := countNonIndexBSONFiles(dumpDBDir)
+ So(err, ShouldBeNil)
+
+ c2, err := countMetaDataFiles(dumpDBDir)
+ So(err, ShouldBeNil)
+
+ So(c1, ShouldEqual, c2)
+
+ Convey("and that the JSON in a metadata file is valid", func() {
+ metaFiles, err := getMatchingFiles(dumpDBDir, ".*\\.metadata\\.json")
+ So(err, ShouldBeNil)
+ So(len(metaFiles), ShouldBeGreaterThan, 0)
+
+ oneMetaFile, err := os.Open(util.ToUniversalPath(filepath.Join(dumpDBDir, metaFiles[0])))
+ So(err, ShouldBeNil)
+ contents, err := ioutil.ReadAll(oneMetaFile)
+ So(err, ShouldBeNil)
+ var jsonResult map[string]interface{}
+ err = json.Unmarshal(contents, &jsonResult)
+ So(err, ShouldBeNil)
+
+ Convey("and contains an 'indexes' key", func() {
+ _, ok := jsonResult["indexes"]
+ So(ok, ShouldBeTrue)
+ So(oneMetaFile.Close(), ShouldBeNil)
+ })
+
+ })
+
+ })
+
+ Reset(func() {
+ So(os.RemoveAll(dumpDir), ShouldBeNil)
+ })
+ })
+
+ Reset(func() {
+ So(tearDownMongoDumpTestData(), ShouldBeNil)
+ })
+
+ })
+}
diff --git a/src/mongo/gotools/mongodump/oplog_dump.go b/src/mongo/gotools/mongodump/oplog_dump.go
new file mode 100644
index 00000000000..dba7835edfa
--- /dev/null
+++ b/src/mongo/gotools/mongodump/oplog_dump.go
@@ -0,0 +1,91 @@
+package mongodump
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// determineOplogCollectionName uses a command to infer
+// the name of the oplog collection in the connected db
+func (dump *MongoDump) determineOplogCollectionName() error {
+ masterDoc := bson.M{}
+ err := dump.sessionProvider.Run("isMaster", &masterDoc, "admin")
+ if err != nil {
+ return fmt.Errorf("error running command: %v", err)
+ }
+ if _, ok := masterDoc["hosts"]; ok {
+ log.Logvf(log.DebugLow, "determined cluster to be a replica set")
+ log.Logvf(log.DebugHigh, "oplog located in local.oplog.rs")
+ dump.oplogCollection = "oplog.rs"
+ return nil
+ }
+ if isMaster := masterDoc["ismaster"]; util.IsFalsy(isMaster) {
+ log.Logvf(log.Info, "mongodump is not connected to a master")
+ return fmt.Errorf("not connected to master")
+ }
+
+ log.Logvf(log.DebugLow, "not connected to a replica set, assuming master/slave")
+ log.Logvf(log.DebugHigh, "oplog located in local.oplog.$main")
+ dump.oplogCollection = "oplog.$main"
+ return nil
+}
+
+// getOplogStartTime returns the most recent oplog entry
+func (dump *MongoDump) getOplogStartTime() (bson.MongoTimestamp, error) {
+ mostRecentOplogEntry := db.Oplog{}
+
+ err := dump.sessionProvider.FindOne("local", dump.oplogCollection, 0, nil, []string{"-$natural"}, &mostRecentOplogEntry, 0)
+ if err != nil {
+ return 0, err
+ }
+ return mostRecentOplogEntry.Timestamp, nil
+}
+
+// checkOplogTimestampExists checks to make sure the oplog hasn't rolled over
+// since mongodump started. It does this by checking the oldest oplog entry
+// still in the database and making sure it happened at or before the timestamp
+// captured at the start of the dump.
+func (dump *MongoDump) checkOplogTimestampExists(ts bson.MongoTimestamp) (bool, error) {
+ oldestOplogEntry := db.Oplog{}
+ err := dump.sessionProvider.FindOne("local", dump.oplogCollection, 0, nil, []string{"+$natural"}, &oldestOplogEntry, 0)
+ if err != nil {
+ return false, fmt.Errorf("unable to read entry from oplog: %v", err)
+ }
+
+ log.Logvf(log.DebugHigh, "oldest oplog entry has timestamp %v", oldestOplogEntry.Timestamp)
+ if oldestOplogEntry.Timestamp > ts {
+ log.Logvf(log.Info, "oldest oplog entry of timestamp %v is older than %v",
+ oldestOplogEntry.Timestamp, ts)
+ return false, nil
+ }
+ return true, nil
+}
+
+// DumpOplogAfterTimestamp takes a timestamp and writer and dumps all oplog entries after
+// the given timestamp to the writer. Returns any errors that occur.
+func (dump *MongoDump) DumpOplogAfterTimestamp(ts bson.MongoTimestamp) error {
+ session, err := dump.sessionProvider.GetSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+ intent := dump.manager.Oplog()
+ err = intent.BSONFile.Open()
+ if err != nil {
+ return fmt.Errorf("error opening output stream for dumping oplog: %v", err)
+ }
+ defer intent.BSONFile.Close()
+ session.SetPrefetch(1.0) // mimic exhaust cursor
+ queryObj := bson.M{"ts": bson.M{"$gt": ts}}
+ oplogQuery := session.DB("local").C(dump.oplogCollection).Find(queryObj).LogReplay()
+ oplogCount, err := dump.dumpQueryToWriter(oplogQuery, dump.manager.Oplog())
+ if err == nil {
+ log.Logvf(log.Always, "\tdumped %v oplog %v",
+ oplogCount, util.Pluralize(int(oplogCount), "entry", "entries"))
+ }
+ return err
+}
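+
+// The helpers above are intended to be combined around the collection dump.
+// What follows is an illustrative sketch of that flow, not part of the
+// original tool; the method name is hypothetical.
+func (dump *MongoDump) exampleOplogWindow() error {
+ if err := dump.determineOplogCollectionName(); err != nil {
+ return err
+ }
+ start, err := dump.getOplogStartTime()
+ if err != nil {
+ return err
+ }
+ // ... dump the collections here ...
+ exists, err := dump.checkOplogTimestampExists(start)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return fmt.Errorf("oplog rolled over during the dump; entry %v is gone", start)
+ }
+ // capture everything that happened while the dump ran
+ return dump.DumpOplogAfterTimestamp(start)
+}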
diff --git a/src/mongo/gotools/mongodump/options.go b/src/mongo/gotools/mongodump/options.go
new file mode 100644
index 00000000000..65401a1b83e
--- /dev/null
+++ b/src/mongo/gotools/mongodump/options.go
@@ -0,0 +1,62 @@
+package mongodump
+
+import (
+ "fmt"
+ "io/ioutil"
+)
+
+var Usage = `<options>
+
+Export the content of a running server into .bson files.
+
+Specify a database with -d and a collection with -c to only dump that database or collection.
+
+See http://docs.mongodb.org/manual/reference/program/mongodump/ for more information.`
+
+// InputOptions defines the set of options to use in retrieving data from the server.
+type InputOptions struct {
+ Query string `long:"query" short:"q" description:"query filter, as a JSON string, e.g., '{x:{$gt:1}}'"`
+ QueryFile string `long:"queryFile" description:"path to a file containing a query filter (JSON)"`
+ ReadPreference string `long:"readPreference" value-name:"<string>|<json>" description:"specify either a preference name or a preference json object"`
+ TableScan bool `long:"forceTableScan" description:"force a table scan"`
+}
+
+// Name returns a human-readable group name for input options.
+func (*InputOptions) Name() string {
+ return "query"
+}
+
+func (inputOptions *InputOptions) HasQuery() bool {
+ return inputOptions.Query != "" || inputOptions.QueryFile != ""
+}
+
+func (inputOptions *InputOptions) GetQuery() ([]byte, error) {
+ if inputOptions.Query != "" {
+ return []byte(inputOptions.Query), nil
+ } else if inputOptions.QueryFile != "" {
+ content, err := ioutil.ReadFile(inputOptions.QueryFile)
+ if err != nil {
+ err = fmt.Errorf("error reading queryFile: %s", err)
+ }
+ return content, err
+ }
+ panic("GetQuery can return valid values only for query or queryFile input")
+}
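+
+// GetQuery panics when neither Query nor QueryFile is set, so callers are
+// expected to guard with HasQuery first. An illustrative sketch (the helper
+// name is hypothetical, not part of the original tool):
+func exampleLoadQuery(in *InputOptions) ([]byte, error) {
+ if !in.HasQuery() {
+ return nil, nil // no filter requested; dump everything
+ }
+ return in.GetQuery()
+}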
+
+// OutputOptions defines the set of options for writing dump data.
+type OutputOptions struct {
+ Out string `long:"out" value-name:"<directory-path>" short:"o" description:"output directory, or '-' for stdout (defaults to 'dump')"`
+ Gzip bool `long:"gzip" description:"compress archive or collection output with Gzip"`
+ Repair bool `long:"repair" description:"try to recover documents from damaged data files (not supported by all storage engines)"`
+ Oplog bool `long:"oplog" description:"use oplog for taking a point-in-time snapshot"`
+ Archive string `long:"archive" value-name:"<file-path>" optional:"true" optional-value:"-" description:"dump as an archive to the specified path. If flag is specified without a value, archive is written to stdout"`
+ DumpDBUsersAndRoles bool `long:"dumpDbUsersAndRoles" description:"dump user and role definitions for the specified database"`
+ ExcludedCollections []string `long:"excludeCollection" value-name:"<collection-name>" description:"collection to exclude from the dump (may be specified multiple times to exclude additional collections)"`
+ ExcludedCollectionPrefixes []string `long:"excludeCollectionsWithPrefix" value-name:"<collection-prefix>" description:"exclude all collections from the dump that have the given prefix (may be specified multiple times to exclude additional prefixes)"`
+ NumParallelCollections int `long:"numParallelCollections" short:"j" description:"number of collections to dump in parallel (4 by default)" default:"4" default-mask:"-"`
+}
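+
+// For example (illustrative): `mongodump --archive` streams the archive to
+// standard output, while `mongodump --archive=backup.archive` writes it to
+// the named file, because the flag's optional value defaults to "-".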
+
+// Name returns a human-readable group name for output options.
+func (*OutputOptions) Name() string {
+ return "output"
+}
diff --git a/src/mongo/gotools/mongodump/prepare.go b/src/mongo/gotools/mongodump/prepare.go
new file mode 100644
index 00000000000..9c0078e17e8
--- /dev/null
+++ b/src/mongo/gotools/mongodump/prepare.go
@@ -0,0 +1,457 @@
+package mongodump
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/archive"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/intents"
+ "github.com/mongodb/mongo-tools/common/log"
+ "gopkg.in/mgo.v2/bson"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
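+// NilPos provides a Pos method that always returns -1, for embedding in file
+// types that have no meaningful position.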
+type NilPos struct{}
+
+func (NilPos) Pos() int64 {
+ return -1
+}
+
+type collectionInfo struct {
+ Name string `bson:"name"`
+ Options *bson.D `bson:"options"`
+}
+
+// writeFlusher wraps an io.Writer and adds a Flush function.
+type writeFlusher interface {
+ Flush() error
+ io.Writer
+}
+
+// writeFlushCloser is a writeFlusher implementation that exposes
+// a Close function, which is implemented by calling Flush.
+type writeFlushCloser struct {
+ writeFlusher
+}
+
+// availableWriteFlusher wraps a writeFlusher and adds an Available function.
+type availableWriteFlusher interface {
+ Available() int
+ writeFlusher
+}
+
+// atomicFlusher is an availableWriteFlusher implementation
+// which guarantees atomic writes.
+type atomicFlusher struct {
+ availableWriteFlusher
+}
+
+// errorReader implements io.Reader.
+type errorReader struct{}
+
+// Read on an errorReader always returns an error.
+func (errorReader) Read([]byte) (int, error) {
+ return 0, os.ErrInvalid
+}
+
+// Close calls Flush.
+func (bwc writeFlushCloser) Close() error {
+ return bwc.Flush()
+}
+
+// realBSONFile implements the intents.file interface. It lets intents write to real BSON files
+// on disk via an embedded bufio.Writer.
+type realBSONFile struct {
+ io.WriteCloser
+ path string
+ // errorReader adds a Read() method to this object, allowing it to be an
+ // intents.file (a ReadWriteOpenCloser)
+ errorReader
+ intent *intents.Intent
+ gzip bool
+ NilPos
+}
+
+// Open is part of the intents.file interface. realBSONFiles need to have Open called before
+// Write can be called
+func (f *realBSONFile) Open() (err error) {
+ if f.path == "" {
+ // This should not occur normally. All realBSONFiles should have a path
+ return fmt.Errorf("error creating BSON file without a path, namespace: %v",
+ f.intent.Namespace())
+ }
+ err = os.MkdirAll(filepath.Dir(f.path), os.ModeDir|os.ModePerm)
+ if err != nil {
+ return fmt.Errorf("error creating directory for BSON file %v: %v",
+ filepath.Dir(f.path), err)
+ }
+
+ fileName := f.path
+ file, err := os.Create(fileName)
+ if err != nil {
+ return fmt.Errorf("error creating BSON file %v: %v", fileName, err)
+ }
+ var writeCloser io.WriteCloser
+ if f.gzip {
+ writeCloser = gzip.NewWriter(file)
+ } else {
+ // wrap writer in buffer to reduce load on disk
+ writeCloser = writeFlushCloser{
+ atomicFlusher{
+ bufio.NewWriterSize(file, 32*1024),
+ },
+ }
+ }
+ f.WriteCloser = &wrappedWriteCloser{
+ WriteCloser: writeCloser,
+ inner: file,
+ }
+
+ return nil
+}
+
+// Write guarantees that, when it returns, either the entire contents of buf
+// or none of it has been flushed by the writer.
+// This is useful in the unlikely case that mongodump crashes.
+func (f atomicFlusher) Write(buf []byte) (int, error) {
+ if len(buf) > f.availableWriteFlusher.Available() {
+ f.availableWriteFlusher.Flush()
+ }
+ if len(buf) > f.availableWriteFlusher.Available() {
+ l, e := f.availableWriteFlusher.Write(buf)
+ f.availableWriteFlusher.Flush()
+ return l, e
+ }
+ return f.availableWriteFlusher.Write(buf)
+}
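+
+// An illustrative construction of the buffered, atomic, flush-on-close chain
+// assembled in realBSONFile.Open above (a sketch; w stands for any io.Writer):
+//
+//	out := writeFlushCloser{atomicFlusher{bufio.NewWriterSize(w, 32*1024)}}
+//	out.Write(doc) // doc lands entirely in this flush window, or not at all
+//	out.Close()    // Close flushes whatever remains buffered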
+
+// realMetadataFile implements intent.file, and corresponds to a Metadata file on disk
+type realMetadataFile struct {
+ io.WriteCloser
+ path string
+ // errorReader adds a Read() method to this object, allowing it to be an
+ // intents.file (a ReadWriteOpenCloser)
+ errorReader
+ intent *intents.Intent
+ gzip bool
+ NilPos
+}
+
+// Open opens the file on disk that the intent indicates. Any directories needed are created.
+// If compression is needed, the file gets wrapped in a gzip.Writer.
+func (f *realMetadataFile) Open() (err error) {
+ if f.path == "" {
+ return fmt.Errorf("No metadata path for %v.%v", f.intent.DB, f.intent.C)
+ }
+ err = os.MkdirAll(filepath.Dir(f.path), os.ModeDir|os.ModePerm)
+ if err != nil {
+ return fmt.Errorf("error creating directory for metadata file %v: %v",
+ filepath.Dir(f.path), err)
+ }
+
+ fileName := f.path
+ f.WriteCloser, err = os.Create(fileName)
+ if err != nil {
+ return fmt.Errorf("error creating metadata file %v: %v", fileName, err)
+ }
+ if f.gzip {
+ f.WriteCloser = &wrappedWriteCloser{
+ WriteCloser: gzip.NewWriter(f.WriteCloser),
+ inner: f.WriteCloser,
+ }
+ }
+ return nil
+}
+
+// stdoutFile implements the intents.file interface. stdoutFiles are used when single collections
+// are written directly (non-archive-mode) to standard out, via "--out -"
+type stdoutFile struct {
+ io.Writer
+ errorReader
+ NilPos
+}
+
+// Open is part of the intents.file interface.
+func (f *stdoutFile) Open() error {
+ return nil
+}
+
+// Close is part of the intents.file interface. While we could actually close os.Stdout here,
+// that would be a bad idea. Unsetting f.Writer here will cause future Writes to fail, which
+// is all we want.
+func (f *stdoutFile) Close() error {
+ f.Writer = nil
+ return nil
+}
+
+// shouldSkipCollection returns true when a collection name is excluded
+// by the mongodump options.
+func (dump *MongoDump) shouldSkipCollection(colName string) bool {
+ for _, excludedCollection := range dump.OutputOptions.ExcludedCollections {
+ if colName == excludedCollection {
+ return true
+ }
+ }
+ for _, excludedCollectionPrefix := range dump.OutputOptions.ExcludedCollectionPrefixes {
+ if strings.HasPrefix(colName, excludedCollectionPrefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// outputPath creates a path for the collection to be written to (sans file extension).
+func (dump *MongoDump) outputPath(dbName, colName string) string {
+ var root string
+ if dump.OutputOptions.Out == "" {
+ root = "dump"
+ } else {
+ root = dump.OutputOptions.Out
+ }
+ if dbName == "" {
+ return filepath.Join(root, colName)
+ }
+ return filepath.Join(root, dbName, colName)
+}
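+
+// For example (illustrative): with Out unset, outputPath("db1", "c1") yields
+// "dump/db1/c1", and outputPath("", "oplog") yields "dump/oplog".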
+
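+// checkStringForPathSeparator reports whether s contains a path separator,
+// writing the offending rune through c so the caller can include it in an
+// error message.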
+func checkStringForPathSeparator(s string, c *rune) bool {
+ for _, *c = range s {
+ if os.IsPathSeparator(uint8(*c)) {
+ return true
+ }
+ }
+ return false
+}
+
+// NewIntent creates a bare intent without populating the options.
+func (dump *MongoDump) NewIntent(dbName, colName string) (*intents.Intent, error) {
+ intent := &intents.Intent{
+ DB: dbName,
+ C: colName,
+ }
+ if dump.OutputOptions.Out == "-" {
+ intent.BSONFile = &stdoutFile{Writer: dump.stdout}
+ } else {
+ if dump.OutputOptions.Archive != "" {
+ intent.BSONFile = &archive.MuxIn{Intent: intent, Mux: dump.archive.Mux}
+ } else {
+ var c rune
+ if checkStringForPathSeparator(colName, &c) || checkStringForPathSeparator(dbName, &c) {
+ return nil, fmt.Errorf(`"%v.%v" contains a path separator '%c' `+
+ `and can't be dumped to the filesystem`, dbName, colName, c)
+ }
+ path := nameGz(dump.OutputOptions.Gzip, dump.outputPath(dbName, colName)+".bson")
+ intent.BSONFile = &realBSONFile{path: path, intent: intent, gzip: dump.OutputOptions.Gzip}
+ }
+ if !intent.IsSystemIndexes() {
+ if dump.OutputOptions.Archive != "" {
+ intent.MetadataFile = &archive.MetadataFile{
+ Intent: intent,
+ Buffer: &bytes.Buffer{},
+ }
+ } else {
+ path := nameGz(dump.OutputOptions.Gzip, dump.outputPath(dbName, colName+".metadata.json"))
+ intent.MetadataFile = &realMetadataFile{path: path, intent: intent, gzip: dump.OutputOptions.Gzip}
+ }
+ }
+ }
+
+ // get a document count for scheduling purposes
+ session, err := dump.sessionProvider.GetSession()
+ if err != nil {
+ return nil, err
+ }
+ defer session.Close()
+
+ count, err := session.DB(dbName).C(colName).Count()
+ if err != nil {
+ return nil, fmt.Errorf("error counting %v: %v", intent.Namespace(), err)
+ }
+ intent.Size = int64(count)
+
+ return intent, nil
+}
+
+// CreateOplogIntents creates an intents.Intent for the oplog and adds it to the manager
+func (dump *MongoDump) CreateOplogIntents() error {
+ err := dump.determineOplogCollectionName()
+ if err != nil {
+ return err
+ }
+ oplogIntent := &intents.Intent{
+ DB: "",
+ C: "oplog",
+ }
+ if dump.OutputOptions.Archive != "" {
+ oplogIntent.BSONFile = &archive.MuxIn{Mux: dump.archive.Mux, Intent: oplogIntent}
+ } else {
+ oplogIntent.BSONFile = &realBSONFile{path: dump.outputPath("oplog.bson", ""), intent: oplogIntent, gzip: dump.OutputOptions.Gzip}
+ }
+ dump.manager.Put(oplogIntent)
+ return nil
+}
+
+// CreateUsersRolesVersionIntentsForDB creates intents for the users, roles, and
+// version collections of the admin database, to be written into the specified
+// database's folder, and then adds those intents to the manager.
+func (dump *MongoDump) CreateUsersRolesVersionIntentsForDB(db string) error {
+
+ outDir := dump.outputPath(db, "")
+
+ usersIntent := &intents.Intent{
+ DB: db,
+ C: "$admin.system.users",
+ }
+ rolesIntent := &intents.Intent{
+ DB: db,
+ C: "$admin.system.roles",
+ }
+ versionIntent := &intents.Intent{
+ DB: db,
+ C: "$admin.system.version",
+ }
+ if dump.OutputOptions.Archive != "" {
+ usersIntent.BSONFile = &archive.MuxIn{Intent: usersIntent, Mux: dump.archive.Mux}
+ rolesIntent.BSONFile = &archive.MuxIn{Intent: rolesIntent, Mux: dump.archive.Mux}
+ versionIntent.BSONFile = &archive.MuxIn{Intent: versionIntent, Mux: dump.archive.Mux}
+ } else {
+ usersIntent.BSONFile = &realBSONFile{path: filepath.Join(outDir, nameGz(dump.OutputOptions.Gzip, "$admin.system.users.bson")), intent: usersIntent, gzip: dump.OutputOptions.Gzip}
+ rolesIntent.BSONFile = &realBSONFile{path: filepath.Join(outDir, nameGz(dump.OutputOptions.Gzip, "$admin.system.roles.bson")), intent: rolesIntent, gzip: dump.OutputOptions.Gzip}
+ versionIntent.BSONFile = &realBSONFile{path: filepath.Join(outDir, nameGz(dump.OutputOptions.Gzip, "$admin.system.version.bson")), intent: versionIntent, gzip: dump.OutputOptions.Gzip}
+ }
+ dump.manager.Put(usersIntent)
+ dump.manager.Put(rolesIntent)
+ dump.manager.Put(versionIntent)
+
+ return nil
+}
+
+// CreateCollectionIntent builds an intent for a given collection and
+// puts it into the intent manager.
+func (dump *MongoDump) CreateCollectionIntent(dbName, colName string) error {
+ if dump.shouldSkipCollection(colName) {
+ log.Logvf(log.DebugLow, "skipping dump of %v.%v, it is excluded", dbName, colName)
+ return nil
+ }
+
+ intent, err := dump.NewIntent(dbName, colName)
+ if err != nil {
+ return err
+ }
+
+ session, err := dump.sessionProvider.GetSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+
+ opts, err := db.GetCollectionOptions(session.DB(dbName).C(colName))
+ if err != nil {
+ return fmt.Errorf("error getting collection options: %v", err)
+ }
+
+ intent.Options = nil
+ if opts != nil {
+ optsInterface, _ := bsonutil.FindValueByKey("options", opts)
+ if optsInterface != nil {
+ if optsD, ok := optsInterface.(bson.D); ok {
+ intent.Options = &optsD
+ } else {
+ return fmt.Errorf("Failed to parse collection options as bson.D")
+ }
+ }
+ }
+
+ dump.manager.Put(intent)
+
+ log.Logvf(log.DebugLow, "enqueued collection '%v'", intent.Namespace())
+ return nil
+}
+
+func (dump *MongoDump) createIntentFromOptions(dbName string, ci *collectionInfo) error {
+ if dump.shouldSkipCollection(ci.Name) {
+ log.Logvf(log.DebugLow, "skipping dump of %v.%v, it is excluded", dbName, ci.Name)
+ return nil
+ }
+ intent, err := dump.NewIntent(dbName, ci.Name)
+ if err != nil {
+ return err
+ }
+ intent.Options = ci.Options
+ dump.manager.Put(intent)
+ log.Logvf(log.DebugLow, "enqueued collection '%v'", intent.Namespace())
+ return nil
+}
+
+// CreateIntentsForDatabase iterates through collections in a db
+// and builds dump intents for each collection.
+func (dump *MongoDump) CreateIntentsForDatabase(dbName string) error {
+ // we must ensure folders for empty databases are still created, for legacy purposes
+
+ session, err := dump.sessionProvider.GetSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+
+ colsIter, fullName, err := db.GetCollections(session.DB(dbName), "")
+ if err != nil {
+ return fmt.Errorf("error getting collections for database `%v`: %v", dbName, err)
+ }
+
+ collInfo := &collectionInfo{}
+ for colsIter.Next(collInfo) {
+ // Skip over indexes since they are also listed in system.namespaces in 2.6 or earlier
+ if strings.Contains(collInfo.Name, "$") && !strings.Contains(collInfo.Name, ".oplog.$") {
+ continue
+ }
+ if fullName {
+ namespacePrefix := dbName + "."
+ // if the collection info came from querying system.namespaces (2.6 or earlier) then the
+ // "name" we get includes the db name as well, so we must remove it
+ if strings.HasPrefix(collInfo.Name, namespacePrefix) {
+ collInfo.Name = collInfo.Name[len(namespacePrefix):]
+ } else {
+ return fmt.Errorf("namespace '%v' format is invalid - expected to start with '%v'", collInfo.Name, namespacePrefix)
+ }
+ }
+ err := dump.createIntentFromOptions(dbName, collInfo)
+ if err != nil {
+ return err
+ }
+ }
+ return colsIter.Err()
+}
+
+// CreateAllIntents iterates through all dbs and collections and builds
+// dump intents for each collection.
+func (dump *MongoDump) CreateAllIntents() error {
+ dbs, err := dump.sessionProvider.DatabaseNames()
+ if err != nil {
+ return fmt.Errorf("error getting database names: %v", err)
+ }
+ log.Logvf(log.DebugHigh, "found databases: %v", strings.Join(dbs, ", "))
+ for _, dbName := range dbs {
+ if dbName == "local" {
+ // local can only be explicitly dumped
+ continue
+ }
+ if err := dump.CreateIntentsForDatabase(dbName); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
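+// nameGz appends a ".gz" suffix to name when gzip output is requested, e.g.
+// (illustrative) nameGz(true, "users.bson") == "users.bson.gz".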
+func nameGz(gz bool, name string) string {
+ if gz {
+ return name + ".gz"
+ }
+ return name
+}
diff --git a/src/mongo/gotools/mongodump/prepare_test.go b/src/mongo/gotools/mongodump/prepare_test.go
new file mode 100644
index 00000000000..f570a57dbcf
--- /dev/null
+++ b/src/mongo/gotools/mongodump/prepare_test.go
@@ -0,0 +1,51 @@
+package mongodump
+
+import (
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "testing"
+)
+
+func TestSkipCollection(t *testing.T) {
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a mongodump that excludes collections 'test' and 'fake'"+
+ " and excludes prefixes 'pre-' and 'no'", t, func() {
+ md := &MongoDump{
+ OutputOptions: &OutputOptions{
+ ExcludedCollections: []string{"test", "fake"},
+ ExcludedCollectionPrefixes: []string{"pre-", "no"},
+ },
+ }
+
+ Convey("collection 'pre-test' should be skipped", func() {
+ So(md.shouldSkipCollection("pre-test"), ShouldBeTrue)
+ })
+
+ Convey("collection 'notest' should be skipped", func() {
+ So(md.shouldSkipCollection("notest"), ShouldBeTrue)
+ })
+
+ Convey("collection 'test' should be skipped", func() {
+ So(md.shouldSkipCollection("test"), ShouldBeTrue)
+ })
+
+ Convey("collection 'fake' should be skipped", func() {
+ So(md.shouldSkipCollection("fake"), ShouldBeTrue)
+ })
+
+ Convey("collection 'fake222' should not be skipped", func() {
+ So(md.shouldSkipCollection("fake222"), ShouldBeFalse)
+ })
+
+ Convey("collection 'random' should not be skipped", func() {
+ So(md.shouldSkipCollection("random"), ShouldBeFalse)
+ })
+
+ Convey("collection 'mytest' should not be skipped", func() {
+ So(md.shouldSkipCollection("mytest"), ShouldBeFalse)
+ })
+ })
+
+}
diff --git a/src/mongo/gotools/mongoexport/csv.go b/src/mongo/gotools/mongoexport/csv.go
new file mode 100644
index 00000000000..165cb560cce
--- /dev/null
+++ b/src/mongo/gotools/mongoexport/csv.go
@@ -0,0 +1,148 @@
+package mongoexport
+
+import (
+ "encoding/csv"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/json"
+ "gopkg.in/mgo.v2/bson"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// marshalDType caches the reflect.Type of bsonutil.MarshalD for the type checks below
+var marshalDType = reflect.TypeOf(bsonutil.MarshalD{})
+
+// CSVExportOutput is an implementation of ExportOutput that writes documents to the output in CSV format.
+type CSVExportOutput struct {
+ // Fields is a list of field names in the bson documents to be exported.
+ // A field can also use dot-delimited modifiers to address nested structures,
+ // for example "location.city" or "addresses.0".
+ Fields []string
+
+ // NumExported maintains a running total of the number of documents written.
+ NumExported int64
+
+ // NoHeaderLine, if set, will export CSV data without a list of field names on the first line
+ NoHeaderLine bool
+
+ csvWriter *csv.Writer
+}
+
+// NewCSVExportOutput returns a CSVExportOutput configured to write output to the
+// given io.Writer, extracting the specified fields only.
+func NewCSVExportOutput(fields []string, noHeaderLine bool, out io.Writer) *CSVExportOutput {
+ return &CSVExportOutput{
+ fields,
+ 0,
+ noHeaderLine,
+ csv.NewWriter(out),
+ }
+}
+
+// WriteHeader writes a comma-delimited list of fields as the output header row.
+func (csvExporter *CSVExportOutput) WriteHeader() error {
+ if !csvExporter.NoHeaderLine {
+ csvExporter.csvWriter.Write(csvExporter.Fields)
+ return csvExporter.csvWriter.Error()
+ }
+ return nil
+}
+
+// WriteFooter is a no-op for CSV export formats.
+func (csvExporter *CSVExportOutput) WriteFooter() error {
+ // no CSV footer
+ return nil
+}
+
+// Flush writes any pending data to the underlying I/O stream.
+func (csvExporter *CSVExportOutput) Flush() error {
+ csvExporter.csvWriter.Flush()
+ return csvExporter.csvWriter.Error()
+}
+
+// ExportDocument writes a line to output with the CSV representation of a document.
+func (csvExporter *CSVExportOutput) ExportDocument(document bson.D) error {
+ rowOut := make([]string, 0, len(csvExporter.Fields))
+ extendedDoc, err := bsonutil.ConvertBSONValueToJSON(document)
+ if err != nil {
+ return err
+ }
+
+ for _, fieldName := range csvExporter.Fields {
+ fieldVal := extractFieldByName(fieldName, extendedDoc)
+ if fieldVal == nil {
+ rowOut = append(rowOut, "")
+ } else if reflect.TypeOf(fieldVal) == reflect.TypeOf(bson.M{}) ||
+ reflect.TypeOf(fieldVal) == reflect.TypeOf(bson.D{}) ||
+ reflect.TypeOf(fieldVal) == marshalDType ||
+ reflect.TypeOf(fieldVal) == reflect.TypeOf([]interface{}{}) {
+ buf, err := json.Marshal(fieldVal)
+ if err != nil {
+ rowOut = append(rowOut, "")
+ } else {
+ rowOut = append(rowOut, string(buf))
+ }
+ } else {
+ rowOut = append(rowOut, fmt.Sprintf("%v", fieldVal))
+ }
+ }
+ csvExporter.csvWriter.Write(rowOut)
+ csvExporter.NumExported++
+ return csvExporter.csvWriter.Error()
+}
+
+// extractFieldByName takes a field name and document, and returns a value representing
+// the value of that field in the document in a format that can be printed as a string.
+// It will also handle dot-delimited field names for nested arrays or documents.
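+// For example (illustrative), extracting "z.1.a" walks the "z" key, then
+// array index 1, then the "a" key; any step that cannot be resolved yields "".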
+func extractFieldByName(fieldName string, document interface{}) interface{} {
+ dotParts := strings.Split(fieldName, ".")
+ var subdoc interface{} = document
+
+ for _, path := range dotParts {
+ docValue := reflect.ValueOf(subdoc)
+ if !docValue.IsValid() {
+ return ""
+ }
+ docType := docValue.Type()
+ docKind := docType.Kind()
+ if docKind == reflect.Map {
+ subdocVal := docValue.MapIndex(reflect.ValueOf(path))
+ if subdocVal.Kind() == reflect.Invalid {
+ return ""
+ }
+ subdoc = subdocVal.Interface()
+ } else if docKind == reflect.Slice {
+ if docType == marshalDType {
+ // dive into a D as a document
+ asD := bson.D(subdoc.(bsonutil.MarshalD))
+ var err error
+ subdoc, err = bsonutil.FindValueByKey(path, &asD)
+ if err != nil {
+ return ""
+ }
+ } else {
+ // check that the path can be converted to int
+ arrayIndex, err := strconv.Atoi(path)
+ if err != nil {
+ return ""
+ }
+ // bounds check for slice
+ if arrayIndex < 0 || arrayIndex >= docValue.Len() {
+ return ""
+ }
+ subdocVal := docValue.Index(arrayIndex)
+ if subdocVal.Kind() == reflect.Invalid {
+ return ""
+ }
+ subdoc = subdocVal.Interface()
+ }
+ } else {
+ // trying to index into a non-compound type - just return blank.
+ return ""
+ }
+ }
+ return subdoc
+}
diff --git a/src/mongo/gotools/mongoexport/csv_test.go b/src/mongo/gotools/mongoexport/csv_test.go
new file mode 100644
index 00000000000..cdf65814e72
--- /dev/null
+++ b/src/mongo/gotools/mongoexport/csv_test.go
@@ -0,0 +1,139 @@
+package mongoexport
+
+import (
+ "bytes"
+ "encoding/csv"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "strings"
+ "testing"
+)
+
+func TestWriteCSV(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a CSV export output", t, func() {
+ fields := []string{"_id", "x", " y", "z.1.a"}
+ out := &bytes.Buffer{}
+
+ Convey("Headers should be written correctly", func() {
+ csvExporter := NewCSVExportOutput(fields, false, out)
+ err := csvExporter.WriteHeader()
+ So(err, ShouldBeNil)
+ csvExporter.ExportDocument(bson.D{{"_id", "12345"}})
+ csvExporter.WriteFooter()
+ csvExporter.Flush()
+ rec, err := csv.NewReader(strings.NewReader(out.String())).Read()
+ So(err, ShouldBeNil)
+ So(rec, ShouldResemble, []string{"_id", "x", " y", "z.1.a"})
+ })
+
+ Convey("Headers should not be written", func() {
+ csvExporter := NewCSVExportOutput(fields, true, out)
+ err := csvExporter.WriteHeader()
+ So(err, ShouldBeNil)
+ csvExporter.ExportDocument(bson.D{{"_id", "12345"}})
+ csvExporter.WriteFooter()
+ csvExporter.Flush()
+ rec, err := csv.NewReader(strings.NewReader(out.String())).Read()
+ So(err, ShouldBeNil)
+ So(rec, ShouldResemble, []string{"12345", "", "", ""})
+ })
+
+ Convey("Exported document with missing fields should print as blank", func() {
+ csvExporter := NewCSVExportOutput(fields, true, out)
+ csvExporter.ExportDocument(bson.D{{"_id", "12345"}})
+ csvExporter.WriteFooter()
+ csvExporter.Flush()
+ rec, err := csv.NewReader(strings.NewReader(out.String())).Read()
+ So(err, ShouldBeNil)
+ So(rec, ShouldResemble, []string{"12345", "", "", ""})
+ })
+
+ Convey("Exported document with index into nested objects should print correctly", func() {
+ csvExporter := NewCSVExportOutput(fields, true, out)
+ z := []interface{}{"x", bson.D{{"a", "T"}, {"B", 1}}}
+ csvExporter.ExportDocument(bson.D{{Name: "z", Value: z}})
+ csvExporter.WriteFooter()
+ csvExporter.Flush()
+ rec, err := csv.NewReader(strings.NewReader(out.String())).Read()
+ So(err, ShouldBeNil)
+ So(rec, ShouldResemble, []string{"", "", "", "T"})
+ })
+
+ Reset(func() {
+ out.Reset()
+ })
+
+ })
+}
+
+func TestExtractDField(t *testing.T) {
+ Convey("With a test bson.D", t, func() {
+ b := []interface{}{"inner", bsonutil.MarshalD{{"inner2", 1}}}
+ c := bsonutil.MarshalD{{"x", 5}}
+ d := bsonutil.MarshalD{{"z", nil}}
+ testD := bsonutil.MarshalD{
+ {"a", "string"},
+ {"b", b},
+ {"c", c},
+ {"d", d},
+ }
+
+ Convey("regular fields should be extracted by name", func() {
+ val := extractFieldByName("a", testD)
+ So(val, ShouldEqual, "string")
+ })
+
+ Convey("array fields should be extracted by name", func() {
+ val := extractFieldByName("b.1", testD)
+ So(val, ShouldResemble, bsonutil.MarshalD{{"inner2", 1}})
+ val = extractFieldByName("b.1.inner2", testD)
+ So(val, ShouldEqual, 1)
+ val = extractFieldByName("b.0", testD)
+ So(val, ShouldEqual, "inner")
+ })
+
+ Convey("subdocument fields should be extracted by name", func() {
+ val := extractFieldByName("c", testD)
+ So(val, ShouldResemble, bsonutil.MarshalD{{"x", 5}})
+ val = extractFieldByName("c.x", testD)
+ So(val, ShouldEqual, 5)
+
+ Convey("even if they contain null values", func() {
+ val := extractFieldByName("d", testD)
+ So(val, ShouldResemble, bsonutil.MarshalD{{"z", nil}})
+ val = extractFieldByName("d.z", testD)
+ So(val, ShouldEqual, nil)
+ val = extractFieldByName("d.z.nope", testD)
+ So(val, ShouldEqual, "")
+ })
+ })
+
+ Convey(`non-existing fields should return ""`, func() {
+ val := extractFieldByName("f", testD)
+ So(val, ShouldEqual, "")
+ val = extractFieldByName("c.nope", testD)
+ So(val, ShouldEqual, "")
+ val = extractFieldByName("c.nope.NOPE", testD)
+ So(val, ShouldEqual, "")
+ val = extractFieldByName("b.1000", testD)
+ So(val, ShouldEqual, "")
+ val = extractFieldByName("b.1.nada", testD)
+ So(val, ShouldEqual, "")
+ })
+
+ })
+
+ Convey(`Extraction of a non-document should return ""`, t, func() {
+ val := extractFieldByName("meh", []interface{}{"meh"})
+ So(val, ShouldEqual, "")
+ })
+
+ Convey(`Extraction of a nil document should return ""`, t, func() {
+ val := extractFieldByName("a", nil)
+ So(val, ShouldEqual, "")
+ })
+}
diff --git a/src/mongo/gotools/mongoexport/json.go b/src/mongo/gotools/mongoexport/json.go
new file mode 100644
index 00000000000..7697b0dc264
--- /dev/null
+++ b/src/mongo/gotools/mongoexport/json.go
@@ -0,0 +1,109 @@
+package mongoexport
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/json"
+ "gopkg.in/mgo.v2/bson"
+ "io"
+)
+
+// JSONExportOutput is an implementation of ExportOutput that writes documents
+// to the output in JSON format.
+type JSONExportOutput struct {
+ // ArrayOutput when set to true indicates that the output should be written
+ // as a JSON array, where each document is an element in the array.
+ ArrayOutput bool
+ // Pretty when set to true indicates that the output will be written in pretty mode.
+ PrettyOutput bool
+ Encoder *json.Encoder
+ Out io.Writer
+ NumExported int64
+}
+
+// NewJSONExportOutput creates a new JSONExportOutput in array mode if specified,
+// configured to write data to the given io.Writer.
+func NewJSONExportOutput(arrayOutput bool, prettyOutput bool, out io.Writer) *JSONExportOutput {
+ return &JSONExportOutput{
+ arrayOutput,
+ prettyOutput,
+ json.NewEncoder(out),
+ out,
+ 0,
+ }
+}
+
+// WriteHeader writes the opening square bracket if in array mode, otherwise it
+// behaves as a no-op.
+func (jsonExporter *JSONExportOutput) WriteHeader() error {
+ if jsonExporter.ArrayOutput {
+ // TODO check # bytes written?
+ _, err := jsonExporter.Out.Write([]byte{json.ArrayStart})
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// WriteFooter writes the closing square bracket if in array mode, otherwise it
+// behaves as a no-op.
+func (jsonExporter *JSONExportOutput) WriteFooter() error {
+ if jsonExporter.ArrayOutput {
+ _, err := jsonExporter.Out.Write([]byte{json.ArrayEnd, '\n'})
+ // TODO check # bytes written?
+ if err != nil {
+ return err
+ }
+ }
+ if jsonExporter.PrettyOutput {
+ jsonExporter.Out.Write([]byte("\n"))
+ }
+ return nil
+}
+
+// Flush is a no-op for JSON export formats.
+func (jsonExporter *JSONExportOutput) Flush() error {
+ return nil
+}
+
+// ExportDocument converts the given document to extended JSON, and writes it
+// to the output.
+func (jsonExporter *JSONExportOutput) ExportDocument(document bson.D) error {
+ if jsonExporter.ArrayOutput || jsonExporter.PrettyOutput {
+ if jsonExporter.NumExported >= 1 {
+ if jsonExporter.ArrayOutput {
+ jsonExporter.Out.Write([]byte(","))
+ }
+ if jsonExporter.PrettyOutput {
+ jsonExporter.Out.Write([]byte("\n"))
+ }
+ }
+ extendedDoc, err := bsonutil.ConvertBSONValueToJSON(document)
+ if err != nil {
+ return err
+ }
+ jsonOut, err := json.Marshal(extendedDoc)
+ if err != nil {
+ return fmt.Errorf("error converting BSON to extended JSON: %v", err)
+ }
+ if jsonExporter.PrettyOutput {
+ var jsonFormatted bytes.Buffer
+ json.Indent(&jsonFormatted, jsonOut, "", "\t")
+ jsonOut = jsonFormatted.Bytes()
+ }
+ jsonExporter.Out.Write(jsonOut)
+ } else {
+ extendedDoc, err := bsonutil.ConvertBSONValueToJSON(document)
+ if err != nil {
+ return err
+ }
+ err = jsonExporter.Encoder.Encode(extendedDoc)
+ if err != nil {
+ return err
+ }
+ }
+ jsonExporter.NumExported++
+ return nil
+}
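+
+// In array mode the emitted stream therefore has the shape (illustrative)
+// [{"_id":1},{"_id":2}] followed by a newline: commas are inserted between
+// documents by ExportDocument, and the brackets by WriteHeader and WriteFooter.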
diff --git a/src/mongo/gotools/mongoexport/json_test.go b/src/mongo/gotools/mongoexport/json_test.go
new file mode 100644
index 00000000000..ff988f971c0
--- /dev/null
+++ b/src/mongo/gotools/mongoexport/json_test.go
@@ -0,0 +1,73 @@
+package mongoexport
+
+import (
+ "bytes"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+)
+
+func TestWriteJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a JSON export output", t, func() {
+ out := &bytes.Buffer{}
+
+ Convey("Special types should serialize as extended JSON", func() {
+
+ Convey("ObjectId should have an extended JSON format", func() {
+ jsonExporter := NewJSONExportOutput(false, false, out)
+ objId := bson.NewObjectId()
+ err := jsonExporter.WriteHeader()
+ So(err, ShouldBeNil)
+ err = jsonExporter.ExportDocument(bson.D{{"_id", objId}})
+ So(err, ShouldBeNil)
+ err = jsonExporter.WriteFooter()
+ So(err, ShouldBeNil)
+ So(out.String(), ShouldEqual, `{"_id":{"$oid":"`+objId.Hex()+`"}}`+"\n")
+ })
+
+ Reset(func() {
+ out.Reset()
+ })
+ })
+
+ })
+}
+
+func TestJSONArray(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a JSON export output in array mode", t, func() {
+ out := &bytes.Buffer{}
+ Convey("exporting a bunch of documents should produce valid json", func() {
+ jsonExporter := NewJSONExportOutput(true, false, out)
+ err := jsonExporter.WriteHeader()
+ So(err, ShouldBeNil)
+
+ // Export a few docs of various types
+
+ testObjs := []interface{}{bson.NewObjectId(), "asd", 12345, 3.14159, bson.D{{"A", 1}}}
+ for _, obj := range testObjs {
+ err = jsonExporter.ExportDocument(bson.D{{"_id", obj}})
+ So(err, ShouldBeNil)
+ }
+
+ err = jsonExporter.WriteFooter()
+ So(err, ShouldBeNil)
+ // Unmarshal the whole thing, it should be valid json
+ fromJSON := []map[string]interface{}{}
+ err = json.Unmarshal(out.Bytes(), &fromJSON)
+ So(err, ShouldBeNil)
+ So(len(fromJSON), ShouldEqual, len(testObjs))
+
+ })
+
+ Reset(func() {
+ out.Reset()
+ })
+
+ })
+}
diff --git a/src/mongo/gotools/mongoexport/kerberos_test.go b/src/mongo/gotools/mongoexport/kerberos_test.go
new file mode 100644
index 00000000000..06c7ecb0b6e
--- /dev/null
+++ b/src/mongo/gotools/mongoexport/kerberos_test.go
@@ -0,0 +1,44 @@
+package mongoexport
+
+import (
+ "bytes"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "strings"
+ "testing"
+)
+
+func TestKerberos(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.KerberosTestType)
+
+ Convey("Should be able to run mongoexport with Kerberos auth", t, func() {
+ opts, err := testutil.GetKerberosOptions()
+
+ So(err, ShouldBeNil)
+
+ sessionProvider, err := db.NewSessionProvider(*opts)
+ So(err, ShouldBeNil)
+
+ export := MongoExport{
+ ToolOptions: *opts,
+ OutputOpts: &OutputFormatOptions{},
+ InputOpts: &InputOptions{},
+ SessionProvider: sessionProvider,
+ }
+
+ var out bytes.Buffer
+ num, err := export.exportInternal(&out)
+
+ So(err, ShouldBeNil)
+ So(num, ShouldEqual, 1)
+ outputLines := strings.Split(strings.TrimSpace(out.String()), "\n")
+ So(len(outputLines), ShouldEqual, 1)
+ outMap := map[string]interface{}{}
+ So(json.Unmarshal([]byte(outputLines[0]), &outMap), ShouldBeNil)
+ So(outMap["kerberos"], ShouldEqual, true)
+ So(outMap["authenticated"], ShouldEqual, "yeah")
+ So(outMap["_id"].(map[string]interface{})["$oid"], ShouldEqual, "528fb35afb3a8030e2f643c3")
+ })
+}
diff --git a/src/mongo/gotools/mongoexport/main/mongoexport.go b/src/mongo/gotools/mongoexport/main/mongoexport.go
new file mode 100644
index 00000000000..c18451f03df
--- /dev/null
+++ b/src/mongo/gotools/mongoexport/main/mongoexport.go
@@ -0,0 +1,143 @@
+// Main package for the mongoexport tool.
+package main
+
+import (
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/signals"
+ "github.com/mongodb/mongo-tools/common/util"
+ "github.com/mongodb/mongo-tools/mongoexport"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "os"
+)
+
+func main() {
+ // initialize command-line opts
+ opts := options.New("mongoexport", mongoexport.Usage,
+ options.EnabledOptions{Auth: true, Connection: true, Namespace: true})
+
+ outputOpts := &mongoexport.OutputFormatOptions{}
+ opts.AddOptions(outputOpts)
+ inputOpts := &mongoexport.InputOptions{}
+ opts.AddOptions(inputOpts)
+
+ args, err := opts.Parse()
+ if err != nil {
+ log.Logvf(log.Always, "error parsing command line options: %v", err)
+ log.Logvf(log.Always, "try 'mongoexport --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+ if len(args) != 0 {
+ log.Logvf(log.Always, "too many positional arguments: %v", args)
+ log.Logvf(log.Always, "try 'mongoexport --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ log.SetVerbosity(opts.Verbosity)
+ signals.Handle()
+
+ // print help, if specified
+ if opts.PrintHelp(false) {
+ return
+ }
+
+ // print version, if specified
+ if opts.PrintVersion() {
+ return
+ }
+
+ // connect directly, unless a replica set name is explicitly specified
+ _, setName := util.ParseConnectionString(opts.Host)
+ opts.Direct = (setName == "")
+ opts.ReplicaSetName = setName
+
+ provider, err := db.NewSessionProvider(*opts)
+ if err != nil {
+ log.Logvf(log.Always, "error connecting to host: %v", err)
+ os.Exit(util.ExitError)
+ }
+
+ // temporarily allow secondary reads for the isMongos check
+ provider.SetReadPreference(mgo.Nearest)
+ isMongos, err := provider.IsMongos()
+ if err != nil {
+ log.Logvf(log.Always, "%v", err)
+ os.Exit(util.ExitError)
+ }
+
+ provider.SetFlags(db.DisableSocketTimeout)
+
+ if inputOpts.SlaveOk {
+ if inputOpts.ReadPreference != "" {
+ log.Logvf(log.Always, "--slaveOk can't be specified when --readPreference is specified")
+ os.Exit(util.ExitBadOptions)
+ }
+ log.Logvf(log.Always, "--slaveOk is deprecated and being internally rewritten as --readPreference=nearest")
+ inputOpts.ReadPreference = "nearest"
+ }
+
+ var mode mgo.Mode
+ if opts.ReplicaSetName != "" || isMongos {
+ mode = mgo.Primary
+ } else {
+ mode = mgo.Nearest
+ }
+ var tags bson.D
+ if inputOpts.ReadPreference != "" {
+ mode, tags, err = db.ParseReadPreference(inputOpts.ReadPreference)
+ if err != nil {
+ log.Logvf(log.Always, "error parsing --readPreference: %v", err)
+ os.Exit(util.ExitBadOptions)
+ }
+ if len(tags) > 0 {
+ provider.SetTags(tags)
+ }
+ }
+
+ // warn if we are trying to export from a secondary in a sharded cluster
+ if isMongos && mode != mgo.Primary {
+ log.Logvf(log.Always, db.WarningNonPrimaryMongosConnection)
+ }
+
+ provider.SetReadPreference(mode)
+
+ exporter := mongoexport.MongoExport{
+ ToolOptions: *opts,
+ OutputOpts: outputOpts,
+ InputOpts: inputOpts,
+ SessionProvider: provider,
+ }
+
+ err = exporter.ValidateSettings()
+ if err != nil {
+ log.Logvf(log.Always, "error validating settings: %v", err)
+ log.Logvf(log.Always, "try 'mongoexport --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ writer, err := exporter.GetOutputWriter()
+ if err != nil {
+ log.Logvf(log.Always, "error opening output stream: %v", err)
+ os.Exit(util.ExitError)
+ }
+ if writer == nil {
+ writer = os.Stdout
+ } else {
+ defer writer.Close()
+ }
+
+ numDocs, err := exporter.Export(writer)
+ if err != nil {
+ log.Logvf(log.Always, "Failed: %v", err)
+ os.Exit(util.ExitError)
+ }
+
+ if numDocs == 1 {
+ log.Logvf(log.Always, "exported %v record", numDocs)
+ } else {
+ log.Logvf(log.Always, "exported %v records", numDocs)
+ }
+
+}
diff --git a/src/mongo/gotools/mongoexport/mongoexport.go b/src/mongo/gotools/mongoexport/mongoexport.go
new file mode 100644
index 00000000000..995e252f4df
--- /dev/null
+++ b/src/mongo/gotools/mongoexport/mongoexport.go
@@ -0,0 +1,425 @@
+// Package mongoexport produces a JSON or CSV export of data stored in a MongoDB instance.
+package mongoexport
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/progress"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// Output types supported by mongoexport.
+const (
+ CSV = "csv"
+ JSON = "json"
+ progressBarLength = 24
+ progressBarWaitTime = time.Second
+ watchProgressorUpdateFrequency = 8000
+)
+
+// MongoExport is a container for the user-specified options and
+// internal state used for running mongoexport.
+type MongoExport struct {
+ // generic mongo tool options
+ ToolOptions options.ToolOptions
+
+ // OutputOpts controls options for how the exported data should be formatted
+ OutputOpts *OutputFormatOptions
+
+ InputOpts *InputOptions
+
+ // for connecting to the db
+ SessionProvider *db.SessionProvider
+ ExportOutput ExportOutput
+}
+
+// ExportOutput is an interface that specifies how a document should be formatted
+// and written to an output stream.
+type ExportOutput interface {
+ // WriteHeader outputs any pre-record headers that are written once
+ // per output file.
+ WriteHeader() error
+
+ // WriteRecord writes the given document to the given io.Writer according to
+ // the format supported by the underlying ExportOutput implementation.
+ ExportDocument(bson.D) error
+
+ // WriteFooter outputs any post-record headers that are written once per
+ // output file.
+ WriteFooter() error
+
+ // Flush writes any pending data to the underlying I/O stream.
+ Flush() error
+}
+
+// ValidateSettings returns an error if any settings specified on the command line
+// were invalid, or nil if they are valid.
+func (exp *MongoExport) ValidateSettings() error {
+ // Namespace must have a valid database if none is specified,
+ // use 'test'
+ if exp.ToolOptions.Namespace.DB == "" {
+ exp.ToolOptions.Namespace.DB = "test"
+ }
+
+ if exp.ToolOptions.Namespace.Collection == "" {
+ return fmt.Errorf("must specify a collection")
+ }
+ if err := util.ValidateCollectionName(exp.ToolOptions.Namespace.Collection); err != nil {
+ return err
+ }
+
+ exp.OutputOpts.Type = strings.ToLower(exp.OutputOpts.Type)
+
+ if exp.OutputOpts.CSVOutputType {
+ log.Logv(log.Always, "csv flag is deprecated; please use --type=csv instead")
+ exp.OutputOpts.Type = CSV
+ }
+
+ if exp.OutputOpts.Type == "" {
+ // special error for an empty type value
+ return fmt.Errorf("--type cannot be empty")
+ }
+ if exp.OutputOpts.Type != CSV && exp.OutputOpts.Type != JSON {
+ return fmt.Errorf("invalid output type '%v', choose 'json' or 'csv'", exp.OutputOpts.Type)
+ }
+
+ if exp.InputOpts.Query != "" && exp.InputOpts.ForceTableScan {
+ return fmt.Errorf("cannot use --forceTableScan when specifying --query")
+ }
+
+ if exp.InputOpts.Query != "" && exp.InputOpts.QueryFile != "" {
+ return fmt.Errorf("either --query or --queryFile can be specified as a query option")
+ }
+
+ if exp.InputOpts != nil && exp.InputOpts.HasQuery() {
+ content, err := exp.InputOpts.GetQuery()
+ if err != nil {
+ return err
+ }
+ _, err2 := getObjectFromByteArg(content)
+ if err2 != nil {
+ return err2
+ }
+ }
+
+ if exp.InputOpts != nil && exp.InputOpts.Sort != "" {
+ _, err := getSortFromArg(exp.InputOpts.Sort)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetOutputWriter opens and returns an io.WriteCloser for the output
+// options or nil if none is set. The caller is responsible for closing it.
+func (exp *MongoExport) GetOutputWriter() (io.WriteCloser, error) {
+ if exp.OutputOpts.OutputFile != "" {
+ // If the directory in which the output file is to be
+ // written does not exist, create it
+ fileDir := filepath.Dir(exp.OutputOpts.OutputFile)
+ err := os.MkdirAll(fileDir, 0750)
+ if err != nil {
+ return nil, err
+ }
+
+ file, err := os.Create(util.ToUniversalPath(exp.OutputOpts.OutputFile))
+ if err != nil {
+ return nil, err
+ }
+ return file, err
+ }
+ // No writer, so caller should assume Stdout (or some other reasonable default)
+ return nil, nil
+}
+
+// Take a comma-delimited set of field names and build a selector doc for query projection.
+// For fields containing a dot '.', we project the entire top-level portion.
+// e.g. "a,b,c.d.e,f.$" -> {a:1, b:1, "c":1, "f.$": 1}.
+func makeFieldSelector(fields string) bson.M {
+ selector := bson.M{"_id": 1}
+ if fields == "" {
+ return selector
+ }
+
+ for _, field := range strings.Split(fields, ",") {
+ // Projections like "a.0" work fine for nested documents, but not for
+ // arrays, when passed directly to mongod. To handle this, we have to retrieve
+ // the entire top-level document and then filter afterwards. An exception
+ // is made for '$' projections - which are passed directly to mongod.
+ if i := strings.LastIndex(field, "."); i != -1 && field[i+1:] != "$" {
+ field = field[:strings.Index(field, ".")]
+ }
+ selector[field] = 1
+ }
+ return selector
+}
+
+// getCount returns an estimate of how many documents the cursor will fetch.
+// It always returns Limit if there is a limit, assuming that in general
+// limits will be less than the total possible.
+// If there is a query and no limit then it returns 0, because it is too expensive to count the query.
+// Otherwise it returns the collection count minus the skip.
+func (exp *MongoExport) getCount() (c int, err error) {
+ session, err := exp.SessionProvider.GetSession()
+ if err != nil {
+ return 0, err
+ }
+ defer session.Close()
+ if exp.InputOpts != nil && exp.InputOpts.Limit != 0 {
+ return exp.InputOpts.Limit, nil
+ }
+ if exp.InputOpts != nil && exp.InputOpts.Query != "" {
+ return 0, nil
+ }
+ q := session.DB(exp.ToolOptions.Namespace.DB).C(exp.ToolOptions.Namespace.Collection).Find(nil)
+ c, err = q.Count()
+ if err != nil {
+ return 0, err
+ }
+ var skip int
+ if exp.InputOpts != nil {
+ skip = exp.InputOpts.Skip
+ }
+ if skip > c {
+ c = 0
+ } else {
+ c -= skip
+ }
+ return c, nil
+}
+
+// getCursor returns a cursor that can be iterated over to get all the documents
+// to export, based on the options given to mongoexport. Also returns the
+// associated session, so that it can be closed once the cursor is used up.
+func (exp *MongoExport) getCursor() (*mgo.Iter, *mgo.Session, error) {
+ sortFields := []string{}
+ if exp.InputOpts != nil && exp.InputOpts.Sort != "" {
+ sortD, err := getSortFromArg(exp.InputOpts.Sort)
+ if err != nil {
+ return nil, nil, err
+ }
+ sortFields, err = bsonutil.MakeSortString(sortD)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ query := map[string]interface{}{}
+ if exp.InputOpts != nil && exp.InputOpts.HasQuery() {
+ var err error
+ content, err := exp.InputOpts.GetQuery()
+ if err != nil {
+ return nil, nil, err
+ }
+ query, err = getObjectFromByteArg(content)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ flags := 0
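+ // snapshot the query only when it is a full, unsorted collection scan
+ // and a table scan was not explicitly forced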
+ if len(query) == 0 && exp.InputOpts != nil &&
+ !exp.InputOpts.ForceTableScan && exp.InputOpts.Sort == "" {
+ flags = flags | db.Snapshot
+ }
+
+ session, err := exp.SessionProvider.GetSession()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ skip := 0
+ if exp.InputOpts != nil {
+ skip = exp.InputOpts.Skip
+ }
+ limit := 0
+ if exp.InputOpts != nil {
+ limit = exp.InputOpts.Limit
+ }
+
+ if exp.InputOpts != nil && exp.InputOpts.AssertExists {
+ collNames, err := session.DB(exp.ToolOptions.Namespace.DB).CollectionNames()
+ if err != nil {
+ return nil, session, err
+ }
+ if !util.StringSliceContains(collNames, exp.ToolOptions.Namespace.Collection) {
+ return nil, session, fmt.Errorf("collection '%s' does not exist",
+ exp.ToolOptions.Namespace.Collection)
+ }
+ }
+
+ // build the query
+ q := session.DB(exp.ToolOptions.Namespace.DB).
+ C(exp.ToolOptions.Namespace.Collection).Find(query).Sort(sortFields...).
+ Skip(skip).Limit(limit)
+
+ if len(exp.OutputOpts.Fields) > 0 {
+ q.Select(makeFieldSelector(exp.OutputOpts.Fields))
+ }
+
+ q = db.ApplyFlags(q, session, flags)
+
+ return q.Iter(), session, nil
+
+}
+
+// Internal function that handles exporting to the given writer. Used primarily
+// for testing, because it bypasses writing to the file system.
+func (exp *MongoExport) exportInternal(out io.Writer) (int64, error) {
+
+ max, err := exp.getCount()
+ if err != nil {
+ return 0, err
+ }
+
+ progressManager := progress.NewProgressBarManager(log.Writer(0), progressBarWaitTime)
+ progressManager.Start()
+ defer progressManager.Stop()
+
+ watchProgressor := progress.NewCounter(int64(max))
+ bar := &progress.Bar{
+ Name: fmt.Sprintf("%v.%v", exp.ToolOptions.Namespace.DB, exp.ToolOptions.Namespace.Collection),
+ Watching: watchProgressor,
+ BarLength: progressBarLength,
+ }
+ progressManager.Attach(bar)
+ defer progressManager.Detach(bar)
+
+ exportOutput, err := exp.getExportOutput(out)
+ if err != nil {
+ return 0, err
+ }
+
+ cursor, session, err := exp.getCursor()
+ if err != nil {
+ return 0, err
+ }
+ defer session.Close()
+ defer cursor.Close()
+
+ connURL := exp.ToolOptions.Host
+ if connURL == "" {
+ connURL = util.DefaultHost
+ }
+ if exp.ToolOptions.Port != "" {
+ connURL = connURL + ":" + exp.ToolOptions.Port
+ }
+ log.Logvf(log.Always, "connected to: %v", connURL)
+
+ // Write headers
+ err = exportOutput.WriteHeader()
+ if err != nil {
+ return 0, err
+ }
+
+ var result bson.D
+
+ docsCount := int64(0)
+
+ // Write document content
+ for cursor.Next(&result) {
+ err := exportOutput.ExportDocument(result)
+ if err != nil {
+ return docsCount, err
+ }
+ docsCount++
+ if docsCount%watchProgressorUpdateFrequency == 0 {
+ watchProgressor.Set(docsCount)
+ }
+ }
+ watchProgressor.Set(docsCount)
+ if err := cursor.Err(); err != nil {
+ return docsCount, err
+ }
+
+ // Write footers
+ err = exportOutput.WriteFooter()
+ if err != nil {
+ return docsCount, err
+ }
+ exportOutput.Flush()
+ return docsCount, nil
+}
+
+// Export executes the entire export operation. It returns the count of
+// documents successfully exported, and a non-nil error if something went
+// wrong during the export operation.
+func (exp *MongoExport) Export(out io.Writer) (int64, error) {
+ count, err := exp.exportInternal(out)
+ return count, err
+}
+
+// getExportOutput returns an implementation of ExportOutput which can handle
+// transforming BSON documents into the appropriate output format and writing
+// them to an output stream.
+func (exp *MongoExport) getExportOutput(out io.Writer) (ExportOutput, error) {
+ if exp.OutputOpts.Type == CSV {
+ // TODO what if user specifies *both* --fields and --fieldFile?
+ var fields []string
+ var err error
+ if len(exp.OutputOpts.Fields) > 0 {
+ fields = strings.Split(exp.OutputOpts.Fields, ",")
+ } else if exp.OutputOpts.FieldFile != "" {
+ fields, err = util.GetFieldsFromFile(exp.OutputOpts.FieldFile)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, fmt.Errorf("CSV mode requires a field list")
+ }
+
+ exportFields := make([]string, 0, len(fields))
+ for _, field := range fields {
+ // for '$' field projections, exclude '.$' from the field name
+ if i := strings.LastIndex(field, "."); i != -1 && field[i+1:] == "$" {
+ exportFields = append(exportFields, field[:i])
+ } else {
+ exportFields = append(exportFields, field)
+ }
+ }
+
+ return NewCSVExportOutput(exportFields, exp.OutputOpts.NoHeaderLine, out), nil
+ }
+ return NewJSONExportOutput(exp.OutputOpts.JSONArray, exp.OutputOpts.Pretty, out), nil
+}
+
+// getObjectFromByteArg takes an object in extended JSON, and converts it to an object that
+// can be passed straight to db.collection.find(...) as a query or sort criteria.
+// Returns an error if the string is not valid JSON, or extended JSON.
+func getObjectFromByteArg(queryRaw []byte) (map[string]interface{}, error) {
+ parsedJSON := map[string]interface{}{}
+ err := json.Unmarshal(queryRaw, &parsedJSON)
+ if err != nil {
+ return nil, fmt.Errorf("query '%s' is not valid JSON: %v", queryRaw, err)
+ }
+
+ err = bsonutil.ConvertJSONDocumentToBSON(parsedJSON)
+ if err != nil {
+ return nil, err
+ }
+ return parsedJSON, nil
+}
+
+// getSortFromArg takes a sort specification in JSON and returns it as a bson.D
+// object which preserves the ordering of the keys as they appear in the input.
+func getSortFromArg(queryRaw string) (bson.D, error) {
+ parsedJSON := bson.D{}
+ err := json.Unmarshal([]byte(queryRaw), &parsedJSON)
+ if err != nil {
+ return nil, fmt.Errorf("query '%v' is not valid JSON: %v", queryRaw, err)
+ }
+ // TODO: verify sort specification before returning a nil error
+ return parsedJSON, nil
+}
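
getSortFromArg preserves key order by decoding into a bson.D, which matters because mgo expects sort fields in order, with a '-' prefix marking descending keys. A self-contained sketch of turning such an ordered spec into the sortFields strings consumed by Find(...).Sort(sortFields...) (the real conversion helper lives elsewhere in the package; this is illustrative):

    package main

    import (
    	"fmt"

    	"gopkg.in/mgo.v2/bson"
    )

    func main() {
    	// an ordered spec, as parsed from a --sort argument like '{age: -1, name: 1}'
    	sortSpec := bson.D{{Name: "age", Value: -1}, {Name: "name", Value: 1}}
    	sortFields := make([]string, 0, len(sortSpec))
    	for _, elem := range sortSpec {
    		name := elem.Name
    		if v, ok := elem.Value.(int); ok && v < 0 {
    			name = "-" + name // mgo marks descending sorts with a '-' prefix
    		}
    		sortFields = append(sortFields, name)
    	}
    	fmt.Println(sortFields) // prints: [-age name]
    }
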
diff --git a/src/mongo/gotools/mongoexport/mongoexport_test.go b/src/mongo/gotools/mongoexport/mongoexport_test.go
new file mode 100644
index 00000000000..f5d0e7e2afa
--- /dev/null
+++ b/src/mongo/gotools/mongoexport/mongoexport_test.go
@@ -0,0 +1,44 @@
+package mongoexport
+
+import (
+ "encoding/json"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "os"
+ "testing"
+)
+
+func TestExtendedJSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Serializing a doc to extended JSON should work", t, func() {
+ x := bson.M{
+ "_id": bson.NewObjectId(),
+ "hey": "sup",
+ "subdoc": bson.M{
+ "subid": bson.NewObjectId(),
+ },
+ "array": []interface{}{
+ bson.NewObjectId(),
+ bson.Undefined,
+ },
+ }
+ out, err := bsonutil.ConvertBSONValueToJSON(x)
+ So(err, ShouldBeNil)
+
+ jsonEncoder := json.NewEncoder(os.Stdout)
+ jsonEncoder.Encode(out)
+ })
+}
+
+func TestFieldSelect(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Using makeFieldSelector should return correct projection doc", t, func() {
+ So(makeFieldSelector("a,b"), ShouldResemble, bson.M{"_id": 1, "a": 1, "b": 1})
+ So(makeFieldSelector(""), ShouldResemble, bson.M{"_id": 1})
+ So(makeFieldSelector("x,foo.baz"), ShouldResemble, bson.M{"_id": 1, "foo": 1, "x": 1})
+ })
+}
diff --git a/src/mongo/gotools/mongoexport/options.go b/src/mongo/gotools/mongoexport/options.go
new file mode 100644
index 00000000000..54896655581
--- /dev/null
+++ b/src/mongo/gotools/mongoexport/options.go
@@ -0,0 +1,79 @@
+package mongoexport
+
+import (
+ "fmt"
+ "io/ioutil"
+)
+
+var Usage = `<options>
+
+Export data from MongoDB in CSV or JSON format.
+
+See http://docs.mongodb.org/manual/reference/program/mongoexport/ for more information.`
+
+// OutputFormatOptions defines the set of options to use in formatting exported data.
+type OutputFormatOptions struct {
+ // Fields is an option to directly specify comma-separated fields to export to CSV.
+ Fields string `long:"fields" value-name:"<field>[,<field>]*" short:"f" description:"comma separated list of field names (required for exporting CSV) e.g. -f \"name,age\" "`
+
+ // FieldFile is a filename that refers to a list of fields to export, 1 per line.
+ FieldFile string `long:"fieldFile" value-name:"<filename>" description:"file with field names - 1 per line"`
+
+ // Type selects the type of output to export as (json or csv).
+ Type string `long:"type" value-name:"<type>" default:"json" default-mask:"-" description:"the output format, either json or csv (defaults to 'json')"`
+
+ // Deprecated: allow legacy --csv option in place of --type=csv
+ CSVOutputType bool `long:"csv" default:"false" hidden:"true"`
+
+ // OutputFile specifies an output file path.
+ OutputFile string `long:"out" value-name:"<filename>" short:"o" description:"output file; if not specified, stdout is used"`
+
+ // JSONArray, if set, will export the documents as an array of JSON documents.
+ JSONArray bool `long:"jsonArray" description:"output to a JSON array rather than one object per line"`
+
+ // Pretty displays JSON data in a human-readable form.
+ Pretty bool `long:"pretty" description:"output JSON formatted to be human-readable"`
+
+ // NoHeaderLine, if set, will export CSV data without a list of field names on the first line.
+ NoHeaderLine bool `long:"noHeaderLine" description:"export CSV data without a list of field names on the first line"`
+}
+
+// Name returns a human-readable group name for output format options.
+func (*OutputFormatOptions) Name() string {
+ return "output"
+}
+
+// InputOptions defines the set of options to use in retrieving data from the server.
+type InputOptions struct {
+ Query string `long:"query" value-name:"<json>" short:"q" description:"query filter, as a JSON string, e.g., '{x:{$gt:1}}'"`
+ QueryFile string `long:"queryFile" value-name:"<filename>" description:"path to a file containing a query filter (JSON)"`
+ SlaveOk bool `long:"slaveOk" short:"k" description:"allow secondary reads if available (default true)" default:"false" default-mask:"-"`
+ ReadPreference string `long:"readPreference" value-name:"<string>|<json>" description:"specify either a preference name or a preference json object"`
+ ForceTableScan bool `long:"forceTableScan" description:"force a table scan (do not use $snapshot)"`
+ Skip int `long:"skip" value-name:"<count>" description:"number of documents to skip"`
+ Limit int `long:"limit" value-name:"<count>" description:"limit the number of documents to export"`
+ Sort string `long:"sort" value-name:"<json>" description:"sort order, as a JSON string, e.g. '{x:1}'"`
+ AssertExists bool `long:"assertExists" default:"false" description:"if specified, export fails if the collection does not exist"`
+}
+
+// Name returns a human-readable group name for input options.
+func (*InputOptions) Name() string {
+ return "querying"
+}
+
+// HasQuery returns true if either a query or a query file was specified.
+func (inputOptions *InputOptions) HasQuery() bool {
+ return inputOptions.Query != "" || inputOptions.QueryFile != ""
+}
+
+// GetQuery returns the query as a byte slice, reading it from the query file
+// if one was specified. Calling it with neither a query nor a query file set
+// is a programming error and panics.
+func (inputOptions *InputOptions) GetQuery() ([]byte, error) {
+ if inputOptions.Query != "" {
+ return []byte(inputOptions.Query), nil
+ } else if inputOptions.QueryFile != "" {
+ content, err := ioutil.ReadFile(inputOptions.QueryFile)
+ if err != nil {
+ err = fmt.Errorf("error reading queryFile: %s", err)
+ }
+ return content, err
+ }
+ panic("GetQuery can return valid values only for query or queryFile input")
+}
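
A sketch of how a caller might wire HasQuery and GetQuery into the export path (hypothetical glue code; the real call sites live elsewhere in the package):

    // hypothetical caller: read the raw query bytes, then parse the extended JSON
    query := map[string]interface{}{}
    if exp.InputOpts != nil && exp.InputOpts.HasQuery() {
    	raw, err := exp.InputOpts.GetQuery()
    	if err != nil {
    		return err
    	}
    	if query, err = getObjectFromByteArg(raw); err != nil {
    		return err
    	}
    }
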
diff --git a/src/mongo/gotools/mongofiles/main/mongofiles.go b/src/mongo/gotools/mongofiles/main/mongofiles.go
new file mode 100644
index 00000000000..57be94931a9
--- /dev/null
+++ b/src/mongo/gotools/mongofiles/main/mongofiles.go
@@ -0,0 +1,76 @@
+// Main package for the mongofiles tool.
+package main
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/signals"
+ "github.com/mongodb/mongo-tools/common/util"
+ "github.com/mongodb/mongo-tools/mongofiles"
+ "os"
+)
+
+func main() {
+ // initialize command-line opts
+ opts := options.New("mongofiles", mongofiles.Usage, options.EnabledOptions{Auth: true, Connection: true, Namespace: false})
+
+ storageOpts := &mongofiles.StorageOptions{}
+ opts.AddOptions(storageOpts)
+ inputOpts := &mongofiles.InputOptions{}
+ opts.AddOptions(inputOpts)
+
+ args, err := opts.Parse()
+ if err != nil {
+ log.Logvf(log.Always, "error parsing command line options: %v", err)
+ log.Logvf(log.Always, "try 'mongofiles --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ // print help, if specified
+ if opts.PrintHelp(false) {
+ return
+ }
+
+ // print version, if specified
+ if opts.PrintVersion() {
+ return
+ }
+ log.SetVerbosity(opts.Verbosity)
+ signals.Handle()
+
+ // add the specified database to the namespace options struct
+ opts.Namespace.DB = storageOpts.DB
+
+ // connect directly, unless a replica set name is explicitly specified
+ _, setName := util.ParseConnectionString(opts.Host)
+ opts.Direct = (setName == "")
+ opts.ReplicaSetName = setName
+
+ // create a session provider to connect to the db
+ provider, err := db.NewSessionProvider(*opts)
+ if err != nil {
+ log.Logvf(log.Always, "error connecting to host: %v", err)
+ os.Exit(util.ExitError)
+ }
+ mf := mongofiles.MongoFiles{
+ ToolOptions: opts,
+ StorageOptions: storageOpts,
+ SessionProvider: provider,
+ InputOptions: inputOpts,
+ }
+
+ if err := mf.ValidateCommand(args); err != nil {
+ log.Logvf(log.Always, "%v", err)
+ log.Logvf(log.Always, "try 'mongofiles --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ output, err := mf.Run(true)
+ if err != nil {
+ log.Logvf(log.Always, "Failed: %v", err)
+ os.Exit(util.ExitError)
+ }
+ fmt.Printf("%s", output)
+}
diff --git a/src/mongo/gotools/mongofiles/mongofiles.go b/src/mongo/gotools/mongofiles/mongofiles.go
new file mode 100644
index 00000000000..c2063185735
--- /dev/null
+++ b/src/mongo/gotools/mongofiles/mongofiles.go
@@ -0,0 +1,407 @@
+// Package mongofiles provides an interface to GridFS collections in a MongoDB instance.
+package mongofiles
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "io"
+ "os"
+ "regexp"
+ "time"
+)
+
+// List of possible commands for mongofiles.
+const (
+ List = "list"
+ Search = "search"
+ Put = "put"
+ Get = "get"
+ GetID = "get_id"
+ Delete = "delete"
+ DeleteID = "delete_id"
+)
+
+// MongoFiles is a container for the user-specified options and
+// internal state used for running mongofiles.
+type MongoFiles struct {
+ // generic mongo tool options
+ ToolOptions *options.ToolOptions
+
+ // mongofiles-specific storage options
+ StorageOptions *StorageOptions
+
+ // mongofiles-specific input options
+ InputOptions *InputOptions
+
+ // for connecting to the db
+ SessionProvider *db.SessionProvider
+
+ // command to run
+ Command string
+
+ // filename in GridFS
+ FileName string
+}
+
+// GFSFile represents a GridFS file.
+type GFSFile struct {
+ Id bson.ObjectId `bson:"_id"`
+ ChunkSize int `bson:"chunkSize"`
+ Name string `bson:"filename"`
+ Length int64 `bson:"length"`
+ Md5 string `bson:"md5"`
+ UploadDate time.Time `bson:"uploadDate"`
+ ContentType string `bson:"contentType,omitempty"`
+}
+
+// ValidateCommand ensures the arguments supplied are valid.
+func (mf *MongoFiles) ValidateCommand(args []string) error {
+ // make sure a command is specified and that we don't have
+ // too many arguments
+ if len(args) == 0 {
+ return fmt.Errorf("no command specified")
+ } else if len(args) > 2 {
+ return fmt.Errorf("too many positional arguments")
+ }
+
+ var fileName string
+ switch args[0] {
+ case List:
+ if len(args) == 1 {
+ fileName = ""
+ } else {
+ fileName = args[1]
+ }
+ case Search, Put, Get, Delete, GetID, DeleteID:
+ // also make sure the supporting argument isn't literally an
+ // empty string, for example: mongofiles get ""
+ if len(args) == 1 || args[1] == "" {
+ return fmt.Errorf("'%v' argument missing", args[0])
+ }
+ fileName = args[1]
+ default:
+ return fmt.Errorf("'%v' is not a valid command", args[0])
+ }
+
+ if mf.StorageOptions.GridFSPrefix == "" {
+ return fmt.Errorf("--prefix can not be blank")
+ }
+
+ // set the mongofiles command and file name
+ mf.Command = args[0]
+ mf.FileName = fileName
+ return nil
+}
+
+// findAndDisplay queries GridFS for files and returns the results for display.
+func (mf *MongoFiles) findAndDisplay(gfs *mgo.GridFS, query bson.M) (string, error) {
+ display := ""
+
+ cursor := gfs.Find(query).Iter()
+ defer cursor.Close()
+
+ var file GFSFile
+ for cursor.Next(&file) {
+ display += fmt.Sprintf("%s\t%d\n", file.Name, file.Length)
+ }
+ if err := cursor.Err(); err != nil {
+ return "", fmt.Errorf("error retrieving list of GridFS files: %v", err)
+ }
+
+ return display, nil
+}
+
+// getLocalFileName returns the local filename, as specified by the --local
+// flag. It defaults to the GridFS file's name if the flag is not present.
+// If gridFile is nil, it uses the filename given on the command line.
+func (mf *MongoFiles) getLocalFileName(gridFile *mgo.GridFile) string {
+ localFileName := mf.StorageOptions.LocalFileName
+ if localFileName == "" {
+ if gridFile != nil {
+ localFileName = gridFile.Name()
+ } else {
+ localFileName = mf.FileName
+ }
+ }
+ return localFileName
+}
+
+// handle logic for 'get' command
+func (mf *MongoFiles) handleGet(gfs *mgo.GridFS) (string, error) {
+ gFile, err := gfs.Open(mf.FileName)
+ if err != nil {
+ return "", fmt.Errorf("error opening GridFS file '%s': %v", mf.FileName, err)
+ }
+ defer gFile.Close()
+ if err = mf.writeFile(gFile); err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("finished writing to %s\n", mf.getLocalFileName(gFile)), nil
+}
+
+// handle logic for 'get_id' command
+func (mf *MongoFiles) handleGetID(gfs *mgo.GridFS) (string, error) {
+ id, err := mf.parseID()
+ if err != nil {
+ return "", err
+ }
+ // with the parsed _id, grab the file and write it to disk
+ gFile, err := gfs.OpenId(id)
+ if err != nil {
+ return "", fmt.Errorf("error opening GridFS file with _id %s: %v", mf.FileName, err)
+ }
+ log.Logvf(log.Always, "found file '%v' with _id %v", gFile.Name(), mf.FileName)
+ defer gFile.Close()
+ if err = mf.writeFile(gFile); err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("finished writing to: %s\n", mf.getLocalFileName(gFile)), nil
+}
+
+// logic for deleting a file with 'delete_id'
+func (mf *MongoFiles) handleDeleteID(gfs *mgo.GridFS) (string, error) {
+ id, err := mf.parseID()
+ if err != nil {
+ return "", err
+ }
+ if err = gfs.RemoveId(id); err != nil {
+ return "", fmt.Errorf("error while removing file with _id %v from GridFS: %v\n", mf.FileName, err)
+ }
+ return fmt.Sprintf("successfully deleted file with _id %v from GridFS\n", mf.FileName), nil
+}
+
+// parseID parses the _id given on the command line as extended JSON and
+// converts it to a BSON value.
+func (mf *MongoFiles) parseID() (interface{}, error) {
+ // parse the id using extended json
+ var asJSON interface{}
+ err := json.Unmarshal([]byte(mf.FileName), &asJSON)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "error parsing _id as json: %v; make sure you are properly escaping input", err)
+ }
+ id, err := bsonutil.ConvertJSONValueToBSON(asJSON)
+ if err != nil {
+ return nil, fmt.Errorf("error converting _id to bson: %v", err)
+ }
+ return id, nil
+}
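
For example, a get_id invocation passes an extended-JSON _id on the command line; a sketch of the round trip parseID performs (json here is the tools' extended-JSON-aware common/json package):

    // sketch: parsing an extended-JSON _id such as ObjectId("507f1f77bcf86cd799439011")
    var asJSON interface{}
    if err := json.Unmarshal([]byte(`ObjectId("507f1f77bcf86cd799439011")`), &asJSON); err == nil {
    	id, _ := bsonutil.ConvertJSONValueToBSON(asJSON) // yields a bson.ObjectId
    	_ = id
    }
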
+
+// writeFile writes a file from GridFS to stdout or the filesystem.
+func (mf *MongoFiles) writeFile(gridFile *mgo.GridFile) (err error) {
+ localFileName := mf.getLocalFileName(gridFile)
+ var localFile io.WriteCloser
+ if localFileName == "-" {
+ localFile = os.Stdout
+ } else {
+ if localFile, err = os.Create(localFileName); err != nil {
+ return fmt.Errorf("error while opening local file '%v': %v\n", localFileName, err)
+ }
+ defer localFile.Close()
+ log.Logvf(log.DebugLow, "created local file '%v'", localFileName)
+ }
+
+ if _, err = io.Copy(localFile, gridFile); err != nil {
+ return fmt.Errorf("error while writing data into local file '%v': %v\n", localFileName, err)
+ }
+ return nil
+}
+
+// handle logic for 'put' command.
+func (mf *MongoFiles) handlePut(gfs *mgo.GridFS) (output string, err error) {
+ localFileName := mf.getLocalFileName(nil)
+
+ // check if --replace flag turned on
+ if mf.StorageOptions.Replace {
+ err := gfs.Remove(mf.FileName)
+ if err != nil {
+ return "", err
+ }
+ output = fmt.Sprintf("removed all instances of '%v' from GridFS\n", mf.FileName)
+ }
+
+ var localFile io.ReadCloser
+
+ if localFileName == "-" {
+ localFile = os.Stdin
+ } else {
+ localFile, err = os.Open(localFileName)
+ if err != nil {
+ return "", fmt.Errorf("error while opening local file '%v' : %v\n", localFileName, err)
+ }
+ defer localFile.Close()
+ log.Logvf(log.DebugLow, "creating GridFS file '%v' from local file '%v'", mf.FileName, localFileName)
+ }
+
+ gFile, err := gfs.Create(mf.FileName)
+ if err != nil {
+ return "", fmt.Errorf("error while creating '%v' in GridFS: %v\n", mf.FileName, err)
+ }
+ defer func() {
+ // GridFS files flush a buffer on Close(), so it's important we
+ // capture any errors that occur as this function exits, and
+ // only overwrite err if all earlier writes succeeded
+ if closeErr := gFile.Close(); err == nil && closeErr != nil {
+ log.Logvf(log.DebugHigh, "error occurred while closing GridFS file handle")
+ err = fmt.Errorf("error while storing '%v' into GridFS: %v", localFileName, closeErr)
+ }
+ }()
+
+ // set optional mime type
+ if mf.StorageOptions.ContentType != "" {
+ gFile.SetContentType(mf.StorageOptions.ContentType)
+ }
+
+ n, err := io.Copy(gFile, localFile)
+ if err != nil {
+ return "", fmt.Errorf("error while storing '%v' into GridFS: %v\n", localFileName, err)
+ }
+ log.Logvf(log.DebugLow, "copied %v bytes to server", n)
+
+ output += fmt.Sprintf("added file: %v\n", gFile.Name())
+ return output, nil
+}
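
The deferred Close above is worth calling out: GridFS flushes its buffer on Close, so handlePut uses a named error return to let the deferred function surface a flush failure without masking an earlier write error. The same pattern in a minimal, self-contained form (illustrative names):

    package main

    import (
    	"fmt"
    	"os"
    )

    // writeAll mirrors handlePut's pattern: the named return err lets the
    // deferred Close report flush errors without clobbering a write error.
    func writeAll(path string, data []byte) (err error) {
    	f, err := os.Create(path)
    	if err != nil {
    		return err
    	}
    	defer func() {
    		if closeErr := f.Close(); err == nil && closeErr != nil {
    			err = fmt.Errorf("error closing %v: %v", path, closeErr)
    		}
    	}()
    	_, err = f.Write(data)
    	return err
    }

    func main() {
    	if err := writeAll("example.txt", []byte("hello")); err != nil {
    		fmt.Println(err)
    	}
    }
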
+
+// Run the mongofiles utility. If displayHost is true, the connected host/port is
+// displayed.
+func (mf *MongoFiles) Run(displayHost bool) (string, error) {
+ connUrl := mf.ToolOptions.Host
+ if connUrl == "" {
+ connUrl = util.DefaultHost
+ }
+ if mf.ToolOptions.Port != "" {
+ connUrl = fmt.Sprintf("%s:%s", connUrl, mf.ToolOptions.Port)
+ }
+
+ var mode = mgo.Nearest
+ var tags bson.D
+
+ if mf.InputOptions.ReadPreference != "" {
+ var err error
+ mode, tags, err = db.ParseReadPreference(mf.InputOptions.ReadPreference)
+ if err != nil {
+ return "", fmt.Errorf("error parsing --readPreference : %v", err)
+ }
+ }
+
+ mf.SessionProvider.SetReadPreference(mode)
+ mf.SessionProvider.SetTags(tags)
+ mf.SessionProvider.SetFlags(db.DisableSocketTimeout)
+
+ // get session
+ session, err := mf.SessionProvider.GetSession()
+ if err != nil {
+ return "", err
+ }
+ defer session.Close()
+
+ // check type of node we're connected to, and fall back to w=1 if standalone (for <= 2.4)
+ nodeType, err := mf.SessionProvider.GetNodeType()
+ if err != nil {
+ return "", fmt.Errorf("error determining type of node connected: %v", err)
+ }
+
+ log.Logvf(log.DebugLow, "connected to node type: %v", nodeType)
+
+ safety, err := db.BuildWriteConcern(mf.StorageOptions.WriteConcern, nodeType)
+ if err != nil {
+ return "", fmt.Errorf("error parsing write concern: %v", err)
+ }
+
+ // configure the session with the appropriate write concern (the socket
+ // timeout was already disabled via the DisableSocketTimeout flag above)
+ session.SetSafe(safety)
+
+ if displayHost {
+ log.Logvf(log.Always, "connected to: %v", connUrl)
+ }
+
+ // first validate the namespaces we'll be using: <db>.<prefix>.files and
+ // <db>.<prefix>.chunks; it's enough to validate only <db>.<prefix>.chunks (the longer one)
+ err = util.ValidateFullNamespace(fmt.Sprintf("%s.%s.chunks", mf.StorageOptions.DB,
+ mf.StorageOptions.GridFSPrefix))
+
+ if err != nil {
+ return "", err
+ }
+ // get GridFS handle
+ gfs := session.DB(mf.StorageOptions.DB).GridFS(mf.StorageOptions.GridFSPrefix)
+
+ var output string
+
+ log.Logvf(log.Info, "handling mongofiles '%v' command...", mf.Command)
+
+ switch mf.Command {
+
+ case List:
+
+ query := bson.M{}
+ if mf.FileName != "" {
+ regex := bson.M{"$regex": "^" + regexp.QuoteMeta(mf.FileName)}
+ query = bson.M{"filename": regex}
+ }
+
+ output, err = mf.findAndDisplay(gfs, query)
+ if err != nil {
+ return "", err
+ }
+
+ case Search:
+
+ regex := bson.M{"$regex": mf.FileName}
+ query := bson.M{"filename": regex}
+
+ output, err = mf.findAndDisplay(gfs, query)
+ if err != nil {
+ return "", err
+ }
+
+ case Get:
+
+ output, err = mf.handleGet(gfs)
+ if err != nil {
+ return "", err
+ }
+
+ case GetID:
+
+ output, err = mf.handleGetID(gfs)
+ if err != nil {
+ return "", err
+ }
+
+ case Put:
+
+ output, err = mf.handlePut(gfs)
+ if err != nil {
+ return "", err
+ }
+
+ case Delete:
+
+ err = gfs.Remove(mf.FileName)
+ if err != nil {
+ return "", fmt.Errorf("error while removing '%v' from GridFS: %v\n", mf.FileName, err)
+ }
+ output = fmt.Sprintf("successfully deleted all instances of '%v' from GridFS\n", mf.FileName)
+
+ case DeleteID:
+
+ output, err = mf.handleDeleteID(gfs)
+ if err != nil {
+ return "", err
+ }
+
+ }
+
+ return output, nil
+}
diff --git a/src/mongo/gotools/mongofiles/mongofiles_test.go b/src/mongo/gotools/mongofiles/mongofiles_test.go
new file mode 100644
index 00000000000..8594eb6ba50
--- /dev/null
+++ b/src/mongo/gotools/mongofiles/mongofiles_test.go
@@ -0,0 +1,536 @@
+package mongofiles
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ "github.com/mongodb/mongo-tools/common/util"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+var (
+ testDB = "mongofiles_test_db"
+ testServer = "localhost"
+ testPort = db.DefaultTestPort
+
+ ssl = testutil.GetSSLOptions()
+ auth = testutil.GetAuthOptions()
+ connection = &options.Connection{
+ Host: testServer,
+ Port: testPort,
+ }
+ toolOptions = &options.ToolOptions{
+ SSL: &ssl,
+ Connection: connection,
+ Auth: &auth,
+ Verbosity: &options.Verbosity{},
+ }
+)
+
+// setUpGridFSTestData puts some test data into GridFS
+func setUpGridFSTestData() ([]interface{}, error) {
+ sessionProvider, err := db.NewSessionProvider(*toolOptions)
+ if err != nil {
+ return nil, err
+ }
+ session, err := sessionProvider.GetSession()
+ if err != nil {
+ return nil, err
+ }
+ defer session.Close()
+
+ bytesExpected := []interface{}{}
+ gfs := session.DB(testDB).GridFS("fs")
+
+ var testfile *mgo.GridFile
+
+ for i, item := range []string{"testfile1", "testfile2", "testfile3"} {
+ testfile, err = gfs.Create(item)
+ if err != nil {
+ return nil, err
+ }
+ defer testfile.Close()
+
+ n, err := testfile.Write([]byte(strings.Repeat("a", (i+1)*5)))
+ if err != nil {
+ return nil, err
+ }
+
+ bytesExpected = append(bytesExpected, n)
+ }
+
+ return bytesExpected, nil
+}
+
+// remove test data from GridFS
+func tearDownGridFSTestData() error {
+ sessionProvider, err := db.NewSessionProvider(*toolOptions)
+ if err != nil {
+ return err
+ }
+ session, err := sessionProvider.GetSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+
+ if err = session.DB(testDB).DropDatabase(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// simpleMongoFilesInstance builds a MongoFiles instance against the test
+// database for the given command-line args.
+func simpleMongoFilesInstance(args []string) (*MongoFiles, error) {
+ sessionProvider, err := db.NewSessionProvider(*toolOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ mongofiles := MongoFiles{
+ ToolOptions: toolOptions,
+ InputOptions: &InputOptions{},
+ StorageOptions: &StorageOptions{GridFSPrefix: "fs", DB: testDB},
+ SessionProvider: sessionProvider,
+ Command: args[0],
+ FileName: args[1],
+ }
+
+ return &mongofiles, nil
+}
+
+// get an id of an existing file, for _id access
+func idOfFile(mf *MongoFiles, filename string) string {
+ session, _ := mf.SessionProvider.GetSession()
+ gfs := session.DB(mf.StorageOptions.DB).GridFS(mf.StorageOptions.GridFSPrefix)
+ gFile, _ := gfs.Open(filename)
+ bytes, _ := json.Marshal(gFile.Id())
+ return fmt.Sprintf("ObjectId(%v)", string(bytes))
+}
+
+// cleanAndTokenizeTestOutput cleans test output and splits it into lines
+func cleanAndTokenizeTestOutput(str string) []string {
+ // remove last \r\n in str to avoid unnecessary line on split
+ if str != "" {
+ str = str[:len(str)-1]
+ }
+
+ return strings.Split(strings.Trim(str, "\r\n"), "\n")
+}
+
+// getFilesAndBytesFromLines returns slices of the filenames and byte counts represented by each line
+func getFilesAndBytesFromLines(lines []string) ([]interface{}, []interface{}) {
+ var fileName string
+ var byteCount int
+
+ filesGotten := []interface{}{}
+ bytesGotten := []interface{}{}
+
+ for _, line := range lines {
+ fmt.Sscanf(line, "%s\t%d", &fileName, &byteCount)
+
+ filesGotten = append(filesGotten, fileName)
+ bytesGotten = append(bytesGotten, byteCount)
+ }
+
+ return filesGotten, bytesGotten
+}
+
+// ensureSetEquality is an inefficient but simple way to ensure set equality of two slices
+func ensureSetEquality(firstArray []interface{}, secondArray []interface{}) {
+ for _, line := range firstArray {
+ So(secondArray, ShouldContain, line)
+ }
+}
+
+// check if file exists
+func fileExists(name string) bool {
+ if _, err := os.Stat(name); err != nil {
+ if os.IsNotExist(err) {
+ return false
+ }
+ }
+ return true
+}
+
+// Test that command validation succeeds whenever valid arguments are passed
+// in, and that it errors out whenever invalid ones are passed
+func TestValidArguments(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a MongoFiles instance", t, func() {
+ args := []string{"search", "file"}
+ mf, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ Convey("It should error out when no arguments fed", func() {
+ args := []string{}
+ err := mf.ValidateCommand(args)
+ So(err, ShouldNotBeNil)
+ So(err.Error(), ShouldEqual, "no command specified")
+ })
+
+ Convey("It should error out when too many positional arguments provided", func() {
+ args := []string{"list", "something", "another"}
+ err := mf.ValidateCommand(args)
+ So(err, ShouldNotBeNil)
+ So(err.Error(), ShouldEqual, "too many positional arguments")
+ })
+
+ Convey("It should not error out when list command isn't given an argument", func() {
+ args := []string{"list"}
+ So(mf.ValidateCommand(args), ShouldBeNil)
+ So(mf.StorageOptions.LocalFileName, ShouldEqual, "")
+ })
+
+ Convey("It should error out when any of (get|put|delete|search) not given supporting argument", func() {
+ for _, command := range []string{"get", "put", "delete", "search"} {
+ args := []string{command}
+ err := mf.ValidateCommand(args)
+ So(err, ShouldNotBeNil)
+ So(err.Error(), ShouldEqual, fmt.Sprintf("'%v' argument missing", command))
+ }
+ })
+
+ Convey("It should error out when a nonsensical command is given", func() {
+ args := []string{"commandnonexistent"}
+
+ err := mf.ValidateCommand(args)
+ So(err, ShouldNotBeNil)
+ So(err.Error(), ShouldEqual, fmt.Sprintf("'%v' is not a valid command", args[0]))
+ })
+
+ })
+}
+
+// Test that the output from mongofiles is actually correct
+func TestMongoFilesCommands(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.IntegrationTestType)
+
+ Convey("Testing the various commands (get|get_id|put|delete|delete_id|search|list) "+
+ "with a MongoDump instance", t, func() {
+
+ bytesExpected, err := setUpGridFSTestData()
+ So(err, ShouldBeNil)
+
+ // []interface{} here so we can use 'ensureSetEquality' method for both []string and []int
+ filesExpected := []interface{}{"testfile1", "testfile2", "testfile3"}
+
+ Convey("Testing the 'list' command with a file that isn't in GridFS should", func() {
+ args := []string{"list", "gibberish"}
+
+ mf, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+
+ Convey("produce no output", func() {
+ output, err := mf.Run(false)
+ So(err, ShouldBeNil)
+ So(len(output), ShouldEqual, 0)
+ })
+ })
+
+ Convey("Testing the 'list' command with files that are in GridFS should", func() {
+ args := []string{"list", "testf"}
+
+ mf, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+
+ Convey("produce some output", func() {
+ str, err := mf.Run(false)
+ So(err, ShouldBeNil)
+ So(len(str), ShouldNotEqual, 0)
+
+ lines := cleanAndTokenizeTestOutput(str)
+ So(len(lines), ShouldEqual, len(filesExpected))
+
+ filesGotten, bytesGotten := getFilesAndBytesFromLines(lines)
+ ensureSetEquality(filesExpected, filesGotten)
+ ensureSetEquality(bytesExpected, bytesGotten)
+ })
+ })
+
+ Convey("Testing the 'search' command with files that are in GridFS should", func() {
+ args := []string{"search", "file"}
+
+ mf, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+
+ Convey("produce some output", func() {
+ str, err := mf.Run(false)
+ So(err, ShouldBeNil)
+ So(len(str), ShouldNotEqual, 0)
+
+ lines := cleanAndTokenizeTestOutput(str)
+ So(len(lines), ShouldEqual, len(filesExpected))
+
+ filesGotten, bytesGotten := getFilesAndBytesFromLines(lines)
+ ensureSetEquality(filesExpected, filesGotten)
+ ensureSetEquality(bytesExpected, bytesGotten)
+ })
+ })
+
+ Convey("Testing the 'get' command with a file that is in GridFS should", func() {
+ args := []string{"get", "testfile1"}
+
+ mf, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+
+ Convey("copy the file to the local filesystem", func() {
+ str, err := mf.Run(false)
+ So(err, ShouldBeNil)
+ So(len(str), ShouldNotEqual, 0)
+
+ testFile, err := os.Open("testfile1")
+ So(err, ShouldBeNil)
+ defer testFile.Close()
+
+ // pretty small file; so read all
+ testFile1Bytes, err := ioutil.ReadAll(testFile)
+ So(err, ShouldBeNil)
+ So(len(testFile1Bytes), ShouldEqual, bytesExpected[0])
+ })
+
+ Convey("store the file contents in a file with different name if '--local' flag used", func() {
+ mf.StorageOptions.LocalFileName = "testfile1copy"
+ str, err := mf.Run(false)
+ So(err, ShouldBeNil)
+ So(len(str), ShouldNotEqual, 0)
+
+ testFile, err := os.Open("testfile1copy")
+ So(err, ShouldBeNil)
+ defer testFile.Close()
+
+ // pretty small file; so read all
+ testFile1Bytes, err := ioutil.ReadAll(testFile)
+ So(err, ShouldBeNil)
+ So(len(testFile1Bytes), ShouldEqual, bytesExpected[0])
+ })
+
+ // cleanup file we just copied to the local FS
+ Reset(func() {
+
+ // remove 'testfile1' or 'testfile1copy'
+ if fileExists("testfile1") {
+ err = os.Remove("testfile1")
+ }
+ So(err, ShouldBeNil)
+
+ if fileExists("testfile1copy") {
+ err = os.Remove("testfile1copy")
+ }
+ So(err, ShouldBeNil)
+
+ })
+ })
+
+ Convey("Testing the 'get_id' command with a file that is in GridFS should", func() {
+ // hack to grab an _id
+ args := []string{"get", "testfile1"}
+ mf, _ := simpleMongoFilesInstance(args)
+ idString := idOfFile(mf, "testfile1")
+
+ args = []string{"get_id", idString}
+ mf, err = simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+
+ Convey("copy the file to the local filesystem", func() {
+ str, err := mf.Run(false)
+ So(err, ShouldBeNil)
+ So(len(str), ShouldNotEqual, 0)
+
+ testFile, err := os.Open("testfile1")
+ So(err, ShouldBeNil)
+ defer testFile.Close()
+
+ // pretty small file; so read all
+ testFile1Bytes, err := ioutil.ReadAll(testFile)
+ So(err, ShouldBeNil)
+ So(len(testFile1Bytes), ShouldEqual, bytesExpected[0])
+ })
+
+ Reset(func() {
+ // remove 'testfile1' or 'testfile1copy'
+ if fileExists("testfile1") {
+ err = os.Remove("testfile1")
+ }
+ So(err, ShouldBeNil)
+ if fileExists("testfile1copy") {
+ err = os.Remove("testfile1copy")
+ }
+ So(err, ShouldBeNil)
+ })
+ })
+
+ Convey("Testing the 'put' command by putting some lorem ipsum file with 287613 bytes should", func() {
+ args := []string{"put", "lorem_ipsum_287613_bytes.txt"}
+
+ mf, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+ mf.StorageOptions.LocalFileName = util.ToUniversalPath("testdata/lorem_ipsum_287613_bytes.txt")
+
+ Convey("insert the file by creating two chunks (ceil(287,613 / 255 * 1024)) in GridFS", func() {
+ str, err := mf.Run(false)
+ So(err, ShouldBeNil)
+ So(len(str), ShouldNotEqual, 0)
+
+ Convey("and should have exactly 287613 bytes", func() {
+ args = []string{"list", ""}
+
+ mfAfter, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+
+ str, err = mfAfter.Run(false)
+ So(err, ShouldBeNil)
+
+ lines := cleanAndTokenizeTestOutput(str)
+ filesGotten, _ := getFilesAndBytesFromLines(lines)
+ So(len(lines), ShouldEqual, len(filesExpected)+1)
+ So(filesGotten, ShouldContain, "lorem_ipsum_287613_bytes.txt")
+ })
+
+ Convey("and should have exactly the same content as the original file", func() {
+ args = []string{"get", "lorem_ipsum_287613_bytes.txt"}
+ So(err, ShouldBeNil)
+ mfAfter, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+
+ mfAfter.StorageOptions.LocalFileName = "lorem_ipsum_copy.txt"
+ str, err = mfAfter.Run(false)
+ So(err, ShouldBeNil)
+ So(len(str), ShouldNotEqual, 0)
+
+ loremIpsumOrig, err := os.Open(util.ToUniversalPath("testdata/lorem_ipsum_287613_bytes.txt"))
+ So(err, ShouldBeNil)
+
+ loremIpsumCopy, err := os.Open("lorem_ipsum_copy.txt")
+ So(err, ShouldBeNil)
+
+ Convey("compare the copy of the lorem ipsum file with the original 1KB at a time", func() {
+ dataBytesOrig := make([]byte, 1024)
+ dataBytesCopy := make([]byte, 1024)
+
+ defer loremIpsumOrig.Close()
+ defer loremIpsumCopy.Close()
+
+ var nReadOrig, nReadCopy int
+
+ for {
+ nReadOrig, err = loremIpsumOrig.Read(dataBytesOrig)
+
+ // err should either be nil
+ // or io.EOF --> indicating end of file
+ So(err, ShouldBeIn, []error{nil, io.EOF})
+
+ if nReadOrig == 0 {
+ break
+ }
+
+ nReadCopy, err = loremIpsumCopy.Read(dataBytesCopy)
+ So(err, ShouldBeNil)
+
+ So(nReadOrig, ShouldEqual, nReadCopy)
+ So(bytes.Compare(dataBytesOrig, dataBytesCopy), ShouldEqual, 0)
+ }
+ })
+
+ Reset(func() {
+ err = os.Remove("lorem_ipsum_copy.txt")
+ So(err, ShouldBeNil)
+ })
+
+ })
+
+ })
+
+ })
+
+ Convey("Testing the 'delete' command with a file that is in GridFS should", func() {
+ args := []string{"delete", "testfile2"}
+
+ mf, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+
+ Convey("delete the file from GridFS", func() {
+ str, err := mf.Run(false)
+ So(err, ShouldBeNil)
+ So(len(str), ShouldNotEqual, 0)
+
+ Convey("check that the file has been deleted from GridFS", func() {
+ args = []string{"list", ""}
+ mfAfter, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+
+ str, err = mfAfter.Run(false)
+ So(err, ShouldBeNil)
+
+ lines := cleanAndTokenizeTestOutput(str)
+ So(len(lines), ShouldEqual, len(filesExpected)-1)
+
+ filesGotten, bytesGotten := getFilesAndBytesFromLines(lines)
+
+ So(filesGotten, ShouldNotContain, "testfile2")
+ So(bytesGotten, ShouldNotContain, bytesExpected[1])
+ })
+ })
+ })
+
+ Convey("Testing the 'delete_id' command with a file that is in GridFS should", func() {
+ // hack to grab an _id
+ args := []string{"get", "testfile2"}
+ mf, _ := simpleMongoFilesInstance(args)
+ idString := idOfFile(mf, "testfile2")
+
+ args = []string{"delete_id", idString}
+ mf, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+
+ Convey("delete the file from GridFS", func() {
+ str, err := mf.Run(false)
+ So(err, ShouldBeNil)
+ So(len(str), ShouldNotEqual, 0)
+
+ Convey("check that the file has been deleted from GridFS", func() {
+ args = []string{"list", ""}
+ mfAfter, err := simpleMongoFilesInstance(args)
+ So(err, ShouldBeNil)
+ So(mf, ShouldNotBeNil)
+
+ str, err = mfAfter.Run(false)
+ So(err, ShouldBeNil)
+
+ lines := cleanAndTokenizeTestOutput(str)
+ So(len(lines), ShouldEqual, len(filesExpected)-1)
+
+ filesGotten, bytesGotten := getFilesAndBytesFromLines(lines)
+
+ So(filesGotten, ShouldNotContain, "testfile2")
+ So(bytesGotten, ShouldNotContain, bytesExpected[1])
+ })
+ })
+ })
+
+ Reset(func() {
+ So(tearDownGridFSTestData(), ShouldBeNil)
+ })
+ })
+
+}
diff --git a/src/mongo/gotools/mongofiles/options.go b/src/mongo/gotools/mongofiles/options.go
new file mode 100644
index 00000000000..2df4774ebbc
--- /dev/null
+++ b/src/mongo/gotools/mongofiles/options.go
@@ -0,0 +1,53 @@
+package mongofiles
+
+var Usage = `<options> <command> <filename or _id>
+
+Manipulate GridFS files using the command line.
+
+Possible commands include:
+ list - list all files; 'filename' is an optional prefix which listed filenames must begin with
+ search - search all files; 'filename' is a substring which listed filenames must contain
+ put - add a file with filename 'filename'
+ get - get a file with filename 'filename'
+ get_id - get a file with the given '_id'
+ delete - delete all files with filename 'filename'
+ delete_id - delete a file with the given '_id'
+
+See http://docs.mongodb.org/manual/reference/program/mongofiles/ for more information.`
+
+// StorageOptions defines the set of options to use in storing/retrieving data from server.
+type StorageOptions struct {
+ // DB is the database to use; defaults to 'test' if none is specified
+ DB string `short:"d" value-name:"<database-name>" default:"test" default-mask:"-" long:"db" description:"database to use (default is 'test')"`
+
+ // 'LocalFileName' is an option that specifies what filename to use for (put|get)
+ LocalFileName string `long:"local" value-name:"<filename>" short:"l" description:"local filename for put|get"`
+
+ // 'ContentType' is an option that specifies the Content/MIME type to use for 'put'
+ ContentType string `long:"type" value-name:"<content-type>" short:"t" description:"content/MIME type for put (optional)"`
+
+ // if set, 'Replace' will remove other files with same name after 'put'
+ Replace bool `long:"replace" short:"r" description:"remove other files with same name after put"`
+
+ // GridFSPrefix specifies what GridFS prefix to use; defaults to 'fs'
+ GridFSPrefix string `long:"prefix" value-name:"<prefix>" default:"fs" default-mask:"-" description:"GridFS prefix to use (default is 'fs')"`
+
+ // Specifies the write concern for each write operation that mongofiles writes to the target database.
+ // By default, mongofiles waits for a majority of members from the replica set to respond before returning.
+ WriteConcern string `long:"writeConcern" value-name:"<write-concern>" default:"majority" default-mask:"-" description:"write concern options e.g. --writeConcern majority, --writeConcern '{w: 3, wtimeout: 500, fsync: true, j: true}' (defaults to 'majority')"`
+}
+
+// Name returns a human-readable group name for storage options.
+func (*StorageOptions) Name() string {
+ return "storage"
+}
+
+// InputOptions defines the set of options to use in retrieving data from the server.
+type InputOptions struct {
+ ReadPreference string `long:"readPreference" value-name:"<string>|<json>" description:"specify either a preference name or a preference json object"`
+}
+
+// Name returns a human-readable group name for input options.
+func (*InputOptions) Name() string {
+ return "query"
+}
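
The struct tags above follow the go-flags style option parser used across the tools: long and short name the flags, value-name labels the argument in help output, and default/default-mask control the default value and how it is displayed. A hedged sketch with a hypothetical option:

    // hypothetical example: registers -l/--local, rendered in help roughly as
    //   -l, --local=<filename>    local filename
    type exampleOptions struct {
    	LocalFileName string `long:"local" short:"l" value-name:"<filename>" description:"local filename"`
    }
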
diff --git a/src/mongo/gotools/mongofiles/testdata/lorem_ipsum_287613_bytes.txt b/src/mongo/gotools/mongofiles/testdata/lorem_ipsum_287613_bytes.txt
new file mode 100644
index 00000000000..1b7d5ed8c26
--- /dev/null
+++ b/src/mongo/gotools/mongofiles/testdata/lorem_ipsum_287613_bytes.txt
@@ -0,0 +1,893 @@
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ultricies eu libero sed aliquet. Mauris nec sapien dolor. Duis faucibus finibus nisl. Pellentesque vulputate eros vitae euismod ornare. Vestibulum ullamcorper leo quis porta cursus. Fusce sem purus, semper id vehicula ut, tempus ac neque. Nunc vel urna vitae eros imperdiet feugiat et eget quam. Interdum et malesuada fames ac ante ipsum primis in faucibus. Sed posuere odio vitae fermentum bibendum. Nullam fermentum, arcu vitae euismod pulvinar, dui nulla ullamcorper enim, vel cursus lacus eros ac arcu.
+
+Maecenas eu nisi in lorem congue cursus sed quis lacus. Nam ultrices tortor urna, vel rhoncus magna semper et. Praesent vel interdum ante. Nullam ultrices accumsan quam, vitae maximus nulla finibus eget. Donec ac vehicula ipsum, rhoncus interdum sapien. Vestibulum vitae dui ut lorem sodales mattis. Nunc tempus sapien nec tellus auctor, ut iaculis urna iaculis. Donec eu purus sed felis blandit commodo. Vivamus pharetra velit tincidunt laoreet feugiat. Quisque gravida odio efficitur urna pharetra, ac posuere libero posuere. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Fusce tortor quam, efficitur ac elit quis, dignissim vulputate enim. Sed blandit tellus in orci efficitur, id posuere mauris sagittis. Nam rhoncus rutrum lorem sit amet aliquam. Cras in semper lacus.
+
+Pellentesque justo metus, malesuada sit amet justo vel, semper imperdiet nisl. Nulla euismod, quam non accumsan euismod, nisi odio pretium erat, a gravida neque neque ac nisi. Sed cursus vulputate urna ut pellentesque. Donec efficitur purus vitae magna ornare, sed imperdiet libero dapibus. Fusce a elit fringilla, iaculis tortor in, fermentum ipsum. Morbi suscipit, velit sit amet faucibus cursus, purus turpis auctor ante, vestibulum consectetur enim ligula a massa. Nam faucibus, massa et gravida rutrum, ante elit mattis sapien, quis vulputate nisl nibh sit amet nibh. Nulla facilisi. Vivamus nec lacus ac elit eleifend efficitur. Nam at tellus quis nisi mattis tincidunt dictum sed justo. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec quis nibh non diam porta condimentum. Quisque quis tincidunt leo, non facilisis erat. Cras nulla velit, faucibus sed aliquam quis, dignissim rutrum nisl.
+
+Morbi in consequat neque, et tincidunt sem. Mauris sit amet elit purus. Etiam semper luctus nunc et suscipit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Pellentesque pellentesque purus eget enim ornare, quis egestas risus rutrum. Nam non orci neque. Sed molestie ante sed ipsum tempor porttitor quis ut diam. Curabitur nec pretium massa. Integer molestie nec sapien in tincidunt. Nunc eu mauris sed metus commodo pulvinar. Maecenas a magna vel quam malesuada ultrices. Nam eu volutpat elit. Nulla finibus lacus sem, cursus iaculis quam porta id. Nullam pharetra posuere dui ut blandit. Aenean aliquet a enim et iaculis. Maecenas tristique, diam id elementum placerat, mi magna ultricies elit, sit amet commodo elit nibh at arcu.
+
+Nunc et porttitor sem. Maecenas vitae metus non est luctus pretium sit amet at diam. Praesent sapien magna, aliquet vel placerat quis, consequat vel diam. Mauris posuere elit sed urna laoreet, viverra hendrerit neque finibus. Fusce sapien ligula, commodo ac sapien quis, lacinia efficitur nunc. In vitae mollis arcu. Integer luctus efficitur diam. Vestibulum ligula velit, tincidunt ac rhoncus quis, dictum eget lorem. Aliquam erat volutpat. Nam vel lacus arcu. Donec ut elit scelerisque, convallis arcu sit amet, molestie magna.
+
+Aliquam lobortis hendrerit porta. Curabitur quis sodales magna. Mauris aliquam, risus non luctus semper, nulla quam commodo enim, ac dictum mi libero eget ipsum. Proin nec magna malesuada, rutrum mauris vitae, imperdiet lectus. Duis sit amet velit ac dolor facilisis auctor vitae vitae tellus. Duis interdum imperdiet blandit. Donec interdum tellus egestas vehicula tincidunt. Sed et lacus vitae libero convallis accumsan. Etiam id pretium mi. Maecenas vulputate vitae metus sed tristique. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas nec tincidunt elit, vel lacinia tellus. Nunc dictum, elit sit amet ultrices dignissim, odio velit pharetra magna, et congue nunc ligula sit amet leo. Cras tincidunt ultricies mollis. Nulla commodo pharetra dolor a tincidunt. Praesent congue semper sapien, eget placerat velit pulvinar sit amet.
+
+Ut dictum magna ut eleifend aliquet. Duis scelerisque lacus nec arcu convallis, ut maximus dui rhoncus. Donec non nunc blandit, efficitur leo a, ultrices nibh. Nulla suscipit gravida fringilla. Suspendisse hendrerit augue sit amet lacus posuere venenatis nec at sem. Nulla ac auctor purus. Cras hendrerit elit et tempus imperdiet. Sed vel lobortis lectus. Nulla ipsum turpis, aliquet sed metus et, eleifend tristique mi. Donec vitae nisl risus. Interdum et malesuada fames ac ante ipsum primis in faucibus. Aliquam mollis tortor quam, ac tempus eros suscipit in. Quisque feugiat felis at lectus auctor, eget cursus justo gravida. Vivamus commodo odio eu ex sagittis vehicula. Quisque posuere iaculis nulla sed ullamcorper. Vivamus ligula nibh, sodales nec iaculis sit amet, sagittis aliquam neque.
+
+Mauris et euismod mauris, a faucibus justo. Proin laoreet a ex sed rhoncus. Curabitur commodo, leo vitae pellentesque sodales, odio justo congue purus, ut consequat nisl diam et ex. Nulla facilisi. Suspendisse fringilla vulputate purus, non dapibus elit sollicitudin id. Nullam sed felis enim. Fusce ac sagittis purus, eu gravida neque. Nunc congue lorem non dui condimentum feugiat. Vestibulum varius risus diam, malesuada ornare arcu hendrerit sit amet.
+
+Praesent maximus magna metus, quis posuere nisi dictum sit amet. Vestibulum aliquet, lacus ut porttitor auctor, neque libero ullamcorper ligula, eu gravida quam elit eu ante. Duis hendrerit sagittis arcu, sit amet rhoncus ipsum. Integer sed ullamcorper libero. In eget malesuada diam. Quisque id nulla diam. Nunc facilisis lectus condimentum metus malesuada, nec pulvinar massa fringilla. Proin tristique elit vel leo aliquam, vel tempor dui vulputate. Donec maximus laoreet tempor. Fusce nec nunc eget nunc vehicula convallis ut ullamcorper nibh.
+
+Morbi luctus, lectus at accumsan vehicula, diam libero imperdiet metus, quis semper arcu nisl non eros. In mattis urna sollicitudin magna suscipit vestibulum nec non massa. Nullam et odio arcu. Duis suscipit sodales turpis id porta. Vestibulum dignissim sit amet mauris vitae lacinia. Nulla porttitor id risus a tristique. Curabitur pretium ac tellus tempor molestie. Quisque eget porttitor enim, eu pretium dolor. Vestibulum velit nunc, elementum vitae risus eu, venenatis condimentum lacus. Nam molestie elit velit. Etiam lorem massa, tempus id magna vitae, posuere vestibulum diam. Nulla nec mauris quis velit vulputate semper. Integer nec quam nisi. Sed ut lacus id lacus porta commodo. Aenean neque justo, fringilla ac felis sed, hendrerit aliquet ipsum. Suspendisse mattis sem sit amet egestas pellentesque.
+
+Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Mauris lorem ligula, imperdiet vitae sagittis vitae, consectetur non ex. Donec tincidunt iaculis nisi quis faucibus. Phasellus vel arcu nunc. Nam tristique molestie lacus. Aenean ut dui felis. Curabitur in ultricies purus. Etiam porttitor sodales enim vitae venenatis. In rutrum sit amet dui at eleifend. Phasellus mattis orci orci, eget ornare libero vulputate a. Proin aliquam augue ac ipsum molestie gravida. Phasellus lobortis lacus vel purus porta, quis blandit elit euismod. Curabitur nec sapien sit amet sapien fringilla consectetur et in ligula. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vestibulum consectetur, erat eu ornare interdum, ex dolor commodo est, quis gravida ligula ipsum sed tellus. Integer aliquet aliquet libero, id tincidunt est suscipit id.
+
+In quis venenatis ligula. Sed cursus lacus ante, nec porta mauris fringilla eu. Integer id purus in libero efficitur luctus vitae eget eros. Quisque ipsum ipsum, tincidunt quis neque nec, fringilla dignissim sapien. Fusce sollicitudin purus at elit tincidunt varius. Etiam feugiat molestie porttitor. Sed vitae nisi eget ante luctus vulputate nec eu diam.
+
+Pellentesque blandit arcu quis vulputate dictum. Fusce semper nibh eu justo ullamcorper condimentum. Nulla dolor metus, ornare nec tellus nec, bibendum ultricies orci. Sed non hendrerit dui. Donec pretium, felis ut aliquet pellentesque, purus sapien congue ligula, in ornare orci nisi quis est. Nunc porttitor turpis nec elit scelerisque aliquam. Aliquam at congue tortor.
+
+Ut congue, augue a euismod fringilla, risus tellus ullamcorper augue, lobortis interdum turpis nibh ut ante. Phasellus sed tristique diam, vitae finibus ex. Nullam et varius tortor, sit amet rhoncus nisi. Vestibulum sapien purus, efficitur congue congue non, ultricies eu quam. Phasellus ac ligula hendrerit, rhoncus orci at, bibendum tellus. Suspendisse in felis nec ex ultricies mollis non eget ipsum. Suspendisse potenti. Nullam commodo sodales finibus. Donec rutrum tincidunt posuere. Proin vitae euismod lorem. Aliquam id diam ipsum.
+
+Phasellus neque nibh, porta vel dignissim eu, blandit quis eros. Vestibulum ut accumsan velit. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Aliquam finibus dignissim dui. Vivamus aliquet ultrices luctus. Mauris nec elit eget lorem pretium molestie sit amet sit amet est. Nulla arcu nisl, imperdiet ac sagittis et, faucibus elementum augue. Nam non volutpat purus. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae;
+
+Pellentesque bibendum bibendum quam eget dictum. Phasellus dictum odio at nulla tincidunt ornare. Maecenas ac diam id sem placerat faucibus. Pellentesque blandit at ligula ac sollicitudin. Etiam hendrerit porta justo, in varius orci lobortis vel. Pellentesque justo orci, sagittis id est vel, condimentum suscipit sem. Nullam convallis mollis elementum.
+
+Donec vel interdum libero, eget pellentesque quam. Nam maximus purus in egestas ornare. Duis fermentum, mauris non accumsan mattis, libero lacus posuere tellus, a semper elit nisi quis tortor. Vivamus eu finibus neque, id varius velit. Mauris mattis felis maximus volutpat porta. Praesent id ante a velit dignissim tristique quis sed leo. Ut purus turpis, consequat a tortor nec, lobortis sodales lectus.
+
+Sed venenatis faucibus nibh id sollicitudin. Vestibulum fringilla urna eget vestibulum auctor. Nulla vitae turpis congue, maximus purus eget, maximus ipsum. Curabitur bibendum ut urna at congue. Mauris posuere ipsum urna, a dictum risus dignissim eget. Sed eu magna eu ex aliquam posuere vel quis purus. Etiam dui velit, mattis in eros et, egestas mollis leo. Vestibulum blandit vulputate aliquam. Donec egestas risus vitae est aliquam bibendum. Donec a enim ut lorem cursus commodo. Nam faucibus ut justo id dapibus. Sed laoreet augue quis imperdiet venenatis. Phasellus ut ligula consequat, sagittis neque vulputate, imperdiet nulla. Phasellus pretium facilisis neque at sodales. Etiam et accumsan tellus. Morbi sit amet tincidunt leo, sit amet fringilla orci.
+
+Duis ante justo, lacinia a dui sit amet, pulvinar consectetur felis. Aliquam sagittis nibh at velit sodales pretium. Praesent facilisis eros eu orci pharetra, vitae porttitor justo pretium. Morbi ullamcorper sapien nec volutpat vehicula. Mauris neque massa, fermentum at sem ultricies, venenatis faucibus nulla. Etiam sollicitudin molestie pellentesque. Donec ullamcorper iaculis tristique. Nulla sed elit massa.
+
+Cras pulvinar tellus vitae dictum dapibus. Maecenas sollicitudin sed dui eu pharetra. Donec ligula ex, sagittis eget nibh nec, dignissim cursus purus. Proin semper dolor a sapien pretium euismod. Nam volutpat elementum nibh in hendrerit. Sed sed convallis leo. Quisque pharetra risus vel dui tincidunt dapibus. Ut enim est, pellentesque in aliquam non, ultricies vel ligula. Praesent luctus libero vitae commodo pretium. Aenean mattis ullamcorper accumsan. Suspendisse tempus dictum vehicula. Quisque ultricies nibh nisl. Proin tincidunt tincidunt magna non pharetra. In facilisis ligula sit amet consequat pellentesque.
+
+Phasellus scelerisque, ipsum vel porta vestibulum, tellus lacus condimentum odio, et tincidunt ex tellus sed risus. Curabitur a ligula suscipit, auctor felis in, tristique mauris. Donec auctor mollis mattis. In condimentum ante vel felis volutpat efficitur. Morbi venenatis posuere laoreet. Pellentesque id vehicula leo. Nam vitae tempor eros. Nullam gravida sed ex et molestie. Proin ac blandit sapien. Ut dictum risus vel ligula sollicitudin, sit amet semper leo vulputate. Proin aliquam tortor libero, sit amet sollicitudin ex rutrum et. Nulla consectetur faucibus mauris, eu condimentum nibh vulputate non.
+
+Fusce auctor feugiat nulla id consequat. Nullam sagittis ligula sit amet arcu iaculis, sit amet faucibus ligula gravida. Praesent non orci ante. Suspendisse eu lectus non massa blandit congue id at dui. Fusce neque lectus, faucibus rutrum fringilla ut, luctus eget arcu. Morbi sed libero eleifend, cursus orci et, ornare risus. Nunc sit amet elementum augue. Sed justo quam, tincidunt ut interdum molestie, aliquam sit amet orci. Aliquam pretium euismod euismod. Donec tristique, mauris non tincidunt scelerisque, lectus odio hendrerit turpis, ac fringilla neque neque non sapien. Vivamus pulvinar a libero a pulvinar. Integer eget euismod magna. Mauris velit ipsum, efficitur in lacus et, cursus dignissim leo. Duis a tincidunt risus, a pellentesque lorem.
+
+Pellentesque vel porta metus. Donec quis neque nec justo aliquet aliquam. Nullam non neque eget sapien dapibus pretium eget id diam. Nunc velit purus, consequat eu sodales et, cursus eu felis. Curabitur non nunc neque. Donec porttitor augue nisl, id sagittis leo iaculis et. Praesent iaculis, sem a accumsan tristique, turpis magna efficitur ex, a vestibulum leo dui eget sapien. Sed ut diam sed ipsum malesuada ullamcorper. Phasellus pharetra rhoncus vulputate. Nulla at nulla ut risus volutpat euismod. Pellentesque venenatis congue justo, et condimentum tortor commodo ut.
+
+Suspendisse commodo mattis nulla eu sodales. Etiam fringilla risus sed dui laoreet, a aliquam metus luctus. Nullam fringilla dictum ex, id suscipit quam consequat non. Sed sagittis euismod lorem, a dapibus nibh molestie in. Morbi blandit libero justo, sit amet pharetra orci vestibulum at. Ut fermentum efficitur placerat. Curabitur venenatis convallis lorem vitae consequat. Etiam at efficitur ligula. Suspendisse in tortor ut quam sollicitudin auctor.
+
+Phasellus mollis a ligula vel posuere. Morbi luctus ultrices ligula, id malesuada est consectetur id. Phasellus luctus tincidunt erat facilisis ultricies. Fusce accumsan pulvinar lorem, vitae porta sem tempus nec. Vestibulum id venenatis lorem. Sed placerat, dui pharetra dictum vulputate, massa dui dictum libero, nec volutpat nisl erat at felis. Nullam dignissim velit quis mauris iaculis suscipit.
+
+Ut non felis non turpis tempor suscipit. Nullam nibh tortor, egestas pharetra turpis sed, gravida suscipit est. Quisque accumsan orci id nisl congue placerat. Phasellus posuere neque id blandit facilisis. Praesent consequat sit amet tortor nec ornare. Curabitur felis elit, bibendum quis ex ac, fringilla tristique neque. Praesent semper magna eu dapibus sagittis. In hac habitasse platea dictumst. In in lacus rhoncus, tristique dui in, condimentum massa. Aliquam eu quam tincidunt, finibus nisl non, tincidunt mi. Donec porttitor consectetur sem, sit amet ultrices mauris lobortis quis. Etiam ut vulputate risus. Morbi aliquet tortor ut neque molestie ullamcorper. Vivamus volutpat lectus velit, at efficitur libero hendrerit sit amet. Proin aliquam ante nec quam viverra, non cursus nulla cursus. Vestibulum ac sollicitudin orci.
+
+Quisque lorem sem, facilisis sit amet suscipit a, consectetur vitae arcu. Maecenas ex orci, laoreet eu ex eu, posuere tristique turpis. Mauris vitae lacinia est, sed efficitur turpis. Sed erat purus, pharetra vitae urna convallis, tempor rutrum nunc. Nam sed sodales erat, sed maximus libero. Vivamus id ipsum eu libero accumsan rutrum. Suspendisse vitae maximus metus, vitae tempor elit. Aliquam iaculis ultricies congue. Fusce sed magna nibh. Etiam varius fermentum urna et interdum.
+
+Fusce sagittis commodo libero vitae scelerisque. Cras pulvinar, odio nec fermentum bibendum, felis enim dignissim purus, nec elementum enim purus non orci. In aliquam aliquam nisi, at luctus lorem interdum quis. Nunc ut erat facilisis, pharetra urna a, convallis sem. Donec quis est ut ex feugiat fringilla id sed lectus. Maecenas quis justo eu mi convallis rutrum. Mauris tincidunt aliquet lectus, in malesuada tortor volutpat vel. Nunc pellentesque pellentesque convallis. Maecenas odio dolor, molestie quis consectetur eu, ornare id nibh. Praesent at facilisis ipsum. Vestibulum tempus ac dolor ut faucibus. Cras sem leo, venenatis sit amet auctor eu, gravida et tortor. Vestibulum ut urna commodo sem dictum hendrerit.
+
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse eu mi libero. In rutrum leo id odio lacinia pellentesque. In justo metus, ultrices eu sagittis id, elementum quis nisl. Nullam eu molestie justo. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nulla eu egestas purus. Fusce lobortis vestibulum ullamcorper. Maecenas vehicula eu nibh vel cursus. Aenean dapibus enim quis ligula condimentum, quis sodales mi mattis. Ut elit massa, vestibulum a est vitae, sagittis pulvinar felis. Vestibulum facilisis nisi vel risus cursus, a convallis odio aliquam. Vestibulum eu pulvinar turpis. Quisque eu nibh ut felis volutpat porttitor.
+
+Phasellus sapien arcu, imperdiet nec condimentum vitae, imperdiet vitae justo. Vestibulum ligula metus, blandit in vestibulum ac, auctor ut felis. Vivamus consectetur, neque molestie tincidunt finibus, dolor arcu commodo nunc, nec lobortis neque nisi eu magna. Sed interdum erat in mattis posuere. Mauris dui orci, luctus et congue sit amet, efficitur molestie arcu. Duis et ultrices erat, non fermentum ante. Aenean vitae mi quam. Mauris suscipit diam in felis porttitor sollicitudin. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Mauris tristique scelerisque neque, id ullamcorper leo blandit ac. Aliquam interdum eros sit amet magna consequat condimentum. Vivamus sit amet purus quam. Duis ullamcorper mi tellus, in aliquet ligula pharetra vitae. Nulla finibus aliquet urna scelerisque molestie. Nam laoreet massa eu semper sodales.
+
+Nullam auctor, nunc a elementum lobortis, tortor nisi auctor enim, eget commodo leo elit porttitor mi. Nullam accumsan porttitor nibh. Aliquam tempor purus sem, et fermentum odio efficitur rutrum. Nullam rhoncus libero auctor pretium venenatis. Integer luctus magna nulla, ac pulvinar massa aliquet id. Cras ac felis rhoncus velit tincidunt maximus. Praesent vitae posuere neque. Quisque libero orci, vulputate sit amet aliquam sit amet, rutrum eu nunc. Etiam non sem et mi malesuada fermentum. Duis nisl elit, elementum at sollicitudin in, vehicula eu urna. Aenean vitae ligula est. Praesent quis rhoncus quam, et aliquet dui. Mauris lacus enim, porttitor a tempus non, porta sit amet nisi. Nullam sed leo congue ipsum cursus feugiat quis at dolor. Aenean sit amet arcu non risus hendrerit consectetur eu ultrices eros. Aliquam a lectus dapibus, ultrices urna sit amet, sagittis ex.
+
+Nulla imperdiet leo sit amet leo venenatis pellentesque. Etiam pulvinar posuere blandit. Suspendisse finibus vel nisi efficitur tempor. Duis in facilisis velit. Integer ipsum massa, pharetra in aliquam quis, commodo sit amet turpis. Suspendisse pellentesque diam eu pharetra placerat. Donec a cursus nunc. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Suspendisse dapibus augue velit, ut ullamcorper arcu varius ac. Nullam et lorem tempor, molestie ante efficitur, ultrices ante. Aliquam non dictum tortor. Aliquam leo mauris, consequat vitae facilisis pharetra, bibendum non purus.
+
+Maecenas in vehicula orci. Nunc metus sapien, fermentum in blandit a, elementum et nulla. Aliquam vitae porta sem. Maecenas euismod finibus sem, sed fermentum turpis viverra ut. Quisque scelerisque ex eu tempus laoreet. Nullam tempus hendrerit nibh. Mauris placerat nisl a odio molestie suscipit eget a tortor.
+
+Mauris varius dolor vehicula sem accumsan, eu egestas nulla luctus. Cras condimentum dignissim lacus in posuere. Suspendisse potenti. Vivamus ornare lacus facilisis finibus vehicula. Etiam at lectus ac lorem tincidunt vehicula in nec ipsum. Duis a lacus sem. Proin efficitur nisl sed massa pulvinar congue. Aliquam ut vestibulum ipsum. Duis semper vitae nibh quis suscipit. Donec cursus ante vitae dolor dapibus vulputate.
+
+Integer placerat aliquet lacus, gravida vehicula tortor dictum sit amet. Interdum et malesuada fames ac ante ipsum primis in faucibus. Etiam ac sem nec lorem volutpat dictum ac ut turpis. Pellentesque vitae turpis ut mauris fermentum luctus. Etiam aliquam quis mauris ac molestie. Nullam eleifend rhoncus magna sit amet malesuada. Ut tempus ipsum ac mi feugiat, sed sollicitudin odio mattis. Phasellus eget luctus erat.
+
+Curabitur placerat lacus et dolor feugiat aliquam. Fusce rhoncus massa urna, et consectetur leo dignissim eu. Pellentesque sit amet ligula lectus. Fusce nulla ligula, laoreet eu condimentum eu, commodo at neque. Fusce vitae ligula a arcu interdum ultrices. Mauris condimentum mauris felis, non ultrices turpis pulvinar eget. Ut sit amet pretium mauris. Donec velit mauris, euismod quis mauris nec, imperdiet eleifend ipsum.
+
+Donec eget tristique sapien, non dignissim risus. Vivamus nulla urna, lobortis vel placerat quis, iaculis eget elit. Maecenas tincidunt elementum justo ut consectetur. Aliquam condimentum nisl porttitor eros faucibus cursus. Donec varius feugiat rutrum. Duis eget elementum orci, id blandit odio. Duis eget aliquet metus. Aliquam ac justo risus. In sed erat vitae augue tempor consequat eu ut tellus.
+
+Cras vestibulum facilisis nisi. Praesent ac turpis ante. Integer a tincidunt nisi. Nullam vitae elementum sem, semper dictum odio. Integer eleifend finibus sapien non sagittis. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. In tempor placerat convallis. Curabitur elementum sit amet sapien vel accumsan. Nulla ac laoreet lorem. Vestibulum lacinia sodales sem. Aenean consectetur ultricies ligula, sit amet aliquet dui suscipit eget.
+
+Ut varius rutrum erat, ut feugiat eros condimentum eget. Morbi eget nulla nulla. In id eleifend turpis. Sed dignissim, ante et sollicitudin luctus, urna felis aliquet ipsum, et tincidunt lorem tellus commodo sapien. Cras porta elit non sapien venenatis auctor sit amet vulputate risus. Donec nec sem nisl. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+
+In vitae fermentum purus, quis posuere dui. Vivamus accumsan varius nulla a convallis. Maecenas ut odio sit amet orci pellentesque luctus ut in quam. Praesent ac nulla tellus. Proin at ante varius, tristique magna sed, aliquet libero. Ut et lacus in metus aliquet tincidunt. Integer at ornare felis. Aliquam eu ornare neque, tincidunt pretium massa. Donec mi arcu, finibus egestas metus ut, vestibulum pulvinar risus. Duis vel consequat urna. Quisque in mi eget ex bibendum finibus. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Maecenas vel elit vel dolor rutrum lacinia. Maecenas pulvinar, ante in molestie varius, tortor erat malesuada purus, non commodo enim sem sit amet augue. Donec vitae dictum augue.
+
+Morbi lacinia velit id orci dictum bibendum. Etiam sit amet felis varius, molestie quam eget, sollicitudin magna. Aenean dignissim tortor a libero lacinia congue. Nulla pharetra nunc facilisis dapibus placerat. Nunc pretium consequat viverra. Donec mattis gravida tellus non auctor. Aliquam commodo ac dolor id convallis. Proin nisi velit, ultrices vel bibendum vitae, egestas a eros. Vivamus orci nisl, egestas in lectus in, congue condimentum mi. Duis elit leo, consequat commodo condimentum eget, scelerisque in augue.
+
+Maecenas euismod, nibh nec ullamcorper posuere, lacus leo vehicula libero, et fermentum orci elit a tellus. Duis non sapien lobortis, posuere nunc id, tempor massa. Vivamus semper tortor eget mi interdum sagittis. Sed eu luctus mi. Aliquam maximus ante ex, a sollicitudin ipsum rutrum sed. Ut laoreet porttitor turpis sit amet commodo. Donec finibus mollis odio a luctus. Proin at malesuada erat.
+
+Cras varius metus sed sem rhoncus, sit amet fringilla risus imperdiet. Fusce vel nulla nec sapien hendrerit scelerisque. Duis non felis consequat, tempor quam sit amet, consectetur risus. Nam sollicitudin luctus felis, at fringilla libero auctor facilisis. Donec interdum varius magna, eget euismod magna pretium nec. Integer posuere posuere nisi, eu maximus enim cursus quis. Pellentesque porttitor eros nec tellus blandit, sit amet vulputate purus feugiat. Sed fringilla eu tellus in mattis. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Vestibulum ut molestie justo, in convallis urna. Proin a urna consectetur, dignissim tellus nec, rutrum odio. Donec in pellentesque dui. Curabitur vel finibus dolor. Donec convallis pellentesque mattis. Sed tempus velit hendrerit, sodales urna eget, posuere nulla.
+
+Vestibulum eget ligula bibendum diam pulvinar gravida. Sed sed viverra nibh. Cras lobortis augue neque, in eleifend tortor dapibus euismod. In at ante id lorem facilisis vulputate. Nam venenatis lorem quis justo congue, ut pharetra nisi feugiat. Pellentesque egestas, sapien et sollicitudin convallis, ligula erat rutrum elit, quis convallis nisi ante a enim. In hac habitasse platea dictumst. Proin lacinia maximus lacus id mattis. Nam gravida dui orci, nec scelerisque massa consectetur at. Nullam a mauris bibendum, condimentum eros vel, mollis tortor. Duis faucibus tristique augue. Vivamus porttitor vestibulum metus a elementum. Nulla egestas ligula ac purus placerat, quis bibendum mauris fringilla.
+
+Proin eros magna, aliquet sed vulputate vel, vehicula in velit. Ut eu elementum urna, ornare laoreet orci. Donec vehicula dapibus justo. Sed id metus quis felis rhoncus aliquam nec eu tellus. Duis eu purus elementum, ultricies sem id, mattis lacus. Suspendisse potenti. Nam semper ex in molestie gravida. Donec eleifend massa turpis, nec viverra sapien pharetra vel. Morbi vestibulum efficitur interdum. Maecenas tincidunt eros sit amet magna imperdiet, eu dapibus nunc maximus. Duis rutrum volutpat elit.
+
+In justo ligula, euismod ut ante eu, eleifend mattis arcu. Mauris suscipit porta molestie. Sed blandit et dolor sed finibus. Aliquam nec maximus nunc. Aenean et enim porttitor, egestas ligula sit amet, tincidunt purus. Cras pulvinar arcu a leo vehicula, non volutpat nisi interdum. Proin venenatis lobortis tempor. Sed pellentesque, quam eget lobortis lacinia, mauris felis cursus felis, nec pretium risus ipsum in nulla. Maecenas tincidunt lorem vitae velit ornare, vitae porttitor ex efficitur. Pellentesque euismod ac felis a imperdiet. Nunc aliquam, nunc et accumsan suscipit, nunc tortor congue turpis, at volutpat libero dui ac neque. Nunc ut efficitur lectus, nec gravida mauris.
+
+Duis eu urna id lectus finibus varius a sed magna. In vitae leo at lorem suscipit scelerisque. Proin vulputate orci dolor, non elementum diam congue at. Nulla odio sem, pulvinar quis arcu sed, convallis luctus quam. Sed nibh tellus, efficitur condimentum aliquam eu, pulvinar ac quam. In quis enim sem. Sed ullamcorper odio sit amet est volutpat aliquet. Sed tristique, odio ac viverra pulvinar, mauris nisi feugiat arcu, in congue dui enim sit amet metus. Ut eget congue enim, eu maximus velit. Vivamus suscipit, lacus vitae imperdiet vehicula, nunc odio maximus mauris, vitae semper nibh lectus ac augue. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Sed convallis enim quis imperdiet ultrices. Praesent eu ex ex.
+
+Etiam vitae fermentum eros, eget condimentum nisl. Nullam pharetra eros elementum, fermentum lacus a, auctor sapien. Nam at ante ut justo rutrum mattis quis eget nunc. Donec at ligula ante. Phasellus ullamcorper lectus neque, at cursus diam lobortis in. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Phasellus lectus nibh, euismod sit amet tincidunt in, convallis id purus. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas maximus quam quis imperdiet accumsan. Nullam consequat enim arcu, eu rutrum risus dapibus at. Etiam a nibh erat. Nulla eget velit quis dui tincidunt eleifend nec id turpis. Cras ut venenatis augue, a hendrerit sem.
+
+In imperdiet eleifend odio ac blandit. Proin venenatis varius tincidunt. Mauris efficitur purus mauris. Integer pretium ullamcorper elit, nec vestibulum erat aliquet quis. Aliquam eget dolor ultrices, euismod felis tempor, lacinia lectus. Nunc vitae magna sit amet metus suscipit semper. Interdum et malesuada fames ac ante ipsum primis in faucibus. Ut dapibus lectus a lacinia egestas. Integer nisl erat, convallis ac odio vel, facilisis facilisis diam. Maecenas nec est dui. Aenean nec luctus magna. Cras quis urna tempor, rutrum nisi vitae, tempus enim. Aliquam et libero elementum justo lacinia sollicitudin a et quam. Pellentesque dictum imperdiet ipsum, sed porttitor quam ultricies eu. Aliquam sit amet ligula eu odio dapibus feugiat. Quisque ultrices purus blandit urna accumsan volutpat.
+
+Nullam id metus feugiat, ornare neque sit amet, posuere neque. Curabitur molestie ultricies mi, non sodales nibh interdum ac. Sed non scelerisque nunc. Proin sed massa ac magna gravida ullamcorper a nec ex. Praesent placerat augue vel euismod pellentesque. Mauris sollicitudin, orci pulvinar mattis laoreet, felis velit ultrices orci, eget iaculis libero enim laoreet orci. Nulla nec posuere nulla, aliquam sollicitudin elit. Suspendisse potenti. Aliquam erat volutpat. Mauris nec velit mattis nisl placerat consectetur in id magna. Nam ante mi, tristique nec magna at, ultricies varius libero. Nam ac est eget sem aliquet gravida. Quisque viverra tristique lectus ut eleifend.
+
+Etiam et vehicula ex, eget dapibus risus. Phasellus dictum in nunc eu vestibulum. Duis fermentum lectus ut est dignissim molestie. Maecenas euismod elit sed leo efficitur, ut sodales ligula consectetur. Aliquam tristique ultricies sapien, vitae auctor mi egestas ornare. Aenean vel mi at ex mattis consectetur. Aenean nec enim tristique velit cursus placerat. Duis rhoncus nisi lorem, commodo dictum dui sollicitudin imperdiet. Vestibulum tincidunt enim et elit mattis venenatis. Ut vitae rhoncus enim. Morbi tristique, augue ac facilisis gravida, ex nisi ultrices ipsum, id tempus eros ligula sit amet ante. Mauris a nunc augue. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Proin non arcu a nibh dictum malesuada nec nec mauris.
+
+Cras at neque sagittis, iaculis lectus sollicitudin, semper diam. Aliquam sit amet ultrices elit. Sed nibh erat, fermentum in eros at, fringilla rutrum sem. Aenean sapien enim, imperdiet laoreet lacus vitae, faucibus rhoncus nulla. Interdum et malesuada fames ac ante ipsum primis in faucibus. Sed consectetur vehicula feugiat. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nam ac placerat ex. Donec ut cursus ante. Phasellus ornare nunc eget augue vehicula, eget placerat quam pulvinar. Phasellus suscipit metus id fermentum elementum.
+
+Quisque semper sapien eget felis varius posuere. Cras sollicitudin, dui ullamcorper tristique laoreet, arcu sem tempus nulla, vel euismod dolor nulla vitae turpis. Mauris placerat, ex ut pharetra maximus, magna lorem posuere turpis, ut tempor lacus massa et urna. Donec pharetra quam ut tellus accumsan, ac tincidunt metus vehicula. Suspendisse at dapibus urna. Suspendisse placerat blandit porta. Phasellus ullamcorper cursus enim vel lobortis. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nam aliquet dolor eget risus hendrerit, a convallis ipsum sagittis. Fusce at pellentesque diam. Aliquam elementum gravida sollicitudin. Aliquam erat volutpat. Integer ultrices tempus tortor, eget suscipit nulla. Nullam ut convallis diam, non viverra justo.
+
+Cras augue lectus, feugiat sed sagittis sed, dapibus ut augue. Proin a lectus a eros semper maximus ac et diam. Suspendisse et augue pretium, pharetra sapien at, posuere mi. Sed vitae augue leo. Nulla placerat dapibus leo id euismod. Etiam id libero vel tellus egestas faucibus. Donec auctor rhoncus egestas. Curabitur ex magna, commodo nec aliquet ac, volutpat ut ante. Mauris tempus massa sed nunc euismod, non finibus dui semper. In ipsum risus, fringilla quis libero id, commodo sodales sem. Nunc velit dolor, eleifend ut placerat sit amet, fermentum ac sem. Aenean ullamcorper magna enim, ut sodales magna porta at. Nunc sodales lacus quis libero aliquam sodales. Proin lobortis sapien eu mollis egestas. Vivamus id ligula felis.
+
+Suspendisse vel purus sit amet lacus efficitur feugiat sagittis eget neque. Nam et magna non odio convallis pulvinar. Phasellus gravida lobortis tincidunt. Nulla sed sem eget nisl scelerisque lobortis non eu velit. Interdum et malesuada fames ac ante ipsum primis in faucibus. Nulla facilisi. Praesent neque erat, ornare nec mollis nec, convallis placerat enim. Mauris nec elit eu elit vulputate suscipit sed elementum dui.
+
+Ut ultricies elit sem, eu feugiat est venenatis iaculis. Nulla sapien mauris, lacinia a tellus pellentesque, varius viverra arcu. Quisque tincidunt interdum ligula, non bibendum quam cursus rhoncus. Nullam iaculis dolor nisi. Maecenas et mi turpis. Vivamus elit felis, gravida a interdum ac, maximus vitae orci. Fusce egestas elit ut mollis tincidunt. Donec sollicitudin vulputate auctor.
+
+Suspendisse potenti. Nam quis nisl nec leo pulvinar aliquet. Aenean at tellus imperdiet, vulputate elit quis, pulvinar diam. Nunc blandit dolor metus, vitae convallis ipsum facilisis non. Phasellus vehicula mauris at sem egestas, nec ultrices lectus feugiat. Nam volutpat lectus id nulla sagittis pretium. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Pellentesque cursus nulla quis urna vestibulum cursus id sit amet massa. Suspendisse congue enim in tortor placerat, eget convallis sapien ornare. Praesent egestas lacus a elementum molestie. Nulla at porttitor ligula, ultricies ultrices purus. Vivamus rutrum, nisi vitae rutrum tempor, lacus metus semper tortor, vitae placerat mauris nisi a nulla. Ut condimentum pellentesque venenatis. Ut auctor efficitur elit vel placerat. Aenean convallis nibh nec ex auctor ullamcorper.
+
+Aenean ut purus et augue laoreet pulvinar eu vel risus. Aliquam quis nibh ultrices, efficitur nulla sodales, euismod augue. Maecenas faucibus fringilla vulputate. Suspendisse ipsum ante, maximus quis arcu ac, malesuada porttitor augue. Nunc aliquam vehicula risus. Nulla augue neque, efficitur vel erat sed, ultricies placerat turpis. Nam augue ex, aliquam vel tristique et, accumsan nec urna. Quisque in tellus nec eros varius interdum quis eu libero.
+
+Sed tempor justo consequat elit efficitur dignissim. Suspendisse gravida tristique auctor. Phasellus consequat tempus eros non efficitur. Etiam ac turpis non leo congue dapibus sed ullamcorper urna. Etiam nulla enim, dictum ut sem eget, ultricies iaculis augue. Maecenas molestie felis sit amet dolor dictum, quis gravida arcu hendrerit. Pellentesque feugiat erat sed gravida convallis. Aliquam euismod orci elit, a semper nisi ullamcorper in. Duis eleifend accumsan pellentesque. Cras vel nunc facilisis, lobortis leo sit amet, sollicitudin velit.
+
+Donec id sapien quis nisi consectetur ultricies et ut elit. Duis risus nibh, malesuada nec eleifend ac, pretium et enim. Integer eget lacus est. Donec ac ultrices erat. Aliquam tincidunt massa lacinia venenatis vestibulum. Vestibulum ligula nibh, condimentum nec lorem sed, consectetur fermentum nunc. Donec pulvinar egestas sem, at dictum odio faucibus sed. Sed aliquet eu nunc lacinia tristique. Quisque sit amet mattis diam. Praesent justo est, facilisis eget hendrerit quis, sollicitudin sit amet magna. In hac habitasse platea dictumst. Integer egestas augue et auctor elementum. Nullam purus ex, sagittis ac dolor id, commodo maximus justo. Sed ac est eget leo sagittis cursus. Donec lacus nulla, sagittis eget neque sed, volutpat scelerisque mauris.
+
+Aliquam mauris elit, ultrices sed varius sit amet, luctus sed tortor. Proin dignissim magna id tristique porta. Integer finibus dui eu ante finibus, in scelerisque arcu aliquam. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi non velit eget dui auctor feugiat. Sed in est at libero tincidunt tempus. Mauris tincidunt eu nunc nec suscipit. Sed porta mi vulputate, euismod dolor in, lacinia elit. Mauris augue tortor, sodales sit amet magna eu, tempor scelerisque est. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; In a mattis dui. Ut quis purus vestibulum libero scelerisque lobortis et id odio. Fusce at volutpat ligula. Etiam posuere augue tortor, nec tempus nisi ultrices sit amet.
+
+Morbi tristique sed augue sed suscipit. Donec ultrices, ante eu congue viverra, eros orci finibus lacus, quis cursus metus neque ac libero. Mauris ornare mauris nisl, nec cursus elit finibus ac. Suspendisse pharetra placerat posuere. Curabitur eget nisi tincidunt, pretium lectus nec, sagittis enim. Proin id tristique velit. Maecenas sed tellus non purus tincidunt laoreet non non urna. Aliquam erat volutpat. Aenean vel porttitor ante. Morbi interdum massa sed nibh facilisis pharetra. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Donec tristique auctor erat. Proin at dignissim metus, ut ultricies augue. Vestibulum egestas mauris tortor, at aliquet enim pretium in. Duis a fermentum ante, non commodo felis. Sed interdum gravida tortor et porttitor.
+
+Donec eget urna finibus nisi commodo porta. Duis vestibulum erat finibus turpis molestie cursus. Nullam suscipit lorem ultricies, varius lorem et, pulvinar enim. Morbi placerat metus ut lorem ultrices finibus. Vestibulum sed metus sapien. Aliquam finibus velit vel leo porta ultrices. Fusce volutpat lectus purus, iaculis interdum augue ultricies et. Nulla fringilla augue metus, sed eleifend ex elementum et.
+
+Integer nulla diam, tincidunt commodo ligula eget, commodo sodales eros. Vivamus pulvinar dignissim lectus, quis mattis ipsum sodales quis. In varius justo vehicula, suscipit diam in, finibus turpis. Quisque rutrum libero ac leo fringilla, quis aliquet est placerat. Nunc bibendum sapien sit amet viverra eleifend. Mauris tristique ligula et eros rutrum, nec dictum mi volutpat. Ut ut dolor condimentum urna scelerisque auctor. Phasellus a risus enim. Maecenas aliquet tortor at velit pretium, quis congue lorem sollicitudin. Aenean a sem nulla. Nam non nibh tortor. Fusce vitae vehicula urna, et volutpat tellus. Fusce consequat odio id ullamcorper euismod.
+
+Maecenas felis dui, convallis nec dignissim et, tincidunt non erat. Suspendisse id vulputate risus. Curabitur porttitor orci id purus tincidunt tincidunt. Morbi quam libero, facilisis fringilla magna rhoncus, facilisis volutpat nulla. Mauris ultrices egestas ex sed bibendum. Fusce accumsan, dui quis tempus auctor, nulla ligula varius tortor, in lacinia ante sem eu felis. Nullam sed eros sed libero egestas commodo id et diam. Etiam sed pharetra magna, a tempor tortor. Nunc quam velit, porttitor a sodales eget, consectetur sed leo. Mauris leo est, porta eget convallis ac, commodo feugiat lorem. Donec accumsan et dolor quis feugiat. Aenean finibus at velit a molestie. Aenean id malesuada purus. Cras vitae nisi fringilla, vulputate felis vitae, malesuada lorem. Donec eleifend ipsum finibus volutpat placerat. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae;
+
+Suspendisse maximus tempus urna, in dictum risus viverra non. Sed justo metus, rhoncus ut ante quis, mollis posuere ligula. Donec nec lobortis enim. Aliquam facilisis non turpis at venenatis. Quisque nec rhoncus nisl, consectetur dictum augue. Curabitur nulla sapien, porta vitae velit at, luctus sagittis sapien. Integer massa nulla, volutpat vel nibh vel, consectetur tristique sem.
+
+Maecenas rhoncus vel mauris a sollicitudin. Nulla velit lacus, viverra eu tincidunt non, luctus non lorem. Donec et urna non felis luctus lobortis ut sit amet orci. Nulla vulputate, est sit amet interdum malesuada, urna velit varius eros, at efficitur dui nisi et nunc. Cras aliquet orci augue, nec euismod ligula vehicula sit amet. Quisque rutrum egestas tristique. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nullam at bibendum metus. Aenean tempor dictum mauris id sagittis. Donec mollis pellentesque dui, nec malesuada libero tempor nec. Vivamus maximus nisl interdum, malesuada augue ut, posuere elit. Nunc a placerat ex.
+
+Nullam tristique urna ac mi imperdiet, vel dictum sem tristique. In hendrerit massa eu tortor venenatis placerat. Etiam mollis volutpat bibendum. Aliquam lacus nibh, lobortis vel lacinia vel, vulputate vitae augue. Sed sollicitudin, massa et maximus elementum, eros massa facilisis magna, vitae suscipit velit justo dignissim dui. Nulla commodo, lacus vitae fringilla varius, magna tellus tempor tortor, gravida pretium nibh erat ac augue. Donec cursus ante eu velit blandit rutrum. Maecenas et purus accumsan, viverra enim nec, elementum metus. Donec vel dapibus ligula. Sed viverra varius hendrerit. Curabitur rutrum aliquet dignissim. Phasellus id massa lorem. Vivamus in nulla sit amet lectus hendrerit facilisis sed vitae purus. Donec non commodo felis.
+
+Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Vivamus id consectetur massa, ac ultricies enim. Fusce id odio hendrerit, feugiat turpis non, condimentum lacus. Praesent fringilla vitae nisl ac lobortis. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Aliquam tempus orci sem, sed sodales nisl semper at. Suspendisse malesuada metus dignissim, vehicula orci vel, volutpat odio. Quisque eget tortor rutrum, tincidunt urna ac, sagittis velit. Donec ligula mi, luctus sit amet convallis eget, interdum vitae augue. Vivamus feugiat, sem in porta pharetra, leo purus ultrices ligula, et venenatis mi magna ut mauris. Proin finibus ullamcorper odio non scelerisque. Maecenas condimentum nisl vel odio pharetra, at dapibus odio bibendum.
+
+Duis at felis finibus, molestie arcu at, placerat tortor. Sed auctor dui sit amet eros pellentesque, eu semper dolor scelerisque. Donec euismod nibh id tortor efficitur sollicitudin. Praesent sed elit eros. Nullam elit nulla, elementum quis euismod eu, convallis sit amet augue. Praesent mollis lobortis justo ac feugiat. Aliquam mattis sit amet neque vitae lobortis. Maecenas augue purus, semper vel lobortis a, auctor nec lorem. Aliquam erat volutpat. Praesent aliquet, metus quis sodales cursus, ante felis aliquam augue, sit amet sagittis arcu sapien ut odio. Quisque feugiat sem non nisl mollis porttitor non et tellus. Suspendisse commodo interdum arcu, sit amet mattis neque blandit vitae. Curabitur dapibus eros vitae facilisis placerat. Vivamus non imperdiet arcu. Nulla sagittis non orci ac venenatis. Etiam ac sem vulputate, sodales urna eu, dapibus lorem.
+
+Etiam ut lectus molestie, lacinia turpis in, auctor eros. Donec efficitur dolor eu risus imperdiet tincidunt. Phasellus faucibus, lacus at mattis hendrerit, massa turpis bibendum orci, sit amet malesuada neque libero eget eros. Aenean feugiat, mi vel porta consequat, diam purus blandit nibh, ac condimentum lacus dolor id eros. Ut auctor lectus in consectetur venenatis. Ut nec enim eu felis varius hendrerit. Quisque vitae posuere orci. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Vivamus sed aliquam sem, sed pellentesque elit. Vivamus bibendum dignissim elit, at facilisis libero posuere non.
+
+Donec enim mi, consectetur id pulvinar rhoncus, dictum in felis. Vestibulum ac pretium nulla. In nec lacus in nulla bibendum maximus. Sed efficitur non urna ac bibendum. Morbi condimentum nibh sed porttitor ultrices. Nulla tincidunt feugiat felis, eu vulputate massa volutpat eu. Ut id tortor quis risus ullamcorper aliquam. Nullam id mi dignissim, pretium est in, pellentesque libero. Pellentesque commodo sapien augue, ornare laoreet leo tristique suscipit. Etiam mattis nec nunc quis commodo. Etiam vel ipsum vel nulla lobortis sollicitudin.
+
+Cras porttitor, mi quis consequat efficitur, lectus justo bibendum dolor, et dapibus turpis nibh et tortor. Maecenas lacinia mattis neque. Sed nisi arcu, laoreet quis interdum ut, placerat non libero. Phasellus eleifend, metus sit amet efficitur hendrerit, dolor orci ultrices est, quis dignissim mi velit sit amet massa. Quisque sed molestie lacus, vel auctor diam. Fusce tempus justo sapien, nec viverra justo sollicitudin eget. Vivamus placerat ipsum sit amet erat rhoncus, ac suscipit ipsum rutrum. Ut porttitor vitae nisi et lobortis. Fusce pharetra tristique varius. Sed vitae pretium mi. Vestibulum efficitur dignissim ante et venenatis. Cras sit amet tellus non lacus rhoncus sagittis placerat vel ante.
+
+Morbi facilisis at turpis facilisis pretium. Morbi nisi orci, egestas a augue nec, ornare ultricies lorem. Integer faucibus mauris vulputate, convallis quam et, vehicula mi. Nulla sed magna libero. Proin tristique sit amet nisi et fringilla. Mauris mollis non neque quis mollis. Praesent rutrum felis eu interdum tempor. Fusce eu sapien vitae augue pulvinar eleifend vel at massa. Pellentesque et condimentum augue, eget dictum arcu. Praesent ultricies varius aliquet. Morbi eu diam quis nisl dictum placerat. Duis sagittis eu turpis ac lacinia. Aliquam in volutpat metus.
+
+Nullam sit amet neque ante. Aenean in pharetra quam, in suscipit tellus. Vivamus sed purus nec lacus ornare malesuada ac sit amet quam. Vestibulum quis purus quam. Nunc malesuada ullamcorper ante. Aenean nisi urna, placerat in rutrum id, sagittis ut dolor. Suspendisse eros elit, dictum ut aliquet venenatis, aliquet ac sem. Nam enim odio, vestibulum eget velit in, eleifend placerat dolor. Vestibulum mollis est sed dui sollicitudin, eu ultrices ante viverra. Quisque velit elit, vehicula eu lectus eget, consectetur elementum enim. Mauris dictum nisi eget tellus malesuada, sit amet facilisis lectus tristique.
+
+Curabitur euismod quam eleifend pulvinar eleifend. Pellentesque ac ultrices purus. Vestibulum egestas ligula at dui ornare commodo. Vivamus non molestie purus. Proin volutpat porta orci, non posuere tortor mollis in. Donec bibendum magna metus, eu ornare orci facilisis non. In vitae neque elementum, convallis ante a, condimentum nulla.
+
+Donec laoreet ipsum quis nunc vulputate mattis. Aenean in nunc ac tortor consequat suscipit. Curabitur eget maximus magna, fermentum auctor mi. Aliquam ac nibh enim. Donec rhoncus metus ligula, ut vulputate felis consectetur a. Fusce semper ligula sed commodo bibendum. Ut aliquet efficitur tempor.
+
+Donec efficitur massa ac purus sollicitudin aliquam. Integer nec erat blandit, mattis tortor a, dapibus libero. Quisque eget tellus ante. Curabitur fermentum bibendum massa, nec tincidunt lacus lobortis id. Nulla in maximus metus. Nullam malesuada turpis ut ipsum cursus consectetur. Nam egestas diam a lacus varius euismod. Donec finibus consectetur ante et semper. Phasellus nec tellus eget massa dignissim faucibus. Pellentesque ultricies eros ut enim venenatis auctor. Donec rhoncus lacinia quam. Pellentesque vitae justo at enim laoreet commodo in in libero. Suspendisse potenti. Donec eu ornare nisl. Integer at vehicula libero. Pellentesque quis dui est.
+
+Proin a posuere metus. Nullam at pulvinar tortor. Cras iaculis velit at ex malesuada tincidunt. Suspendisse potenti. Fusce sollicitudin pulvinar vulputate. Aliquam suscipit mi sed faucibus efficitur. Nullam eget ante molestie, pellentesque risus a, dictum nisl.
+
+Proin a rutrum diam. Fusce eleifend malesuada ipsum non gravida. Vivamus faucibus scelerisque pharetra. Aliquam orci tortor, posuere ac tellus vel, posuere euismod erat. In ac gravida ligula. Vivamus non orci ut nibh rutrum auctor vulputate nec enim. Proin volutpat faucibus tempor. Maecenas laoreet purus sit amet bibendum varius. Etiam sit amet libero id tellus tempus iaculis. Morbi metus lacus, fermentum vel rhoncus ac, iaculis id nibh. Aliquam a elit fermentum, feugiat ante non, fringilla nibh. Interdum et malesuada fames ac ante ipsum primis in faucibus. Ut varius sed erat sit amet venenatis. Nunc faucibus nulla augue, vel finibus orci volutpat in. Nam aliquam est quis dui vulputate, vitae vulputate sem vulputate.
+
+Aenean sagittis porta tincidunt. Aliquam tincidunt tortor urna, eu tincidunt neque malesuada sagittis. Pellentesque finibus interdum dolor et pulvinar. Morbi metus sem, finibus et rhoncus ac, pellentesque sed lacus. Pellentesque diam augue, fringilla at elit eget, bibendum condimentum mauris. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque mattis metus quis lorem bibendum condimentum. Phasellus vel massa purus.
+
+Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Nunc sed porta nunc. Morbi non ligula ac nunc rhoncus imperdiet sodales ac nulla. Nulla facilisi. Maecenas quis convallis ligula. Morbi et hendrerit felis, rhoncus viverra lectus. Fusce sit amet diam quis quam consectetur sagittis a et nisl. Pellentesque eu felis faucibus, malesuada eros interdum, sagittis nulla. Suspendisse potenti. Sed eget tincidunt eros. Fusce nec aliquam enim, at semper velit. Aenean interdum rhoncus sapien ut finibus. Suspendisse potenti.
+
+Aliquam eleifend pellentesque lacus at cursus. Mauris aliquet orci ut augue efficitur egestas. Donec ac ligula quam. Nam mollis nulla dolor, quis ultricies tellus luctus sollicitudin. Donec commodo nunc eu diam pretium, in blandit ex aliquam. Maecenas in urna at augue hendrerit porta egestas sit amet purus. Suspendisse potenti. Fusce sapien leo, mattis eu ligula ac, laoreet sagittis arcu. Donec tempor et odio sit amet gravida. Nunc non aliquam nisl. Nunc vitae cursus metus. Suspendisse semper metus gravida lacus aliquam egestas. Morbi sed bibendum neque, nec ornare libero. Duis ut tristique dolor, in interdum enim. Nullam pretium lacus sed metus ornare, et efficitur nibh blandit. Cras consectetur pretium efficitur.
+
+Curabitur dignissim, enim at egestas porta, dolor turpis molestie risus, vel malesuada justo risus pharetra lorem. In arcu tellus, semper eget elit et, posuere tincidunt elit. Sed lorem diam, accumsan id massa a, sagittis lobortis nibh. Morbi aliquam consequat lacus et facilisis. Sed sollicitudin ullamcorper tristique. Sed odio ipsum, malesuada pretium vehicula eu, vulputate sit amet dui. Sed malesuada tellus id purus cursus tempus. Sed metus massa, ultricies eu leo id, mattis mollis nulla. Proin sit amet ultrices ante. Maecenas auctor nibh nec erat mollis, non maximus ante tincidunt. Donec commodo rhoncus sem, nec ullamcorper quam pharetra eget. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Suspendisse nec viverra elit.
+
+Proin lobortis dui at metus molestie pellentesque. Praesent in ligula ac nisi varius facilisis. Nunc bibendum dolor id nunc vulputate ornare. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec aliquet, eros nec blandit mollis, mauris purus commodo tortor, in scelerisque ex lorem eget est. Morbi sit amet elit nec purus finibus iaculis a efficitur libero. Suspendisse molestie lectus ipsum. Duis aliquam nunc sit amet ex tempus mollis. Integer id orci molestie, consectetur enim id, semper ipsum. Duis et sodales dui. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Maecenas elementum imperdiet ultrices.
+
+Duis dignissim metus at tincidunt porta. Vivamus eget libero feugiat, ornare neque sed, cursus mi. Pellentesque vel ligula cursus, cursus lacus vel, aliquam nisi. Curabitur ac molestie massa. Fusce ut libero pulvinar, fermentum dui nec, rhoncus turpis. Aenean at pellentesque velit. Suspendisse fermentum mattis elit sed convallis.
+
+Ut nec posuere turpis. Nunc et dignissim massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Aliquam orci massa, suscipit non tortor consequat, laoreet eleifend turpis. Vivamus tempor sagittis congue. Aliquam risus augue, convallis sed dapibus vel, malesuada nec lorem. Donec congue nulla nec neque pharetra, vel viverra ligula convallis. Donec molestie arcu vitae metus tempus congue. Quisque tempus mattis convallis. Praesent ultricies pellentesque sem a placerat. Sed elementum sodales auctor. Aliquam sed tellus quam.
+
+Nullam quis orci aliquet, tempor odio vel, suscipit nulla. Sed elit risus, pellentesque a arcu ac, porttitor tincidunt quam. Quisque quis odio at neque pellentesque pretium non et purus. Aliquam mi enim, lacinia sed arcu placerat, finibus varius velit. Nulla id sem in dolor cursus mattis. Morbi vulputate quam ac est convallis, sed consequat lorem lacinia. Proin blandit consectetur augue, a tristique ligula. In mauris nunc, aliquet in elementum a, dictum et odio. Proin non semper ex, sed facilisis odio.
+
+Vivamus scelerisque, dui pellentesque blandit egestas, felis eros aliquet justo, id semper diam dui eget arcu. Integer maximus imperdiet finibus. Nunc nec dui eleifend, vehicula nisl eu, molestie lectus. Phasellus nec justo felis. Mauris mollis leo vel risus facilisis malesuada. Sed fermentum vitae mauris nec condimentum. Aliquam vel velit vitae nibh sodales tincidunt. In elit ex, molestie at justo a, tempor porta urna. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Duis laoreet mattis mauris. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos.
+
+Cras aliquam volutpat ultrices. Nullam eget quam eget sem pretium vehicula. Vestibulum eu odio convallis dui lacinia ullamcorper. In a velit dapibus, mollis ligula ac, porttitor turpis. Praesent volutpat ante eu turpis cursus, vel molestie dui tempor. Proin lectus mi, vestibulum ac congue sit amet, placerat id nisi. Sed sit amet tincidunt eros, nec ornare neque. Mauris congue maximus lacus, quis euismod ex lobortis et. In egestas vulputate lectus, sit amet porta massa facilisis sed. Integer consequat euismod neque, nec sollicitudin elit accumsan ut. Nunc sed augue imperdiet augue porta volutpat at ac metus.
+
+Cras euismod eu nulla in lacinia. Mauris ultrices felis erat, vel fermentum purus varius vitae. Morbi ac sollicitudin sem. Vivamus justo enim, pellentesque non sapien nec, fermentum malesuada felis. Donec eget dui tempus, consectetur sapien id, feugiat mi. Pellentesque tincidunt pellentesque orci quis blandit. Morbi ornare augue orci, ut elementum nisi pretium vitae. Mauris vehicula commodo ex, sit amet imperdiet diam ultrices et. Vestibulum vestibulum quam ut turpis pellentesque pharetra. Nulla tincidunt mi vitae lectus imperdiet tincidunt. Nullam sed dui nunc. Nunc erat orci, vehicula hendrerit lacus id, fermentum ultricies libero. Nunc at est velit. Vestibulum cursus ut lectus eget aliquet.
+
+Praesent ac pulvinar sapien. Maecenas tellus ex, mattis quis consequat fermentum, fringilla eget velit. Aliquam sed dapibus urna, id laoreet ipsum. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Suspendisse vitae lacus ligula. Vivamus rutrum, elit id dignissim tempor, odio ipsum egestas felis, eget elementum nibh massa sit amet urna. Donec varius tempus pretium.
+
+Aliquam vehicula, orci vel congue condimentum, quam augue porttitor nunc, vel pellentesque felis justo a sapien. Suspendisse potenti. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus sit amet turpis enim. Aliquam nisi urna, fringilla in egestas sed, tincidunt at sapien. Vivamus vulputate tempus mauris, imperdiet sagittis justo tincidunt id. Ut quis metus ut nulla dapibus dapibus et vel eros.
+
+Nulla facilisis vestibulum mauris, vel pellentesque neque tincidunt a. Curabitur luctus varius orci non molestie. Aenean feugiat molestie mauris, eu pretium elit sollicitudin at. Nullam varius libero ullamcorper sollicitudin vestibulum. Cras at cursus quam. Aenean laoreet, quam sodales hendrerit blandit, nulla risus laoreet orci, quis rutrum neque nisl at massa. Nulla fermentum orci ante, semper consequat lectus pulvinar at. Vestibulum luctus dui et lectus mattis, vestibulum sollicitudin mauris lobortis. Praesent et finibus sem. Aliquam mattis diam quis maximus volutpat. Cras vitae felis augue. Vestibulum ultricies neque ac ultricies faucibus. Vivamus convallis eleifend nunc, sollicitudin blandit nisi porta et.
+
+Fusce metus sapien, ultricies in magna non, porttitor congue justo. Donec dapibus imperdiet malesuada. Curabitur et libero tellus. Duis venenatis eleifend enim, eget tempor arcu aliquet in. Donec gravida eleifend eros ut pretium. Proin eu tempus nisl. Mauris id facilisis turpis. Vivamus eu auctor nibh, sed pellentesque libero. Proin odio sapien, tempor sit amet ligula in, viverra vehicula lorem. Nam quis vulputate tortor, ut condimentum ex. Morbi sit amet consequat metus. Aliquam magna nisi, placerat aliquam felis ac, egestas pretium elit.
+
+Aliquam vitae volutpat augue, suscipit eleifend odio. Aenean viverra, metus at sollicitudin malesuada, risus lectus luctus libero, id tincidunt quam lorem ut nunc. Nunc nec ex a lorem posuere consectetur. Aliquam gravida odio eget augue sagittis, id porta sapien tristique. Fusce ornare ut eros eget scelerisque. Donec hendrerit eros at tellus lobortis, ac feugiat turpis venenatis. Sed malesuada semper mauris, vitae blandit leo aliquet vel.
+
+Vivamus sed nisl pulvinar, tempor felis quis, hendrerit risus. Sed condimentum odio libero, sed rutrum sapien lacinia ut. Quisque libero tellus, rhoncus et porttitor sed, imperdiet sed risus. Cras non viverra tellus. Maecenas nec diam vitae erat vulputate vulputate. Praesent ex tortor, eleifend rutrum eleifend vitae, facilisis id nunc. In sodales nunc vitae metus fermentum ornare. In sollicitudin efficitur elit id rutrum. Aenean fermentum sem magna, sit amet suscipit arcu egestas id. Aliquam eu tortor a erat rhoncus porta eu ac lacus. Nullam in nibh ut tellus imperdiet pretium. Maecenas cursus, urna ac mattis tempor, tellus odio rhoncus augue, non mollis ex est sed lectus. Nulla eu blandit quam. Sed dictum vehicula odio, sed rhoncus ex tempor quis. Integer in mauris libero. Curabitur porta felis in est hendrerit, eget semper ante pulvinar.
+
+Curabitur tempor felis vitae nulla auctor accumsan. Ut vulputate ex quis libero vehicula, ac vehicula ante sodales. Suspendisse consectetur diam eget neque mattis fringilla. Duis quis dictum diam, vitae vulputate nulla. In et dolor nec justo feugiat pellentesque. Fusce molestie dui orci, vitae egestas ante pretium sed. Interdum et malesuada fames ac ante ipsum primis in faucibus. Ut maximus cursus euismod. Quisque pharetra dolor vel orci venenatis, efficitur rutrum enim feugiat. Cras id odio in tortor sollicitudin congue a porta felis. In vehicula congue eros non egestas. Quisque arcu magna, ultrices ac cursus in, gravida at nisl. Phasellus malesuada ac erat at euismod.
+
+Duis feugiat sapien non mi pretium, auctor feugiat quam posuere. Integer laoreet libero eu lectus sagittis rhoncus sit amet sed dui. Duis maximus eros id diam vulputate condimentum. Fusce pulvinar magna vitae finibus bibendum. Nunc accumsan nulla et urna molestie dignissim. Maecenas pharetra posuere interdum. Etiam consequat porta enim, in feugiat diam malesuada non. Nam viverra erat et felis imperdiet tempus et in nibh. Duis faucibus nunc eu nisl hendrerit, quis faucibus neque tincidunt. Etiam commodo venenatis mi, in posuere velit faucibus sed. Aliquam id malesuada orci.
+
+Aliquam sed dignissim purus, id rutrum diam. Pellentesque at leo risus. Phasellus ornare tempus lectus. Nulla ultrices consequat quam imperdiet dictum. Vestibulum accumsan libero eu est aliquam pellentesque. Integer varius eget erat nec molestie. Sed auctor imperdiet turpis, eget elementum nisi dictum nec. Curabitur maximus non nulla non congue. Cras scelerisque, augue eget volutpat accumsan, metus ante tristique leo, eu bibendum risus augue non erat. Nunc eu risus tincidunt, imperdiet enim a, accumsan elit. Vestibulum aliquet quis magna ut placerat. Vestibulum volutpat est vitae erat consectetur fermentum. Mauris in augue ipsum. Pellentesque ex erat, sodales ut lobortis pellentesque, volutpat quis turpis.
+
+In luctus eget massa sit amet ultricies. Aenean metus mi, finibus nec dictum at, iaculis non elit. Phasellus dapibus faucibus dui. Nam eget scelerisque arcu. Donec suscipit dui quis odio dictum, et dapibus dui euismod. Sed pulvinar malesuada nisi, ac laoreet arcu ullamcorper id. Vivamus ut tincidunt metus, non consectetur lacus. In tempor, augue sit amet sagittis accumsan, dolor odio hendrerit sapien, sed ullamcorper dolor est non mi. Fusce porttitor auctor dolor, eget bibendum est tincidunt sed. In elementum sapien vitae risus dignissim placerat. Quisque at est eget enim posuere tincidunt in sit amet leo. Vestibulum tincidunt metus nisl, in vestibulum massa commodo id. Proin eu vestibulum lectus, id tristique est.
+
+Vivamus sem purus, rhoncus vitae leo at, sodales dictum tellus. Phasellus porttitor ipsum eget magna placerat, non vestibulum dui convallis. Nunc mattis vitae erat eu porttitor. Mauris vulputate ante sed orci cursus consequat. Nullam aliquet ultrices lectus, aliquam viverra quam bibendum sed. Phasellus vel urna eros. Donec vel arcu sapien. Sed gravida sodales molestie. Integer imperdiet bibendum nunc quis finibus. Suspendisse sed tortor id lectus fermentum porta nec ac lorem. Proin efficitur fringilla enim, vitae porta mauris lobortis vel. Integer in fermentum libero. Quisque varius, lorem interdum suscipit sodales, ante ante vulputate arcu, a scelerisque est leo at massa. Duis congue nunc velit, non vulputate felis commodo quis. In hac habitasse platea dictumst. Quisque varius, turpis eget tincidunt hendrerit, lacus urna vulputate nunc, quis vehicula velit metus at tortor.
+
+Vivamus a feugiat urna. Aenean egestas feugiat purus, non laoreet dui dictum a. Vivamus non massa quis tellus volutpat volutpat non rutrum mauris. Nullam eget tortor nibh. Ut eu nunc nulla. Nunc commodo arcu et scelerisque rhoncus. Cras lacinia, erat eget fringilla sagittis, tellus urna laoreet libero, id ullamcorper neque nulla et est. Nunc at tortor a massa faucibus ornare ac ac sapien. Sed pharetra lacus ut tellus dapibus pretium ac eget purus. Suspendisse vestibulum nisl in purus porttitor mattis. Vivamus eleifend aliquet nulla at hendrerit.
+
+Curabitur eu dictum lorem. Duis vel maximus lectus. Vestibulum nulla mi, facilisis eu augue in, tristique vehicula purus. Etiam et lorem at mauris aliquet rhoncus vel quis metus. Etiam feugiat odio ac mi vehicula, sit amet vehicula mi vehicula. Nullam dapibus, quam sit amet hendrerit blandit, nunc augue elementum massa, sit amet luctus mauris lacus ac metus. Nunc posuere, odio at blandit aliquet, velit elit blandit diam, ac iaculis libero leo vitae tellus. Suspendisse potenti. Aliquam sed libero mi. Mauris dui velit, ornare ut imperdiet eget, commodo iaculis arcu. Suspendisse at porttitor arcu. Proin aliquam, leo id suscipit lobortis, sapien magna ullamcorper nisi, sit amet sagittis quam lacus pretium metus. Praesent arcu sapien, volutpat nec augue eu, semper euismod ligula. Curabitur id purus in enim dapibus mollis. Mauris lacinia non lorem vel egestas. Vestibulum ex elit, efficitur consectetur blandit aliquet, lacinia sit amet odio.
+
+Nam commodo aliquet ex, eget viverra ipsum. Quisque ac vestibulum massa. Maecenas sed felis consequat, euismod orci ut, ullamcorper est. Donec venenatis tempor mauris, in scelerisque nulla lobortis nec. Nam lacinia erat odio, in aliquet odio laoreet nec. Aliquam vel dictum quam. Duis posuere venenatis sapien, bibendum lacinia velit. In hac habitasse platea dictumst. Nam vehicula rhoncus ante, eget tincidunt arcu dapibus sed.
+
+Phasellus nec tortor placerat, vestibulum neque id, aliquet nisl. Sed aliquet posuere eros, id congue orci eleifend fermentum. Suspendisse potenti. Suspendisse potenti. Cras volutpat sem turpis, mollis faucibus quam tempor a. Proin nec purus et magna condimentum lacinia. Praesent vitae volutpat lectus, ut placerat massa. Praesent ultrices pulvinar dapibus. Praesent vulputate justo ac pellentesque vehicula.
+
+Curabitur eu nisl sit amet enim consequat faucibus. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque tellus lorem, vulputate id interdum ut, viverra quis sapien. Nulla vitae sem non ligula porttitor porttitor. Nam pretium augue nunc, at tempus dolor semper nec. Nullam maximus a neque sed iaculis. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque porttitor cursus augue vitae posuere. Curabitur volutpat varius facilisis. Donec varius nisl massa, eget condimentum nisi maximus et. Etiam feugiat lectus odio, ac finibus est luctus at. Duis metus magna, eleifend a varius eu, iaculis in libero. Pellentesque vehicula, sapien sed fringilla placerat, nisl dolor mollis nunc, ut sagittis leo diam in sapien. Pellentesque ex lacus, iaculis id ex sit amet, condimentum fringilla eros. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
+
+Nam felis dolor, egestas eu sollicitudin id, luctus nec diam. Aliquam erat volutpat. Praesent dignissim tristique orci at porta. Curabitur nec nisl tristique, tristique lectus ac, vestibulum nisl. Integer eu ligula vitae sapien tincidunt eleifend at vitae ligula. Cras ultricies sed mauris sit amet gravida. Ut tempor lacinia sollicitudin. Quisque aliquet purus eget fringilla fermentum.
+
+Aliquam facilisis laoreet quam at vulputate. Vestibulum aliquet in lorem et ornare. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Vivamus tempus porta nisl, id feugiat metus cursus a. Proin non commodo lacus. Sed blandit leo eu nibh placerat lobortis. Pellentesque vel ornare eros, a ullamcorper ex. Proin ut dignissim dolor.
+
+Quisque congue eu dui eget dictum. Curabitur efficitur, ligula ut accumsan tincidunt, magna metus pulvinar turpis, id placerat lectus metus at tortor. Etiam vel auctor nunc. Nam nec venenatis mi, egestas suscipit purus. Vestibulum dictum ac sapien sed tempor. Duis pharetra leo id risus auctor semper. Sed faucibus neque quis massa dignissim fermentum. Curabitur laoreet urna vel ex interdum posuere. Duis vestibulum porttitor lacus ac semper. In a neque id risus congue venenatis id sed est. Curabitur ultrices augue velit, ac mattis dolor venenatis ac. Sed vulputate urna eget ipsum sollicitudin iaculis. Vivamus convallis neque eu lectus congue, et convallis ex dapibus.
+
+Etiam maximus hendrerit mauris, eu dignissim lacus vulputate non. Integer congue quis neque sed laoreet. Praesent suscipit egestas nibh. Cras rhoncus vulputate mattis. Nullam quis erat ac ligula tincidunt maximus. Ut porta ultrices ligula in hendrerit. Suspendisse congue enim ac ipsum lobortis tristique. Quisque sapien turpis, convallis eget facilisis vel, tincidunt non nibh. Duis fermentum tellus vitae elementum pulvinar. Suspendisse mollis consequat metus. Nam venenatis laoreet odio non pulvinar.
+
+Mauris imperdiet hendrerit odio quis maximus. Vivamus interdum nibh risus. Quisque non nisi eleifend, tincidunt leo vitae, tincidunt enim. Proin auctor dui ut nulla lacinia facilisis. Maecenas porttitor pellentesque neque quis elementum. Sed sed luctus ex. Ut molestie ac sem non rhoncus. Donec suscipit libero nec venenatis tincidunt. Proin nec lacus elit.
+
+Vestibulum vel velit mauris. Morbi porta dolor nec commodo ornare. Donec eget felis ac massa lacinia cursus vel ut ligula. Vestibulum consequat arcu non suscipit laoreet. Integer massa dolor, luctus nec auctor ut, porttitor nec lorem. Donec sit amet risus et lorem laoreet dapibus nec quis quam. Vivamus fringilla quis ipsum gravida eleifend. Suspendisse potenti. Integer ultrices, ligula quis maximus molestie, lorem nunc auctor purus, ut venenatis orci augue ac purus. Quisque at aliquet sapien. Phasellus laoreet convallis magna, ac rhoncus risus ullamcorper ut.
+
+Pellentesque venenatis justo eget pretium vehicula. Donec quis laoreet risus. Fusce convallis arcu mauris, eu elementum lorem dictum ultrices. Maecenas dignissim orci eu tellus commodo, vitae blandit nisl interdum. Nunc accumsan nulla non feugiat mattis. Integer posuere sapien a lorem gravida sodales. Suspendisse nec urna ac orci interdum iaculis. Cras porttitor ligula eu enim lobortis tempus. Pellentesque in auctor libero. Nulla mi quam, molestie nec cursus vitae, imperdiet nec tellus. Etiam elementum nunc ac justo auctor placerat. Duis ut libero eget libero efficitur consequat sed vel ante. Proin id urna sed eros pellentesque maximus eu quis ligula. Integer venenatis vulputate ipsum, vel tempor eros finibus tempor. Nunc sit amet lacus eget sem elementum tristique. Nunc dui velit, dapibus vel quam a, aliquet interdum mi.
+
+Nunc congue enim augue, id malesuada lorem placerat eget. Proin aliquet, tellus a vulputate aliquam, leo tellus posuere augue, sed malesuada dolor est sagittis mauris. Vivamus efficitur erat ac imperdiet bibendum. Cras faucibus, dui nec vestibulum porttitor, nunc mi tincidunt orci, et facilisis arcu diam at ligula. Proin vitae metus consequat, cursus magna non, tincidunt velit. Aenean eget blandit risus. Suspendisse potenti. Sed dignissim feugiat erat molestie fermentum.
+
+Aenean venenatis consequat pellentesque. Aenean condimentum ut urna non tempor. Fusce vitae risus consequat, lobortis sem ac, egestas lorem. Ut lobortis sem vitae iaculis blandit. Phasellus pellentesque, nisi a fermentum pharetra, erat nisl maximus nisi, vel interdum tortor ipsum sed enim. Quisque non ante magna. Etiam lacinia justo a risus malesuada, sit amet fringilla ipsum rutrum. In tellus turpis, viverra quis facilisis vitae, blandit sit amet elit. Etiam viverra ut enim pellentesque rutrum. Aliquam ac laoreet mauris. Quisque volutpat justo lectus, ac egestas eros posuere sed. Morbi sagittis, est vitae laoreet tincidunt, risus eros efficitur neque, id placerat ante erat eget elit. Etiam sed dolor est. Etiam rhoncus justo sed sollicitudin finibus. Praesent placerat cursus nibh et ornare.
+
+Morbi volutpat rutrum purus nec iaculis. Aenean augue nulla, convallis et orci euismod, porta viverra neque. Duis enim mi, pulvinar vitae placerat ac, pellentesque rutrum purus. Praesent fringilla risus ut dui tincidunt, tempor euismod lectus tincidunt. Donec in pellentesque nisi. Praesent consectetur velit eget faucibus blandit. Nulla id neque pellentesque, posuere est quis, vulputate lectus.
+
+Nulla a dictum risus. In eu nulla eu dolor auctor consectetur. Maecenas finibus vel ante vel suscipit. Phasellus venenatis non purus ac posuere. Vivamus nec consectetur risus. Aenean arcu leo, fringilla in maximus quis, dignissim vel tellus. Nullam a ipsum in justo scelerisque lacinia eget eu nulla. Quisque convallis viverra hendrerit. Cras aliquet augue sapien, sit amet dignissim lectus faucibus non. Proin luctus dui leo, eu porta odio bibendum at. Donec ut lorem blandit, feugiat sem a, venenatis lectus. In amet.
+
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas varius massa orci, sit amet laoreet justo posuere ac. Ut vel blandit mi, id feugiat justo. Phasellus sed odio dictum, elementum nulla vel, elementum sem. Donec ac ligula lorem. Etiam pharetra augue massa, at auctor lorem cursus in. Quisque tristique est non ullamcorper gravida. Suspendisse interdum venenatis consequat. Ut fermentum enim purus, a efficitur massa tristique eu. Donec vitae mauris vitae tortor ultrices finibus. Aenean eu felis et diam imperdiet elementum. Suspendisse sed eleifend erat, ac posuere tortor. Vestibulum nec sem fermentum, tristique purus vel, cursus diam. Vestibulum ut volutpat nulla. Mauris ac lacinia dolor. Proin lacus nisi, dignissim non ornare quis, ultrices vitae sapien. Vivamus pulvinar mauris id sem tincidunt, nec convallis enim imperdiet.
+
+Aenean non tincidunt mauris. Interdum et malesuada fames ac ante ipsum primis in faucibus. Ut porttitor est vitae ante ultrices posuere. Cras pretium nisl sed nisl suscipit, ut scelerisque ex luctus. Proin nec neque pretium, dapibus sem nec, viverra sem. Mauris vehicula ultrices lectus ac sagittis. Nam suscipit lacus at urna venenatis blandit. Donec sed lorem lectus. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec luctus velit velit, nec varius elit lacinia in. Sed commodo pellentesque lobortis.
+
+Aliquam ut purus iaculis, consectetur metus in, molestie quam. Aliquam vulputate tellus eget malesuada vulputate. Vestibulum feugiat neque velit, in laoreet orci ultricies sed. Duis id congue ipsum. Maecenas elementum nisl nec risus sagittis, ac cursus mi accumsan. Duis vestibulum elit non nunc vestibulum rhoncus. In mollis venenatis dolor ut tristique. Sed tempus turpis eu efficitur accumsan. Proin molestie velit metus, ut euismod justo aliquam sed.
+
+Aliquam tellus sapien, sagittis sed augue quis, convallis commodo lorem. Nulla a turpis non massa feugiat tincidunt ac et libero. Etiam tempor elit sed nunc fermentum, nec pharetra nulla dictum. Nunc viverra tincidunt porttitor. Nulla pretium lectus ac dui vehicula, ac tincidunt nunc ultricies. Praesent bibendum elit ac mauris tincidunt lobortis. Quisque mattis nulla magna, quis interdum libero maximus id. Curabitur nec ultrices enim, a ornare ex. Cras id mauris ut sapien ullamcorper pharetra non quis lorem. Sed vel auctor tortor. Vivamus sed orci placerat, lobortis nisi ac, imperdiet ipsum. Quisque dapibus sodales dapibus. Nunc quam arcu, faucibus et eros vel, gravida congue quam. Donec id est efficitur dolor suscipit sollicitudin at et turpis. Morbi nibh orci, euismod quis egestas vel, imperdiet quis libero. Nam ultrices erat quis elit vulputate maximus.
+
+Vivamus a tortor in leo efficitur imperdiet ut ac justo. Donec auctor ex non elit ullamcorper, id mollis lectus aliquet. Cras arcu purus, finibus ut ullamcorper nec, suscipit quis nibh. Donec at iaculis metus. Quisque id massa maximus, blandit massa eu, cursus nisl. Aenean vel sollicitudin neque, id vehicula dui. Aenean dictum iaculis sapien nec laoreet. Quisque vel finibus tellus. Proin iaculis enim dignissim sem fermentum, vel mattis metus lobortis. Sed euismod pulvinar placerat. Vestibulum eget suscipit quam, vel ultricies urna. In euismod lorem vitae elementum malesuada. Donec quam quam, rhoncus et fringilla at, malesuada et massa. Aenean posuere ipsum sed dui pellentesque venenatis eu eget purus. Donec a luctus mauris.
+
+Aenean auctor viverra ultrices. Nunc eu massa sem. Vivamus pellentesque neque non luctus luctus. Donec vel vulputate massa. Nunc condimentum, erat sed vestibulum vestibulum, augue arcu hendrerit magna, mollis ultricies quam nibh dignissim magna. Etiam quis egestas nisi. Sed quam lacus, elementum et dui vitae, scelerisque accumsan diam. Cras eleifend est dui. In bibendum euismod lorem vitae ullamcorper. Nunc faucibus et lorem in faucibus.
+
+Sed aliquet varius turpis, a sollicitudin felis accumsan pulvinar. Nunc vestibulum ante et tristique tristique. In et efficitur purus. Vestibulum malesuada urna id nunc imperdiet tempus. Nunc eleifend sapien at velit ultricies, dictum elementum felis volutpat. Suspendisse imperdiet ut erat eu aliquam. Maecenas tincidunt sem nec sodales sollicitudin. Morbi quam augue, tincidunt vitae lectus et, lobortis efficitur dui. Ut elit ex, viverra in risus sit amet, congue blandit lacus. Etiam fringilla magna at purus sagittis, ac vehicula elit vestibulum. Cras pharetra tellus molestie tortor placerat, a vehicula dui placerat. Vivamus ac sapien sapien. Donec eleifend ligula vitae tortor sodales hendrerit non sed risus. Aliquam fermentum et urna et malesuada. Cras euismod nulla vel velit egestas, euismod laoreet ante vehicula. Maecenas orci elit, blandit eu blandit sodales, mollis ac turpis.
+
+Nam tortor est, gravida a rutrum sed, venenatis id orci. Duis massa tortor, mollis fermentum fermentum sit amet, sagittis ut nisl. Vestibulum quis sagittis purus. Suspendisse varius nec ipsum nec molestie. Vestibulum molestie molestie rhoncus. Cras dignissim sapien vitae libero tincidunt elementum. Fusce vehicula sodales orci, sed convallis ligula consequat in. In consectetur sem at laoreet lacinia. Fusce luctus faucibus tellus, in malesuada sem consectetur sit amet. Ut gravida, nisl at finibus egestas, ipsum libero viverra elit, at rutrum metus elit ac nunc. Praesent eu dolor rutrum, imperdiet justo eget, ultrices tortor. Aenean id venenatis lorem. Duis consequat elit a nisi elementum convallis. Pellentesque porta lorem vel ipsum tempus imperdiet. Aliquam suscipit justo sit amet dui imperdiet, ut ultrices leo ullamcorper. In dapibus, felis id auctor pulvinar, metus metus cursus odio, at semper justo nibh sollicitudin sem.
+
+Nam quis elit ac tortor venenatis luctus. Pellentesque consectetur tincidunt fringilla. Morbi a nunc sed libero tempor vehicula. Mauris cursus mi neque, id lobortis turpis auctor aliquet. Donec at volutpat urna. Quisque tincidunt velit mi, sed rutrum elit ornare ac. Nunc dolor libero, ultrices eget est a, facilisis auctor mi. Integer non feugiat libero, eu pulvinar leo. Fusce feugiat suscipit nibh ac iaculis. Duis vulputate felis quis enim auctor, eu dictum sapien scelerisque. Nullam sem nisl, tempor egestas imperdiet sit amet, venenatis eu ligula. Pellentesque arcu quam, bibendum sed consectetur nec, commodo a purus. Ut in ex libero. Aenean dignissim ex orci, sed tempus lectus viverra sed. Vestibulum euismod massa arcu, quis iaculis libero mattis id. Proin lectus nibh, euismod non varius quis, tincidunt sit amet urna.
+
+Suspendisse potenti. Integer dapibus gravida lacinia. Curabitur sodales ac erat vitae gravida. Vestibulum id tortor nec lectus tempus gravida sit amet id ante. Nam malesuada dapibus urna a vehicula. Sed ultricies nulla nec eleifend consequat. Maecenas elementum ante at porttitor elementum. Ut at augue vitae mauris volutpat semper. Morbi viverra justo in mauris convallis, vel consequat leo faucibus. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae;
+
+Duis efficitur augue diam, ac rhoncus mauris sagittis ac. Etiam eleifend rhoncus justo, eu interdum lorem elementum eu. Suspendisse ex libero, mollis consequat turpis sed, condimentum sodales risus. Nunc pellentesque dui vel odio scelerisque, ut aliquam mauris gravida. Quisque laoreet tincidunt tortor id viverra. Morbi eget ipsum tortor. Praesent orci mauris, euismod ut nisi in, fermentum ullamcorper nulla. Curabitur facilisis vestibulum luctus. Aliquam sollicitudin et mauris vel feugiat. Duis non quam eu sapien hendrerit tristique. Fusce venenatis dignissim porta. Duis id felis purus. Aliquam ac velit in orci ornare varius. Nulla quis ex lectus.
+
+Ut tincidunt commodo augue, ut viverra mauris fringilla at. Integer sit amet ullamcorper felis. Nullam aliquam massa quam, id tincidunt mauris porta id. Integer nibh sapien, vulputate sit amet laoreet tincidunt, hendrerit eu quam. Morbi vitae felis et diam accumsan luctus ac sed est. Donec vitae viverra diam, at rutrum elit. Donec condimentum justo id dolor viverra vestibulum posuere quis purus. Aliquam id magna sit amet magna dapibus hendrerit et vitae quam.
+
+Phasellus in mauris turpis. Etiam nec ante eu mi maximus commodo quis eu risus. Etiam a turpis non tortor viverra gravida luctus vitae est. Quisque eget gravida quam, sit amet bibendum nulla. In mollis sapien nisl, nec efficitur mi rutrum sed. Suspendisse potenti. Nulla efficitur sagittis diam nec rutrum. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam tempus lorem in purus sollicitudin cursus.
+
+Cras id rhoncus ligula. Vivamus vel tortor malesuada, eleifend neque ac, sollicitudin enim. Proin a euismod neque. Suspendisse odio quam, placerat ac hendrerit at, porttitor in sapien. Ut vitae risus velit. Maecenas sagittis, leo efficitur lobortis elementum, dui ante semper ex, quis auctor velit arcu eget ligula. Aliquam sollicitudin, nulla tempus consequat egestas, nibh diam pulvinar purus, vitae maximus justo nibh maximus lacus.
+
+Nulla rutrum magna quis mi gravida euismod. Fusce nec facilisis massa. Phasellus iaculis, eros fringilla imperdiet condimentum, orci sem fermentum massa, quis scelerisque lacus ante vitae dolor. Cras interdum egestas lectus rhoncus tristique. Etiam nec sollicitudin sapien, ut tristique nibh. Integer a imperdiet erat. In interdum nisi vel urna aliquet, eget malesuada purus dapibus. Sed hendrerit lectus at hendrerit accumsan.
+
+Maecenas semper pretium sapien nec ullamcorper. Praesent nec neque quis nunc porta ultricies interdum vel ipsum. Donec dapibus lorem quis quam hendrerit, vitae laoreet massa pellentesque. Etiam pretium sapien vitae turpis interdum, ut rhoncus nisl bibendum. Nunc ac velit ac ex sollicitudin ultrices id in arcu. Phasellus tristique, nibh et rhoncus luctus, magna erat egestas velit, nec dignissim turpis ipsum ac felis. Maecenas convallis arcu et lectus vehicula, eget iaculis quam ultrices. Duis malesuada suscipit aliquet. Sed pulvinar eros quis nisl cursus, elementum sodales tortor fringilla. Nulla feugiat tristique sem eu tempus. Quisque at velit condimentum, consequat augue rhoncus, accumsan nulla. Sed varius sodales varius.
+
+Nunc consequat, mauris eget hendrerit fermentum, felis nisi efficitur lectus, eget dignissim leo purus quis purus. Praesent libero lacus, sodales id justo id, maximus condimentum purus. Sed tristique egestas lorem vel efficitur. Praesent vestibulum tincidunt faucibus. Ut fringilla eros sed purus mattis pharetra. Sed convallis turpis in sapien dictum, sed molestie orci accumsan. Sed eros nisi, cursus cursus nulla sit amet, sollicitudin interdum quam. Vestibulum tincidunt eros convallis, iaculis odio in, vulputate nisl. Duis scelerisque finibus purus, at porttitor sem molestie nec. Nullam sed eros dignissim, tincidunt nibh id, porta metus. Sed eget magna quis sapien commodo bibendum. Vivamus non purus nec ligula facilisis blandit a a mi. Suspendisse hendrerit, erat eget tempus mollis, justo dui dictum nunc, at pulvinar purus velit elementum augue.
+
+Fusce sed venenatis sem. Sed at libero non magna varius porttitor eu vel sapien. Cras mattis non lorem sit amet fermentum. Nam sagittis nisi magna, sit amet semper urna viverra tincidunt. Cras et leo sit amet turpis lacinia dictum. Donec iaculis nulla posuere ex varius tristique. Pellentesque dictum lacus vel nulla maximus cursus. Nulla tristique lorem pellentesque est dignissim, et venenatis felis pellentesque. Nulla vitae leo at metus posuere commodo sed et ex. Curabitur est odio, laoreet eu malesuada sed, mattis ut diam. Integer erat velit, rhoncus quis nulla ornare, dictum scelerisque tellus. Suspendisse potenti. Integer accumsan lacus ac dictum pulvinar. Integer non magna blandit nibh rhoncus varius. Nulla vulputate erat ut cursus rutrum.
+
+Sed iaculis a eros sit amet egestas. Proin finibus libero at vestibulum finibus. Mauris gravida porta ipsum at placerat. Cras egestas nulla a orci consequat eleifend. In sit amet faucibus arcu. Fusce eu neque facilisis, porttitor massa vel, vehicula nisi. Aenean eu posuere sapien. Aenean in risus at lectus semper auctor. Morbi hendrerit porta urna, eu fringilla velit ultricies nec. Donec quis lorem volutpat erat volutpat accumsan eu non turpis. Nulla quis laoreet metus, at lobortis leo. Suspendisse at rutrum nulla, a tincidunt nibh. Etiam tempor mi et augue iaculis porttitor.
+
+Etiam eget ipsum id sapien sodales auctor. Proin libero nibh, lacinia lobortis dapibus ac, faucibus at dolor. Pellentesque sit amet purus at felis gravida porta. Suspendisse ut molestie massa. Curabitur dignissim leo arcu. Nulla nibh ante, tempus eu posuere eu, egestas venenatis lectus. Donec commodo pharetra laoreet. Quisque ac quam egestas, auctor leo aliquam, lacinia elit. Nullam eget nisi a tellus efficitur vestibulum. Sed molestie luctus arcu a viverra.
+
+Sed sagittis, augue et pharetra bibendum, augue purus dignissim diam, nec iaculis turpis ex eu nisl. Donec cursus, orci nec volutpat dignissim, sem enim condimentum neque, ut volutpat velit turpis vitae lectus. Maecenas eu elit eget ipsum venenatis pharetra. Etiam consectetur luctus tortor. Mauris odio massa, gravida ac libero et, semper aliquet turpis. Fusce eleifend imperdiet justo, eu molestie ipsum egestas nec. Duis vehicula quis erat sit amet dictum. Vestibulum sit amet ultricies massa. Vivamus auctor, sem vitae vulputate bibendum, risus dolor pharetra sapien, a posuere lacus libero eget ipsum. Fusce egestas at libero sed iaculis. Nunc sit amet dui scelerisque, fringilla diam in, tempor tellus. Curabitur facilisis tortor quis mauris interdum, nec mattis dolor bibendum.
+
+Nunc suscipit varius vestibulum. Praesent luctus lectus risus, tristique hendrerit nisi faucibus non. Quisque turpis leo, hendrerit a vulputate vel, imperdiet non ipsum. Sed dui est, lobortis sed tortor non, tempor tempus lorem. Cras eget egestas ipsum. Sed ante lorem, porttitor varius pulvinar eu, vehicula ut turpis. Aenean tristique sapien vitae lobortis luctus.
+
+Maecenas accumsan elit nec diam facilisis iaculis. Etiam volutpat vestibulum lectus condimentum blandit. Nulla interdum sapien sed velit tempus, a vehicula odio porta. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Duis viverra elit dui, eget aliquam eros imperdiet non. Nam porttitor tellus risus, in pretium leo tempus facilisis. Donec vel euismod lectus.
+
+Ut sed consectetur felis. Phasellus condimentum diam vitae ante commodo ultrices. Etiam iaculis, nulla mollis sodales scelerisque, ipsum eros luctus felis, vel cursus eros quam vel felis. Cras dictum eros ut auctor rutrum. Nullam cursus vehicula tortor in placerat. Pellentesque sodales euismod semper. Nullam quis vulputate augue. Aliquam in nulla ac tellus gravida semper ut et nibh. Phasellus tempor molestie purus eu ullamcorper. Etiam metus neque, imperdiet vitae turpis at, elementum mollis velit. Donec mollis auctor nunc non tristique.
+
+Morbi rutrum magna egestas, volutpat elit eget, dictum nibh. Aliquam erat volutpat. Phasellus in tristique leo. Donec sodales pretium erat eget pellentesque. Aliquam in nunc ut augue accumsan laoreet. Pellentesque sed ante sit amet tellus vulputate suscipit. Praesent interdum neque varius mi fringilla ullamcorper. Quisque a felis nibh.
+
+Curabitur porttitor, augue et tincidunt viverra, eros libero feugiat metus, vitae lobortis mauris sapien eu dui. Cras ligula eros, auctor ac nisi ut, condimentum tincidunt ex. Vivamus vel aliquam lacus, a facilisis augue. Sed in est nisl. Integer mattis, arcu sit amet placerat consectetur, leo quam elementum justo, at hendrerit urna metus in velit. Suspendisse scelerisque suscipit odio non sagittis. Proin in fermentum elit. Duis interdum, libero quis molestie rhoncus, turpis urna cursus nulla, venenatis finibus orci diam a nibh. Ut ut massa a ex convallis commodo dictum sed urna. Nam id felis ipsum. Nunc tincidunt dignissim libero, at tempus dui porttitor sit amet. Vivamus nulla ipsum, pretium non fringilla et, tristique ut est. Etiam tristique vitae enim quis elementum.
+
+Curabitur sodales nec diam vulputate hendrerit. Suspendisse consectetur convallis sem et sagittis. Donec lobortis vestibulum eros sit amet efficitur. Nulla pellentesque tempor massa sit amet tempor. Praesent vestibulum elit auctor imperdiet faucibus. Nunc consequat nunc lectus, quis egestas augue suscipit et. Suspendisse eleifend eget lorem sed ornare. Integer non aliquam nisl. Proin metus odio, faucibus pellentesque dapibus vel, scelerisque nec arcu. Pellentesque ut velit nulla. Integer porttitor nec enim ac luctus. Praesent elementum ac est in aliquam.
+
+Mauris at dui dignissim, pharetra dui nec, vulputate dolor. Nunc ac commodo enim. Mauris eleifend est nunc, eget pulvinar justo egestas et. Vestibulum id volutpat lectus, vel rhoncus risus. Ut augue justo, gravida nec libero tincidunt, vulputate fringilla dolor. Suspendisse aliquet risus vel ante tempus, vel laoreet tellus bibendum. Quisque non vestibulum nisi, non malesuada libero. Cras quam nibh, tempor vel massa id, laoreet semper libero. Aliquam fermentum nunc vitae nibh vulputate, ac dignissim sapien vestibulum. Mauris pellentesque pretium massa vitae cursus. Phasellus in lacus augue. Integer finibus pulvinar arcu, in scelerisque lorem tincidunt sit amet. Pellentesque varius turpis sollicitudin, ornare odio nec, venenatis augue. Nullam commodo lacus a placerat consequat. Curabitur eu lobortis tortor. Curabitur varius iaculis lorem in mollis.
+
+Curabitur at convallis lectus, id vestibulum enim. Donec quis velit eget leo dictum venenatis id et velit. Phasellus ut tincidunt libero. Aliquam tincidunt tellus sed tortor facilisis laoreet. Morbi cursus pellentesque lectus, et tempor turpis condimentum at. In tempor auctor metus sed accumsan. Nulla ornare dapibus mi. Aenean ut ullamcorper eros, vitae condimentum ipsum. Nam in turpis ligula. Suspendisse ac dolor odio. Curabitur vel libero ac mi mattis consectetur ac id nunc. Cras sit amet justo nec risus malesuada posuere ut sit amet augue.
+
+Sed pretium odio eu libero pretium, ut ullamcorper eros placerat. Praesent volutpat tincidunt massa, eget fermentum lacus congue eget. Pellentesque nec purus aliquet nulla sagittis vehicula. Vivamus posuere cursus lacus at blandit. Phasellus mauris sapien, imperdiet eget ex id, posuere vehicula augue. Nulla nulla ligula, ornare porta massa vel, commodo tincidunt arcu. Morbi fermentum blandit eros vitae eleifend. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Duis tempor pretium magna et ornare. Donec sed nunc sed ipsum laoreet maximus. Sed congue massa nec augue sodales, et placerat diam aliquet. Donec in sem lorem. Nullam pretium massa non magna feugiat pretium. Morbi auctor, nunc quis faucibus venenatis, sem sem porttitor mauris, non elementum mauris felis a leo. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+
+Mauris imperdiet condimentum leo, et efficitur nulla posuere id. Nullam facilisis magna non pharetra molestie. Donec volutpat tincidunt pulvinar. Phasellus molestie, neque sit amet lacinia fermentum, sapien quam iaculis ligula, id suscipit augue ante faucibus dolor. Pellentesque aliquet, tortor eu dictum consectetur, lectus quam laoreet lorem, id congue risus arcu a libero. Sed ac luctus justo. In hac habitasse platea dictumst. Praesent ultrices ante vitae ante sollicitudin elementum. Aliquam egestas porttitor velit sit amet imperdiet. Curabitur quis lacus ac metus egestas tincidunt.
+
+Nunc ut ipsum arcu. Duis suscipit, nisi posuere commodo posuere, eros mi tempus magna, ac venenatis diam erat eget massa. Etiam eu posuere sapien. Maecenas in ipsum consectetur, luctus mi eu, mattis nibh. Donec consectetur augue sit amet velit scelerisque, a aliquet dui venenatis. Morbi libero sapien, consequat faucibus congue eget, elementum sed magna. Phasellus malesuada arcu at est lobortis, id porttitor leo elementum. Praesent luctus placerat tellus vel volutpat. Nam at enim cursus, aliquam arcu ac, imperdiet dolor. Proin auctor diam elit, non aliquet orci lobortis nec. Curabitur commodo, sapien non placerat accumsan, leo sapien rutrum neque, at dapibus orci libero a nunc. Aliquam egestas sem non tellus convallis, eget rutrum eros posuere.
+
+Sed tincidunt at elit sed venenatis. Aliquam sit amet iaculis mi. Pellentesque laoreet lobortis quam, vel accumsan nisl hendrerit at. Phasellus quis purus nisl. Fusce quis laoreet nunc. Integer quis nisi justo. Vivamus porttitor malesuada orci sed porta. Nunc ullamcorper faucibus sem, ac euismod ipsum condimentum sed. Aenean iaculis nunc vitae sapien auctor, sit amet rutrum nisl commodo. Vivamus condimentum ex eu arcu posuere, nec ultricies eros lobortis. Cras vehicula massa quis auctor condimentum.
+
+Cras arcu nisl, sodales nec leo id, iaculis aliquam urna. Praesent fringilla, nisl suscipit posuere laoreet, sem magna tristique augue, id consequat ligula dui nec tortor. Sed at mattis tellus. Curabitur feugiat porttitor mauris, at gravida est. Pellentesque in libero in dui posuere facilisis. Praesent in posuere libero. Pellentesque vehicula leo mauris. Quisque commodo, nulla a placerat consequat, elit ligula blandit leo, vitae gravida turpis risus ultricies libero. Ut feugiat, augue vel malesuada ornare, magna nisi dictum est, sed egestas augue nisi eu urna. Vestibulum euismod nulla erat, sit amet accumsan felis posuere vel.
+
+Etiam pretium turpis eget semper efficitur. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Mauris in dictum velit. Suspendisse mauris dolor, sodales vehicula mauris blandit, venenatis aliquet orci. Etiam non vulputate sapien. Quisque ut metus egestas, luctus mi in, ornare dolor. Curabitur tincidunt dapibus neque, sit amet commodo est dignissim vel. Curabitur vel pharetra velit. Aliquam ligula ante, efficitur sed cursus sed, tempus et justo. Nulla faucibus sodales odio et ultricies. Proin sit amet nisl non orci ornare tempor. Sed nec lobortis sapien, eget congue mauris. Fusce facilisis ex non molestie lacinia. Vivamus venenatis iaculis quam. Sed est felis, elementum in lectus a, facilisis bibendum quam. Donec luctus non purus in commodo.
+
+Fusce ac mi vitae ex rutrum bibendum. Nulla venenatis lobortis pharetra. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Suspendisse potenti. Interdum et malesuada fames ac ante ipsum primis in faucibus. Phasellus et efficitur nibh. Morbi auctor magna diam, eget dapibus tortor tincidunt vitae. Aenean luctus eros a metus tristique suscipit. Sed luctus, risus ac scelerisque molestie, felis lectus vestibulum nunc, a posuere libero eros eu nibh. Donec gravida eget quam eget ultricies. Donec et aliquet lectus, ac aliquam ante.
+
+Maecenas lacus magna, dictum quis tempus ac, consectetur vitae purus. Sed ut arcu bibendum, malesuada urna quis, interdum nulla. Phasellus non urna ut dui rhoncus bibendum. Duis vel gravida dui. Pellentesque mollis turpis libero, sit amet vehicula magna feugiat nec. Vivamus consectetur libero ut nibh efficitur interdum. Quisque pretium auctor quam, ac commodo sapien congue a. Integer posuere facilisis mi, a placerat purus viverra malesuada. Nam ornare elit sit amet orci hendrerit, at pulvinar est porttitor. Pellentesque efficitur odio eget consectetur efficitur. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Nulla aliquam tristique diam vitae luctus. Pellentesque tortor nibh, lobortis ac velit at, congue ultrices augue. Donec et arcu ultricies, fringilla elit eget, congue lorem. Nunc quis dui in felis gravida bibendum ut id justo.
+
+Suspendisse quis quam vel turpis egestas auctor. Duis suscipit rutrum pellentesque. Sed vitae tincidunt mauris. Vestibulum rhoncus risus et facilisis hendrerit. Duis consectetur, ante nec eleifend elementum, libero mi pretium arcu, non pretium massa quam non neque. Etiam commodo egestas felis. Nam et elementum elit. Ut sit amet odio ac velit tristique rhoncus. Integer volutpat enim ut dictum rhoncus. Vestibulum viverra neque elementum, laoreet leo nec, tempor ipsum.
+
+Ut condimentum nibh id ante fermentum venenatis. Nullam scelerisque facilisis magna non sodales. Ut luctus libero augue, eget congue risus rhoncus quis. Fusce vitae lorem congue, euismod magna finibus, tincidunt justo. Aenean dapibus tortor nec lacinia pellentesque. Aenean condimentum convallis maximus. Aliquam feugiat lorem quis tellus hendrerit dictum. Nunc mollis pharetra erat vitae lobortis. Phasellus auctor velit fermentum fermentum porta.
+
+Vivamus efficitur ligula ac tincidunt pretium. Mauris rhoncus leo in sem dictum, non tempor augue tempor. Aenean rutrum augue eget justo mollis volutpat. Sed efficitur turpis vel lacus placerat, a lobortis nibh porttitor. Aliquam eleifend ultricies nulla at lacinia. Mauris eu ipsum laoreet, iaculis urna a, pretium arcu. Mauris convallis ut ligula a varius. Integer maximus venenatis risus sed tincidunt. Cras aliquet nisl ac diam ornare, ac lobortis ex rutrum. In vel mauris vestibulum, ornare purus id, iaculis lorem. Nulla condimentum tellus vel leo suscipit, in vehicula velit tempor. Cras in orci sollicitudin, placerat justo non, tristique massa. Praesent facilisis et elit sit amet placerat. Donec nec justo in nunc ultrices finibus.
+
+Fusce lacinia laoreet orci, nec egestas mauris mollis ac. Maecenas scelerisque in libero a tincidunt. Integer varius dui rutrum urna aliquam, id posuere nunc suscipit. Ut eget sollicitudin est. Nam augue nulla, commodo ut cursus sit amet, semper eu nibh. Maecenas sodales, sapien in maximus posuere, odio ante lobortis arcu, a varius diam sapien ut ipsum. Vestibulum sagittis, mauris sed ullamcorper tristique, purus quam mollis lacus, eget cursus tellus mi sit amet diam. Etiam quis lectus tristique, luctus justo eu, suscipit tortor. Phasellus vel neque ornare, dignissim nisi sit amet, porta est. Proin porttitor nisl vitae lectus tincidunt laoreet. Mauris finibus justo eu tellus egestas, a vestibulum sem vestibulum. Donec vel massa pretium, blandit orci ut, mollis lorem. Etiam mauris neque, eleifend vitae neque in, efficitur posuere mi. Morbi in elit volutpat, volutpat ligula et, porta ipsum. Aenean porta condimentum leo, vel ultrices eros imperdiet quis.
+
+Aliquam placerat, nulla vel aliquam hendrerit, turpis nibh euismod elit, a pellentesque tortor leo quis libero. Donec velit orci, ullamcorper id aliquet in, convallis varius libero. Quisque a magna est. Cras luctus purus elit, at aliquam tellus feugiat ut. Sed gravida scelerisque tortor, quis laoreet ante efficitur quis. Phasellus et tortor eget magna pulvinar laoreet et et urna. Sed ac vehicula sem, blandit semper turpis. Praesent pharetra libero dui, sed fringilla urna blandit eu. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi id felis commodo, euismod libero sollicitudin, auctor ante. Fusce tristique facilisis gravida.
+
+Curabitur elementum sit amet magna eget egestas. Integer a libero vitae nisl sagittis gravida. Quisque leo ipsum, ultrices id justo nec, scelerisque vehicula nibh. Nunc vitae commodo eros. Nunc elementum justo luctus laoreet faucibus. Vestibulum ornare lorem non eros gravida, vitae varius diam condimentum. Vivamus porta fermentum elit vitae imperdiet. Cras auctor, est vitae bibendum posuere, justo dolor iaculis risus, sit amet gravida tortor diam quis mi. Vivamus vel tortor vitae lectus tristique consectetur. Integer rutrum posuere sapien commodo consectetur. Nullam fermentum in enim non imperdiet. Proin dapibus erat ac auctor tincidunt. Nunc tortor diam, pretium quis odio a, convallis eleifend turpis. Mauris vulputate lacinia enim, at mollis enim. Etiam ut mi et dolor consectetur volutpat vitae vel eros.
+
+Donec sollicitudin mauris a justo semper consectetur. Morbi nec justo a dui faucibus semper. Nunc ornare vitae mauris vitae gravida. Integer quis commodo neque, bibendum commodo nisl. Sed sagittis posuere purus id dapibus. Nunc hendrerit at mi a mollis. Fusce augue odio, tristique cursus ullamcorper in, ultricies at ex. Integer lobortis ultricies risus, in luctus turpis consectetur sit amet. Vivamus quam lectus, dapibus imperdiet posuere in, lacinia id orci. Donec pharetra augue ac velit pulvinar blandit. Curabitur in sagittis purus. Etiam eleifend elit metus, ac tempus leo ullamcorper eget. Nulla viverra maximus ipsum, ac sollicitudin nulla auctor quis.
+
+Nunc quis varius urna. Maecenas vel orci ac tellus pulvinar tincidunt. Sed bibendum pulvinar ex sit amet pulvinar. Sed quis rutrum ipsum. Nunc sit amet mi nunc. Fusce ac tempor sapien, ac interdum tortor. Nunc sit amet varius odio. Aenean ac fringilla ante, ac tempor nibh. Ut vitae felis vel mauris condimentum scelerisque quis et magna. Mauris sit amet leo sagittis, congue dui vel, varius leo. Curabitur semper vestibulum metus, non bibendum leo vulputate eu. Fusce at pharetra ante. Sed porttitor ligula turpis, vitae eleifend sem porttitor vel. Praesent convallis imperdiet orci ac tincidunt. Nam a tortor ac sapien interdum mattis. Vivamus iaculis quam in hendrerit molestie.
+
+Phasellus sit amet dapibus massa. Vestibulum scelerisque erat turpis, eget fermentum libero blandit at. Morbi eu ligula metus. Phasellus pretium enim vitae ligula malesuada, vel bibendum turpis venenatis. Integer pretium tellus et placerat vehicula. Maecenas ut turpis eu lectus tempus lacinia id eu lacus. Aliquam laoreet lacus et purus sagittis, ut gravida dolor convallis. Sed euismod, nisl nec tincidunt tempus, velit eros fermentum nisi, ut tincidunt sem tellus rutrum enim. Etiam dignissim ipsum vitae magna laoreet semper. Sed sit amet neque placerat, porta eros in, pulvinar lorem.
+
+Duis convallis imperdiet augue, et porta orci. Maecenas venenatis, sem ut ultricies euismod, ex velit tempor massa, at imperdiet dui nisl quis sapien. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Sed eleifend mattis lacus, eget pharetra erat vestibulum non. Mauris tellus quam, lobortis ut elit quis, varius aliquet erat. Proin mauris dolor, varius eu commodo quis, porta sed erat. Morbi ut nisl accumsan, sollicitudin nisi ac, tempor leo. Nulla facilisis nibh dolor, vitae tincidunt ex hendrerit ut. Suspendisse quis neque tellus. Maecenas ut odio nec risus sagittis gravida. Phasellus feugiat cursus dapibus. Morbi efficitur condimentum elit sed pharetra. Mauris ornare pharetra nisl, ac gravida dolor condimentum at. Aliquam lobortis finibus lorem, id pretium libero vestibulum vitae.
+
+Vestibulum pretium eleifend justo, sit amet imperdiet justo faucibus a. Suspendisse consectetur ipsum quis purus rutrum imperdiet. Nam nibh ex, tincidunt nec blandit sed, venenatis vitae mauris. Integer rutrum tincidunt tortor, ut mattis tortor fermentum ac. Duis congue dui sed est suscipit, nec semper lectus lobortis. Vestibulum felis ante, hendrerit ac venenatis sed, tincidunt iaculis augue. Duis pharetra blandit metus sed semper. Fusce ornare varius placerat. Vivamus sollicitudin lacus id nunc sollicitudin, a viverra felis pellentesque. Phasellus a felis in sapien facilisis imperdiet. Quisque ac purus dapibus metus fermentum mollis.
+
+Donec diam nisl, faucibus feugiat condimentum vel, eleifend eu magna. Sed tempus, justo a bibendum suscipit, sem nunc viverra enim, id semper nunc eros sit amet mauris. Praesent ultrices porttitor ex eu lacinia. Integer quis aliquet nibh, sit amet porttitor elit. Curabitur vel elementum quam. Sed fermentum vehicula egestas. In metus massa, sodales vel mauris id, finibus dapibus metus. Donec lectus ante, ullamcorper non posuere in, fringilla non velit. Quisque cursus interdum elementum. Phasellus vestibulum massa non sem fringilla congue. Maecenas nec arcu diam. Vivamus id suscipit odio, vel condimentum leo. Nulla sed dolor mattis, interdum lacus imperdiet, interdum nulla. Maecenas sagittis, ipsum vitae dapibus luctus, ipsum dui tempus tortor, quis porta dolor dui in sapien. Nulla vel porta neque, quis auctor massa.
+
+Suspendisse viverra nec risus fermentum maximus. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent eu placerat nulla. Donec varius faucibus posuere. Nulla fermentum ultricies mauris at euismod. In hac habitasse platea dictumst. Proin et nisl purus. Cras quis risus sit amet lectus maximus semper. Quisque pellentesque luctus erat convallis maximus. Sed et lacus vel sapien pellentesque accumsan id elementum dolor.
+
+Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nulla eget aliquet nunc. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Aenean et tincidunt massa, sed maximus leo. Aliquam in cursus tortor. Praesent ornare ante vitae leo pretium cursus. Nunc sodales neque urna, eu tincidunt dui placerat at. Integer vel arcu vel velit euismod sollicitudin quis sit amet ligula. Nunc facilisis, eros eu pharetra mollis, magna odio rutrum leo, eget placerat erat massa non metus. Nunc nec auctor felis.
+
+Vestibulum et tempus ipsum. Duis molestie felis et ex scelerisque, quis faucibus dolor viverra. Suspendisse rhoncus volutpat dolor. Duis ac augue iaculis, vulputate dui sit amet, gravida ante. Mauris porttitor purus eu ligula tempus volutpat. Aenean quam neque, venenatis id est et, blandit pharetra enim. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent vitae malesuada dui. Praesent tortor ligula, tincidunt at suscipit laoreet, tristique vitae magna. Phasellus gravida augue lacinia velit cursus lacinia.
+
+Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Quisque vitae blandit dui, a facilisis sapien. Praesent malesuada massa sed orci lacinia vulputate. Cras a est vitae quam sodales pellentesque. Nam posuere condimentum mollis. Quisque ultricies nisl libero, vel scelerisque nunc interdum non. In porttitor consectetur placerat. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Quisque et tortor sed nibh scelerisque suscipit. Integer a auctor velit, in tempor magna. Curabitur iaculis ut purus vel consequat.
+
+Quisque at consequat turpis, ut aliquet dolor. Aenean quis mauris sit amet diam tempor porta ac eu purus. Maecenas commodo faucibus interdum. Praesent in tincidunt felis, vel tincidunt nibh. Integer posuere enim a purus tristique tincidunt. Etiam nisi odio, vehicula sed mauris vel, ornare blandit augue. Fusce finibus mi lorem, eu egestas lectus commodo quis. Nunc scelerisque, erat mollis varius congue, ante ligula suscipit neque, nec ultrices urna leo rutrum nibh. Vivamus pulvinar lacinia elit at lobortis. Sed molestie turpis dapibus sapien imperdiet, vitae scelerisque ligula volutpat. Nam fermentum ipsum est, ut vulputate arcu maximus eu. Sed tristique, massa sit amet dictum bibendum, neque tellus volutpat ipsum, ut faucibus purus arcu vel quam. Vivamus laoreet risus non nisi ullamcorper, molestie tincidunt diam scelerisque. Sed eget congue velit.
+
+Sed eu dapibus eros. In at est augue. Nunc malesuada, tortor quis molestie euismod, erat sem porta arcu, vitae facilisis purus ligula vitae mauris. Aliquam erat volutpat. Nunc scelerisque porta eros, finibus elementum ipsum ultricies ut. Quisque vestibulum libero quis lectus semper suscipit. Sed malesuada eu lorem in placerat.
+
+Nunc metus arcu, rutrum eu varius in, auctor vitae diam. Maecenas ultricies faucibus hendrerit. Integer tincidunt, orci a bibendum dapibus, nulla tellus dapibus urna, vel sodales sapien neque eget mi. Nunc elementum enim sapien, sed egestas diam eleifend sit amet. Mauris sapien ligula, finibus nec augue in, volutpat dictum velit. Nunc a ligula vitae massa pellentesque sollicitudin. Aliquam rutrum porttitor volutpat. Proin convallis sollicitudin commodo. Duis eu rutrum risus, a auctor felis. Proin volutpat arcu velit, sed condimentum magna varius sit amet. In et sapien efficitur, iaculis justo eu, euismod nibh.
+
+Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Aenean hendrerit eros non purus dapibus, vel laoreet ipsum tincidunt. Duis scelerisque sollicitudin rutrum. Pellentesque rutrum ultricies sem, vitae mollis elit efficitur ut. Ut consectetur scelerisque ultrices. Vivamus id urna scelerisque nibh interdum mattis. In tristique tortor ut dictum laoreet. Quisque fermentum, augue blandit lacinia luctus, ligula nunc commodo velit, accumsan tempus orci quam ac nibh. Praesent ante risus, pulvinar in nisl ac, malesuada porttitor magna.
+
+Nam nunc ex, condimentum ac volutpat ac, pretium sed tortor. Integer venenatis, nunc id ullamcorper aliquam, eros arcu blandit sapien, id maximus erat nunc sed ligula. Proin tincidunt libero et purus tincidunt maximus. Nulla laoreet nisl eu velit pharetra, id porttitor mauris dictum. Mauris blandit pharetra lectus sit amet sagittis. In sit amet lorem hendrerit, varius justo eu, ultricies odio. Curabitur ante nibh, scelerisque at elementum a, condimentum viverra tortor. Donec tellus arcu, ultricies at posuere at, sagittis at sem. Phasellus non eros eu dui blandit fringilla. Maecenas hendrerit arcu porta, feugiat neque ac, venenatis ipsum. Nam ut elit nec lectus sodales posuere. Proin aliquet accumsan sapien, non porta quam. Praesent vulputate ante ut malesuada efficitur.
+
+Nullam pulvinar arcu orci, semper vehicula nibh fringilla ac. Duis porta ullamcorper risus sed facilisis. In vitae consectetur sapien, eget porttitor velit. Ut ac leo luctus, gravida erat sit amet, fermentum orci. Proin feugiat orci eget erat sagittis, sed aliquet ipsum luctus. Morbi eu est tristique, consequat neque eu, suscipit odio. Maecenas faucibus lacinia laoreet. Nam ut tellus odio. Sed facilisis tincidunt sodales. Proin hendrerit dolor quis nulla elementum, ut pulvinar ex tincidunt. Quisque vitae purus ac risus sagittis fringilla. Phasellus fermentum faucibus suscipit.
+
+Donec congue enim id efficitur lacinia. Praesent tempus, velit a euismod ornare, lorem felis pharetra nulla, in aliquam diam quam in nibh. Nulla facilisi. Morbi malesuada urna nibh, nec semper libero malesuada non. Maecenas quis tortor vitae nisl condimentum ornare. Quisque convallis suscipit metus vel malesuada. Vivamus fringilla mattis mi eget luctus. Fusce ex arcu, efficitur vitae elit eget, aliquam faucibus lacus. Sed interdum nisl nec libero aliquam lobortis. Aenean semper, magna non lacinia rhoncus, metus lacus commodo sapien, at molestie magna urna ac magna. Duis elementum rutrum erat id sodales.
+
+Suspendisse bibendum quam ut augue faucibus semper. Maecenas sed purus consequat, sagittis quam nec, dapibus ante. Nunc erat nunc, ultrices nec nisi vel, cursus consequat sem. Vivamus molestie turpis ac sem malesuada luctus. Morbi laoreet sit amet odio id finibus. Praesent lacus justo, rhoncus non nulla commodo, posuere sodales sem. Aliquam condimentum porta condimentum. Integer congue eros risus, sed pharetra odio vestibulum vel. Mauris sagittis orci et lacus finibus luctus ut nec enim. Pellentesque magna massa, tristique a lectus et, pharetra placerat mauris. Donec eu est in leo sollicitudin elementum vitae tristique ipsum. Donec pulvinar consequat enim. Nunc cursus lorem ut dapibus maximus. Quisque vulputate ligula est, vitae vestibulum ante dapibus a.
+
+Fusce tempus nibh eget euismod ultrices. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Vestibulum mattis maximus nulla, ac consectetur erat scelerisque sed. Maecenas faucibus dui eros, finibus venenatis eros semper non. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed iaculis nisl risus, vitae blandit tortor mattis in. Morbi nisi enim, pulvinar eget pellentesque ac, faucibus in mi. Sed in mollis eros, sit amet maximus arcu.
+
+Nam luctus velit sed ipsum pharetra, eu mattis diam tempus. Phasellus volutpat nisi vitae imperdiet rhoncus. Mauris finibus ut mauris et euismod. Nullam sed efficitur libero. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Duis erat lectus, lacinia sit amet tempor ut, auctor et odio. Morbi tristique euismod erat, quis venenatis justo. Duis eu arcu placerat, pulvinar velit at, fermentum enim. Sed rutrum ipsum non ipsum condimentum consequat. Suspendisse vitae commodo turpis, eget imperdiet risus. Aenean fringilla, augue id hendrerit auctor, urna mi eleifend magna, in semper neque purus eu arcu. Suspendisse et leo mi. Donec consequat imperdiet urna, sed aliquam eros mollis in. Nullam condimentum fringilla hendrerit. Suspendisse ornare tincidunt lacus, id tristique tellus porta at. Suspendisse posuere sagittis erat, quis viverra diam varius in.
+
+Cras eget ex nec tortor iaculis ultricies a id urna. In neque ante, gravida sed rutrum in, finibus volutpat mi. Pellentesque malesuada nunc ex, vitae suscipit urna bibendum a. Etiam eleifend augue dui, ut laoreet nisi molestie et. Phasellus eu leo erat. Pellentesque in lorem ut velit ullamcorper laoreet luctus nec diam. Ut vulputate iaculis scelerisque. Praesent luctus justo justo, vulputate condimentum ipsum porttitor eget. Proin sit amet fermentum urna, sed pellentesque tellus. Suspendisse eu ullamcorper eros, ac finibus tellus. In auctor fermentum lectus a maximus. Pellentesque a pulvinar velit. Aliquam sed magna elementum, ornare ligula eu, porta odio. Nullam efficitur tortor nunc, sit amet finibus dui ornare tempor.
+
+Vestibulum enim dolor, mollis sed pulvinar vel, venenatis et justo. Cras porttitor id augue eget porta. Praesent tempor enim ut arcu dapibus molestie. Sed facilisis tortor vel nunc ultricies, non egestas ligula laoreet. Aliquam aliquet sit amet ex eu consequat. Ut ornare lectus non nisl iaculis bibendum. Aliquam dignissim, tellus dictum maximus tempus, purus metus fringilla purus, sed mattis enim justo quis mi. Donec at ipsum non eros sodales convallis. Aliquam tincidunt risus nisl, commodo pharetra nunc imperdiet ac. Nulla a elementum turpis, vel pharetra erat. Nulla interdum sed lacus quis elementum. Suspendisse blandit imperdiet erat, nec sollicitudin libero blandit ac. Suspendisse consectetur lacinia odio, eu pharetra elit fermentum non. Sed nec neque urna. Quisque vel sem eu risus tincidunt eleifend. In dictum efficitur bibendum.
+
+Cras ac quam eleifend, suscipit diam sit amet, maximus quam. Proin sit amet libero eu urna efficitur sollicitudin. Fusce nec finibus nulla, vitae ornare sem. Vivamus venenatis porttitor magna, sed venenatis ante placerat quis. Fusce et nulla hendrerit, semper nibh nec, auctor mi. Aenean sit amet leo eget mauris accumsan luctus. Cras tortor metus, vehicula ac ultricies eu, egestas ut massa. Fusce sollicitudin ex pretium, dapibus urna nec, varius nibh. Proin molestie quam metus, a volutpat arcu consectetur eget. Nam sagittis, odio sed rhoncus egestas, diam nibh efficitur nisi, convallis ultrices justo eros non neque. Proin vulputate tincidunt ipsum, vitae tristique risus. Aliquam feugiat luctus dui, id elementum nisl finibus at. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae;
+
+Praesent et velit est. Donec odio turpis, accumsan imperdiet iaculis in, mollis vitae orci. Sed sed molestie elit, at tristique lorem. Suspendisse consectetur ante id feugiat condimentum. Integer nec mauris sed lorem vestibulum commodo eu eget nunc. Vivamus faucibus, libero fermentum elementum vehicula, orci risus efficitur risus, ut posuere mi nisl non elit. Suspendisse sit amet libero magna. Integer sit amet mi et nulla euismod luctus id sit amet felis.
+
+Nulla facilisi. Sed fermentum urna quam, sed pharetra tellus sodales blandit. Vivamus sodales dui nec consequat euismod. Vivamus aliquet gravida metus, vitae consequat augue bibendum id. Curabitur fermentum laoreet turpis, ut interdum lectus dictum vitae. Fusce faucibus nisi ex, vitae sollicitudin turpis cursus at. Cras sodales tincidunt vehicula. Sed vitae leo quis nisl lacinia auctor. Proin faucibus elementum nibh, laoreet lobortis risus ornare sed.
+
+Vestibulum venenatis, augue ac tristique eleifend, tellus arcu imperdiet magna, ac eleifend lacus ipsum aliquam urna. Nam laoreet erat non rutrum ullamcorper. Mauris hendrerit aliquet tortor malesuada porttitor. Proin accumsan dolor porttitor augue ullamcorper, vitae vestibulum eros dapibus. Cras sagittis lorem lacus, ut rutrum lorem bibendum id. Praesent tristique semper ornare. Morbi posuere sit amet risus et faucibus. Maecenas a velit at nibh consequat pharetra sit amet eget enim. Morbi commodo enim magna, ac pretium sapien pellentesque eu. Mauris aliquet nisi venenatis, consequat purus at, aliquet risus.
+
+Morbi posuere erat ipsum, sit amet consequat enim consectetur in. Sed risus arcu, elementum dignissim tincidunt eu, efficitur feugiat mauris. Maecenas a mattis leo. Duis porta et felis sed ultricies. Curabitur eu aliquet lectus. Nunc ante felis, blandit eu lobortis sit amet, tempor eget urna. Mauris non metus nec metus viverra feugiat. Donec pellentesque tortor ac vehicula porttitor. Aliquam nunc sapien, dignissim nec tincidunt a, sollicitudin at nunc. Sed ut leo purus.
+
+In ullamcorper neque ex, eu sodales eros tincidunt sed. Quisque aliquam elit pretium, varius erat ac, iaculis turpis. Mauris id odio vestibulum, dictum sem sed, pulvinar felis. Ut magna turpis, hendrerit ac faucibus vel, euismod convallis velit. Maecenas rhoncus nisl lacus, nec dignissim leo imperdiet ac. Duis sed ante ut purus cursus ultrices ut eu nisi. Donec ut ante nibh. Vivamus lobortis purus leo, et vehicula magna consectetur a. Suspendisse gravida semper ligula vitae facilisis. Ut sit amet vestibulum elit, id sodales diam. Suspendisse potenti. Proin dapibus scelerisque turpis at dignissim.
+
+Interdum et malesuada fames ac ante ipsum primis in faucibus. Sed accumsan vulputate metus ut mattis. Ut semper porttitor justo in laoreet. Mauris sit amet mollis magna, vel condimentum elit. Quisque non aliquet justo. Fusce eget leo at enim commodo molestie. Praesent ipsum nulla, ultrices eget ex in, tristique ullamcorper felis. Nulla posuere commodo semper. Nam id mauris sit amet lacus luctus suscipit. Sed scelerisque gravida tristique. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer condimentum nulla semper, convallis leo sit amet, tempor nisl. Cras semper diam ac leo ornare aliquet et et lectus. Fusce sed nunc vitae nunc auctor semper et ac arcu.
+
+Aenean molestie nibh varius nisi consectetur elementum. Praesent condimentum, mi sit amet pretium suscipit, nisl est pharetra metus, sit amet feugiat neque quam vel purus. Nunc vehicula vestibulum mi eget gravida. Nullam consequat odio eget feugiat faucibus. Quisque pretium condimentum sollicitudin. Vestibulum vitae sem ut velit accumsan varius sit amet a tortor. Nunc eu mi a lorem varius bibendum vitae quis lacus. Maecenas gravida tristique lectus at pharetra. Aenean vehicula vehicula ex ut accumsan.
+
+In at consequat massa. Mauris finibus tempor nisi. Fusce a congue nulla. Aenean tempor mi vel ligula consectetur elementum. Nam scelerisque nisl et nulla faucibus, a molestie nisi bibendum. Curabitur venenatis lacus vestibulum, ultricies tellus et, elementum mauris. Pellentesque facilisis id libero id cursus. Maecenas lacinia quam quis arcu tristique aliquet. Fusce eu elit lobortis, accumsan dolor at, finibus nisl. Suspendisse facilisis dictum egestas. Cras volutpat diam ut nulla eleifend efficitur. Donec vel dapibus velit. Curabitur in mollis enim, sit amet suscipit dui. Nullam suscipit, mauris et suscipit molestie, nisl nulla elementum urna, ac varius dolor elit eget libero. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
+
+Vivamus vel dui ac lectus scelerisque elementum dictum nec orci. Suspendisse et venenatis arcu. Nullam velit orci, volutpat sed convallis in, pretium vel felis. Phasellus sollicitudin urna nec est porta, a consectetur massa egestas. Vivamus in malesuada lacus. Ut pellentesque sagittis velit, gravida vulputate neque efficitur sed. Vestibulum vitae libero et dui iaculis bibendum a nec velit. In aliquet ultricies pellentesque. Nunc suscipit, nulla id maximus viverra, nisi turpis dignissim nunc, sit amet auctor sapien ipsum sit amet magna. Mauris pretium velit congue turpis mollis faucibus. Duis non nunc sapien. Vivamus facilisis lacinia lectus, et tempor elit.
+
+Duis mi ligula, dignissim non sapien quis, congue consequat enim. Aenean lobortis purus ac tellus maximus efficitur. Cras iaculis erat sagittis feugiat viverra. Maecenas viverra, orci eu sodales porttitor, libero arcu efficitur nulla, a pellentesque nunc sapien non mi. Ut dignissim imperdiet vehicula. Nam eu sapien convallis, pulvinar felis id, sodales lorem. Praesent ornare tristique mi nec posuere. Pellentesque egestas diam nec condimentum fringilla. Nunc pulvinar urna aliquet ex vehicula suscipit. Sed pretium orci nunc, quis gravida ipsum consequat sit amet. Integer sit amet libero eu mauris ultricies auctor eu nec mi. Donec pulvinar eros erat, eget molestie neque dictum sit amet. Sed vitae venenatis nisi, tincidunt ultricies enim. Nam et velit gravida, malesuada dolor eget, feugiat massa. Morbi vel pellentesque arcu. Sed vulputate libero vel ipsum placerat posuere.
+
+Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Cras mattis ultrices enim id posuere. Proin sollicitudin posuere lectus, in tempus odio porta quis. Etiam semper sapien elit, eu imperdiet tortor iaculis sed. Ut id faucibus arcu. Suspendisse tincidunt, tortor sed dapibus ullamcorper, odio ex egestas purus, eget posuere ante elit quis augue. Nulla facilisi. Pellentesque feugiat euismod elit, eu luctus tellus feugiat a. Aliquam cursus rhoncus mauris at consequat. Morbi dapibus metus id est bibendum, et mollis eros lobortis. Nulla erat turpis, sodales sit amet dictum id, pharetra sed magna. Proin efficitur erat id libero congue pellentesque eu eu massa. Ut a lobortis nunc. Aliquam sollicitudin diam vel magna tempor convallis. Vivamus non tempus orci. Fusce lacinia, ipsum vitae finibus imperdiet, elit lorem pretium elit, tincidunt pretium odio erat in diam.
+
+Morbi suscipit rhoncus odio a molestie. Donec eleifend ipsum eget efficitur varius. Etiam faucibus pretium urna, sed fermentum magna feugiat ut. Aenean ornare gravida vehicula. Aenean sagittis est pretium mollis facilisis. Sed scelerisque placerat erat, vel lacinia nibh feugiat vitae. Praesent vel dapibus lacus. Nunc bibendum tempor lorem et faucibus. Praesent mattis blandit neque interdum varius. Nunc pharetra orci sed ipsum tincidunt, non suscipit nisl malesuada. Maecenas tincidunt libero sit amet mattis lacinia.
+
+Duis accumsan sem erat, a ornare nibh faucibus vulputate. Ut rutrum scelerisque sapien vitae consectetur. Aliquam quis tristique tortor. Maecenas nibh lacus, varius a blandit eu, dapibus sit amet sem. Vivamus accumsan, libero sit amet suscipit elementum, nisl magna fermentum ipsum, laoreet elementum orci nisl et ligula. Curabitur in ligula placerat, scelerisque tellus in, ultricies nibh. Nam nunc libero, egestas at mauris dignissim, consectetur congue urna. Suspendisse molestie diam nec ipsum molestie, eu rutrum nulla sollicitudin. Duis quis facilisis arcu, in semper leo. Quisque viverra ultricies orci, eu mattis eros pulvinar mattis. Pellentesque vel finibus ante. Praesent ac mi facilisis, mollis augue vitae, rhoncus mauris. Pellentesque commodo vestibulum maximus. Donec accumsan urna id iaculis malesuada. Integer varius elit nec orci pulvinar, ut ultrices metus vulputate.
+
+Cras posuere neque mauris, in dignissim magna tincidunt sit amet. Aliquam sit amet mi dolor. Quisque elementum molestie posuere. Vestibulum tempor mollis purus, vitae vestibulum purus tempor quis. Aenean ut augue massa. Suspendisse tincidunt tincidunt erat, in consequat massa vulputate id. Duis cursus eget enim eu tristique. Proin quis nulla sed velit commodo dignissim. Praesent lacinia ante a ante lobortis, id imperdiet augue rutrum. Quisque purus lacus, sollicitudin euismod venenatis sit amet, eleifend nec eros. Sed luctus faucibus dolor ut eleifend. Quisque tincidunt ante elit, nec vulputate eros fermentum vel. In posuere leo vel risus efficitur mollis. Phasellus imperdiet pharetra orci.
+
+Fusce auctor sagittis turpis, nec pharetra dolor pharetra vel. Vestibulum luctus sagittis gravida. Nulla quam erat, sagittis non elit id, gravida hendrerit leo. In eleifend elit at efficitur blandit. Sed quis dignissim nulla. Sed in dapibus tortor. Vivamus lacinia, ligula vitae cursus porttitor, dui urna condimentum nisi, quis hendrerit dolor eros vel neque. Curabitur eget lectus vel elit lobortis scelerisque. Etiam congue, risus feugiat faucibus rutrum, urna orci egestas felis, auctor finibus est urna id eros. Morbi rutrum, arcu quis dictum euismod, turpis urna lacinia enim, ac malesuada justo elit non lorem. Sed vel orci nec ex rutrum faucibus. Praesent nisl sapien, ultrices quis justo eu, molestie suscipit ante. Donec gravida quis purus eu dignissim. Donec vulputate convallis ipsum vitae pellentesque. Pellentesque ut urna mi.
+
+In id quam vel libero mollis commodo a ac sem. Sed ornare elit est, molestie condimentum justo mattis sed. Vivamus tempor velit sit amet libero venenatis ultrices. Cras faucibus orci venenatis diam fermentum commodo. Donec pulvinar augue lacus, vitae dictum nisl auctor sed. Suspendisse ut nisi porttitor, porta neque id, tincidunt dolor. Fusce mollis laoreet arcu nec ultricies. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Duis ultricies nisl eget dui semper dapibus. Aenean vitae lacus est. Proin vel erat sed ex euismod facilisis. Interdum et malesuada fames ac ante ipsum primis in faucibus.
+
+Ut non fermentum tellus, sed vehicula augue. Etiam blandit lacus sapien, luctus sagittis leo auctor sit amet. Sed ipsum massa, eleifend sit amet augue non, tristique vulputate lacus. Suspendisse sit amet leo odio. Quisque dignissim, erat non eleifend accumsan, nisl diam blandit neque, eget sodales enim ipsum in lorem. Praesent erat dolor, pulvinar vitae turpis sit amet, auctor dignissim ligula. Fusce eget commodo massa. Nullam sit amet tincidunt libero, id vehicula erat. Nulla a fermentum elit. Aenean maximus luctus auctor. Integer sit amet maximus diam, ac lobortis sapien.
+
+Sed at ultricies velit, in laoreet dui. Pellentesque sit amet euismod mauris. Fusce euismod vehicula mauris. Phasellus magna nisi, maximus vel elit et, fringilla aliquet elit. Proin varius, ipsum eget scelerisque malesuada, ipsum felis vulputate tortor, eu luctus justo ipsum sit amet elit. Suspendisse lacus leo, mollis et malesuada eget, pharetra nec massa. Donec tristique fringilla pharetra. Maecenas malesuada mi turpis. Nulla id mauris purus.
+
+Nullam rutrum in ex non placerat. Cras rutrum nulla sit amet felis ultricies feugiat. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Suspendisse laoreet turpis eu eros vestibulum, cursus blandit arcu ultricies. Ut et quam eu diam gravida pulvinar a non dui. Sed ut lectus sem. In tristique finibus elit sit amet aliquet. Vestibulum convallis nunc arcu, in euismod ante vulputate et. Etiam tempor enim non iaculis elementum. Suspendisse feugiat sem non nisi imperdiet, eu convallis ante malesuada. Duis consectetur, ligula at viverra vehicula, neque neque aliquam arcu, sed eleifend elit arcu non diam. Fusce in magna et neque ultrices varius. Sed ante nibh, egestas id ligula sit amet, ullamcorper aliquet erat. Nulla dictum neque et sagittis blandit. Curabitur tincidunt sollicitudin ligula in consectetur. Fusce velit est, aliquet eu tempor ut, lobortis quis justo.
+
+Proin vel augue ut ex viverra lobortis. Maecenas ultricies vulputate metus, in consectetur dolor gravida quis. Suspendisse potenti. Curabitur vitae mauris a dolor efficitur accumsan eget eget tellus. Suspendisse tristique quam ac pellentesque viverra. Curabitur ex turpis, consequat non est at, finibus ultricies sem. Duis bibendum mi vel libero aliquam scelerisque. Sed eget rhoncus sapien. In dictum, neque vitae efficitur accumsan, nulla ipsum ultrices eros, vitae porttitor massa ex vel augue. Curabitur aliquet dui et urna dapibus, a elementum diam dapibus. Pellentesque leo libero, ornare vitae fringilla non, venenatis vitae massa. Interdum et malesuada fames ac ante ipsum primis in faucibus. Suspendisse dapibus nisi ut nunc vulputate pellentesque. Suspendisse auctor erat non viverra fringilla. Pellentesque feugiat dictum urna, eu auctor metus aliquam vitae. Nunc nulla sem, maximus in lacinia non, viverra eu nulla.
+
+In fringilla cursus nisi vel tempus. Mauris blandit leo vel facilisis blandit. Quisque auctor magna quis justo commodo, in laoreet justo pharetra. In hac habitasse platea dictumst. Cras imperdiet cursus eros, quis rhoncus neque viverra in. Praesent rutrum aliquam euismod. In vitae elit blandit erat efficitur vehicula vitae quis lectus. Fusce consectetur nibh sit amet felis placerat consectetur. Morbi leo risus, dictum vel vestibulum vel, tempor id erat. Suspendisse facilisis massa nec risus maximus, nec semper purus fringilla. Cras dapibus diam eu elit sollicitudin, in tempor tellus accumsan. Proin pulvinar varius sollicitudin. Nullam quis tellus ac est imperdiet malesuada.
+
+Morbi sem nulla, egestas a luctus at, egestas id magna. Pellentesque ac tristique neque, in vestibulum enim. Fusce turpis nisi, commodo a justo id, fermentum vulputate sem. Phasellus fermentum elementum dui, id dictum leo fermentum et. Fusce porttitor enim odio, sit amet porttitor dolor luctus eget. Etiam ligula libero, finibus vitae enim vitae, facilisis fringilla mi. Fusce eget fermentum dui.
+
+Cras quis ipsum ultricies, tincidunt nibh non, commodo nisl. In commodo diam et quam porttitor, non sagittis ante feugiat. Vestibulum ultricies elit non lectus ultrices, a egestas dui tempus. Etiam faucibus ipsum ante, interdum condimentum ligula pellentesque at. Integer ornare bibendum libero vel accumsan. Donec ornare finibus diam fringilla pharetra. Nam pellentesque nibh quis diam tincidunt faucibus. Sed tortor arcu, posuere id enim accumsan, tristique lobortis velit. Suspendisse massa turpis, maximus ut eros vitae, sollicitudin efficitur libero. Phasellus ut scelerisque nisl. Ut ligula risus, venenatis at orci non, hendrerit aliquam mi. Vestibulum a varius ante, ac pulvinar diam. Integer hendrerit fringilla erat, eu egestas mi fringilla molestie. Aliquam erat volutpat. Nunc ut feugiat elit. Etiam a bibendum dui.
+
+Morbi ornare molestie lobortis. Aliquam erat nunc, placerat eget volutpat in, vehicula nec tortor. Maecenas et libero nec nibh mollis bibendum quis et neque. Fusce eleifend eros quis consequat hendrerit. Nunc ac dolor odio. Nullam condimentum ut dolor id venenatis. Quisque ultrices, urna quis commodo elementum, augue lectus tristique turpis, at lobortis nibh dolor sit amet lectus. Curabitur accumsan tortor ex, ut sagittis tortor volutpat a. Morbi justo diam, iaculis et felis vel, pretium porttitor mi. Cras volutpat enim ut posuere sollicitudin. Nulla suscipit diam ut varius volutpat. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+
+Duis ut convallis est, ac cursus purus. Fusce euismod gravida diam non lacinia. Pellentesque quis arcu fermentum, elementum erat et, porttitor sem. Sed sed mauris sed urna auctor ultricies. Mauris vel sodales purus. Vivamus semper lorem nec ligula ultricies, lobortis lobortis metus scelerisque. Morbi in dolor hendrerit metus sodales mollis sed eget neque. Nam sollicitudin, nulla id consequat malesuada, ligula nulla imperdiet lacus, nec pellentesque nunc leo convallis elit. Aenean vestibulum ipsum quis nulla laoreet, ut convallis velit sodales. Quisque dolor tellus, dignissim sit amet nulla ut, mollis vulputate ligula. Sed tempus porta rutrum. Sed tincidunt justo eget est ullamcorper, quis tempor odio convallis.
+
+Pellentesque tortor felis, euismod a orci at, posuere tristique neque. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Integer congue elit dignissim dolor feugiat, non pellentesque quam aliquam. Aenean porttitor, mi nec aliquet vehicula, magna diam euismod elit, gravida commodo nunc erat ut nulla. Mauris arcu odio, dictum a fermentum et, tempor quis nisl. Vestibulum congue rhoncus semper. Suspendisse ut convallis ante, non consequat nibh. Vivamus blandit laoreet accumsan. Maecenas feugiat congue mi ac aliquet. Nunc nisl massa, facilisis vel fringilla et, vestibulum ac lorem. Proin eget ipsum id turpis hendrerit pharetra in eget nisi. Cras tellus arcu, tristique id dictum ornare, tempus et ante. Aenean aliquam elementum metus vitae pretium.
+
+Cras et purus tellus. Quisque ipsum enim, sagittis sit amet vulputate in, sollicitudin in felis. Vivamus a commodo nisi. Aliquam ligula neque, venenatis vel risus id, pellentesque gravida sapien. Donec leo ipsum, tincidunt non suscipit eu, scelerisque sit amet tortor. Donec sit amet nisl tristique, placerat ex id, aliquam nibh. Etiam fringilla nisl sem, ac pellentesque ex lobortis eget.
+
+Donec luctus dui sit amet imperdiet accumsan. Sed tempus rutrum finibus. Nunc aliquet vitae ligula non tempus. Pellentesque mauris tortor, ullamcorper at velit in, consectetur commodo nisi. Vestibulum tempor massa quis est ultricies lobortis. Aliquam et elit bibendum, sodales nulla in, sollicitudin tellus. Morbi rhoncus eros nec quam ultricies varius. Praesent vitae venenatis velit, eget dignissim velit. Aliquam pellentesque, urna vitae dictum tristique, nibh mauris vehicula felis, ut eleifend orci magna a nulla. Fusce vel laoreet dolor, a imperdiet lacus. Vivamus at pharetra tortor. Aliquam ut ultricies magna, eget vehicula neque.
+
+Cras laoreet facilisis varius. Donec congue tempor orci, euismod sagittis nulla ornare et. Integer sollicitudin id felis ac mollis. Aliquam eget elit in nulla posuere consequat. Mauris nec hendrerit libero, id elementum diam. Donec rhoncus consectetur eros, non condimentum sapien malesuada sed. Pellentesque sagittis enim luctus fermentum sodales. Nam condimentum molestie nulla quis cursus. Quisque vitae sollicitudin diam. Fusce mattis elementum lectus a rutrum. Donec egestas dui eros, ut dictum metus tincidunt ut. Nullam at eros est. Mauris mollis vestibulum velit vel facilisis. In accumsan nisi in lorem commodo maximus.
+
+Nam nec libero dictum, cursus eros quis, ultricies metus. Sed in leo sapien. Suspendisse sollicitudin orci vitae interdum iaculis. Nullam cursus id nunc eget scelerisque. Curabitur non tincidunt elit. Duis gravida auctor pellentesque. Integer sodales ultrices nibh a ornare. Phasellus efficitur mi arcu, at pulvinar turpis gravida eu. Aliquam vitae posuere urna. Sed iaculis aliquet ipsum vel mollis.
+
+Pellentesque interdum bibendum eros vel convallis. Sed iaculis erat tortor, quis suscipit quam laoreet vitae. Sed ut augue dignissim, viverra diam molestie, vehicula est. Ut facilisis aliquet ipsum, non finibus mauris pretium non. Donec vel dapibus tellus. Proin at justo tellus. Praesent eget risus quis urna maximus dictum. Cras sapien ipsum, ullamcorper eget augue nec, pellentesque tempus ante. Aenean ut mattis justo. Fusce congue massa a augue dapibus dapibus. Maecenas interdum enim et ligula tincidunt accumsan.
+
+Aliquam et tempor arcu. Sed auctor lacus justo, ut dictum diam auctor sit amet. Quisque sed quam rutrum, pulvinar justo non, dignissim felis. Donec in est eget nulla convallis tristique ut nec nunc. Maecenas pulvinar felis sem, at pulvinar augue sodales non. In magna ex, mollis id finibus sit amet, imperdiet a nisi. Fusce ullamcorper, leo et suscipit consectetur, ex odio sodales elit, scelerisque scelerisque turpis risus et ex. Morbi sed ultrices ex. Duis vel arcu rutrum, volutpat dui vel, luctus ligula. Maecenas nibh ante, porttitor vestibulum quam ut, consequat consectetur elit.
+
+Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Duis consequat lorem vitae massa volutpat, eu venenatis orci placerat. Integer varius sagittis volutpat. In vel mollis ante. Maecenas eget vestibulum dolor, ut aliquam sapien. Nam efficitur orci laoreet, lobortis nunc eu, pretium quam. Suspendisse et purus a quam vestibulum faucibus a tristique magna. Nulla at enim gravida massa eleifend molestie vitae quis erat. Integer tristique nisi libero, et varius lacus posuere eget. Donec interdum sed nisi a congue. Nam sodales mattis pharetra. Curabitur gravida sapien nec viverra posuere. Duis a dolor vulputate, sollicitudin mi vitae, accumsan erat. Sed leo neque, rhoncus posuere fringilla vitae, porttitor vel nulla.
+
+In hac habitasse platea dictumst. Etiam a mollis dolor, nec suscipit ex. Aenean nec bibendum velit. Donec fermentum, nisl vel porta semper, nunc velit porttitor felis, egestas malesuada magna tellus vel tortor. Integer fermentum nulla at eros fringilla, sit amet fringilla lectus luctus. Nulla scelerisque arcu ac rhoncus iaculis. Proin lobortis tincidunt velit, at mattis augue eleifend id. Sed pellentesque semper diam sit amet ultricies. Etiam felis lectus, molestie id orci quis, porttitor dictum mauris. Nulla facilisi. Fusce tempus urna quis sollicitudin blandit. Phasellus sed sodales est, quis viverra velit. Duis eget auctor risus. Aliquam tempor turpis quis turpis aliquet, id viverra ipsum vestibulum. Integer ac finibus tellus.
+
+Donec scelerisque placerat metus, ac tincidunt turpis ornare non. Aenean dignissim pharetra ex vel dignissim. Sed egestas tristique lacus, et convallis nibh vestibulum rutrum. Nulla facilisi. Sed posuere volutpat ex, vel consequat nunc dapibus at. Curabitur sit amet dapibus risus. Fusce dui est, varius venenatis libero sit amet, tincidunt facilisis felis. Morbi pharetra volutpat mauris vitae varius. Nam vestibulum, arcu at efficitur facilisis, ex mauris ultricies sem, at interdum metus nunc at est. Phasellus id leo eu lacus aliquet gravida eu ac tortor.
+
+Etiam dapibus sem eu tellus luctus, at laoreet enim feugiat. Morbi mollis justo quam, in egestas ex pulvinar a. Etiam et aliquam metus. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nam sit amet elit sed nunc vestibulum iaculis ut vel augue. Quisque risus metus, ultrices id ipsum sit amet, dapibus consequat leo. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed nulla sapien, consequat rhoncus accumsan vitae, finibus a mauris. Vivamus at odio arcu. Nam in lacus non dui laoreet pulvinar. Sed quis tempor urna, ut condimentum turpis. Cras vulputate eros erat, sit amet auctor orci blandit nec. Integer consectetur fringilla rhoncus. Suspendisse suscipit lectus finibus consectetur imperdiet.
+
+Proin pellentesque ligula vel lacus laoreet, id elementum diam facilisis. Ut et ipsum ligula. Sed in nisi vel erat maximus cursus sed eu velit. Aenean porttitor felis arcu, aliquet maximus ante mollis id. Praesent laoreet nisi lacus, sit amet rutrum turpis blandit vel. Integer in volutpat elit. Suspendisse scelerisque elit et erat tempus, sed consectetur leo molestie. Etiam eleifend massa sit amet ante euismod facilisis.
+
+Proin accumsan sed nunc quis sollicitudin. Aliquam vehicula orci eu libero placerat, sed condimentum justo hendrerit. Morbi eu turpis ut sapien fringilla molestie vel non risus. Nunc porttitor est nec est interdum, imperdiet volutpat sem malesuada. Curabitur a lacus eu enim cursus tristique. Morbi pharetra mollis tincidunt. Sed viverra libero tempus sem tristique, quis elementum ipsum tincidunt. Duis tincidunt feugiat tortor pellentesque tempor. Mauris pellentesque pretium ex porta consectetur. Vestibulum euismod sollicitudin nibh id maximus. Aenean bibendum, mi quis dapibus facilisis, purus dolor viverra risus, nec aliquam velit quam at ipsum. Vivamus enim velit, rutrum at finibus non, placerat a justo. Praesent maximus nunc sed maximus fringilla. Sed in est in odio auctor tempus. Quisque erat lorem, sodales ut eros quis, dictum porttitor ipsum.
+
+Ut facilisis pellentesque leo, aliquam imperdiet leo maximus a. Donec eget turpis porttitor, euismod lorem vitae, condimentum lorem. Sed non convallis metus, a tristique metus. Aenean nec est a libero ultrices fermentum eget malesuada sapien. Phasellus faucibus elit felis, in efficitur lectus maximus nec. Nullam mollis quam est, ac finibus eros efficitur ut. Proin pretium, metus id lacinia molestie, mi diam dignissim nulla, ac feugiat dui dui a urna. Aliquam erat volutpat. Donec eget viverra nunc. Vivamus a facilisis est. Morbi varius felis orci, eget tempus quam congue vitae.
+
+Suspendisse in ipsum ut turpis ornare pellentesque sed sed velit. Morbi posuere in sapien tempus egestas. Aenean fermentum ipsum vel risus dictum, a mollis lectus tristique. Vestibulum sed sapien sed sem cursus sodales. Quisque ultricies ligula ut erat gravida molestie. Cras tincidunt urna odio, at varius lectus sagittis eget. Donec rhoncus accumsan tincidunt.
+
+Curabitur risus lorem, tempus euismod hendrerit eget, consequat vitae risus. Pellentesque malesuada laoreet tempus. Nunc sagittis, mi ut tristique sodales, tortor quam pulvinar ipsum, vestibulum accumsan dui augue a velit. Quisque faucibus nibh fermentum tempor vehicula. Morbi id rutrum velit, ut imperdiet justo. Nulla nec neque libero. Fusce consequat ornare tincidunt. Praesent eget imperdiet libero. Aliquam convallis risus sed risus condimentum ultricies. Duis sed purus purus. Quisque pulvinar faucibus finibus. Etiam fringilla sapien et tellus sollicitudin consectetur. Proin viverra eu nunc malesuada ullamcorper.
+
+Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nullam auctor, elit vitae euismod laoreet, leo erat blandit felis, quis porta nibh sem a massa. Nam vehicula est purus, vel convallis mauris commodo a. Proin cursus tortor eu velit consectetur fermentum. Nunc et egestas purus, et volutpat orci. Mauris eleifend id tellus in eleifend. Duis lectus tellus, malesuada et velit at, hendrerit finibus nulla. Ut tincidunt sagittis orci, mollis condimentum urna lobortis quis. Integer vitae dictum eros. Phasellus eu hendrerit neque. Aenean sit amet lectus nunc. Pellentesque tortor sapien, euismod rutrum placerat quis, cursus eu nunc. Suspendisse pretium, erat non mollis pellentesque, sapien neque rhoncus justo, in facilisis odio augue ac lacus. Nunc a sapien sodales, convallis nisl ac, gravida ante. Suspendisse sollicitudin eu leo eget facilisis. Donec sodales justo eu lacus tincidunt, sit amet tristique ipsum egestas.
+
+Suspendisse pharetra dictum neque, vel elementum sem condimentum lobortis. Aenean eget aliquet dolor. Aliquam erat volutpat. Ut feugiat tempor pretium. Phasellus faucibus eros et mauris ultrices, in pretium felis consectetur. Nullam ac turpis venenatis, feugiat massa vel, tristique turpis. Nunc eu ligula non quam laoreet dictum. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Maecenas sed mi imperdiet quam commodo accumsan. Vivamus pharetra iaculis diam, non tempus tortor pharetra dapibus. Nulla posuere, velit nec vehicula cursus, mi massa ultrices lectus, ut scelerisque quam velit sed velit. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas commodo, lorem ut elementum accumsan, sem leo lacinia tortor, ac placerat purus erat eget libero. Nunc posuere scelerisque ante, ut eleifend mauris vehicula nec.
+
+Donec rutrum quam dolor, id varius velit efficitur non. Aliquam eros lacus, dapibus at leo sit amet, ultricies ullamcorper ante. Nam quis condimentum leo. Curabitur porta vel nulla ac lobortis. Sed et tellus eu erat cursus bibendum. Vivamus id eros eget enim molestie volutpat vel at lectus. Ut ut neque erat. Sed vulputate erat justo, tristique bibendum mauris elementum eget. Pellentesque at convallis nisi. In commodo placerat elit et semper. Fusce nec sed.
+
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas varius massa orci, sit amet laoreet justo posuere ac. Ut vel blandit mi, id feugiat justo. Phasellus sed odio dictum, elementum nulla vel, elementum sem. Donec ac ligula lorem. Etiam pharetra augue massa, at auctor lorem cursus in. Quisque tristique est non ullamcorper gravida. Suspendisse interdum venenatis consequat. Ut fermentum enim purus, a efficitur massa tristique eu. Donec vitae mauris vitae tortor ultrices finibus. Aenean eu felis et diam imperdiet elementum. Suspendisse sed eleifend erat, ac posuere tortor. Vestibulum nec sem fermentum, tristique purus vel, cursus diam. Vestibulum ut volutpat nulla. Mauris ac lacinia dolor. Proin lacus nisi, dignissim non ornare quis, ultrices vitae sapien. Vivamus pulvinar mauris id sem tincidunt, nec convallis enim imperdiet.
+
+Aenean non tincidunt mauris. Interdum et malesuada fames ac ante ipsum primis in faucibus. Ut porttitor est vitae ante ultrices posuere. Cras pretium nisl sed nisl suscipit, ut scelerisque ex luctus. Proin nec neque pretium, dapibus sem nec, viverra sem. Mauris vehicula ultrices lectus ac sagittis. Nam suscipit lacus at urna venenatis blandit. Donec sed lorem lectus. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec luctus velit velit, nec varius elit lacinia in. Sed commodo pellentesque lobortis.
+
+Aliquam ut purus iaculis, consectetur metus in, molestie quam. Aliquam vulputate tellus eget malesuada vulputate. Vestibulum feugiat neque velit, in laoreet orci ultricies sed. Duis id congue ipsum. Maecenas elementum nisl nec risus sagittis, ac cursus mi accumsan. Duis vestibulum elit non nunc vestibulum rhoncus. In mollis venenatis dolor ut tristique. Sed tempus turpis eu efficitur accumsan. Proin molestie velit metus, ut euismod justo aliquam sed.
+
+Aliquam tellus sapien, sagittis sed augue quis, convallis commodo lorem. Nulla a turpis non massa feugiat tincidunt ac et libero. Etiam tempor elit sed nunc fermentum, nec pharetra nulla dictum. Nunc viverra tincidunt porttitor. Nulla pretium lectus ac dui vehicula, ac tincidunt nunc ultricies. Praesent bibendum elit ac mauris tincidunt lobortis. Quisque mattis nulla magna, quis interdum libero maximus id. Curabitur nec ultrices enim, a ornare ex. Cras id mauris ut sapien ullamcorper pharetra non quis lorem. Sed vel auctor tortor. Vivamus sed orci placerat, lobortis nisi ac, imperdiet ipsum. Quisque dapibus sodales dapibus. Nunc quam arcu, faucibus et eros vel, gravida congue quam. Donec id est efficitur dolor suscipit sollicitudin at et turpis. Morbi nibh orci, euismod quis egestas vel, imperdiet quis libero. Nam ultrices erat quis elit vulputate maximus.
+
+Vivamus a tortor in leo efficitur imperdiet ut ac justo. Donec auctor ex non elit ullamcorper, id mollis lectus aliquet. Cras arcu purus, finibus ut ullamcorper nec, suscipit quis nibh. Donec at iaculis metus. Quisque id massa maximus, blandit massa eu, cursus nisl. Aenean vel sollicitudin neque, id vehicula dui. Aenean dictum iaculis sapien nec laoreet. Quisque vel finibus tellus. Proin iaculis enim dignissim sem fermentum, vel mattis metus lobortis. Sed euismod pulvinar placerat. Vestibulum eget suscipit quam, vel ultricies urna. In euismod lorem vitae elementum malesuada. Donec quam quam, rhoncus et fringilla at, malesuada et massa. Aenean posuere ipsum sed dui pellentesque venenatis eu eget purus. Donec a luctus mauris.
+
+Aenean auctor viverra ultrices. Nunc eu massa sem. Vivamus pellentesque neque non luctus luctus. Donec vel vulputate massa. Nunc condimentum, erat sed vestibulum vestibulum, augue arcu hendrerit magna, mollis ultricies quam nibh dignissim magna. Etiam quis egestas nisi. Sed quam lacus, elementum et dui vitae, scelerisque accumsan diam. Cras eleifend est dui. In bibendum euismod lorem vitae ullamcorper. Nunc faucibus et lorem in faucibus.
+
+Sed aliquet varius turpis, a sollicitudin felis accumsan pulvinar. Nunc vestibulum ante et tristique tristique. In et efficitur purus. Vestibulum malesuada urna id nunc imperdiet tempus. Nunc eleifend sapien at velit ultricies, dictum elementum felis volutpat. Suspendisse imperdiet ut erat eu aliquam. Maecenas tincidunt sem nec sodales sollicitudin. Morbi quam augue, tincidunt vitae lectus et, lobortis efficitur dui. Ut elit ex, viverra in risus sit amet, congue blandit lacus. Etiam fringilla magna at purus sagittis, ac vehicula elit vestibulum. Cras pharetra tellus molestie tortor placerat, a vehicula dui placerat. Vivamus ac sapien sapien. Donec eleifend ligula vitae tortor sodales hendrerit non sed risus. Aliquam fermentum et urna et malesuada. Cras euismod nulla vel velit egestas, euismod laoreet ante vehicula. Maecenas orci elit, blandit eu blandit sodales, mollis ac turpis.
+
+Nam tortor est, gravida a rutrum sed, venenatis id orci. Duis massa tortor, mollis fermentum fermentum sit amet, sagittis ut nisl. Vestibulum quis sagittis purus. Suspendisse varius nec ipsum nec molestie. Vestibulum molestie molestie rhoncus. Cras dignissim sapien vitae libero tincidunt elementum. Fusce vehicula sodales orci, sed convallis ligula consequat in. In consectetur sem at laoreet lacinia. Fusce luctus faucibus tellus, in malesuada sem consectetur sit amet. Ut gravida, nisl at finibus egestas, ipsum libero viverra elit, at rutrum metus elit ac nunc. Praesent eu dolor rutrum, imperdiet justo eget, ultrices tortor. Aenean id venenatis lorem. Duis consequat elit a nisi elementum convallis. Pellentesque porta lorem vel ipsum tempus imperdiet. Aliquam suscipit justo sit amet dui imperdiet, ut ultrices leo ullamcorper. In dapibus, felis id auctor pulvinar, metus metus cursus odio, at semper justo nibh sollicitudin sem.
+
+Nam quis elit ac tortor venenatis luctus. Pellentesque consectetur tincidunt fringilla. Morbi a nunc sed libero tempor vehicula. Mauris cursus mi neque, id lobortis turpis auctor aliquet. Donec at volutpat urna. Quisque tincidunt velit mi, sed rutrum elit ornare ac. Nunc dolor libero, ultrices eget est a, facilisis auctor mi. Integer non feugiat libero, eu pulvinar leo. Fusce feugiat suscipit nibh ac iaculis. Duis vulputate felis quis enim auctor, eu dictum sapien scelerisque. Nullam sem nisl, tempor egestas imperdiet sit amet, venenatis eu ligula. Pellentesque arcu quam, bibendum sed consectetur nec, commodo a purus. Ut in ex libero. Aenean dignissim ex orci, sed tempus lectus viverra sed. Vestibulum euismod massa arcu, quis iaculis libero mattis id. Proin lectus nibh, euismod non varius quis, tincidunt sit amet urna.
+
+Suspendisse potenti. Integer dapibus gravida lacinia. Curabitur sodales ac erat vitae gravida. Vestibulum id tortor nec lectus tempus gravida sit amet id ante. Nam malesuada dapibus urna a vehicula. Sed ultricies nulla nec eleifend consequat. Maecenas elementum ante at porttitor elementum. Ut at augue vitae mauris volutpat semper. Morbi viverra justo in mauris convallis, vel consequat leo faucibus. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae;
+
+Duis efficitur augue diam, ac rhoncus mauris sagittis ac. Etiam eleifend rhoncus justo, eu interdum lorem elementum eu. Suspendisse ex libero, mollis consequat turpis sed, condimentum sodales risus. Nunc pellentesque dui vel odio scelerisque, ut aliquam mauris gravida. Quisque laoreet tincidunt tortor id viverra. Morbi eget ipsum tortor. Praesent orci mauris, euismod ut nisi in, fermentum ullamcorper nulla. Curabitur facilisis vestibulum luctus. Aliquam sollicitudin et mauris vel feugiat. Duis non quam eu sapien hendrerit tristique. Fusce venenatis dignissim porta. Duis id felis purus. Aliquam ac velit in orci ornare varius. Nulla quis ex lectus.
+
+Ut tincidunt commodo augue, ut viverra mauris fringilla at. Integer sit amet ullamcorper felis. Nullam aliquam massa quam, id tincidunt mauris porta id. Integer nibh sapien, vulputate sit amet laoreet tincidunt, hendrerit eu quam. Morbi vitae felis et diam accumsan luctus ac sed est. Donec vitae viverra diam, at rutrum elit. Donec condimentum justo id dolor viverra vestibulum posuere quis purus. Aliquam id magna sit amet magna dapibus hendrerit et vitae quam.
+
+Phasellus in mauris turpis. Etiam nec ante eu mi maximus commodo quis eu risus. Etiam a turpis non tortor viverra gravida luctus vitae est. Quisque eget gravida quam, sit amet bibendum nulla. In mollis sapien nisl, nec efficitur mi rutrum sed. Suspendisse potenti. Nulla efficitur sagittis diam nec rutrum. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam tempus lorem in purus sollicitudin cursus.
+
+Cras id rhoncus ligula. Vivamus vel tortor malesuada, eleifend neque ac, sollicitudin enim. Proin a euismod neque. Suspendisse odio quam, placerat ac hendrerit at, porttitor in sapien. Ut vitae risus velit. Maecenas sagittis, leo efficitur lobortis elementum, dui ante semper ex, quis auctor velit arcu eget ligula. Aliquam sollicitudin, nulla tempus consequat egestas, nibh diam pulvinar purus, vitae maximus justo nibh maximus lacus.
+
+Nulla rutrum magna quis mi gravida euismod. Fusce nec facilisis massa. Phasellus iaculis, eros fringilla imperdiet condimentum, orci sem fermentum massa, quis scelerisque lacus ante vitae dolor. Cras interdum egestas lectus rhoncus tristique. Etiam nec sollicitudin sapien, ut tristique nibh. Integer a imperdiet erat. In interdum nisi vel urna aliquet, eget malesuada purus dapibus. Sed hendrerit lectus at hendrerit accumsan.
+
+Maecenas semper pretium sapien nec ullamcorper. Praesent nec neque quis nunc porta ultricies interdum vel ipsum. Donec dapibus lorem quis quam hendrerit, vitae laoreet massa pellentesque. Etiam pretium sapien vitae turpis interdum, ut rhoncus nisl bibendum. Nunc ac velit ac ex sollicitudin ultrices id in arcu. Phasellus tristique, nibh et rhoncus luctus, magna erat egestas velit, nec dignissim turpis ipsum ac felis. Maecenas convallis arcu et lectus vehicula, eget iaculis quam ultrices. Duis malesuada suscipit aliquet. Sed pulvinar eros quis nisl cursus, elementum sodales tortor fringilla. Nulla feugiat tristique sem eu tempus. Quisque at velit condimentum, consequat augue rhoncus, accumsan nulla. Sed varius sodales varius.
+
+Nunc consequat, mauris eget hendrerit fermentum, felis nisi efficitur lectus, eget dignissim leo purus quis purus. Praesent libero lacus, sodales id justo id, maximus condimentum purus. Sed tristique egestas lorem vel efficitur. Praesent vestibulum tincidunt faucibus. Ut fringilla eros sed purus mattis pharetra. Sed convallis turpis in sapien dictum, sed molestie orci accumsan. Sed eros nisi, cursus cursus nulla sit amet, sollicitudin interdum quam. Vestibulum tincidunt eros convallis, iaculis odio in, vulputate nisl. Duis scelerisque finibus purus, at porttitor sem molestie nec. Nullam sed eros dignissim, tincidunt nibh id, porta metus. Sed eget magna quis sapien commodo bibendum. Vivamus non purus nec ligula facilisis blandit a a mi. Suspendisse hendrerit, erat eget tempus mollis, justo dui dictum nunc, at pulvinar purus velit elementum augue.
+
+Fusce sed venenatis sem. Sed at libero non magna varius porttitor eu vel sapien. Cras mattis non lorem sit amet fermentum. Nam sagittis nisi magna, sit amet semper urna viverra tincidunt. Cras et leo sit amet turpis lacinia dictum. Donec iaculis nulla posuere ex varius tristique. Pellentesque dictum lacus vel nulla maximus cursus. Nulla tristique lorem pellentesque est dignissim, et venenatis felis pellentesque. Nulla vitae leo at metus posuere commodo sed et ex. Curabitur est odio, laoreet eu malesuada sed, mattis ut diam. Integer erat velit, rhoncus quis nulla ornare, dictum scelerisque tellus. Suspendisse potenti. Integer accumsan lacus ac dictum pulvinar. Integer non magna blandit nibh rhoncus varius. Nulla vulputate erat ut cursus rutrum.
+
+Sed iaculis a eros sit amet egestas. Proin finibus libero at vestibulum finibus. Mauris gravida porta ipsum at placerat. Cras egestas nulla a orci consequat eleifend. In sit amet faucibus arcu. Fusce eu neque facilisis, porttitor massa vel, vehicula nisi. Aenean eu posuere sapien. Aenean in risus at lectus semper auctor. Morbi hendrerit porta urna, eu fringilla velit ultricies nec. Donec quis lorem volutpat erat volutpat accumsan eu non turpis. Nulla quis laoreet metus, at lobortis leo. Suspendisse at rutrum nulla, a tincidunt nibh. Etiam tempor mi et augue iaculis porttitor.
+
+Etiam eget ipsum id sapien sodales auctor. Proin libero nibh, lacinia lobortis dapibus ac, faucibus at dolor. Pellentesque sit amet purus at felis gravida porta. Suspendisse ut molestie massa. Curabitur dignissim leo arcu. Nulla nibh ante, tempus eu posuere eu, egestas venenatis lectus. Donec commodo pharetra laoreet. Quisque ac quam egestas, auctor leo aliquam, lacinia elit. Nullam eget nisi a tellus efficitur vestibulum. Sed molestie luctus arcu a viverra.
+
+Sed sagittis, augue et pharetra bibendum, augue purus dignissim diam, nec iaculis turpis ex eu nisl. Donec cursus, orci nec volutpat dignissim, sem enim condimentum neque, ut volutpat velit turpis vitae lectus. Maecenas eu elit eget ipsum venenatis pharetra. Etiam consectetur luctus tortor. Mauris odio massa, gravida ac libero et, semper aliquet turpis. Fusce eleifend imperdiet justo, eu molestie ipsum egestas nec. Duis vehicula quis erat sit amet dictum. Vestibulum sit amet ultricies massa. Vivamus auctor, sem vitae vulputate bibendum, risus dolor pharetra sapien, a posuere lacus libero eget ipsum. Fusce egestas at libero sed iaculis. Nunc sit amet dui scelerisque, fringilla diam in, tempor tellus. Curabitur facilisis tortor quis mauris interdum, nec mattis dolor bibendum.
+
+Nunc suscipit varius vestibulum. Praesent luctus lectus risus, tristique hendrerit nisi faucibus non. Quisque turpis leo, hendrerit a vulputate vel, imperdiet non ipsum. Sed dui est, lobortis sed tortor non, tempor tempus lorem. Cras eget egestas ipsum. Sed ante lorem, porttitor varius pulvinar eu, vehicula ut turpis. Aenean tristique sapien vitae lobortis luctus.
+
+Maecenas accumsan elit nec diam facilisis iaculis. Etiam volutpat vestibulum lectus condimentum blandit. Nulla interdum sapien sed velit tempus, a vehicula odio porta. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Duis viverra elit dui, eget aliquam eros imperdiet non. Nam porttitor tellus risus, in pretium leo tempus facilisis. Donec vel euismod lectus.
+
+Ut sed consectetur felis. Phasellus condimentum diam vitae ante commodo ultrices. Etiam iaculis, nulla mollis sodales scelerisque, ipsum eros luctus felis, vel cursus eros quam vel felis. Cras dictum eros ut auctor rutrum. Nullam cursus vehicula tortor in placerat. Pellentesque sodales euismod semper. Nullam quis vulputate augue. Aliquam in nulla ac tellus gravida semper ut et nibh. Phasellus tempor molestie purus eu ullamcorper. Etiam metus neque, imperdiet vitae turpis at, elementum mollis velit. Donec mollis auctor nunc non tristique.
+
+Morbi rutrum magna egestas, volutpat elit eget, dictum nibh. Aliquam erat volutpat. Phasellus in tristique leo. Donec sodales pretium erat eget pellentesque. Aliquam in nunc ut augue accumsan laoreet. Pellentesque sed ante sit amet tellus vulputate suscipit. Praesent interdum neque varius mi fringilla ullamcorper. Quisque a felis nibh.
+
+Curabitur porttitor, augue et tincidunt viverra, eros libero feugiat metus, vitae lobortis mauris sapien eu dui. Cras ligula eros, auctor ac nisi ut, condimentum tincidunt ex. Vivamus vel aliquam lacus, a facilisis augue. Sed in est nisl. Integer mattis, arcu sit amet placerat consectetur, leo quam elementum justo, at hendrerit urna metus in velit. Suspendisse scelerisque suscipit odio non sagittis. Proin in fermentum elit. Duis interdum, libero quis molestie rhoncus, turpis urna cursus nulla, venenatis finibus orci diam a nibh. Ut ut massa a ex convallis commodo dictum sed urna. Nam id felis ipsum. Nunc tincidunt dignissim libero, at tempus dui porttitor sit amet. Vivamus nulla ipsum, pretium non fringilla et, tristique ut est. Etiam tristique vitae enim quis elementum.
+
+Curabitur sodales nec diam vulputate hendrerit. Suspendisse consectetur convallis sem et sagittis. Donec lobortis vestibulum eros sit amet efficitur. Nulla pellentesque tempor massa sit amet tempor. Praesent vestibulum elit auctor imperdiet faucibus. Nunc consequat nunc lectus, quis egestas augue suscipit et. Suspendisse eleifend eget lorem sed ornare. Integer non aliquam nisl. Proin metus odio, faucibus pellentesque dapibus vel, scelerisque nec arcu. Pellentesque ut velit nulla. Integer porttitor nec enim ac luctus. Praesent elementum ac est in aliquam.
+
+Mauris at dui dignissim, pharetra dui nec, vulputate dolor. Nunc ac commodo enim. Mauris eleifend est nunc, eget pulvinar justo egestas et. Vestibulum id volutpat lectus, vel rhoncus risus. Ut augue justo, gravida nec libero tincidunt, vulputate fringilla dolor. Suspendisse aliquet risus vel ante tempus, vel laoreet tellus bibendum. Quisque non vestibulum nisi, non malesuada libero. Cras quam nibh, tempor vel massa id, laoreet semper libero. Aliquam fermentum nunc vitae nibh vulputate, ac dignissim sapien vestibulum. Mauris pellentesque pretium massa vitae cursus. Phasellus in lacus augue. Integer finibus pulvinar arcu, in scelerisque lorem tincidunt sit amet. Pellentesque varius turpis sollicitudin, ornare odio nec, venenatis augue. Nullam commodo lacus a placerat consequat. Curabitur eu lobortis tortor. Curabitur varius iaculis lorem in mollis.
+
+Curabitur at convallis lectus, id vestibulum enim. Donec quis velit eget leo dictum venenatis id et velit. Phasellus ut tincidunt libero. Aliquam tincidunt tellus sed tortor facilisis laoreet. Morbi cursus pellentesque lectus, et tempor turpis condimentum at. In tempor auctor metus sed accumsan. Nulla ornare dapibus mi. Aenean ut ullamcorper eros, vitae condimentum ipsum. Nam in turpis ligula. Suspendisse ac dolor odio. Curabitur vel libero ac mi mattis consectetur ac id nunc. Cras sit amet justo nec risus malesuada posuere ut sit amet augue.
+
+Sed pretium odio eu libero pretium, ut ullamcorper eros placerat. Praesent volutpat tincidunt massa, eget fermentum lacus congue eget. Pellentesque nec purus aliquet nulla sagittis vehicula. Vivamus posuere cursus lacus at blandit. Phasellus mauris sapien, imperdiet eget ex id, posuere vehicula augue. Nulla nulla ligula, ornare porta massa vel, commodo tincidunt arcu. Morbi fermentum blandit eros vitae eleifend. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Duis tempor pretium magna et ornare. Donec sed nunc sed ipsum laoreet maximus. Sed congue massa nec augue sodales, et placerat diam aliquet. Donec in sem lorem. Nullam pretium massa non magna feugiat pretium. Morbi auctor, nunc quis faucibus venenatis, sem sem porttitor mauris, non elementum mauris felis a leo. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+
+Mauris imperdiet condimentum leo, et efficitur nulla posuere id. Nullam facilisis magna non pharetra molestie. Donec volutpat tincidunt pulvinar. Phasellus molestie, neque sit amet lacinia fermentum, sapien quam iaculis ligula, id suscipit augue ante faucibus dolor. Pellentesque aliquet, tortor eu dictum consectetur, lectus quam laoreet lorem, id congue risus arcu a libero. Sed ac luctus justo. In hac habitasse platea dictumst. Praesent ultrices ante vitae ante sollicitudin elementum. Aliquam egestas porttitor velit sit amet imperdiet. Curabitur quis lacus ac metus egestas tincidunt.
+
+Nunc ut ipsum arcu. Duis suscipit, nisi posuere commodo posuere, eros mi tempus magna, ac venenatis diam erat eget massa. Etiam eu posuere sapien. Maecenas in ipsum consectetur, luctus mi eu, mattis nibh. Donec consectetur augue sit amet velit scelerisque, a aliquet dui venenatis. Morbi libero sapien, consequat faucibus congue eget, elementum sed magna. Phasellus malesuada arcu at est lobortis, id porttitor leo elementum. Praesent luctus placerat tellus vel volutpat. Nam at enim cursus, aliquam arcu ac, imperdiet dolor. Proin auctor diam elit, non aliquet orci lobortis nec. Curabitur commodo, sapien non placerat accumsan, leo sapien rutrum neque, at dapibus orci libero a nunc. Aliquam egestas sem non tellus convallis, eget rutrum eros posuere.
+
+Sed tincidunt at elit sed venenatis. Aliquam sit amet iaculis mi. Pellentesque laoreet lobortis quam, vel accumsan nisl hendrerit at. Phasellus quis purus nisl. Fusce quis laoreet nunc. Integer quis nisi justo. Vivamus porttitor malesuada orci sed porta. Nunc ullamcorper faucibus sem, ac euismod ipsum condimentum sed. Aenean iaculis nunc vitae sapien auctor, sit amet rutrum nisl commodo. Vivamus condimentum ex eu arcu posuere, nec ultricies eros lobortis. Cras vehicula massa quis auctor condimentum.
+
+Cras arcu nisl, sodales nec leo id, iaculis aliquam urna. Praesent fringilla, nisl suscipit posuere laoreet, sem magna tristique augue, id consequat ligula dui nec tortor. Sed at mattis tellus. Curabitur feugiat porttitor mauris, at gravida est. Pellentesque in libero in dui posuere facilisis. Praesent in posuere libero. Pellentesque vehicula leo mauris. Quisque commodo, nulla a placerat consequat, elit ligula blandit leo, vitae gravida turpis risus ultricies libero. Ut feugiat, augue vel malesuada ornare, magna nisi dictum est, sed egestas augue nisi eu urna. Vestibulum euismod nulla erat, sit amet accumsan felis posuere vel.
+
+Etiam pretium turpis eget semper efficitur. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Mauris in dictum velit. Suspendisse mauris dolor, sodales vehicula mauris blandit, venenatis aliquet orci. Etiam non vulputate sapien. Quisque ut metus egestas, luctus mi in, ornare dolor. Curabitur tincidunt dapibus neque, sit amet commodo est dignissim vel. Curabitur vel pharetra velit. Aliquam ligula ante, efficitur sed cursus sed, tempus et justo. Nulla faucibus sodales odio et ultricies. Proin sit amet nisl non orci ornare tempor. Sed nec lobortis sapien, eget congue mauris. Fusce facilisis ex non molestie lacinia. Vivamus venenatis iaculis quam. Sed est felis, elementum in lectus a, facilisis bibendum quam. Donec luctus non purus in commodo.
+
+Fusce ac mi vitae ex rutrum bibendum. Nulla venenatis lobortis pharetra. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Suspendisse potenti. Interdum et malesuada fames ac ante ipsum primis in faucibus. Phasellus et efficitur nibh. Morbi auctor magna diam, eget dapibus tortor tincidunt vitae. Aenean luctus eros a metus tristique suscipit. Sed luctus, risus ac scelerisque molestie, felis lectus vestibulum nunc, a posuere libero eros eu nibh. Donec gravida eget quam eget ultricies. Donec et aliquet lectus, ac aliquam ante.
+
+Maecenas lacus magna, dictum quis tempus ac, consectetur vitae purus. Sed ut arcu bibendum, malesuada urna quis, interdum nulla. Phasellus non urna ut dui rhoncus bibendum. Duis vel gravida dui. Pellentesque mollis turpis libero, sit amet vehicula magna feugiat nec. Vivamus consectetur libero ut nibh efficitur interdum. Quisque pretium auctor quam, ac commodo sapien congue a. Integer posuere facilisis mi, a placerat purus viverra malesuada. Nam ornare elit sit amet orci hendrerit, at pulvinar est porttitor. Pellentesque efficitur odio eget consectetur efficitur. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Nulla aliquam tristique diam vitae luctus. Pellentesque tortor nibh, lobortis ac velit at, congue ultrices augue. Donec et arcu ultricies, fringilla elit eget, congue lorem. Nunc quis dui in felis gravida bibendum ut id justo.
+
+Suspendisse quis quam vel turpis egestas auctor. Duis suscipit rutrum pellentesque. Sed vitae tincidunt mauris. Vestibulum rhoncus risus et facilisis hendrerit. Duis consectetur, ante nec eleifend elementum, libero mi pretium arcu, non pretium massa quam non neque. Etiam commodo egestas felis. Nam et elementum elit. Ut sit amet odio ac velit tristique rhoncus. Integer volutpat enim ut dictum rhoncus. Vestibulum viverra neque elementum, laoreet leo nec, tempor ipsum.
+
+Ut condimentum nibh id ante fermentum venenatis. Nullam scelerisque facilisis magna non sodales. Ut luctus libero augue, eget congue risus rhoncus quis. Fusce vitae lorem congue, euismod magna finibus, tincidunt justo. Aenean dapibus tortor nec lacinia pellentesque. Aenean condimentum convallis maximus. Aliquam feugiat lorem quis tellus hendrerit dictum. Nunc mollis pharetra erat vitae lobortis. Phasellus auctor velit fermentum fermentum porta.
+
+Vivamus efficitur ligula ac tincidunt pretium. Mauris rhoncus leo in sem dictum, non tempor augue tempor. Aenean rutrum augue eget justo mollis volutpat. Sed efficitur turpis vel lacus placerat, a lobortis nibh porttitor. Aliquam eleifend ultricies nulla at lacinia. Mauris eu ipsum laoreet, iaculis urna a, pretium arcu. Mauris convallis ut ligula a varius. Integer maximus venenatis risus sed tincidunt. Cras aliquet nisl ac diam ornare, ac lobortis ex rutrum. In vel mauris vestibulum, ornare purus id, iaculis lorem. Nulla condimentum tellus vel leo suscipit, in vehicula velit tempor. Cras in orci sollicitudin, placerat justo non, tristique massa. Praesent facilisis et elit sit amet placerat. Donec nec justo in nunc ultrices finibus.
+
+Fusce lacinia laoreet orci, nec egestas mauris mollis ac. Maecenas scelerisque in libero a tincidunt. Integer varius dui rutrum urna aliquam, id posuere nunc suscipit. Ut eget sollicitudin est. Nam augue nulla, commodo ut cursus sit amet, semper eu nibh. Maecenas sodales, sapien in maximus posuere, odio ante lobortis arcu, a varius diam sapien ut ipsum. Vestibulum sagittis, mauris sed ullamcorper tristique, purus quam mollis lacus, eget cursus tellus mi sit amet diam. Etiam quis lectus tristique, luctus justo eu, suscipit tortor. Phasellus vel neque ornare, dignissim nisi sit amet, porta est. Proin porttitor nisl vitae lectus tincidunt laoreet. Mauris finibus justo eu tellus egestas, a vestibulum sem vestibulum. Donec vel massa pretium, blandit orci ut, mollis lorem. Etiam mauris neque, eleifend vitae neque in, efficitur posuere mi. Morbi in elit volutpat, volutpat ligula et, porta ipsum. Aenean porta condimentum leo, vel ultrices eros imperdiet quis.
+
+Aliquam placerat, nulla vel aliquam hendrerit, turpis nibh euismod elit, a pellentesque tortor leo quis libero. Donec velit orci, ullamcorper id aliquet in, convallis varius libero. Quisque a magna est. Cras luctus purus elit, at aliquam tellus feugiat ut. Sed gravida scelerisque tortor, quis laoreet ante efficitur quis. Phasellus et tortor eget magna pulvinar laoreet et et urna. Sed ac vehicula sem, blandit semper turpis. Praesent pharetra libero dui, sed fringilla urna blandit eu. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi id felis commodo, euismod libero sollicitudin, auctor ante. Fusce tristique facilisis gravida.
+
+Curabitur elementum sit amet magna eget egestas. Integer a libero vitae nisl sagittis gravida. Quisque leo ipsum, ultrices id justo nec, scelerisque vehicula nibh. Nunc vitae commodo eros. Nunc elementum justo luctus laoreet faucibus. Vestibulum ornare lorem non eros gravida, vitae varius diam condimentum. Vivamus porta fermentum elit vitae imperdiet. Cras auctor, est vitae bibendum posuere, justo dolor iaculis risus, sit amet gravida tortor diam quis mi. Vivamus vel tortor vitae lectus tristique consectetur. Integer rutrum posuere sapien commodo consectetur. Nullam fermentum in enim non imperdiet. Proin dapibus erat ac auctor tincidunt. Nunc tortor diam, pretium quis odio a, convallis eleifend turpis. Mauris vulputate lacinia enim, at mollis enim. Etiam ut mi et dolor consectetur volutpat vitae vel eros.
+
+Donec sollicitudin mauris a justo semper consectetur. Morbi nec justo a dui faucibus semper. Nunc ornare vitae mauris vitae gravida. Integer quis commodo neque, bibendum commodo nisl. Sed sagittis posuere purus id dapibus. Nunc hendrerit at mi a mollis. Fusce augue odio, tristique cursus ullamcorper in, ultricies at ex. Integer lobortis ultricies risus, in luctus turpis consectetur sit amet. Vivamus quam lectus, dapibus imperdiet posuere in, lacinia id orci. Donec pharetra augue ac velit pulvinar blandit. Curabitur in sagittis purus. Etiam eleifend elit metus, ac tempus leo ullamcorper eget. Nulla viverra maximus ipsum, ac sollicitudin nulla auctor quis.
+
+Nunc quis varius urna. Maecenas vel orci ac tellus pulvinar tincidunt. Sed bibendum pulvinar ex sit amet pulvinar. Sed quis rutrum ipsum. Nunc sit amet mi nunc. Fusce ac tempor sapien, ac interdum tortor. Nunc sit amet varius odio. Aenean ac fringilla ante, ac tempor nibh. Ut vitae felis vel mauris condimentum scelerisque quis et magna. Mauris sit amet leo sagittis, congue dui vel, varius leo. Curabitur semper vestibulum metus, non bibendum leo vulputate eu. Fusce at pharetra ante. Sed porttitor ligula turpis, vitae eleifend sem porttitor vel. Praesent convallis imperdiet orci ac tincidunt. Nam a tortor ac sapien interdum mattis. Vivamus iaculis quam in hendrerit molestie.
+
+Phasellus sit amet dapibus massa. Vestibulum scelerisque erat turpis, eget fermentum libero blandit at. Morbi eu ligula metus. Phasellus pretium enim vitae ligula malesuada, vel bibendum turpis venenatis. Integer pretium tellus et placerat vehicula. Maecenas ut turpis eu lectus tempus lacinia id eu lacus. Aliquam laoreet lacus et purus sagittis, ut gravida dolor convallis. Sed euismod, nisl nec tincidunt tempus, velit eros fermentum nisi, ut tincidunt sem tellus rutrum enim. Etiam dignissim ipsum vitae magna laoreet semper. Sed sit amet neque placerat, porta eros in, pulvinar lorem.
+
+Duis convallis imperdiet augue, et porta orci. Maecenas venenatis, sem ut ultricies euismod, ex velit tempor massa, at imperdiet dui nisl quis sapien. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Sed eleifend mattis lacus, eget pharetra erat vestibulum non. Mauris tellus quam, lobortis ut elit quis, varius aliquet erat. Proin mauris dolor, varius eu commodo quis, porta sed erat. Morbi ut nisl accumsan, sollicitudin nisi ac, tempor leo. Nulla facilisis nibh dolor, vitae tincidunt ex hendrerit ut. Suspendisse quis neque tellus. Maecenas ut odio nec risus sagittis gravida. Phasellus feugiat cursus dapibus. Morbi efficitur condimentum elit sed pharetra. Mauris ornare pharetra nisl, ac gravida dolor condimentum at. Aliquam lobortis finibus lorem, id pretium libero vestibulum vitae.
+
+Vestibulum pretium eleifend justo, sit amet imperdiet justo faucibus a. Suspendisse consectetur ipsum quis purus rutrum imperdiet. Nam nibh ex, tincidunt nec blandit sed, venenatis vitae mauris. Integer rutrum tincidunt tortor, ut mattis tortor fermentum ac. Duis congue dui sed est suscipit, nec semper lectus lobortis. Vestibulum felis ante, hendrerit ac venenatis sed, tincidunt iaculis augue. Duis pharetra blandit metus sed semper. Fusce ornare varius placerat. Vivamus sollicitudin lacus id nunc sollicitudin, a viverra felis pellentesque. Phasellus a felis in sapien facilisis imperdiet. Quisque ac purus dapibus metus fermentum mollis.
+
+Donec diam nisl, faucibus feugiat condimentum vel, eleifend eu magna. Sed tempus, justo a bibendum suscipit, sem nunc viverra enim, id semper nunc eros sit amet mauris. Praesent ultrices porttitor ex eu lacinia. Integer quis aliquet nibh, sit amet porttitor elit. Curabitur vel elementum quam. Sed fermentum vehicula egestas. In metus massa, sodales vel mauris id, finibus dapibus metus. Donec lectus ante, ullamcorper non posuere in, fringilla non velit. Quisque cursus interdum elementum. Phasellus vestibulum massa non sem fringilla congue. Maecenas nec arcu diam. Vivamus id suscipit odio, vel condimentum leo. Nulla sed dolor mattis, interdum lacus imperdiet, interdum nulla. Maecenas sagittis, ipsum vitae dapibus luctus, ipsum dui tempus tortor, quis porta dolor dui in sapien. Nulla vel porta neque, quis auctor massa.
+
+Suspendisse viverra nec risus fermentum maximus. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent eu placerat nulla. Donec varius faucibus posuere. Nulla fermentum ultricies mauris at euismod. In hac habitasse platea dictumst. Proin et nisl purus. Cras quis risus sit amet lectus maximus semper. Quisque pellentesque luctus erat convallis maximus. Sed et lacus vel sapien pellentesque accumsan id elementum dolor.
+
+Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nulla eget aliquet nunc. Aenean et tincidunt massa, sed maximus leo. Aliquam in cursus tortor. Praesent ornare ante vitae leo pretium cursus. Nunc sodales neque urna, eu tincidunt dui placerat at. Integer vel arcu vel velit euismod sollicitudin quis sit amet ligula. Nunc facilisis, eros eu pharetra mollis, magna odio rutrum leo, eget placerat erat massa non metus. Nunc nec auctor felis.
+
+Vestibulum et tempus ipsum. Duis molestie felis et ex scelerisque, quis faucibus dolor viverra. Suspendisse rhoncus volutpat dolor. Duis ac augue iaculis, vulputate dui sit amet, gravida ante. Mauris porttitor purus eu ligula tempus volutpat. Aenean quam neque, venenatis id est et, blandit pharetra enim. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent vitae malesuada dui. Praesent tortor ligula, tincidunt at suscipit laoreet, tristique vitae magna. Phasellus gravida augue lacinia velit cursus lacinia.
+
+Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Quisque vitae blandit dui, a facilisis sapien. Praesent malesuada massa sed orci lacinia vulputate. Cras a est vitae quam sodales pellentesque. Nam posuere condimentum mollis. Quisque ultricies nisl libero, vel scelerisque nunc interdum non. In porttitor consectetur placerat. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Quisque et tortor sed nibh scelerisque suscipit. Integer a auctor velit, in tempor magna. Curabitur iaculis ut purus vel consequat.
+
+Quisque at consequat turpis, ut aliquet dolor. Aenean quis mauris sit amet diam tempor porta ac eu purus. Maecenas commodo faucibus interdum. Praesent in tincidunt felis, vel tincidunt nibh. Integer posuere enim a purus tristique tincidunt. Etiam nisi odio, vehicula sed mauris vel, ornare blandit augue. Fusce finibus mi lorem, eu egestas lectus commodo quis. Nunc scelerisque, erat mollis varius congue, ante ligula suscipit neque, nec ultrices urna leo rutrum nibh. Vivamus pulvinar lacinia elit at lobortis. Sed molestie turpis dapibus sapien imperdiet, vitae scelerisque ligula volutpat. Nam fermentum ipsum est, ut vulputate arcu maximus eu. Sed tristique, massa sit amet dictum bibendum, neque tellus volutpat ipsum, ut faucibus purus arcu vel quam. Vivamus laoreet risus non nisi ullamcorper, molestie tincidunt diam scelerisque. Sed eget congue velit.
+
+Sed eu dapibus eros. In at est augue. Nunc malesuada, tortor quis molestie euismod, erat sem porta arcu, vitae facilisis purus ligula vitae mauris. Aliquam erat volutpat. Nunc scelerisque porta eros, finibus elementum ipsum ultricies ut. Quisque vestibulum libero quis lectus semper suscipit. Sed malesuada eu lorem in placerat.
+
+Nunc metus arcu, rutrum eu varius in, auctor vitae diam. Maecenas ultricies faucibus hendrerit. Integer tincidunt, orci a bibendum dapibus, nulla tellus dapibus urna, vel sodales sapien neque eget mi. Nunc elementum enim sapien, sed egestas diam eleifend sit amet. Mauris sapien ligula, finibus nec augue in, volutpat dictum velit. Nunc a ligula vitae massa pellentesque sollicitudin. Aliquam rutrum porttitor volutpat. Proin convallis sollicitudin commodo. Duis eu rutrum risus, a auctor felis. Proin volutpat arcu velit, sed condimentum magna varius sit amet. In et sapien efficitur, iaculis justo eu, euismod nibh.
+
+Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Aenean hendrerit eros non purus dapibus, vel laoreet ipsum tincidunt. Duis scelerisque sollicitudin rutrum. Pellentesque rutrum ultricies sem, vitae mollis elit efficitur ut. Ut consectetur scelerisque ultrices. Vivamus id urna scelerisque nibh interdum mattis. In tristique tortor ut dictum laoreet. Quisque fermentum, augue blandit lacinia luctus, ligula nunc commodo velit, accumsan tempus orci quam ac nibh. Praesent ante risus, pulvinar in nisl ac, malesuada porttitor magna.
+
+Nam nunc ex, condimentum ac volutpat ac, pretium sed tortor. Integer venenatis, nunc id ullamcorper aliquam, eros arcu blandit sapien, id maximus erat nunc sed ligula. Proin tincidunt libero et purus tincidunt maximus. Nulla laoreet nisl eu velit pharetra, id porttitor mauris dictum. Mauris blandit pharetra lectus sit amet sagittis. In sit amet lorem hendrerit, varius justo eu, ultricies odio. Curabitur ante nibh, scelerisque at elementum a, condimentum viverra tortor. Donec tellus arcu, ultricies at posuere at, sagittis at sem. Phasellus non eros eu dui blandit fringilla. Maecenas hendrerit arcu porta, feugiat neque ac, venenatis ipsum. Nam ut elit nec lectus sodales posuere. Proin aliquet accumsan sapien, non porta quam. Praesent vulputate ante ut malesuada efficitur.
+
+Nullam pulvinar arcu orci, semper vehicula nibh fringilla ac. Duis porta ullamcorper risus sed facilisis. In vitae consectetur sapien, eget porttitor velit. Ut ac leo luctus, gravida erat sit amet, fermentum orci. Proin feugiat orci eget erat sagittis, sed aliquet ipsum luctus. Morbi eu est tristique, consequat neque eu, suscipit odio. Maecenas faucibus lacinia laoreet. Nam ut tellus odio. Sed facilisis tincidunt sodales. Proin hendrerit dolor quis nulla elementum, ut pulvinar ex tincidunt. Quisque vitae purus ac risus sagittis fringilla. Phasellus fermentum faucibus suscipit.
+
+Donec congue enim id efficitur lacinia. Praesent tempus, velit a euismod ornare, lorem felis pharetra nulla, in aliquam diam quam in nibh. Nulla facilisi. Morbi malesuada urna nibh, nec semper libero malesuada non. Maecenas quis tortor vitae nisl condimentum ornare. Quisque convallis suscipit metus vel malesuada. Vivamus fringilla mattis mi eget luctus. Fusce ex arcu, efficitur vitae elit eget, aliquam faucibus lacus. Sed interdum nisl nec libero aliquam lobortis. Aenean semper, magna non lacinia rhoncus, metus lacus commodo sapien, at molestie magna urna ac magna. Duis elementum rutrum erat id sodales.
+
+Suspendisse bibendum quam ut augue faucibus semper. Maecenas sed purus consequat, sagittis quam nec, dapibus ante. Nunc erat nunc, ultrices nec nisi vel, cursus consequat sem. Vivamus molestie turpis ac sem malesuada luctus. Morbi laoreet sit amet odio id finibus. Praesent lacus justo, rhoncus non nulla commodo, posuere sodales sem. Aliquam condimentum porta condimentum. Integer congue eros risus, sed pharetra odio vestibulum vel. Mauris sagittis orci et lacus finibus luctus ut nec enim. Pellentesque magna massa, tristique a lectus et, pharetra placerat mauris. Donec eu est in leo sollicitudin elementum vitae tristique ipsum. Donec pulvinar consequat enim. Nunc cursus lorem ut dapibus maximus. Quisque vulputate ligula est, vitae vestibulum ante dapibus a.
+
+Fusce tempus nibh eget euismod ultrices. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Vestibulum mattis maximus nulla, ac consectetur erat scelerisque sed. Maecenas faucibus dui eros, finibus venenatis eros semper non. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed iaculis nisl risus, vitae blandit tortor mattis in. Morbi nisi enim, pulvinar eget pellentesque ac, faucibus in mi. Sed in mollis eros, sit amet maximus arcu.
+
+Nam luctus velit sed ipsum pharetra, eu mattis diam tempus. Phasellus volutpat nisi vitae imperdiet rhoncus. Mauris finibus ut mauris et euismod. Nullam sed efficitur libero. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Duis erat lectus, lacinia sit amet tempor ut, auctor et odio. Morbi tristique euismod erat, quis venenatis justo. Duis eu arcu placerat, pulvinar velit at, fermentum enim. Sed rutrum ipsum non ipsum condimentum consequat. Suspendisse vitae commodo turpis, eget imperdiet risus. Aenean fringilla, augue id hendrerit auctor, urna mi eleifend magna, in semper neque purus eu arcu. Suspendisse et leo mi. Donec consequat imperdiet urna, sed aliquam eros mollis in. Nullam condimentum fringilla hendrerit. Suspendisse ornare tincidunt lacus, id tristique tellus porta at. Suspendisse posuere sagittis erat, quis viverra diam varius in.
+
+Cras eget ex nec tortor iaculis ultricies a id urna. In neque ante, gravida sed rutrum in, finibus volutpat mi. Pellentesque malesuada nunc ex, vitae suscipit urna bibendum a. Etiam eleifend augue dui, ut laoreet nisi molestie et. Phasellus eu leo erat. Pellentesque in lorem ut velit ullamcorper laoreet luctus nec diam. Ut vulputate iaculis scelerisque. Praesent luctus justo justo, vulputate condimentum ipsum porttitor eget. Proin sit amet fermentum urna, sed pellentesque tellus. Suspendisse eu ullamcorper eros, ac finibus tellus. In auctor fermentum lectus a maximus. Pellentesque a pulvinar velit. Aliquam sed magna elementum, ornare ligula eu, porta odio. Nullam efficitur tortor nunc, sit amet finibus dui ornare tempor.
+
+Vestibulum enim dolor, mollis sed pulvinar vel, venenatis et justo. Cras porttitor id augue eget porta. Praesent tempor enim ut arcu dapibus molestie. Sed facilisis tortor vel nunc ultricies, non egestas ligula laoreet. Aliquam aliquet sit amet ex eu consequat. Ut ornare lectus non nisl iaculis bibendum. Aliquam dignissim, tellus dictum maximus tempus, purus metus fringilla purus, sed mattis enim justo quis mi. Donec at ipsum non eros sodales convallis. Aliquam tincidunt risus nisl, commodo pharetra nunc imperdiet ac. Nulla a elementum turpis, vel pharetra erat. Nulla interdum sed lacus quis elementum. Suspendisse blandit imperdiet erat, nec sollicitudin libero blandit ac. Suspendisse consectetur lacinia odio, eu pharetra elit fermentum non. Sed nec neque urna. Quisque vel sem eu risus tincidunt eleifend. In dictum efficitur bibendum.
+
+Cras ac quam eleifend, suscipit diam sit amet, maximus quam. Proin sit amet libero eu urna efficitur sollicitudin. Fusce nec finibus nulla, vitae ornare sem. Vivamus venenatis porttitor magna, sed venenatis ante placerat quis. Fusce et nulla hendrerit, semper nibh nec, auctor mi. Aenean sit amet leo eget mauris accumsan luctus. Cras tortor metus, vehicula ac ultricies eu, egestas ut massa. Fusce sollicitudin ex pretium, dapibus urna nec, varius nibh. Proin molestie quam metus, a volutpat arcu consectetur eget. Nam sagittis, odio sed rhoncus egestas, diam nibh efficitur nisi, convallis ultrices justo eros non neque. Proin vulputate tincidunt ipsum, vitae tristique risus. Aliquam feugiat luctus dui, id elementum nisl finibus at. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae;
+
+Praesent et velit est. Donec odio turpis, accumsan imperdiet iaculis in, mollis vitae orci. Sed sed molestie elit, at tristique lorem. Suspendisse consectetur ante id feugiat condimentum. Integer nec mauris sed lorem vestibulum commodo eu eget nunc. Vivamus faucibus, libero fermentum elementum vehicula, orci risus efficitur risus, ut posuere mi nisl non elit. Suspendisse sit amet libero magna. Integer sit amet mi et nulla euismod luctus id sit amet felis.
+
+Nulla facilisi. Sed fermentum urna quam, sed pharetra tellus sodales blandit. Vivamus sodales dui nec consequat euismod. Vivamus aliquet gravida metus, vitae consequat augue bibendum id. Curabitur fermentum laoreet turpis, ut interdum lectus dictum vitae. Fusce faucibus nisi ex, vitae sollicitudin turpis cursus at. Cras sodales tincidunt vehicula. Sed vitae leo quis nisl lacinia auctor. Proin faucibus elementum nibh, laoreet lobortis risus ornare sed.
+
+Vestibulum venenatis, augue ac tristique eleifend, tellus arcu imperdiet magna, ac eleifend lacus ipsum aliquam urna. Nam laoreet erat non rutrum ullamcorper. Mauris hendrerit aliquet tortor malesuada porttitor. Proin accumsan dolor porttitor augue ullamcorper, vitae vestibulum eros dapibus. Cras sagittis lorem lacus, ut rutrum lorem bibendum id. Praesent tristique semper ornare. Morbi posuere sit amet risus et faucibus. Maecenas a velit at nibh consequat pharetra sit amet eget enim. Morbi commodo enim magna, ac pretium sapien pellentesque eu. Mauris aliquet nisi venenatis, consequat purus at, aliquet risus.
+
+Morbi posuere erat ipsum, sit amet consequat enim consectetur in. Sed risus arcu, elementum dignissim tincidunt eu, efficitur feugiat mauris. Maecenas a mattis leo. Duis porta et felis sed ultricies. Curabitur eu aliquet lectus. Nunc ante felis, blandit eu lobortis sit amet, tempor eget urna. Mauris non metus nec metus viverra feugiat. Donec pellentesque tortor ac vehicula porttitor. Aliquam nunc sapien, dignissim nec tincidunt a, sollicitudin at nunc. Sed ut leo purus.
+
+In ullamcorper neque ex, eu sodales eros tincidunt sed. Quisque aliquam elit pretium, varius erat ac, iaculis turpis. Mauris id odio vestibulum, dictum sem sed, pulvinar felis. Ut magna turpis, hendrerit ac faucibus vel, euismod convallis velit. Maecenas rhoncus nisl lacus, nec dignissim leo imperdiet ac. Duis sed ante ut purus cursus ultrices ut eu nisi. Donec ut ante nibh. Vivamus lobortis purus leo, et vehicula magna consectetur a. Suspendisse gravida semper ligula vitae facilisis. Ut sit amet vestibulum elit, id sodales diam. Suspendisse potenti. Proin dapibus scelerisque turpis at dignissim.
+
+Interdum et malesuada fames ac ante ipsum primis in faucibus. Sed accumsan vulputate metus ut mattis. Ut semper porttitor justo in laoreet. Mauris sit amet mollis magna, vel condimentum elit. Quisque non aliquet justo. Fusce eget leo at enim commodo molestie. Praesent ipsum nulla, ultrices eget ex in, tristique ullamcorper felis. Nulla posuere commodo semper. Nam id mauris sit amet lacus luctus suscipit. Sed scelerisque gravida tristique. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer condimentum nulla semper, convallis leo sit amet, tempor nisl. Cras semper diam ac leo ornare aliquet et et lectus. Fusce sed nunc vitae nunc auctor semper et ac arcu.
+
+Aenean molestie nibh varius nisi consectetur elementum. Praesent condimentum, mi sit amet pretium suscipit, nisl est pharetra metus, sit amet feugiat neque quam vel purus. Nunc vehicula vestibulum mi eget gravida. Nullam consequat odio eget feugiat faucibus. Quisque pretium condimentum sollicitudin. Vestibulum vitae sem ut velit accumsan varius sit amet a tortor. Nunc eu mi a lorem varius bibendum vitae quis lacus. Maecenas gravida tristique lectus at pharetra. Aenean vehicula vehicula ex ut accumsan.
+
+In at consequat massa. Mauris finibus tempor nisi. Fusce a congue nulla. Aenean tempor mi vel ligula consectetur elementum. Nam scelerisque nisl et nulla faucibus, a molestie nisi bibendum. Curabitur venenatis lacus vestibulum, ultricies tellus et, elementum mauris. Pellentesque facilisis id libero id cursus. Maecenas lacinia quam quis arcu tristique aliquet. Fusce eu elit lobortis, accumsan dolor at, finibus nisl. Suspendisse facilisis dictum egestas. Cras volutpat diam ut nulla eleifend efficitur. Donec vel dapibus velit. Curabitur in mollis enim, sit amet suscipit dui. Nullam suscipit, mauris et suscipit molestie, nisl nulla elementum urna, ac varius dolor elit eget libero. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
+
+Vivamus vel dui ac lectus scelerisque elementum dictum nec orci. Suspendisse et venenatis arcu. Nullam velit orci, volutpat sed convallis in, pretium vel felis. Phasellus sollicitudin urna nec est porta, a consectetur massa egestas. Vivamus in malesuada lacus. Ut pellentesque sagittis velit, gravida vulputate neque efficitur sed. Vestibulum vitae libero et dui iaculis bibendum a nec velit. In aliquet ultricies pellentesque. Nunc suscipit, nulla id maximus viverra, nisi turpis dignissim nunc, sit amet auctor sapien ipsum sit amet magna. Mauris pretium velit congue turpis mollis faucibus. Duis non nunc sapien. Vivamus facilisis lacinia lectus, et tempor elit.
+
+Duis mi ligula, dignissim non sapien quis, congue consequat enim. Aenean lobortis purus ac tellus maximus efficitur. Cras iaculis erat sagittis feugiat viverra. Maecenas viverra, orci eu sodales porttitor, libero arcu efficitur nulla, a pellentesque nunc sapien non mi. Ut dignissim imperdiet vehicula. Nam eu sapien convallis, pulvinar felis id, sodales lorem. Praesent ornare tristique mi nec posuere. Pellentesque egestas diam nec condimentum fringilla. Nunc pulvinar urna aliquet ex vehicula suscipit. Sed pretium orci nunc, quis gravida ipsum consequat sit amet. Integer sit amet libero eu mauris ultricies auctor eu nec mi. Donec pulvinar eros erat, eget molestie neque dictum sit amet. Sed vitae venenatis nisi, tincidunt ultricies enim. Nam et velit gravida, malesuada dolor eget, feugiat massa. Morbi vel pellentesque arcu. Sed vulputate libero vel ipsum placerat posuere.
+
+Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Cras mattis ultrices enim id posuere. Proin sollicitudin posuere lectus, in tempus odio porta quis. Etiam semper sapien elit, eu imperdiet tortor iaculis sed. Ut id faucibus arcu. Suspendisse tincidunt, tortor sed dapibus ullamcorper, odio ex egestas purus, eget posuere ante elit quis augue. Nulla facilisi. Pellentesque feugiat euismod elit, eu luctus tellus feugiat a. Aliquam cursus rhoncus mauris at consequat. Morbi dapibus metus id est bibendum, et mollis eros lobortis. Nulla erat turpis, sodales sit amet dictum id, pharetra sed magna. Proin efficitur erat id libero congue pellentesque eu eu massa. Ut a lobortis nunc. Aliquam sollicitudin diam vel magna tempor convallis. Vivamus non tempus orci. Fusce lacinia, ipsum vitae finibus imperdiet, elit lorem pretium elit, tincidunt pretium odio erat in diam.
+
+Morbi suscipit rhoncus odio a molestie. Donec eleifend ipsum eget efficitur varius. Etiam faucibus pretium urna, sed fermentum magna feugiat ut. Aenean ornare gravida vehicula. Aenean sagittis est pretium mollis facilisis. Sed scelerisque placerat erat, vel lacinia nibh feugiat vitae. Praesent vel dapibus lacus. Nunc bibendum tempor lorem et faucibus. Praesent mattis blandit neque interdum varius. Nunc pharetra orci sed ipsum tincidunt, non suscipit nisl malesuada. Maecenas tincidunt libero sit amet mattis lacinia.
+
+Duis accumsan sem erat, a ornare nibh faucibus vulputate. Ut rutrum scelerisque sapien vitae consectetur. Aliquam quis tristique tortor. Maecenas nibh lacus, varius a blandit eu, dapibus sit amet sem. Vivamus accumsan, libero sit amet suscipit elementum, nisl magna fermentum ipsum, laoreet elementum orci nisl et ligula. Curabitur in ligula placerat, scelerisque tellus in, ultricies nibh. Nam nunc libero, egestas at mauris dignissim, consectetur congue urna. Suspendisse molestie diam nec ipsum molestie, eu rutrum nulla sollicitudin. Duis quis facilisis arcu, in semper leo. Quisque viverra ultricies orci, eu mattis eros pulvinar mattis. Pellentesque vel finibus ante. Praesent ac mi facilisis, mollis augue vitae, rhoncus mauris. Pellentesque commodo vestibulum maximus. Donec accumsan urna id iaculis malesuada. Integer varius elit nec orci pulvinar, ut ultrices metus vulputate.
+
+Cras posuere neque mauris, in dignissim magna tincidunt sit amet. Aliquam sit amet mi dolor. Quisque elementum molestie posuere. Vestibulum tempor mollis purus, vitae vestibulum purus tempor quis. Aenean ut augue massa. Suspendisse tincidunt tincidunt erat, in consequat massa vulputate id. Duis cursus eget enim eu tristique. Proin quis nulla sed velit commodo dignissim. Praesent lacinia ante a ante lobortis, id imperdiet augue rutrum. Quisque purus lacus, sollicitudin euismod venenatis sit amet, eleifend nec eros. Sed luctus faucibus dolor ut eleifend. Quisque tincidunt ante elit, nec vulputate eros fermentum vel. In posuere leo vel risus efficitur mollis. Phasellus imperdiet pharetra orci.
+
+Fusce auctor sagittis turpis, nec pharetra dolor pharetra vel. Vestibulum luctus sagittis gravida. Nulla quam erat, sagittis non elit id, gravida hendrerit leo. In eleifend elit at efficitur blandit. Sed quis dignissim nulla. Sed in dapibus tortor. Vivamus lacinia, ligula vitae cursus porttitor, dui urna condimentum nisi, quis hendrerit dolor eros vel neque. Curabitur eget lectus vel elit lobortis scelerisque. Etiam congue, risus feugiat faucibus rutrum, urna orci egestas felis, auctor finibus est urna id eros. Morbi rutrum, arcu quis dictum euismod, turpis urna lacinia enim, ac malesuada justo elit non lorem. Sed vel orci nec ex rutrum faucibus. Praesent nisl sapien, ultrices quis justo eu, molestie suscipit ante. Donec gravida quis purus eu dignissim. Donec vulputate convallis ipsum vitae pellentesque. Pellentesque ut urna mi.
+
+In id quam vel libero mollis commodo a ac sem. Sed ornare elit est, molestie condimentum justo mattis sed. Vivamus tempor velit sit amet libero venenatis ultrices. Cras faucibus orci venenatis diam fermentum commodo. Donec pulvinar augue lacus, vitae dictum nisl auctor sed. Suspendisse ut nisi porttitor, porta neque id, tincidunt dolor. Fusce mollis laoreet arcu nec ultricies. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Duis ultricies nisl eget dui semper dapibus. Aenean vitae lacus est. Proin vel erat sed ex euismod facilisis. Interdum et malesuada fames ac ante ipsum primis in faucibus.
+
+Ut non fermentum tellus, sed vehicula augue. Etiam blandit lacus sapien, luctus sagittis leo auctor sit amet. Sed ipsum massa, eleifend sit amet augue non, tristique vulputate lacus. Suspendisse sit amet leo odio. Quisque dignissim, erat non eleifend accumsan, nisl diam blandit neque, eget sodales enim ipsum in lorem. Praesent erat dolor, pulvinar vitae turpis sit amet, auctor dignissim ligula. Fusce eget commodo massa. Nullam sit amet tincidunt libero, id vehicula erat. Nulla a fermentum elit. Aenean maximus luctus auctor. Integer sit amet maximus diam, ac lobortis sapien.
+
+Sed at ultricies velit, in laoreet dui. Pellentesque sit amet euismod mauris. Fusce euismod vehicula mauris. Phasellus magna nisi, maximus vel elit et, fringilla aliquet elit. Proin varius, ipsum eget scelerisque malesuada, ipsum felis vulputate tortor, eu luctus justo ipsum sit amet elit. Suspendisse lacus leo, mollis et malesuada eget, pharetra nec massa. Donec tristique fringilla pharetra. Maecenas malesuada mi turpis. Nulla id mauris purus.
+
+Nullam rutrum in ex non placerat. Cras rutrum nulla sit amet felis ultricies feugiat. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Suspendisse laoreet turpis eu eros vestibulum, cursus blandit arcu ultricies. Ut et quam eu diam gravida pulvinar a non dui. Sed ut lectus sem. In tristique finibus elit sit amet aliquet. Vestibulum convallis nunc arcu, in euismod ante vulputate et. Etiam tempor enim non iaculis elementum. Suspendisse feugiat sem non nisi imperdiet, eu convallis ante malesuada. Duis consectetur, ligula at viverra vehicula, neque neque aliquam arcu, sed eleifend elit arcu non diam. Fusce in magna et neque ultrices varius. Sed ante nibh, egestas id ligula sit amet, ullamcorper aliquet erat. Nulla dictum neque et sagittis blandit. Curabitur tincidunt sollicitudin ligula in consectetur. Fusce velit est, aliquet eu tempor ut, lobortis quis justo.
+
+Proin vel augue ut ex viverra lobortis. Maecenas ultricies vulputate metus, in consectetur dolor gravida quis. Suspendisse potenti. Curabitur vitae mauris a dolor efficitur accumsan eget eget tellus. Suspendisse tristique quam ac pellentesque viverra. Curabitur ex turpis, consequat non est at, finibus ultricies sem. Duis bibendum mi vel libero aliquam scelerisque. Sed eget rhoncus sapien. In dictum, neque vitae efficitur accumsan, nulla ipsum ultrices eros, vitae porttitor massa ex vel augue. Curabitur aliquet dui et urna dapibus, a elementum diam dapibus. Pellentesque leo libero, ornare vitae fringilla non, venenatis vitae massa. Interdum et malesuada fames ac ante ipsum primis in faucibus. Suspendisse dapibus nisi ut nunc vulputate pellentesque. Suspendisse auctor erat non viverra fringilla. Pellentesque feugiat dictum urna, eu auctor metus aliquam vitae. Nunc nulla sem, maximus in lacinia non, viverra eu nulla.
+
+In fringilla cursus nisi vel tempus. Mauris blandit leo vel facilisis blandit. Quisque auctor magna quis justo commodo, in laoreet justo pharetra. In hac habitasse platea dictumst. Cras imperdiet cursus eros, quis rhoncus neque viverra in. Praesent rutrum aliquam euismod. In vitae elit blandit erat efficitur vehicula vitae quis lectus. Fusce consectetur nibh sit amet felis placerat consectetur. Morbi leo risus, dictum vel vestibulum vel, tempor id erat. Suspendisse facilisis massa nec risus maximus, nec semper purus fringilla. Cras dapibus diam eu elit sollicitudin, in tempor tellus accumsan. Proin pulvinar varius sollicitudin. Nullam quis tellus ac est imperdiet malesuada.
+
+Morbi sem nulla, egestas a luctus at, egestas id magna. Pellentesque ac tristique neque, in vestibulum enim. Fusce turpis nisi, commodo a justo id, fermentum vulputate sem. Phasellus fermentum elementum dui, id dictum leo fermentum et. Fusce porttitor enim odio, sit amet porttitor dolor luctus eget. Etiam ligula libero, finibus vitae enim vitae, facilisis fringilla mi. Fusce eget fermentum dui.
+
+Cras quis ipsum ultricies, tincidunt nibh non, commodo nisl. In commodo diam et quam porttitor, non sagittis ante feugiat. Vestibulum ultricies elit non lectus ultrices, a egestas dui tempus. Etiam faucibus ipsum ante, interdum condimentum ligula pellentesque at. Integer ornare bibendum libero vel accumsan. Donec ornare finibus diam fringilla pharetra. Nam pellentesque nibh quis diam tincidunt faucibus. Sed tortor arcu, posuere id enim accumsan, tristique lobortis velit. Suspendisse massa turpis, maximus ut eros vitae, sollicitudin efficitur libero. Phasellus ut scelerisque nisl. Ut ligula risus, venenatis at orci non, hendrerit aliquam mi. Vestibulum a varius ante, ac pulvinar diam. Integer hendrerit fringilla erat, eu egestas mi fringilla molestie. Aliquam erat volutpat. Nunc ut feugiat elit. Etiam a bibendum dui.
+
+Morbi ornare molestie lobortis. Aliquam erat nunc, placerat eget volutpat in, vehicula nec tortor. Maecenas et libero nec nibh mollis bibendum quis et neque. Fusce eleifend eros quis consequat hendrerit. Nunc ac dolor odio. Nullam condimentum ut dolor id venenatis. Quisque ultrices, urna quis commodo elementum, augue lectus tristique turpis, at lobortis nibh dolor sit amet lectus. Curabitur accumsan tortor ex, ut sagittis tortor volutpat a. Morbi justo diam, iaculis et felis vel, pretium porttitor mi. Cras volutpat enim ut posuere sollicitudin. Nulla suscipit diam ut varius volutpat. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+
+Duis ut convallis est, ac cursus purus. Fusce euismod gravida diam non lacinia. Pellentesque quis arcu fermentum, elementum erat et, porttitor sem. Sed sed mauris sed urna auctor ultricies. Mauris vel sodales purus. Vivamus semper lorem nec ligula ultricies, lobortis lobortis metus scelerisque. Morbi in dolor hendrerit metus sodales mollis sed eget neque. Nam sollicitudin, nulla id consequat malesuada, ligula nulla imperdiet lacus, nec pellentesque nunc leo convallis elit. Aenean vestibulum ipsum quis nulla laoreet, ut convallis velit sodales. Quisque dolor tellus, dignissim sit amet nulla ut, mollis vulputate ligula. Sed tempus porta rutrum. Sed tincidunt justo eget est ullamcorper, quis tempor odio convallis.
+
+Pellentesque tortor felis, euismod a orci at, posuere tristique neque. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Integer congue elit dignissim dolor feugiat, non pellentesque quam aliquam. Aenean porttitor, mi nec aliquet vehicula, magna diam euismod elit, gravida commodo nunc erat ut nulla. Mauris arcu odio, dictum a fermentum et, tempor quis nisl. Vestibulum congue rhoncus semper. Suspendisse ut convallis ante, non consequat nibh. Vivamus blandit laoreet accumsan. Maecenas feugiat congue mi ac aliquet. Nunc nisl massa, facilisis vel fringilla et, vestibulum ac lorem. Proin eget ipsum id turpis hendrerit pharetra in eget nisi. Cras tellus arcu, tristique id dictum ornare, tempus et ante. Aenean aliquam elementum metus vitae pretium.
+
+Cras et purus tellus. Quisque ipsum enim, sagittis sit amet vulputate in, sollicitudin in felis. Vivamus a commodo nisi. Aliquam ligula neque, venenatis vel risus id, pellentesque gravida sapien. Donec leo ipsum, tincidunt non suscipit eu, scelerisque sit amet tortor. Donec sit amet nisl tristique, placerat ex id, aliquam nibh. Etiam fringilla nisl sem, ac pellentesque ex lobortis eget.
+
+Donec luctus dui sit amet imperdiet accumsan. Sed tempus rutrum finibus. Nunc aliquet vitae ligula non tempus. Pellentesque mauris tortor, ullamcorper at velit in, consectetur commodo nisi. Vestibulum tempor massa quis est ultricies lobortis. Aliquam et elit bibendum, sodales nulla in, sollicitudin tellus. Morbi rhoncus eros nec quam ultricies varius. Praesent vitae venenatis velit, eget dignissim velit. Aliquam pellentesque, urna vitae dictum tristique, nibh mauris vehicula felis, ut eleifend orci magna a nulla. Fusce vel laoreet dolor, a imperdiet lacus. Vivamus at pharetra tortor. Aliquam ut ultricies magna, eget vehicula neque.
+
+Cras laoreet facilisis varius. Donec congue tempor orci, euismod sagittis nulla ornare et. Integer sollicitudin id felis ac mollis. Aliquam eget elit in nulla posuere consequat. Mauris nec hendrerit libero, id elementum diam. Donec rhoncus consectetur eros, non condimentum sapien malesuada sed. Pellentesque sagittis enim luctus fermentum sodales. Nam condimentum molestie nulla quis cursus. Quisque vitae sollicitudin diam. Fusce mattis elementum lectus a rutrum. Donec egestas dui eros, ut dictum metus tincidunt ut. Nullam at eros est. Mauris mollis vestibulum velit vel facilisis. In accumsan nisi in lorem commodo maximus.
+
+Nam nec libero dictum, cursus eros quis, ultricies metus. Sed in leo sapien. Suspendisse sollicitudin orci vitae interdum iaculis. Nullam cursus id nunc eget scelerisque. Curabitur non tincidunt elit. Duis gravida auctor pellentesque. Integer sodales ultrices nibh a ornare. Phasellus efficitur mi arcu, at pulvinar turpis gravida eu. Aliquam vitae posuere urna. Sed iaculis aliquet ipsum vel mollis.
+
+Pellentesque interdum bibendum eros vel convallis. Sed iaculis erat tortor, quis suscipit quam laoreet vitae. Sed ut augue dignissim, viverra diam molestie, vehicula est. Ut facilisis aliquet ipsum, non finibus mauris pretium non. Donec vel dapibus tellus. Proin at justo tellus. Praesent eget risus quis urna maximus dictum. Cras sapien ipsum, ullamcorper eget augue nec, pellentesque tempus ante. Aenean ut mattis justo. Fusce congue massa a augue dapibus dapibus. Maecenas interdum enim et ligula tincidunt accumsan.
+
+Aliquam et tempor arcu. Sed auctor lacus justo, ut dictum diam auctor sit amet. Quisque sed quam rutrum, pulvinar justo non, dignissim felis. Donec in est eget nulla convallis tristique ut nec nunc. Maecenas pulvinar felis sem, at pulvinar augue sodales non. In magna ex, mollis id finibus sit amet, imperdiet a nisi. Fusce ullamcorper, leo et suscipit consectetur, ex odio sodales elit, scelerisque scelerisque turpis risus et ex. Morbi sed ultrices ex. Duis vel arcu rutrum, volutpat dui vel, luctus ligula. Maecenas nibh ante, porttitor vestibulum quam ut, consequat consectetur elit.
+
+Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Duis consequat lorem vitae massa volutpat, eu venenatis orci placerat. Integer varius sagittis volutpat. In vel mollis ante. Maecenas eget vestibulum dolor, ut aliquam sapien. Nam efficitur orci laoreet, lobortis nunc eu, pretium quam. Suspendisse et purus a quam vestibulum faucibus a tristique magna. Nulla at enim gravida massa eleifend molestie vitae quis erat. Integer tristique nisi libero, et varius lacus posuere eget. Donec interdum sed nisi a congue. Nam sodales mattis pharetra. Curabitur gravida sapien nec viverra posuere. Duis a dolor vulputate, sollicitudin mi vitae, accumsan erat. Sed leo neque, rhoncus posuere fringilla vitae, porttitor vel nulla.
+
+In hac habitasse platea dictumst. Etiam a mollis dolor, nec suscipit ex. Aenean nec bibendum velit. Donec fermentum, nisl vel porta semper, nunc velit porttitor felis, egestas malesuada magna tellus vel tortor. Integer fermentum nulla at eros fringilla, sit amet fringilla lectus luctus. Nulla scelerisque arcu ac rhoncus iaculis. Proin lobortis tincidunt velit, at mattis augue eleifend id. Sed pellentesque semper diam sit amet ultricies. Etiam felis lectus, molestie id orci quis, porttitor dictum mauris. Nulla facilisi. Fusce tempus urna quis sollicitudin blandit. Phasellus sed sodales est, quis viverra velit. Duis eget auctor risus. Aliquam tempor turpis quis turpis aliquet, id viverra ipsum vestibulum. Integer ac finibus tellus.
+
+Donec scelerisque placerat metus, ac tincidunt turpis ornare non. Aenean dignissim pharetra ex vel dignissim. Sed egestas tristique lacus, et convallis nibh vestibulum rutrum. Nulla facilisi. Sed posuere volutpat ex, vel consequat nunc dapibus at. Curabitur sit amet dapibus risus. Fusce dui est, varius venenatis libero sit amet, tincidunt facilisis felis. Morbi pharetra volutpat mauris vitae varius. Nam vestibulum, arcu at efficitur facilisis, ex mauris ultricies sem, at interdum metus nunc at est. Phasellus id leo eu lacus aliquet gravida eu ac tortor.
+
+Etiam dapibus sem eu tellus luctus, at laoreet enim feugiat. Morbi mollis justo quam, in egestas ex pulvinar a. Etiam et aliquam metus. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nam sit amet elit sed nunc vestibulum iaculis ut vel augue. Quisque risus metus, ultrices id ipsum sit amet, dapibus consequat leo. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed nulla sapien, consequat rhoncus accumsan vitae, finibus a mauris. Vivamus at odio arcu. Nam in lacus non dui laoreet pulvinar. Sed quis tempor urna, ut condimentum turpis. Cras vulputate eros erat, sit amet auctor orci blandit nec. Integer consectetur fringilla rhoncus. Suspendisse suscipit lectus finibus consectetur imperdiet.
+
+Proin pellentesque ligula vel lacus laoreet, id elementum diam facilisis. Ut et ipsum ligula. Sed in nisi vel erat maximus cursus sed eu velit. Aenean porttitor felis arcu, aliquet maximus ante mollis id. Praesent laoreet nisi lacus, sit amet rutrum turpis blandit vel. Integer in volutpat elit. Suspendisse scelerisque elit et erat tempus, sed consectetur leo molestie. Etiam eleifend massa sit amet ante euismod facilisis.
+
+Proin accumsan sed nunc quis sollicitudin. Aliquam vehicula orci eu libero placerat, sed condimentum justo hendrerit. Morbi eu turpis ut sapien fringilla molestie vel non risus. Nunc porttitor est nec est interdum, imperdiet volutpat sem malesuada. Curabitur a lacus eu enim cursus tristique. Morbi pharetra mollis tincidunt. Sed viverra libero tempus sem tristique, quis elementum ipsum tincidunt. Duis tincidunt feugiat tortor pellentesque tempor. Mauris pellentesque pretium ex porta consectetur. Vestibulum euismod sollicitudin nibh id maximus. Aenean bibendum, mi quis dapibus facilisis, purus dolor viverra risus, nec aliquam velit quam at ipsum. Vivamus enim velit, rutrum at finibus non, placerat a justo. Praesent maximus nunc sed maximus fringilla. Sed in est in odio auctor tempus. Quisque erat lorem, sodales ut eros quis, dictum porttitor ipsum.
+
+Ut facilisis pellentesque leo, aliquam imperdiet leo maximus a. Donec eget turpis porttitor, euismod lorem vitae, condimentum lorem. Sed non convallis metus, a tristique metus. Aenean nec est a libero ultrices fermentum eget malesuada sapien. Phasellus faucibus elit felis, in efficitur lectus maximus nec. Nullam mollis quam est, ac finibus eros efficitur ut. Proin pretium, metus id lacinia molestie, mi diam dignissim nulla, ac feugiat dui dui a urna. Aliquam erat volutpat. Donec eget viverra nunc. Vivamus a facilisis est. Morbi varius felis orci, eget tempus quam congue vitae.
+
+Suspendisse in ipsum ut turpis ornare pellentesque sed sed velit. Morbi posuere in sapien tempus egestas. Aenean fermentum ipsum vel risus dictum, a mollis lectus tristique. Vestibulum sed sapien sed sem cursus sodales. Quisque ultricies ligula ut erat gravida molestie. Cras tincidunt urna odio, at varius lectus sagittis eget. Donec rhoncus accumsan tincidunt.
+
+Curabitur risus lorem, tempus euismod hendrerit eget, consequat vitae risus. Pellentesque malesuada laoreet tempus. Nunc sagittis, mi ut tristique sodales, tortor quam pulvinar ipsum, vestibulum accumsan dui augue a velit. Quisque faucibus nibh fermentum tempor vehicula. Morbi id rutrum velit, ut imperdiet justo. Nulla nec neque libero. Fusce consequat ornare tincidunt. Praesent eget imperdiet libero. Aliquam convallis risus sed risus condimentum ultricies. Duis sed purus purus. Quisque pulvinar faucibus finibus. Etiam fringilla sapien et tellus sollicitudin consectetur. Proin viverra eu nunc malesuada ullamcorper.
+
+Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nullam auctor, elit vitae euismod laoreet, leo erat blandit felis, quis porta nibh sem a massa. Nam vehicula est purus, vel convallis mauris commodo a. Proin cursus tortor eu velit consectetur fermentum. Nunc et egestas purus, et volutpat orci. Mauris eleifend id tellus in eleifend. Duis lectus tellus, malesuada et velit at, hendrerit finibus nulla. Ut tincidunt sagittis orci, mollis condimentum urna lobortis quis. Integer vitae dictum eros. Phasellus eu hendrerit neque. Aenean sit amet lectus nunc. Pellentesque tortor sapien, euismod rutrum placerat quis, cursus eu nunc. Suspendisse pretium, erat non mollis pellentesque, sapien neque rhoncus justo, in facilisis odio augue ac lacus. Nunc a sapien sodales, convallis nisl ac, gravida ante. Suspendisse sollicitudin eu leo eget facilisis. Donec sodales justo eu lacus tincidunt, sit amet tristique ipsum egestas.
+
+Suspendisse pharetra dictum neque, vel elementum sem condimentum lobortis. Aenean eget aliquet dolor. Aliquam erat volutpat. Ut feugiat tempor pretium. Phasellus faucibus eros et mauris ultrices, in pretium felis consectetur. Nullam ac turpis venenatis, feugiat massa vel, tristique turpis. Nunc eu ligula non quam laoreet dictum. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Maecenas sed mi imperdiet quam commodo accumsan. Vivamus pharetra iaculis diam, non tempus tortor pharetra dapibus. Nulla posuere, velit nec vehicula cursus, mi massa ultrices lectus, ut scelerisque quam velit sed velit. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas commodo, lorem ut elementum accumsan, sem leo lacinia tortor, ac placerat purus erat eget libero. Nunc posuere scelerisque ante, ut eleifend mauris vehicula nec.
+
+Donec rutrum quam dolor, id varius velit efficitur non. Aliquam eros lacus, dapibus at leo sit amet, ultricies ullamcorper ante. Nam quis condimentum leo. Curabitur porta vel nulla ac lobortis. Sed et tellus eu erat cursus bibendum. Vivamus id eros eget enim molestie volutpat vel at lectus. Ut ut neque erat. Sed vulputate erat justo, tristique bibendum mauris elementum eget. Pellentesque at convallis nisi. In commodo placerat elit et semper. Fusce nec sed.
+
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas varius massa orci, sit amet laoreet justo posuere ac. Ut vel blandit mi, id feugiat justo. Phasellus sed odio dictum, elementum nulla vel, elementum sem. Donec ac ligula lorem. Etiam pharetra augue massa, at auctor lorem cursus in. Quisque tristique est non ullamcorper gravida. Suspendisse interdum venenatis consequat. Ut fermentum enim purus, a efficitur massa tristique eu. Donec vitae mauris vitae tortor ultrices finibus. Aenean eu felis et diam imperdiet elementum. Suspendisse sed eleifend erat, ac posuere tortor. Vestibulum nec sem fermentum, tristique purus vel, cursus diam. Vestibulum ut volutpat nulla. Mauris ac lacinia dolor. Proin lacus nisi, dignissim non ornare quis, ultrices vitae sapien. Vivamus pulvinar mauris id sem tincidunt, nec convallis enim imperdiet.
+
+Aenean non tincidunt mauris. Interdum et malesuada fames ac ante ipsum primis in faucibus. Ut porttitor est vitae ante ultrices posuere. Cras pretium nisl sed nisl suscipit, ut scelerisque ex luctus. Proin nec neque pretium, dapibus sem nec, viverra sem. Mauris vehicula ultrices lectus ac sagittis. Nam suscipit lacus at urna venenatis blandit. Donec sed lorem lectus. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec luctus velit velit, nec varius elit lacinia in. Sed commodo pellentesque lobortis.
+
+Aliquam ut purus iaculis, consectetur metus in, molestie quam. Aliquam vulputate tellus eget malesuada vulputate. Vestibulum feugiat neque velit, in laoreet orci ultricies sed. Duis id congue ipsum. Maecenas elementum nisl nec risus sagittis, ac cursus mi accumsan. Duis vestibulum elit non nunc vestibulum rhoncus. In mollis venenatis dolor ut tristique. Sed tempus turpis eu efficitur accumsan. Proin molestie velit metus, ut euismod justo aliquam sed.
+
+Aliquam tellus sapien, sagittis sed augue quis, convallis commodo lorem. Nulla a turpis non massa feugiat tincidunt ac et libero. Etiam tempor elit sed nunc fermentum, nec pharetra nulla dictum. Nunc viverra tincidunt porttitor. Nulla pretium lectus ac dui vehicula, ac tincidunt nunc ultricies. Praesent bibendum elit ac mauris tincidunt lobortis. Quisque mattis nulla magna, quis interdum libero maximus id. Curabitur nec ultrices enim, a ornare ex. Cras id mauris ut sapien ullamcorper pharetra non quis lorem. Sed vel auctor tortor. Vivamus sed orci placerat, lobortis nisi ac, imperdiet ipsum. Quisque dapibus sodales dapibus. Nunc quam arcu, faucibus et eros vel, gravida congue quam. Donec id est efficitur dolor suscipit sollicitudin at et turpis. Morbi nibh orci, euismod quis egestas vel, imperdiet quis libero. Nam ultrices erat quis elit vulputate maximus.
+
+Vivamus a tortor in leo efficitur imperdiet ut ac justo. Donec auctor ex non elit ullamcorper, id mollis lectus aliquet. Cras arcu purus, finibus ut ullamcorper nec, suscipit quis nibh. Donec at iaculis metus. Quisque id massa maximus, blandit massa eu, cursus nisl. Aenean vel sollicitudin neque, id vehicula dui. Aenean dictum iaculis sapien nec laoreet. Quisque vel finibus tellus. Proin iaculis enim dignissim sem fermentum, vel mattis metus lobortis. Sed euismod pulvinar placerat. Vestibulum eget suscipit quam, vel ultricies urna. In euismod lorem vitae elementum malesuada. Donec quam quam, rhoncus et fringilla at, malesuada et massa. Aenean posuere ipsum sed dui pellentesque venenatis eu eget purus. Donec a luctus mauris.
+
+Aenean auctor viverra ultrices. Nunc eu massa sem. Vivamus pellentesque neque non luctus luctus. Donec vel vulputate massa. Nunc condimentum, erat sed vestibulum vestibulum, augue arcu hendrerit magna, mollis ultricies quam nibh dignissim magna. Etiam quis egestas nisi. Sed quam lacus, elementum et dui vitae, scelerisque accumsan diam. Cras eleifend est dui. In bibendum euismod lorem vitae ullamcorper. Nunc faucibus et lorem in faucibus.
+
+Sed aliquet varius turpis, a sollicitudin felis accumsan pulvinar. Nunc vestibulum ante et tristique tristique. In et efficitur purus. Vestibulum malesuada urna id nunc imperdiet tempus. Nunc eleifend sapien at velit ultricies, dictum elementum felis volutpat. Suspendisse imperdiet ut erat eu aliquam. Maecenas tincidunt sem nec sodales sollicitudin. Morbi quam augue, tincidunt vitae lectus et, lobortis efficitur dui. Ut elit ex, viverra in risus sit amet, congue blandit lacus. Etiam fringilla magna at purus sagittis, ac vehicula elit vestibulum. Cras pharetra tellus molestie tortor placerat, a vehicula dui placerat. Vivamus ac sapien sapien. Donec eleifend ligula vitae tortor sodales hendrerit non sed risus. Aliquam fermentum et urna et malesuada. Cras euismod nulla vel velit egestas, euismod laoreet ante vehicula. Maecenas orci elit, blandit eu blandit sodales, mollis ac turpis.
+
+Nam tortor est, gravida a rutrum sed, venenatis id orci. Duis massa tortor, mollis fermentum fermentum sit amet, sagittis ut nisl. Vestibulum quis sagittis purus. Suspendisse varius nec ipsum nec molestie. Vestibulum molestie molestie rhoncus. Cras dignissim sapien vitae libero tincidunt elementum. Fusce vehicula sodales orci, sed convallis ligula consequat in. In consectetur sem at laoreet lacinia. Fusce luctus faucibus tellus, in malesuada sem consectetur sit amet. Ut gravida, nisl at finibus egestas, ipsum libero viverra elit, at rutrum metus elit ac nunc. Praesent eu dolor rutrum, imperdiet justo eget, ultrices tortor. Aenean id venenatis lorem. Duis consequat elit a nisi elementum convallis. Pellentesque porta lorem vel ipsum tempus imperdiet. Aliquam suscipit justo sit amet dui imperdiet, ut ultrices leo ullamcorper. In dapibus, felis id auctor pulvinar, metus metus cursus odio, at semper justo nibh sollicitudin sem.
+
+Nam quis elit ac tortor venenatis luctus. Pellentesque consectetur tincidunt fringilla. Morbi a nunc sed libero tempor vehicula. Mauris cursus mi neque, id lobortis turpis auctor aliquet. Donec at volutpat urna. Quisque tincidunt velit mi, sed rutrum elit ornare ac. Nunc dolor libero, ultrices eget est a, facilisis auctor mi. Integer non feugiat libero, eu pulvinar leo. Fusce feugiat suscipit nibh ac iaculis. Duis vulputate felis quis enim auctor, eu dictum sapien scelerisque. Nullam sem nisl, tempor egestas imperdiet sit amet, venenatis eu ligula. Pellentesque arcu quam, bibendum sed consectetur nec, commodo a purus. Ut in ex libero. Aenean dignissim ex orci, sed tempus lectus viverra sed. Vestibulum euismod massa arcu, quis iaculis libero mattis id. Proin lectus nibh, euismod non varius quis, tincidunt sit amet urna.
+
+Suspendisse potenti. Integer dapibus gravida lacinia. Curabitur sodales ac erat vitae gravida. Vestibulum id tortor nec lectus tempus gravida sit amet id ante. Nam malesuada dapibus urna a vehicula. Sed ultricies nulla nec eleifend consequat. Maecenas elementum ante at porttitor elementum. Ut at augue vitae mauris volutpat semper. Morbi viverra justo in mauris convallis, vel consequat leo faucibus. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae;
+
+Duis efficitur augue diam, ac rhoncus mauris sagittis ac. Etiam eleifend rhoncus justo, eu interdum lorem elementum eu. Suspendisse ex libero, mollis consequat turpis sed, condimentum sodales risus. Nunc pellentesque dui vel odio scelerisque, ut aliquam mauris gravida. Quisque laoreet tincidunt tortor id viverra. Morbi eget ipsum tortor. Praesent orci mauris, euismod ut nisi in, fermentum ullamcorper nulla. Curabitur facilisis vestibulum luctus. Aliquam sollicitudin et mauris vel feugiat. Duis non quam eu sapien hendrerit tristique. Fusce venenatis dignissim porta. Duis id felis purus. Aliquam ac velit in orci ornare varius. Nulla quis ex lectus.
+
+Ut tincidunt commodo augue, ut viverra mauris fringilla at. Integer sit amet ullamcorper felis. Nullam aliquam massa quam, id tincidunt mauris porta id. Integer nibh sapien, vulputate sit amet laoreet tincidunt, hendrerit eu quam. Morbi vitae felis et diam accumsan luctus ac sed est. Donec vitae viverra diam, at rutrum elit. Donec condimentum justo id dolor viverra vestibulum posuere quis purus. Aliquam id magna sit amet magna dapibus hendrerit et vitae quam.
+
+Phasellus in mauris turpis. Etiam nec ante eu mi maximus commodo quis eu risus. Etiam a turpis non tortor viverra gravida luctus vitae est. Quisque eget gravida quam, sit amet bibendum nulla. In mollis sapien nisl, nec efficitur mi rutrum sed. Suspendisse potenti. Nulla efficitur sagittis diam nec rutrum. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam tempus lorem in purus sollicitudin cursus.
+
+Cras id rhoncus ligula. Vivamus vel tortor malesuada, eleifend neque ac, sollicitudin enim. Proin a euismod neque. Suspendisse odio quam, placerat ac hendrerit at, porttitor in sapien. Ut vitae risus velit. Maecenas sagittis, leo efficitur lobortis elementum, dui ante semper ex, quis auctor velit arcu eget ligula. Aliquam sollicitudin, nulla tempus consequat egestas, nibh diam pulvinar purus, vitae maximus justo nibh maximus lacus.
+
+Nulla rutrum magna quis mi gravida euismod. Fusce nec facilisis massa. Phasellus iaculis, eros fringilla imperdiet condimentum, orci sem fermentum massa, quis scelerisque lacus ante vitae dolor. Cras interdum egestas lectus rhoncus tristique. Etiam nec sollicitudin sapien, ut tristique nibh. Integer a imperdiet erat. In interdum nisi vel urna aliquet, eget malesuada purus dapibus. Sed hendrerit lectus at hendrerit accumsan.
+
+Maecenas semper pretium sapien nec ullamcorper. Praesent nec neque quis nunc porta ultricies interdum vel ipsum. Donec dapibus lorem quis quam hendrerit, vitae laoreet massa pellentesque. Etiam pretium sapien vitae turpis interdum, ut rhoncus nisl bibendum. Nunc ac velit ac ex sollicitudin ultrices id in arcu. Phasellus tristique, nibh et rhoncus luctus, magna erat egestas velit, nec dignissim turpis ipsum ac felis. Maecenas convallis arcu et lectus vehicula, eget iaculis quam ultrices. Duis malesuada suscipit aliquet. Sed pulvinar eros quis nisl cursus, elementum sodales tortor fringilla. Nulla feugiat tristique sem eu tempus. Quisque at velit condimentum, consequat augue rhoncus, accumsan nulla. Sed varius sodales varius.
+
+Nunc consequat, mauris eget hendrerit fermentum, felis nisi efficitur lectus, eget dignissim leo purus quis purus. Praesent libero lacus, sodales id justo id, maximus condimentum purus. Sed tristique egestas lorem vel efficitur. Praesent vestibulum tincidunt faucibus. Ut fringilla eros sed purus mattis pharetra. Sed convallis turpis in sapien dictum, sed molestie orci accumsan. Sed eros nisi, cursus cursus nulla sit amet, sollicitudin interdum quam. Vestibulum tincidunt eros convallis, iaculis odio in, vulputate nisl. Duis scelerisque finibus purus, at porttitor sem molestie nec. Nullam sed eros dignissim, tincidunt nibh id, porta metus. Sed eget magna quis sapien commodo bibendum. Vivamus non purus nec ligula facilisis blandit a a mi. Suspendisse hendrerit, erat eget tempus mollis, justo dui dictum nunc, at pulvinar purus velit elementum augue.
+
+Fusce sed venenatis sem. Sed at libero non magna varius porttitor eu vel sapien. Cras mattis non lorem sit amet fermentum. Nam sagittis nisi magna, sit amet semper urna viverra tincidunt. Cras et leo sit amet turpis lacinia dictum. Donec iaculis nulla posuere ex varius tristique. Pellentesque dictum lacus vel nulla maximus cursus. Nulla tristique lorem pellentesque est dignissim, et venenatis felis pellentesque. Nulla vitae leo at metus posuere commodo sed et ex. Curabitur est odio, laoreet eu malesuada sed, mattis ut diam. Integer erat velit, rhoncus quis nulla ornare, dictum scelerisque tellus. Suspendisse potenti. Integer accumsan lacus ac dictum pulvinar. Integer non magna blandit nibh rhoncus varius. Nulla vulputate erat ut cursus rutrum.
+
+Sed iaculis a eros sit amet egestas. Proin finibus libero at vestibulum finibus. Mauris gravida porta ipsum at placerat. Cras egestas nulla a orci consequat eleifend. In sit amet faucibus arcu. Fusce eu neque facilisis, porttitor massa vel, vehicula nisi. Aenean eu posuere sapien. Aenean in risus at lectus semper auctor. Morbi hendrerit porta urna, eu fringilla velit ultricies nec. Donec quis lorem volutpat erat volutpat accumsan eu non turpis. Nulla quis laoreet metus, at lobortis leo. Suspendisse at rutrum nulla, a tincidunt nibh. Etiam tempor mi et augue iaculis porttitor.
+
+Etiam eget ipsum id sapien sodales auctor. Proin libero nibh, lacinia lobortis dapibus ac, faucibus at dolor. Pellentesque sit amet purus at felis gravida porta. Suspendisse ut molestie massa. Curabitur dignissim leo arcu. Nulla nibh ante, tempus eu posuere eu, egestas venenatis lectus. Donec commodo pharetra laoreet. Quisque ac quam egestas, auctor leo aliquam, lacinia elit. Nullam eget nisi a tellus efficitur vestibulum. Sed molestie luctus arcu a viverra.
+
+Sed sagittis, augue et pharetra bibendum, augue purus dignissim diam, nec iaculis turpis ex eu nisl. Donec cursus, orci nec volutpat dignissim, sem enim condimentum neque, ut volutpat velit turpis vitae lectus. Maecenas eu elit eget ipsum venenatis pharetra. Etiam consectetur luctus tortor. Mauris odio massa, gravida ac libero et, semper aliquet turpis. Fusce eleifend imperdiet justo, eu molestie ipsum egestas nec. Duis vehicula quis erat sit amet dictum. Vestibulum sit amet ultricies massa. Vivamus auctor, sem vitae vulputate bibendum, risus dolor pharetra sapien, a posuere lacus libero eget ipsum. Fusce egestas at libero sed iaculis. Nunc sit amet dui scelerisque, fringilla diam in, tempor tellus. Curabitur facilisis tortor quis mauris interdum, nec mattis dolor bibendum.
+
+Nunc suscipit varius vestibulum. Praesent luctus lectus risus, tristique hendrerit nisi faucibus non. Quisque turpis leo, hendrerit a vulputate vel, imperdiet non ipsum. Sed dui est, lobortis sed tortor non, tempor tempus lorem. Cras eget egestas ipsum. Sed ante lorem, porttitor varius pulvinar eu, vehicula ut turpis. Aenean tristique sapien vitae lobortis luctus.
+
+Maecenas accumsan elit nec diam facilisis iaculis. Etiam volutpat vestibulum lectus condimentum blandit. Nulla interdum sapien sed velit tempus, a vehicula odio porta. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Duis viverra elit dui, eget aliquam eros imperdiet non. Nam porttitor tellus risus, in pretium leo tempus facilisis. Donec vel euismod lectus.
+
+Ut sed consectetur felis. Phasellus condimentum diam vitae ante commodo ultrices. Etiam iaculis, nulla mollis sodales scelerisque, ipsum eros luctus felis, vel cursus eros quam vel felis. Cras dictum eros ut auctor rutrum. Nullam cursus vehicula tortor in placerat. Pellentesque sodales euismod semper. Nullam quis vulputate augue. Aliquam in nulla ac tellus gravida semper ut et nibh. Phasellus tempor molestie purus eu ullamcorper. Etiam metus neque, imperdiet vitae turpis at, elementum mollis velit. Donec mollis auctor nunc non tristique.
+
+Morbi rutrum magna egestas, volutpat elit eget, dictum nibh. Aliquam erat volutpat. Phasellus in tristique leo. Donec sodales pretium erat eget pellentesque. Aliquam in nunc ut augue accumsan laoreet. Pellentesque sed ante sit amet tellus vulputate suscipit. Praesent interdum neque varius mi fringilla ullamcorper. Quisque a felis nibh.
+
+Curabitur porttitor, augue et tincidunt viverra, eros libero feugiat metus, vitae lobortis mauris sapien eu dui. Cras ligula eros, auctor ac nisi ut, condimentum tincidunt ex. Vivamus vel aliquam lacus, a facilisis augue. Sed in est nisl. Integer mattis, arcu sit amet placerat consectetur, leo quam elementum justo, at hendrerit urna metus in velit. Suspendisse scelerisque suscipit odio non sagittis. Proin in fermentum elit. Duis interdum, libero quis molestie rhoncus, turpis urna cursus nulla, venenatis finibus orci diam a nibh. Ut ut massa a ex convallis commodo dictum sed urna. Nam id felis ipsum. Nunc tincidunt dignissim libero, at tempus dui porttitor sit amet. Vivamus nulla ipsum, pretium non fringilla et, tristique ut est. Etiam tristique vitae enim quis elementum.
+
+Curabitur sodales nec diam vulputate hendrerit. Suspendisse consectetur convallis sem et sagittis. Donec lobortis vestibulum eros sit amet efficitur. Nulla pellentesque tempor massa sit amet tempor. Praesent vestibulum elit auctor imperdiet faucibus. Nunc consequat nunc lectus, quis egestas augue suscipit et. Suspendisse eleifend eget lorem sed ornare. Integer non aliquam nisl. Proin metus odio, faucibus pellentesque dapibus vel, scelerisque nec arcu. Pellentesque ut velit nulla. Integer porttitor nec enim ac luctus. Praesent elementum ac est in aliquam.
+
+Mauris at dui dignissim, pharetra dui nec, vulputate dolor. Nunc ac commodo enim. Mauris eleifend est nunc, eget pulvinar justo egestas et. Vestibulum id volutpat lectus, vel rhoncus risus. Ut augue justo, gravida nec libero tincidunt, vulputate fringilla dolor. Suspendisse aliquet risus vel ante tempus, vel laoreet tellus bibendum. Quisque non vestibulum nisi, non malesuada libero. Cras quam nibh, tempor vel massa id, laoreet semper libero. Aliquam fermentum nunc vitae nibh vulputate, ac dignissim sapien vestibulum. Mauris pellentesque pretium massa vitae cursus. Phasellus in lacus augue. Integer finibus pulvinar arcu, in scelerisque lorem tincidunt sit amet. Pellentesque varius turpis sollicitudin, ornare odio nec, venenatis augue. Nullam commodo lacus a placerat consequat. Curabitur eu lobortis tortor. Curabitur varius iaculis lorem in mollis.
+
+Curabitur at convallis lectus, id vestibulum enim. Donec quis velit eget leo dictum venenatis id et velit. Phasellus ut tincidunt libero. Aliquam tincidunt tellus sed tortor facilisis laoreet. Morbi cursus pellentesque lectus, et tempor turpis condimentum at. In tempor auctor metus sed accumsan. Nulla ornare dapibus mi. Aenean ut ullamcorper eros, vitae condimentum ipsum. Nam in turpis ligula. Suspendisse ac dolor odio. Curabitur vel libero ac mi mattis consectetur ac id nunc. Cras sit amet justo nec risus malesuada posuere ut sit amet augue.
+
+Sed pretium odio eu libero pretium, ut ullamcorper eros placerat. Praesent volutpat tincidunt massa, eget fermentum lacus congue eget. Pellentesque nec purus aliquet nulla sagittis vehicula. Vivamus posuere cursus lacus at blandit. Phasellus mauris sapien, imperdiet eget ex id, posuere vehicula augue. Nulla nulla ligula, ornare porta massa vel, commodo tincidunt arcu. Morbi fermentum blandit eros vitae eleifend. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Duis tempor pretium magna et ornare. Donec sed nunc sed ipsum laoreet maximus. Sed congue massa nec augue sodales, et placerat diam aliquet. Donec in sem lorem. Nullam pretium massa non magna feugiat pretium. Morbi auctor, nunc quis faucibus venenatis, sem sem porttitor mauris, non elementum mauris felis a leo. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+
+Mauris imperdiet condimentum leo, et efficitur nulla posuere id. Nullam facilisis magna non pharetra molestie. Donec volutpat tincidunt pulvinar. Phasellus molestie, neque sit amet lacinia fermentum, sapien quam iaculis ligula, id suscipit augue ante faucibus dolor. Pellentesque aliquet, tortor eu dictum consectetur, lectus quam laoreet lorem, id congue risus arcu a libero. Sed ac luctus justo. In hac habitasse platea dictumst. Praesent ultrices ante vitae ante sollicitudin elementum. Aliquam egestas porttitor velit sit amet imperdiet. Curabitur quis lacus ac metus egestas tincidunt.
+
+Nunc ut ipsum arcu. Duis suscipit, nisi posuere commodo posuere, eros mi tempus magna, ac venenatis diam erat eget massa. Etiam eu posuere sapien. Maecenas in ipsum consectetur, luctus mi eu, mattis nibh. Donec consectetur augue sit amet velit scelerisque, a aliquet dui venenatis. Morbi libero sapien, consequat faucibus congue eget, elementum sed magna. Phasellus malesuada arcu at est lobortis, id porttitor leo elementum. Praesent luctus placerat tellus vel volutpat. Nam at enim cursus, aliquam arcu ac, imperdiet dolor. Proin auctor diam elit, non aliquet orci lobortis nec. Curabitur commodo, sapien non placerat accumsan, leo sapien rutrum neque, at dapibus orci libero a nunc. Aliquam egestas sem non tellus convallis, eget rutrum eros posuere.
+
+Sed tincidunt at elit sed venenatis. Aliquam sit amet iaculis mi. Pellentesque laoreet lobortis quam, vel accumsan nisl hendrerit at. Phasellus quis purus nisl. Fusce quis laoreet nunc. Integer quis nisi justo. Vivamus porttitor malesuada orci sed porta. Nunc ullamcorper faucibus sem, ac euismod ipsum condimentum sed. Aenean iaculis nunc vitae sapien auctor, sit amet rutrum nisl commodo. Vivamus condimentum ex eu arcu posuere, nec ultricies eros lobortis. Cras vehicula massa quis auctor condimentum.
+
+Cras arcu nisl, sodales nec leo id, iaculis aliquam urna. Praesent fringilla, nisl suscipit posuere laoreet, sem magna tristique augue, id consequat ligula dui nec tortor. Sed at mattis tellus. Curabitur feugiat porttitor mauris, at gravida est. Pellentesque in libero in dui posuere facilisis. Praesent in posuere libero. Pellentesque vehicula leo mauris. Quisque commodo, nulla a placerat consequat, elit ligula blandit leo, vitae gravida turpis risus ultricies libero. Ut feugiat, augue vel malesuada ornare, magna nisi dictum est, sed egestas augue nisi eu urna. Vestibulum euismod nulla erat, sit amet accumsan felis posuere vel.
+
+Etiam pretium turpis eget semper efficitur. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Mauris in dictum velit. Suspendisse mauris dolor, sodales vehicula mauris blandit, venenatis aliquet orci. Etiam non vulputate sapien. Quisque ut metus egestas, luctus mi in, ornare dolor. Curabitur tincidunt dapibus neque, sit amet commodo est dignissim vel. Curabitur vel pharetra velit. Aliquam ligula ante, efficitur sed cursus sed, tempus et justo. Nulla faucibus sodales odio et ultricies. Proin sit amet nisl non orci ornare tempor. Sed nec lobortis sapien, eget congue mauris. Fusce facilisis ex non molestie lacinia. Vivamus venenatis iaculis quam. Sed est felis, elementum in lectus a, facilisis bibendum quam. Donec luctus non purus in commodo.
+
+Fusce ac mi vitae ex rutrum bibendum. Nulla venenatis lobortis pharetra. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Suspendisse potenti. Interdum et malesuada fames ac ante ipsum primis in faucibus. Phasellus et efficitur nibh. Morbi auctor magna diam, eget dapibus tortor tincidunt vitae. Aenean luctus eros a metus tristique suscipit. Sed luctus, risus ac scelerisque molestie, felis lectus vestibulum nunc, a posuere libero eros eu nibh. Donec gravida eget quam eget ultricies. Donec et aliquet lectus, ac aliquam ante.
+
+Maecenas lacus magna, dictum quis tempus ac, consectetur vitae purus. Sed ut arcu bibendum, malesuada urna quis, interdum nulla. Phasellus non urna ut dui rhoncus bibendum. Duis vel gravida dui. Pellentesque mollis turpis libero, sit amet vehicula magna feugiat nec. Vivamus consectetur libero ut nibh efficitur interdum. Quisque pretium auctor quam, ac commodo sapien congue a. Integer posuere facilisis mi, a placerat purus viverra malesuada. Nam ornare elit sit amet orci hendrerit, at pulvinar est porttitor. Pellentesque efficitur odio eget consectetur efficitur. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Nulla aliquam tristique diam vitae luctus. Pellentesque tortor nibh, lobortis ac velit at, congue ultrices augue. Donec et arcu ultricies, fringilla elit eget, congue lorem. Nunc quis dui in felis gravida bibendum ut id justo.
+
+Suspendisse quis quam vel turpis egestas auctor. Duis suscipit rutrum pellentesque. Sed vitae tincidunt mauris. Vestibulum rhoncus risus et facilisis hendrerit. Duis consectetur, ante nec eleifend elementum, libero mi pretium arcu, non pretium massa quam non neque. Etiam commodo egestas felis. Nam et elementum elit. Ut sit amet odio ac velit tristique rhoncus. Integer volutpat enim ut dictum rhoncus. Vestibulum viverra neque elementum, laoreet leo nec, tempor ipsum.
+
+Ut condimentum nibh id ante fermentum venenatis. Nullam scelerisque facilisis magna non sodales. Ut luctus libero augue, eget congue risus rhoncus quis. Fusce vitae lorem congue, euismod magna finibus, tincidunt justo. Aenean dapibus tortor nec lacinia pellentesque. Aenean condimentum convallis maximus. Aliquam feugiat lorem quis tellus hendrerit dictum. Nunc mollis pharetra erat vitae lobortis. Phasellus auctor velit fermentum fermentum porta.
+
+Vivamus efficitur ligula ac tincidunt pretium. Mauris rhoncus leo in sem dictum, non tempor augue tempor. Aenean rutrum augue eget justo mollis volutpat. Sed efficitur turpis vel lacus placerat, a lobortis nibh porttitor. Aliquam eleifend ultricies nulla at lacinia. Mauris eu ipsum laoreet, iaculis urna a, pretium arcu. Mauris convallis ut ligula a varius. Integer maximus venenatis risus sed tincidunt. Cras aliquet nisl ac diam ornare, ac lobortis ex rutrum. In vel mauris vestibulum, ornare purus id, iaculis lorem. Nulla condimentum tellus vel leo suscipit, in vehicula velit tempor. Cras in orci sollicitudin, placerat justo non, tristique massa. Praesent facilisis et elit sit amet placerat. Donec nec justo in nunc ultrices finibus.
+
+Fusce lacinia laoreet orci, nec egestas mauris mollis ac. Maecenas scelerisque in libero a tincidunt. Integer varius dui rutrum urna aliquam, id posuere nunc suscipit. Ut eget sollicitudin est. Nam augue nulla, commodo ut cursus sit amet, semper eu nibh. Maecenas sodales, sapien in maximus posuere, odio ante lobortis arcu, a varius diam sapien ut ipsum. Vestibulum sagittis, mauris sed ullamcorper tristique, purus quam mollis lacus, eget cursus tellus mi sit amet diam. Etiam quis lectus tristique, luctus justo eu, suscipit tortor. Phasellus vel neque ornare, dignissim nisi sit amet, porta est. Proin porttitor nisl vitae lectus tincidunt laoreet. Mauris finibus justo eu tellus egestas, a vestibulum sem vestibulum. Donec vel massa pretium, blandit orci ut, mollis lorem. Etiam mauris neque, eleifend vitae neque in, efficitur posuere mi. Morbi in elit volutpat, volutpat ligula et, porta ipsum. Aenean porta condimentum leo, vel ultrices eros imperdiet quis.
+
+Aliquam placerat, nulla vel aliquam hendrerit, turpis nibh euismod elit, a pellentesque tortor leo quis libero. Donec velit orci, ullamcorper id aliquet in, convallis varius libero. Quisque a magna est. Cras luctus purus elit, at aliquam tellus feugiat ut. Sed gravida scelerisque tortor, quis laoreet ante efficitur quis. Phasellus et tortor eget magna pulvinar laoreet et et urna. Sed ac vehicula sem, blandit semper turpis. Praesent pharetra libero dui, sed fringilla urna blandit eu. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi id felis commodo, euismod libero sollicitudin, auctor ante. Fusce tristique facilisis gravida.
+
+Curabitur elementum sit amet magna eget egestas. Integer a libero vitae nisl sagittis gravida. Quisque leo ipsum, ultrices id justo nec, scelerisque vehicula nibh. Nunc vitae commodo eros. Nunc elementum justo luctus laoreet faucibus. Vestibulum ornare lorem non eros gravida, vitae varius diam condimentum. Vivamus porta fermentum elit vitae imperdiet. Cras auctor, est vitae bibendum posuere, justo dolor iaculis risus, sit amet gravida tortor diam quis mi. Vivamus vel tortor vitae lectus tristique consectetur. Integer rutrum posuere sapien commodo consectetur. Nullam fermentum in enim non imperdiet. Proin dapibus erat ac auctor tincidunt. Nunc tortor diam, pretium quis odio a, convallis eleifend turpis. Mauris vulputate lacinia enim, at mollis enim. Etiam ut mi et dolor consectetur volutpat vitae vel eros.
+
+Donec sollicitudin mauris a justo semper consectetur. Morbi nec justo a dui faucibus semper. Nunc ornare vitae mauris vitae gravida. Integer quis commodo neque, bibendum commodo nisl. Sed sagittis posuere purus id dapibus. Nunc hendrerit at mi a mollis. Fusce augue odio, tristique cursus ullamcorper in, ultricies at ex. Integer lobortis ultricies risus, in luctus turpis consectetur sit amet. Vivamus quam lectus, dapibus imperdiet posuere in, lacinia id orci. Donec pharetra augue ac velit pulvinar blandit. Curabitur in sagittis purus. Etiam eleifend elit metus, ac tempus leo ullamcorper eget. Nulla viverra maximus ipsum, ac sollicitudin nulla auctor quis.
+
+Nunc quis varius urna. Maecenas vel orci ac tellus pulvinar tincidunt. Sed bibendum pulvinar ex sit amet pulvinar. Sed quis rutrum ipsum. Nunc sit amet mi nunc. Fusce ac tempor sapien, ac interdum tortor. Nunc sit amet varius odio. Aenean ac fringilla ante, ac tempor nibh. Ut vitae felis vel mauris condimentum scelerisque quis et magna. Mauris sit amet leo sagittis, congue dui vel, varius leo. Curabitur semper vestibulum metus, non bibendum leo vulputate eu. Fusce at pharetra ante. Sed porttitor ligula turpis, vitae eleifend sem porttitor vel. Praesent convallis imperdiet orci ac tincidunt. Nam a tortor ac sapien interdum mattis. Vivamus iaculis quam in hendrerit molestie.
+
+Phasellus sit amet dapibus massa. Vestibulum scelerisque erat turpis, eget fermentum libero blandit at. Morbi eu ligula metus. Phasellus pretium enim vitae ligula malesuada, vel bibendum turpis venenatis. Integer pretium tellus et placerat vehicula. Maecenas ut turpis eu lectus tempus lacinia id eu lacus. Aliquam laoreet lacus et purus sagittis, ut gravida dolor convallis. Sed euismod, nisl nec tincidunt tempus, velit eros fermentum nisi, ut tincidunt sem tellus rutrum enim. Etiam dignissim ipsum vitae magna laoreet semper. Sed sit amet neque placerat, porta eros in, pulvinar lorem.
+
+Duis convallis imperdiet augue, et porta orci. Maecenas venenatis, sem ut ultricies euismod, ex velit tempor massa, at imperdiet dui nisl quis sapien. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Sed eleifend mattis lacus, eget pharetra erat vestibulum non. Mauris tellus quam, lobortis ut elit quis, varius aliquet erat. Proin mauris dolor, varius eu commodo quis, porta sed erat. Morbi ut nisl accumsan, sollicitudin nisi ac, tempor leo. Nulla facilisis nibh dolor, vitae tincidunt ex hendrerit ut. Suspendisse quis neque tellus. Maecenas ut odio nec risus sagittis gravida. Phasellus feugiat cursus dapibus. Morbi efficitur condimentum elit sed pharetra. Mauris ornare pharetra nisl, ac gravida dolor condimentum at. Aliquam lobortis finibus lorem, id pretium libero vestibulum vitae.
+
+Vestibulum pretium eleifend justo, sit amet imperdiet justo faucibus a. Suspendisse consectetur ipsum quis purus rutrum imperdiet. Nam nibh ex, tincidunt nec blandit sed, venenatis vitae mauris. Integer rutrum tincidunt tortor, ut mattis tortor fermentum ac. Duis congue dui sed est suscipit, nec semper lectus lobortis. Vestibulum felis ante, hendrerit ac venenatis sed, tincidunt iaculis augue. Duis pharetra blandit metus sed semper. Fusce ornare varius placerat. Vivamus sollicitudin lacus id nunc sollicitudin, a viverra felis pellentesque. Phasellus a felis in sapien facilisis imperdiet. Quisque ac purus dapibus metus fermentum mollis.
+
+Donec diam nisl, faucibus feugiat condimentum vel, eleifend eu magna. Sed tempus, justo a bibendum suscipit, sem nunc viverra enim, id semper nunc eros sit amet mauris. Praesent ultrices porttitor ex eu lacinia. Integer quis aliquet nibh, sit amet porttitor elit. Curabitur vel elementum quam. Sed fermentum vehicula egestas. In metus massa, sodales vel mauris id, finibus dapibus metus. Donec lectus ante, ullamcorper non posuere in, fringilla non velit. Quisque cursus interdum elementum. Phasellus vestibulum massa non sem fringilla congue. Maecenas nec arcu diam. Vivamus id suscipit odio, vel condimentum leo. Nulla sed dolor mattis, interdum lacus imperdiet, interdum nulla. Maecenas sagittis, ipsum vitae dapibus luctus, ipsum dui tempus tortor, quis porta dolor dui in sapien. Nulla vel porta neque, quis auctor massa.
+
+Suspendisse viverra nec risus fermentum maximus. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent eu placerat nulla. Donec varius faucibus posuere. Nulla fermentum ultricies mauris at euismod. In hac habitasse platea dictumst. Proin et nisl purus. Cras quis risus sit amet lectus maximus semper. Quisque pellentesque luctus erat convallis maximus. Sed et lacus vel sapien pellentesque accumsan id elementum dolor.
+
+Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Nulla eget aliquet nunc. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Aenean et tincidunt massa, sed maximus leo. Aliquam in cursus tortor. Praesent ornare ante vitae leo pretium cursus. Nunc sodales neque urna, eu tincidunt dui placerat at. Integer vel arcu vel velit euismod sollicitudin quis sit amet ligula. Nunc facilisis, eros eu pharetra mollis, magna odio rutrum leo, eget placerat erat massa non metus. Nunc nec auctor felis.
+
+Vestibulum et tempus ipsum. Duis molestie felis et ex scelerisque, quis faucibus dolor viverra. Suspendisse rhoncus volutpat dolor. Duis ac augue iaculis, vulputate dui sit amet, gravida ante. Mauris porttitor purus eu ligula tempus volutpat. Aenean quam neque, venenatis id est et, blandit pharetra enim. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent vitae malesuada dui. Praesent tortor ligula, tincidunt at suscipit laoreet, tristique vitae magna. Phasellus gravida augue lacinia velit cursus lacinia.
+
+Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Quisque vitae blandit dui, a facilisis sapien. Praesent malesuada massa sed orci lacinia vulputate. Cras a est vitae quam sodales pellentesque. Nam posuere condimentum mollis. Quisque ultricies nisl libero, vel scelerisque nunc interdum non. In porttitor consectetur placerat. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Quisque et tortor sed nibh scelerisque suscipit. Integer a auctor velit, in tempor magna. Curabitur iaculis ut purus vel consequat.
+
+Quisque at consequat turpis, ut aliquet dolor. Aenean quis mauris sit amet diam tempor porta ac eu purus. Maecenas commodo faucibus interdum. Praesent in tincidunt felis, vel tincidunt nibh. Integer posuere enim a purus tristique tincidunt. Etiam nisi odio, vehicula sed mauris vel, ornare blandit augue. Fusce finibus mi lorem, eu egestas lectus commodo quis. Nunc scelerisque, erat mollis varius congue, ante ligula suscipit neque, nec ultrices urna leo rutrum nibh. Vivamus pulvinar lacinia elit at lobortis. Sed molestie turpis dapibus sapien imperdiet, vitae scelerisque ligula volutpat. Nam fermentum ipsum est, ut vulputate arcu maximus eu. Sed tristique, massa sit amet dictum bibendum, neque tellus volutpat ipsum, ut faucibus purus arcu vel quam. Vivamus laoreet risus non nisi ullamcorper, molestie tincidunt diam scelerisque. Sed eget congue velit.
+
+Sed eu dapibus eros. In at est augue. Nunc malesuada, tortor quis molestie euismod, erat sem porta arcu, vitae facilisis purus ligula vitae mauris. Aliquam erat volutpat. Nunc scelerisque porta eros, finibus elementum ipsum ultricies ut. Quisque vestibulum libero quis lectus semper suscipit. Sed malesuada eu lorem in placerat.
+
+Nunc metus arcu, rutrum eu varius in, auctor vitae diam. Maecenas ultricies faucibus hendrerit. Integer tincidunt, orci a bibendum dapibus, nulla tellus dapibus urna, vel sodales sapien neque eget mi. Nunc elementum enim sapien, sed egestas diam eleifend sit amet. Mauris sapien ligula, finibus nec augue in, volutpat dictum velit. Nunc a ligula vitae massa pellentesque sollicitudin. Aliquam rutrum porttitor volutpat. Proin convallis sollicitudin commodo. Duis eu rutrum risus, a auctor felis. Proin volutpat arcu velit, sed condimentum magna varius sit amet. In et sapien efficitur, iaculis justo eu, euismod nibh.
+
+Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Aenean hendrerit eros non purus dapibus, vel laoreet ipsum tincidunt. Duis scelerisque sollicitudin rutrum. Pellentesque rutrum ultricies sem, vitae mollis elit efficitur ut. Ut consectetur scelerisque ultrices. Vivamus id urna scelerisque nibh interdum mattis. In tristique tortor ut dictum laoreet. Quisque fermentum, augue blandit lacinia luctus, ligula nunc commodo velit, accumsan tempus orci quam ac nibh. Praesent ante risus, pulvinar in nisl ac, malesuada porttitor magna.
+
+Nam nunc ex, condimentum ac volutpat ac, pretium sed tortor. Integer venenatis, nunc id ullamcorper aliquam, eros arcu blandit sapien, id maximus erat nunc sed ligula. Proin tincidunt libero et purus tincidunt maximus. Nulla laoreet nisl eu velit pharetra, id porttitor mauris dictum. Mauris blandit pharetra lectus sit amet sagittis. In sit amet lorem hendrerit, varius justo eu, ultricies odio. Curabitur ante nibh, scelerisque at elementum a, condimentum viverra tortor. Donec tellus arcu, ultricies at posuere at, sagittis at sem. Phasellus non eros eu dui blandit fringilla. Maecenas hendrerit arcu porta, feugiat neque ac, venenatis ipsum. Nam ut elit nec lectus sodales posuere. Proin aliquet accumsan sapien, non porta quam. Praesent vulputate ante ut malesuada efficitur.
+
+Nullam pulvinar arcu orci, semper vehicula nibh fringilla ac. Duis porta ullamcorper risus sed facilisis. In vitae consectetur sapien, eget porttitor velit. Ut ac leo luctus, gravida erat sit amet, fermentum orci. Proin feugiat orci eget erat sagittis, sed aliquet ipsum luctus. Morbi eu est tristique, consequat neque eu, suscipit odio. Maecenas faucibus lacinia laoreet. Nam ut tellus odio. Sed facilisis tincidunt sodales. Proin hendrerit dolor quis nulla elementum, ut pulvinar ex tincidunt. Quisque vitae purus ac risus sagittis fringilla. Phasellus fermentum faucibus suscipit.
+
+Donec congue enim id efficitur lacinia. Praesent tempus, velit a euismod ornare, lorem felis pharetra nulla, in aliquam diam quam in nibh. Nulla facilisi. Morbi malesuada urna nibh, nec semper libero malesuada non. Maecenas quis tortor vitae nisl condimentum ornare. Quisque convallis suscipit metus vel malesuada. Vivamus fringilla mattis mi eget luctus. Fusce ex arcu, efficitur vitae elit eget, aliquam faucibus lacus. Sed interdum nisl nec libero aliquam lobortis. Aenean semper, magna non lacinia rhoncus, metus lacus commodo sapien, at molestie magna urna ac magna. Duis elementum rutrum erat id sodales.
+
+Suspendisse bibendum quam ut augue faucibus semper. Maecenas sed purus consequat, sagittis quam nec, dapibus ante. Nunc erat nunc, ultrices nec nisi vel, cursus consequat sem. Vivamus molestie turpis ac sem malesuada luctus. Morbi laoreet sit amet odio id finibus. Praesent lacus justo, rhoncus non nulla commodo, posuere sodales sem. Aliquam condimentum porta condimentum. Integer congue eros risus, sed pharetra odio vestibulum vel. Mauris sagittis orci et lacus finibus luctus ut nec enim. Pellentesque magna massa, tristique a lectus et, pharetra placerat mauris. Donec eu est in leo sollicitudin elementum vitae tristique ipsum. Donec pulvinar consequat enim. Nunc cursus lorem ut dapibus maximus. Quisque vulputate ligula est, vitae vestibulum ante dapibus a.
+
+Fusce tempus nibh eget euismod ultrices. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Vestibulum mattis maximus nulla, ac consectetur erat scelerisque sed. Maecenas faucibus dui eros, finibus venenatis eros semper non. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed iaculis nisl risus, vitae blandit tortor mattis in. Morbi nisi enim, pulvinar eget pellentesque ac, faucibus in mi. Sed in mollis eros, sit amet maximus arcu.
+
+Nam luctus velit sed ipsum pharetra, eu mattis diam tempus. Phasellus volutpat nisi vitae imperdiet rhoncus. Mauris finibus ut mauris et euismod. Nullam sed efficitur libero. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Duis erat lectus, lacinia sit amet tempor ut, auctor et odio. Morbi tristique euismod erat, quis venenatis justo. Duis eu arcu placerat, pulvinar velit at, fermentum enim. Sed rutrum ipsum non ipsum condimentum consequat. Suspendisse vitae commodo turpis, eget imperdiet risus. Aenean fringilla, augue id hendrerit auctor, urna mi eleifend magna, in semper neque purus eu arcu. Suspendisse et leo mi. Donec consequat imperdiet urna, sed aliquam eros mollis in. Nullam condimentum fringilla hendrerit. Suspendisse ornare tincidunt lacus, id tristique tellus porta at. Suspendisse posuere sagittis erat, quis viverra diam varius in.
+
+Cras eget ex nec tortor iaculis ultricies a id urna. In neque ante, gravida sed rutrum in, finibus volutpat mi. Pellentesque malesuada nunc ex, vitae suscipit urna bibendum a. Etiam eleifend augue dui, ut laoreet nisi molestie et. Phasellus eu leo erat. Pellentesque in lorem ut velit ullamcorper laoreet luctus nec diam. Ut vulputate iaculis scelerisque. Praesent luctus justo justo, vulputate condimentum ipsum porttitor eget. Proin sit amet fermentum urna, sed pellentesque tellus. Suspendisse eu ullamcorper eros, ac finibus tellus. In auctor fermentum lectus a maximus. Pellentesque a pulvinar velit. Aliquam sed magna elementum, ornare ligula eu, porta odio. Nullam efficitur tortor nunc, sit amet finibus dui ornare tempor.
+
+Vestibulum enim dolor, mollis sed pulvinar vel, venenatis et justo. Cras porttitor id augue eget porta. Praesent tempor enim ut arcu dapibus molestie. Sed facilisis tortor vel nunc ultricies, non egestas ligula laoreet. Aliquam aliquet sit amet ex eu consequat. Ut ornare lectus non nisl iaculis bibendum. Aliquam dignissim, tellus dictum maximus tempus, purus metus fringilla purus, sed mattis enim justo quis mi. Donec at ipsum non eros sodales convallis. Aliquam tincidunt risus nisl, commodo pharetra nunc imperdiet ac. Nulla a elementum turpis, vel pharetra erat. Nulla interdum sed lacus quis elementum. Suspendisse blandit imperdiet erat, nec sollicitudin libero blandit ac. Suspendisse consectetur lacinia odio, eu pharetra elit fermentum non. Sed nec neque urna. Quisque vel sem eu risus tincidunt eleifend. In dictum efficitur bibendum.
+
+Cras ac quam eleifend, suscipit diam sit amet, maximus quam. Proin sit amet libero eu urna efficitur sollicitudin. Fusce nec finibus nulla, vitae ornare sem. Vivamus venenatis porttitor magna, sed venenatis ante placerat quis. Fusce et nulla hendrerit, semper nibh nec, auctor mi. Aenean sit amet leo eget mauris accumsan luctus. Cras tortor metus, vehicula ac ultricies eu, egestas ut massa. Fusce sollicitudin ex pretium, dapibus urna nec, varius nibh. Proin molestie quam metus, a volutpat arcu consectetur eget. Nam sagittis, odio sed rhoncus egestas, diam nibh efficitur nisi, convallis ultrices justo eros non neque. Proin vulputate tincidunt ipsum, vitae tristique risus. Aliquam feugiat luctus dui, id elementum nisl finibus at. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae;
+
+Praesent et velit est. Donec odio turpis, accumsan imperdiet iaculis in, mollis vitae orci. Sed sed molestie elit, at tristique lorem. Suspendisse consectetur ante id feugiat condimentum. Integer nec mauris sed lorem vestibulum commodo eu eget nunc. Vivamus faucibus, libero fermentum elementum vehicula, orci risus efficitur risus, ut posuere mi nisl non elit. Suspendisse sit amet libero magna. Integer sit amet mi et nulla euismod luctus id sit amet felis.
+
+Nulla facilisi. Sed fermentum urna quam, sed pharetra tellus sodales blandit. Vivamus sodales dui nec consequat euismod. Vivamus aliquet gravida metus, vitae consequat augue bibendum id. Curabitur fermentum laoreet turpis, ut interdum lectus dictum vitae. Fusce faucibus nisi ex, vitae sollicitudin turpis cursus at. Cras sodales tincidunt vehicula. Sed vitae leo quis nisl lacinia auctor. Proin faucibus elementum nibh, laoreet lobortis risus ornare sed.
+
+Vestibulum venenatis, augue ac tristique eleifend, tellus arcu imperdiet magna, ac eleifend lacus ipsum aliquam urna. Nam laoreet erat non rutrum ullamcorper. Mauris hendrerit aliquet tortor malesuada porttitor. Proin accumsan dolor porttitor augue ullamcorper, vitae vestibulum eros dapibus. Cras sagittis lorem lacus, ut rutrum lorem bibendum id. Praesent tristique semper ornare. Morbi posuere sit amet risus et faucibus. Maecenas a velit at nibh consequat pharetra sit amet eget enim. Morbi commodo enim magna, ac pretium sapien pellentesque eu. Mauris aliquet nisi venenatis, consequat purus at, aliquet risus.
+
+Morbi posuere erat ipsum, sit amet consequat enim consectetur in. Sed risus arcu, elementum dignissim tincidunt eu, efficitur feugiat mauris. Maecenas a mattis leo. Duis porta et felis sed ultricies. Curabitur eu aliquet lectus. Nunc ante felis, blandit eu lobortis sit amet, tempor eget urna. Mauris non metus nec metus viverra feugiat. Donec pellentesque tortor ac vehicula porttitor. Aliquam nunc sapien, dignissim nec tincidunt a, sollicitudin at nunc. Sed ut leo purus.
+
+In ullamcorper neque ex, eu sodales eros tincidunt sed. Quisque aliquam elit pretium, varius erat ac, iaculis turpis. Mauris id odio vestibulum, dictum sem sed, pulvinar felis. Ut magna turpis, hendrerit ac faucibus vel, euismod convallis velit. Maecenas rhoncus nisl lacus, nec dignissim leo imperdiet ac. Duis sed ante ut purus cursus ultrices ut eu nisi. Donec ut ante nibh. Vivamus lobortis purus leo, et vehicula magna consectetur a. Suspendisse gravida semper ligula vitae facilisis. Ut sit amet vestibulum elit, id sodales diam. Suspendisse potenti. Proin dapibus scelerisque turpis at dignissim.
+
+Interdum et malesuada fames ac ante ipsum primis in faucibus. Sed accumsan vulputate metus ut mattis. Ut semper porttitor justo in laoreet. Mauris sit amet mollis magna, vel condimentum elit. Quisque non aliquet justo. Fusce eget leo at enim commodo molestie. Praesent ipsum nulla, ultrices eget ex in, tristique ullamcorper felis. Nulla posuere commodo semper. Nam id mauris sit amet lacus luctus suscipit. Sed scelerisque gravida tristique. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Integer condimentum nulla semper, convallis leo sit amet, tempor nisl. Cras semper diam ac leo ornare aliquet et et lectus. Fusce sed nunc vitae nunc auctor semper et ac arcu.
+
+Aenean molestie nibh varius nisi consectetur elementum. Praesent condimentum, mi sit amet pretium suscipit, nisl est pharetra metus, sit amet feugiat neque quam vel purus. Nunc vehicula vestibulum mi eget gravida. Nullam consequat odio eget feugiat faucibus. Quisque pretium condimentum sollicitudin. Vestibulum vitae sem ut velit accumsan varius sit amet a tortor. Nunc eu mi a lorem varius bibendum vitae quis lacus. Maecenas gravida tristique lectus at pharetra. Aenean vehicula vehicula ex ut accumsan.
+
+In at consequat massa. Mauris finibus tempor nisi. Fusce a congue nulla. Aenean tempor mi vel ligula consectetur elementum. Nam scelerisque nisl et nulla faucibus, a molestie nisi bibendum. Curabitur venenatis lacus vestibulum, ultricies tellus et, elementum mauris. Pellentesque facilisis id libero id cursus. Maecenas lacinia quam quis arcu tristique aliquet. Fusce eu elit lobortis, accumsan dolor at, finibus nisl. Suspendisse facilisis dictum egestas. Cras volutpat diam ut nulla eleifend efficitur. Donec vel dapibus velit. Curabitur in mollis enim, sit amet suscipit dui. Nullam suscipit, mauris et suscipit molestie, nisl nulla elementum urna, ac varius dolor elit eget libero. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas.
+
+Vivamus vel dui ac lectus scelerisque elementum dictum nec orci. Suspendisse et venenatis arcu. Nullam velit orci, volutpat sed convallis in, pretium vel felis. Phasellus sollicitudin urna nec est porta, a consectetur massa egestas. Vivamus in malesuada lacus. Ut pellentesque sagittis velit, gravida vulputate neque efficitur sed. Vestibulum vitae libero et dui iaculis bibendum a nec velit. In aliquet ultricies pellentesque. Nunc suscipit, nulla id maximus viverra, nisi turpis dignissim nunc, sit amet auctor sapien ipsum sit amet magna. Mauris pretium velit congue turpis mollis faucibus. Duis non nunc sapien. Vivamus facilisis lacinia lectus, et tempor elit.
+
+Duis mi ligula, dignissim non sapien quis, congue consequat enim. Aenean lobortis purus ac tellus maximus efficitur. Cras iaculis erat sagittis feugiat viverra. Maecenas viverra, orci eu sodales porttitor, libero arcu efficitur nulla, a pellentesque nunc sapien non mi. Ut dignissim imperdiet vehicula. Nam eu sapien convallis, pulvinar felis id, sodales lorem. Praesent ornare tristique mi nec posuere. Pellentesque egestas diam nec condimentum fringilla. Nunc pulvinar urna aliquet ex vehicula suscipit. Sed pretium orci nunc, quis gravida ipsum consequat sit amet. Integer sit amet libero eu mauris ultricies auctor eu nec mi. Donec pulvinar eros erat, eget molestie neque dictum sit amet. Sed vitae venenatis nisi, tincidunt ultricies enim. Nam et velit gravida, malesuada dolor eget, feugiat massa. Morbi vel pellentesque arcu. Sed vulputate libero vel ipsum placerat posuere.
+
+Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Cras mattis ultrices enim id posuere. Proin sollicitudin posuere lectus, in tempus odio porta quis. Etiam semper sapien elit, eu imperdiet tortor iaculis sed. Ut id faucibus arcu. Suspendisse tincidunt, tortor sed dapibus ullamcorper, odio ex egestas purus, eget posuere ante elit quis augue. Nulla facilisi. Pellentesque feugiat euismod elit, eu luctus tellus feugiat a. Aliquam cursus rhoncus mauris at consequat. Morbi dapibus metus id est bibendum, et mollis eros lobortis. Nulla erat turpis, sodales sit amet dictum id, pharetra sed magna. Proin efficitur erat id libero congue pellentesque eu eu massa. Ut a lobortis nunc. Aliquam sollicitudin diam vel magna tempor convallis. Vivamus non tempus orci. Fusce lacinia, ipsum vitae finibus imperdiet, elit lorem pretium elit, tincidunt pretium odio erat in diam.
+
+Morbi suscipit rhoncus odio a molestie. Donec eleifend ipsum eget efficitur varius. Etiam faucibus pretium urna, sed fermentum magna feugiat ut. Aenean ornare gravida vehicula. Aenean sagittis est pretium mollis facilisis. Sed scelerisque placerat erat, vel lacinia nibh feugiat vitae. Praesent vel dapibus lacus. Nunc bibendum tempor lorem et faucibus. Praesent mattis blandit neque interdum varius. Nunc pharetra orci sed ipsum tincidunt, non suscipit nisl malesuada. Maecenas tincidunt libero sit amet mattis lacinia.
+
+Duis accumsan sem erat, a ornare nibh faucibus vulputate. Ut rutrum scelerisque sapien vitae consectetur. Aliquam quis tristique tortor. Maecenas nibh lacus, varius a blandit eu, dapibus sit amet sem. Vivamus accumsan, libero sit amet suscipit elementum, nisl magna fermentum ipsum, laoreet elementum orci nisl et ligula. Curabitur in ligula placerat, scelerisque tellus in, ultricies nibh. Nam nunc libero, egestas at mauris dignissim, consectetur congue urna. Suspendisse molestie diam nec ipsum molestie, eu rutrum nulla sollicitudin. Duis quis facilisis arcu, in semper leo. Quisque viverra ultricies orci, eu mattis eros pulvinar mattis. Pellentesque vel finibus ante. Praesent ac mi facilisis, mollis augue vitae, rhoncus mauris. Pellentesque commodo vestibulum maximus. Donec accumsan urna id iaculis malesuada. Integer varius elit nec orci pulvinar, ut ultrices metus vulputate.
+
+Cras posuere neque mauris, in dignissim magna tincidunt sit amet. Aliquam sit amet mi dolor. Quisque elementum molestie posuere. Vestibulum tempor mollis purus, vitae vestibulum purus tempor quis. Aenean ut augue massa. Suspendisse tincidunt tincidunt erat, in consequat massa vulputate id. Duis cursus eget enim eu tristique. Proin quis nulla sed velit commodo dignissim. Praesent lacinia ante a ante lobortis, id imperdiet augue rutrum. Quisque purus lacus, sollicitudin euismod venenatis sit amet, eleifend nec eros. Sed luctus faucibus dolor ut eleifend. Quisque tincidunt ante elit, nec vulputate eros fermentum vel. In posuere leo vel risus efficitur mollis. Phasellus imperdiet pharetra orci.
+
+Fusce auctor sagittis turpis, nec pharetra dolor pharetra vel. Vestibulum luctus sagittis gravida. Nulla quam erat, sagittis non elit id, gravida hendrerit leo. In eleifend elit at efficitur blandit. Sed quis dignissim nulla. Sed in dapibus tortor. Vivamus lacinia, ligula vitae cursus porttitor, dui urna condimentum nisi, quis hendrerit dolor eros vel neque. Curabitur eget lectus vel elit lobortis scelerisque. Etiam congue, risus feugiat faucibus rutrum, urna orci egestas felis, auctor finibus est urna id eros. Morbi rutrum, arcu quis dictum euismod, turpis urna lacinia enim, ac malesuada justo elit non lorem. Sed vel orci nec ex rutrum faucibus. Praesent nisl sapien, ultrices quis justo eu, molestie suscipit ante. Donec gravida quis purus eu dignissim. Donec vulputate convallis ipsum vitae pellentesque. Pellentesque ut urna mi.
+
+In id quam vel libero mollis commodo a ac sem. Sed ornare elit est, molestie condimentum justo mattis sed. Vivamus tempor velit sit amet libero venenatis ultrices. Cras faucibus orci venenatis diam fermentum commodo. Donec pulvinar augue lacus, vitae dictum nisl auctor sed. Suspendisse ut nisi porttitor, porta neque id, tincidunt dolor. Fusce mollis laoreet arcu nec ultricies. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Duis ultricies nisl eget dui semper dapibus. Aenean vitae lacus est. Proin vel erat sed ex euismod facilisis. Interdum et malesuada fames ac ante ipsum primis in faucibus.
+
+Ut non fermentum tellus, sed vehicula augue. Etiam blandit lacus sapien, luctus sagittis leo auctor sit amet. Sed ipsum massa, eleifend sit amet augue non, tristique vulputate lacus. Suspendisse sit amet leo odio. Quisque dignissim, erat non eleifend accumsan, nisl diam blandit neque, eget sodales enim ipsum in lorem. Praesent erat dolor, pulvinar vitae turpis sit amet, auctor dignissim ligula. Fusce eget commodo massa. Nullam sit amet tincidunt libero, id vehicula erat. Nulla a fermentum elit. Aenean maximus luctus auctor. Integer sit amet maximus diam, ac lobortis sapien.
+
+Sed at ultricies velit, in laoreet dui. Pellentesque sit amet euismod mauris. Fusce euismod vehicula mauris. Phasellus magna nisi, maximus vel elit et, fringilla aliquet elit. Proin varius, ipsum eget scelerisque malesuada, ipsum felis vulputate tortor, eu luctus justo ipsum sit amet elit. Suspendisse lacus leo, mollis et malesuada eget, pharetra nec massa. Donec tristique fringilla pharetra. Maecenas malesuada mi turpis. Nulla id mauris purus.
+
+Nullam rutrum in ex non placerat. Cras rutrum nulla sit amet felis ultricies feugiat. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Suspendisse laoreet turpis eu eros vestibulum, cursus blandit arcu ultricies. Ut et quam eu diam gravida pulvinar a non dui. Sed ut lectus sem. In tristique finibus elit sit amet aliquet. Vestibulum convallis nunc arcu, in euismod ante vulputate et. Etiam tempor enim non iaculis elementum. Suspendisse feugiat sem non nisi imperdiet, eu convallis ante malesuada. Duis consectetur, ligula at viverra vehicula, neque neque aliquam arcu, sed eleifend elit arcu non diam. Fusce in magna et neque ultrices varius. Sed ante nibh, egestas id ligula sit amet, ullamcorper aliquet erat. Nulla dictum neque et sagittis blandit. Curabitur tincidunt sollicitudin ligula in consectetur. Fusce velit est, aliquet eu tempor ut, lobortis quis justo.
+
+Proin vel augue ut ex viverra lobortis. Maecenas ultricies vulputate metus, in consectetur dolor gravida quis. Suspendisse potenti. Curabitur vitae mauris a dolor efficitur accumsan eget eget tellus. Suspendisse tristique quam ac pellentesque viverra. Curabitur ex turpis, consequat non est at, finibus ultricies sem. Duis bibendum mi vel libero aliquam scelerisque. Sed eget rhoncus sapien. In dictum, neque vitae efficitur accumsan, nulla ipsum ultrices eros, vitae porttitor massa ex vel augue. Curabitur aliquet dui et urna dapibus, a elementum diam dapibus. Pellentesque leo libero, ornare vitae fringilla non, venenatis vitae massa. Interdum et malesuada fames ac ante ipsum primis in faucibus. Suspendisse dapibus nisi ut nunc vulputate pellentesque. Suspendisse auctor erat non viverra fringilla. Pellentesque feugiat dictum urna, eu auctor metus aliquam vitae. Nunc nulla sem, maximus in lacinia non, viverra eu nulla.
+
+In fringilla cursus nisi vel tempus. Mauris blandit leo vel facilisis blandit. Quisque auctor magna quis justo commodo, in laoreet justo pharetra. In hac habitasse platea dictumst. Cras imperdiet cursus eros, quis rhoncus neque viverra in. Praesent rutrum aliquam euismod. In vitae elit blandit erat efficitur vehicula vitae quis lectus. Fusce consectetur nibh sit amet felis placerat consectetur. Morbi leo risus, dictum vel vestibulum vel, tempor id erat. Suspendisse facilisis massa nec risus maximus, nec semper purus fringilla. Cras dapibus diam eu elit sollicitudin, in tempor tellus accumsan. Proin pulvinar varius sollicitudin. Nullam quis tellus ac est imperdiet malesuada.
+
+Morbi sem nulla, egestas a luctus at, egestas id magna. Pellentesque ac tristique neque, in vestibulum enim. Fusce turpis nisi, commodo a justo id, fermentum vulputate sem. Phasellus fermentum elementum dui, id dictum leo fermentum et. Fusce porttitor enim odio, sit amet porttitor dolor luctus eget. Etiam ligula libero, finibus vitae enim vitae, facilisis fringilla mi. Fusce eget fermentum dui.
+
+Cras quis ipsum ultricies, tincidunt nibh non, commodo nisl. In commodo diam et quam porttitor, non sagittis ante feugiat. Vestibulum ultricies elit non lectus ultrices, a egestas dui tempus. Etiam faucibus ipsum ante, interdum condimentum ligula pellentesque at. Integer ornare bibendum libero vel accumsan. Donec ornare finibus diam fringilla pharetra. Nam pellentesque nibh quis diam tincidunt faucibus. Sed tortor arcu, posuere id enim accumsan, tristique lobortis velit. Suspendisse massa turpis, maximus ut eros vitae, sollicitudin efficitur libero. Phasellus ut scelerisque nisl. Ut ligula risus, venenatis at orci non, hendrerit aliquam mi. Vestibulum a varius ante, ac pulvinar diam. Integer hendrerit fringilla erat, eu egestas mi fringilla molestie. Aliquam erat volutpat. Nunc ut feugiat elit. Etiam a bibendum dui.
+
+Morbi ornare molestie lobortis. Aliquam erat nunc, placerat eget volutpat in, vehicula nec tortor. Maecenas et libero nec nibh mollis bibendum quis et neque. Fusce eleifend eros quis consequat hendrerit. Nunc ac dolor odio. Nullam condimentum ut dolor id venenatis. Quisque ultrices, urna quis commodo elementum, augue lectus tristique turpis, at lobortis nibh dolor sit amet lectus. Curabitur accumsan tortor ex, ut sagittis tortor volutpat a. Morbi justo diam, iaculis et felis vel, pretium porttitor mi. Cras volutpat enim ut posuere sollicitudin. Nulla suscipit diam ut varius volutpat. Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+
+Duis ut convallis est, ac cursus purus. Fusce euismod gravida diam non lacinia. Pellentesque quis arcu fermentum, elementum erat et, porttitor sem. Sed sed mauris sed urna auctor ultricies. Mauris vel sodales purus. Vivamus semper lorem nec ligula ultricies, lobortis lobortis metus scelerisque. Morbi in dolor hendrerit metus sodales mollis sed eget neque. Nam sollicitudin, nulla id consequat malesuada, ligula nulla imperdiet lacus, nec pellentesque nunc leo convallis elit. Aenean vestibulum ipsum quis nulla laoreet, ut convallis velit sodales. Quisque dolor tellus, dignissim sit amet nulla ut, mollis vulputate ligula. Sed tempus porta rutrum. Sed tincidunt justo eget est ullamcorper, quis tempor odio convallis.
+
+Pellentesque tortor felis, euismod a orci at, posuere tristique neque. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Integer congue elit dignissim dolor feugiat, non pellentesque quam aliquam. Aenean porttitor, mi nec aliquet vehicula, magna diam euismod elit, gravida commodo nunc erat ut nulla. Mauris arcu odio, dictum a fermentum et, tempor quis nisl. Vestibulum congue rhoncus semper. Suspendisse ut convallis ante, non consequat nibh. Vivamus blandit laoreet accumsan. Maecenas feugiat congue mi ac aliquet. Nunc nisl massa, facilisis vel fringilla et, vestibulum ac lorem. Proin eget ipsum id turpis hendrerit pharetra in eget nisi. Cras tellus arcu, tristique id dictum ornare, tempus et ante. Aenean aliquam elementum metus vitae pretium.
+
+Cras et purus tellus. Quisque ipsum enim, sagittis sit amet vulputate in, sollicitudin in felis. Vivamus a commodo nisi. Aliquam ligula neque, venenatis vel risus id, pellentesque gravida sapien. Donec leo ipsum, tincidunt non suscipit eu, scelerisque sit amet tortor. Donec sit amet nisl tristique, placerat ex id, aliquam nibh. Etiam fringilla nisl sem, ac pellentesque ex lobortis eget.
+
+Donec luctus dui sit amet imperdiet accumsan. Sed tempus rutrum finibus. Nunc aliquet vitae ligula non tempus. Pellentesque mauris tortor, ullamcorper at velit in, consectetur commodo nisi. Vestibulum tempor massa quis est ultricies lobortis. Aliquam et elit bibendum, sodales nulla in, sollicitudin tellus. Morbi rhoncus eros nec quam ultricies varius. Praesent vitae venenatis velit, eget dignissim velit. Aliquam pellentesque, urna vitae dictum tristique, nibh mauris vehicula felis, ut eleifend orci magna a nulla. Fusce vel laoreet dolor, a imperdiet lacus. Vivamus at pharetra tortor. Aliquam ut ultricies magna, eget vehicula neque.
+
+Cras laoreet facilisis varius. Donec congue tempor orci, euismod sagittis nulla ornare et. Integer sollicitudin id felis ac mollis. Aliquam eget elit in nulla posuere consequat. Mauris nec hendrerit libero, id elementum diam. Donec rhoncus consectetur eros, non condimentum sapien malesuada sed. Pellentesque sagittis enim luctus fermentum sodales. Nam condimentum molestie nulla quis cursus. Quisque vitae sollicitudin diam. Fusce mattis elementum lectus a rutrum. Donec egestas dui eros, ut dictum metus tincidunt ut. Nullam at eros est. Mauris mollis vestibulum velit vel facilisis. In accumsan nisi in lorem commodo maximus.
+
+Nam nec libero dictum, cursus eros quis, ultricies metus. Sed in leo sapien. Suspendisse sollicitudin orci vitae interdum iaculis. Nullam cursus id nunc eget scelerisque. Curabitur non tincidunt elit. Duis gravida auctor pellentesque. Integer sodales ultrices nibh a ornare. Phasellus efficitur mi arcu, at pulvinar turpis gravida eu. Aliquam vitae posuere urna. Sed iaculis aliquet ipsum vel mollis.
+
+Pellentesque interdum bibendum eros vel convallis. Sed iaculis erat tortor, quis suscipit quam laoreet vitae. Sed ut augue dignissim, viverra diam molestie, vehicula est. Ut facilisis aliquet ipsum, non finibus mauris pretium non. Donec vel dapibus tellus. Proin at justo tellus. Praesent eget risus quis urna maximus dictum. Cras sapien ipsum, ullamcorper eget augue nec, pellentesque tempus ante. Aenean ut mattis justo. Fusce congue massa a augue dapibus dapibus. Maecenas interdum enim et ligula tincidunt accumsan.
+
+Aliquam et tempor arcu. Sed auctor lacus justo, ut dictum diam auctor sit amet. Quisque sed quam rutrum, pulvinar justo non, dignissim felis. Donec in est eget nulla convallis tristique ut nec nunc. Maecenas pulvinar felis sem, at pulvinar augue sodales non. In magna ex, mollis id finibus sit amet, imperdiet a nisi. Fusce ullamcorper, leo et suscipit consectetur, ex odio sodales elit, scelerisque scelerisque turpis risus et ex. Morbi sed ultrices ex. Duis vel arcu rutrum, volutpat dui vel, luctus ligula. Maecenas nibh ante, porttitor vestibulum quam ut, consequat consectetur elit.
+
+Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Duis consequat lorem vitae massa volutpat, eu venenatis orci placerat. Integer varius sagittis volutpat. In vel mollis ante. Maecenas eget vestibulum dolor, ut aliquam sapien. Nam efficitur orci laoreet, lobortis nunc eu, pretium quam. Suspendisse et purus a quam vestibulum faucibus a tristique magna. Nulla at enim gravida massa eleifend molestie vitae quis erat. Integer tristique nisi libero, et varius lacus posuere eget. Donec interdum sed nisi a congue. Nam sodales mattis pharetra. Curabitur gravida sapien nec viverra posuere. Duis a dolor vulputate, sollicitudin mi vitae, accumsan erat. Sed leo neque, rhoncus posuere fringilla vitae, porttitor vel nulla.
+
+In hac habitasse platea dictumst. Etiam a mollis dolor, nec suscipit ex. Aenean nec bibendum velit. Donec fermentum, nisl vel porta semper, nunc velit porttitor felis, egestas malesuada magna tellus vel tortor. Integer fermentum nulla at eros fringilla, sit amet fringilla lectus luctus. Nulla scelerisque arcu ac rhoncus iaculis. Proin lobortis tincidunt velit, at mattis augue eleifend id. Sed pellentesque semper diam sit amet ultricies. Etiam felis lectus, molestie id orci quis, porttitor dictum mauris. Nulla facilisi. Fusce tempus urna quis sollicitudin blandit. Phasellus sed sodales est, quis viverra velit. Duis eget auctor risus. Aliquam tempor turpis quis turpis aliquet, id viverra ipsum vestibulum. Integer ac finibus tellus.
+
+Donec scelerisque placerat metus, ac tincidunt turpis ornare non. Aenean dignissim pharetra ex vel dignissim. Sed egestas tristique lacus, et convallis nibh vestibulum rutrum. Nulla facilisi. Sed posuere volutpat ex, vel consequat nunc dapibus at. Curabitur sit amet dapibus risus. Fusce dui est, varius venenatis libero sit amet, tincidunt facilisis felis. Morbi pharetra volutpat mauris vitae varius. Nam vestibulum, arcu at efficitur facilisis, ex mauris ultricies sem, at interdum metus nunc at est. Phasellus id leo eu lacus aliquet gravida eu ac tortor.
+
+Etiam dapibus sem eu tellus luctus, at laoreet enim feugiat. Morbi mollis justo quam, in egestas ex pulvinar a. Etiam et aliquam metus. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nam sit amet elit sed nunc vestibulum iaculis ut vel augue. Quisque risus metus, ultrices id ipsum sit amet, dapibus consequat leo. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed nulla sapien, consequat rhoncus accumsan vitae, finibus a mauris. Vivamus at odio arcu. Nam in lacus non dui laoreet pulvinar. Sed quis tempor urna, ut condimentum turpis. Cras vulputate eros erat, sit amet auctor orci blandit nec. Integer consectetur fringilla rhoncus. Suspendisse suscipit lectus finibus consectetur imperdiet.
+
+Proin pellentesque ligula vel lacus laoreet, id elementum diam facilisis. Ut et ipsum ligula. Sed in nisi vel erat maximus cursus sed eu velit. Aenean porttitor felis arcu, aliquet maximus ante mollis id. Praesent laoreet nisi lacus, sit amet rutrum turpis blandit vel. Integer in volutpat elit. Suspendisse scelerisque elit et erat tempus, sed consectetur leo molestie. Etiam eleifend massa sit amet ante euismod facilisis.
+
+Proin accumsan sed nunc quis sollicitudin. Aliquam vehicula orci eu libero placerat, sed condimentum justo hendrerit. Morbi eu turpis ut sapien fringilla molestie vel non risus. Nunc porttitor est nec est interdum, imperdiet volutpat sem malesuada. Curabitur a lacus eu enim cursus tristique. Morbi pharetra mollis tincidunt. Sed viverra libero tempus sem tristique, quis elementum ipsum tincidunt. Duis tincidunt feugiat tortor pellentesque tempor. Mauris pellentesque pretium ex porta consectetur. Vestibulum euismod sollicitudin nibh id maximus. Aenean bibendum, mi quis dapibus facilisis, purus dolor viverra risus, nec aliquam velit quam at ipsum. Vivamus enim velit, rutrum at finibus non, placerat a justo. Praesent maximus nunc sed maximus fringilla. Sed in est in odio auctor tempus. Quisque erat lorem, sodales ut eros quis, dictum porttitor ipsum.
+
+Ut facilisis pellentesque leo, aliquam imperdiet leo maximus a. Donec eget turpis porttitor, euismod lorem vitae, condimentum lorem. Sed non convallis metus, a tristique metus. Aenean nec est a libero ultrices fermentum eget malesuada sapien. Phasellus faucibus elit felis, in efficitur lectus maximus nec. Nullam mollis quam est, ac finibus eros efficitur ut. Proin pretium, metus id lacinia molestie, mi diam dignissim nulla, ac feugiat dui dui a urna. Aliquam erat volutpat. Donec eget viverra nunc. Vivamus a facilisis est. Morbi varius felis orci, eget tempus quam congue vitae.
+
+Suspendisse in ipsum ut turpis ornare pellentesque sed sed velit. Morbi posuere in sapien tempus egestas. Aenean fermentum ipsum vel risus dictum, a mollis lectus tristique. Vestibulum sed sapien sed sem cursus sodales. Quisque ultricies ligula ut erat gravida molestie. Cras tincidunt urna odio, at varius lectus sagittis eget. Donec rhoncus accumsan tincidunt.
+
+Curabitur risus lorem, tempus euismod hendrerit eget, consequat vitae risus. Pellentesque malesuada laoreet tempus. Nunc sagittis, mi ut tristique sodales, tortor quam pulvinar ipsum, vestibulum accumsan dui augue a velit. Quisque faucibus nibh fermentum tempor vehicula. Morbi id rutrum velit, ut imperdiet justo. Nulla nec neque libero. Fusce consequat ornare tincidunt. Praesent eget imperdiet libero. Aliquam convallis risus sed risus condimentum ultricies. Duis sed purus purus. Quisque pulvinar faucibus finibus. Etiam fringilla sapien et tellus sollicitudin consectetur. Proin viverra eu nunc malesuada ullamcorper.
+
+Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nullam auctor, elit vitae euismod laoreet, leo erat blandit felis, quis porta nibh sem a massa. Nam vehicula est purus, vel convallis mauris commodo a. Proin cursus tortor eu velit consectetur fermentum. Nunc et egestas purus, et volutpat orci. Mauris eleifend id tellus in eleifend. Duis lectus tellus, malesuada et velit at, hendrerit finibus nulla. Ut tincidunt sagittis orci, mollis condimentum urna lobortis quis. Integer vitae dictum eros. Phasellus eu hendrerit neque. Aenean sit amet lectus nunc. Pellentesque tortor sapien, euismod rutrum placerat quis, cursus eu nunc. Suspendisse pretium, erat non mollis pellentesque, sapien neque rhoncus justo, in facilisis odio augue ac lacus. Nunc a sapien sodales, convallis nisl ac, gravida ante. Suspendisse sollicitudin eu leo eget facilisis. Donec sodales justo eu lacus tincidunt, sit amet tristique ipsum egestas.
+
+Suspendisse pharetra dictum neque, vel elementum sem condimentum lobortis. Aenean eget aliquet dolor. Aliquam erat volutpat. Ut feugiat tempor pretium. Phasellus faucibus eros et mauris ultrices, in pretium felis consectetur. Nullam ac turpis venenatis, feugiat massa vel, tristique turpis. Nunc eu ligula non quam laoreet dictum. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Maecenas sed mi imperdiet quam commodo accumsan. Vivamus pharetra iaculis diam, non tempus tortor pharetra dapibus. Nulla posuere, velit nec vehicula cursus, mi massa ultrices lectus, ut scelerisque quam velit sed velit. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas commodo, lorem ut elementum accumsan, sem leo lacinia tortor, ac placerat purus erat eget libero. Nunc posuere scelerisque ante, ut eleifend mauris vehicula nec.
+
+Donec rutrum quam dolor, id varius velit efficitur non. Aliquam eros lacus, dapibus at leo sit amet, ultricies ullamcorper ante. Nam quis condimentum leo. Curabitur porta vel nulla ac lobortis. Sed et tellus eu erat cursus bibendum. Vivamus id eros eget enim molestie volutpat vel at lectus. Ut ut neque erat. Sed vulputate erat justo, tristique bibendum mauris elementum eget. Pellentesque at convallis nisi. In commodo placerat elit et semper. Fusce nec sed.
diff --git a/src/mongo/gotools/mongoimport/common.go b/src/mongo/gotools/mongoimport/common.go
new file mode 100644
index 00000000000..cd7c4d333e8
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/common.go
@@ -0,0 +1,472 @@
+package mongoimport
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2/bson"
+ "gopkg.in/tomb.v2"
+)
+
+type ParseGrace int
+
+const (
+ pgAutoCast ParseGrace = iota
+ pgSkipField
+ pgSkipRow
+ pgStop
+)
+
+// ValidatePG ensures the user-provided parseGrace is one of the allowed
+// values.
+func ValidatePG(pg string) (ParseGrace, error) {
+ switch pg {
+ case "autoCast":
+ return pgAutoCast, nil
+ case "skipField":
+ return pgSkipField, nil
+ case "skipRow":
+ return pgSkipRow, nil
+ case "stop":
+ return pgStop, nil
+ default:
+ return pgAutoCast, fmt.Errorf("invalid parse grace: %s", pg)
+ }
+}
+
+// ParsePG interprets the user-provided parseGrace, assuming it is valid.
+func ParsePG(pg string) (res ParseGrace) {
+ res, _ = ValidatePG(pg)
+ return
+}
+
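A quick illustration of the validation above, as a minimal sketch: ValidatePG is exported, so a caller outside the package can exercise it directly (the sample values below are mine; only ValidatePG itself comes from this file).

    package main

    import (
        "fmt"

        "github.com/mongodb/mongo-tools/mongoimport"
    )

    func main() {
        // Only the four documented values are accepted; anything else
        // falls back to pgAutoCast alongside a non-nil error.
        for _, v := range []string{"autoCast", "skipRow", "bogus"} {
            if _, err := mongoimport.ValidatePG(v); err != nil {
                fmt.Printf("%q rejected: %v\n", v, err)
                continue
            }
            fmt.Printf("%q accepted\n", v)
        }
    }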
+// Converter is an interface that adds the basic Convert method which returns a
+// valid BSON document that has been converted by the underlying implementation.
+// If conversion fails, err will be set.
+type Converter interface {
+ Convert() (document bson.D, err error)
+}
+
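Converter is the seam between the input readers and the worker pool below. A minimal sketch of a custom implementation, assuming a made-up pairConverter type (the real implementations are types like CSVConverter, visible in the tests further down):

    package main

    import (
        "fmt"

        "gopkg.in/mgo.v2/bson"
    )

    // pairConverter is a toy Converter: it turns one key/value pair
    // into a single-element bson.D, failing on an empty key.
    type pairConverter struct{ key, val string }

    func (p pairConverter) Convert() (bson.D, error) {
        if p.key == "" {
            return nil, fmt.Errorf("empty key")
        }
        return bson.D{{Name: p.key, Value: p.val}}, nil
    }

    func main() {
        doc, err := pairConverter{"tool", "mongoimport"}.Convert()
        fmt.Println(doc, err) // [{tool mongoimport}] <nil>
    }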
+// An importWorker reads Converters from the unprocessedDataChan channel and
+// sends the processed BSON documents on the processedDocumentChan channel.
+type importWorker struct {
+ // unprocessedDataChan is used to stream the input data for a worker to process
+ unprocessedDataChan chan Converter
+
+ // used to stream the processed document back to the caller
+ processedDocumentChan chan bson.D
+
+ // used to synchronise all worker goroutines
+ tomb *tomb.Tomb
+}
+
+// sizeTracker is an interface for tracking the number of bytes read, which is
+// used in mongoimport to feed the progress bar.
+type sizeTracker interface {
+ Size() int64
+}
+
+// sizeTrackingReader implements Reader and sizeTracker by wrapping an io.Reader and keeping track
+// of the total number of bytes read from each call to Read().
+type sizeTrackingReader struct {
+ bytesRead int64
+ reader io.Reader
+}
+
+func (str *sizeTrackingReader) Size() int64 {
+ bytes := atomic.LoadInt64(&str.bytesRead)
+ return bytes
+}
+
+func (str *sizeTrackingReader) Read(p []byte) (n int, err error) {
+ n, err = str.reader.Read(p)
+ atomic.AddInt64(&str.bytesRead, int64(n))
+ return
+}
+
+func newSizeTrackingReader(reader io.Reader) *sizeTrackingReader {
+ return &sizeTrackingReader{
+ reader: reader,
+ bytesRead: 0,
+ }
+}
+
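The byte-counting wrapper is self-contained enough to reproduce standalone. A minimal sketch of the same pattern (countingReader is my name, not the tool's):

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "strings"
        "sync/atomic"
    )

    // countingReader mirrors sizeTrackingReader: it wraps an io.Reader
    // and atomically accumulates the bytes handed out by Read, so a
    // progress goroutine can poll the count without a data race.
    type countingReader struct {
        n int64
        r io.Reader
    }

    func (c *countingReader) Read(p []byte) (int, error) {
        n, err := c.r.Read(p)
        atomic.AddInt64(&c.n, int64(n))
        return n, err
    }

    func main() {
        cr := &countingReader{r: strings.NewReader("hello, world")}
        io.Copy(ioutil.Discard, cr)
        fmt.Println(atomic.LoadInt64(&cr.n), "bytes read") // 12 bytes read
    }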
+var (
+ UTF8_BOM = []byte{0xEF, 0xBB, 0xBF}
+)
+
+// bomDiscardingReader implements and wraps io.Reader, discarding a leading UTF-8 BOM if present
+type bomDiscardingReader struct {
+ buf *bufio.Reader
+ didRead bool
+}
+
+func (bd *bomDiscardingReader) Read(p []byte) (int, error) {
+ if !bd.didRead {
+ bom, err := bd.buf.Peek(3)
+ if err == nil && bytes.Equal(bom, UTF8_BOM) {
+ bd.buf.Read(make([]byte, 3)) // discard BOM
+ }
+ bd.didRead = true
+ }
+ return bd.buf.Read(p)
+}
+
+func newBomDiscardingReader(r io.Reader) *bomDiscardingReader {
+ return &bomDiscardingReader{buf: bufio.NewReader(r)}
+}
+
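To see the peek-and-discard behavior in isolation, a minimal standalone sketch using the same bufio primitives (bufio.Reader.Discard stands in for the read-into-a-throwaway-buffer trick above):

    package main

    import (
        "bufio"
        "bytes"
        "fmt"
        "io/ioutil"
    )

    func main() {
        bom := []byte{0xEF, 0xBB, 0xBF}
        br := bufio.NewReader(bytes.NewReader(append(bom, []byte("a,b,c")...)))
        // Peek never consumes input, so a stream shorter than three
        // bytes just returns an error and passes through untouched.
        if lead, err := br.Peek(3); err == nil && bytes.Equal(lead, bom) {
            br.Discard(3) // drop the BOM before any real reads happen
        }
        rest, _ := ioutil.ReadAll(br)
        fmt.Printf("%s\n", rest) // a,b,c
    }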
+// channelQuorumError takes a channel and a quorum - which specifies how many
+// messages to receive on that channel before returning. It either returns the
+// first non-nil error received on the channel or nil if up to `quorum` nil
+// errors are received
+func channelQuorumError(ch <-chan error, quorum int) (err error) {
+ for i := 0; i < quorum; i++ {
+ if err = <-ch; err != nil {
+ return
+ }
+ }
+ return
+}
+
+// constructUpsertDocument constructs a BSON document to use for upserts
+func constructUpsertDocument(upsertFields []string, document bson.D) bson.D {
+ upsertDocument := bson.D{}
+ var hasDocumentKey bool
+ for _, key := range upsertFields {
+ val := getUpsertValue(key, document)
+ if val != nil {
+ hasDocumentKey = true
+ }
+ upsertDocument = append(upsertDocument, bson.DocElem{Name: key, Value: val})
+ }
+ if !hasDocumentKey {
+ return nil
+ }
+ return upsertDocument
+}
+
+// doSequentialStreaming takes a slice of workers, a readDocs (input) channel and
+// an outputChan (output) channel. It sequentially writes unprocessed data read from
+// the input channel to each worker and then sequentially reads the processed data
+// from each worker before passing it on to the output channel
+func doSequentialStreaming(workers []*importWorker, readDocs chan Converter, outputChan chan bson.D) {
+ numWorkers := len(workers)
+
+ // feed in the data to be processed and do round-robin
+ // reads from each worker once processing is completed
+ go func() {
+ i := 0
+ for doc := range readDocs {
+ workers[i].unprocessedDataChan <- doc
+ i = (i + 1) % numWorkers
+ }
+
+ // close the read channels of all the workers
+ for i := 0; i < numWorkers; i++ {
+ close(workers[i].unprocessedDataChan)
+ }
+ }()
+
+ // coordinate the order in which the documents are sent over to the
+ // main output channel
+ numDoneWorkers := 0
+ i := 0
+ for {
+ processedDocument, open := <-workers[i].processedDocumentChan
+ if open {
+ outputChan <- processedDocument
+ } else {
+ numDoneWorkers++
+ }
+ if numDoneWorkers == numWorkers {
+ break
+ }
+ i = (i + 1) % numWorkers
+ }
+}
+
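The ordering invariant here deserves a standalone illustration: because documents are dealt to workers round-robin and collected in the same rotation, output order matches input order. A minimal sketch with buffered channels and no goroutines, to keep the invariant easy to see:

    package main

    import "fmt"

    func main() {
        in := []string{"d0", "d1", "d2", "d3", "d4"}
        numWorkers := 3
        chans := make([]chan string, numWorkers)
        for i := range chans {
            chans[i] = make(chan string, len(in))
        }
        // Deal round-robin: worker 0 gets d0,d3; worker 1 gets d1,d4; ...
        for i, doc := range in {
            chans[i%numWorkers] <- doc
        }
        for i := range chans {
            close(chans[i])
        }
        // Collect in the same rotation. The real code counts closed
        // workers rather than stopping at the first close, since its
        // workers finish at different times.
        for i := 0; ; i = (i + 1) % numWorkers {
            doc, open := <-chans[i]
            if !open {
                break
            }
            fmt.Println(doc) // prints d0 d1 d2 d3 d4, in input order
        }
    }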
+// getUpsertValue takes a given BSON document and a given field, and returns the
+// field's associated value in the document. The field is specified using dot
+// notation for nested fields, e.g. "person.age" would return 34 in the
+// document bson.M{"person": bson.M{"age": 34}}, whereas "person.name" would
+// return nil.
+func getUpsertValue(field string, document bson.D) interface{} {
+ index := strings.Index(field, ".")
+ if index == -1 {
+ // grab the value (ignoring errors because we are okay with nil)
+ val, _ := bsonutil.FindValueByKey(field, &document)
+ return val
+ }
+ // recurse into subdocuments
+ left := field[0:index]
+ subDoc, _ := bsonutil.FindValueByKey(left, &document)
+ if subDoc == nil {
+ return nil
+ }
+ subDocD, ok := subDoc.(bson.D)
+ if !ok {
+ return nil
+ }
+ return getUpsertValue(field[index+1:], subDocD)
+}
+
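A minimal sketch of the dotted lookup from the caller's side, using the exported bsonutil.FindValueByKey that getUpsertValue builds on (the document literal is mine):

    package main

    import (
        "fmt"

        "github.com/mongodb/mongo-tools/common/bsonutil"
        "gopkg.in/mgo.v2/bson"
    )

    func main() {
        doc := bson.D{{Name: "person", Value: bson.D{{Name: "age", Value: 34}}}}
        // One path segment at a time, as getUpsertValue recurses for
        // "person.age": resolve "person" first, then "age" within it.
        sub, _ := bsonutil.FindValueByKey("person", &doc)
        if subDoc, ok := sub.(bson.D); ok {
            age, _ := bsonutil.FindValueByKey("age", &subDoc)
            fmt.Println(age) // 34
        }
    }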
+// filterIngestError accepts a boolean indicating whether a non-nil error
+// should be returned as an actual error.
+//
+// If the error indicates an unreachable server, it returns that immediately.
+//
+// If the error indicates an invalid write concern was passed, it returns nil.
+//
+// If the error is not nil, it logs the error. If the error is an io.EOF error -
+// indicating a lost connection to the server - it sets the error as such.
+func filterIngestError(stopOnError bool, err error) error {
+ if err == nil {
+ return nil
+ }
+ if err.Error() == io.EOF.Error() {
+ return fmt.Errorf(db.ErrLostConnection)
+ }
+ if stopOnError || db.IsConnectionError(err) {
+ return err
+ }
+ log.Logvf(log.Always, "error inserting documents: %v", err)
+ return nil
+}
+
+// removeBlankFields takes document and returns a new copy in which
+// fields with empty/blank values are removed
+func removeBlankFields(document bson.D) (newDocument bson.D) {
+ for _, keyVal := range document {
+ if val, ok := keyVal.Value.(*bson.D); ok {
+ keyVal.Value = removeBlankFields(*val)
+ }
+ if val, ok := keyVal.Value.(string); ok && val == "" {
+ continue
+ }
+ if val, ok := keyVal.Value.(bson.D); ok && val == nil {
+ continue
+ }
+ newDocument = append(newDocument, keyVal)
+ }
+ return newDocument
+}
+
+// setNestedValue takes a nested field - in the form "a.b.c" -
+// its associated value, and a document. It then assigns that
+// value to the appropriate nested field within the document
+func setNestedValue(key string, value interface{}, document *bson.D) {
+ index := strings.Index(key, ".")
+ if index == -1 {
+ *document = append(*document, bson.DocElem{Name: key, Value: value})
+ return
+ }
+ keyName := key[0:index]
+ subDocument := &bson.D{}
+ elem, err := bsonutil.FindValueByKey(keyName, document)
+ if err != nil { // no such key in the document
+ elem = nil
+ }
+ var existingKey bool
+ if elem != nil {
+ subDocument = elem.(*bson.D)
+ existingKey = true
+ }
+ setNestedValue(key[index+1:], value, subDocument)
+ if !existingKey {
+ *document = append(*document, bson.DocElem{Name: keyName, Value: subDocument})
+ }
+}
+
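A trimmed-down restatement of the recursion may make the shape clearer. This sketch drops the merge-into-an-existing-subdocument lookup, so it assumes each dotted path occurs only once per document:

    package main

    import (
        "fmt"
        "strings"

        "gopkg.in/mgo.v2/bson"
    )

    // setNested appends value at the dotted path key, building one
    // level of subdocument per path segment (no merging).
    func setNested(key string, value interface{}, doc *bson.D) {
        i := strings.Index(key, ".")
        if i == -1 {
            *doc = append(*doc, bson.DocElem{Name: key, Value: value})
            return
        }
        sub := bson.D{}
        setNested(key[i+1:], value, &sub)
        *doc = append(*doc, bson.DocElem{Name: key[:i], Value: sub})
    }

    func main() {
        doc := bson.D{}
        setNested("a.b.c", 1, &doc)
        fmt.Println(doc) // [{a [{b [{c 1}]}]}]
    }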
+// streamDocuments reads data from the readDocs channel, processes it in
+// parallel, and sends the processed documents to the outputChan channel -
+// preserving the order in which the data was received if ordered is true.
+func streamDocuments(ordered bool, numDecoders int, readDocs chan Converter, outputChan chan bson.D) (retErr error) {
+ if numDecoders == 0 {
+ numDecoders = 1
+ }
+ var importWorkers []*importWorker
+ wg := new(sync.WaitGroup)
+ importTomb := new(tomb.Tomb)
+ inChan := readDocs
+ outChan := outputChan
+ for i := 0; i < numDecoders; i++ {
+ if ordered {
+ inChan = make(chan Converter, workerBufferSize)
+ outChan = make(chan bson.D, workerBufferSize)
+ }
+ iw := &importWorker{
+ unprocessedDataChan: inChan,
+ processedDocumentChan: outChan,
+ tomb: importTomb,
+ }
+ importWorkers = append(importWorkers, iw)
+ wg.Add(1)
+ go func(iw importWorker) {
+ defer wg.Done()
+ // only set the first worker error and cause sibling goroutines
+ // to terminate immediately
+ err := iw.processDocuments(ordered)
+ if err != nil && retErr == nil {
+ retErr = err
+ iw.tomb.Kill(err)
+ }
+ }(*iw)
+ }
+
+ // if ordered, we have to coordinate the sequence in which processed
+ // documents are passed to the main read channel
+ if ordered {
+ doSequentialStreaming(importWorkers, readDocs, outputChan)
+ }
+ wg.Wait()
+ close(outputChan)
+ return
+}
+
+// coercionError should only be used as a specific error type to check
+// whether tokensToBSON wants the row to print
+type coercionError struct{}
+
+func (coercionError) Error() string { return "coercionError" }
+
+// tokensToBSON converts a slice of tokens for one record - along with its
+// ordered column specs - into a BSON document for that record.
+func tokensToBSON(colSpecs []ColumnSpec, tokens []string, numProcessed uint64, ignoreBlanks bool) (bson.D, error) {
+ log.Logvf(log.DebugHigh, "got line: %v", tokens)
+ var parsedValue interface{}
+ document := bson.D{}
+ for index, token := range tokens {
+ if token == "" && ignoreBlanks {
+ continue
+ }
+ if index < len(colSpecs) {
+ parsedValue, err := colSpecs[index].Parser.Parse(token)
+ if err != nil {
+ log.Logvf(log.DebugHigh, "parse failure in document #%d for column '%s', "+
+ "could not parse token '%s' to type %s",
+ numProcessed, colSpecs[index].Name, token, colSpecs[index].TypeName)
+ switch colSpecs[index].ParseGrace {
+ case pgAutoCast:
+ parsedValue = autoParse(token)
+ case pgSkipField:
+ continue
+ case pgSkipRow:
+ log.Logvf(log.Always, "skipping row #%d: %v", numProcessed, tokens)
+ return nil, coercionError{}
+ case pgStop:
+ return nil, fmt.Errorf("type coercion failure in document #%d for column '%s', "+
+ "could not parse token '%s' to type %s",
+ numProcessed, colSpecs[index].Name, token, colSpecs[index].TypeName)
+ }
+ }
+ if strings.Contains(colSpecs[index].Name, ".") {
+ setNestedValue(colSpecs[index].Name, parsedValue, &document)
+ } else {
+ document = append(document, bson.DocElem{Name: colSpecs[index].Name, Value: parsedValue})
+ }
+ } else {
+ parsedValue = autoParse(token)
+ key := "field" + strconv.Itoa(index)
+ if util.StringSliceContains(ColumnNames(colSpecs), key) {
+ return nil, fmt.Errorf("duplicate field name - on %v - for token #%v ('%v') in document #%v",
+ key, index+1, parsedValue, numProcessed)
+ }
+ document = append(document, bson.DocElem{Name: key, Value: parsedValue})
+ }
+ }
+ return document, nil
+}
+
+// validateFields takes a slice of fields and returns an error if the fields
+// are invalid, returns nil otherwise
+func validateFields(fields []string) error {
+ fieldsCopy := make([]string, len(fields))
+ copy(fieldsCopy, fields)
+ sort.Strings(fieldsCopy)
+
+ for index, field := range fieldsCopy {
+ if strings.HasSuffix(field, ".") {
+ return fmt.Errorf("field '%v' cannot end with a '.'", field)
+ }
+ if strings.HasPrefix(field, ".") {
+ return fmt.Errorf("field '%v' cannot start with a '.'", field)
+ }
+ if strings.HasPrefix(field, "$") {
+ return fmt.Errorf("field '%v' cannot start with a '$'", field)
+ }
+ if strings.Contains(field, "..") {
+ return fmt.Errorf("field '%v' cannot contain consecutive '.' characters", field)
+ }
+ // NOTE: since fields is sorted, this check ensures that no field
+ // is incompatible with another one that occurs further down the list.
+ // meant to prevent cases where we have fields like "a" and "a.c"
+ for _, latterField := range fieldsCopy[index+1:] {
+ // NOTE: this means we will not support imports that have fields that
+ // include e.g. a, a.b
+ if strings.HasPrefix(latterField, field+".") {
+ return fmt.Errorf("fields '%v' and '%v' are incompatible", field, latterField)
+ }
+ // NOTE: this means we will not support imports that have fields like
+ // a, a - since this is invalid in MongoDB
+ if field == latterField {
+ return fmt.Errorf("fields cannot be identical: '%v' and '%v'", field, latterField)
+ }
+ }
+ }
+ return nil
+}
+
+// validateReaderFields is a helper to validate fields for input readers
+func validateReaderFields(fields []string) error {
+ if err := validateFields(fields); err != nil {
+ return err
+ }
+ if len(fields) == 1 {
+ log.Logvf(log.Info, "using field: %v", fields[0])
+ } else {
+ log.Logvf(log.Info, "using fields: %v", strings.Join(fields, ","))
+ }
+ return nil
+}
+
+// processDocuments reads from the Converter channel and, for each record,
+// converts it to a bson.D document before sending it on the
+// processedDocumentChan channel. Once the input channel is closed, the
+// processed channel is also closed if the worker streams its reads in order.
+func (iw *importWorker) processDocuments(ordered bool) error {
+ if ordered {
+ defer close(iw.processedDocumentChan)
+ }
+ for {
+ select {
+ case converter, alive := <-iw.unprocessedDataChan:
+ if !alive {
+ return nil
+ }
+ document, err := converter.Convert()
+ if err != nil {
+ return err
+ }
+ if document == nil {
+ continue
+ }
+ iw.processedDocumentChan <- document
+ case <-iw.tomb.Dying():
+ return nil
+ }
+ }
+}
diff --git a/src/mongo/gotools/mongoimport/common_test.go b/src/mongo/gotools/mongoimport/common_test.go
new file mode 100644
index 00000000000..4745333a927
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/common_test.go
@@ -0,0 +1,600 @@
+package mongoimport
+
+import (
+ "fmt"
+ "io"
+ "testing"
+
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "gopkg.in/tomb.v2"
+)
+
+func init() {
+ log.SetVerbosity(&options.Verbosity{
+ VLevel: 4,
+ })
+}
+
+var (
+ index = uint64(0)
+ csvConverters = []CSVConverter{
+ {
+ colSpecs: []ColumnSpec{
+ {"field1", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field2", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field3", new(FieldAutoParser), pgAutoCast, "auto"},
+ },
+ data: []string{"a", "b", "c"},
+ index: index,
+ },
+ CSVConverter{
+ colSpecs: []ColumnSpec{
+ {"field4", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field5", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field6", new(FieldAutoParser), pgAutoCast, "auto"},
+ },
+ data: []string{"d", "e", "f"},
+ index: index,
+ },
+ CSVConverter{
+ colSpecs: []ColumnSpec{
+ {"field7", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field8", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field9", new(FieldAutoParser), pgAutoCast, "auto"},
+ },
+ data: []string{"d", "e", "f"},
+ index: index,
+ },
+ CSVConverter{
+ colSpecs: []ColumnSpec{
+ {"field10", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field11", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field12", new(FieldAutoParser), pgAutoCast, "auto"},
+ },
+ data: []string{"d", "e", "f"},
+ index: index,
+ },
+ CSVConverter{
+ colSpecs: []ColumnSpec{
+ {"field13", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field14", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field15", new(FieldAutoParser), pgAutoCast, "auto"},
+ },
+ data: []string{"d", "e", "f"},
+ index: index,
+ },
+ }
+ expectedDocuments = []bson.D{
+ {
+ {"field1", "a"},
+ {"field2", "b"},
+ {"field3", "c"},
+ }, {
+ {"field4", "d"},
+ {"field5", "e"},
+ {"field6", "f"},
+ }, {
+ {"field7", "d"},
+ {"field8", "e"},
+ {"field9", "f"},
+ }, {
+ {"field10", "d"},
+ {"field11", "e"},
+ {"field12", "f"},
+ }, {
+ {"field13", "d"},
+ {"field14", "e"},
+ {"field15", "f"},
+ },
+ }
+)
+
+func convertBSONDToRaw(documents []bson.D) []bson.Raw {
+ rawBSONDocuments := []bson.Raw{}
+ for _, document := range documents {
+ rawBytes, err := bson.Marshal(document)
+ So(err, ShouldBeNil)
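+ // Kind 3 marks the raw bytes as a BSON embedded-document element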
+ rawBSONDocuments = append(rawBSONDocuments, bson.Raw{3, rawBytes})
+ }
+ return rawBSONDocuments
+}
+
+func TestValidateFields(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Given an import input, in validating the headers", t, func() {
+ Convey("if the fields contain '..', an error should be thrown", func() {
+ So(validateFields([]string{"a..a"}), ShouldNotBeNil)
+ })
+ Convey("if the fields start/end in a '.', an error should be thrown", func() {
+ So(validateFields([]string{".a"}), ShouldNotBeNil)
+ So(validateFields([]string{"a."}), ShouldNotBeNil)
+ })
+ Convey("if the fields start in a '$', an error should be thrown", func() {
+ So(validateFields([]string{"$.a"}), ShouldNotBeNil)
+ So(validateFields([]string{"$"}), ShouldNotBeNil)
+ So(validateFields([]string{"$a"}), ShouldNotBeNil)
+ So(validateFields([]string{"a$a"}), ShouldBeNil)
+ })
+ Convey("if the fields collide, an error should be thrown", func() {
+ So(validateFields([]string{"a", "a.a"}), ShouldNotBeNil)
+ So(validateFields([]string{"a", "a.ba", "b.a"}), ShouldNotBeNil)
+ So(validateFields([]string{"a", "a.ba", "b.a"}), ShouldNotBeNil)
+ So(validateFields([]string{"a", "a.b.c"}), ShouldNotBeNil)
+ })
+ Convey("if the fields don't collide, no error should be thrown", func() {
+ So(validateFields([]string{"a", "aa"}), ShouldBeNil)
+ So(validateFields([]string{"a", "aa", "b.a", "b.c"}), ShouldBeNil)
+ So(validateFields([]string{"a", "ba", "ab", "b.a"}), ShouldBeNil)
+ So(validateFields([]string{"a", "ba", "ab", "b.a", "b.c.d"}), ShouldBeNil)
+ So(validateFields([]string{"a", "ab.c"}), ShouldBeNil)
+ })
+ Convey("if the fields contain the same keys, an error should be thrown", func() {
+ So(validateFields([]string{"a", "ba", "a"}), ShouldNotBeNil)
+ })
+ })
+}
+
+func TestGetUpsertValue(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Given a field and a BSON document, on calling getUpsertValue", t, func() {
+ Convey("the value of the key should be correct for unnested documents", func() {
+ bsonDocument := bson.D{{"a", 3}}
+ So(getUpsertValue("a", bsonDocument), ShouldEqual, 3)
+ })
+ Convey("the value of the key should be correct for nested document fields", func() {
+ inner := bson.D{{"b", 4}}
+ bsonDocument := bson.D{{"a", inner}}
+ So(getUpsertValue("a.b", bsonDocument), ShouldEqual, 4)
+ })
+ Convey("the value of the key should be nil for unnested document "+
+ "fields that do not exist", func() {
+ bsonDocument := bson.D{{"a", 4}}
+ So(getUpsertValue("c", bsonDocument), ShouldBeNil)
+ })
+ Convey("the value of the key should be nil for nested document "+
+ "fields that do not exist", func() {
+ inner := bson.D{{"b", 4}}
+ bsonDocument := bson.D{{"a", inner}}
+ So(getUpsertValue("a.c", bsonDocument), ShouldBeNil)
+ })
+ Convey("the value of the key should be nil for nil document values", func() {
+ So(getUpsertValue("a", bson.D{{"a", nil}}), ShouldBeNil)
+ })
+ })
+}
+
+func TestConstructUpsertDocument(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Given a set of upsert fields and a BSON document, on calling "+
+ "constructUpsertDocument", t, func() {
+ Convey("the key/value combination in the upsert document should be "+
+ "correct for unnested documents with single fields", func() {
+ bsonDocument := bson.D{{"a", 3}}
+ upsertFields := []string{"a"}
+ upsertDocument := constructUpsertDocument(upsertFields,
+ bsonDocument)
+ So(upsertDocument, ShouldResemble, bsonDocument)
+ })
+ Convey("the key/value combination in the upsert document should be "+
+ "correct for unnested documents with several fields", func() {
+ bsonDocument := bson.D{{"a", 3}, {"b", "string value"}}
+ upsertFields := []string{"a"}
+ expectedDocument := bson.D{{"a", 3}}
+ upsertDocument := constructUpsertDocument(upsertFields,
+ bsonDocument)
+ So(upsertDocument, ShouldResemble, expectedDocument)
+ })
+ Convey("the key/value combination in the upsert document should be "+
+ "correct for nested documents with several fields", func() {
+ inner := bson.D{{testCollection, 4}}
+ bsonDocument := bson.D{{"a", inner}, {"b", "string value"}}
+ upsertFields := []string{"a.c"}
+ expectedDocument := bson.D{{"a.c", 4}}
+ upsertDocument := constructUpsertDocument(upsertFields,
+ bsonDocument)
+ So(upsertDocument, ShouldResemble, expectedDocument)
+ })
+ Convey("the upsert document should be nil if the key does not exist "+
+ "in the BSON document", func() {
+ bsonDocument := bson.D{{"a", 3}, {"b", "string value"}}
+ upsertFields := []string{testCollection}
+ upsertDocument := constructUpsertDocument(upsertFields, bsonDocument)
+ So(upsertDocument, ShouldBeNil)
+ })
+ })
+}
+
+func TestSetNestedValue(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Given a field, its value, and an existing BSON document...", t, func() {
+ b := bson.D{{"c", "d"}}
+ currentDocument := bson.D{
+ {"a", 3},
+ {"b", &b},
+ }
+ Convey("ensure top level fields are set and others, unchanged", func() {
+ testDocument := &currentDocument
+ expectedDocument := bson.DocElem{"c", 4}
+ setNestedValue("c", 4, testDocument)
+ newDocument := *testDocument
+ So(len(newDocument), ShouldEqual, 3)
+ So(newDocument[2], ShouldResemble, expectedDocument)
+ })
+ Convey("ensure new nested top-level fields are set and others, unchanged", func() {
+ testDocument := &currentDocument
+ expectedDocument := bson.D{{"b", "4"}}
+ setNestedValue("c.b", "4", testDocument)
+ newDocument := *testDocument
+ So(len(newDocument), ShouldEqual, 3)
+ So(newDocument[2].Name, ShouldResemble, "c")
+ So(*newDocument[2].Value.(*bson.D), ShouldResemble, expectedDocument)
+ })
+ Convey("ensure existing nested level fields are set and others, unchanged", func() {
+ testDocument := &currentDocument
+ expectedDocument := bson.D{{"c", "d"}, {"d", 9}}
+ setNestedValue("b.d", 9, testDocument)
+ newDocument := *testDocument
+ So(len(newDocument), ShouldEqual, 2)
+ So(newDocument[1].Name, ShouldResemble, "b")
+ So(*newDocument[1].Value.(*bson.D), ShouldResemble, expectedDocument)
+ })
+ Convey("ensure subsequent calls update fields accordingly", func() {
+ testDocument := &currentDocument
+ expectedDocumentOne := bson.D{{"c", "d"}, {"d", 9}}
+ expectedDocumentTwo := bson.DocElem{"f", 23}
+ setNestedValue("b.d", 9, testDocument)
+ newDocument := *testDocument
+ So(len(newDocument), ShouldEqual, 2)
+ So(newDocument[1].Name, ShouldResemble, "b")
+ So(*newDocument[1].Value.(*bson.D), ShouldResemble, expectedDocumentOne)
+ setNestedValue("f", 23, testDocument)
+ newDocument = *testDocument
+ So(len(newDocument), ShouldEqual, 3)
+ So(newDocument[2], ShouldResemble, expectedDocumentTwo)
+ })
+ })
+}
+
+func TestRemoveBlankFields(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Given an unordered BSON document", t, func() {
+ Convey("the same document should be returned if there are no blanks", func() {
+ bsonDocument := bson.D{{"a", 3}, {"b", "hello"}}
+ So(removeBlankFields(bsonDocument), ShouldResemble, bsonDocument)
+ })
+ Convey("a new document without blanks should be returned if there are "+
+ " blanks", func() {
+ d := bson.D{
+ {"a", ""},
+ {"b", ""},
+ }
+ e := bson.D{
+ {"a", ""},
+ {"b", 1},
+ }
+ bsonDocument := bson.D{
+ {"a", 0},
+ {"b", ""},
+ {"c", ""},
+ {"d", &d},
+ {"e", &e},
+ }
+ inner := bson.D{
+ {"b", 1},
+ }
+ expectedDocument := bson.D{
+ {"a", 0},
+ {"e", inner},
+ }
+ So(removeBlankFields(bsonDocument), ShouldResemble, expectedDocument)
+ })
+ })
+}
+
+func TestTokensToBSON(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Given an slice of column specs and tokens to convert to BSON", t, func() {
+ Convey("the expected ordered BSON should be produced for the given"+
+ "column specs and tokens", func() {
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ tokens := []string{"1", "2", "hello"}
+ expectedDocument := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", "hello"},
+ }
+ bsonD, err := tokensToBSON(colSpecs, tokens, uint64(0), false)
+ So(err, ShouldBeNil)
+ So(bsonD, ShouldResemble, expectedDocument)
+ })
+ Convey("if there are more tokens than fields, additional fields should be prefixed"+
+ " with 'fields' and an index indicating the header number", func() {
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ tokens := []string{"1", "2", "hello", "mongodb", "user"}
+ expectedDocument := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", "hello"},
+ {"field3", "mongodb"},
+ {"field4", "user"},
+ }
+ bsonD, err := tokensToBSON(colSpecs, tokens, uint64(0), false)
+ So(err, ShouldBeNil)
+ So(bsonD, ShouldResemble, expectedDocument)
+ })
+ Convey("an error should be thrown if duplicate headers are found", func() {
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field3", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ tokens := []string{"1", "2", "hello", "mongodb", "user"}
+ _, err := tokensToBSON(colSpecs, tokens, uint64(0), false)
+ So(err, ShouldNotBeNil)
+ })
+ Convey("fields with nested values should be set appropriately", func() {
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c.a", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ tokens := []string{"1", "2", "hello"}
+ c := bson.D{
+ {"a", "hello"},
+ }
+ expectedDocument := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", c},
+ }
+ bsonD, err := tokensToBSON(colSpecs, tokens, uint64(0), false)
+ So(err, ShouldBeNil)
+ So(expectedDocument[0].Name, ShouldResemble, bsonD[0].Name)
+ So(expectedDocument[0].Value, ShouldResemble, bsonD[0].Value)
+ So(expectedDocument[1].Name, ShouldResemble, bsonD[1].Name)
+ So(expectedDocument[1].Value, ShouldResemble, bsonD[1].Value)
+ So(expectedDocument[2].Name, ShouldResemble, bsonD[2].Name)
+ So(expectedDocument[2].Value, ShouldResemble, *bsonD[2].Value.(*bson.D))
+ })
+ })
+}
+
+func TestProcessDocuments(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Given an import worker", t, func() {
+ index := uint64(0)
+ csvConverters := []CSVConverter{
+ CSVConverter{
+ colSpecs: []ColumnSpec{
+ {"field1", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field2", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field3", new(FieldAutoParser), pgAutoCast, "auto"},
+ },
+ data: []string{"a", "b", "c"},
+ index: index,
+ },
+ CSVConverter{
+ colSpecs: []ColumnSpec{
+ {"field4", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field5", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field6", new(FieldAutoParser), pgAutoCast, "auto"},
+ },
+ data: []string{"d", "e", "f"},
+ index: index,
+ },
+ }
+ expectedDocuments := []bson.D{
+ {
+ {"field1", "a"},
+ {"field2", "b"},
+ {"field3", "c"},
+ }, {
+ {"field4", "d"},
+ {"field5", "e"},
+ {"field6", "f"},
+ },
+ }
+ Convey("processDocuments should execute the expected conversion for documents, "+
+ "pass then on the output channel, and close the input channel if ordered is true", func() {
+ inputChannel := make(chan Converter, 100)
+ outputChannel := make(chan bson.D, 100)
+ iw := &importWorker{
+ unprocessedDataChan: inputChannel,
+ processedDocumentChan: outputChannel,
+ tomb: &tomb.Tomb{},
+ }
+ inputChannel <- csvConverters[0]
+ inputChannel <- csvConverters[1]
+ close(inputChannel)
+ So(iw.processDocuments(true), ShouldBeNil)
+ doc1, open := <-outputChannel
+ So(doc1, ShouldResemble, expectedDocuments[0])
+ So(open, ShouldEqual, true)
+ doc2, open := <-outputChannel
+ So(doc2, ShouldResemble, expectedDocuments[1])
+ So(open, ShouldEqual, true)
+ _, open = <-outputChannel
+ So(open, ShouldEqual, false)
+ })
+ Convey("processDocuments should execute the expected conversion for documents, "+
+ "pass then on the output channel, and leave the input channel open if ordered is false", func() {
+ inputChannel := make(chan Converter, 100)
+ outputChannel := make(chan bson.D, 100)
+ iw := &importWorker{
+ unprocessedDataChan: inputChannel,
+ processedDocumentChan: outputChannel,
+ tomb: &tomb.Tomb{},
+ }
+ inputChannel <- csvConverters[0]
+ inputChannel <- csvConverters[1]
+ close(inputChannel)
+ So(iw.processDocuments(false), ShouldBeNil)
+ doc1, open := <-outputChannel
+ So(doc1, ShouldResemble, expectedDocuments[0])
+ So(open, ShouldEqual, true)
+ doc2, open := <-outputChannel
+ So(doc2, ShouldResemble, expectedDocuments[1])
+ So(open, ShouldEqual, true)
+ // close panics if outputChannel is already closed, so this verifies it was left open
+ close(outputChannel)
+ })
+ })
+}
+
+func TestDoSequentialStreaming(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Given some import workers, a Converters input channel and an bson.D output channel", t, func() {
+ inputChannel := make(chan Converter, 5)
+ outputChannel := make(chan bson.D, 5)
+ workerInputChannel := []chan Converter{
+ make(chan Converter),
+ make(chan Converter),
+ }
+ workerOutputChannel := []chan bson.D{
+ make(chan bson.D),
+ make(chan bson.D),
+ }
+ importWorkers := []*importWorker{
+ &importWorker{
+ unprocessedDataChan: workerInputChannel[0],
+ processedDocumentChan: workerOutputChannel[0],
+ tomb: &tomb.Tomb{},
+ },
+ &importWorker{
+ unprocessedDataChan: workerInputChannel[1],
+ processedDocumentChan: workerOutputChannel[1],
+ tomb: &tomb.Tomb{},
+ },
+ }
+ Convey("documents moving through the input channel should be processed and returned in sequence", func() {
+ // start goroutines to do sequential processing
+ for _, iw := range importWorkers {
+ go iw.processDocuments(true)
+ }
+ // feed in a bunch of documents
+ for _, inputCSVDocument := range csvConverters {
+ inputChannel <- inputCSVDocument
+ }
+ close(inputChannel)
+ doSequentialStreaming(importWorkers, inputChannel, outputChannel)
+ for _, document := range expectedDocuments {
+ So(<-outputChannel, ShouldResemble, document)
+ }
+ })
+ })
+}
+
+func TestStreamDocuments(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey(`Given:
+ 1. a boolean indicating streaming order
+ 2. an input channel where documents are streamed in
+ 3. an output channel where processed documents are streamed out`, t, func() {
+
+ inputChannel := make(chan Converter, 5)
+ outputChannel := make(chan bson.D, 5)
+
+ Convey("the entire pipeline should complete without error under normal circumstances", func() {
+ // stream in some documents
+ for _, csvConverter := range csvConverters {
+ inputChannel <- csvConverter
+ }
+ close(inputChannel)
+ So(streamDocuments(true, 3, inputChannel, outputChannel), ShouldBeNil)
+
+ // ensure documents are streamed out and processed in the correct manner
+ for _, expectedDocument := range expectedDocuments {
+ So(<-outputChannel, ShouldResemble, expectedDocument)
+ }
+ })
+ Convey("the entire pipeline should complete with error if an error is encountered", func() {
+ // stream in some documents - create duplicate headers to simulate an error
+ csvConverter := CSVConverter{
+ colSpecs: []ColumnSpec{
+ {"field1", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field2", new(FieldAutoParser), pgAutoCast, "auto"},
+ },
+ data: []string{"a", "b", "c"},
+ index: uint64(0),
+ }
+ inputChannel <- csvConverter
+ close(inputChannel)
+
+ // ensure that an error is returned on the error channel
+ So(streamDocuments(true, 3, inputChannel, outputChannel), ShouldNotBeNil)
+ })
+ })
+}
+
+func TestChannelQuorumError(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("Given a channel and a quorum...", t, func() {
+ Convey("an error should be returned if one is received", func() {
+ ch := make(chan error, 2)
+ ch <- nil
+ ch <- io.EOF
+ So(channelQuorumError(ch, 2), ShouldNotBeNil)
+ })
+ Convey("no error should be returned if none is received", func() {
+ ch := make(chan error, 2)
+ ch <- nil
+ ch <- nil
+ So(channelQuorumError(ch, 2), ShouldBeNil)
+ })
+ Convey("no error should be returned if up to quorum nil errors are received", func() {
+ ch := make(chan error, 3)
+ ch <- nil
+ ch <- nil
+ ch <- io.EOF
+ So(channelQuorumError(ch, 2), ShouldBeNil)
+ })
+ })
+}
+
+func TestFilterIngestError(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Given a boolean 'stopOnError' and an error...", t, func() {
+
+ Convey("an error should be returned if stopOnError is true the err is not nil", func() {
+ So(filterIngestError(true, fmt.Errorf("")), ShouldNotBeNil)
+ })
+
+ Convey("errLostConnection should be returned if stopOnError is true the err is io.EOF", func() {
+ So(filterIngestError(true, io.EOF).Error(), ShouldEqual, db.ErrLostConnection)
+ })
+
+ Convey("no error should be returned if stopOnError is false the err is not nil", func() {
+ So(filterIngestError(false, fmt.Errorf("")), ShouldBeNil)
+ })
+
+ Convey("no error should be returned if stopOnError is false the err is nil", func() {
+ So(filterIngestError(false, nil), ShouldBeNil)
+ })
+
+ Convey("no error should be returned if stopOnError is true the err is nil", func() {
+ So(filterIngestError(true, nil), ShouldBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/mongoimport/csv.go b/src/mongo/gotools/mongoimport/csv.go
new file mode 100644
index 00000000000..3e66d034ddc
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/csv.go
@@ -0,0 +1,151 @@
+package mongoimport
+
+import (
+ gocsv "encoding/csv"
+ "fmt"
+ "io"
+
+ "github.com/mongodb/mongo-tools/mongoimport/csv"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// CSVInputReader implements the InputReader interface for CSV input types.
+type CSVInputReader struct {
+ // colSpecs is a list of column specifications in the BSON documents to be imported
+ colSpecs []ColumnSpec
+
+ // csvReader is the underlying reader used to read data in from the CSV source or file
+ csvReader *csv.Reader
+
+ // csvRejectWriter is where coercion-failed rows are written, if applicable
+ csvRejectWriter *gocsv.Writer
+
+ // csvRecord stores each line of input we read from the underlying reader
+ csvRecord []string
+
+ // numProcessed tracks the number of CSV records processed by the underlying reader
+ numProcessed uint64
+
+ // numDecoders is the number of concurrent goroutines to use for decoding
+ numDecoders int
+
+ // embedded sizeTracker exposes the Size() method to check the number of bytes read so far
+ sizeTracker
+
+ // ignoreBlanks is whether empty fields should be ignored
+ ignoreBlanks bool
+}
+
+// CSVConverter implements the Converter interface for CSV input.
+type CSVConverter struct {
+ colSpecs []ColumnSpec
+ data []string
+ index uint64
+ ignoreBlanks bool
+ rejectWriter *gocsv.Writer
+}
+
+// NewCSVInputReader returns a CSVInputReader configured to read data from the
+// given io.Reader, interpreting each record according to the given column
+// specs and using exactly "numDecoders" goroutines for decoding.
+func NewCSVInputReader(colSpecs []ColumnSpec, in io.Reader, rejects io.Writer, numDecoders int, ignoreBlanks bool) *CSVInputReader {
+ szCount := newSizeTrackingReader(newBomDiscardingReader(in))
+ csvReader := csv.NewReader(szCount)
+ // allow variable number of colSpecs in document
+ csvReader.FieldsPerRecord = -1
+ csvReader.TrimLeadingSpace = true
+ return &CSVInputReader{
+ colSpecs: colSpecs,
+ csvReader: csvReader,
+ csvRejectWriter: gocsv.NewWriter(rejects),
+ numProcessed: uint64(0),
+ numDecoders: numDecoders,
+ sizeTracker: szCount,
+ ignoreBlanks: ignoreBlanks,
+ }
+}
+
+// ReadAndValidateHeader reads the header from the underlying reader and validates
+// the header fields. It sets err if the read/validation fails.
+func (r *CSVInputReader) ReadAndValidateHeader() (err error) {
+ fields, err := r.csvReader.Read()
+ if err != nil {
+ return err
+ }
+ r.colSpecs = ParseAutoHeaders(fields)
+ return validateReaderFields(ColumnNames(r.colSpecs))
+}
+
+// ReadAndValidateTypedHeader reads the header from the underlying reader,
+// parses the typed header fields, and validates them. It sets err if the
+// read, parse, or validation fails.
+func (r *CSVInputReader) ReadAndValidateTypedHeader(parseGrace ParseGrace) (err error) {
+ fields, err := r.csvReader.Read()
+ if err != nil {
+ return err
+ }
+ r.colSpecs, err = ParseTypedHeaders(fields, parseGrace)
+ if err != nil {
+ return err
+ }
+ return validateReaderFields(ColumnNames(r.colSpecs))
+}
+
+// StreamDocument takes a boolean indicating if the documents should be streamed
+// in read order and a channel on which to stream the documents processed from
+// the underlying reader. Returns a non-nil error if streaming fails.
+func (r *CSVInputReader) StreamDocument(ordered bool, readDocs chan bson.D) (retErr error) {
+ csvRecordChan := make(chan Converter, r.numDecoders)
+ csvErrChan := make(chan error)
+
+ // begin reading from source
+ go func() {
+ var err error
+ for {
+ r.csvRecord, err = r.csvReader.Read()
+ if err != nil {
+ close(csvRecordChan)
+ if err == io.EOF {
+ csvErrChan <- nil
+ } else {
+ r.numProcessed++
+ csvErrChan <- fmt.Errorf("read error on entry #%v: %v", r.numProcessed, err)
+ }
+ return
+ }
+ csvRecordChan <- CSVConverter{
+ colSpecs: r.colSpecs,
+ data: r.csvRecord,
+ index: r.numProcessed,
+ ignoreBlanks: r.ignoreBlanks,
+ rejectWriter: r.csvRejectWriter,
+ }
+ r.numProcessed++
+ }
+ }()
+
+ go func() {
+ csvErrChan <- streamDocuments(ordered, r.numDecoders, csvRecordChan, readDocs)
+ }()
+
+ return channelQuorumError(csvErrChan, 2)
+}
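+// A minimal usage sketch (illustrative only; colSpecs is assumed to be a
+// []ColumnSpec for columns "a" and "b", and the reject writer and single
+// decoder are arbitrary choices):
+//
+//     r := NewCSVInputReader(colSpecs, strings.NewReader("1,2\n3,4\n"), os.Stderr, 1, false)
+//     out := make(chan bson.D, 8)
+//     if err := r.StreamDocument(true, out); err != nil {
+//         // handle read/parse failure
+//     }
+//     doc := <-out // e.g. bson.D{{"a", int32(1)}, {"b", int32(2)}}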
+
+// Convert implements the Converter interface for CSV input. It converts a
+// CSVConverter struct to a BSON document.
+func (c CSVConverter) Convert() (b bson.D, err error) {
+ b, err = tokensToBSON(
+ c.colSpecs,
+ c.data,
+ c.index,
+ c.ignoreBlanks,
+ )
+ if _, ok := err.(coercionError); ok {
+ c.Print()
+ err = nil
+ }
+ return
+}
+
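+// Print writes the converter's raw CSV record to the configured reject writer.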
+func (c CSVConverter) Print() {
+ c.rejectWriter.Write(c.data)
+}
diff --git a/src/mongo/gotools/mongoimport/csv/reader.go b/src/mongo/gotools/mongoimport/csv/reader.go
new file mode 100644
index 00000000000..aad3c4a32c3
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/csv/reader.go
@@ -0,0 +1,363 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package csv reads and writes comma-separated values (CSV) files.
+//
+// A csv file contains zero or more records of one or more fields per record.
+// Each record is separated by the newline character. The final record may
+// optionally be followed by a newline character.
+//
+// field1,field2,field3
+//
+// White space is considered part of a field.
+//
+// Carriage returns before newline characters are silently removed.
+//
+// Blank lines are ignored. A line with only whitespace characters (excluding
+// the ending newline character) is not considered a blank line.
+//
+// Fields which start and stop with the quote character " are called
+// quoted-fields. The beginning and ending quote are not part of the
+// field.
+//
+// The source:
+//
+// normal string,"quoted-field"
+//
+// results in the fields
+//
+// {`normal string`, `quoted-field`}
+//
+// Within a quoted-field a quote character followed by a second quote
+// character is considered a single quote.
+//
+// "the ""word"" is true","a ""quoted-field"""
+//
+// results in
+//
+// {`the "word" is true`, `a "quoted-field"`}
+//
+// Newlines and commas may be included in a quoted-field
+//
+// "Multi-line
+// field","comma is ,"
+//
+// results in
+//
+// {`Multi-line
+// field`, `comma is ,`}
+package csv
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "unicode"
+)
+
+// A ParseError is returned for parsing errors.
+// The first line is 1. The first column is 0.
+type ParseError struct {
+ Line int // Line where the error occurred
+ Column int // Column (rune index) where the error occurred
+ Err error // The actual error
+}
+
+func (e *ParseError) Error() string {
+ return fmt.Sprintf("line %d, column %d: %s", e.Line, e.Column, e.Err)
+}
+
+// These are the errors that can be returned in ParseError.Error
+var (
+ ErrTrailingComma = errors.New("extra delimiter at end of line") // no longer used
+ ErrBareQuote = errors.New("bare \" in non-quoted-field")
+ ErrQuote = errors.New("extraneous \" in field")
+ ErrFieldCount = errors.New("wrong number of fields in line")
+)
+
+// A Reader reads records from a CSV-encoded file.
+//
+// As returned by NewReader, a Reader expects input conforming to RFC 4180.
+// The exported fields can be changed to customize the details before the
+// first call to Read or ReadAll.
+//
+// Comma is the field delimiter. It defaults to ','.
+//
+// Comment, if not 0, is the comment character. Lines beginning with the
+// Comment character are ignored.
+//
+// If FieldsPerRecord is positive, Read requires each record to
+// have the given number of fields. If FieldsPerRecord is 0, Read sets it to
+// the number of fields in the first record, so that future records must
+// have the same field count. If FieldsPerRecord is negative, no check is
+// made and records may have a variable number of fields.
+//
+// If LazyQuotes is true, a quote may appear in an unquoted field and a
+// non-doubled quote may appear in a quoted field.
+//
+// If TrimLeadingSpace is true, leading white space in a field is ignored.
+type Reader struct {
+ Comma rune // field delimiter (set to ',' by NewReader)
+ Comment rune // comment character for start of line
+ FieldsPerRecord int // number of expected fields per record
+ LazyQuotes bool // allow lazy quotes
+ TrailingComma bool // ignored; here for backwards compatibility
+ TrimLeadingSpace bool // trim leading space
+ line int
+ column int
+ r *bufio.Reader
+ field bytes.Buffer
+}
+
+// NewReader returns a new Reader that reads from r.
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ Comma: ',',
+ r: bufio.NewReader(r),
+ }
+}
+
+// error creates a new ParseError based on err.
+func (r *Reader) error(err error) error {
+ return &ParseError{
+ Line: r.line,
+ Column: r.column,
+ Err: err,
+ }
+}
+
+// Read reads one record from r. The record is a slice of strings with each
+// string representing one field.
+func (r *Reader) Read() (record []string, err error) {
+ for {
+ record, err = r.parseRecord()
+ if record != nil {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if r.FieldsPerRecord > 0 {
+ if len(record) != r.FieldsPerRecord {
+ r.column = 0 // report at start of record
+ return record, r.error(ErrFieldCount)
+ }
+ } else if r.FieldsPerRecord == 0 {
+ r.FieldsPerRecord = len(record)
+ }
+ return record, nil
+}
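+// A typical read loop (illustrative only):
+//
+//     r := NewReader(strings.NewReader("a,b\n1,2\n"))
+//     for {
+//         record, err := r.Read()
+//         if err == io.EOF {
+//             break
+//         }
+//         if err != nil {
+//             return err
+//         }
+//         _ = record // []string of fields, e.g. {"a", "b"}
+//     }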
+
+// ReadAll reads all the remaining records from r.
+// Each record is a slice of fields.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read until EOF, it does not treat end of file as an error to be
+// reported.
+func (r *Reader) ReadAll() (records [][]string, err error) {
+ for {
+ record, err := r.Read()
+ if err == io.EOF {
+ return records, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+ records = append(records, record)
+ }
+}
+
+// readRune reads one rune from r, folding \r\n to \n and keeping track
+// of how far into the line we have read. r.column will point to the start
+// of this rune, not the end of this rune.
+func (r *Reader) readRune() (rune, error) {
+ r1, _, err := r.r.ReadRune()
+
+ // Handle \r\n here. We make the simplifying assumption that
+ // anytime \r is followed by \n that it can be folded to \n.
+ // We will not detect files which contain both \r\n and bare \n.
+ if r1 == '\r' {
+ r1, _, err = r.r.ReadRune()
+ if err == nil {
+ if r1 != '\n' {
+ r.r.UnreadRune()
+ r1 = '\r'
+ }
+ }
+ }
+ r.column++
+ return r1, err
+}
+
+// skip reads runes up to and including the rune delim or until error.
+func (r *Reader) skip(delim rune) error {
+ for {
+ r1, err := r.readRune()
+ if err != nil {
+ return err
+ }
+ if r1 == delim {
+ return nil
+ }
+ }
+}
+
+// parseRecord reads and parses a single csv record from r.
+func (r *Reader) parseRecord() (fields []string, err error) {
+ // Each record starts on a new line. We increment our line
+ // number (lines start at 1, not 0) and set column to -1
+ // so as we increment in readRune it points to the character we read.
+ r.line++
+ r.column = -1
+
+ // Peek at the first rune. If it is an error we are done.
+ // If we support comments and it is the comment character
+ // then skip to the end of line.
+
+ r1, _, err := r.r.ReadRune()
+ if err != nil {
+ return nil, err
+ }
+
+ if r.Comment != 0 && r1 == r.Comment {
+ return nil, r.skip('\n')
+ }
+ r.r.UnreadRune()
+
+ // At this point we have at least one field.
+ for {
+ haveField, delim, err := r.parseField()
+ if haveField {
+ fields = append(fields, r.field.String())
+ }
+ if delim == '\n' || err == io.EOF {
+ return fields, err
+ } else if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// parseField parses the next field in the record. The read field is
+// located in r.field. Delim is the first character not part of the field
+// (r.Comma or '\n').
+func (r *Reader) parseField() (haveField bool, delim rune, err error) {
+ r.field.Reset()
+
+ r1, err := r.readRune()
+ for err == nil && r.TrimLeadingSpace && r1 != '\n' && unicode.IsSpace(r1) {
+ r1, err = r.readRune()
+ }
+
+ if err == io.EOF && r.column != 0 {
+ return true, 0, err
+ }
+ if err != nil {
+ return false, 0, err
+ }
+
+ var ws bytes.Buffer
+
+ switch r1 {
+ case r.Comma:
+ // will check below
+
+ case '\n':
+ // We are a trailing empty field or a blank line
+ if r.column == 0 {
+ return false, r1, nil
+ }
+ return true, r1, nil
+
+ case '"':
+ // quoted field
+ Quoted:
+ for {
+ r1, err = r.readRune()
+ if err != nil {
+ if err == io.EOF {
+ if r.LazyQuotes {
+ return true, 0, err
+ }
+ return false, 0, r.error(ErrQuote)
+ }
+ return false, 0, err
+ }
+ switch r1 {
+ case '"':
+ r1, err = r.readRune()
+ if err == nil && r.TrimLeadingSpace && r1 != '\n' && unicode.IsSpace(r1) {
+ for err == nil && r.TrimLeadingSpace && r1 != '\n' && unicode.IsSpace(r1) {
+ r1, err = r.readRune()
+ }
+ // we don't want '"foo" "bar",' to look like '"foo""bar"'
+ // which evaluates to 'foo"bar'
+ // so we explicitly test for the case that the trimmed whitespace isn't
+ // followed by a '"'
+ if err == nil && r1 == '"' {
+ r.column--
+ return false, 0, r.error(ErrQuote)
+ }
+ }
+ if err != nil || r1 == r.Comma {
+ break Quoted
+ }
+ if r1 == '\n' {
+ return true, r1, nil
+ }
+ if r1 != '"' {
+ if !r.LazyQuotes {
+ r.column--
+ return false, 0, r.error(ErrQuote)
+ }
+ // accept the bare quote
+ r.field.WriteRune('"')
+ }
+ case '\n':
+ r.line++
+ r.column = -1
+ }
+ r.field.WriteRune(r1)
+ }
+
+ default:
+ // unquoted field
+ for {
+ // only write sections of whitespace if it's followed by non-whitespace
+ if unicode.IsSpace(r1) {
+ ws.WriteRune(r1)
+ } else {
+ r.field.WriteString(ws.String())
+ ws.Reset()
+ r.field.WriteRune(r1)
+ }
+ r1, err = r.readRune()
+ if err != nil || r1 == r.Comma {
+ break
+ }
+ if r1 == '\n' {
+ return true, r1, nil
+ }
+ if !r.LazyQuotes && r1 == '"' {
+ return false, 0, r.error(ErrBareQuote)
+ }
+ }
+ }
+ // write any remaining section of whitespace unless TrimLeadingSpace is on
+ if !r.TrimLeadingSpace {
+ r.field.WriteString(ws.String())
+ }
+
+ if err != nil {
+ if err == io.EOF {
+ return true, 0, err
+ }
+ return false, 0, err
+ }
+
+ return true, r1, nil
+}
diff --git a/src/mongo/gotools/mongoimport/csv_test.go b/src/mongo/gotools/mongoimport/csv_test.go
new file mode 100644
index 00000000000..0db342d9f12
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/csv_test.go
@@ -0,0 +1,354 @@
+package mongoimport
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+)
+
+func init() {
+ log.SetVerbosity(&options.Verbosity{
+ VLevel: 4,
+ })
+}
+
+func TestCSVStreamDocument(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("With a CSV input reader", t, func() {
+ Convey("badly encoded CSV should result in a parsing error", func() {
+ contents := `1, 2, foo"bar`
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldNotBeNil)
+ })
+ Convey("escaped quotes are parsed correctly", func() {
+ contents := `1, 2, "foo""bar"`
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ })
+ Convey("multiple escaped quotes separated by whitespace parsed correctly", func() {
+ contents := `1, 2, "foo"" ""bar"`
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedRead := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", `foo" "bar`},
+ }
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedRead)
+ })
+ Convey("integer valued strings should be converted", func() {
+ contents := `1, 2, " 3e"`
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedRead := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", " 3e"},
+ }
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedRead)
+ })
+ Convey("extra fields should be prefixed with 'field'", func() {
+ contents := `1, 2f , " 3e" , " may"`
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedRead := bson.D{
+ {"a", int32(1)},
+ {"b", "2f"},
+ {"c", " 3e"},
+ {"field3", " may"},
+ }
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedRead)
+ })
+ Convey("nested CSV fields should be imported properly", func() {
+ contents := `1, 2f , " 3e" , " may"`
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b.c", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ b := bson.D{{"c", "2f"}}
+ expectedRead := bson.D{
+ {"a", int32(1)},
+ {"b", b},
+ {"c", " 3e"},
+ {"field3", " may"},
+ }
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 4)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+
+ readDocument := <-docChan
+ So(readDocument[0], ShouldResemble, expectedRead[0])
+ So(readDocument[1].Name, ShouldResemble, expectedRead[1].Name)
+ So(*readDocument[1].Value.(*bson.D), ShouldResemble, expectedRead[1].Value)
+ So(readDocument[2], ShouldResemble, expectedRead[2])
+ So(readDocument[3], ShouldResemble, expectedRead[3])
+ })
+ Convey("whitespace separated quoted strings are still an error", func() {
+ contents := `1, 2, "foo" "bar"`
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldNotBeNil)
+ })
+ Convey("nested CSV fields causing header collisions should error", func() {
+ contents := `1, 2f , " 3e" , " may", june`
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b.c", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field3", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldNotBeNil)
+ })
+ Convey("calling StreamDocument() for CSVs should return next set of "+
+ "values", func() {
+ contents := "1, 2, 3\n4, 5, 6"
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedReadOne := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", int32(3)},
+ }
+ expectedReadTwo := bson.D{
+ {"a", int32(4)},
+ {"b", int32(5)},
+ {"c", int32(6)},
+ }
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 2)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedReadOne)
+ So(<-docChan, ShouldResemble, expectedReadTwo)
+ })
+ Convey("valid CSV input file that starts with the UTF-8 BOM should "+
+ "not raise an error", func() {
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedReads := []bson.D{
+ {
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", int32(3)},
+ }, {
+ {"a", int32(4)},
+ {"b", int32(5)},
+ {"c", int32(6)},
+ },
+ }
+ fileHandle, err := os.Open("testdata/test_bom.csv")
+ So(err, ShouldBeNil)
+ r := NewCSVInputReader(colSpecs, fileHandle, os.Stdout, 1, false)
+ docChan := make(chan bson.D, len(expectedReads))
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ for _, expectedRead := range expectedReads {
+ for i, readDocument := range <-docChan {
+ So(readDocument.Name, ShouldResemble, expectedRead[i].Name)
+ So(readDocument.Value, ShouldResemble, expectedRead[i].Value)
+ }
+ }
+ })
+ })
+}
+
+func TestCSVReadAndValidateHeader(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ var err error
+ Convey("With a CSV input reader", t, func() {
+ Convey("setting the header should read the first line of the CSV", func() {
+ contents := "extraHeader1, extraHeader2, extraHeader3"
+ colSpecs := []ColumnSpec{}
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ So(r.ReadAndValidateHeader(), ShouldBeNil)
+ So(len(r.colSpecs), ShouldEqual, 3)
+ })
+
+ Convey("setting non-colliding nested CSV headers should not raise an error", func() {
+ contents := "a, b, c"
+ colSpecs := []ColumnSpec{}
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ So(r.ReadAndValidateHeader(), ShouldBeNil)
+ So(len(r.colSpecs), ShouldEqual, 3)
+ contents = "a.b.c, a.b.d, c"
+ colSpecs = []ColumnSpec{}
+ r = NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ So(r.ReadAndValidateHeader(), ShouldBeNil)
+ So(len(r.colSpecs), ShouldEqual, 3)
+
+ contents = "a.b, ab, a.c"
+ colSpecs = []ColumnSpec{}
+ r = NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ So(r.ReadAndValidateHeader(), ShouldBeNil)
+ So(len(r.colSpecs), ShouldEqual, 3)
+
+ contents = "a, ab, ac, dd"
+ colSpecs = []ColumnSpec{}
+ r = NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ So(r.ReadAndValidateHeader(), ShouldBeNil)
+ So(len(r.colSpecs), ShouldEqual, 4)
+ })
+
+ Convey("setting colliding nested CSV headers should raise an error", func() {
+ contents := "a, a.b, c"
+ colSpecs := []ColumnSpec{}
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ So(r.ReadAndValidateHeader(), ShouldNotBeNil)
+
+ contents = "a.b.c, a.b.d.c, a.b.d"
+ colSpecs = []ColumnSpec{}
+ r = NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ So(r.ReadAndValidateHeader(), ShouldNotBeNil)
+
+ contents = "a, a, a"
+ colSpecs = []ColumnSpec{}
+ r = NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ So(r.ReadAndValidateHeader(), ShouldNotBeNil)
+ })
+
+ Convey("setting the header that ends in a dot should error", func() {
+ contents := "c, a., b"
+ colSpecs := []ColumnSpec{}
+ So(err, ShouldBeNil)
+ So(NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false).ReadAndValidateHeader(), ShouldNotBeNil)
+ })
+
+ Convey("setting the header that starts in a dot should error", func() {
+ contents := "c, .a, b"
+ colSpecs := []ColumnSpec{}
+ So(NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false).ReadAndValidateHeader(), ShouldNotBeNil)
+ })
+
+ Convey("setting the header that contains multiple consecutive dots should error", func() {
+ contents := "c, a..a, b"
+ colSpecs := []ColumnSpec{}
+ So(NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false).ReadAndValidateHeader(), ShouldNotBeNil)
+
+ contents = "c, a.a, b.b...b"
+ colSpecs = []ColumnSpec{}
+ So(NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false).ReadAndValidateHeader(), ShouldNotBeNil)
+ })
+
+ Convey("setting the header using an empty file should return EOF", func() {
+ contents := ""
+ colSpecs := []ColumnSpec{}
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ So(r.ReadAndValidateHeader(), ShouldEqual, io.EOF)
+ So(len(r.colSpecs), ShouldEqual, 0)
+ })
+ Convey("setting the header with column specs already set should replace "+
+ "the existing column specs", func() {
+ contents := "extraHeader1,extraHeader2,extraHeader3"
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ r := NewCSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ So(r.ReadAndValidateHeader(), ShouldBeNil)
+ // if ReadAndValidateHeader() is called with column specs already passed
+ // in, the header should be replaced with the read header line
+ So(len(r.colSpecs), ShouldEqual, 3)
+ So(ColumnNames(r.colSpecs), ShouldResemble, strings.Split(contents, ","))
+ })
+ Convey("plain CSV input file sources should be parsed correctly and "+
+ "subsequent imports should parse correctly", func() {
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedReadOne := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", int32(3)},
+ }
+ expectedReadTwo := bson.D{
+ {"a", int32(3)},
+ {"b", 5.4},
+ {"c", "string"},
+ }
+ fileHandle, err := os.Open("testdata/test.csv")
+ So(err, ShouldBeNil)
+ r := NewCSVInputReader(colSpecs, fileHandle, os.Stdout, 1, false)
+ docChan := make(chan bson.D, 50)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedReadOne)
+ So(<-docChan, ShouldResemble, expectedReadTwo)
+ })
+ })
+}
+
+func TestCSVConvert(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("With a CSV input reader", t, func() {
+ Convey("calling convert on a CSVConverter should return the expected BSON document", func() {
+ csvConverter := CSVConverter{
+ colSpecs: []ColumnSpec{
+ {"field1", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field2", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field3", new(FieldAutoParser), pgAutoCast, "auto"},
+ },
+ data: []string{"a", "b", "c"},
+ index: uint64(0),
+ }
+ expectedDocument := bson.D{
+ {"field1", "a"},
+ {"field2", "b"},
+ {"field3", "c"},
+ }
+ document, err := csvConverter.Convert()
+ So(err, ShouldBeNil)
+ So(document, ShouldResemble, expectedDocument)
+ })
+ })
+}
diff --git a/src/mongo/gotools/mongoimport/dateconv/dateconv.go b/src/mongo/gotools/mongoimport/dateconv/dateconv.go
new file mode 100644
index 00000000000..91fecf51ad7
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/dateconv/dateconv.go
@@ -0,0 +1,77 @@
+package dateconv
+
+import (
+ "strings"
+)
+
+var (
+ // msReplacers based on:
+ // https://msdn.microsoft.com/en-us/library/ee634398(v=sql.130).aspx
+ msReplacers = []string{
+ "dddd", "Monday",
+ "ddd", "Mon",
+ "dd", "02",
+ "d", "2",
+ "MMMM", "January",
+ "MMM", "Jan",
+ "MM", "01",
+ "M", "1",
+ // "gg", "?",
+ "hh", "03",
+ "h", "3",
+ "HH", "15",
+ "H", "15",
+ "mm", "04",
+ "m", "4",
+ "ss", "05",
+ "s", "5",
+ // "f", "?",
+ "tt", "PM",
+ // "t", "?",
+ "yyyy", "2006",
+ "yyy", "2006",
+ "yy", "06",
+ // "y", "?",
+ "zzz", "-07:00",
+ "zz", "-07",
+ // "z", "?",
+ }
+ msStringReplacer = strings.NewReplacer(msReplacers...)
+)
+
+// FromMS reformats a datetime layout string from the Microsoft SQL Server
+// FORMAT function into Go's parse format.
+func FromMS(layout string) string {
+ return msStringReplacer.Replace(layout)
+}
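+// For example, FromMS("yyyy-MM-dd HH:mm:ss") yields "2006-01-02 15:04:05",
+// Go's reference layout for that pattern, suitable for time.Parse.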
+
+var (
+ // oracleReplacers based on:
+ // http://docs.oracle.com/cd/B19306_01/server.102/b14200/sql_elements004.htm#i34924
+ oracleReplacers = []string{
+ "AM", "PM",
+ "DAY", "Monday",
+ "DY", "Mon",
+ "DD", "02",
+ "HH12", "03",
+ "HH24", "15",
+ "HH", "03",
+ "MI", "04",
+ "MONTH", "January",
+ "MON", "Jan",
+ "MM", "01",
+ "SS", "05",
+ "TZD", "MST",
+ "TZH:TZM", "-07:00",
+ "TZH", "-07",
+ "YYYY", "2006",
+ "YY", "06",
+ }
+ oracleStringReplacer = strings.NewReplacer(oracleReplacers...)
+)
+
+// FromOracle reformats a datetime layout string from the Oracle Database
+// TO_DATE function into Go's parse format.
+func FromOracle(layout string) string {
+ return oracleStringReplacer.Replace(strings.ToUpper(layout))
+}
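+// For example, FromOracle("DD-MON-YYYY HH24:MI:SS") yields
+// "02-Jan-2006 15:04:05"; the layout is upper-cased before replacement, so
+// "dd-mon-yyyy" is handled the same way.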
diff --git a/src/mongo/gotools/mongoimport/json.go b/src/mongo/gotools/mongoimport/json.go
new file mode 100644
index 00000000000..caa0dce3121
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/json.go
@@ -0,0 +1,239 @@
+package mongoimport
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/log"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// JSONInputReader is an implementation of InputReader that reads documents
+// in JSON format.
+type JSONInputReader struct {
+ // isArray indicates if the JSON import is an array of JSON documents
+ // or not
+ isArray bool
+
+ // decoder is used to read the next valid JSON documents from the input source
+ decoder *json.Decoder
+
+ // numProcessed indicates the number of JSON documents processed
+ numProcessed uint64
+
+ // readOpeningBracket indicates if the underlying io.Reader has consumed
+ // an opening bracket from the input source. Used to prevent errors when
+ // a JSON input source contains just '[]'
+ readOpeningBracket bool
+
+ // expectedByte is used to store the next expected valid character for JSON
+ // array imports
+ expectedByte byte
+
+ // bytesFromReader is used to store the next byte read from the Reader for
+ // JSON array imports
+ bytesFromReader []byte
+
+ // separatorReader is used for JSON arrays to look for a valid array
+ // separator. It is a reader consisting of the decoder's buffer and the
+ // underlying reader
+ separatorReader io.Reader
+
+ // embedded sizeTracker exposes the Size() method to check the number of bytes read so far
+ sizeTracker
+
+ // numDecoders is the number of concurrent goroutines to use for decoding
+ numDecoders int
+}
+
+// JSONConverter implements the Converter interface for JSON input.
+type JSONConverter struct {
+ data []byte
+ index uint64
+}
+
+var (
+ // ErrNoOpeningBracket means that the input source did not contain any
+ // opening bracket '[' - returned only if --jsonArray is passed in.
+ ErrNoOpeningBracket = errors.New("bad JSON array format - found no " +
+ "opening bracket '[' in input source")
+
+ // ErrNoClosingBracket means that the input source did not contain any
+ // closing bracket ']' - returned only if --jsonArray is passed in.
+ ErrNoClosingBracket = errors.New("bad JSON array format - found no " +
+ "closing bracket ']' in input source")
+)
+
+// NewJSONInputReader creates a new JSONInputReader in array mode if specified,
+// configured to read data from the given io.Reader.
+func NewJSONInputReader(isArray bool, in io.Reader, numDecoders int) *JSONInputReader {
+ szCount := newSizeTrackingReader(newBomDiscardingReader(in))
+ return &JSONInputReader{
+ isArray: isArray,
+ sizeTracker: szCount,
+ decoder: json.NewDecoder(szCount),
+ readOpeningBracket: false,
+ bytesFromReader: make([]byte, 1),
+ numDecoders: numDecoders,
+ }
+}
+
+// ReadAndValidateHeader is a no-op for JSON imports; always returns nil.
+func (r *JSONInputReader) ReadAndValidateHeader() error {
+ return nil
+}
+
+// ReadAndValidateTypedHeader is a no-op for JSON imports; always returns nil.
+func (r *JSONInputReader) ReadAndValidateTypedHeader(parseGrace ParseGrace) error {
+ return nil
+}
+
+// StreamDocument takes a boolean indicating if the documents should be streamed
+// in read order and a channel on which to stream the documents processed from
+// the underlying reader. Returns a non-nil error if one is encountered.
+func (r *JSONInputReader) StreamDocument(ordered bool, readChan chan bson.D) (retErr error) {
+ rawChan := make(chan Converter, r.numDecoders)
+ jsonErrChan := make(chan error)
+
+ // begin reading from source
+ go func() {
+ var err error
+ for {
+ if r.isArray {
+ if err = r.readJSONArraySeparator(); err != nil {
+ close(rawChan)
+ if err == io.EOF {
+ jsonErrChan <- nil
+ } else {
+ r.numProcessed++
+ jsonErrChan <- fmt.Errorf("error reading separator after document #%v: %v", r.numProcessed, err)
+ }
+ return
+ }
+ }
+ rawBytes, err := r.decoder.ScanObject()
+ if err != nil {
+ close(rawChan)
+ if err == io.EOF {
+ jsonErrChan <- nil
+ } else {
+ r.numProcessed++
+ jsonErrChan <- fmt.Errorf("error processing document #%v: %v", r.numProcessed, err)
+ }
+ return
+ }
+ rawChan <- JSONConverter{
+ data: rawBytes,
+ index: r.numProcessed,
+ }
+ r.numProcessed++
+ }
+ }()
+
+ // begin processing read bytes
+ go func() {
+ jsonErrChan <- streamDocuments(ordered, r.numDecoders, rawChan, readChan)
+ }()
+
+ return channelQuorumError(jsonErrChan, 2)
+}
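+// A minimal usage sketch (illustrative only; whitespace-separated plain JSON
+// documents, single decoder):
+//
+//     r := NewJSONInputReader(false, strings.NewReader(`{"a": 1}{"b": 2}`), 1)
+//     out := make(chan bson.D, 2)
+//     if err := r.StreamDocument(true, out); err != nil {
+//         // handle malformed JSON
+//     }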
+
+// Convert implements the Converter interface for JSON input. It converts a
+// JSONConverter struct to a BSON document.
+func (c JSONConverter) Convert() (bson.D, error) {
+ document, err := json.UnmarshalBsonD(c.data)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshaling bytes on document #%v: %v", c.index, err)
+ }
+ log.Logvf(log.DebugHigh, "got line: %v", document)
+
+ bsonD, err := bsonutil.GetExtendedBsonD(document)
+ if err != nil {
+ return nil, fmt.Errorf("error getting extended BSON for document #%v: %v", c.index, err)
+ }
+ log.Logvf(log.DebugHigh, "got extended line: %#v", bsonD)
+ return bsonD, nil
+}
+
+// readJSONArraySeparator is a helper method used to process JSON arrays. It is
+// used to read any of the valid separators for a JSON array and flag invalid
+// characters.
+//
+// It will read a byte at a time until it finds an expected character after
+// which it returns control to the caller.
+//
+// It will also return immediately if it finds any error (including EOF). If it
+// reads a json.ArrayEnd byte, as a validity check it will continue to scan the
+// input source until it hits an error (including EOF) to ensure the entire
+// input source content is a valid JSON array.
+func (r *JSONInputReader) readJSONArraySeparator() error {
+ r.expectedByte = json.ArraySep
+ if r.numProcessed == 0 {
+ r.expectedByte = json.ArrayStart
+ }
+
+ var readByte byte
+ scanp := 0
+
+ separatorReader := io.MultiReader(
+ r.decoder.Buffered(),
+ r.decoder.R,
+ )
+ for readByte != r.expectedByte {
+ n, err := separatorReader.Read(r.bytesFromReader)
+ scanp += n
+ if n == 0 || err != nil {
+ if err == io.EOF {
+ return ErrNoClosingBracket
+ }
+ return err
+ }
+ readByte = r.bytesFromReader[0]
+
+ if readByte == json.ArrayEnd {
+ // if we read the end of the JSON array, ensure we have no other
+ // non-whitespace characters at the end of the array
+ for {
+ _, err = separatorReader.Read(r.bytesFromReader)
+ if err != nil {
+ // takes care of the '[]' case
+ if !r.readOpeningBracket {
+ return ErrNoOpeningBracket
+ }
+ return err
+ }
+ readString := string(r.bytesFromReader[0])
+ if strings.TrimSpace(readString) != "" {
+ return fmt.Errorf("bad JSON array format - found '%v' "+
+ "after '%v' in input source", readString,
+ string(json.ArrayEnd))
+ }
+ }
+ }
+
+ // this will catch any invalid byte that occurs between JSON objects in
+ // the input source
+ if !(readByte == json.ArraySep ||
+ strings.TrimSpace(string(readByte)) == "" ||
+ readByte == json.ArrayStart ||
+ readByte == json.ArrayEnd) {
+ if r.expectedByte == json.ArrayStart {
+ return ErrNoOpeningBracket
+ }
+ return fmt.Errorf("bad JSON array format - found '%v' outside "+
+ "JSON object/array in input source", string(readByte))
+ }
+ }
+ // adjust the buffer to account for read bytes
+ if scanp < len(r.decoder.Buf) {
+ r.decoder.Buf = r.decoder.Buf[scanp:]
+ } else {
+ r.decoder.Buf = []byte{}
+ }
+ r.readOpeningBracket = true
+ return nil
+}
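+// For example, given the input `[{"a":1},{"b":2}]`, the first call consumes
+// '[' and each subsequent call consumes the ',' between documents; reading
+// ']' triggers the trailing-content scan described above.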
diff --git a/src/mongo/gotools/mongoimport/json_test.go b/src/mongo/gotools/mongoimport/json_test.go
new file mode 100644
index 00000000000..597b282856b
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/json_test.go
@@ -0,0 +1,264 @@
+package mongoimport
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "testing"
+
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+)
+
+func TestJSONArrayStreamDocument(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("With a JSON array input reader", t, func() {
+ var jsonFile, fileHandle *os.File
+ Convey("an error should be thrown if a plain JSON document is supplied", func() {
+ contents := `{"a": "ae"}`
+ r := NewJSONInputReader(true, bytes.NewReader([]byte(contents)), 1)
+ So(r.StreamDocument(true, make(chan bson.D, 1)), ShouldNotBeNil)
+ })
+
+ Convey("reading a JSON object that has no opening bracket should "+
+ "error out", func() {
+ contents := `{"a":3},{"b":4}]`
+ r := NewJSONInputReader(true, bytes.NewReader([]byte(contents)), 1)
+ So(r.StreamDocument(true, make(chan bson.D, 1)), ShouldNotBeNil)
+ })
+
+ Convey("JSON arrays that do not end with a closing bracket should "+
+ "error out", func() {
+ contents := `[{"a": "ae"}`
+ r := NewJSONInputReader(true, bytes.NewReader([]byte(contents)), 1)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldNotBeNil)
+ // though first read should be fine
+ So(<-docChan, ShouldResemble, bson.D{{"a", "ae"}})
+ })
+
+ Convey("an error should be thrown if a plain JSON file is supplied", func() {
+ fileHandle, err := os.Open("testdata/test_plain.json")
+ So(err, ShouldBeNil)
+ r := NewJSONInputReader(true, fileHandle, 1)
+ So(r.StreamDocument(true, make(chan bson.D, 50)), ShouldNotBeNil)
+ })
+
+ Convey("array JSON input file sources should be parsed correctly and "+
+ "subsequent imports should parse correctly", func() {
+ // TODO: currently parses JSON as floats and not ints
+ expectedReadOne := bson.D{
+ {"a", 1.2},
+ {"b", "a"},
+ {"c", 0.4},
+ }
+ expectedReadTwo := bson.D{
+ {"a", 2.4},
+ {"b", "string"},
+ {"c", 52.9},
+ }
+ fileHandle, err := os.Open("testdata/test_array.json")
+ So(err, ShouldBeNil)
+ r := NewJSONInputReader(true, fileHandle, 1)
+ docChan := make(chan bson.D, 50)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedReadOne)
+ So(<-docChan, ShouldResemble, expectedReadTwo)
+ })
+
+ Reset(func() {
+ jsonFile.Close()
+ fileHandle.Close()
+ })
+ })
+}
+
+func TestJSONPlainStreamDocument(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("With a plain JSON input reader", t, func() {
+ var jsonFile, fileHandle *os.File
+ Convey("string valued JSON documents should be imported properly", func() {
+ contents := `{"a": "ae"}`
+ expectedRead := bson.D{{"a", "ae"}}
+ r := NewJSONInputReader(false, bytes.NewReader([]byte(contents)), 1)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedRead)
+ })
+
+ Convey("several string valued JSON documents should be imported "+
+ "properly", func() {
+ contents := `{"a": "ae"}{"b": "dc"}`
+ expectedReadOne := bson.D{{"a", "ae"}}
+ expectedReadTwo := bson.D{{"b", "dc"}}
+ r := NewJSONInputReader(false, bytes.NewReader([]byte(contents)), 1)
+ docChan := make(chan bson.D, 2)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedReadOne)
+ So(<-docChan, ShouldResemble, expectedReadTwo)
+ })
+
+ Convey("number valued JSON documents should be imported properly", func() {
+ contents := `{"a": "ae", "b": 2.0}`
+ expectedRead := bson.D{{"a", "ae"}, {"b", 2.0}}
+ r := NewJSONInputReader(false, bytes.NewReader([]byte(contents)), 1)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedRead)
+ })
+
+ Convey("JSON arrays should return an error", func() {
+ contents := `[{"a": "ae", "b": 2.0}]`
+ r := NewJSONInputReader(false, bytes.NewReader([]byte(contents)), 1)
+ So(r.StreamDocument(true, make(chan bson.D, 50)), ShouldNotBeNil)
+ })
+
+ Convey("plain JSON input file sources should be parsed correctly and "+
+ "subsequent imports should parse correctly", func() {
+ expectedReads := []bson.D{
+ {
+ {"a", 4},
+ {"b", "string value"},
+ {"c", 1},
+ }, {
+ {"a", 5},
+ {"b", "string value"},
+ {"c", 2},
+ }, {
+ {"a", 6},
+ {"b", "string value"},
+ {"c", 3},
+ },
+ }
+ fileHandle, err := os.Open("testdata/test_plain.json")
+ So(err, ShouldBeNil)
+ r := NewJSONInputReader(false, fileHandle, 1)
+ docChan := make(chan bson.D, len(expectedReads))
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ for i := 0; i < len(expectedReads); i++ {
+ for j, readDocument := range <-docChan {
+ So(readDocument.Name, ShouldEqual, expectedReads[i][j].Name)
+ So(readDocument.Value, ShouldEqual, expectedReads[i][j].Value)
+ }
+ }
+ })
+
+ Convey("reading JSON that starts with a UTF-8 BOM should not error",
+ func() {
+ expectedReads := []bson.D{
+ {
+ {"a", 1},
+ {"b", 2},
+ {"c", 3},
+ }, {
+ {"a", 4},
+ {"b", 5},
+ {"c", 6},
+ },
+ }
+ fileHandle, err := os.Open("testdata/test_bom.json")
+ So(err, ShouldBeNil)
+ r := NewJSONInputReader(false, fileHandle, 1)
+ docChan := make(chan bson.D, 2)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ for _, expectedRead := range expectedReads {
+ for i, readDocument := range <-docChan {
+ So(readDocument.Name, ShouldEqual, expectedRead[i].Name)
+ So(readDocument.Value, ShouldEqual, expectedRead[i].Value)
+ }
+ }
+ })
+
+ Reset(func() {
+ jsonFile.Close()
+ fileHandle.Close()
+ })
+ })
+}
+
+func TestReadJSONArraySeparator(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("With an array JSON input reader", t, func() {
+ Convey("reading a JSON array separator should consume [",
+ func() {
+ contents := `[{"a": "ae"}`
+ jsonImporter := NewJSONInputReader(true, bytes.NewReader([]byte(contents)), 1)
+ So(jsonImporter.readJSONArraySeparator(), ShouldBeNil)
+ // at this point it should have consumed all bytes up to `{`
+ So(jsonImporter.readJSONArraySeparator(), ShouldNotBeNil)
+ })
+ Convey("reading a closing JSON array separator without a "+
+ "corresponding opening bracket should error out ",
+ func() {
+ contents := `]`
+ jsonImporter := NewJSONInputReader(true, bytes.NewReader([]byte(contents)), 1)
+ So(jsonImporter.readJSONArraySeparator(), ShouldNotBeNil)
+ })
+ Convey("reading an opening JSON array separator without a "+
+ "corresponding closing bracket should error out ",
+ func() {
+ contents := `[`
+ jsonImporter := NewJSONInputReader(true, bytes.NewReader([]byte(contents)), 1)
+ So(jsonImporter.readJSONArraySeparator(), ShouldBeNil)
+ So(jsonImporter.readJSONArraySeparator(), ShouldNotBeNil)
+ })
+ Convey("reading an opening JSON array separator with an ending "+
+ "closing bracket should return EOF",
+ func() {
+ contents := `[]`
+ jsonImporter := NewJSONInputReader(true, bytes.NewReader([]byte(contents)), 1)
+ So(jsonImporter.readJSONArraySeparator(), ShouldBeNil)
+ So(jsonImporter.readJSONArraySeparator(), ShouldEqual, io.EOF)
+ })
+ Convey("reading an opening JSON array separator, an ending closing "+
+ "bracket but then additional characters after that, should error",
+ func() {
+ contents := `[]a`
+ jsonImporter := NewJSONInputReader(true, bytes.NewReader([]byte(contents)), 1)
+ So(jsonImporter.readJSONArraySeparator(), ShouldBeNil)
+ So(jsonImporter.readJSONArraySeparator(), ShouldNotBeNil)
+ })
+ Convey("reading invalid JSON objects between valid objects should "+
+ "error out",
+ func() {
+ contents := `[{"a":3}x{"b":4}]`
+ r := NewJSONInputReader(true, bytes.NewReader([]byte(contents)), 1)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldNotBeNil)
+ // read first valid document
+ <-docChan
+ So(r.readJSONArraySeparator(), ShouldNotBeNil)
+ })
+ Convey("reading invalid JSON objects after valid objects but between "+
+ "valid objects should error out",
+ func() {
+ contents := `[{"a":3},b{"b":4}]`
+ r := NewJSONInputReader(true, bytes.NewReader([]byte(contents)), 1)
+ So(r.StreamDocument(true, make(chan bson.D, 1)), ShouldNotBeNil)
+ contents = `[{"a":3},,{"b":4}]`
+ r = NewJSONInputReader(true, bytes.NewReader([]byte(contents)), 1)
+ So(r.StreamDocument(true, make(chan bson.D, 1)), ShouldNotBeNil)
+ })
+ })
+}
+
+func TestJSONConvert(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("With a JSON input reader", t, func() {
+ Convey("calling convert on a JSONConverter should return the expected BSON document", func() {
+ jsonConverter := JSONConverter{
+ data: []byte(`{field1:"a",field2:"b",field3:"c"}`),
+ index: uint64(0),
+ }
+ expectedDocument := bson.D{
+ {"field1", "a"},
+ {"field2", "b"},
+ {"field3", "c"},
+ }
+ document, err := jsonConverter.Convert()
+ So(err, ShouldBeNil)
+ So(document, ShouldResemble, expectedDocument)
+ })
+ })
+}
diff --git a/src/mongo/gotools/mongoimport/main/mongoimport.go b/src/mongo/gotools/mongoimport/main/mongoimport.go
new file mode 100644
index 00000000000..50002bc5442
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/main/mongoimport.go
@@ -0,0 +1,86 @@
+// Main package for the mongoimport tool.
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/signals"
+ "github.com/mongodb/mongo-tools/common/util"
+ "github.com/mongodb/mongo-tools/mongoimport"
+)
+
+func main() {
+ // initialize command-line opts
+ opts := options.New("mongoimport", mongoimport.Usage,
+ options.EnabledOptions{Auth: true, Connection: true, Namespace: true})
+
+ inputOpts := &mongoimport.InputOptions{}
+ opts.AddOptions(inputOpts)
+ ingestOpts := &mongoimport.IngestOptions{}
+ opts.AddOptions(ingestOpts)
+
+ args, err := opts.Parse()
+ if err != nil {
+ log.Logvf(log.Always, "error parsing command line options: %v", err)
+ log.Logvf(log.Always, "try 'mongoimport --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ log.SetVerbosity(opts.Verbosity)
+ signals.Handle()
+
+ // print help, if specified
+ if opts.PrintHelp(false) {
+ return
+ }
+
+ // print version, if specified
+ if opts.PrintVersion() {
+ return
+ }
+
+ // connect directly, unless a replica set name is explicitly specified
+ _, setName := util.ParseConnectionString(opts.Host)
+ opts.Direct = (setName == "")
+ opts.ReplicaSetName = setName
+
+ // create a session provider to connect to the db
+ sessionProvider, err := db.NewSessionProvider(*opts)
+ if err != nil {
+ log.Logvf(log.Always, "error connecting to host: %v", err)
+ os.Exit(util.ExitError)
+ }
+ sessionProvider.SetBypassDocumentValidation(ingestOpts.BypassDocumentValidation)
+
+ m := mongoimport.MongoImport{
+ ToolOptions: opts,
+ InputOptions: inputOpts,
+ IngestOptions: ingestOpts,
+ SessionProvider: sessionProvider,
+ }
+
+ if err = m.ValidateSettings(args); err != nil {
+ log.Logvf(log.Always, "error validating settings: %v", err)
+ log.Logvf(log.Always, "try 'mongoimport --help' for more information")
+ os.Exit(util.ExitError)
+ }
+
+ numDocs, err := m.ImportDocuments()
+ if !opts.Quiet {
+ if err != nil {
+ log.Logvf(log.Always, "Failed: %v", err)
+ }
+ message := "imported 1 document"
+ if numDocs != 1 {
+ message = fmt.Sprintf("imported %v documents", numDocs)
+ }
+ log.Logvf(log.Always, "%v", message)
+ }
+ if err != nil {
+ os.Exit(util.ExitError)
+ }
+}
diff --git a/src/mongo/gotools/mongoimport/mongoimport.go b/src/mongo/gotools/mongoimport/mongoimport.go
new file mode 100644
index 00000000000..599760cb053
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/mongoimport.go
@@ -0,0 +1,575 @@
+// Package mongoimport allows importing JSON, CSV, or TSV data into a MongoDB instance.
+package mongoimport
+
+import (
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/progress"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "gopkg.in/tomb.v2"
+
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// Input format types accepted by mongoimport.
+const (
+ CSV = "csv"
+ TSV = "tsv"
+ JSON = "json"
+)
+
+const (
+ workerBufferSize = 16
+ progressBarLength = 24
+)
+
+// MongoImport is a container for the user-specified options and
+// internal state used for running mongoimport.
+type MongoImport struct {
+ // insertionCount keeps track of how many documents have successfully
+ // been inserted into the database. It is updated atomically and kept at
+ // the beginning of the struct to guarantee 64-bit alignment.
+ insertionCount uint64
+
+ // generic mongo tool options
+ ToolOptions *options.ToolOptions
+
+ // InputOptions defines options used to read data to be ingested
+ InputOptions *InputOptions
+
+ // IngestOptions defines options used to ingest data into MongoDB
+ IngestOptions *IngestOptions
+
+ // SessionProvider is used for connecting to the database
+ SessionProvider *db.SessionProvider
+
+ // the tomb is used to synchronize ingestion goroutines and causes
+ // other sibling goroutines to terminate immediately if one errors out
+ tomb.Tomb
+
+ // fields to use for upsert operations
+ upsertFields []string
+
+ // type of node the SessionProvider is connected to
+ nodeType db.NodeType
+}
+
+// InputReader is the interface that wraps the methods used to read and
+// validate documents from an import source.
+type InputReader interface {
+ // StreamDocument takes a boolean indicating if the documents should be streamed
+ // in read order and a channel on which to stream the documents processed from
+ // the underlying reader. Returns a non-nil error if encountered.
+ StreamDocument(ordered bool, read chan bson.D) error
+
+ // ReadAndValidateHeader reads the header line from the InputReader and returns
+ // a non-nil error if the fields from the header line are invalid; returns
+ // nil otherwise. No-op for JSON input readers.
+ ReadAndValidateHeader() error
+
+ // ReadAndValidateTypedHeader is the same as ReadAndValidateHeader,
+ // except it also parses types from the fields of the header. Parse errors
+ // will be handled according to parseGrace.
+ ReadAndValidateTypedHeader(parseGrace ParseGrace) error
+
+ // an embedded io.Reader that tracks the number of bytes read, so the reader can feed a progress bar.
+ sizeTracker
+}
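A minimal consumption sketch for an InputReader (the channel-closing behavior is the same contract the ingest loop in runInsertionWorker relies on; the literal document here is illustrative):

    docChan := make(chan bson.D, 1)
    r := NewJSONInputReader(false, bytes.NewReader([]byte(`{"a": 1}`)), 1)
    go func() { _ = r.StreamDocument(true, docChan) }()
    for doc := range docChan { // StreamDocument closes docChan when the source is drained
        fmt.Printf("%v\n", doc)
    }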
+
+// ValidateSettings ensures that the tool specific options supplied for
+// MongoImport are valid.
+func (imp *MongoImport) ValidateSettings(args []string) error {
+ // namespace must have a valid database; if none is specified, use 'test'
+ if imp.ToolOptions.DB == "" {
+ imp.ToolOptions.DB = "test"
+ }
+ err := util.ValidateDBName(imp.ToolOptions.DB)
+ if err != nil {
+ return fmt.Errorf("invalid database name: %v", err)
+ }
+
+ imp.InputOptions.Type = strings.ToLower(imp.InputOptions.Type)
+ // use JSON as default input type
+ if imp.InputOptions.Type == "" {
+ imp.InputOptions.Type = JSON
+ } else {
+ if !(imp.InputOptions.Type == TSV ||
+ imp.InputOptions.Type == JSON ||
+ imp.InputOptions.Type == CSV) {
+ return fmt.Errorf("unknown type %v", imp.InputOptions.Type)
+ }
+ }
+
+ // ensure headers are supplied for CSV/TSV
+ if imp.InputOptions.Type == CSV ||
+ imp.InputOptions.Type == TSV {
+ if !imp.InputOptions.HeaderLine {
+ if imp.InputOptions.Fields == nil &&
+ imp.InputOptions.FieldFile == nil {
+ return fmt.Errorf("must specify --fields, --fieldFile or --headerline to import this file type")
+ }
+ if imp.InputOptions.FieldFile != nil &&
+ *imp.InputOptions.FieldFile == "" {
+ return fmt.Errorf("--fieldFile can not be empty string")
+ }
+ if imp.InputOptions.Fields != nil &&
+ imp.InputOptions.FieldFile != nil {
+ return fmt.Errorf("incompatible options: --fields and --fieldFile")
+ }
+ } else {
+ if imp.InputOptions.Fields != nil {
+ return fmt.Errorf("incompatible options: --fields and --headerline")
+ }
+ if imp.InputOptions.FieldFile != nil {
+ return fmt.Errorf("incompatible options: --fieldFile and --headerline")
+ }
+ }
+
+ if _, err := ValidatePG(imp.InputOptions.ParseGrace); err != nil {
+ return err
+ }
+ } else {
+ // input type is JSON
+ if imp.InputOptions.HeaderLine {
+ return fmt.Errorf("can not use --headerline when input type is JSON")
+ }
+ if imp.InputOptions.Fields != nil {
+ return fmt.Errorf("can not use --fields when input type is JSON")
+ }
+ if imp.InputOptions.FieldFile != nil {
+ return fmt.Errorf("can not use --fieldFile when input type is JSON")
+ }
+ if imp.IngestOptions.IgnoreBlanks {
+ return fmt.Errorf("can not use --ignoreBlanks when input type is JSON")
+ }
+ if imp.InputOptions.ColumnsHaveTypes {
+ return fmt.Errorf("can not use --columnsHaveTypes when input type is JSON")
+ }
+ }
+
+ if imp.IngestOptions.UpsertFields != "" {
+ imp.IngestOptions.Upsert = true
+ imp.upsertFields = strings.Split(imp.IngestOptions.UpsertFields, ",")
+ if err := validateFields(imp.upsertFields); err != nil {
+ return fmt.Errorf("invalid --upsertFields argument: %v", err)
+ }
+ } else if imp.IngestOptions.Upsert {
+ imp.upsertFields = []string{"_id"}
+ }
+
+ if imp.IngestOptions.Upsert {
+ imp.IngestOptions.MaintainInsertionOrder = true
+ log.Logvf(log.Info, "using upsert fields: %v", imp.upsertFields)
+ }
+
+ // set the number of decoding workers to use for imports
+ if imp.IngestOptions.NumDecodingWorkers <= 0 {
+ imp.IngestOptions.NumDecodingWorkers = imp.ToolOptions.MaxProcs
+ }
+ log.Logvf(log.DebugLow, "using %v decoding workers", imp.IngestOptions.NumDecodingWorkers)
+
+ // set the number of insertion workers to use for imports
+ if imp.IngestOptions.NumInsertionWorkers <= 0 {
+ imp.IngestOptions.NumInsertionWorkers = 1
+ }
+
+ log.Logvf(log.DebugLow, "using %v insert workers", imp.IngestOptions.NumInsertionWorkers)
+
+ // if --maintainInsertionOrder is set, we can only allow 1 insertion worker
+ if imp.IngestOptions.MaintainInsertionOrder {
+ imp.IngestOptions.NumInsertionWorkers = 1
+ }
+
+ // clamp the number of documents per batch to the range 1-1000
+ if imp.IngestOptions.BulkBufferSize <= 0 || imp.IngestOptions.BulkBufferSize > 1000 {
+ imp.IngestOptions.BulkBufferSize = 1000
+ }
+
+ // ensure no more than one positional argument is supplied
+ if len(args) > 1 {
+ return fmt.Errorf("only one positional argument is allowed")
+ }
+
+ // ensure either a positional argument is supplied or an argument is passed
+ // to the --file flag - and not both
+ if imp.InputOptions.File != "" && len(args) != 0 {
+ return fmt.Errorf("incompatible options: --file and positional argument(s)")
+ }
+
+ if imp.InputOptions.File == "" {
+ if len(args) != 0 {
+ // if --file is not supplied, use the positional argument supplied
+ imp.InputOptions.File = args[0]
+ }
+ }
+
+ // ensure we have a valid string to use for the collection
+ if imp.ToolOptions.Collection == "" {
+ log.Logvf(log.Always, "no collection specified")
+ fileBaseName := filepath.Base(imp.InputOptions.File)
+ lastDotIndex := strings.LastIndex(fileBaseName, ".")
+ if lastDotIndex != -1 {
+ fileBaseName = fileBaseName[0:lastDotIndex]
+ }
+ log.Logvf(log.Always, "using filename '%v' as collection", fileBaseName)
+ imp.ToolOptions.Collection = fileBaseName
+ }
+ err = util.ValidateCollectionName(imp.ToolOptions.Collection)
+ if err != nil {
+ return fmt.Errorf("invalid collection name: %v", err)
+ }
+ return nil
+}
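For example (a hypothetical path, matching the cases exercised in mongoimport_test.go), the collection name falls back to the input file's base name with the extension stripped:

    imp.InputOptions.File = "/data/exports/users.json"
    imp.ToolOptions.Collection = ""
    err := imp.ValidateSettings([]string{}) // logs the fallback and sets Collection to "users"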
+
+// getSourceReader returns an io.ReadCloser for the input source, along with
+// the size of the underlying file in bytes (0 when reading from stdin, whose
+// size is unknown), and any error encountered while opening the source.
+func (imp *MongoImport) getSourceReader() (io.ReadCloser, int64, error) {
+ if imp.InputOptions.File != "" {
+ file, err := os.Open(util.ToUniversalPath(imp.InputOptions.File))
+ if err != nil {
+ return nil, -1, err
+ }
+ fileStat, err := file.Stat()
+ if err != nil {
+ return nil, -1, err
+ }
+ log.Logvf(log.Info, "filesize: %v bytes", fileStat.Size())
+ return file, fileStat.Size(), nil
+ }
+
+ log.Logvf(log.Info, "reading from stdin")
+
+ // Stdin has undefined max size, so return 0
+ return os.Stdin, 0, nil
+}
+
+// fileSizeProgressor implements Progressor to allow a sizeTracker to hook up with a
+// progress.Bar instance, so that the progress bar can report the percentage of the file read.
+type fileSizeProgressor struct {
+ max int64
+ sizeTracker
+}
+
+func (fsp *fileSizeProgressor) Progress() (int64, int64) {
+ return fsp.max, fsp.sizeTracker.Size()
+}
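As a sketch of the Progressor contract assumed here: progress.Bar periodically calls Progress for a (max, current) pair, from which a percentage can be derived; a zero max (stdin) must be guarded against:

    max, current := fsp.Progress()
    pct := 0.0
    if max > 0 { // stdin reports max == 0
        pct = 100 * float64(current) / float64(max)
    }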
+
+// ImportDocuments is used to write input data to the database. It returns the
+// number of documents successfully imported to the appropriate namespace and
+// any error encountered while doing so.
+func (imp *MongoImport) ImportDocuments() (uint64, error) {
+ source, fileSize, err := imp.getSourceReader()
+ if err != nil {
+ return 0, err
+ }
+ defer source.Close()
+
+ inputReader, err := imp.getInputReader(source)
+ if err != nil {
+ return 0, err
+ }
+
+ if imp.InputOptions.HeaderLine {
+ if imp.InputOptions.ColumnsHaveTypes {
+ err = inputReader.ReadAndValidateTypedHeader(ParsePG(imp.InputOptions.ParseGrace))
+ } else {
+ err = inputReader.ReadAndValidateHeader()
+ }
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ bar := &progress.Bar{
+ Name: fmt.Sprintf("%v.%v", imp.ToolOptions.DB, imp.ToolOptions.Collection),
+ Watching: &fileSizeProgressor{fileSize, inputReader},
+ Writer: log.Writer(0),
+ BarLength: progressBarLength,
+ IsBytes: true,
+ }
+ bar.Start()
+ defer bar.Stop()
+ return imp.importDocuments(inputReader)
+}
+
+// importDocuments is a helper to ImportDocuments and does all the ingestion
+// work by taking data from the inputReader source and writing it to the
+// appropriate namespace.
+func (imp *MongoImport) importDocuments(inputReader InputReader) (numImported uint64, retErr error) {
+ session, err := imp.SessionProvider.GetSession()
+ if err != nil {
+ return 0, err
+ }
+ defer session.Close()
+
+ connURL := imp.ToolOptions.Host
+ if connURL == "" {
+ connURL = util.DefaultHost
+ }
+ if imp.ToolOptions.Port != "" {
+ connURL = connURL + ":" + imp.ToolOptions.Port
+ }
+ log.Logvf(log.Always, "connected to: %v", connURL)
+
+ log.Logvf(log.Info, "ns: %v.%v",
+ imp.ToolOptions.Namespace.DB,
+ imp.ToolOptions.Namespace.Collection)
+
+ // check if the server is a replica set, mongos, or standalone
+ imp.nodeType, err = imp.SessionProvider.GetNodeType()
+ if err != nil {
+ return 0, fmt.Errorf("error checking connected node type: %v", err)
+ }
+ log.Logvf(log.Info, "connected to node type: %v", imp.nodeType)
+
+ if err = imp.configureSession(session); err != nil {
+ return 0, fmt.Errorf("error configuring session: %v", err)
+ }
+
+ // drop the database if necessary
+ if imp.IngestOptions.Drop {
+ log.Logvf(log.Always, "dropping: %v.%v",
+ imp.ToolOptions.DB,
+ imp.ToolOptions.Collection)
+ collection := session.DB(imp.ToolOptions.DB).
+ C(imp.ToolOptions.Collection)
+ if err := collection.DropCollection(); err != nil {
+ if err.Error() != db.ErrNsNotFound {
+ return 0, err
+ }
+ }
+ }
+
+ readDocs := make(chan bson.D, workerBufferSize)
+ processingErrChan := make(chan error)
+ ordered := imp.IngestOptions.MaintainInsertionOrder
+
+ // read and process from the input reader
+ go func() {
+ processingErrChan <- inputReader.StreamDocument(ordered, readDocs)
+ }()
+
+ // insert documents into the target database
+ go func() {
+ processingErrChan <- imp.ingestDocuments(readDocs)
+ }()
+
+ // wait on both the reader and the ingester goroutines, surfacing any
+ // error either of them reports
+ err = channelQuorumError(processingErrChan, 2)
+ insertionCount := atomic.LoadUint64(&imp.insertionCount)
+ return insertionCount, err
+}
+
+// ingestDocuments accepts a channel from which it reads documents to be inserted
+// into the target collection. It spreads the insert/upsert workload across one
+// or more workers.
+func (imp *MongoImport) ingestDocuments(readDocs chan bson.D) (retErr error) {
+ numInsertionWorkers := imp.IngestOptions.NumInsertionWorkers
+ if numInsertionWorkers <= 0 {
+ numInsertionWorkers = 1
+ }
+
+ // Each ingest worker will return an error which will
+ // be set in the following cases:
+ //
+ // 1. There is a problem connecting with the server
+ // 2. The server becomes unreachable
+ // 3. There is an insertion/update error - e.g. duplicate key
+ // error - and stopOnError is set to true
+
+ wg := new(sync.WaitGroup)
+ var retErrOnce sync.Once
+ for i := 0; i < numInsertionWorkers; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // only set the first insertion error and cause sibling goroutines to
+ // terminate immediately; sync.Once avoids a data race between workers
+ if err := imp.runInsertionWorker(readDocs); err != nil {
+ retErrOnce.Do(func() {
+ retErr = err
+ imp.Kill(err)
+ })
+ }
+ }()
+ }
+ wg.Wait()
+ return
+}
+
+// configureSession takes in a session and modifies it with properly configured
+// settings. It does the following configurations:
+//
+// 1. Sets the session to not timeout
+// 2. Sets the write concern on the session
+// 3. Sets the session safety
+//
+// It returns an error if the write concern cannot be built.
+func (imp *MongoImport) configureSession(session *mgo.Session) error {
+ // sockets to the database will never be forcibly closed
+ session.SetSocketTimeout(0)
+ sessionSafety, err := db.BuildWriteConcern(imp.IngestOptions.WriteConcern, imp.nodeType)
+ if err != nil {
+ return fmt.Errorf("write concern error: %v", err)
+ }
+ session.SetSafe(sessionSafety)
+
+ return nil
+}
+
+// flushInserter knows how to insert a single document and flush any buffered
+// state at the end of a run.
+type flushInserter interface {
+ Insert(doc interface{}) error
+ Flush() error
+}
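Both implementations of flushInserter below (upserter and db.BufferedBulkInserter) are driven through the same pattern, sketched here with a hypothetical docs slice:

    for _, doc := range docs {
        if err := inserter.Insert(doc); err != nil {
            return err
        }
    }
    return inserter.Flush() // a no-op for upserter; sends any buffered batch for bulk inserts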
+
+// runInsertionWorker is a helper to ingestDocuments - it reads documents off
+// the read channel and prepares them in batches for insertion into the database.
+func (imp *MongoImport) runInsertionWorker(readDocs chan bson.D) (err error) {
+ session, err := imp.SessionProvider.GetSession()
+ if err != nil {
+ return fmt.Errorf("error connecting to mongod: %v", err)
+ }
+ defer session.Close()
+ if err = imp.configureSession(session); err != nil {
+ return fmt.Errorf("error configuring session: %v", err)
+ }
+ collection := session.DB(imp.ToolOptions.DB).C(imp.ToolOptions.Collection)
+
+ var inserter flushInserter
+ if imp.IngestOptions.Upsert {
+ inserter = imp.newUpserter(collection)
+ } else {
+ inserter = db.NewBufferedBulkInserter(collection, imp.IngestOptions.BulkBufferSize, !imp.IngestOptions.StopOnError)
+ if !imp.IngestOptions.MaintainInsertionOrder {
+ inserter.(*db.BufferedBulkInserter).Unordered()
+ }
+ }
+
+readLoop:
+ for {
+ select {
+ case document, alive := <-readDocs:
+ if !alive {
+ break readLoop
+ }
+ err = filterIngestError(imp.IngestOptions.StopOnError, inserter.Insert(document))
+ if err != nil {
+ return err
+ }
+ atomic.AddUint64(&imp.insertionCount, 1)
+ case <-imp.Dying():
+ return nil
+ }
+ }
+
+ err = inserter.Flush()
+ // TOOLS-349 correct import count for bulk operations
+ if bulkError, ok := err.(*mgo.BulkError); ok {
+ failedDocs := make(map[int]bool) // index of failures
+ for _, failure := range bulkError.Cases() {
+ failedDocs[failure.Index] = true
+ }
+ numFailures := len(failedDocs)
+ if numFailures > 0 {
+ log.Logvf(log.Always, "num failures: %d", numFailures)
+ // adding ^uint64(n-1) subtracts n from the unsigned counter
+ atomic.AddUint64(&imp.insertionCount, ^uint64(numFailures-1))
+ }
+ }
+ return filterIngestError(imp.IngestOptions.StopOnError, err)
+}
+
+// upserter implements flushInserter by issuing an unbuffered insert or upsert
+// for each document.
+type upserter struct {
+ imp *MongoImport
+ collection *mgo.Collection
+}
+
+func (imp *MongoImport) newUpserter(collection *mgo.Collection) *upserter {
+ return &upserter{
+ imp: imp,
+ collection: collection,
+ }
+}
+
+// Insert is part of the flushInserter interface and performs
+// upserts or inserts.
+func (up *upserter) Insert(doc interface{}) error {
+ document := doc.(bson.D)
+ selector := constructUpsertDocument(up.imp.upsertFields, document)
+ var err error
+ if selector == nil {
+ err = up.collection.Insert(document)
+ } else {
+ _, err = up.collection.Upsert(selector, document)
+ }
+ return err
+}
+
+// Flush is needed so that upserter implements flushInserter, but upserter
+// doesn't buffer anything so we don't need to do anything in Flush.
+func (up *upserter) Flush() error {
+ return nil
+}
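As a concrete illustration with a hypothetical document (constructUpsertDocument is defined elsewhere in this package): with upsertFields of just "_id", the selector keys the upsert on _id and the full document replaces any match:

    doc := bson.D{{"_id", 1}, {"b", 2}}
    selector := constructUpsertDocument([]string{"_id"}, doc) // {_id: 1}
    _, err := collection.Upsert(selector, doc)                // replace or insert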
+
+// splitInlineHeader splits a comma-separated field header into its fields,
+// keeping commas that appear inside parenthesized type arguments intact.
+func splitInlineHeader(header string) (headers []string) {
+ var level uint8
+ var currentField string
+ for _, c := range header {
+ if c == '(' {
+ level++
+ } else if c == ')' && level > 0 {
+ level--
+ }
+ if c == ',' && level == 0 {
+ headers = append(headers, currentField)
+ currentField = ""
+ } else {
+ currentField = currentField + string(c)
+ }
+ }
+ headers = append(headers, currentField) // add last field
+ return
+}
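For example (taken from the cases in TestSplitInlineHeader below), commas inside type arguments do not split a field:

    splitInlineHeader("a.date(January 2, 2006),b.binary(hex)")
    // -> []string{"a.date(January 2, 2006)", "b.binary(hex)"}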
+
+// getInputReader returns an implementation of InputReader based on the input type
+func (imp *MongoImport) getInputReader(in io.Reader) (InputReader, error) {
+ var colSpecs []ColumnSpec
+ var headers []string
+ var err error
+ if imp.InputOptions.Fields != nil {
+ headers = splitInlineHeader(*imp.InputOptions.Fields)
+ } else if imp.InputOptions.FieldFile != nil {
+ headers, err = util.GetFieldsFromFile(*imp.InputOptions.FieldFile)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if imp.InputOptions.ColumnsHaveTypes {
+ colSpecs, err = ParseTypedHeaders(headers, ParsePG(imp.InputOptions.ParseGrace))
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ colSpecs = ParseAutoHeaders(headers)
+ }
+
+ // header fields validation can only happen once we have an input reader
+ if !imp.InputOptions.HeaderLine {
+ if err = validateReaderFields(ColumnNames(colSpecs)); err != nil {
+ return nil, err
+ }
+ }
+
+ out := os.Stdout
+
+ ignoreBlanks := imp.IngestOptions.IgnoreBlanks && imp.InputOptions.Type != JSON
+ if imp.InputOptions.Type == CSV {
+ return NewCSVInputReader(colSpecs, in, out, imp.IngestOptions.NumDecodingWorkers, ignoreBlanks), nil
+ } else if imp.InputOptions.Type == TSV {
+ return NewTSVInputReader(colSpecs, in, out, imp.IngestOptions.NumDecodingWorkers, ignoreBlanks), nil
+ }
+ return NewJSONInputReader(imp.InputOptions.JSONArray, in, imp.IngestOptions.NumDecodingWorkers), nil
+}
diff --git a/src/mongo/gotools/mongoimport/mongoimport_test.go b/src/mongo/gotools/mongoimport/mongoimport_test.go
new file mode 100644
index 00000000000..ff10ec5525c
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/mongoimport_test.go
@@ -0,0 +1,757 @@
+package mongoimport
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+)
+
+const (
+ testDb = "db"
+ testCollection = "c"
+)
+
+// checkOnlyHasDocuments returns an error if the documents in the test
+// collection don't exactly match those that are passed in
+func checkOnlyHasDocuments(sessionProvider db.SessionProvider, expectedDocuments []bson.M) error {
+ session, err := sessionProvider.GetSession()
+ if err != nil {
+ return err
+ }
+ defer session.Close()
+
+ collection := session.DB(testDb).C(testCollection)
+ dbDocuments := []bson.M{}
+ err = collection.Find(nil).Sort("_id").All(&dbDocuments)
+ if err != nil {
+ return err
+ }
+ if len(dbDocuments) != len(expectedDocuments) {
+ return fmt.Errorf("document count mismatch: expected %#v, got %#v",
+ len(expectedDocuments), len(dbDocuments))
+ }
+ for index := range dbDocuments {
+ if !reflect.DeepEqual(dbDocuments[index], expectedDocuments[index]) {
+ return fmt.Errorf("document mismatch: expected %#v, got %#v",
+ expectedDocuments[index], dbDocuments[index])
+ }
+ }
+ return nil
+}
+
+// getBasicToolOptions returns the ToolOptions used by these tests to
+// instantiate a session provider for calls to StreamDocument
+func getBasicToolOptions() *options.ToolOptions {
+ general := &options.General{}
+ ssl := testutil.GetSSLOptions()
+ auth := testutil.GetAuthOptions()
+ namespace := &options.Namespace{
+ DB: testDb,
+ Collection: testCollection,
+ }
+ connection := &options.Connection{
+ Host: "localhost",
+ Port: db.DefaultTestPort,
+ }
+ return &options.ToolOptions{
+ General: general,
+ SSL: &ssl,
+ Namespace: namespace,
+ Connection: connection,
+ Auth: &auth,
+ }
+}
+
+func NewMongoImport() (*MongoImport, error) {
+ toolOptions := getBasicToolOptions()
+ inputOptions := &InputOptions{
+ ParseGrace: "stop",
+ }
+ ingestOptions := &IngestOptions{}
+ provider, err := db.NewSessionProvider(*toolOptions)
+ if err != nil {
+ return nil, err
+ }
+ return &MongoImport{
+ ToolOptions: toolOptions,
+ InputOptions: inputOptions,
+ IngestOptions: ingestOptions,
+ SessionProvider: provider,
+ }, nil
+}
+
+func TestSplitInlineHeader(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("handle normal, untyped headers", t, func() {
+ fields := []string{"foo.bar", "baz", "boo"}
+ header := strings.Join(fields, ",")
+ Convey("with '"+header+"'", func() {
+ So(splitInlineHeader(header), ShouldResemble, fields)
+ })
+ })
+ Convey("handle typed headers", t, func() {
+ fields := []string{"foo.bar.string()", "baz.date(January 2 2006)", "boo.binary(hex)"}
+ header := strings.Join(fields, ",")
+ Convey("with '"+header+"'", func() {
+ So(splitInlineHeader(header), ShouldResemble, fields)
+ })
+ })
+ Convey("handle typed headers that include commas", t, func() {
+ fields := []string{"foo.bar.date(,,,,)", "baz.date(January 2, 2006)", "boo.binary(hex)"}
+ header := strings.Join(fields, ",")
+ Convey("with '"+header+"'", func() {
+ So(splitInlineHeader(header), ShouldResemble, fields)
+ })
+ })
+}
+
+func TestMongoImportValidateSettings(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Given a mongoimport instance for validation, ", t, func() {
+ Convey("an error should be thrown if no collection is given", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.ToolOptions.Namespace.DB = ""
+ imp.ToolOptions.Namespace.Collection = ""
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ })
+
+ Convey("an error should be thrown if an invalid type is given", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = "invalid"
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ })
+
+ Convey("an error should be thrown if neither --headerline is supplied "+
+ "nor --fields/--fieldFile", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ })
+
+ Convey("no error should be thrown if --headerline is not supplied "+
+ "but --fields is supplied", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fields := "a,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.InputOptions.Type = CSV
+ So(imp.ValidateSettings([]string{}), ShouldBeNil)
+ })
+
+ Convey("no error should be thrown if no input type is supplied", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ So(imp.ValidateSettings([]string{}), ShouldBeNil)
+ })
+
+ Convey("no error should be thrown if there's just one positional argument", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ So(imp.ValidateSettings([]string{"a"}), ShouldBeNil)
+ })
+
+ Convey("an error should be thrown if --file is used with one positional argument", func() {
+ imp, err := NewMongoImport()
+ imp.InputOptions.File = "abc"
+ So(err, ShouldBeNil)
+ So(imp.ValidateSettings([]string{"a"}), ShouldNotBeNil)
+ })
+
+ Convey("an error should be thrown if there's more than one positional argument", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ So(imp.ValidateSettings([]string{"a", "b"}), ShouldNotBeNil)
+ })
+
+ Convey("an error should be thrown if --headerline is used with JSON input", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.HeaderLine = true
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ })
+
+ Convey("an error should be thrown if --fields is used with JSON input", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fields := ""
+ imp.InputOptions.Fields = &fields
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ fields = "a,b,c"
+ imp.InputOptions.Fields = &fields
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ })
+
+ Convey("an error should be thrown if --fieldFile is used with JSON input", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fieldFile := ""
+ imp.InputOptions.FieldFile = &fieldFile
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ fieldFile = "test.csv"
+ imp.InputOptions.FieldFile = &fieldFile
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ })
+
+ Convey("an error should be thrown if --ignoreBlanks is used with JSON input", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.IngestOptions.IgnoreBlanks = true
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ })
+
+ Convey("no error should be thrown if --headerline is not supplied "+
+ "but --fieldFile is supplied", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fieldFile := "test.csv"
+ imp.InputOptions.FieldFile = &fieldFile
+ imp.InputOptions.Type = CSV
+ So(imp.ValidateSettings([]string{}), ShouldBeNil)
+ })
+
+ Convey("an error should be thrown if a field in the --upsertFields "+
+ "argument starts with a dollar sign", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.HeaderLine = true
+ imp.InputOptions.Type = CSV
+ imp.IngestOptions.Upsert = true
+ imp.IngestOptions.UpsertFields = "a,$b,c"
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ imp.IngestOptions.UpsertFields = "a,.b,c"
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ })
+
+ Convey("no error should be thrown if --upsertFields is supplied without "+
+ "--upsert", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.HeaderLine = true
+ imp.InputOptions.Type = CSV
+ imp.IngestOptions.UpsertFields = "a,b,c"
+ So(imp.ValidateSettings([]string{}), ShouldBeNil)
+ })
+
+ Convey("if --upsert is used without --upsertFields, _id should be set as "+
+ "the upsert field", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.HeaderLine = true
+ imp.InputOptions.Type = CSV
+ imp.IngestOptions.Upsert = true
+ imp.IngestOptions.UpsertFields = ""
+ So(imp.ValidateSettings([]string{}), ShouldBeNil)
+ So(imp.upsertFields, ShouldResemble, []string{"_id"})
+ })
+
+ Convey("no error should be thrown if all fields in the --upsertFields "+
+ "argument are valid", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.HeaderLine = true
+ imp.InputOptions.Type = CSV
+ imp.IngestOptions.Upsert = true
+ imp.IngestOptions.UpsertFields = "a,b,c"
+ So(imp.ValidateSettings([]string{}), ShouldBeNil)
+ })
+
+ Convey("no error should be thrown if --fields is supplied with CSV import", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fields := "a,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.InputOptions.Type = CSV
+ So(imp.ValidateSettings([]string{}), ShouldBeNil)
+ })
+
+ Convey("an error should be thrown if an empty --fields is supplied with CSV import", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fields := ""
+ imp.InputOptions.Fields = &fields
+ imp.InputOptions.Type = CSV
+ So(imp.ValidateSettings([]string{}), ShouldBeNil)
+ })
+
+ Convey("no error should be thrown if --fieldFile is supplied with CSV import", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fieldFile := "test.csv"
+ imp.InputOptions.FieldFile = &fieldFile
+ imp.InputOptions.Type = CSV
+ So(imp.ValidateSettings([]string{}), ShouldBeNil)
+ })
+
+ Convey("an error should be thrown if no collection and no file is supplied", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fieldFile := "test.csv"
+ imp.InputOptions.FieldFile = &fieldFile
+ imp.InputOptions.Type = CSV
+ imp.ToolOptions.Namespace.Collection = ""
+ So(imp.ValidateSettings([]string{}), ShouldNotBeNil)
+ })
+
+ Convey("no error should be thrown if --file is used (without -c) supplied "+
+ "- the file name should be used as the collection name", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.File = "input"
+ imp.InputOptions.HeaderLine = true
+ imp.InputOptions.Type = CSV
+ imp.ToolOptions.Namespace.Collection = ""
+ So(imp.ValidateSettings([]string{}), ShouldBeNil)
+ So(imp.ToolOptions.Namespace.Collection, ShouldEqual,
+ imp.InputOptions.File)
+ })
+
+ Convey("with no collection name and a file name the base name of the "+
+ "file (without the extension) should be used as the collection name", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.File = "/path/to/input/file/dot/input.txt"
+ imp.InputOptions.HeaderLine = true
+ imp.InputOptions.Type = CSV
+ imp.ToolOptions.Namespace.Collection = ""
+ So(imp.ValidateSettings([]string{}), ShouldBeNil)
+ So(imp.ToolOptions.Namespace.Collection, ShouldEqual, "input")
+ })
+ })
+}
+
+func TestGetSourceReader(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("Given a mongoimport instance, on calling getSourceReader", t,
+ func() {
+ Convey("an error should be thrown if the given file referenced by "+
+ "the reader does not exist", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.File = "/path/to/input/file/dot/input.txt"
+ imp.InputOptions.Type = CSV
+ imp.ToolOptions.Namespace.Collection = ""
+ _, _, err = imp.getSourceReader()
+ So(err, ShouldNotBeNil)
+ })
+
+ Convey("no error should be thrown if the file exists", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.File = "testdata/test_array.json"
+ imp.InputOptions.Type = JSON
+ _, _, err = imp.getSourceReader()
+ So(err, ShouldBeNil)
+ })
+
+ Convey("no error should be thrown if stdin is used", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.File = ""
+ _, _, err = imp.getSourceReader()
+ So(err, ShouldBeNil)
+ })
+ })
+}
+
+func TestGetInputReader(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("Given a io.Reader on calling getInputReader", t, func() {
+ Convey("should parse --fields using valid csv escaping", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Fields = new(string)
+ *imp.InputOptions.Fields = "foo.auto(),bar.date(January 2, 2006)"
+ imp.InputOptions.File = "/path/to/input/file/dot/input.txt"
+ imp.InputOptions.ColumnsHaveTypes = true
+ _, err = imp.getInputReader(&os.File{})
+ So(err, ShouldBeNil)
+ })
+ Convey("should complain about non-escaped new lines in --fields", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Fields = new(string)
+ *imp.InputOptions.Fields = "foo.auto(),\nblah.binary(hex),bar.date(January 2, 2006)"
+ imp.InputOptions.File = "/path/to/input/file/dot/input.txt"
+ imp.InputOptions.ColumnsHaveTypes = true
+ _, err = imp.getInputReader(&os.File{})
+ So(err, ShouldBeNil)
+ })
+ Convey("no error should be thrown if neither --fields nor --fieldFile "+
+ "is used", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.File = "/path/to/input/file/dot/input.txt"
+ _, err = imp.getInputReader(&os.File{})
+ So(err, ShouldBeNil)
+ })
+ Convey("no error should be thrown if --fields is used", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fields := "a,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.InputOptions.File = "/path/to/input/file/dot/input.txt"
+ _, err = imp.getInputReader(&os.File{})
+ So(err, ShouldBeNil)
+ })
+ Convey("no error should be thrown if --fieldFile is used and it "+
+ "references a valid file", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fieldFile := "testdata/test.csv"
+ imp.InputOptions.FieldFile = &fieldFile
+ _, err = imp.getInputReader(&os.File{})
+ So(err, ShouldBeNil)
+ })
+ Convey("an error should be thrown if --fieldFile is used and it "+
+ "references an invalid file", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fieldFile := "/path/to/input/file/dot/input.txt"
+ imp.InputOptions.FieldFile = &fieldFile
+ _, err = imp.getInputReader(&os.File{})
+ So(err, ShouldNotBeNil)
+ })
+ Convey("no error should be thrown for CSV import inputs", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ _, err = imp.getInputReader(&os.File{})
+ So(err, ShouldBeNil)
+ })
+ Convey("no error should be thrown for TSV import inputs", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = TSV
+ _, err = imp.getInputReader(&os.File{})
+ So(err, ShouldBeNil)
+ })
+ Convey("no error should be thrown for JSON import inputs", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = JSON
+ _, err = imp.getInputReader(&os.File{})
+ So(err, ShouldBeNil)
+ })
+ Convey("an error should be thrown if --fieldFile fields are invalid", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fieldFile := "testdata/test_fields_invalid.txt"
+ imp.InputOptions.FieldFile = &fieldFile
+ file, err := os.Open(fieldFile)
+ So(err, ShouldBeNil)
+ _, err = imp.getInputReader(file)
+ So(err, ShouldNotBeNil)
+ })
+ Convey("no error should be thrown if --fieldFile fields are valid", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ fieldFile := "testdata/test_fields_valid.txt"
+ imp.InputOptions.FieldFile = &fieldFile
+ file, err := os.Open(fieldFile)
+ So(err, ShouldBeNil)
+ _, err = imp.getInputReader(file)
+ So(err, ShouldBeNil)
+ })
+ })
+}
+
+func TestImportDocuments(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.IntegrationTestType)
+ Convey("With a mongoimport instance", t, func() {
+ Reset(func() {
+ sessionProvider, err := db.NewSessionProvider(*getBasicToolOptions())
+ if err != nil {
+ t.Fatalf("error getting session provider session: %v", err)
+ }
+ session, err := sessionProvider.GetSession()
+ if err != nil {
+ t.Fatalf("error getting session: %v", err)
+ }
+ defer session.Close()
+ session.DB(testDb).C(testCollection).DropCollection()
+ })
+ Convey("no error should be thrown for CSV import on test data and all "+
+ "CSV data lines should be imported correctly", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test.csv"
+ fields := "a,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.IngestOptions.WriteConcern = "majority"
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldBeNil)
+ So(numImported, ShouldEqual, 3)
+ })
+ Convey("an error should be thrown for JSON import on test data that is "+
+ "JSON array", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.File = "testdata/test_array.json"
+ imp.IngestOptions.WriteConcern = "majority"
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldNotBeNil)
+ So(numImported, ShouldEqual, 0)
+ })
+ Convey("TOOLS-247: no error should be thrown for JSON import on test "+
+ "data and all documents should be imported correctly", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.File = "testdata/test_plain2.json"
+ imp.IngestOptions.WriteConcern = "majority"
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldBeNil)
+ So(numImported, ShouldEqual, 10)
+ })
+ Convey("CSV import with --ignoreBlanks should import only non-blank fields", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test_blanks.csv"
+ fields := "_id,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.IngestOptions.IgnoreBlanks = true
+
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldBeNil)
+ So(numImported, ShouldEqual, 3)
+ expectedDocuments := []bson.M{
+ bson.M{"_id": 1, "b": int(2)},
+ bson.M{"_id": 5, "c": "6e"},
+ bson.M{"_id": 7, "b": int(8), "c": int(6)},
+ }
+ So(checkOnlyHasDocuments(*imp.SessionProvider, expectedDocuments), ShouldBeNil)
+ })
+ Convey("CSV import without --ignoreBlanks should include blanks", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test_blanks.csv"
+ fields := "_id,b,c"
+ imp.InputOptions.Fields = &fields
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldBeNil)
+ So(numImported, ShouldEqual, 3)
+ expectedDocuments := []bson.M{
+ bson.M{"_id": 1, "b": int(2), "c": ""},
+ bson.M{"_id": 5, "b": "", "c": "6e"},
+ bson.M{"_id": 7, "b": int(8), "c": int(6)},
+ }
+ So(checkOnlyHasDocuments(*imp.SessionProvider, expectedDocuments), ShouldBeNil)
+ })
+ Convey("no error should be thrown for CSV import on test data with --upsertFields", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test.csv"
+ fields := "_id,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.IngestOptions.UpsertFields = "b,c"
+ imp.IngestOptions.MaintainInsertionOrder = true
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldBeNil)
+ So(numImported, ShouldEqual, 3)
+ expectedDocuments := []bson.M{
+ bson.M{"_id": 1, "b": int(2), "c": int(3)},
+ bson.M{"_id": 3, "b": 5.4, "c": "string"},
+ bson.M{"_id": 5, "b": int(6), "c": int(6)},
+ }
+ So(checkOnlyHasDocuments(*imp.SessionProvider, expectedDocuments), ShouldBeNil)
+ })
+ Convey("no error should be thrown for CSV import on test data with "+
+ "--stopOnError. Only documents before error should be imported", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test.csv"
+ fields := "_id,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.IngestOptions.StopOnError = true
+ imp.IngestOptions.MaintainInsertionOrder = true
+ imp.IngestOptions.WriteConcern = "majority"
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldBeNil)
+ So(numImported, ShouldEqual, 3)
+ expectedDocuments := []bson.M{
+ bson.M{"_id": 1, "b": int(2), "c": int(3)},
+ bson.M{"_id": 3, "b": 5.4, "c": "string"},
+ bson.M{"_id": 5, "b": int(6), "c": int(6)},
+ }
+ So(checkOnlyHasDocuments(*imp.SessionProvider, expectedDocuments), ShouldBeNil)
+ })
+ Convey("CSV import with duplicate _id's should not error if --stopOnError is not set", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test_duplicate.csv"
+ fields := "_id,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.IngestOptions.StopOnError = false
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldBeNil)
+ So(numImported, ShouldEqual, 5)
+
+ expectedDocuments := []bson.M{
+ bson.M{"_id": 1, "b": int(2), "c": int(3)},
+ bson.M{"_id": 3, "b": 5.4, "c": "string"},
+ bson.M{"_id": 5, "b": int(6), "c": int(6)},
+ bson.M{"_id": 8, "b": int(6), "c": int(6)},
+ }
+ // all docs except the one with duplicate _id - should be imported
+ So(checkOnlyHasDocuments(*imp.SessionProvider, expectedDocuments), ShouldBeNil)
+ })
+ Convey("no error should be thrown for CSV import on test data with --drop", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test.csv"
+ fields := "_id,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.IngestOptions.Drop = true
+ imp.IngestOptions.MaintainInsertionOrder = true
+ imp.IngestOptions.WriteConcern = "majority"
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldBeNil)
+ So(numImported, ShouldEqual, 3)
+ expectedDocuments := []bson.M{
+ bson.M{"_id": 1, "b": int(2), "c": int(3)},
+ bson.M{"_id": 3, "b": 5.4, "c": "string"},
+ bson.M{"_id": 5, "b": int(6), "c": int(6)},
+ }
+ So(checkOnlyHasDocuments(*imp.SessionProvider, expectedDocuments), ShouldBeNil)
+ })
+ Convey("CSV import on test data with --headerLine should succeed", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test.csv"
+ fields := "_id,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.InputOptions.HeaderLine = true
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldBeNil)
+ So(numImported, ShouldEqual, 2)
+ })
+ Convey("EOF should be thrown for CSV import with --headerLine if file is empty", func() {
+ csvFile, err := ioutil.TempFile("", "mongoimport_")
+ So(err, ShouldBeNil)
+ csvFile.Close()
+
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = csvFile.Name()
+ fields := "_id,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.InputOptions.HeaderLine = true
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldEqual, io.EOF)
+ So(numImported, ShouldEqual, 0)
+ })
+ Convey("CSV import with --upsert and --upsertFields should succeed", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test.csv"
+ fields := "_id,c,b"
+ imp.InputOptions.Fields = &fields
+ imp.IngestOptions.UpsertFields = "_id"
+ imp.IngestOptions.MaintainInsertionOrder = true
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldBeNil)
+ So(numImported, ShouldEqual, 3)
+ expectedDocuments := []bson.M{
+ bson.M{"_id": 1, "c": int(2), "b": int(3)},
+ bson.M{"_id": 3, "c": 5.4, "b": "string"},
+ bson.M{"_id": 5, "c": int(6), "b": int(6)},
+ }
+ So(checkOnlyHasDocuments(*imp.SessionProvider, expectedDocuments), ShouldBeNil)
+ })
+ Convey("CSV import with --upsert/--upsertFields with duplicate id should succeed "+
+ "if stopOnError is not set", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test_duplicate.csv"
+ fields := "_id,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.IngestOptions.Upsert = true
+ imp.upsertFields = []string{"_id"}
+ numImported, err := imp.ImportDocuments()
+ So(err, ShouldBeNil)
+ So(numImported, ShouldEqual, 5)
+ expectedDocuments := []bson.M{
+ bson.M{"_id": 1, "b": int(2), "c": int(3)},
+ bson.M{"_id": 3, "b": 5.4, "c": "string"},
+ bson.M{"_id": 5, "b": int(6), "c": int(9)},
+ bson.M{"_id": 8, "b": int(6), "c": int(6)},
+ }
+ So(checkOnlyHasDocuments(*imp.SessionProvider, expectedDocuments), ShouldBeNil)
+ })
+ Convey("an error should be thrown for CSV import on test data with "+
+ "duplicate _id if --stopOnError is set", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test_duplicate.csv"
+ fields := "_id,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.IngestOptions.StopOnError = true
+ imp.IngestOptions.WriteConcern = "1"
+ imp.IngestOptions.MaintainInsertionOrder = true
+ _, err = imp.ImportDocuments()
+ So(err, ShouldNotBeNil)
+ expectedDocuments := []bson.M{
+ bson.M{"_id": 1, "b": int(2), "c": int(3)},
+ bson.M{"_id": 3, "b": 5.4, "c": "string"},
+ bson.M{"_id": 5, "b": int(6), "c": int(6)},
+ }
+ So(checkOnlyHasDocuments(*imp.SessionProvider, expectedDocuments), ShouldBeNil)
+ })
+ Convey("an error should be thrown for JSON import on test data that "+
+ "is a JSON array without passing --jsonArray", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.File = "testdata/test_array.json"
+ imp.IngestOptions.WriteConcern = "1"
+ _, err = imp.ImportDocuments()
+ So(err, ShouldNotBeNil)
+ })
+ Convey("an error should be thrown if a plain JSON file is supplied", func() {
+ fileHandle, err := os.Open("testdata/test_plain.json")
+ So(err, ShouldBeNil)
+ jsonInputReader := NewJSONInputReader(true, fileHandle, 1)
+ docChan := make(chan bson.D, 1)
+ So(jsonInputReader.StreamDocument(true, docChan), ShouldNotBeNil)
+ })
+ Convey("an error should be thrown for invalid CSV import on test data", func() {
+ imp, err := NewMongoImport()
+ So(err, ShouldBeNil)
+ imp.InputOptions.Type = CSV
+ imp.InputOptions.File = "testdata/test_bad.csv"
+ fields := "_id,b,c"
+ imp.InputOptions.Fields = &fields
+ imp.IngestOptions.StopOnError = true
+ imp.IngestOptions.WriteConcern = "1"
+ imp.IngestOptions.MaintainInsertionOrder = true
+ _, err = imp.ImportDocuments()
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/mongoimport/options.go b/src/mongo/gotools/mongoimport/options.go
new file mode 100644
index 00000000000..2fe252fb527
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/options.go
@@ -0,0 +1,79 @@
+package mongoimport
+
+var Usage = `<options> <file>
+
+Import CSV, TSV, or JSON data into MongoDB. If no file is provided, mongoimport reads from stdin.
+
+See http://docs.mongodb.org/manual/reference/program/mongoimport/ for more information.`
+
+// InputOptions defines the set of options for reading input data.
+type InputOptions struct {
+ // Fields is an option to directly specify comma-separated fields for CSV and TSV imports.
+ Fields *string `long:"fields" value-name:"<field>[,<field>]*" short:"f" description:"comma separated list of fields, e.g. -f name,age"`
+
+ // FieldFile is a filename that refers to a list of fields to import, 1 per line.
+ FieldFile *string `long:"fieldFile" value-name:"<filename>" description:"file with field names - 1 per line"`
+
+ // Specifies the location and name of a file containing the data to import.
+ File string `long:"file" value-name:"<filename>" description:"file to import from; if not specified, stdin is used"`
+
+ // Treats the input source's first line as field list (csv and tsv only).
+ HeaderLine bool `long:"headerline" description:"use first line in input source as the field list (CSV and TSV only)"`
+
+ // Indicates that the underlying input source contains a single JSON array with the documents to import.
+ JSONArray bool `long:"jsonArray" description:"treat input source as a JSON array"`
+
+ // Indicates how to handle type coercion failures
+ ParseGrace string `long:"parseGrace" value-name:"<grace>" default:"stop" description:"controls behavior when type coercion fails - one of: autoCast, skipField, skipRow, stop (defaults to 'stop')"`
+
+ // Specifies the file type to import. The default format is JSON, but it’s possible to import CSV and TSV files.
+ Type string `long:"type" value-name:"<type>" default:"json" default-mask:"-" description:"input format to import: json, csv, or tsv (defaults to 'json')"`
+
+ // Indicates that field names include type descriptions
+	ColumnsHaveTypes bool `long:"columnsHaveTypes" description:"indicates that the field list (from --fields, --fieldFile, or --headerline) specifies types; they must be in the form of '<colName>.<type>(<arg>)'. The type can be one of: auto, binary, boolean, date, date_go, date_ms, date_oracle, decimal, double, int32, int64, string. For each of the date types, the argument is a datetime layout string. For the binary type, the argument can be one of: base32, base64, hex. All other types take an empty argument. Only valid for CSV and TSV imports. e.g. zipcode.string(), thumbnail.binary(base64)"`
+}
+
+// Name returns a description of the InputOptions struct.
+func (_ *InputOptions) Name() string {
+ return "input"
+}
+
+// IngestOptions defines the set of options for storing data.
+type IngestOptions struct {
+ // Drops target collection before importing.
+ Drop bool `long:"drop" description:"drop collection before inserting documents"`
+
+ // Ignores fields with empty values in CSV and TSV imports.
+ IgnoreBlanks bool `long:"ignoreBlanks" description:"ignore fields with empty values in CSV and TSV"`
+
+ // Indicates that documents will be inserted in the order of their appearance in the input source.
+ MaintainInsertionOrder bool `long:"maintainInsertionOrder" description:"insert documents in the order of their appearance in the input source"`
+
+ // Sets the number of insertion routines to use
+ NumInsertionWorkers int `short:"j" value-name:"<number>" long:"numInsertionWorkers" description:"number of insert operations to run concurrently (defaults to 1)" default:"1" default-mask:"-"`
+
+ // Forces mongoimport to halt the import operation at the first insert or upsert error.
+ StopOnError bool `long:"stopOnError" description:"stop importing at first insert/upsert error"`
+
+ // Modifies the import process to update existing objects in the database if they match --upsertFields.
+ Upsert bool `long:"upsert" description:"insert or update objects that already exist"`
+
+ // Specifies a list of fields for the query portion of the upsert; defaults to _id field.
+ UpsertFields string `long:"upsertFields" value-name:"<field>[,<field>]*" description:"comma-separated fields for the query part of the upsert"`
+
+ // Sets write concern level for write operations.
+ WriteConcern string `long:"writeConcern" default:"majority" value-name:"<write-concern-specifier>" default-mask:"-" description:"write concern options e.g. --writeConcern majority, --writeConcern '{w: 3, wtimeout: 500, fsync: true, j: true}' (defaults to 'majority')"`
+
+ // Indicates that the server should bypass document validation on import.
+ BypassDocumentValidation bool `long:"bypassDocumentValidation" description:"bypass document validation"`
+
+ // Specifies the number of threads to use in processing data read from the input source
+ NumDecodingWorkers int `long:"numDecodingWorkers" default:"0" hidden:"true"`
+
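+	// BulkBufferSize is the number of documents to buffer per bulk-insert batch (hidden --batchSize option).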
+ BulkBufferSize int `long:"batchSize" default:"1000" hidden:"true"`
+}
+
+// Name returns a description of the IngestOptions struct.
+func (_ *IngestOptions) Name() string {
+ return "ingest"
+}
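+
+// exampleOptions is an editor's sketch, not part of the original change: it
+// shows how a command line such as
+//
+//	mongoimport --type csv --file people.csv --fields name,age --drop --stopOnError
+//
+// maps onto the option structs above once the flag parser has run. The field
+// values here are illustrative only.
+func exampleOptions() (InputOptions, IngestOptions) {
+	fields := "name,age"
+	input := InputOptions{
+		Type:   "csv",        // --type csv
+		File:   "people.csv", // --file people.csv
+		Fields: &fields,      // --fields name,age
+	}
+	ingest := IngestOptions{
+		Drop:        true, // --drop
+		StopOnError: true, // --stopOnError
+	}
+	return input, ingest
+}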
diff --git a/src/mongo/gotools/mongoimport/testdata/test.csv b/src/mongo/gotools/mongoimport/testdata/test.csv
new file mode 100644
index 00000000000..357d40e6da3
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test.csv
@@ -0,0 +1,3 @@
+1,2,3
+3,5.4,string
+5,6,6
diff --git a/src/mongo/gotools/mongoimport/testdata/test.tsv b/src/mongo/gotools/mongoimport/testdata/test.tsv
new file mode 100644
index 00000000000..a6d5298b40a
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test.tsv
@@ -0,0 +1,3 @@
+1 2 3
+3 4.6 5
+5 string 6
diff --git a/src/mongo/gotools/mongoimport/testdata/test_array.json b/src/mongo/gotools/mongoimport/testdata/test_array.json
new file mode 100644
index 00000000000..c4642157433
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_array.json
@@ -0,0 +1,31 @@
+[ {"a": 1.2, "b":"a", "c": 0.4} ,
+
+
+{"a": 2.4, "b":"string", "c": 52.9},
+{"a": 3, "b":"string", "c": 52},
+{"a": 4, "b":"string", "c": 52},
+{"a": 5, "b":"string", "c": 52},
+{"a": 6, "b":"string", "c": 52},
+{"a": 7, "b":"string", "c": 52} ,
+{"a": 8, "b":"string", "c": 52},
+{"a": 9, "b":"string", "c": 52},
+{"a": 10, "b":"string", "c": 52},
+{"a": 11, "b":"string", "c": 52},
+{"a": 12, "b":"string", "c": 52},
+{"a": 13, "b":"string", "c": 52},
+{"a": 14, "b":"string", "c": 52},
+{"a": 15, "b":"string", "c": 52},
+ {"a": 16, "b":"string", "c": 52},
+{"a": 17, "b":"string", "c": 52},
+{"a": 18, "b":"string", "c": 52},
+{"a": 29, "b":"string", "c": 52},
+{"a": 20, "b":"string", "c": 52},
+{"a": 21, "b":"string", "c": 52}
+
+ ,
+{"a": 22, "b":"string", "c": 52},
+{"a": 23, "b":"string", "c": 52},
+{"a": 24, "b":"string", "c": 52},
+{"a": 25, "b":"string", "c": 52},
+{"a": 25, "b":"string", "c": 52},
+{"a": 27, "b":"value", "c": 65}] \ No newline at end of file
diff --git a/src/mongo/gotools/mongoimport/testdata/test_bad.csv b/src/mongo/gotools/mongoimport/testdata/test_bad.csv
new file mode 100644
index 00000000000..c1d6aeeca88
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_bad.csv
@@ -0,0 +1,3 @@
+1,2,3
+3,5".4,string
+5,6,6
diff --git a/src/mongo/gotools/mongoimport/testdata/test_blanks.csv b/src/mongo/gotools/mongoimport/testdata/test_blanks.csv
new file mode 100644
index 00000000000..e94daca6d0d
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_blanks.csv
@@ -0,0 +1,3 @@
+1,2,
+5,,6e
+7,8,6 \ No newline at end of file
diff --git a/src/mongo/gotools/mongoimport/testdata/test_bom.csv b/src/mongo/gotools/mongoimport/testdata/test_bom.csv
new file mode 100644
index 00000000000..eef9b0a80c5
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_bom.csv
@@ -0,0 +1,2 @@
+1,2,3
+4,5,6
diff --git a/src/mongo/gotools/mongoimport/testdata/test_bom.json b/src/mongo/gotools/mongoimport/testdata/test_bom.json
new file mode 100644
index 00000000000..e813e78d234
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_bom.json
@@ -0,0 +1,2 @@
+{"a": 1, "b": 2, "c": 3}
+{"a": 4, "b": 5, "c": 6}
diff --git a/src/mongo/gotools/mongoimport/testdata/test_bom.tsv b/src/mongo/gotools/mongoimport/testdata/test_bom.tsv
new file mode 100644
index 00000000000..4c117a5ca88
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_bom.tsv
@@ -0,0 +1 @@
+1 2 3
diff --git a/src/mongo/gotools/mongoimport/testdata/test_duplicate.csv b/src/mongo/gotools/mongoimport/testdata/test_duplicate.csv
new file mode 100644
index 00000000000..137f668e25a
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_duplicate.csv
@@ -0,0 +1,5 @@
+1,2,3
+3,5.4,string
+5,6,6
+5,6,9
+8,6,6 \ No newline at end of file
diff --git a/src/mongo/gotools/mongoimport/testdata/test_fields_invalid.txt b/src/mongo/gotools/mongoimport/testdata/test_fields_invalid.txt
new file mode 100644
index 00000000000..90505050d51
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_fields_invalid.txt
@@ -0,0 +1,3 @@
+a
+$
+b
diff --git a/src/mongo/gotools/mongoimport/testdata/test_fields_valid.txt b/src/mongo/gotools/mongoimport/testdata/test_fields_valid.txt
new file mode 100644
index 00000000000..de980441c3a
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_fields_valid.txt
@@ -0,0 +1,3 @@
+a
+b
+c
diff --git a/src/mongo/gotools/mongoimport/testdata/test_plain.json b/src/mongo/gotools/mongoimport/testdata/test_plain.json
new file mode 100644
index 00000000000..ce158ad792b
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_plain.json
@@ -0,0 +1,3 @@
+{"a": 4, "b":"string value", "c": 1}
+{"a": 5, "b":"string value", "c": 2}
+{"a": 6, "b":"string value", "c": 3} \ No newline at end of file
diff --git a/src/mongo/gotools/mongoimport/testdata/test_plain2.json b/src/mongo/gotools/mongoimport/testdata/test_plain2.json
new file mode 100644
index 00000000000..84efc925a9d
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_plain2.json
@@ -0,0 +1,10 @@
+{"body": "'''Wei-chi''' may refer to:\n*The [[game of go]]\n*The [[Chinese word for \"crisis\"]]\n\n{{dab}}", "timestamp": {"$date": 1409836258000}, "page_id": 747205, "user": "TheQ Editor", "title": "Wei-chi"}
+{"body": "'''Frameset''' may refer to:\n\n* [[Framing (World Wide Web)]]\n* [[Bicycle frame]]\n\n{{disambig}}", "timestamp": {"$date": 1409836258000}, "page_id": 1450638, "user": "Kkj11210", "title": "Frameset"}
+{"body": "'''Poenit''' may refer to:\n*[[Poenitentiaria]] or Apostolic Penitentiary\n*[[Phut]]\n\n{{disambig}}", "timestamp": {"$date": 1409836258000}, "page_id": 379316, "user": "Omnipaedista", "title": "Poenit"}
+{"body": "In Malawi, '''Tonga''' may be:\n* [[Tonga people (Malawi)]]\n* [[Tonga language (Malawi)]]\n\n{{dab}}", "timestamp": {"$date": 1409836258000}, "page_id": 3750295, "user": "Kwamikagami", "title": "Tonga (Malawi)"}
+{"body": "'''Windows NT 6.0''' can refer to:\n*[[Windows Vista]]\n*[[Windows Server 2008]]\n\n{{disambiguation}}", "timestamp": {"$date": 1409836258000}, "page_id": 3875545, "user": "Codename Lisa", "title": "Windows NT 6.0"}
+{"body": "'''Poyen''' may refer to:\n*[[Poyen, Arkansas]], United States\n* [[Poyen, Kargil]], India\n\n{{geodis}}", "timestamp": {"$date": 1409836258000}, "page_id": 1889856, "user": "PamD", "title": "Poyen"}
+{"body": "'''Body check''' may refer to:\n*[[Checking (ice hockey)]]\n*[[Physical examination]]\n{{Disambiguation}}", "timestamp": {"$date": 1409836258000}, "page_id": 3555067, "user": "Bgheard", "title": "Body check"}
+{"body": "'''Yevtushenko''' may refer to:\n\n* [[Yevgeny Yevtushenko]]\n* [[Vadym Yevtushenko]]\n\n{{disambiguation}}", "timestamp": {"$date": 1409836258000}, "page_id": 4842284, "user": "Kkj11210", "title": "Yevtushenko"}
+{"body": "'''Tuks''' may refer to:\n*[[Tuks Senganga]], South African rapper\n* [[University of Pretoria]]\n\n{{dab}}", "timestamp": {"$date": 1409836258000}, "page_id": 490212, "user": "PamD", "title": "Tuks"}
+{"body": "'''Ethanedithiol''' may refer to:\n\n* [[1,1-Ethanedithiol]]\n* [[1,2-Ethanedithiol]]\n\n{{chemistry index}}", "timestamp": {"$date": 1409836258000}, "page_id": 4514054, "user": "Kkj11210", "title": "Ethanedithiol"} \ No newline at end of file
diff --git a/src/mongo/gotools/mongoimport/testdata/test_type.csv b/src/mongo/gotools/mongoimport/testdata/test_type.csv
new file mode 100644
index 00000000000..444321ee570
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/testdata/test_type.csv
@@ -0,0 +1,4 @@
+zip.string(),number.double()
+12345,20.2
+12345-1234,40.4
+23455,BLAH
diff --git a/src/mongo/gotools/mongoimport/tsv.go b/src/mongo/gotools/mongoimport/tsv.go
new file mode 100644
index 00000000000..09aadc67fdc
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/tsv.go
@@ -0,0 +1,163 @@
+package mongoimport
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
+const (
+ entryDelimiter = '\n'
+ tokenSeparator = "\t"
+)
+
+// TSVInputReader is a struct that implements the InputReader interface for a
+// TSV input source.
+type TSVInputReader struct {
+ // colSpecs is a list of column specifications in the BSON documents to be imported
+ colSpecs []ColumnSpec
+
+	// tsvReader is the underlying reader used to read data in from the TSV file
+ tsvReader *bufio.Reader
+
+ // tsvRejectWriter is where coercion-failed rows are written, if applicable
+ tsvRejectWriter io.Writer
+
+ // tsvRecord stores each line of input we read from the underlying reader
+ tsvRecord string
+
+ // numProcessed tracks the number of TSV records processed by the underlying reader
+ numProcessed uint64
+
+ // numDecoders is the number of concurrent goroutines to use for decoding
+ numDecoders int
+
+ // embedded sizeTracker exposes the Size() method to check the number of bytes read so far
+ sizeTracker
+
+ // ignoreBlanks is whether empty fields should be ignored
+ ignoreBlanks bool
+}
+
+// TSVConverter implements the Converter interface for TSV input.
+type TSVConverter struct {
+ colSpecs []ColumnSpec
+ data string
+ index uint64
+ ignoreBlanks bool
+ rejectWriter io.Writer
+}
+
+// NewTSVInputReader returns a TSVInputReader configured to read input from the
+// given io.Reader, extracting the specified columns only.
+func NewTSVInputReader(colSpecs []ColumnSpec, in io.Reader, rejects io.Writer, numDecoders int, ignoreBlanks bool) *TSVInputReader {
+ szCount := newSizeTrackingReader(newBomDiscardingReader(in))
+ return &TSVInputReader{
+ colSpecs: colSpecs,
+ tsvReader: bufio.NewReader(szCount),
+ tsvRejectWriter: rejects,
+ numProcessed: uint64(0),
+ numDecoders: numDecoders,
+ sizeTracker: szCount,
+ ignoreBlanks: ignoreBlanks,
+ }
+}
+
+// ReadAndValidateHeader reads the header from the underlying reader and validates
+// the header fields. It sets err if the read/validation fails.
+func (r *TSVInputReader) ReadAndValidateHeader() (err error) {
+ header, err := r.tsvReader.ReadString(entryDelimiter)
+ if err != nil {
+ return err
+ }
+ for _, field := range strings.Split(header, tokenSeparator) {
+ r.colSpecs = append(r.colSpecs, ColumnSpec{
+ Name: strings.TrimRight(field, "\r\n"),
+ Parser: new(FieldAutoParser),
+ })
+ }
+ return validateReaderFields(ColumnNames(r.colSpecs))
+}
+
+// ReadAndValidateTypedHeader reads the header from the underlying reader and validates
+// the header fields. It sets err if the read/validation fails.
+func (r *TSVInputReader) ReadAndValidateTypedHeader(parseGrace ParseGrace) (err error) {
+ header, err := r.tsvReader.ReadString(entryDelimiter)
+ if err != nil {
+ return err
+ }
+ var headerFields []string
+ for _, field := range strings.Split(header, tokenSeparator) {
+ headerFields = append(headerFields, strings.TrimRight(field, "\r\n"))
+ }
+ r.colSpecs, err = ParseTypedHeaders(headerFields, parseGrace)
+ if err != nil {
+ return err
+ }
+ return validateReaderFields(ColumnNames(r.colSpecs))
+}
+
+// StreamDocument takes a boolean indicating if the documents should be streamed
+// in read order and a channel on which to stream the documents processed from
+// the underlying reader. Returns a non-nil error if streaming fails.
+func (r *TSVInputReader) StreamDocument(ordered bool, readDocs chan bson.D) (retErr error) {
+ tsvRecordChan := make(chan Converter, r.numDecoders)
+ tsvErrChan := make(chan error)
+
+ // begin reading from source
+ go func() {
+ var err error
+ for {
+ r.tsvRecord, err = r.tsvReader.ReadString(entryDelimiter)
+ if err != nil {
+ close(tsvRecordChan)
+ if err == io.EOF {
+ tsvErrChan <- nil
+ } else {
+ r.numProcessed++
+ tsvErrChan <- fmt.Errorf("read error on entry #%v: %v", r.numProcessed, err)
+ }
+ return
+ }
+ tsvRecordChan <- TSVConverter{
+ colSpecs: r.colSpecs,
+ data: r.tsvRecord,
+ index: r.numProcessed,
+ ignoreBlanks: r.ignoreBlanks,
+ rejectWriter: r.tsvRejectWriter,
+ }
+ r.numProcessed++
+ }
+ }()
+
+ // begin processing read bytes
+ go func() {
+ tsvErrChan <- streamDocuments(ordered, r.numDecoders, tsvRecordChan, readDocs)
+ }()
+
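+	// wait for both the reader goroutine and the decoder goroutine to finish
+	// (each sends nil on success or an error)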
+ return channelQuorumError(tsvErrChan, 2)
+}
+
+// Convert implements the Converter interface for TSV input. It converts a
+// TSVConverter struct to a BSON document.
+func (c TSVConverter) Convert() (b bson.D, err error) {
+ b, err = tokensToBSON(
+ c.colSpecs,
+ strings.Split(strings.TrimRight(c.data, "\r\n"), tokenSeparator),
+ c.index,
+ c.ignoreBlanks,
+ )
+ if _, ok := err.(coercionError); ok {
+ c.Print()
+ err = nil
+ }
+ return
+}
+
+func (c TSVConverter) Print() {
+ c.rejectWriter.Write([]byte(c.data + "\n"))
+}
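+
+// readAllTSV is an editor's sketch, not part of the original change, showing
+// the intended call pattern for this reader: because StreamDocument only
+// returns after both of its goroutines finish, a sufficiently buffered
+// channel holds every decoded document by the time it returns. The column
+// names and buffer size are illustrative assumptions.
+func readAllTSV(in io.Reader, rejects io.Writer) ([]bson.D, error) {
+	colSpecs := []ColumnSpec{
+		{"a", new(FieldAutoParser), pgAutoCast, "auto"},
+		{"b", new(FieldAutoParser), pgAutoCast, "auto"},
+	}
+	r := NewTSVInputReader(colSpecs, in, rejects, 1, false)
+	docChan := make(chan bson.D, 1024) // must be large enough for the whole input
+	if err := r.StreamDocument(true, docChan); err != nil {
+		return nil, err
+	}
+	docs := make([]bson.D, 0, len(docChan))
+	for len(docChan) > 0 {
+		docs = append(docs, <-docChan)
+	}
+	return docs, nil
+}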
diff --git a/src/mongo/gotools/mongoimport/tsv_test.go b/src/mongo/gotools/mongoimport/tsv_test.go
new file mode 100644
index 00000000000..7ea33248f5a
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/tsv_test.go
@@ -0,0 +1,232 @@
+package mongoimport
+
+import (
+ "bytes"
+ "os"
+ "testing"
+
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+)
+
+func TestTSVStreamDocument(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("With a TSV input reader", t, func() {
+ Convey("integer valued strings should be converted tsv1", func() {
+ contents := "1\t2\t3e\n"
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedRead := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", "3e"},
+ }
+ r := NewTSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedRead)
+ })
+
+ Convey("valid TSV input file that starts with the UTF-8 BOM should "+
+ "not raise an error", func() {
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedRead := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", int32(3)},
+ }
+ fileHandle, err := os.Open("testdata/test_bom.tsv")
+ So(err, ShouldBeNil)
+ r := NewTSVInputReader(colSpecs, fileHandle, os.Stdout, 1, false)
+ docChan := make(chan bson.D, 2)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedRead)
+ })
+
+		Convey("quoted fields and extra columns should be parsed as strings tsv2", func() {
+ contents := "a\tb\t\"cccc,cccc\"\td\n"
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedRead := bson.D{
+ {"a", "a"},
+ {"b", "b"},
+ {"c", `"cccc,cccc"`},
+ {"field3", "d"},
+ }
+ r := NewTSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedRead)
+ })
+
+ Convey("extra columns should be prefixed with 'field'", func() {
+ contents := "1\t2\t3e\t may\n"
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedRead := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", "3e"},
+ {"field3", " may"},
+ }
+ r := NewTSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedRead)
+ })
+
+ Convey("mixed values should be parsed correctly", func() {
+ contents := "12\t13.3\tInline\t14\n"
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"d", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedRead := bson.D{
+ {"a", int32(12)},
+ {"b", 13.3},
+ {"c", "Inline"},
+ {"d", int32(14)},
+ }
+ r := NewTSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 1)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedRead)
+ })
+
+ Convey("calling StreamDocument() in succession for TSVs should "+
+ "return the correct next set of values", func() {
+ contents := "1\t2\t3\n4\t5\t6\n"
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedReads := []bson.D{
+ {
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", int32(3)},
+ }, {
+ {"a", int32(4)},
+ {"b", int32(5)},
+ {"c", int32(6)},
+ },
+ }
+ r := NewTSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, len(expectedReads))
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ for i := 0; i < len(expectedReads); i++ {
+ for j, readDocument := range <-docChan {
+ So(readDocument.Name, ShouldEqual, expectedReads[i][j].Name)
+ So(readDocument.Value, ShouldEqual, expectedReads[i][j].Value)
+ }
+ }
+ })
+
+ Convey("calling StreamDocument() in succession for TSVs that contain "+
+ "quotes should return the correct next set of values", func() {
+ contents := "1\t2\t3\n4\t\"\t6\n"
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedReadOne := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", int32(3)},
+ }
+ expectedReadTwo := bson.D{
+ {"a", int32(4)},
+ {"b", `"`},
+ {"c", int32(6)},
+ }
+ r := NewTSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ docChan := make(chan bson.D, 2)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedReadOne)
+ So(<-docChan, ShouldResemble, expectedReadTwo)
+ })
+
+ Convey("plain TSV input file sources should be parsed correctly and "+
+ "subsequent imports should parse correctly",
+ func() {
+ colSpecs := []ColumnSpec{
+ {"a", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"b", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"c", new(FieldAutoParser), pgAutoCast, "auto"},
+ }
+ expectedReadOne := bson.D{
+ {"a", int32(1)},
+ {"b", int32(2)},
+ {"c", int32(3)},
+ }
+ expectedReadTwo := bson.D{
+ {"a", int32(3)},
+ {"b", 4.6},
+ {"c", int32(5)},
+ }
+ fileHandle, err := os.Open("testdata/test.tsv")
+ So(err, ShouldBeNil)
+ r := NewTSVInputReader(colSpecs, fileHandle, os.Stdout, 1, false)
+ docChan := make(chan bson.D, 50)
+ So(r.StreamDocument(true, docChan), ShouldBeNil)
+ So(<-docChan, ShouldResemble, expectedReadOne)
+ So(<-docChan, ShouldResemble, expectedReadTwo)
+ })
+ })
+}
+
+func TestTSVReadAndValidateHeader(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("With a TSV input reader", t, func() {
+ Convey("setting the header should read the first line of the TSV", func() {
+ contents := "extraHeader1\textraHeader2\textraHeader3\n"
+ colSpecs := []ColumnSpec{}
+ r := NewTSVInputReader(colSpecs, bytes.NewReader([]byte(contents)), os.Stdout, 1, false)
+ So(r.ReadAndValidateHeader(), ShouldBeNil)
+ So(len(r.colSpecs), ShouldEqual, 3)
+ })
+ })
+}
+
+func TestTSVConvert(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ Convey("With a TSV input reader", t, func() {
+ Convey("calling convert on a TSVConverter should return the expected BSON document", func() {
+ tsvConverter := TSVConverter{
+ colSpecs: []ColumnSpec{
+ {"field1", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field2", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"field3", new(FieldAutoParser), pgAutoCast, "auto"},
+ },
+ data: "a\tb\tc",
+ index: uint64(0),
+ }
+ expectedDocument := bson.D{
+ {"field1", "a"},
+ {"field2", "b"},
+ {"field3", "c"},
+ }
+ document, err := tsvConverter.Convert()
+ So(err, ShouldBeNil)
+ So(document, ShouldResemble, expectedDocument)
+ })
+ })
+}
diff --git a/src/mongo/gotools/mongoimport/typed_fields.go b/src/mongo/gotools/mongoimport/typed_fields.go
new file mode 100644
index 00000000000..aa1bb1c0e0a
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/typed_fields.go
@@ -0,0 +1,284 @@
+package mongoimport
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/mongodb/mongo-tools/mongoimport/dateconv"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// columnType defines different types for columns that can be parsed distinctly
+type columnType int
+
+const (
+ ctAuto columnType = iota
+ ctBinary
+ ctBoolean
+ ctDate
+ ctDateGo
+ ctDateMS
+ ctDateOracle
+ ctDouble
+ ctInt32
+ ctInt64
+ ctDecimal
+ ctString
+)
+
+var (
+ columnTypeRE = regexp.MustCompile(`(?s)^(.*)\.(\w+)\((.*)\)$`)
+ columnTypeNameMap = map[string]columnType{
+ "auto": ctAuto,
+ "binary": ctBinary,
+ "boolean": ctBoolean,
+ "date": ctDate,
+ "decimal": ctDecimal,
+ "date_go": ctDateGo,
+ "date_ms": ctDateMS,
+ "date_oracle": ctDateOracle,
+ "double": ctDouble,
+ "int32": ctInt32,
+ "int64": ctInt64,
+ "string": ctString,
+ }
+)
+
+type binaryEncoding int
+
+const (
+ beBase64 binaryEncoding = iota
+ beBase32
+ beHex
+)
+
+var binaryEncodingNameMap = map[string]binaryEncoding{
+ "base64": beBase64,
+ "base32": beBase32,
+ "hex": beHex,
+}
+
+// ColumnSpec keeps information for each 'column' of import.
+type ColumnSpec struct {
+ Name string
+ Parser FieldParser
+ ParseGrace ParseGrace
+ TypeName string
+}
+
+// ColumnNames maps a slice of ColumnSpecs to their associated names.
+func ColumnNames(fs []ColumnSpec) (s []string) {
+ for _, f := range fs {
+ s = append(s, f.Name)
+ }
+ return
+}
+
+// ParseTypedHeader produces a ColumnSpec from a header item, extracting type
+// information from it. The parseGrace is passed along to the new ColumnSpec.
+func ParseTypedHeader(header string, parseGrace ParseGrace) (f ColumnSpec, err error) {
+ match := columnTypeRE.FindStringSubmatch(header)
+ if len(match) != 4 {
+ err = fmt.Errorf("could not parse type from header %s", header)
+ return
+ }
+ t, ok := columnTypeNameMap[match[2]]
+ if !ok {
+ err = fmt.Errorf("invalid type %s in header %s", match[2], header)
+ return
+ }
+ p, err := NewFieldParser(t, match[3])
+ if err != nil {
+ return
+ }
+ return ColumnSpec{match[1], p, parseGrace, match[2]}, nil
+}
+
+// ParseTypedHeaders performs ParseTypedHeader on each item, returning an
+// error if any single one fails.
+func ParseTypedHeaders(headers []string, parseGrace ParseGrace) (fs []ColumnSpec, err error) {
+ fs = make([]ColumnSpec, len(headers))
+ for i, f := range headers {
+ fs[i], err = ParseTypedHeader(f, parseGrace)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ParseAutoHeaders converts a list of header items to ColumnSpec objects, with
+// automatic parsers.
+func ParseAutoHeaders(headers []string) (fs []ColumnSpec) {
+ fs = make([]ColumnSpec, len(headers))
+ for i, f := range headers {
+ fs[i] = ColumnSpec{f, new(FieldAutoParser), pgAutoCast, "auto"}
+ }
+ return
+}
+
+// FieldParser is the interface for any parser of a field item.
+type FieldParser interface {
+ Parse(in string) (interface{}, error)
+}
+
+var (
+ escapeReplacements = []string{
+ `\\`, `\`,
+ `\(`, "(",
+ `\)`, ")",
+ `\`, "",
+ }
+ escapeReplacer = strings.NewReplacer(escapeReplacements...)
+)
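+
+// For illustration (editor's note): an argument written in a typed header as
+// `bar.date(January 2\, \(2006\))` reaches NewFieldParser as
+// `January 2\, \(2006\)` and is unescaped by escapeReplacer to
+// `January 2, (2006)` before the type's parser sees it.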
+
+// NewFieldParser yields a FieldParser corresponding to the given columnType.
+// arg is passed along to the specific type's parser, if it permits an
+// argument. An error will be raised if arg is not valid for the type's
+// parser.
+func NewFieldParser(t columnType, arg string) (parser FieldParser, err error) {
+ arg = escapeReplacer.Replace(arg)
+
+ switch t { // validate argument
+ case ctBinary:
+ case ctDate:
+ case ctDateGo:
+ case ctDateMS:
+ case ctDateOracle:
+ default:
+ if arg != "" {
+ err = fmt.Errorf("type %v does not support arguments", t)
+ return
+ }
+ }
+
+ switch t {
+ case ctBinary:
+ parser, err = NewFieldBinaryParser(arg)
+ case ctBoolean:
+ parser = new(FieldBooleanParser)
+ case ctDate:
+ fallthrough
+ case ctDateGo:
+ parser = &FieldDateParser{arg}
+ case ctDateMS:
+ parser = &FieldDateParser{dateconv.FromMS(arg)}
+ case ctDateOracle:
+ parser = &FieldDateParser{dateconv.FromOracle(arg)}
+ case ctDouble:
+ parser = new(FieldDoubleParser)
+ case ctInt32:
+ parser = new(FieldInt32Parser)
+ case ctInt64:
+ parser = new(FieldInt64Parser)
+ case ctDecimal:
+ parser = new(FieldDecimalParser)
+ case ctString:
+ parser = new(FieldStringParser)
+ default: // ctAuto
+ parser = new(FieldAutoParser)
+ }
+ return
+}
+
+func autoParse(in string) interface{} {
+ parsedInt, err := strconv.ParseInt(in, 10, 64)
+ if err == nil {
+ if math.MinInt32 <= parsedInt && parsedInt <= math.MaxInt32 {
+ return int32(parsedInt)
+ }
+ return parsedInt
+ }
+ parsedFloat, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return parsedFloat
+ }
+ return in
+}
+
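+// FieldAutoParser parses a field as int32 or int64 when the text is a valid
+// integer, as float64 when it is a valid float, and otherwise leaves it as a
+// string.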
+type FieldAutoParser struct{}
+
+func (ap *FieldAutoParser) Parse(in string) (interface{}, error) {
+ return autoParse(in), nil
+}
+
+type FieldBinaryParser struct {
+ enc binaryEncoding
+}
+
+func (bp *FieldBinaryParser) Parse(in string) (interface{}, error) {
+ switch bp.enc {
+ case beBase32:
+ return base32.StdEncoding.DecodeString(in)
+ case beBase64:
+ return base64.StdEncoding.DecodeString(in)
+ default: // beHex
+ return hex.DecodeString(in)
+ }
+}
+
+func NewFieldBinaryParser(arg string) (*FieldBinaryParser, error) {
+ enc, ok := binaryEncodingNameMap[arg]
+ if !ok {
+ return nil, fmt.Errorf("invalid binary encoding: %s", arg)
+ }
+ return &FieldBinaryParser{enc}, nil
+}
+
+type FieldBooleanParser struct{}
+
+func (bp *FieldBooleanParser) Parse(in string) (interface{}, error) {
+ if strings.ToLower(in) == "true" || in == "1" {
+ return true, nil
+ }
+ if strings.ToLower(in) == "false" || in == "0" {
+ return false, nil
+ }
+ return nil, fmt.Errorf("failed to parse boolean: %s", in)
+}
+
+type FieldDateParser struct {
+ layout string
+}
+
+func (dp *FieldDateParser) Parse(in string) (interface{}, error) {
+ return time.Parse(dp.layout, in)
+}
+
+type FieldDoubleParser struct{}
+
+func (dp *FieldDoubleParser) Parse(in string) (interface{}, error) {
+ return strconv.ParseFloat(in, 64)
+}
+
+type FieldInt32Parser struct{}
+
+func (ip *FieldInt32Parser) Parse(in string) (interface{}, error) {
+ value, err := strconv.ParseInt(in, 10, 32)
+ return int32(value), err
+}
+
+type FieldInt64Parser struct{}
+
+func (ip *FieldInt64Parser) Parse(in string) (interface{}, error) {
+ return strconv.ParseInt(in, 10, 64)
+}
+
+type FieldDecimalParser struct{}
+
+func (ip *FieldDecimalParser) Parse(in string) (interface{}, error) {
+ return bson.ParseDecimal128(in)
+}
+
+type FieldStringParser struct{}
+
+func (sp *FieldStringParser) Parse(in string) (interface{}, error) {
+ return in, nil
+}
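+
+// parseTypedExample is an editor's sketch, not part of the original change:
+// a typed header such as "price.double()" produces a ColumnSpec whose Parser
+// coerces the raw field text into the requested type (float64 here). The
+// header and value are illustrative.
+func parseTypedExample() (interface{}, error) {
+	spec, err := ParseTypedHeader("price.double()", pgAutoCast)
+	if err != nil {
+		return nil, err
+	}
+	return spec.Parser.Parse("19.99") // float64(19.99), nil
+}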
diff --git a/src/mongo/gotools/mongoimport/typed_fields_test.go b/src/mongo/gotools/mongoimport/typed_fields_test.go
new file mode 100644
index 00000000000..073710c5762
--- /dev/null
+++ b/src/mongo/gotools/mongoimport/typed_fields_test.go
@@ -0,0 +1,407 @@
+package mongoimport
+
+import (
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+ "time"
+)
+
+func init() {
+ log.SetVerbosity(&options.Verbosity{
+ VLevel: 4,
+ })
+}
+
+func TestTypedHeaderParser(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+	Convey("Using 'zip.string(),number.double(),foo.auto(),bar.date(...)'", t, func() {
+ var headers = []string{"zip.string()", "number.double()", "foo.auto()", `bar.date(January 2\, \(2006\))`}
+ var colSpecs []ColumnSpec
+ var err error
+
+ Convey("with parse grace: auto", func() {
+ colSpecs, err = ParseTypedHeaders(headers, pgAutoCast)
+ So(colSpecs, ShouldResemble, []ColumnSpec{
+ {"zip", new(FieldStringParser), pgAutoCast, "string"},
+ {"number", new(FieldDoubleParser), pgAutoCast, "double"},
+ {"foo", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"bar", &FieldDateParser{"January 2, (2006)"}, pgAutoCast, "date"},
+ })
+ So(err, ShouldBeNil)
+ })
+ Convey("with parse grace: skipRow", func() {
+ colSpecs, err = ParseTypedHeaders(headers, pgSkipRow)
+ So(colSpecs, ShouldResemble, []ColumnSpec{
+ {"zip", new(FieldStringParser), pgSkipRow, "string"},
+ {"number", new(FieldDoubleParser), pgSkipRow, "double"},
+ {"foo", new(FieldAutoParser), pgSkipRow, "auto"},
+ {"bar", &FieldDateParser{"January 2, (2006)"}, pgSkipRow, "date"},
+ })
+ So(err, ShouldBeNil)
+ })
+ })
+
+ Convey("Using various bad headers", t, func() {
+ var err error
+
+ Convey("with non-empty arguments for types that don't want them", func() {
+ _, err = ParseTypedHeader("zip.string(blah)", pgAutoCast)
+ So(err, ShouldNotBeNil)
+ _, err = ParseTypedHeader("zip.string(0)", pgAutoCast)
+ So(err, ShouldNotBeNil)
+ _, err = ParseTypedHeader("zip.int32(0)", pgAutoCast)
+ So(err, ShouldNotBeNil)
+ _, err = ParseTypedHeader("zip.int64(0)", pgAutoCast)
+ So(err, ShouldNotBeNil)
+ _, err = ParseTypedHeader("zip.double(0)", pgAutoCast)
+ So(err, ShouldNotBeNil)
+ _, err = ParseTypedHeader("zip.auto(0)", pgAutoCast)
+ So(err, ShouldNotBeNil)
+ })
+ Convey("with bad arguments for the binary type", func() {
+ _, err = ParseTypedHeader("zip.binary(blah)", pgAutoCast)
+ So(err, ShouldNotBeNil)
+ _, err = ParseTypedHeader("zip.binary(binary)", pgAutoCast)
+ So(err, ShouldNotBeNil)
+ _, err = ParseTypedHeader("zip.binary(decimal)", pgAutoCast)
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
+
+func TestAutoHeaderParser(t *testing.T) {
+	Convey("Using 'zip,number,foo'", t, func() {
+ var headers = []string{"zip", "number", "foo"}
+ var colSpecs = ParseAutoHeaders(headers)
+ So(colSpecs, ShouldResemble, []ColumnSpec{
+ {"zip", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"number", new(FieldAutoParser), pgAutoCast, "auto"},
+ {"foo", new(FieldAutoParser), pgAutoCast, "auto"},
+ })
+ })
+}
+
+func TestFieldParsers(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Using FieldAutoParser", t, func() {
+ var p, _ = NewFieldParser(ctAuto, "")
+ var value interface{}
+ var err error
+
+ Convey("parses integers when it can", func() {
+ value, err = p.Parse("2147483648")
+ So(value.(int64), ShouldEqual, int64(2147483648))
+ So(err, ShouldBeNil)
+ value, err = p.Parse("42")
+ So(value.(int32), ShouldEqual, 42)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("-2147483649")
+ So(value.(int64), ShouldEqual, int64(-2147483649))
+ })
+ Convey("parses decimals when it can", func() {
+ value, err = p.Parse("3.14159265")
+ So(value.(float64), ShouldEqual, 3.14159265)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("0.123123")
+ So(value.(float64), ShouldEqual, 0.123123)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("-123456.789")
+ So(value.(float64), ShouldEqual, -123456.789)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("-1.")
+ So(value.(float64), ShouldEqual, -1.0)
+ So(err, ShouldBeNil)
+ })
+ Convey("leaves everything else as a string", func() {
+ value, err = p.Parse("12345-6789")
+ So(value.(string), ShouldEqual, "12345-6789")
+ So(err, ShouldBeNil)
+ value, err = p.Parse("06/02/1997")
+ So(value.(string), ShouldEqual, "06/02/1997")
+ So(err, ShouldBeNil)
+ value, err = p.Parse("")
+ So(value.(string), ShouldEqual, "")
+ So(err, ShouldBeNil)
+ })
+ })
+
+ Convey("Using FieldBooleanParser", t, func() {
+ var p, _ = NewFieldParser(ctBoolean, "")
+ var value interface{}
+ var err error
+
+ Convey("parses representations of true correctly", func() {
+ value, err = p.Parse("true")
+ So(value.(bool), ShouldBeTrue)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("TrUe")
+ So(value.(bool), ShouldBeTrue)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("1")
+ So(value.(bool), ShouldBeTrue)
+ So(err, ShouldBeNil)
+ })
+ Convey("parses representations of false correctly", func() {
+ value, err = p.Parse("false")
+ So(value.(bool), ShouldBeFalse)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("FaLsE")
+ So(value.(bool), ShouldBeFalse)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("0")
+ So(value.(bool), ShouldBeFalse)
+ So(err, ShouldBeNil)
+ })
+ Convey("does not parse other boolean representations", func() {
+ _, err = p.Parse("")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("t")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("f")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("yes")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("no")
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("Using FieldBinaryParser", t, func() {
+ var value interface{}
+ var err error
+
+ Convey("using hex encoding", func() {
+ var p, _ = NewFieldParser(ctBinary, "hex")
+ Convey("parses valid hex values correctly", func() {
+ value, err = p.Parse("400a11")
+ So(value.([]byte), ShouldResemble, []byte{64, 10, 17})
+ So(err, ShouldBeNil)
+ value, err = p.Parse("400A11")
+ So(value.([]byte), ShouldResemble, []byte{64, 10, 17})
+ So(err, ShouldBeNil)
+ value, err = p.Parse("0b400A11")
+ So(value.([]byte), ShouldResemble, []byte{11, 64, 10, 17})
+ So(err, ShouldBeNil)
+ value, err = p.Parse("")
+ So(value.([]byte), ShouldResemble, []byte{})
+ So(err, ShouldBeNil)
+ })
+ })
+ Convey("using base32 encoding", func() {
+ var p, _ = NewFieldParser(ctBinary, "base32")
+ Convey("parses valid base32 values correctly", func() {
+ value, err = p.Parse("")
+ So(value.([]uint8), ShouldResemble, []uint8{})
+ So(err, ShouldBeNil)
+ value, err = p.Parse("MZXW6YTBOI======")
+ So(value.([]uint8), ShouldResemble, []uint8{102, 111, 111, 98, 97, 114})
+ So(err, ShouldBeNil)
+ })
+ })
+ Convey("using base64 encoding", func() {
+ var p, _ = NewFieldParser(ctBinary, "base64")
+ Convey("parses valid base64 values correctly", func() {
+ value, err = p.Parse("")
+ So(value.([]uint8), ShouldResemble, []uint8{})
+ So(err, ShouldBeNil)
+ value, err = p.Parse("Zm9vYmFy")
+ So(value.([]uint8), ShouldResemble, []uint8{102, 111, 111, 98, 97, 114})
+ So(err, ShouldBeNil)
+ })
+ })
+ })
+
+ Convey("Using FieldDateParser", t, func() {
+ var value interface{}
+ var err error
+
+ Convey("with Go's format", func() {
+ var p, _ = NewFieldParser(ctDateGo, "01/02/2006 3:04:05pm MST")
+ Convey("parses valid timestamps correctly", func() {
+ value, err = p.Parse("01/04/2000 5:38:10pm UTC")
+ So(value.(time.Time), ShouldResemble, time.Date(2000, 1, 4, 17, 38, 10, 0, time.UTC))
+ So(err, ShouldBeNil)
+ })
+ Convey("does not parse invalid dates", func() {
+ _, err = p.Parse("01/04/2000 5:38:10pm")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("01/04/2000 5:38:10 pm UTC")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("01/04/2000")
+ So(err, ShouldNotBeNil)
+ })
+ })
+ Convey("with MS's format", func() {
+ var p, _ = NewFieldParser(ctDateMS, "MM/dd/yyyy h:mm:sstt")
+ Convey("parses valid timestamps correctly", func() {
+ value, err = p.Parse("01/04/2000 5:38:10PM")
+ So(value.(time.Time), ShouldResemble, time.Date(2000, 1, 4, 17, 38, 10, 0, time.UTC))
+ So(err, ShouldBeNil)
+ })
+ Convey("does not parse invalid dates", func() {
+ _, err = p.Parse("01/04/2000 :) 05:38:10PM")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("01/04/2000 005:38:10PM")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("01/04/2000 5:38:10 PM")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("01/04/2000")
+ So(err, ShouldNotBeNil)
+ })
+ })
+ Convey("with Oracle's format", func() {
+ var p, _ = NewFieldParser(ctDateOracle, "mm/Dd/yYYy hh:MI:SsAm")
+ Convey("parses valid timestamps correctly", func() {
+ value, err = p.Parse("01/04/2000 05:38:10PM")
+ So(value.(time.Time), ShouldResemble, time.Date(2000, 1, 4, 17, 38, 10, 0, time.UTC))
+ So(err, ShouldBeNil)
+ })
+ Convey("does not parse invalid dates", func() {
+ _, err = p.Parse("01/04/2000 :) 05:38:10PM")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("01/04/2000 005:38:10PM")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("01/04/2000 5:38:10 PM")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("01/04/2000")
+ So(err, ShouldNotBeNil)
+ })
+ })
+ })
+
+ Convey("Using FieldDoubleParser", t, func() {
+ var p, _ = NewFieldParser(ctDouble, "")
+ var value interface{}
+ var err error
+
+ Convey("parses valid decimal values correctly", func() {
+ value, err = p.Parse("3.14159265")
+ So(value.(float64), ShouldEqual, 3.14159265)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("0.123123")
+ So(value.(float64), ShouldEqual, 0.123123)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("-123456.789")
+ So(value.(float64), ShouldEqual, -123456.789)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("-1.")
+ So(value.(float64), ShouldEqual, -1.0)
+ So(err, ShouldBeNil)
+ })
+ Convey("does not parse invalid numbers", func() {
+ _, err = p.Parse("")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("1.1.1")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("1-2.0")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("80-")
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("Using FieldInt32Parser", t, func() {
+ var p, _ = NewFieldParser(ctInt32, "")
+ var value interface{}
+ var err error
+
+ Convey("parses valid integer values correctly", func() {
+ value, err = p.Parse("2147483647")
+ So(value.(int32), ShouldEqual, 2147483647)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("42")
+ So(value.(int32), ShouldEqual, 42)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("-2147483648")
+ So(value.(int32), ShouldEqual, -2147483648)
+ })
+ Convey("does not parse invalid numbers", func() {
+ _, err = p.Parse("")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("42.0")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("1-2")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("80-")
+ So(err, ShouldNotBeNil)
+			_, err = p.Parse("2147483648")
+			So(err, ShouldNotBeNil)
+			_, err = p.Parse("-2147483649")
+			So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("Using FieldInt64Parser", t, func() {
+ var p, _ = NewFieldParser(ctInt64, "")
+ var value interface{}
+ var err error
+
+ Convey("parses valid integer values correctly", func() {
+ value, err = p.Parse("2147483648")
+ So(value.(int64), ShouldEqual, int64(2147483648))
+ So(err, ShouldBeNil)
+ value, err = p.Parse("42")
+ So(value.(int64), ShouldEqual, 42)
+ So(err, ShouldBeNil)
+ value, err = p.Parse("-2147483649")
+ So(value.(int64), ShouldEqual, int64(-2147483649))
+ })
+ Convey("does not parse invalid numbers", func() {
+ _, err = p.Parse("")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("42.0")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("1-2")
+ So(err, ShouldNotBeNil)
+ _, err = p.Parse("80-")
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("Using FieldDecimalParser", t, func() {
+ var p, _ = NewFieldParser(ctDecimal, "")
+ var err error
+
+ Convey("parses valid decimal values correctly", func() {
+ for _, ts := range []string{"12235.2355", "42", "0", "-124", "-124.55"} {
+ testVal, err := bson.ParseDecimal128(ts)
+ So(err, ShouldBeNil)
+ parsedValue, err := p.Parse(ts)
+ So(err, ShouldBeNil)
+
+ So(testVal, ShouldResemble, parsedValue.(bson.Decimal128))
+ }
+ })
+ Convey("does not parse invalid decimal values", func() {
+ for _, ts := range []string{"", "1-2", "abcd"} {
+ _, err = p.Parse(ts)
+ So(err, ShouldNotBeNil)
+ }
+ })
+ })
+
+ Convey("Using FieldStringParser", t, func() {
+ var p, _ = NewFieldParser(ctString, "")
+ var value interface{}
+ var err error
+
+ Convey("parses strings as strings only", func() {
+ value, err = p.Parse("42")
+ So(value.(string), ShouldEqual, "42")
+ So(err, ShouldBeNil)
+ value, err = p.Parse("true")
+ So(value.(string), ShouldEqual, "true")
+ So(err, ShouldBeNil)
+ value, err = p.Parse("")
+ So(value.(string), ShouldEqual, "")
+ So(err, ShouldBeNil)
+ })
+ })
+
+}
diff --git a/src/mongo/gotools/mongooplog/main/mongooplog.go b/src/mongo/gotools/mongooplog/main/mongooplog.go
new file mode 100644
index 00000000000..a1326208724
--- /dev/null
+++ b/src/mongo/gotools/mongooplog/main/mongooplog.go
@@ -0,0 +1,94 @@
+// Main package for the mongooplog tool.
+package main
+
+import (
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/signals"
+ "github.com/mongodb/mongo-tools/common/util"
+ "github.com/mongodb/mongo-tools/mongooplog"
+ "os"
+)
+
+func main() {
+ // initialize command line options
+ opts := options.New("mongooplog", mongooplog.Usage,
+ options.EnabledOptions{Auth: true, Connection: true, Namespace: false})
+
+ // add the mongooplog-specific options
+ sourceOpts := &mongooplog.SourceOptions{}
+ opts.AddOptions(sourceOpts)
+
+ log.Logvf(log.Always, "warning: mongooplog is deprecated, and will be removed completely in a future release")
+
+ // parse the command line options
+ args, err := opts.Parse()
+ if err != nil {
+ log.Logvf(log.Always, "error parsing command line options: %v", err)
+ log.Logvf(log.Always, "try 'mongooplog --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ if len(args) != 0 {
+ log.Logvf(log.Always, "positional arguments not allowed: %v", args)
+ log.Logvf(log.Always, "try 'mongooplog --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ // print help, if specified
+ if opts.PrintHelp(false) {
+ return
+ }
+
+ // print version, if specified
+ if opts.PrintVersion() {
+ return
+ }
+
+ // init logger
+ log.SetVerbosity(opts.Verbosity)
+ signals.Handle()
+
+ // connect directly, unless a replica set name is explicitly specified
+ _, setName := util.ParseConnectionString(opts.Host)
+ opts.Direct = (setName == "")
+ opts.ReplicaSetName = setName
+
+ // validate the mongooplog options
+ if sourceOpts.From == "" {
+ log.Logvf(log.Always, "command line error: need to specify --from")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ // create a session provider for the destination server
+ sessionProviderTo, err := db.NewSessionProvider(*opts)
+ if err != nil {
+ log.Logvf(log.Always, "error connecting to destination host: %v", err)
+ os.Exit(util.ExitError)
+ }
+
+ // create a session provider for the source server
+ opts.Connection.Host = sourceOpts.From
+ opts.Connection.Port = ""
+ sessionProviderFrom, err := db.NewSessionProvider(*opts)
+ if err != nil {
+ log.Logvf(log.Always, "error connecting to source host: %v", err)
+ os.Exit(util.ExitError)
+ }
+
+ // initialize mongooplog
+ oplog := mongooplog.MongoOplog{
+ ToolOptions: opts,
+ SourceOptions: sourceOpts,
+ SessionProviderFrom: sessionProviderFrom,
+ SessionProviderTo: sessionProviderTo,
+ }
+
+ // kick it off
+ if err := oplog.Run(); err != nil {
+ log.Logvf(log.Always, "error: %v", err)
+ os.Exit(util.ExitError)
+ }
+
+}
diff --git a/src/mongo/gotools/mongooplog/mongooplog.go b/src/mongo/gotools/mongooplog/mongooplog.go
new file mode 100644
index 00000000000..67f7a4e9d2f
--- /dev/null
+++ b/src/mongo/gotools/mongooplog/mongooplog.go
@@ -0,0 +1,151 @@
+// Package mongooplog polls operations from the replication oplog of one server, and applies them to another.
+package mongooplog
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "time"
+)
+
+// MongoOplog is a container for the user-specified options for running mongooplog.
+type MongoOplog struct {
+ // standard tool options
+ ToolOptions *options.ToolOptions
+
+ // mongooplog-specific options
+ SourceOptions *SourceOptions
+
+ // session provider for the source server
+ SessionProviderFrom *db.SessionProvider
+
+ // session provider for the destination server
+ SessionProviderTo *db.SessionProvider
+}
+
+// Run executes the mongooplog program.
+func (mo *MongoOplog) Run() error {
+
+ // split up the oplog namespace we are using
+ oplogDB, oplogColl, err :=
+ util.SplitAndValidateNamespace(mo.SourceOptions.OplogNS)
+
+ if err != nil {
+ return err
+ }
+
+ // the full oplog namespace needs to be specified
+ if oplogColl == "" {
+ return fmt.Errorf("the oplog namespace must specify a collection")
+ }
+
+ log.Logvf(log.DebugLow, "using oplog namespace `%v.%v`", oplogDB, oplogColl)
+
+ // connect to the destination server
+ toSession, err := mo.SessionProviderTo.GetSession()
+ if err != nil {
+ return fmt.Errorf("error connecting to destination db: %v", err)
+ }
+ defer toSession.Close()
+ toSession.SetSocketTimeout(0)
+
+ // purely for logging
+ destServerStr := mo.ToolOptions.Host
+ if mo.ToolOptions.Port != "" {
+ destServerStr = destServerStr + ":" + mo.ToolOptions.Port
+ }
+ log.Logvf(log.DebugLow, "successfully connected to destination server `%v`", destServerStr)
+
+ // connect to the source server
+ fromSession, err := mo.SessionProviderFrom.GetSession()
+ if err != nil {
+ return fmt.Errorf("error connecting to source db: %v", err)
+ }
+ defer fromSession.Close()
+ fromSession.SetSocketTimeout(0)
+
+ log.Logvf(log.DebugLow, "successfully connected to source server `%v`", mo.SourceOptions.From)
+
+ // set slave ok
+ fromSession.SetMode(mgo.Eventual, true)
+
+ // get the tailing cursor for the source server's oplog
+ tail := buildTailingCursor(fromSession.DB(oplogDB).C(oplogColl),
+ mo.SourceOptions)
+ defer tail.Close()
+
+ // read the cursor dry, applying ops to the destination
+ // server in the process
+ oplogEntry := &db.Oplog{}
+ res := &db.ApplyOpsResponse{}
+
+ log.Logv(log.DebugLow, "applying oplog entries...")
+
+ opCount := 0
+
+ for tail.Next(oplogEntry) {
+
+ // skip noops
+ if oplogEntry.Operation == "n" {
+ log.Logvf(log.DebugHigh, "skipping no-op for namespace `%v`", oplogEntry.Namespace)
+ continue
+ }
+ opCount++
+
+ // prepare the op to be applied
+ opsToApply := []db.Oplog{*oplogEntry}
+
+ // apply the operation
+ err := toSession.Run(bson.M{"applyOps": opsToApply}, res)
+
+ if err != nil {
+ return fmt.Errorf("error applying ops: %v", err)
+ }
+
+ // check the server's response for an issue
+ if !res.Ok {
+ return fmt.Errorf("server gave error applying ops: %v", res.ErrMsg)
+ }
+ }
+
+ // make sure there was no tailing error
+ if err := tail.Err(); err != nil {
+ return fmt.Errorf("error querying oplog: %v", err)
+ }
+
+ log.Logvf(log.DebugLow, "done applying %v oplog entries", opCount)
+
+ return nil
+}
+
+// get the cursor for the oplog collection, based on the options
+// passed in to mongooplog
+func buildTailingCursor(oplog *mgo.Collection,
+ sourceOptions *SourceOptions) *mgo.Iter {
+
+ // how many seconds in the past we need
+ secondsInPast := time.Duration(sourceOptions.Seconds) * time.Second
+ // the time threshold for oplog queries
+ threshold := time.Now().Add(-secondsInPast)
+ // convert to a unix timestamp (seconds since epoch)
+ thresholdAsUnix := threshold.Unix()
+
+ // shift it appropriately, to prepare it to be converted to an
+ // oplog timestamp
+ thresholdShifted := uint64(thresholdAsUnix) << 32
+
+ // build the oplog query
+ oplogQuery := bson.M{
+ "ts": bson.M{
+ "$gte": bson.MongoTimestamp(thresholdShifted),
+ },
+ }
+
+ // TODO: wait time
+ return oplog.Find(oplogQuery).Iter()
+
+}
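+
+// asOplogTimestamp is an editor's sketch, not part of the original change,
+// restating the conversion above: a MongoDB timestamp packs seconds since
+// the epoch into its high 32 bits, with an ordinal counter (zero here) in
+// the low 32 bits.
+func asOplogTimestamp(t time.Time) bson.MongoTimestamp {
+	return bson.MongoTimestamp(uint64(t.Unix()) << 32)
+}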
diff --git a/src/mongo/gotools/mongooplog/mongooplog_test.go b/src/mongo/gotools/mongooplog/mongooplog_test.go
new file mode 100644
index 00000000000..de825b2117f
--- /dev/null
+++ b/src/mongo/gotools/mongooplog/mongooplog_test.go
@@ -0,0 +1,135 @@
+package mongooplog
+
+import (
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "testing"
+)
+
+func TestBasicOps(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.IntegrationTestType)
+
+ var opts *options.ToolOptions
+ var sourceOpts *SourceOptions
+
+ Convey("When replicating operations", t, func() {
+ ssl := testutil.GetSSLOptions()
+ auth := testutil.GetAuthOptions()
+
+ // specify localhost:33333 as the destination host
+ opts = &options.ToolOptions{
+ Namespace: &options.Namespace{},
+ SSL: &ssl,
+ Auth: &auth,
+ Kerberos: &options.Kerberos{},
+ Connection: &options.Connection{
+ Host: "localhost",
+ Port: db.DefaultTestPort,
+ },
+ }
+
+		// set up the source options (the source host is the same localhost:33333)
+ sourceOpts = &SourceOptions{
+			Seconds: 86400, // the default (one day)
+ OplogNS: "local.oplog.rs", // the default
+ }
+
+ Convey("all operations should be applied correctly, without"+
+ " error", func() {
+
+ // set the "oplog" we will use
+ sourceOpts.OplogNS = "mongooplog_test.oplog"
+
+ // initialize a session provider for the source
+ sourceSP, err := db.NewSessionProvider(*opts)
+ So(err, ShouldBeNil)
+
+ // initialize a session provider for the destination
+ destSP, err := db.NewSessionProvider(*opts)
+ So(err, ShouldBeNil)
+
+ // clear out the oplog
+ sess, err := sourceSP.GetSession()
+ So(err, ShouldBeNil)
+ defer sess.Close()
+ oplogColl := sess.DB("mongooplog_test").C("oplog")
+ oplogColl.DropCollection()
+
+ // create the oplog as a capped collection, so it can be tailed
+ So(sess.DB("mongooplog_test").Run(
+ bson.D{{"create", "oplog"}, {"capped", true},
+ {"size", 10000}},
+ bson.M{}),
+ ShouldBeNil)
+
+ // create the collection we are testing against (ignore errors)
+ sess.DB("mongooplog_test").C("data").Create(&mgo.CollectionInfo{})
+
+ // clear out the collection we'll use for testing
+ testColl := sess.DB("mongooplog_test").C("data")
+ _, err = testColl.RemoveAll(bson.M{})
+ So(err, ShouldBeNil)
+
+ // insert some "ops" into the oplog to be found and applied
+ obj1 := bson.D{{"_id", 3}}
+ op1 := &db.Oplog{
+ Timestamp: bson.MongoTimestamp(1<<63 - 1), // years in the future
+ HistoryID: 100,
+ Version: 2,
+ Operation: "i",
+ Namespace: "mongooplog_test.data",
+ Object: obj1,
+ }
+ So(oplogColl.Insert(op1), ShouldBeNil)
+ obj2 := bson.D{{"_id", 4}}
+ op2 := &db.Oplog{
+ Timestamp: bson.MongoTimestamp(1<<63 - 1), // years in the future
+ HistoryID: 200,
+ Version: 2,
+ Operation: "i",
+ Namespace: "mongooplog_test.data",
+ Object: obj2,
+ }
+ So(oplogColl.Insert(op2), ShouldBeNil)
+
+ // this one should be filtered out, since it occurred before the
+ // threshold
+ obj3 := bson.D{{"_id", 3}}
+ op3 := &db.Oplog{
+ Timestamp: bson.MongoTimestamp(1<<62 - 1), // more than 1 day in the past
+ HistoryID: 300,
+ Version: 2,
+ Operation: "i",
+ Namespace: "mongooplog_test.data",
+ Object: obj3,
+ }
+ So(oplogColl.Insert(op3), ShouldBeNil)
+
+ // initialize the mongooplog
+ oplog := MongoOplog{
+ ToolOptions: opts,
+ SourceOptions: sourceOpts,
+ SessionProviderFrom: sourceSP,
+ SessionProviderTo: destSP,
+ }
+
+ // run it
+ So(oplog.Run(), ShouldBeNil)
+
+ // the operations should have been applied
+ var inserted []bson.M
+ So(testColl.Find(bson.M{}).Sort("_id").All(&inserted),
+ ShouldBeNil)
+ So(len(inserted), ShouldEqual, 2)
+ So(inserted[0]["_id"], ShouldEqual, 3)
+ So(inserted[1]["_id"], ShouldEqual, 4)
+
+ })
+
+ })
+
+}
diff --git a/src/mongo/gotools/mongooplog/options.go b/src/mongo/gotools/mongooplog/options.go
new file mode 100644
index 00000000000..b7037e165d8
--- /dev/null
+++ b/src/mongo/gotools/mongooplog/options.go
@@ -0,0 +1,23 @@
+package mongooplog
+
+import (
+ "gopkg.in/mgo.v2/bson"
+)
+
+var Usage = `--from <remote host> <options>
+
+Poll operations from the replication oplog of one server, and apply them to another.
+
+See http://docs.mongodb.org/manual/reference/program/mongooplog/ for more information.`
+
+// SourceOptions defines the set of options to use in retrieving oplog data from the source server.
+type SourceOptions struct {
+	From string `long:"from" value-name:"<hostname>" description:"specify the host for mongooplog to retrieve operations from"`
+ OplogNS string `long:"oplogns" value-name:"<namespace>" description:"specify the namespace in the --from host where the oplog lives (default 'local.oplog.rs') " default:"local.oplog.rs" default-mask:"-"`
+ Seconds bson.MongoTimestamp `long:"seconds" value-name:"<seconds>" short:"s" description:"specify a number of seconds for mongooplog to pull from the remote host" default:"86400" default-mask:"-"`
+}
+
+// Name returns a human-readable group name for source options.
+func (_ *SourceOptions) Name() string {
+ return "source"
+}
diff --git a/src/mongo/gotools/mongorestore/filepath.go b/src/mongo/gotools/mongorestore/filepath.go
new file mode 100644
index 00000000000..61ccbbfb08f
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/filepath.go
@@ -0,0 +1,644 @@
+package mongorestore
+
+import (
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync/atomic"
+
+ "github.com/mongodb/mongo-tools/common"
+ "github.com/mongodb/mongo-tools/common/archive"
+ "github.com/mongodb/mongo-tools/common/intents"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/util"
+)
+
+// FileType describes the various types of restore documents.
+type FileType uint
+
+// File types constants used by mongorestore.
+const (
+ UnknownFileType FileType = iota
+ BSONFileType
+ MetadataFileType
+)
+
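+// errorWriter provides a Write method that always fails; it is embedded in
+// the read-only file types below so they satisfy the write side of the
+// intents.file interface.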
+type errorWriter struct{}
+
+func (errorWriter) Write([]byte) (int, error) {
+ return 0, os.ErrInvalid
+}
+
+// PosReader is a ReadCloser which maintains the position of what has been
+// read from the Reader.
+type PosReader interface {
+ io.ReadCloser
+ Pos() int64
+}
+
+// posTrackingReader wraps a ReadCloser and atomically tracks how many bytes
+// have been read from it.
+type posTrackingReader struct {
+ pos int64 // updated atomically, aligned at the beginning of the struct
+ io.ReadCloser
+}
+
+func (f *posTrackingReader) Read(p []byte) (int, error) {
+ n, err := f.ReadCloser.Read(p)
+ atomic.AddInt64(&f.pos, int64(n))
+ return n, err
+}
+
+func (f *posTrackingReader) Pos() int64 {
+ return atomic.LoadInt64(&f.pos)
+}
+
+// mixedPosTrackingReader is a type for reading from one file but getting the position of a
+// different file. This is useful for compressed files, where the position reported for progress
+// bars should be that of the compressed file, but the data should be read from the uncompressed stream.
+type mixedPosTrackingReader struct {
+ readHolder PosReader
+ posHolder PosReader
+}
+
+func (f *mixedPosTrackingReader) Read(p []byte) (int, error) {
+ return f.readHolder.Read(p)
+}
+
+func (f *mixedPosTrackingReader) Pos() int64 {
+ return f.posHolder.Pos()
+}
+
+func (f *mixedPosTrackingReader) Close() error {
+ err := f.readHolder.Close()
+ if err != nil {
+ return err
+ }
+ return f.posHolder.Close()
+}
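+
+// examplePosTracking is an illustrative sketch, not part of the original
+// tool: it shows how the two trackers above compose for a gzipped BSON file,
+// where progress should reflect bytes consumed from the compressed stream
+// while reads return uncompressed data. Error handling is elided here;
+// realBSONFile.Open below does it properly.
+func examplePosTracking(file io.ReadCloser) PosReader {
+ posFile := &posTrackingReader{0, file} // counts compressed bytes read
+ gzFile, _ := gzip.NewReader(posFile) // decompresses on the fly
+ posUncompressed := &posTrackingReader{0, gzFile}
+ return &mixedPosTrackingReader{readHolder: posUncompressed, posHolder: posFile}
+}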
+
+// realBSONFile implements the intents.file interface. It lets intents read from real BSON files
+// on disk via an embedded os.File.
+// The Read and Close methods of the intents.file interface are implemented by the embedded
+// os.File; Write always returns an error via the embedded errorWriter.
+type realBSONFile struct {
+ path string
+ PosReader
+ // errorWriter adds a Write() method to this object, allowing it to be an
+ // intents.file (a ReadWriteOpenCloser)
+ errorWriter
+ intent *intents.Intent
+ gzip bool
+}
+
+// Open is part of the intents.file interface. realBSONFiles need to be Opened before Read
+// can be called on them.
+func (f *realBSONFile) Open() (err error) {
+ if f.path == "" {
+ // this error shouldn't happen normally
+ return fmt.Errorf("error reading BSON file for %v", f.intent.Namespace())
+ }
+ file, err := os.Open(f.path)
+ if err != nil {
+ return fmt.Errorf("error reading BSON file %v: %v", f.path, err)
+ }
+ posFile := &posTrackingReader{0, file}
+ if f.gzip {
+ gzFile, err := gzip.NewReader(posFile)
+ if err != nil {
+ return fmt.Errorf("error decompressing compressed BSON file %v: %v", f.path, err)
+ }
+ posUncompressedFile := &posTrackingReader{0, gzFile}
+ f.PosReader = &mixedPosTrackingReader{
+ readHolder: posUncompressedFile,
+ posHolder: posFile}
+ } else {
+ f.PosReader = posFile
+ }
+ return nil
+}
+
+// realMetadataFile implements the intents.file interface. It lets intents read from real
+// metadata.json files on disk via an embedded os.File.
+// The Read and Close methods of the intents.file interface are implemented by the embedded
+// os.File; Write always returns an error via the embedded errorWriter.
+type realMetadataFile struct {
+ pos int64 // updated atomically, aligned at the beginning of the struct
+ io.ReadCloser
+ path string
+ // errorWriter adds a Write() method to this object, allowing it to be an
+ // intents.file (a ReadWriteOpenCloser)
+ errorWriter
+ intent *intents.Intent
+ gzip bool
+}
+
+// Open is part of the intents.file interface. realMetadataFiles need to be Opened before Read
+// can be called on them.
+func (f *realMetadataFile) Open() (err error) {
+ if f.path == "" {
+ return fmt.Errorf("error reading metadata for %v", f.intent.Namespace())
+ }
+ file, err := os.Open(f.path)
+ if err != nil {
+ return fmt.Errorf("error reading metadata %v: %v", f.path, err)
+ }
+ if f.gzip {
+ gzFile, err := gzip.NewReader(file)
+ if err != nil {
+ return fmt.Errorf("error reading compressed metadata %v: %v", f.path, err)
+ }
+ f.ReadCloser = &wrappedReadCloser{gzFile, file}
+ } else {
+ f.ReadCloser = file
+ }
+ return nil
+}
+
+func (f *realMetadataFile) Read(p []byte) (int, error) {
+ n, err := f.ReadCloser.Read(p)
+ atomic.AddInt64(&f.pos, int64(n))
+ return n, err
+}
+
+func (f *realMetadataFile) Pos() int64 {
+ return atomic.LoadInt64(&f.pos)
+}
+
+// stdinFile implements the intents.file interface. It lets an intent read a
+// single collection from standard input.
+type stdinFile struct {
+ pos int64 // updated atomically, aligned at the beginning of the struct
+ io.Reader
+ errorWriter
+}
+
+// Open is part of the intents.file interface. stdinFile needs to have Open called on it before
+// Read can be called on it.
+func (f *stdinFile) Open() error {
+ return nil
+}
+
+func (f *stdinFile) Read(p []byte) (int, error) {
+ n, err := f.Reader.Read(p)
+ atomic.AddInt64(&f.pos, int64(n))
+ return n, err
+}
+
+func (f *stdinFile) Pos() int64 {
+ return atomic.LoadInt64(&f.pos)
+}
+
+// Close is part of the intents.file interface. After Close is called, Read will fail.
+func (f *stdinFile) Close() error {
+ f.Reader = nil
+ return nil
+}
+
+// getInfoFromFilename pulls the base collection name and FileType from a given file.
+func (restore *MongoRestore) getInfoFromFilename(filename string) (string, FileType) {
+ baseFileName := filepath.Base(filename)
+ // .bin supported for legacy reasons
+ if strings.HasSuffix(baseFileName, ".bin") {
+ baseName := strings.TrimSuffix(baseFileName, ".bin")
+ return baseName, BSONFileType
+ }
+ // The --gzip flag indicates that files in a dump directory have a .gz
+ // suffix; it does not apply to the "files" contained in an archive, which
+ // carry no suffix, compressed or otherwise.
+ if restore.InputOptions.Gzip && restore.InputOptions.Archive == "" {
+ if strings.HasSuffix(baseFileName, ".metadata.json.gz") {
+ baseName := strings.TrimSuffix(baseFileName, ".metadata.json.gz")
+ return baseName, MetadataFileType
+ } else if strings.HasSuffix(baseFileName, ".bson.gz") {
+ baseName := strings.TrimSuffix(baseFileName, ".bson.gz")
+ return baseName, BSONFileType
+ }
+ return "", UnknownFileType
+ }
+ if strings.HasSuffix(baseFileName, ".metadata.json") {
+ baseName := strings.TrimSuffix(baseFileName, ".metadata.json")
+ return baseName, MetadataFileType
+ } else if strings.HasSuffix(baseFileName, ".bson") {
+ baseName := strings.TrimSuffix(baseFileName, ".bson")
+ return baseName, BSONFileType
+ }
+ return "", UnknownFileType
+}
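+
+// exampleGetInfoFromFilename is an illustrative sketch, not part of the
+// original tool: it demonstrates the suffix handling above. The minimal
+// MongoRestore value constructed here is an assumption for demonstration.
+func exampleGetInfoFromFilename() {
+ restore := &MongoRestore{InputOptions: &InputOptions{}}
+ name, fileType := restore.getInfoFromFilename("users.metadata.json")
+ fmt.Println(name, fileType == MetadataFileType) // users true
+ restore.InputOptions.Gzip = true
+ name, fileType = restore.getInfoFromFilename("users.bson.gz")
+ fmt.Println(name, fileType == BSONFileType) // users true
+}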
+
+// CreateAllIntents drills down into a dump folder, creating intents for all of
+// the databases and collections it finds.
+func (restore *MongoRestore) CreateAllIntents(dir archive.DirLike) error {
+ log.Logvf(log.DebugHigh, "using %v as dump root directory", dir.Path())
+ entries, err := dir.ReadDir()
+ if err != nil {
+ return fmt.Errorf("error reading root dump folder: %v", err)
+ }
+ for _, entry := range entries {
+ if entry.IsDir() {
+ if err = util.ValidateDBName(entry.Name()); err != nil {
+ return fmt.Errorf("invalid database name '%v': %v", entry.Name(), err)
+ }
+ err = restore.CreateIntentsForDB(entry.Name(), entry)
+ if err != nil {
+ return err
+ }
+ } else {
+ if entry.Name() == "oplog.bson" {
+ if restore.InputOptions.OplogReplay {
+ log.Logv(log.DebugLow, "found oplog.bson file to replay")
+ }
+ oplogIntent := &intents.Intent{
+ C: "oplog",
+ Size: entry.Size(),
+ Location: entry.Path(),
+ }
+ if !restore.InputOptions.OplogReplay {
+ if restore.InputOptions.Archive != "" {
+ mutedOut := &archive.MutedCollection{
+ Intent: oplogIntent,
+ Demux: restore.archive.Demux,
+ }
+ restore.archive.Demux.Open(
+ oplogIntent.Namespace(),
+ mutedOut,
+ )
+ }
+ continue
+ }
+ if restore.InputOptions.Archive != "" {
+ if restore.InputOptions.Archive == "-" {
+ oplogIntent.Location = "archive on stdin"
+ } else {
+ oplogIntent.Location = fmt.Sprintf("archive '%v'", restore.InputOptions.Archive)
+ }
+
+ // no need to check that we want to cache here
+ oplogIntent.BSONFile = &archive.RegularCollectionReceiver{
+ Intent: oplogIntent,
+ Origin: oplogIntent.Namespace(),
+ Demux: restore.archive.Demux,
+ }
+ } else {
+ oplogIntent.BSONFile = &realBSONFile{path: entry.Path(), intent: oplogIntent, gzip: restore.InputOptions.Gzip}
+ }
+ restore.manager.Put(oplogIntent)
+ } else {
+ log.Logvf(log.Always, `don't know what to do with file "%v", skipping...`, entry.Path())
+ }
+ }
+ }
+ return nil
+}
+
+// CreateIntentForOplog creates an intent for a file that we want to treat as an oplog.
+func (restore *MongoRestore) CreateIntentForOplog() error {
+ target, err := newActualPath(restore.InputOptions.OplogFile)
+ db := ""
+ collection := "oplog"
+ if err != nil {
+ return err
+ }
+ log.Logvf(log.DebugLow, "reading oplog from %v", target.Path())
+
+ if target.IsDir() {
+ return fmt.Errorf("file %v is a directory, not a bson file", target.Path())
+ }
+
+ // Then create its intent.
+ intent := &intents.Intent{
+ DB: db,
+ C: collection,
+ Size: target.Size(),
+ Location: target.Path(),
+ }
+ intent.BSONFile = &realBSONFile{path: target.Path(), intent: intent, gzip: restore.InputOptions.Gzip}
+ restore.manager.PutOplogIntent(intent, "oplogFile")
+ return nil
+}
+
+// CreateIntentsForDB drills down into the dir folder, creating intents
+// for all of the collection dump files it finds for the db database.
+func (restore *MongoRestore) CreateIntentsForDB(db string, dir archive.DirLike) (err error) {
+ var entries []archive.DirLike
+ log.Logvf(log.DebugHigh, "reading collections for database %v in %v", db, dir.Name())
+ entries, err = dir.ReadDir()
+ if err != nil {
+ return fmt.Errorf("error reading db folder %v: %v", db, err)
+ }
+ usesMetadataFiles := hasMetadataFiles(entries)
+ for _, entry := range entries {
+ if entry.IsDir() {
+ log.Logvf(log.Always, `don't know what to do with subdirectory "%v", skipping...`,
+ filepath.Join(dir.Name(), entry.Name()))
+ } else {
+ collection, fileType := restore.getInfoFromFilename(entry.Name())
+ sourceNS := db + "." + collection
+ switch fileType {
+ case BSONFileType:
+ var skip bool
+ // Dumps of a single database (i.e. with the -d flag) may contain special
+ // db-specific collections that start with a "$" (for example, $admin.system.users
+ // holds the users for a database that was dumped with --dumpDbUsersAndRoles enabled).
+ // If these special files manage to be included in a dump directory during a full
+ // (multi-db) restore, we should ignore them.
+ if restore.NSOptions.DB == "" && strings.HasPrefix(collection, "$") {
+ log.Logvf(log.DebugLow, "not restoring special collection %v.%v", db, collection)
+ skip = true
+ }
+ // TOOLS-717: disallow restoring to the system.profile collection.
+ // Server versions >= 3.0.3 disallow user inserts to system.profile so
+ // it would likely fail anyway.
+ if collection == "system.profile" {
+ log.Logvf(log.DebugLow, "skipping restore of system.profile collection", db)
+ skip = true
+ }
+ // skip restoring the indexes collection if we are using metadata
+ // files to store index information, to eliminate redundancy
+ if collection == "system.indexes" && usesMetadataFiles {
+ log.Logvf(log.DebugLow,
+ "not restoring system.indexes collection because database %v "+
+ "has .metadata.json files", db)
+ skip = true
+ }
+
+ if !restore.includer.Has(sourceNS) {
+ log.Logvf(log.DebugLow, "skipping restoring %v.%v, it is not included", db, collection)
+ skip = true
+ }
+ if restore.excluder.Has(sourceNS) {
+ log.Logvf(log.DebugLow, "skipping restoring %v.%v, it is excluded", db, collection)
+ skip = true
+ }
+ destNS := restore.renamer.Get(sourceNS)
+ destDB, destC := common.SplitNamespace(destNS)
+ intent := &intents.Intent{
+ DB: destDB,
+ C: destC,
+ Size: entry.Size(),
+ }
+ if restore.InputOptions.Archive != "" {
+ if restore.InputOptions.Archive == "-" {
+ intent.Location = "archive on stdin"
+ } else {
+ intent.Location = fmt.Sprintf("archive '%v'", restore.InputOptions.Archive)
+ }
+ if skip {
+ // adding the DemuxOut to the demux, but not adding the intent to the manager
+ mutedOut := &archive.MutedCollection{Intent: intent, Demux: restore.archive.Demux}
+ restore.archive.Demux.Open(sourceNS, mutedOut)
+ continue
+ }
+ if intent.IsSpecialCollection() {
+ specialCollectionCache := archive.NewSpecialCollectionCache(intent, restore.archive.Demux)
+ intent.BSONFile = specialCollectionCache
+ restore.archive.Demux.Open(sourceNS, specialCollectionCache)
+ } else {
+ intent.BSONFile = &archive.RegularCollectionReceiver{
+ Origin: sourceNS,
+ Intent: intent,
+ Demux: restore.archive.Demux,
+ }
+ }
+ } else {
+ if skip {
+ continue
+ }
+ intent.Location = entry.Path()
+ intent.BSONFile = &realBSONFile{path: entry.Path(), intent: intent, gzip: restore.InputOptions.Gzip}
+ }
+ log.Logvf(log.Info, "found collection %v bson to restore to %v", sourceNS, destNS)
+ restore.manager.PutWithNamespace(sourceNS, intent)
+ case MetadataFileType:
+ if !restore.includer.Has(sourceNS) {
+ log.Logvf(log.DebugLow, "skipping restoring %v.%v metadata, it is not included", db, collection)
+ continue
+ }
+ if restore.excluder.Has(sourceNS) {
+ log.Logvf(log.DebugLow, "skipping restoring %v.%v metadata, it is excluded", db, collection)
+ continue
+ }
+
+ usesMetadataFiles = true
+ destNS := restore.renamer.Get(sourceNS)
+ rnDB, rnC := common.SplitNamespace(destNS)
+ intent := &intents.Intent{
+ DB: rnDB,
+ C: rnC,
+ }
+
+ if restore.InputOptions.Archive != "" {
+ if restore.InputOptions.Archive == "-" {
+ intent.MetadataLocation = "archive on stdin"
+ } else {
+ intent.MetadataLocation = fmt.Sprintf("archive '%v'", restore.InputOptions.Archive)
+ }
+ intent.MetadataFile = &archive.MetadataPreludeFile{Origin: sourceNS, Intent: intent, Prelude: restore.archive.Prelude}
+ } else {
+ intent.MetadataLocation = entry.Path()
+ intent.MetadataFile = &realMetadataFile{path: entry.Path(), intent: intent, gzip: restore.InputOptions.Gzip}
+ }
+ log.Logvf(log.Info, "found collection metadata from %v to restore to %v", sourceNS, destNS)
+ restore.manager.PutWithNamespace(sourceNS, intent)
+ default:
+ log.Logvf(log.Always, `don't know what to do with file "%v", skipping...`,
+ entry.Path())
+ }
+ }
+ }
+ return nil
+}
+
+// CreateStdinIntentForCollection builds an intent for the given database and collection name
+// that is to be read from standard input
+func (restore *MongoRestore) CreateStdinIntentForCollection(db string, collection string) error {
+ log.Logvf(log.DebugLow, "reading collection %v for database %v from standard input",
+ collection, db)
+ intent := &intents.Intent{
+ DB: db,
+ C: collection,
+ Location: "-",
+ }
+ intent.BSONFile = &stdinFile{Reader: restore.stdin}
+ restore.manager.Put(intent)
+ return nil
+}
+
+// CreateIntentForCollection builds an intent for the given database and collection name
+// along with a path to a .bson collection file. It searches the file's parent directory
+// for a matching metadata file.
+//
+// This method is not called by CreateIntentsForDB,
+// it is only used in the case where --db and --collection flags are set.
+func (restore *MongoRestore) CreateIntentForCollection(db string, collection string, dir archive.DirLike) error {
+ log.Logvf(log.DebugLow, "reading collection %v for database %v from %v",
+ collection, db, dir.Path())
+ // first make sure the bson file exists and is valid
+ _, err := dir.Stat()
+ if err != nil {
+ return err
+ }
+ if dir.IsDir() {
+ return fmt.Errorf("file %v is a directory, not a bson file", dir.Path())
+ }
+
+ baseName, fileType := restore.getInfoFromFilename(dir.Name())
+ if fileType != BSONFileType {
+ return fmt.Errorf("file %v does not have .bson extension", dir.Path())
+ }
+
+ // then create its intent
+ intent := &intents.Intent{
+ DB: db,
+ C: collection,
+ Size: dir.Size(),
+ Location: dir.Path(),
+ }
+ intent.BSONFile = &realBSONFile{path: dir.Path(), intent: intent, gzip: restore.InputOptions.Gzip}
+
+ // finally, check if it has a .metadata.json file in its folder
+ log.Logvf(log.DebugLow, "scanning directory %v for metadata", dir.Name())
+ entries, err := dir.Parent().ReadDir()
+ if err != nil {
+ // try and carry on if we can
+ log.Logvf(log.Info, "error attempting to locate metadata for file: %v", err)
+ log.Logv(log.Info, "restoring collection without metadata")
+ restore.manager.Put(intent)
+ return nil
+ }
+ metadataName := baseName + ".metadata.json"
+ if restore.InputOptions.Gzip {
+ metadataName += ".gz"
+ }
+ for _, entry := range entries {
+ if entry.Name() == metadataName {
+ metadataPath := entry.Path()
+ log.Logvf(log.Info, "found metadata for collection at %v", metadataPath)
+ intent.MetadataLocation = metadataPath
+ intent.MetadataFile = &realMetadataFile{path: metadataPath, intent: intent, gzip: restore.InputOptions.Gzip}
+ break
+ }
+ }
+
+ if intent.MetadataFile == nil {
+ log.Logv(log.Info, "restoring collection without metadata")
+ }
+
+ restore.manager.Put(intent)
+
+ return nil
+}
+
+// helper for searching a list of FileInfo for metadata files
+func hasMetadataFiles(files []archive.DirLike) bool {
+ for _, file := range files {
+ if strings.HasSuffix(file.Name(), ".metadata.json") {
+ return true
+ }
+ }
+ return false
+}
+
+// handleBSONInsteadOfDirectory updates -d and -c settings based on
+// the path to the BSON file passed to mongorestore. This is only
+// applicable if the target path points to a .bson file.
+//
+// As an example, when the user passes 'dump/mydb/col.bson', this method
+// will infer that 'mydb' is the database and 'col' is the collection name.
+func (restore *MongoRestore) handleBSONInsteadOfDirectory(path string) error {
+ // we know we have been given a non-directory, so we should handle it
+ // like a bson file and infer as much as we can
+ if restore.NSOptions.Collection == "" {
+ // if the user did not set -c, use the file name for the collection
+ newCollectionName, fileType := restore.getInfoFromFilename(path)
+ if fileType != BSONFileType {
+ return fmt.Errorf("file %v does not have .bson extension", path)
+ }
+ restore.NSOptions.Collection = newCollectionName
+ log.Logvf(log.DebugLow, "inferred collection '%v' from file", restore.NSOptions.Collection)
+ }
+ if restore.NSOptions.DB == "" {
+ // if the user did not set -d, use the directory containing the target
+ // file as the db name (as it would be in a dump directory). If
+ // we cannot determine the directory name, use "test"
+ dirForFile := filepath.Base(filepath.Dir(path))
+ if dirForFile == "." || dirForFile == ".." {
+ dirForFile = "test"
+ }
+ restore.NSOptions.DB = dirForFile
+ log.Logvf(log.DebugLow, "inferred db '%v' from the file's directory", restore.NSOptions.DB)
+ }
+ return nil
+}
+
+type actualPath struct {
+ os.FileInfo
+ path string
+ parent *actualPath
+}
+
+func newActualPath(dir string) (*actualPath, error) {
+ stat, err := os.Stat(dir)
+ if err != nil {
+ return nil, err
+ }
+ path := filepath.Dir(filepath.Clean(dir))
+ parent := &actualPath{}
+ parentStat, err := os.Stat(path)
+ if err == nil {
+ parent.FileInfo = parentStat
+ parent.path = filepath.Dir(path)
+ }
+ ap := &actualPath{
+ FileInfo: stat,
+ path: path,
+ parent: parent,
+ }
+ return ap, nil
+}
+
+func (ap actualPath) Path() string {
+ return filepath.Join(ap.path, ap.Name())
+}
+
+func (ap actualPath) Parent() archive.DirLike {
+ // returns nil if there is no parent
+ return ap.parent
+}
+
+func (ap actualPath) ReadDir() ([]archive.DirLike, error) {
+ entries, err := ioutil.ReadDir(ap.Path())
+ if err != nil {
+ return nil, err
+ }
+ var returnFileInfo = make([]archive.DirLike, 0, len(entries))
+ for _, entry := range entries {
+ returnFileInfo = append(returnFileInfo,
+ actualPath{
+ FileInfo: entry,
+ path: ap.Path(),
+ parent: &ap,
+ })
+ }
+ return returnFileInfo, nil
+}
+
+func (ap actualPath) Stat() (archive.DirLike, error) {
+ stat, err := os.Stat(ap.Path())
+ if err != nil {
+ return nil, err
+ }
+ return &actualPath{FileInfo: stat, path: ap.Path()}, nil
+}
+
+func (ap actualPath) IsDir() bool {
+ stat, err := os.Stat(ap.Path())
+ if err != nil {
+ return false
+ }
+ return stat.IsDir()
+}
diff --git a/src/mongo/gotools/mongorestore/filepath_test.go b/src/mongo/gotools/mongorestore/filepath_test.go
new file mode 100644
index 00000000000..fb5c5b325f6
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/filepath_test.go
@@ -0,0 +1,337 @@
+package mongorestore
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+
+ "github.com/mongodb/mongo-tools/common/intents"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ commonOpts "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ "github.com/mongodb/mongo-tools/common/util"
+ "github.com/mongodb/mongo-tools/mongorestore/ns"
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+func init() {
+ // bump up the verbosity to make checking debug log output possible
+ log.SetVerbosity(&options.Verbosity{
+ VLevel: 4,
+ })
+}
+
+func newMongoRestore() *MongoRestore {
+ renamer, _ := ns.NewRenamer([]string{}, []string{})
+ includer, _ := ns.NewMatcher([]string{"*"})
+ excluder, _ := ns.NewMatcher([]string{})
+ return &MongoRestore{
+ manager: intents.NewIntentManager(),
+ InputOptions: &InputOptions{},
+ ToolOptions: &commonOpts.ToolOptions{},
+ NSOptions: &NSOptions{},
+ renamer: renamer,
+ includer: includer,
+ excluder: excluder,
+ }
+}
+
+func TestCreateAllIntents(t *testing.T) {
+ // This test creates intents based on the test file tree:
+ // testdirs/badfile.txt
+ // testdirs/oplog.bson
+ // testdirs/db1
+ // testdirs/db1/baddir
+ // testdirs/db1/baddir/out.bson
+ // testdirs/db1/c1.bson
+ // testdirs/db1/c1.metadata.json
+ // testdirs/db1/c2.bson
+ // testdirs/db1/c3.bson
+ // testdirs/db1/c3.metadata.json
+ // testdirs/db2
+ // testdirs/db2/c1.bin
+ // testdirs/db2/c2.txt
+
+ var mr *MongoRestore
+ var buff bytes.Buffer
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a test MongoRestore", t, func() {
+ mr = newMongoRestore()
+ log.SetWriter(&buff)
+
+ Convey("running CreateAllIntents should succeed", func() {
+ ddl, err := newActualPath("testdata/testdirs/")
+ So(err, ShouldBeNil)
+ So(mr.CreateAllIntents(ddl), ShouldBeNil)
+ mr.manager.Finalize(intents.Legacy)
+
+ Convey("and reading the intents should show alphabetical order", func() {
+ i0 := mr.manager.Pop()
+ So(i0.DB, ShouldEqual, "db1")
+ So(i0.C, ShouldEqual, "c1")
+ i1 := mr.manager.Pop()
+ So(i1.DB, ShouldEqual, "db1")
+ So(i1.C, ShouldEqual, "c2")
+ i2 := mr.manager.Pop()
+ So(i2.DB, ShouldEqual, "db1")
+ So(i2.C, ShouldEqual, "c3")
+ i3 := mr.manager.Pop()
+ So(i3.DB, ShouldEqual, "db2")
+ So(i3.C, ShouldEqual, "c1")
+ i4 := mr.manager.Pop()
+ So(i4, ShouldBeNil)
+
+ Convey("with all the proper metadata + bson merges", func() {
+ So(i0.Location, ShouldNotEqual, "")
+ So(i0.MetadataLocation, ShouldNotEqual, "")
+ So(i1.Location, ShouldNotEqual, "")
+ So(i1.MetadataLocation, ShouldEqual, "") //no metadata for this file
+ So(i2.Location, ShouldNotEqual, "")
+ So(i2.MetadataLocation, ShouldNotEqual, "")
+ So(i3.Location, ShouldNotEqual, "")
+ So(i3.MetadataLocation, ShouldEqual, "") //no metadata for this file
+
+ Convey("and skipped files all present in the logs", func() {
+ logs := buff.String()
+ So(strings.Contains(logs, "badfile.txt"), ShouldEqual, true)
+ So(strings.Contains(logs, "baddir"), ShouldEqual, true)
+ So(strings.Contains(logs, "c2.txt"), ShouldEqual, true)
+ })
+ })
+ })
+ })
+ })
+}
+
+func TestCreateIntentsForDB(t *testing.T) {
+ // This test creates intents based on the test file tree:
+ // db1
+ // db1/baddir
+ // db1/baddir/out.bson
+ // db1/c1.bson
+ // db1/c1.metadata.json
+ // db1/c2.bson
+ // db1/c3.bson
+ // db1/c3.metadata.json
+
+ var mr *MongoRestore
+ var buff bytes.Buffer
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a test MongoRestore", t, func() {
+ mr = newMongoRestore()
+ log.SetWriter(&buff)
+
+ Convey("running CreateIntentsForDB should succeed", func() {
+ ddl, err := newActualPath("testdata/testdirs/db1")
+ So(err, ShouldBeNil)
+ err = mr.CreateIntentsForDB("myDB", ddl)
+ So(err, ShouldBeNil)
+ mr.manager.Finalize(intents.Legacy)
+
+ Convey("and reading the intents should show alphabetical order", func() {
+ i0 := mr.manager.Pop()
+ So(i0.C, ShouldEqual, "c1")
+ i1 := mr.manager.Pop()
+ So(i1.C, ShouldEqual, "c2")
+ i2 := mr.manager.Pop()
+ So(i2.C, ShouldEqual, "c3")
+ i3 := mr.manager.Pop()
+ So(i3, ShouldBeNil)
+
+ Convey("and all intents should have the supplied db name", func() {
+ So(i0.DB, ShouldEqual, "myDB")
+ So(i1.DB, ShouldEqual, "myDB")
+ So(i2.DB, ShouldEqual, "myDB")
+ })
+
+ Convey("with all the proper metadata + bson merges", func() {
+ So(i0.Location, ShouldNotEqual, "")
+ So(i0.MetadataLocation, ShouldNotEqual, "")
+ So(i1.Location, ShouldNotEqual, "")
+ So(i1.MetadataLocation, ShouldEqual, "") //no metadata for this file
+ So(i2.Location, ShouldNotEqual, "")
+ So(i2.MetadataLocation, ShouldNotEqual, "")
+
+ Convey("and skipped files all present in the logs", func() {
+ logs := buff.String()
+ So(strings.Contains(logs, "baddir"), ShouldEqual, true)
+ })
+ })
+ })
+ })
+ })
+}
+
+func TestCreateIntentsRenamed(t *testing.T) {
+ Convey("With a test MongoRestore", t, func() {
+ mr := newMongoRestore()
+ mr.renamer, _ = ns.NewRenamer([]string{"db1.*"}, []string{"db4.test.*"})
+
+ Convey("running CreateAllIntents should succeed", func() {
+ ddl, err := newActualPath("testdata/testdirs/")
+ So(err, ShouldBeNil)
+ So(mr.CreateAllIntents(ddl), ShouldBeNil)
+ mr.manager.Finalize(intents.Legacy)
+
+ Convey("and reading the intents should show new collection names", func() {
+ i0 := mr.manager.Pop()
+ So(i0.C, ShouldEqual, "test.c1")
+ i1 := mr.manager.Pop()
+ So(i1.C, ShouldEqual, "test.c2")
+ i2 := mr.manager.Pop()
+ So(i2.C, ShouldEqual, "test.c3")
+ i3 := mr.manager.Pop()
+ So(i3.C, ShouldEqual, "c1")
+ i4 := mr.manager.Pop()
+ So(i4, ShouldBeNil)
+
+ Convey("and intents should have the renamed db", func() {
+ So(i0.DB, ShouldEqual, "db4")
+ So(i1.DB, ShouldEqual, "db4")
+ So(i2.DB, ShouldEqual, "db4")
+ So(i3.DB, ShouldEqual, "db2")
+ })
+ })
+ })
+ })
+}
+
+func TestHandlingBSON(t *testing.T) {
+ var mr *MongoRestore
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a test MongoRestore", t, func() {
+ mr = newMongoRestore()
+
+ Convey("with a target path to a bson file instead of a directory", func() {
+ err := mr.handleBSONInsteadOfDirectory("testdata/testdirs/db1/c2.bson")
+ So(err, ShouldBeNil)
+
+ Convey("the proper DB and Coll should be inferred", func() {
+ So(mr.NSOptions.DB, ShouldEqual, "db1")
+ So(mr.NSOptions.Collection, ShouldEqual, "c2")
+ })
+ })
+
+ Convey("but pre-existing settings should not be overwritten", func() {
+ mr.NSOptions.DB = "a"
+
+ Convey("either collection settings", func() {
+ mr.NSOptions.Collection = "b"
+ err := mr.handleBSONInsteadOfDirectory("testdata/testdirs/db1/c1.bson")
+ So(err, ShouldBeNil)
+ So(mr.NSOptions.DB, ShouldEqual, "a")
+ So(mr.NSOptions.Collection, ShouldEqual, "b")
+ })
+
+ Convey("or db settings", func() {
+ err := mr.handleBSONInsteadOfDirectory("testdata/testdirs/db1/c1.bson")
+ So(err, ShouldBeNil)
+ So(mr.NSOptions.DB, ShouldEqual, "a")
+ So(mr.NSOptions.Collection, ShouldEqual, "c1")
+ })
+ })
+ })
+}
+
+func TestCreateIntentsForCollection(t *testing.T) {
+ var mr *MongoRestore
+ var buff bytes.Buffer
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a test MongoRestore", t, func() {
+ buff = bytes.Buffer{}
+ mr = &MongoRestore{
+ manager: intents.NewIntentManager(),
+ ToolOptions: &commonOpts.ToolOptions{},
+ InputOptions: &InputOptions{},
+ }
+ log.SetWriter(&buff)
+
+ Convey("running CreateIntentForCollection on a file without metadata", func() {
+ ddl, err := newActualPath(util.ToUniversalPath("testdata/testdirs/db1/c2.bson"))
+ So(err, ShouldBeNil)
+ err = mr.CreateIntentForCollection("myDB", "myC", ddl)
+ So(err, ShouldBeNil)
+ mr.manager.Finalize(intents.Legacy)
+
+ Convey("should create one intent with 'myDb' and 'myC' fields", func() {
+ i0 := mr.manager.Pop()
+ So(i0, ShouldNotBeNil)
+ So(i0.DB, ShouldEqual, "myDB")
+ So(i0.C, ShouldEqual, "myC")
+ ddl, err := newActualPath(util.ToUniversalPath("testdata/testdirs/db1/c2.bson"))
+ So(err, ShouldBeNil)
+ So(i0.Location, ShouldEqual, ddl.Path())
+ i1 := mr.manager.Pop()
+ So(i1, ShouldBeNil)
+
+ Convey("and no Metadata path", func() {
+ So(i0.MetadataLocation, ShouldEqual, "")
+ logs := buff.String()
+ So(strings.Contains(logs, "without metadata"), ShouldEqual, true)
+ })
+ })
+ })
+
+ Convey("running CreateIntentForCollection on a file *with* metadata", func() {
+ ddl, err := newActualPath(util.ToUniversalPath("testdata/testdirs/db1/c1.bson"))
+ So(err, ShouldBeNil)
+ err = mr.CreateIntentForCollection("myDB", "myC", ddl)
+ So(err, ShouldBeNil)
+ mr.manager.Finalize(intents.Legacy)
+
+ Convey("should create one intent with 'myDb' and 'myC' fields", func() {
+ i0 := mr.manager.Pop()
+ So(i0, ShouldNotBeNil)
+ So(i0.DB, ShouldEqual, "myDB")
+ So(i0.C, ShouldEqual, "myC")
+ So(i0.Location, ShouldEqual, util.ToUniversalPath("testdata/testdirs/db1/c1.bson"))
+ i1 := mr.manager.Pop()
+ So(i1, ShouldBeNil)
+
+ Convey("and a set Metadata path", func() {
+ So(i0.MetadataLocation, ShouldEqual, util.ToUniversalPath("testdata/testdirs/db1/c1.metadata.json"))
+ logs := buff.String()
+ So(strings.Contains(logs, "found metadata"), ShouldEqual, true)
+ })
+ })
+ })
+
+ Convey("running CreateIntentForCollection on a non-existent file", func() {
+ _, err := newActualPath("aaaaaaaaaaaaaa.bson")
+ Convey("should fail", func() {
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("running CreateIntentForCollection on a directory", func() {
+ ddl, err := newActualPath("testdata")
+ So(err, ShouldBeNil)
+ err = mr.CreateIntentForCollection(
+ "myDB", "myC", ddl)
+
+ Convey("should fail", func() {
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ Convey("running CreateIntentForCollection on non-bson file", func() {
+ ddl, err := newActualPath("testdata/testdirs/db1/c1.metadata.json")
+ So(err, ShouldBeNil)
+ err = mr.CreateIntentForCollection(
+ "myDB", "myC", ddl)
+
+ Convey("should fail", func() {
+ So(err, ShouldNotBeNil)
+ })
+ })
+
+ })
+}
diff --git a/src/mongo/gotools/mongorestore/main/mongorestore.go b/src/mongo/gotools/mongorestore/main/mongorestore.go
new file mode 100644
index 00000000000..7b106029eca
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/main/mongorestore.go
@@ -0,0 +1,120 @@
+// Main package for the mongorestore tool.
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/signals"
+ "github.com/mongodb/mongo-tools/common/util"
+ "github.com/mongodb/mongo-tools/mongorestore"
+)
+
+func main() {
+ // initialize command-line opts
+ opts := options.New("mongorestore", mongorestore.Usage,
+ options.EnabledOptions{Auth: true, Connection: true})
+ nsOpts := &mongorestore.NSOptions{}
+ opts.AddOptions(nsOpts)
+ inputOpts := &mongorestore.InputOptions{}
+ opts.AddOptions(inputOpts)
+ outputOpts := &mongorestore.OutputOptions{}
+ opts.AddOptions(outputOpts)
+
+ extraArgs, err := opts.Parse()
+ if err != nil {
+ log.Logvf(log.Always, "error parsing command line options: %v", err)
+ log.Logvf(log.Always, "try 'mongorestore --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ // Allow the db connector to fall back onto the current database when no
+ // auth database is given; the standard -d/-c options go into nsOpts now
+ opts.Namespace = &options.Namespace{DB: nsOpts.DB}
+
+ // print help or version info, if specified
+ if opts.PrintHelp(false) {
+ return
+ }
+
+ if opts.PrintVersion() {
+ return
+ }
+
+ log.SetVerbosity(opts.Verbosity)
+
+ targetDir, err := getTargetDirFromArgs(extraArgs, inputOpts.Directory)
+ if err != nil {
+ log.Logvf(log.Always, "%v", err)
+ log.Logvf(log.Always, "try 'mongorestore --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+ targetDir = util.ToUniversalPath(targetDir)
+
+ // connect directly, unless a replica set name is explicitly specified
+ _, setName := util.ParseConnectionString(opts.Host)
+ opts.Direct = (setName == "")
+ opts.ReplicaSetName = setName
+
+ provider, err := db.NewSessionProvider(*opts)
+ if err != nil {
+ log.Logvf(log.Always, "error connecting to host: %v", err)
+ os.Exit(util.ExitError)
+ }
+ provider.SetBypassDocumentValidation(outputOpts.BypassDocumentValidation)
+
+ // disable TCP timeouts for restore jobs
+ provider.SetFlags(db.DisableSocketTimeout)
+ restore := mongorestore.MongoRestore{
+ ToolOptions: opts,
+ OutputOptions: outputOpts,
+ InputOptions: inputOpts,
+ NSOptions: nsOpts,
+ TargetDirectory: targetDir,
+ SessionProvider: provider,
+ }
+
+ finishedChan := signals.HandleWithInterrupt(restore.HandleInterrupt)
+ defer close(finishedChan)
+
+ if err = restore.Restore(); err != nil {
+ log.Logvf(log.Always, "Failed: %v", err)
+ if err == util.ErrTerminated {
+ os.Exit(util.ExitKill)
+ }
+ os.Exit(util.ExitError)
+ }
+}
+
+// getTargetDirFromArgs handles the logic and error cases of figuring out
+// the target restore directory.
+func getTargetDirFromArgs(extraArgs []string, dirFlag string) (string, error) {
+ // This logic is in a switch statement so that the rules are understandable.
+ // We start by handling error cases, and then handle the different ways the target
+ // directory can be legally set.
+ switch {
+ case len(extraArgs) > 1:
+ // error on cases when there are too many positional arguments
+ return "", fmt.Errorf("too many positional arguments")
+
+ case dirFlag != "" && len(extraArgs) > 0:
+ // error when positional arguments and --dir are used
+ return "", fmt.Errorf(
+ "cannot use both --dir and a positional argument to set the target directory")
+
+ case len(extraArgs) == 1:
+ // a nice, simple case where one argument is given, so we use it
+ return extraArgs[0], nil
+
+ case dirFlag != "":
+ // if we have no extra args and a --dir flag, use the --dir flag
+ log.Logv(log.Info, "using --dir flag instead of arguments")
+ return dirFlag, nil
+
+ default:
+ return "", nil
+ }
+}
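+
+// exampleTargetDirPrecedence is an illustrative sketch, not part of the
+// original tool: it demonstrates the precedence rules implemented above.
+func exampleTargetDirPrecedence() {
+ dir, _ := getTargetDirFromArgs([]string{"dump/"}, "") // positional argument wins
+ fmt.Println(dir) // dump/
+ dir, _ = getTargetDirFromArgs(nil, "dump/") // --dir is used when no positional argument is given
+ fmt.Println(dir) // dump/
+ _, err := getTargetDirFromArgs([]string{"dump/"}, "other/") // mixing both is an error
+ fmt.Println(err)
+}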
diff --git a/src/mongo/gotools/mongorestore/metadata.go b/src/mongo/gotools/mongorestore/metadata.go
new file mode 100644
index 00000000000..e16f63d991f
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/metadata.go
@@ -0,0 +1,524 @@
+package mongorestore
+
+import (
+ "fmt"
+
+ "github.com/mongodb/mongo-tools/common"
+ "github.com/mongodb/mongo-tools/common/bsonutil"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/intents"
+ "github.com/mongodb/mongo-tools/common/json"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// Specially treated restore collection types.
+const (
+ Users = "users"
+ Roles = "roles"
+)
+
+// struct for working with auth versions
+type authVersionPair struct {
+ // Dump is the auth version of the users/roles collection files in the target dump directory
+ Dump int
+ // Server is the auth version of the connected MongoDB server
+ Server int
+}
+
+// Metadata holds information about a collection's options and indexes.
+type Metadata struct {
+ Options bson.D `json:"options,omitempty"`
+ Indexes []IndexDocument `json:"indexes"`
+}
+
+// this struct is used to read in the options of a set of indexes
+type metaDataMapIndex struct {
+ Indexes []bson.M `json:"indexes"`
+}
+
+// IndexDocument holds information about a collection's index.
+type IndexDocument struct {
+ Options bson.M `bson:",inline"`
+ Key bson.D `bson:"key"`
+}
+
+// MetadataFromJSON takes a slice of JSON bytes and unmarshals them into usable
+// collection options and indexes for restoring collections.
+func (restore *MongoRestore) MetadataFromJSON(jsonBytes []byte) (bson.D, []IndexDocument, error) {
+ if len(jsonBytes) == 0 {
+ // skip metadata parsing if the file is empty
+ return nil, nil, nil
+ }
+
+ meta := &Metadata{}
+
+ err := json.Unmarshal(jsonBytes, meta)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // first get the ordered key information for each index,
+ // then merge it with a set of options stored as a map
+ metaAsMap := metaDataMapIndex{}
+ err = json.Unmarshal(jsonBytes, &metaAsMap)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error unmarshalling metadata as map: %v", err)
+ }
+ for i := range meta.Indexes {
+ // remove "key" from the map so we can decode it properly later
+ delete(metaAsMap.Indexes[i], "key")
+
+ // parse extra index fields
+ meta.Indexes[i].Options = metaAsMap.Indexes[i]
+ if err := bsonutil.ConvertJSONDocumentToBSON(meta.Indexes[i].Options); err != nil {
+ return nil, nil, fmt.Errorf("extended json error: %v", err)
+ }
+
+ // parse the values of the index keys, so we can support extended json
+ for pos, field := range meta.Indexes[i].Key {
+ meta.Indexes[i].Key[pos].Value, err = bsonutil.ParseJSONValue(field.Value)
+ if err != nil {
+ return nil, nil, fmt.Errorf("extended json in '%v' field: %v", field.Name, err)
+ }
+ }
+ }
+
+ // parse the values of options fields, to support extended json
+ meta.Options, err = bsonutil.GetExtendedBsonD(meta.Options)
+ if err != nil {
+ return nil, nil, fmt.Errorf("extended json in 'options': %v", err)
+ }
+
+ return meta.Options, meta.Indexes, nil
+}
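+
+// exampleMetadataFromJSON is an illustrative sketch, not part of the original
+// tool: the payload below is an assumed minimal metadata.json of the shape
+// parsed above, with one collection option and a single _id index.
+func exampleMetadataFromJSON(restore *MongoRestore) (bson.D, []IndexDocument, error) {
+ jsonBytes := []byte(`{
+ "options": { "capped": true, "size": 4096 },
+ "indexes": [ { "v": 1, "key": { "_id": 1 }, "name": "_id_", "ns": "test.foo" } ]
+ }`)
+ return restore.MetadataFromJSON(jsonBytes)
+}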
+
+// LoadIndexesFromBSON reads indexes from the index BSON files and
+// caches them in the MongoRestore object.
+func (restore *MongoRestore) LoadIndexesFromBSON() error {
+
+ dbCollectionIndexes := make(map[string]collectionIndexes)
+
+ for _, dbname := range restore.manager.SystemIndexDBs() {
+ dbCollectionIndexes[dbname] = make(collectionIndexes)
+ intent := restore.manager.SystemIndexes(dbname)
+ err := intent.BSONFile.Open()
+ if err != nil {
+ return err
+ }
+ defer intent.BSONFile.Close()
+ bsonSource := db.NewDecodedBSONSource(db.NewBSONSource(intent.BSONFile))
+ defer bsonSource.Close()
+
+ // iterate over stored indexes, saving all that match the collection
+ indexDocument := &IndexDocument{}
+ for bsonSource.Next(indexDocument) {
+ namespace := indexDocument.Options["ns"].(string)
+ dbCollectionIndexes[dbname][stripDBFromNS(namespace)] =
+ append(dbCollectionIndexes[dbname][stripDBFromNS(namespace)], *indexDocument)
+ }
+ if err := bsonSource.Err(); err != nil {
+ return fmt.Errorf("error scanning system.indexes: %v", err)
+ }
+ }
+ restore.dbCollectionIndexes = dbCollectionIndexes
+ return nil
+}
+
+func stripDBFromNS(ns string) string {
+ _, c := common.SplitNamespace(ns)
+ return c
+}
+
+// CollectionExists returns true if the given intent's collection exists.
+func (restore *MongoRestore) CollectionExists(intent *intents.Intent) (bool, error) {
+ restore.knownCollectionsMutex.Lock()
+ defer restore.knownCollectionsMutex.Unlock()
+
+ // make sure the map exists
+ if restore.knownCollections == nil {
+ restore.knownCollections = map[string][]string{}
+ }
+
+ // first check if we haven't done listCollections for this database already
+ if restore.knownCollections[intent.DB] == nil {
+ // if the database name isn't in the cache, grab collection
+ // names from the server
+ session, err := restore.SessionProvider.GetSession()
+ if err != nil {
+ return false, fmt.Errorf("error establishing connection: %v", err)
+ }
+ defer session.Close()
+ collections, err := session.DB(intent.DB).CollectionNames()
+ if err != nil {
+ return false, err
+ }
+ // update the cache
+ restore.knownCollections[intent.DB] = collections
+ }
+
+ // now check the cache for the given collection name
+ exists := util.StringSliceContains(restore.knownCollections[intent.DB], intent.C)
+ return exists, nil
+}
+
+// CreateIndexes takes in an intent and an array of index documents and
+// attempts to create them using the createIndexes command. If that command
+// fails, we fall back to individual index creation.
+func (restore *MongoRestore) CreateIndexes(intent *intents.Intent, indexes []IndexDocument) error {
+ // first, sanitize the indexes
+ for _, index := range indexes {
+ // update the namespace of the index before inserting
+ index.Options["ns"] = intent.Namespace()
+
+ // check for length violations before building the command
+ fullIndexName := fmt.Sprintf("%v.$%v", index.Options["ns"], index.Options["name"])
+ if len(fullIndexName) > 127 {
+ return fmt.Errorf(
+ "cannot restore index with namespace '%v': "+
+ "namespace is too long (max size is 127 bytes)", fullIndexName)
+ }
+
+ // remove the index version, forcing an update,
+ // unless we specifically want to keep it
+ if !restore.OutputOptions.KeepIndexVersion {
+ delete(index.Options, "v")
+ }
+ }
+
+ session, err := restore.SessionProvider.GetSession()
+ if err != nil {
+ return fmt.Errorf("error establishing connection: %v", err)
+ }
+ session.SetSafe(&mgo.Safe{})
+ defer session.Close()
+
+ // then attempt the createIndexes command
+ rawCommand := bson.D{
+ {"createIndexes", intent.C},
+ {"indexes", indexes},
+ }
+ results := bson.M{}
+ err = session.DB(intent.DB).Run(rawCommand, &results)
+ if err == nil {
+ return nil
+ }
+ if err.Error() != "no such cmd: createIndexes" {
+ return fmt.Errorf("createIndex error: %v", err)
+ }
+
+ // if we're here, the connected server does not support the command, so we fall back
+ log.Logv(log.Info, "\tcreateIndexes command not supported, attemping legacy index insertion")
+ for _, idx := range indexes {
+ log.Logvf(log.Info, "\tmanually creating index %v", idx.Options["name"])
+ err = restore.LegacyInsertIndex(intent, idx)
+ if err != nil {
+ return fmt.Errorf("error creating index %v: %v", idx.Options["name"], err)
+ }
+ }
+ return nil
+}
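+
+// exampleCreateIndexesCommand is an illustrative sketch, not part of the
+// original tool: it shows the shape of the command document built above for
+// a hypothetical "test.foo" collection with one ascending index on "a".
+func exampleCreateIndexesCommand() bson.D {
+ return bson.D{
+ {"createIndexes", "foo"},
+ {"indexes", []IndexDocument{{
+ Options: bson.M{"name": "a_1", "ns": "test.foo"},
+ Key: bson.D{{"a", 1}},
+ }}},
+ }
+}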
+
+// LegacyInsertIndex takes in an intent and an index document and attempts to
+// create the index on the "system.indexes" collection.
+func (restore *MongoRestore) LegacyInsertIndex(intent *intents.Intent, index IndexDocument) error {
+ session, err := restore.SessionProvider.GetSession()
+ if err != nil {
+ return fmt.Errorf("error establishing connection: %v", err)
+ }
+ defer session.Close()
+
+ // overwrite safety to make sure we catch errors
+ session.SetSafe(&mgo.Safe{})
+ indexCollection := session.DB(intent.DB).C("system.indexes")
+ err = indexCollection.Insert(index)
+ if err != nil {
+ return fmt.Errorf("insert error: %v", err)
+ }
+
+ return nil
+}
+
+// CreateCollection creates the collection specified in the intent with the
+// given options.
+func (restore *MongoRestore) CreateCollection(intent *intents.Intent, options bson.D) error {
+ command := append(bson.D{{"create", intent.C}}, options...)
+
+ session, err := restore.SessionProvider.GetSession()
+ if err != nil {
+ return fmt.Errorf("error establishing connection: %v", err)
+ }
+ defer session.Close()
+
+ res := bson.M{}
+ err = session.DB(intent.DB).Run(command, &res)
+ if err != nil {
+ return fmt.Errorf("error running create command: %v", err)
+ }
+ if util.IsFalsy(res["ok"]) {
+ return fmt.Errorf("create command: %v", res["errmsg"])
+ }
+ return nil
+}
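+
+// exampleCreateCommand is an illustrative sketch, not part of the original
+// tool: it shows the command document assembled above for a hypothetical
+// capped collection; in practice the options come from metadata.json.
+func exampleCreateCommand() bson.D {
+ options := bson.D{{"capped", true}, {"size", 4096}}
+ return append(bson.D{{"create", "foo"}}, options...)
+}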
+
+// RestoreUsersOrRoles accepts a users intent and a roles intent, and restores
+// them via _mergeAuthzCollections. Either or both can be nil; if both are
+// nil, nothing is done.
+func (restore *MongoRestore) RestoreUsersOrRoles(users, roles *intents.Intent) error {
+
+ type loopArg struct {
+ intent *intents.Intent
+ intentType string
+ mergeParamName string
+ tempCollectionName string
+ }
+
+ if users == nil && roles == nil {
+ return nil
+ }
+
+ if users != nil && roles != nil && users.DB != roles.DB {
+ return fmt.Errorf("can't restore users and roles to different databases, %v and %v", users.DB, roles.DB)
+ }
+
+ args := []loopArg{}
+ mergeArgs := bson.D{}
+ userTargetDB := ""
+
+ if users != nil {
+ args = append(args, loopArg{users, "users", "tempUsersCollection", restore.OutputOptions.TempUsersColl})
+ }
+ if roles != nil {
+ args = append(args, loopArg{roles, "roles", "tempRolesCollection", restore.OutputOptions.TempRolesColl})
+ }
+
+ session, err := restore.SessionProvider.GetSession()
+ if err != nil {
+ return fmt.Errorf("error establishing connection: %v", err)
+ }
+ defer session.Close()
+
+ // For each of the users and roles intents:
+ // build up the mergeArgs component of the _mergeAuthzCollections command
+ // upload the BSONFile to a temporary collection
+ for _, arg := range args {
+
+ if arg.intent.Size == 0 {
+ // MongoDB complains if we try to remove a non-existent collection, so we
+ // just skip auth collections with empty .bson files to avoid gnarly logic later on.
+ log.Logvf(log.Always, "%v file '%v' is empty; skipping %v restoration", arg.intentType, arg.intent.Location, arg.intentType)
+ continue
+ }
+ log.Logvf(log.Always, "restoring %v from %v", arg.intentType, arg.intent.Location)
+ mergeArgs = append(mergeArgs, bson.DocElem{
+ Name: arg.mergeParamName,
+ Value: "admin." + arg.tempCollectionName,
+ })
+
+ err := arg.intent.BSONFile.Open()
+ if err != nil {
+ return err
+ }
+ defer arg.intent.BSONFile.Close()
+ bsonSource := db.NewDecodedBSONSource(db.NewBSONSource(arg.intent.BSONFile))
+ defer bsonSource.Close()
+
+ tempCollectionNameExists, err := restore.CollectionExists(&intents.Intent{DB: "admin", C: arg.tempCollectionName})
+ if err != nil {
+ return err
+ }
+ if tempCollectionNameExists {
+ log.Logvf(log.Info, "dropping preexisting temporary collection admin.%v", arg.tempCollectionName)
+ err = session.DB("admin").C(arg.tempCollectionName).DropCollection()
+ if err != nil {
+ return fmt.Errorf("error dropping preexisting temporary collection %v: %v", arg.tempCollectionName, err)
+ }
+ }
+
+ log.Logvf(log.DebugLow, "restoring %v to temporary collection", arg.intentType)
+ if _, err = restore.RestoreCollectionToDB("admin", arg.tempCollectionName, bsonSource, arg.intent.BSONFile, 0); err != nil {
+ return fmt.Errorf("error restoring %v: %v", arg.intentType, err)
+ }
+
+ // make sure we always drop the temporary collection
+ defer func() {
+ session, e := restore.SessionProvider.GetSession()
+ if e != nil {
+ // log the error here, since returning it from a deferred function would mask other errors
+ log.Logvf(log.Info, "error establishing connection to drop temporary collection admin.%v: %v", arg.tempCollectionName, e)
+ return
+ }
+ defer session.Close()
+ log.Logvf(log.DebugHigh, "dropping temporary collection admin.%v", arg.tempCollectionName)
+ e = session.DB("admin").C(arg.tempCollectionName).DropCollection()
+ if e != nil {
+ log.Logvf(log.Info, "error dropping temporary collection admin.%v: %v", arg.tempCollectionName, e)
+ }
+ }()
+ userTargetDB = arg.intent.DB
+ }
+
+ if userTargetDB == "admin" {
+ // _mergeAuthzCollections uses an empty db string as a sentinel for "all databases"
+ userTargetDB = ""
+ }
+
+ // we have to manually convert mgo's safety to a writeconcern object
+ writeConcern := bson.M{}
+ if restore.safety == nil {
+ writeConcern["w"] = 0
+ } else {
+ if restore.safety.WMode != "" {
+ writeConcern["w"] = restore.safety.WMode
+ } else {
+ writeConcern["w"] = restore.safety.W
+ }
+ }
+
+ command := bsonutil.MarshalD{}
+ command = append(command,
+ bson.DocElem{Name: "_mergeAuthzCollections", Value: 1})
+ command = append(command,
+ mergeArgs...)
+ command = append(command,
+ bson.DocElem{Name: "drop", Value: restore.OutputOptions.Drop},
+ bson.DocElem{Name: "writeConcern", Value: writeConcern},
+ bson.DocElem{Name: "db", Value: userTargetDB})
+
+ log.Logvf(log.DebugLow, "merging users/roles from temp collections")
+ res := bson.M{}
+ err = session.Run(command, &res)
+ if err != nil {
+ return fmt.Errorf("error running merge command: %v", err)
+ }
+ if util.IsFalsy(res["ok"]) {
+ return fmt.Errorf("_mergeAuthzCollections command: %v", res["errmsg"])
+ }
+ return nil
+}
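+
+// exampleMergeAuthzCommand is an illustrative sketch, not part of the
+// original tool: it shows the final _mergeAuthzCollections command document
+// assembled above, assuming both users and roles are restored with --drop;
+// the temp collection names and write concern are hypothetical.
+func exampleMergeAuthzCommand() bsonutil.MarshalD {
+ return bsonutil.MarshalD{
+ {Name: "_mergeAuthzCollections", Value: 1},
+ {Name: "tempUsersCollection", Value: "admin.tempusers"},
+ {Name: "tempRolesCollection", Value: "admin.temproles"},
+ {Name: "drop", Value: true},
+ {Name: "writeConcern", Value: bson.M{"w": "majority"}},
+ {Name: "db", Value: ""}, // empty string means "all databases"
+ }
+}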
+
+// GetDumpAuthVersion reads the admin.system.version collection in the dump directory
+// to determine the authentication version of the files in the dump. If that collection is not
+// present in the dump, we try to infer the authentication version based on its absence.
+// Returns the authentication version number and any errors that occur.
+func (restore *MongoRestore) GetDumpAuthVersion() (int, error) {
+ // first handle the case where we have no auth version
+ intent := restore.manager.AuthVersion()
+ if intent == nil {
+ if restore.InputOptions.RestoreDBUsersAndRoles {
+ // If we are using --restoreDbUsersAndRoles, we cannot guarantee an
+ // $admin.system.version collection from a 2.6 server,
+ // so we can assume up to version 3.
+ log.Logvf(log.Always, "no system.version bson file found in '%v' database dump", restore.NSOptions.DB)
+ log.Logv(log.Always, "warning: assuming users and roles collections are of auth version 3")
+ log.Logv(log.Always, "if users are from an earlier version of MongoDB, they may not restore properly")
+ return 3, nil
+ }
+ log.Logv(log.Info, "no system.version bson file found in dump")
+ log.Logv(log.Always, "assuming users in the dump directory are from <= 2.4 (auth version 1)")
+ return 1, nil
+ }
+
+ err := intent.BSONFile.Open()
+ if err != nil {
+ return 0, err
+ }
+ defer intent.BSONFile.Close()
+ bsonSource := db.NewDecodedBSONSource(db.NewBSONSource(intent.BSONFile))
+ defer bsonSource.Close()
+
+ versionDoc := struct {
+ CurrentVersion int `bson:"currentVersion"`
+ }{}
+ bsonSource.Next(&versionDoc)
+ if err := bsonSource.Err(); err != nil {
+ return 0, fmt.Errorf("error reading version bson file %v: %v", intent.Location, err)
+ }
+ authVersion := versionDoc.CurrentVersion
+ if authVersion == 0 {
+ // 0 is not a possible valid version number, so this can only indicate bad input
+ return 0, fmt.Errorf("system.version bson file does not have 'currentVersion' field")
+ }
+ return authVersion, nil
+}
+
+// ValidateAuthVersions compares the authentication version of the dump files and the
+// authentication version of the target server, and returns an error if the versions
+// are incompatible.
+func (restore *MongoRestore) ValidateAuthVersions() error {
+ if restore.authVersions.Dump == 2 || restore.authVersions.Dump == 4 {
+ return fmt.Errorf(
+ "cannot restore users and roles from a dump file with auth version %v; "+
+ "finish the upgrade or roll it back", restore.authVersions.Dump)
+ }
+ if restore.authVersions.Server == 2 || restore.authVersions.Server == 4 {
+ return fmt.Errorf(
+ "cannot restore users and roles to a server with auth version %v; "+
+ "finish the upgrade or roll it back", restore.authVersions.Server)
+ }
+ switch restore.authVersions {
+ case authVersionPair{3, 5}:
+ log.Logv(log.Info,
+ "restoring users and roles of auth version 3 to a server of auth version 5")
+ case authVersionPair{5, 5}:
+ log.Logv(log.Info,
+ "restoring users and roles of auth version 5 to a server of auth version 5")
+ case authVersionPair{3, 3}:
+ log.Logv(log.Info,
+ "restoring users and roles of auth version 3 to a server of auth version 3")
+ case authVersionPair{1, 1}:
+ log.Logv(log.Info,
+ "restoring users and roles of auth version 1 to a server of auth version 1")
+ case authVersionPair{1, 5}:
+ return fmt.Errorf("cannot restore users of auth version 1 to a server of auth version 5")
+ case authVersionPair{5, 3}:
+ return fmt.Errorf("cannot restore users of auth version 5 to a server of auth version 3")
+ case authVersionPair{1, 3}:
+ log.Logv(log.Info,
+ "restoring users and roles of auth version 1 to a server of auth version 3")
+ log.Logv(log.Always,
+ "users and roles will have to be updated with the authSchemaUpgrade command")
+ case authVersionPair{5, 1}:
+ fallthrough
+ case authVersionPair{3, 1}:
+ return fmt.Errorf(
+ "cannot restore users and roles dump file >= auth version 3 to a server of auth version 1")
+ default:
+ return fmt.Errorf("invalid auth pair: dump=%v, server=%v",
+ restore.authVersions.Dump, restore.authVersions.Server)
+ }
+ return nil
+}
+
+// ShouldRestoreUsersAndRoles returns true if mongorestore should go through
+// the process of restoring collections pertaining to authentication.
+func (restore *MongoRestore) ShouldRestoreUsersAndRoles() bool {
+ // If the user has done anything that would indicate the restoration
+ // of users and roles (i.e. used --restoreDbUsersAndRoles, -d admin, or
+ // is doing a full restore), then we check if users or roles BSON files
+ // actually exist in the dump dir. If they do, return true.
+ if restore.InputOptions.RestoreDBUsersAndRoles ||
+ restore.NSOptions.DB == "" ||
+ restore.NSOptions.DB == "admin" {
+ if restore.manager.Users() != nil || restore.manager.Roles() != nil {
+ return true
+ }
+ }
+ return false
+}
+
+// DropCollection drops the intent's collection.
+func (restore *MongoRestore) DropCollection(intent *intents.Intent) error {
+ session, err := restore.SessionProvider.GetSession()
+ if err != nil {
+ return fmt.Errorf("error establishing connection: %v", err)
+ }
+ defer session.Close()
+ err = session.DB(intent.DB).C(intent.C).DropCollection()
+ if err != nil {
+ return fmt.Errorf("error dropping collection: %v", err)
+ }
+ return nil
+}
diff --git a/src/mongo/gotools/mongorestore/metadata_test.go b/src/mongo/gotools/mongorestore/metadata_test.go
new file mode 100644
index 00000000000..b3b6018f8f2
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/metadata_test.go
@@ -0,0 +1,177 @@
+package mongorestore
+
+import (
+ "testing"
+
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/intents"
+ commonOpts "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+)
+
+const ExistsDB = "restore_collection_exists"
+
+func TestCollectionExists(t *testing.T) {
+
+ testutil.VerifyTestType(t, testutil.IntegrationTestType)
+
+ Convey("With a test mongorestore", t, func() {
+ ssl := testutil.GetSSLOptions()
+ auth := testutil.GetAuthOptions()
+ sessionProvider, err := db.NewSessionProvider(commonOpts.ToolOptions{
+ Connection: &commonOpts.Connection{
+ Host: "localhost",
+ Port: db.DefaultTestPort,
+ },
+ Auth: &auth,
+ SSL: &ssl,
+ })
+ So(err, ShouldBeNil)
+
+ restore := &MongoRestore{
+ SessionProvider: sessionProvider,
+ }
+
+ Convey("and some test data in a server", func() {
+ session, err := restore.SessionProvider.GetSession()
+ So(err, ShouldBeNil)
+ So(session.DB(ExistsDB).C("one").Insert(bson.M{}), ShouldBeNil)
+ So(session.DB(ExistsDB).C("two").Insert(bson.M{}), ShouldBeNil)
+ So(session.DB(ExistsDB).C("three").Insert(bson.M{}), ShouldBeNil)
+
+ Convey("collections that exist should return true", func() {
+ exists, err := restore.CollectionExists(&intents.Intent{DB: ExistsDB, C: "one"})
+ So(err, ShouldBeNil)
+ So(exists, ShouldBeTrue)
+ exists, err = restore.CollectionExists(&intents.Intent{DB: ExistsDB, C: "two"})
+ So(err, ShouldBeNil)
+ So(exists, ShouldBeTrue)
+ exists, err = restore.CollectionExists(&intents.Intent{DB: ExistsDB, C: "three"})
+ So(err, ShouldBeNil)
+ So(exists, ShouldBeTrue)
+
+ Convey("and those that do not exist should return false", func() {
+ exists, err = restore.CollectionExists(&intents.Intent{DB: ExistsDB, C: "four"})
+ So(err, ShouldBeNil)
+ So(exists, ShouldBeFalse)
+ })
+ })
+
+ Reset(func() {
+ session.DB(ExistsDB).DropDatabase()
+ })
+ })
+
+ Convey("and a fake cache should be used instead of the server when it exists", func() {
+ restore.knownCollections = map[string][]string{
+ ExistsDB: []string{"cats", "dogs", "snakes"},
+ }
+ exists, err := restore.CollectionExists(&intents.Intent{DB: ExistsDB, C: "dogs"})
+ So(err, ShouldBeNil)
+ So(exists, ShouldBeTrue)
+ exists, err = restore.CollectionExists(&intents.Intent{DB: ExistsDB, C: "two"})
+ So(err, ShouldBeNil)
+ So(exists, ShouldBeFalse)
+ })
+ })
+}
+
+func TestGetDumpAuthVersion(t *testing.T) {
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+ restore := &MongoRestore{}
+
+ Convey("With a test mongorestore", t, func() {
+ Convey("and no --restoreDbUsersAndRoles", func() {
+ restore = &MongoRestore{
+ InputOptions: &InputOptions{},
+ ToolOptions: &commonOpts.ToolOptions{},
+ NSOptions: &NSOptions{},
+ }
+ Convey("auth version 1 should be detected", func() {
+ restore.manager = intents.NewIntentManager()
+ version, err := restore.GetDumpAuthVersion()
+ So(err, ShouldBeNil)
+ So(version, ShouldEqual, 1)
+ })
+
+ Convey("auth version 3 should be detected", func() {
+ restore.manager = intents.NewIntentManager()
+ intent := &intents.Intent{
+ DB: "admin",
+ C: "system.version",
+ Location: "testdata/auth_version_3.bson",
+ }
+ intent.BSONFile = &realBSONFile{path: "testdata/auth_version_3.bson", intent: intent}
+ restore.manager.Put(intent)
+ version, err := restore.GetDumpAuthVersion()
+ So(err, ShouldBeNil)
+ So(version, ShouldEqual, 3)
+ })
+
+ Convey("auth version 5 should be detected", func() {
+ restore.manager = intents.NewIntentManager()
+ intent := &intents.Intent{
+ DB: "admin",
+ C: "system.version",
+ Location: "testdata/auth_version_5.bson",
+ }
+ intent.BSONFile = &realBSONFile{path: "testdata/auth_version_5.bson", intent: intent}
+ restore.manager.Put(intent)
+ version, err := restore.GetDumpAuthVersion()
+ So(err, ShouldBeNil)
+ So(version, ShouldEqual, 5)
+ })
+ })
+
+ Convey("using --restoreDbUsersAndRoles", func() {
+ restore = &MongoRestore{
+ InputOptions: &InputOptions{
+ RestoreDBUsersAndRoles: true,
+ },
+ ToolOptions: &commonOpts.ToolOptions{},
+ NSOptions: &NSOptions{
+ DB: "TestDB",
+ },
+ }
+
+ Convey("auth version 3 should be detected when no file exists", func() {
+ restore.manager = intents.NewIntentManager()
+ version, err := restore.GetDumpAuthVersion()
+ So(err, ShouldBeNil)
+ So(version, ShouldEqual, 3)
+ })
+
+ Convey("auth version 3 should be detected when a version 3 file exists", func() {
+ restore.manager = intents.NewIntentManager()
+ intent := &intents.Intent{
+ DB: "admin",
+ C: "system.version",
+ Location: "testdata/auth_version_3.bson",
+ }
+ intent.BSONFile = &realBSONFile{path: "testdata/auth_version_3.bson", intent: intent}
+ restore.manager.Put(intent)
+ version, err := restore.GetDumpAuthVersion()
+ So(err, ShouldBeNil)
+ So(version, ShouldEqual, 3)
+ })
+
+ Convey("auth version 5 should be detected", func() {
+ restore.manager = intents.NewIntentManager()
+ intent := &intents.Intent{
+ DB: "admin",
+ C: "system.version",
+ Location: "testdata/auth_version_5.bson",
+ }
+ intent.BSONFile = &realBSONFile{path: "testdata/auth_version_5.bson", intent: intent}
+ restore.manager.Put(intent)
+ version, err := restore.GetDumpAuthVersion()
+ So(err, ShouldBeNil)
+ So(version, ShouldEqual, 5)
+ })
+ })
+ })
+
+}
diff --git a/src/mongo/gotools/mongorestore/mongorestore.go b/src/mongo/gotools/mongorestore/mongorestore.go
new file mode 100644
index 00000000000..8be099e8de1
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/mongorestore.go
@@ -0,0 +1,540 @@
+// Package mongorestore writes BSON data to a MongoDB instance.
+package mongorestore
+
+import (
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/mongodb/mongo-tools/common/archive"
+ "github.com/mongodb/mongo-tools/common/auth"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/intents"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/progress"
+ "github.com/mongodb/mongo-tools/common/util"
+ "github.com/mongodb/mongo-tools/mongorestore/ns"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// MongoRestore is a container for the user-specified options and
+// internal state used for running mongorestore.
+type MongoRestore struct {
+ ToolOptions *options.ToolOptions
+ InputOptions *InputOptions
+ OutputOptions *OutputOptions
+ NSOptions *NSOptions
+
+ SessionProvider *db.SessionProvider
+
+ TargetDirectory string
+
+ // other internal state
+ manager *intents.Manager
+ safety *mgo.Safe
+ progressManager *progress.Manager
+
+ objCheck bool
+ oplogLimit bson.MongoTimestamp
+ isMongos bool
+ useWriteCommands bool
+ authVersions authVersionPair
+
+ // a map of database names to a list of collection names
+ knownCollections map[string][]string
+ knownCollectionsMutex sync.Mutex
+
+ renamer *ns.Renamer
+ includer *ns.Matcher
+ excluder *ns.Matcher
+
+ // indexes belonging to dbs and collections
+ dbCollectionIndexes map[string]collectionIndexes
+
+ archive *archive.Reader
+
+ // channel on which to notify if/when a termination signal is received
+ termChan chan struct{}
+
+ // for testing. If set, this value will be used instead of os.Stdin
+ stdin io.Reader
+}
+
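+// collectionIndexes maps collection names to the index documents to be
+// restored for that collection.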
+type collectionIndexes map[string][]IndexDocument
+
+// ParseAndValidateOptions returns a non-nil error if user-supplied options are invalid.
+func (restore *MongoRestore) ParseAndValidateOptions() error {
+ // restore.objCheck defaults to false and is enabled only when the user
+ // passes --objcheck, so the option package's flag defaults are not used here.
+ log.Logv(log.DebugHigh, "checking options")
+ if restore.InputOptions.Objcheck {
+ restore.objCheck = true
+ log.Logv(log.DebugHigh, "\tdumping with object check enabled")
+ } else {
+ log.Logv(log.DebugHigh, "\tdumping with object check disabled")
+ }
+
+ if restore.NSOptions.DB == "" && restore.NSOptions.Collection != "" {
+ return fmt.Errorf("cannot restore a collection without a specified database")
+ }
+
+ if restore.NSOptions.DB != "" {
+ if err := util.ValidateDBName(restore.NSOptions.DB); err != nil {
+ return fmt.Errorf("invalid db name: %v", err)
+ }
+ }
+ if restore.NSOptions.Collection != "" {
+ if err := util.ValidateCollectionGrammar(restore.NSOptions.Collection); err != nil {
+ return fmt.Errorf("invalid collection name: %v", err)
+ }
+ }
+ if restore.InputOptions.RestoreDBUsersAndRoles && restore.NSOptions.DB == "" {
+ return fmt.Errorf("cannot use --restoreDbUsersAndRoles without a specified database")
+ }
+ if restore.InputOptions.RestoreDBUsersAndRoles && restore.NSOptions.DB == "admin" {
+ return fmt.Errorf("cannot use --restoreDbUsersAndRoles with the admin database")
+ }
+
+ var err error
+ restore.isMongos, err = restore.SessionProvider.IsMongos()
+ if err != nil {
+ return err
+ }
+ if restore.isMongos {
+ log.Logv(log.DebugLow, "restoring to a sharded system")
+ }
+
+ if restore.InputOptions.OplogLimit != "" {
+ if !restore.InputOptions.OplogReplay {
+ return fmt.Errorf("cannot use --oplogLimit without --oplogReplay enabled")
+ }
+ restore.oplogLimit, err = ParseTimestampFlag(restore.InputOptions.OplogLimit)
+ if err != nil {
+ return fmt.Errorf("error parsing timestamp argument to --oplogLimit: %v", err)
+ }
+ }
+ if restore.InputOptions.OplogFile != "" {
+ if !restore.InputOptions.OplogReplay {
+ return fmt.Errorf("cannot use --oplogFile without --oplogReplay enabled")
+ }
+ if restore.InputOptions.Archive != "" {
+ return fmt.Errorf("cannot use --oplogFile with --archive specified")
+ }
+ }
+
+ // check if we are using a replica set and fall back to w=1 if we aren't (for <= 2.4)
+ nodeType, err := restore.SessionProvider.GetNodeType()
+ if err != nil {
+ return fmt.Errorf("error determining type of connected node: %v", err)
+ }
+
+ log.Logvf(log.DebugLow, "connected to node type: %v", nodeType)
+ restore.safety, err = db.BuildWriteConcern(restore.OutputOptions.WriteConcern, nodeType)
+ if err != nil {
+ return fmt.Errorf("error parsing write concern: %v", err)
+ }
+
+ // warn about deprecated namespace options superseded by --nsInclude/--nsExclude
+ if restore.NSOptions.DB != "" || restore.NSOptions.Collection != "" {
+ // these are only okay if restoring from a bson file
+ _, fileType := restore.getInfoFromFilename(restore.TargetDirectory)
+ if fileType != BSONFileType {
+ log.Logvf(log.Always, "the --db and --collection args should only be used when "+
+ "restoring from a BSON file. Other uses are deprecated and will not exist "+
+ "in the future; use --nsInclude instead")
+ }
+ }
+ if len(restore.NSOptions.ExcludedCollections) > 0 ||
+ len(restore.NSOptions.ExcludedCollectionPrefixes) > 0 {
+ log.Logvf(log.Always, "the --excludeCollections and --excludeCollectionPrefixes options "+
+ "are deprecated and will not exist in the future; use --nsExclude instead")
+ }
+ if restore.InputOptions.OplogReplay {
+ if len(restore.NSOptions.NSInclude) > 0 || restore.NSOptions.DB != "" {
+ return fmt.Errorf("cannot use --oplogReplay with includes specified")
+ }
+ if len(restore.NSOptions.NSExclude) > 0 || len(restore.NSOptions.ExcludedCollections) > 0 ||
+ len(restore.NSOptions.ExcludedCollectionPrefixes) > 0 {
+ return fmt.Errorf("cannot use --oplogReplay with excludes specified")
+ }
+ if len(restore.NSOptions.NSFrom) > 0 {
+ return fmt.Errorf("cannot use --oplogReplay with namespace renames specified")
+ }
+ }
+
+ includes := restore.NSOptions.NSInclude
+ if restore.NSOptions.DB != "" && restore.NSOptions.Collection != "" {
+ includes = append(includes, ns.Escape(restore.NSOptions.DB)+"."+
+ restore.NSOptions.Collection)
+ } else if restore.NSOptions.DB != "" {
+ includes = append(includes, ns.Escape(restore.NSOptions.DB)+".*")
+ }
+ if len(includes) == 0 {
+ includes = []string{"*"}
+ }
+ restore.includer, err = ns.NewMatcher(includes)
+ if err != nil {
+ return fmt.Errorf("invalid includes: %v", err)
+ }
+
+ if len(restore.NSOptions.ExcludedCollections) > 0 && restore.NSOptions.Collection != "" {
+ return fmt.Errorf("--collection is not allowed when --excludeCollection is specified")
+ }
+ if len(restore.NSOptions.ExcludedCollectionPrefixes) > 0 && restore.NSOptions.Collection != "" {
+ return fmt.Errorf("--collection is not allowed when --excludeCollectionsWithPrefix is specified")
+ }
+ excludes := restore.NSOptions.NSExclude
+ for _, col := range restore.NSOptions.ExcludedCollections {
+ excludes = append(excludes, "*."+ns.Escape(col))
+ }
+ for _, colPrefix := range restore.NSOptions.ExcludedCollectionPrefixes {
+ excludes = append(excludes, "*."+ns.Escape(colPrefix)+"*")
+ }
+ restore.excluder, err = ns.NewMatcher(excludes)
+ if err != nil {
+ return fmt.Errorf("invalid excludes: %v", err)
+ }
+
+ if len(restore.NSOptions.NSFrom) != len(restore.NSOptions.NSTo) {
+ return fmt.Errorf("--nsFrom and --nsTo arguments must be specified an equal number of times")
+ }
+ restore.renamer, err = ns.NewRenamer(restore.NSOptions.NSFrom, restore.NSOptions.NSTo)
+ if err != nil {
+ return fmt.Errorf("invalid renames: %v", err)
+ }
+
+ if restore.OutputOptions.NumInsertionWorkers < 0 {
+ return fmt.Errorf(
+ "cannot specify a negative number of insertion workers per collection")
+ }
+
+ // a single dash signals reading from stdin
+ if restore.TargetDirectory == "-" {
+ if restore.InputOptions.Archive != "" {
+ return fmt.Errorf(
+ "cannot restore from \"-\" when --archive is specified")
+ }
+ if restore.NSOptions.Collection == "" {
+ return fmt.Errorf("cannot restore from stdin without a specified collection")
+ }
+ }
+ if restore.stdin == nil {
+ restore.stdin = os.Stdin
+ }
+
+ return nil
+}
+
+// Restore runs the mongorestore program.
+func (restore *MongoRestore) Restore() error {
+ var target archive.DirLike
+ err := restore.ParseAndValidateOptions()
+ if err != nil {
+ log.Logvf(log.DebugLow, "got error from options parsing: %v", err)
+ return err
+ }
+
+ // Build up all intents to be restored
+ restore.manager = intents.NewIntentManager()
+ if restore.InputOptions.Archive == "" && restore.InputOptions.OplogReplay {
+ restore.manager.SetSmartPickOplog(true)
+ }
+
+ if restore.InputOptions.Archive != "" {
+ archiveReader, err := restore.getArchiveReader()
+ if err != nil {
+ return err
+ }
+ restore.archive = &archive.Reader{
+ In: archiveReader,
+ Prelude: &archive.Prelude{},
+ }
+ err = restore.archive.Prelude.Read(restore.archive.In)
+ if err != nil {
+ return err
+ }
+ log.Logvf(log.DebugLow, `archive format version "%v"`, restore.archive.Prelude.Header.FormatVersion)
+ log.Logvf(log.DebugLow, `archive server version "%v"`, restore.archive.Prelude.Header.ServerVersion)
+ log.Logvf(log.DebugLow, `archive tool version "%v"`, restore.archive.Prelude.Header.ToolVersion)
+ target, err = restore.archive.Prelude.NewPreludeExplorer()
+ if err != nil {
+ return err
+ }
+ } else if restore.TargetDirectory != "-" {
+ var usedDefaultTarget bool
+ if restore.TargetDirectory == "" {
+ restore.TargetDirectory = "dump"
+ log.Logv(log.Always, "using default 'dump' directory")
+ usedDefaultTarget = true
+ }
+ target, err = newActualPath(restore.TargetDirectory)
+ if err != nil {
+ if usedDefaultTarget {
+ log.Logv(log.Always, "see mongorestore --help for usage information")
+ }
+ return fmt.Errorf("mongorestore target '%v' invalid: %v", restore.TargetDirectory, err)
+ }
+ // handle cases where the user passes in a file instead of a directory
+ if !target.IsDir() {
+ log.Logv(log.DebugLow, "mongorestore target is a file, not a directory")
+ err = restore.handleBSONInsteadOfDirectory(restore.TargetDirectory)
+ if err != nil {
+ return err
+ }
+ } else {
+ log.Logv(log.DebugLow, "mongorestore target is a directory, not a file")
+ }
+ }
+ if restore.NSOptions.Collection != "" &&
+ restore.OutputOptions.NumParallelCollections > 1 &&
+ restore.OutputOptions.NumInsertionWorkers == 1 {
+ // handle special parallelization case when we are only restoring one collection
+ // by mapping -j to insertion workers rather than parallel collections
+ log.Logvf(log.DebugHigh,
+ "setting number of insertions workers to number of parallel collections (%v)",
+ restore.OutputOptions.NumParallelCollections)
+ restore.OutputOptions.NumInsertionWorkers = restore.OutputOptions.NumParallelCollections
+ }
+ if restore.InputOptions.Archive != "" {
+ if int(restore.archive.Prelude.Header.ConcurrentCollections) > restore.OutputOptions.NumParallelCollections {
+ restore.OutputOptions.NumParallelCollections = int(restore.archive.Prelude.Header.ConcurrentCollections)
+ restore.OutputOptions.NumInsertionWorkers = int(restore.archive.Prelude.Header.ConcurrentCollections)
+ log.Logvf(log.Always,
+ "setting number of parallel collections to number of parallel collections in archive (%v)",
+ restore.archive.Prelude.Header.ConcurrentCollections,
+ )
+ }
+ }
+
+ // Create the demux before intent creation, because muted archive intents need
+ // to register themselves with the demux directly
+ if restore.InputOptions.Archive != "" {
+ restore.archive.Demux = &archive.Demultiplexer{
+ In: restore.archive.In,
+ }
+ }
+
+ switch {
+ case restore.InputOptions.Archive != "":
+ log.Logvf(log.Always, "preparing collections to restore from")
+ err = restore.CreateAllIntents(target)
+ case restore.NSOptions.DB != "" && restore.NSOptions.Collection == "":
+ log.Logvf(log.Always,
+ "building a list of collections to restore from %v dir",
+ target.Path())
+ err = restore.CreateIntentsForDB(
+ restore.NSOptions.DB,
+ target,
+ )
+ case restore.NSOptions.DB != "" && restore.NSOptions.Collection != "" && restore.TargetDirectory == "-":
+ log.Logvf(log.Always, "setting up a collection to be read from standard input")
+ err = restore.CreateStdinIntentForCollection(
+ restore.NSOptions.DB,
+ restore.NSOptions.Collection,
+ )
+ case restore.NSOptions.DB != "" && restore.NSOptions.Collection != "":
+ log.Logvf(log.Always, "checking for collection data in %v", target.Path())
+ err = restore.CreateIntentForCollection(
+ restore.NSOptions.DB,
+ restore.NSOptions.Collection,
+ target,
+ )
+ default:
+ log.Logvf(log.Always, "preparing collections to restore from")
+ err = restore.CreateAllIntents(target)
+ }
+ if err != nil {
+ return fmt.Errorf("error scanning filesystem: %v", err)
+ }
+
+ if restore.isMongos && restore.manager.HasConfigDBIntent() && restore.NSOptions.DB == "" {
+ return fmt.Errorf("cannot do a full restore on a sharded system - " +
+ "remove the 'config' directory from the dump directory first")
+ }
+
+ if restore.InputOptions.OplogFile != "" {
+ err = restore.CreateIntentForOplog()
+ if err != nil {
+ return fmt.Errorf("error reading oplog file: %v", err)
+ }
+ }
+ if restore.InputOptions.OplogReplay && restore.manager.Oplog() == nil {
+ return fmt.Errorf("no oplog file to replay; make sure you run mongodump with --oplog")
+ }
+ if restore.manager.GetOplogConflict() {
+ return fmt.Errorf("cannot provide both an oplog.bson file and an oplog file with --oplogFile, " +
+ "nor can you provide both a local/oplog.rs.bson and a local/oplog.$main.bson file.")
+ }
+
+ conflicts := restore.manager.GetDestinationConflicts()
+ if len(conflicts) > 0 {
+ for _, conflict := range conflicts {
+ log.Logvf(log.Always, "%s", conflict.Error())
+ }
+ return fmt.Errorf("cannot restore with conflicting namespace destinations")
+ }
+
+ if restore.OutputOptions.DryRun {
+ log.Logvf(log.Always, "dry run completed")
+ return nil
+ }
+
+ if restore.InputOptions.Archive != "" {
+ namespaceChan := make(chan string, 1)
+ namespaceErrorChan := make(chan error)
+ restore.archive.Demux.NamespaceChan = namespaceChan
+ restore.archive.Demux.NamespaceErrorChan = namespaceErrorChan
+
+ go restore.archive.Demux.Run()
+ // Consume the namespace announcements from the demux for all of the
+ // special collections that get cached when being read out of the archive.
+ // The first regular collection found gets pushed back onto the
+ // namespaceChan so the demultiplexer can start correctly.
+ for {
+ ns, ok := <-namespaceChan
+ // the archive can have only special collections. In that case we keep reading until
+ // the namespaces are exhausted, indicated by the namespaceChan being closed.
+ if !ok {
+ break
+ }
+ intent := restore.manager.IntentForNamespace(ns)
+ if intent == nil {
+ return fmt.Errorf("no intent for collection in archive: %v", ns)
+ }
+ if intent.IsSystemIndexes() ||
+ intent.IsUsers() ||
+ intent.IsRoles() ||
+ intent.IsAuthVersion() {
+ log.Logvf(log.DebugLow, "special collection %v found", ns)
+ namespaceErrorChan <- nil
+ } else {
+ // Put the ns back on the announcement chan so that the
+ // demultiplexer can start correctly
+ log.Logvf(log.DebugLow, "first non special collection %v found."+
+ " The demultiplexer will handle it and the remainder", ns)
+ namespaceChan <- ns
+ break
+ }
+ }
+ }
+
+ // If restoring users and roles, make sure we validate auth versions
+ if restore.ShouldRestoreUsersAndRoles() {
+ log.Logv(log.Info, "comparing auth version of the dump directory and target server")
+ restore.authVersions.Dump, err = restore.GetDumpAuthVersion()
+ if err != nil {
+ return fmt.Errorf("error getting auth version from dump: %v", err)
+ }
+ restore.authVersions.Server, err = auth.GetAuthVersion(restore.SessionProvider)
+ if err != nil {
+ return fmt.Errorf("error getting auth version of server: %v", err)
+ }
+ err = restore.ValidateAuthVersions()
+ if err != nil {
+ return fmt.Errorf(
+ "the users and roles collections in the dump have an incompatible auth version with target server: %v",
+ err)
+ }
+ }
+
+ err = restore.LoadIndexesFromBSON()
+ if err != nil {
+ return fmt.Errorf("restore error: %v", err)
+ }
+
+ // Restore the regular collections
+ if restore.InputOptions.Archive != "" {
+ restore.manager.UsePrioritizer(restore.archive.Demux.NewPrioritizer(restore.manager))
+ } else if restore.OutputOptions.NumParallelCollections > 1 {
+ restore.manager.Finalize(intents.MultiDatabaseLTF)
+ } else {
+ // use legacy restoration order if we are single-threaded
+ restore.manager.Finalize(intents.Legacy)
+ }
+
+ restore.termChan = make(chan struct{})
+
+ if err := restore.RestoreIntents(); err != nil {
+ return err
+ }
+
+ // Restore users/roles
+ if restore.ShouldRestoreUsersAndRoles() {
+ err = restore.RestoreUsersOrRoles(restore.manager.Users(), restore.manager.Roles())
+ if err != nil {
+ return fmt.Errorf("restore error: %v", err)
+ }
+ }
+
+ // Restore oplog
+ if restore.InputOptions.OplogReplay {
+ err = restore.RestoreOplog()
+ if err != nil {
+ return fmt.Errorf("restore error: %v", err)
+ }
+ }
+
+ log.Logv(log.Always, "done")
+
+ return nil
+}
+
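+// wrappedReadCloser pairs an outer ReadCloser (such as a gzip.Reader) with
+// the inner source it wraps, so that closing it releases both.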
+type wrappedReadCloser struct {
+ io.ReadCloser
+ inner io.ReadCloser
+}
+
+func (wrc *wrappedReadCloser) Close() error {
+ err := wrc.ReadCloser.Close()
+ if err != nil {
+ return err
+ }
+ return wrc.inner.Close()
+}
+
+func (restore *MongoRestore) getArchiveReader() (rc io.ReadCloser, err error) {
+ if restore.InputOptions.Archive == "-" {
+ rc = ioutil.NopCloser(restore.stdin)
+ } else {
+ targetStat, err := os.Stat(restore.InputOptions.Archive)
+ if err != nil {
+ return nil, err
+ }
+ if targetStat.IsDir() {
+ defaultArchiveFilePath := filepath.Join(restore.InputOptions.Archive, "archive")
+ if restore.InputOptions.Gzip {
+ defaultArchiveFilePath = defaultArchiveFilePath + ".gz"
+ }
+ rc, err = os.Open(defaultArchiveFilePath)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ rc, err = os.Open(restore.InputOptions.Archive)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ if restore.InputOptions.Gzip {
+ gzrc, err := gzip.NewReader(rc)
+ if err != nil {
+ return nil, err
+ }
+ return &wrappedReadCloser{gzrc, rc}, nil
+ }
+ return rc, nil
+}
+
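+// HandleInterrupt signals a running restore to terminate by closing the
+// termination channel, which stops the goroutines reading documents.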
+func (restore *MongoRestore) HandleInterrupt() {
+ if restore.termChan != nil {
+ close(restore.termChan)
+ }
+}
diff --git a/src/mongo/gotools/mongorestore/mongorestore_test.go b/src/mongo/gotools/mongorestore/mongorestore_test.go
new file mode 100644
index 00000000000..db946ffd80f
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/mongorestore_test.go
@@ -0,0 +1,88 @@
+package mongorestore
+
+import (
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/testutil"
+ "github.com/mongodb/mongo-tools/common/util"
+
+ "os"
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+func init() {
+ // bump up the verbosity to make checking debug log output possible
+ log.SetVerbosity(&options.Verbosity{
+ VLevel: 4,
+ })
+}
+
+var (
+ testServer = "localhost"
+ testPort = db.DefaultTestPort
+)
+
+func TestMongorestore(t *testing.T) {
+ ssl := testutil.GetSSLOptions()
+ auth := testutil.GetAuthOptions()
+
+ testutil.VerifyTestType(t, testutil.IntegrationTestType)
+ toolOptions := &options.ToolOptions{
+ Connection: &options.Connection{
+ Host: testServer,
+ Port: testPort,
+ },
+ Auth: &auth,
+ SSL: &ssl,
+ }
+ inputOptions := &InputOptions{}
+ outputOptions := &OutputOptions{
+ NumParallelCollections: 1,
+ NumInsertionWorkers: 1,
+ WriteConcern: "majority",
+ }
+ nsOptions := &NSOptions{}
+ Convey("With a test MongoRestore", t, func() {
+ provider, err := db.NewSessionProvider(*toolOptions)
+ if err != nil {
+ log.Logvf(log.Always, "error connecting to host: %v", err)
+ os.Exit(util.ExitError)
+ }
+ restore := MongoRestore{
+ ToolOptions: toolOptions,
+ OutputOptions: outputOptions,
+ InputOptions: inputOptions,
+ NSOptions: nsOptions,
+ SessionProvider: provider,
+ }
+ session, _ := provider.GetSession()
+ c1 := session.DB("db1").C("c1")
+ c1.DropCollection()
+ Convey("and an explicit target restores from that dump directory", func() {
+ restore.TargetDirectory = "testdata/testdirs"
+ err = restore.Restore()
+ So(err, ShouldBeNil)
+ count, err := c1.Count()
+ So(err, ShouldBeNil)
+ So(count, ShouldEqual, 100)
+ })
+
+ Convey("and an target of '-' restores from standard input", func() {
+ bsonFile, err := os.Open("testdata/testdirs/db1/c1.bson")
+ restore.NSOptions.Collection = "c1"
+ restore.NSOptions.DB = "db1"
+ So(err, ShouldBeNil)
+ restore.stdin = bsonFile
+ restore.TargetDirectory = "-"
+ err = restore.Restore()
+ So(err, ShouldBeNil)
+ count, err := c1.Count()
+ So(err, ShouldBeNil)
+ So(count, ShouldEqual, 100)
+ })
+
+ })
+}
diff --git a/src/mongo/gotools/mongorestore/ns/ns.go b/src/mongo/gotools/mongorestore/ns/ns.go
new file mode 100644
index 00000000000..ce17a38277c
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/ns/ns.go
@@ -0,0 +1,235 @@
+package ns
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// Renamer maps namespaces given user-defined patterns
+type Renamer struct {
+ // List of regexps to match namespaces against
+ matchers []*regexp.Regexp
+ // List of regexp-style replacement strings to use with the matchers
+ replacers []string
+}
+
+// Matcher identifies namespaces given user-defined patterns
+type Matcher struct {
+ // List of regexps to check namespaces against
+ matchers []*regexp.Regexp
+}
+
+var (
+ unescapeReplacements = []string{
+ `\\`, `\`,
+ `\*`, "*",
+ `\`, "",
+ }
+ unescapeReplacer = strings.NewReplacer(unescapeReplacements...)
+)
+
+// Escape escapes instances of '\' and '*' with a backslash
+func Escape(in string) string {
+ in = strings.Replace(in, `\`, `\\`, -1)
+ in = strings.Replace(in, "*", `\*`, -1)
+ return in
+}
+
+// Unescape removes the escaping backslash where applicable
+func Unescape(in string) string {
+ return unescapeReplacer.Replace(in)
+}
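+
+// As an illustrative round trip (not part of the original package):
+// Escape(`a*b\c`) yields `a\*b\\c`, and Unescape(`a\*b\\c`) restores `a*b\c`.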
+
+var (
+ // Finds non-escaped asterisks
+ wildCardRE = regexp.MustCompile(`^(|.*[^\\])\*(.*)$`)
+ // Finds $variables$ at the beginning of a string
+ variableRE = regexp.MustCompile(`^\$([^\$]*)\$(.*)$`)
+ // Characters with special meaning in regular expressions that must be escaped
+ escapedChars = `*[](){}\?$^+!.|`
+)
+
+// peelLeadingVariable returns the name of the $variable$ at the start of
+// the given string and the remainder of the string; ok reports whether
+// such a variable was found
+func peelLeadingVariable(in string) (name, rest string, ok bool) {
+ var match = variableRE.FindStringSubmatch(in)
+ if len(match) != 3 {
+ return
+ }
+ return match[1], match[2], true
+}
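+
+// For instance (illustrative), peelLeadingVariable("$db$.users") returns
+// ("db", ".users", true), while peelLeadingVariable("db.users") returns
+// ("", "", false).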
+
+// replaceWildCards replaces non-escaped asterisks with named variables
+// e.g. 'pre*.*' becomes 'pre$1$.$2$'
+func replaceWildCards(in string) string {
+ count := 1
+ match := wildCardRE.FindStringSubmatch(in)
+ for len(match) > 2 {
+ in = fmt.Sprintf("%s$%d$%s", match[1], count, match[2])
+ match = wildCardRE.FindStringSubmatch(in)
+ count++
+ }
+ return Unescape(in)
+}
+
+// countAsterisks returns the number of non-escaped asterisks
+func countAsterisks(in string) int {
+ return strings.Count(in, "*") - strings.Count(in, `\*`)
+}
+
+// countDollarSigns returns the number of dollar signs
+func countDollarSigns(in string) int {
+ return strings.Count(in, "$")
+}
+
+// validateReplacement performs preliminary checks on the from and to strings,
+// returning an error if it finds a syntactic issue
+func validateReplacement(from, to string) error {
+ if strings.Contains(from, "$") {
+ if countDollarSigns(from)%2 != 0 {
+ return fmt.Errorf("Odd number of dollar signs in from: '%s'", from)
+ }
+ if countDollarSigns(to)%2 != 0 {
+ return fmt.Errorf("Odd number of dollar signs in to: '%s'", to)
+ }
+ } else {
+ if countAsterisks(from) != countAsterisks(to) {
+ return fmt.Errorf("Different number of asterisks in from: '%s' and to: '%s'", from, to)
+ }
+ }
+ return nil
+}
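+
+// As a sketch of the rule above: "db.*" -> "newdb.*" validates (one
+// non-escaped asterisk on each side), while "db.*" -> "newdb.coll" fails
+// because the asterisk counts differ.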
+
+// processReplacement converts the given from and to strings into a regexp and
+// a corresponding replacement string
+func processReplacement(from, to string) (re *regexp.Regexp, replacer string, err error) {
+ if !strings.Contains(from, "$") {
+ // Convert asterisk wild cards to named variables
+ from = replaceWildCards(from)
+ to = replaceWildCards(to)
+ }
+
+ // Map from variable names to positions in the search regexp
+ vars := make(map[string]int)
+
+ var matcher string
+ for len(from) > 0 {
+ varName, rest, ok := peelLeadingVariable(from)
+ if ok { // found variable
+ if _, ok := vars[varName]; ok {
+ // Cannot repeat the same variable in a 'from' string
+ err = fmt.Errorf("Variable name '%s' used more than once", varName)
+ return
+ }
+ // Put the variable in the map with its index in the string
+ vars[varName] = len(vars) + 1
+ matcher += "(.*?)"
+ from = rest
+ continue
+ }
+
+ c := rune(from[0])
+ if c == '$' {
+ err = fmt.Errorf("Extraneous '$'")
+ return
+ }
+ if strings.ContainsRune(escapedChars, c) {
+ // Add backslash before special chars
+ matcher += `\`
+ }
+ matcher += string(c)
+ from = from[1:]
+ }
+ matcher = fmt.Sprintf("^%s$", matcher)
+ // The regexp we generated should always compile (it's not the user's fault)
+ re = regexp.MustCompile(matcher)
+
+ for len(to) > 0 {
+ varName, rest, ok := peelLeadingVariable(to)
+ if ok { // found variable
+ if num, ok := vars[varName]; ok {
+ replacer += fmt.Sprintf("${%d}", num)
+ to = rest
+ } else {
+ err = fmt.Errorf("Unknown variable '%s'", varName)
+ return
+ }
+ continue
+ }
+
+ c := rune(to[0])
+ if c == '$' {
+ err = fmt.Errorf("Extraneous '$'")
+ return
+ }
+ replacer += string(c)
+ to = to[1:]
+ }
+ return
+}
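+
+// Worked example (illustrative): processReplacement("$db$.users", "archive.$db$")
+// yields the regexp ^(.*?)\.users$ and the replacement string "archive.${1}".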
+
+// NewRenamer creates a Renamer that will use the given from and to slices to
+// map namespaces
+func NewRenamer(fromSlice, toSlice []string) (r *Renamer, err error) {
+ if len(fromSlice) != len(toSlice) {
+ err = fmt.Errorf("Different number of froms and tos")
+ return
+ }
+ r = new(Renamer)
+ for i := len(fromSlice) - 1; i >= 0; i-- {
+ // reversed for replacement precedence
+ from := fromSlice[i]
+ to := toSlice[i]
+ err = validateReplacement(from, to)
+ if err != nil {
+ return
+ }
+ matcher, replacer, e := processReplacement(from, to)
+ if e != nil {
+ err = fmt.Errorf("Invalid replacement from '%s' to '%s': %s", from, to, e)
+ return
+ }
+ r.matchers = append(r.matchers, matcher)
+ r.replacers = append(r.replacers, replacer)
+ }
+ return
+}
+
+// Get returns the rewritten namespace according to the renamer's rules
+func (r *Renamer) Get(name string) string {
+ for i, matcher := range r.matchers {
+ if matcher.MatchString(name) {
+ return matcher.ReplaceAllString(name, r.replacers[i])
+ }
+ }
+ return name
+}
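+
+// For example (illustrative), a Renamer built from
+// NewRenamer([]string{"$db$.users"}, []string{"archive.$db$"}) maps
+// "test.users" to "archive.test" and leaves "test.other" unchanged.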
+
+// NewMatcher creates a matcher that will use the given list of patterns to
+// match namespaces
+func NewMatcher(patterns []string) (m *Matcher, err error) {
+ m = new(Matcher)
+ for _, pattern := range patterns {
+ if strings.Contains(pattern, "$") {
+ err = fmt.Errorf("'$' is not allowed in include/exclude patterns")
+ return
+ }
+ re, _, e := processReplacement(pattern, pattern)
+ if e != nil {
+ err = fmt.Errorf("%s processing include/exclude pattern: '%s'", e, pattern)
+ return
+ }
+ m.matchers = append(m.matchers, re)
+ }
+ return
+}
+
+// Has returns whether the given namespace matches any of the matcher's patterns
+func (m *Matcher) Has(name string) bool {
+ for _, re := range m.matchers {
+ if re.MatchString(name) {
+ return true
+ }
+ }
+ return false
+}
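+
+// For example (illustrative), NewMatcher([]string{"db.*"}) yields a matcher
+// for which Has("db.users") is true and Has("other.users") is false.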
diff --git a/src/mongo/gotools/mongorestore/ns/ns_test.go b/src/mongo/gotools/mongorestore/ns/ns_test.go
new file mode 100644
index 00000000000..1feb84678a5
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/ns/ns_test.go
@@ -0,0 +1,111 @@
+package ns
+
+import (
+ "testing"
+
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+func init() {
+ // bump up the verbosity to make checking debug log output possible
+ log.SetVerbosity(&options.Verbosity{
+ VLevel: 4,
+ })
+}
+
+func TestEscape(t *testing.T) {
+ Convey("with a few strings", t, func() {
+ So(Escape("(blah)"), ShouldEqual, "(blah)")
+ So(Escape(""), ShouldEqual, "")
+ So(Escape(`bl*h*\\`), ShouldEqual, `bl\*h\*\\\\`)
+ So(Escape("blah**"), ShouldEqual, `blah\*\*`)
+ })
+}
+
+func TestUnescape(t *testing.T) {
+ Convey("with a few escaped strings", t, func() {
+ So(Unescape("(blah)"), ShouldEqual, "(blah)")
+ So(Unescape(""), ShouldEqual, "")
+ So(Unescape(`bl\*h\*\\\\`), ShouldEqual, `bl*h*\\`)
+ So(Unescape(`blah\*\*`), ShouldEqual, "blah**")
+ })
+}
+
+func TestReplacer(t *testing.T) {
+ Convey("with replacements", t, func() {
+ Convey(`'$db$.user$$' -> 'test.user$$_$db$', 'pr\*d\.*' -> 'st\*g\\ing.*'`, func() {
+ r, err := NewRenamer([]string{"$db$.user$$", `pr\*d\\.*`}, []string{"test.user$$_$db$", `st\*g\\ing.*`})
+ So(r, ShouldNotBeNil)
+ So(err, ShouldBeNil)
+ So(r.Get("stuff.user"), ShouldEqual, "test.user_stuff")
+ So(r.Get("stuff.users"), ShouldEqual, "test.users_stuff")
+ So(r.Get(`pr*d\.users`), ShouldEqual, `st*g\ing.users`)
+ So(r.Get(`pr*d\.turbo.encabulators`), ShouldEqual, `st*g\ing.turbo.encabulators`)
+ So(r.Get(`st*g\ing.turbo.encabulators`), ShouldEqual, `st*g\ing.turbo.encabulators`)
+ })
+ Convey(`'$:)*$.us(?:2)er$?$' -> 'test.us(?:2)er$?$_$:)*$'`, func() {
+ r, err := NewRenamer([]string{"$:)*$.us(?:2)er$?$"}, []string{"test.us(?:2)er$?$_$:)*$"})
+ So(r, ShouldNotBeNil)
+ So(err, ShouldBeNil)
+ So(r.Get("stuff.us(?:2)er"), ShouldEqual, "test.us(?:2)er_stuff")
+ So(r.Get("stuff.us(?:2)ers"), ShouldEqual, "test.us(?:2)ers_stuff")
+ })
+ Convey("'*.*' -> '*_test.*'", func() {
+ r, err := NewRenamer([]string{"*.*"}, []string{"*_test.*"})
+ So(r, ShouldNotBeNil)
+ So(err, ShouldBeNil)
+ So(r.Get("stuff.user"), ShouldEqual, "stuff_test.user")
+ So(r.Get("stuff.users"), ShouldEqual, "stuff_test.users")
+ So(r.Get("prod.turbo.encabulators"), ShouldEqual, "prod_test.turbo.encabulators")
+ })
+ })
+ Convey("with invalid replacements", t, func() {
+ Convey("'$db$.user$db$' -> 'test.user-$db$'", func() {
+ _, err := NewRenamer([]string{"$db$.user$db$"}, []string{"test.user-$db$"})
+ So(err, ShouldNotBeNil)
+ })
+ Convey("'$db$.us$er$table$' -> 'test.user$table$_$db$'", func() {
+ _, err := NewRenamer([]string{"$db$.us$er$table$"}, []string{"test.user$table$_$db$"})
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
+
+func TestMatcher(t *testing.T) {
+ Convey("with matcher", t, func() {
+ Convey(`'*.user*', 'pr\*d\.*'`, func() {
+ m, err := NewMatcher([]string{`*.user*`, `pr\*d\.*`})
+ So(m, ShouldNotBeNil)
+ So(err, ShouldBeNil)
+ So(m.Has("stuff.user"), ShouldBeTrue)
+ So(m.Has("stuff.users"), ShouldBeTrue)
+ So(m.Has("pr*d.users"), ShouldBeTrue)
+ So(m.Has("pr*d.magic"), ShouldBeTrue)
+ So(m.Has(`pr*d\.magic`), ShouldBeFalse)
+ So(m.Has("prod.magic"), ShouldBeFalse)
+ So(m.Has("pr*d.turbo.encabulators"), ShouldBeTrue)
+ So(m.Has("st*ging.turbo.encabulators"), ShouldBeFalse)
+ })
+ Convey("'*.*'", func() {
+ m, err := NewMatcher([]string{"*.*"})
+ So(m, ShouldNotBeNil)
+ So(err, ShouldBeNil)
+ So(m.Has("stuff"), ShouldBeFalse)
+ So(m.Has("stuff.user"), ShouldBeTrue)
+ So(m.Has("stuff.users"), ShouldBeTrue)
+ So(m.Has("prod.turbo.encabulators"), ShouldBeTrue)
+ })
+ })
+ Convey("with invalid matcher", t, func() {
+ Convey("'$.user$'", func() {
+ _, err := NewMatcher([]string{"$.user$"})
+ So(err, ShouldNotBeNil)
+ })
+ Convey("'*.user$'", func() {
+ _, err := NewMatcher([]string{"*.user$"})
+ So(err, ShouldNotBeNil)
+ })
+ })
+}
diff --git a/src/mongo/gotools/mongorestore/oplog.go b/src/mongo/gotools/mongorestore/oplog.go
new file mode 100644
index 00000000000..5aad53b0614
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/oplog.go
@@ -0,0 +1,173 @@
+package mongorestore
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/progress"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// oplogMaxCommandSize sets the maximum size for multiple buffered ops in the
+// applyOps command. This is to prevent pathological cases where the array overhead
+// of many small operations can overflow the maximum command size.
+// Note that ops > 8MB will still be buffered, just as single elements.
+const oplogMaxCommandSize = 1024 * 1024 * 8
+
+// RestoreOplog attempts to restore a MongoDB oplog.
+func (restore *MongoRestore) RestoreOplog() error {
+ log.Logv(log.Always, "replaying oplog")
+ intent := restore.manager.Oplog()
+ if intent == nil {
+ // this should not be reached
+ log.Logv(log.Always, "no oplog file provided, skipping oplog application")
+ return nil
+ }
+ if err := intent.BSONFile.Open(); err != nil {
+ return err
+ }
+ defer intent.BSONFile.Close()
+ // NewBufferlessBSONSource reads each bson document into its own buffer
+ // because bson.Unmarshal currently can't unmarshal binary types without
+ // them referencing the source buffer
+ bsonSource := db.NewDecodedBSONSource(db.NewBufferlessBSONSource(intent.BSONFile))
+ defer bsonSource.Close()
+
+ entryArray := make([]interface{}, 0, 1024)
+ rawOplogEntry := &bson.Raw{}
+
+ var totalOps int64
+ var entrySize, bufferedBytes int
+
+ oplogProgressor := progress.NewCounter(intent.BSONSize)
+ bar := progress.Bar{
+ Name: "oplog",
+ Watching: oplogProgressor,
+ WaitTime: 3 * time.Second,
+ Writer: log.Writer(0),
+ BarLength: progressBarLength,
+ IsBytes: true,
+ }
+ bar.Start()
+ defer bar.Stop()
+
+ session, err := restore.SessionProvider.GetSession()
+ if err != nil {
+ return fmt.Errorf("error establishing connection: %v", err)
+ }
+ defer session.Close()
+
+ // To restore the oplog, we iterate over the oplog entries,
+ // filling up a buffer. Once the buffer reaches max document size,
+ // apply the current buffered ops and reset the buffer.
+ for bsonSource.Next(rawOplogEntry) {
+ entrySize = len(rawOplogEntry.Data)
+ if bufferedBytes+entrySize > oplogMaxCommandSize {
+ err = restore.ApplyOps(session, entryArray)
+ if err != nil {
+ return fmt.Errorf("error applying oplog: %v", err)
+ }
+ entryArray = make([]interface{}, 0, 1024)
+ bufferedBytes = 0
+ }
+
+ entryAsOplog := db.Oplog{}
+ err = bson.Unmarshal(rawOplogEntry.Data, &entryAsOplog)
+ if err != nil {
+ return fmt.Errorf("error reading oplog: %v", err)
+ }
+ if entryAsOplog.Operation == "n" {
+ // skip no-ops
+ continue
+ }
+ if !restore.TimestampBeforeLimit(entryAsOplog.Timestamp) {
+ log.Logvf(
+ log.DebugLow,
+ "timestamp %v is not below limit of %v; ending oplog restoration",
+ entryAsOplog.Timestamp,
+ restore.oplogLimit,
+ )
+ break
+ }
+
+ totalOps++
+ bufferedBytes += entrySize
+ oplogProgressor.Inc(int64(entrySize))
+ entryArray = append(entryArray, entryAsOplog)
+ }
+ // finally, flush the remaining entries
+ if len(entryArray) > 0 {
+ err = restore.ApplyOps(session, entryArray)
+ if err != nil {
+ return fmt.Errorf("error applying oplog: %v", err)
+ }
+ }
+
+ log.Logvf(log.Info, "applied %v ops", totalOps)
+ return nil
+
+}
+
+// ApplyOps is a wrapper for the applyOps database command. We pass in
+// a session to avoid opening a new connection for a few inserts at a time.
+func (restore *MongoRestore) ApplyOps(session *mgo.Session, entries []interface{}) error {
+ res := bson.M{}
+ err := session.Run(bson.D{{"applyOps", entries}}, &res)
+ if err != nil {
+ return fmt.Errorf("applyOps: %v", err)
+ }
+ if util.IsFalsy(res["ok"]) {
+ return fmt.Errorf("applyOps command: %v", res["errmsg"])
+ }
+
+ return nil
+}
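+
+// Each entry passed to applyOps above is an oplog document; as a sketch
+// (field values elided), the command has the shape:
+//
+//   {applyOps: [{ts: <Timestamp>, op: "i", ns: "db.coll", o: {...}}, ...]}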
+
+// TimestampBeforeLimit returns true if the given timestamp is allowed to be
+// applied to mongorestore's target database.
+func (restore *MongoRestore) TimestampBeforeLimit(ts bson.MongoTimestamp) bool {
+ if restore.oplogLimit == 0 {
+ // always valid if there is no --oplogLimit set
+ return true
+ }
+ return ts < restore.oplogLimit
+}
+
+// ParseTimestampFlag takes in a string of the form <time_t>:<ordinal>,
+// where <time_t> is the seconds since the UNIX epoch, and <ordinal> represents
+// a counter of operations in the oplog that occurred in the specified second.
+// It parses this timestamp string and returns a bson.MongoTimestamp type.
+func ParseTimestampFlag(ts string) (bson.MongoTimestamp, error) {
+ var seconds, increment int
+ timestampFields := strings.Split(ts, ":")
+ if len(timestampFields) > 2 {
+ return 0, fmt.Errorf("too many : characters")
+ }
+
+ seconds, err := strconv.Atoi(timestampFields[0])
+ if err != nil {
+ return 0, fmt.Errorf("error parsing timestamp seconds: %v", err)
+ }
+
+ // parse the increment field if it exists
+ if len(timestampFields) == 2 {
+ if len(timestampFields[1]) > 0 {
+ increment, err = strconv.Atoi(timestampFields[1])
+ if err != nil {
+ return 0, fmt.Errorf("error parsing timestamp increment: %v", err)
+ }
+ } else {
+ // handle the case where the user writes "<time_t>:" with no ordinal
+ increment = 0
+ }
+ }
+
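+ // Pack the seconds into the high 32 bits and the ordinal into the low
+ // 32 bits, matching MongoDB's timestamp layout.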
+ timestamp := (int64(seconds) << 32) | int64(increment)
+ return bson.MongoTimestamp(timestamp), nil
+}
diff --git a/src/mongo/gotools/mongorestore/oplog_test.go b/src/mongo/gotools/mongorestore/oplog_test.go
new file mode 100644
index 00000000000..a91c09740a3
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/oplog_test.go
@@ -0,0 +1,116 @@
+package mongorestore
+
+import (
+ "testing"
+
+ "github.com/mongodb/mongo-tools/common/testutil"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+)
+
+func TestTimestampStringParsing(t *testing.T) {
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("Testing some possible timestamp strings:", t, func() {
+ Convey("123:456 [should pass]", func() {
+ ts, err := ParseTimestampFlag("123:456")
+ So(err, ShouldBeNil)
+ So(ts, ShouldEqual, (int64(123)<<32 | int64(456)))
+ })
+
+ Convey("123 [should pass]", func() {
+ ts, err := ParseTimestampFlag("123")
+ So(err, ShouldBeNil)
+ So(ts, ShouldEqual, int64(123)<<32)
+ })
+
+ Convey("123: [should pass]", func() {
+ ts, err := ParseTimestampFlag("123:")
+ So(err, ShouldBeNil)
+ So(ts, ShouldEqual, int64(123)<<32)
+ })
+
+ Convey("123.123 [should fail]", func() {
+ ts, err := ParseTimestampFlag("123.123")
+ So(err, ShouldNotBeNil)
+ So(ts, ShouldEqual, 0)
+ })
+
+ Convey(": [should fail]", func() {
+ ts, err := ParseTimestampFlag(":")
+ So(err, ShouldNotBeNil)
+ So(ts, ShouldEqual, 0)
+ })
+
+ Convey("1:1:1 [should fail]", func() {
+ ts, err := ParseTimestampFlag("1:1:1")
+ So(err, ShouldNotBeNil)
+ So(ts, ShouldEqual, 0)
+ })
+
+ Convey("cats [should fail]", func() {
+ ts, err := ParseTimestampFlag("cats")
+ So(err, ShouldNotBeNil)
+ So(ts, ShouldEqual, 0)
+ })
+
+ Convey("[empty string] [should fail]", func() {
+ ts, err := ParseTimestampFlag("")
+ So(err, ShouldNotBeNil)
+ So(ts, ShouldEqual, 0)
+ })
+ })
+}
+
+func TestValidOplogLimitChecking(t *testing.T) {
+
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ Convey("With a MongoRestore instance with oplogLimit of 5:0", t, func() {
+ mr := &MongoRestore{
+ oplogLimit: bson.MongoTimestamp(int64(5) << 32),
+ }
+
+ Convey("an oplog entry with ts=1000:0 should be invalid", func() {
+ So(mr.TimestampBeforeLimit(bson.MongoTimestamp(int64(1000)<<32)), ShouldBeFalse)
+ })
+
+ Convey("an oplog entry with ts=5:1 should be invalid", func() {
+ So(mr.TimestampBeforeLimit(bson.MongoTimestamp(int64(5)<<32|1)), ShouldBeFalse)
+ })
+
+ Convey("an oplog entry with ts=5:0 should be invalid", func() {
+ So(mr.TimestampBeforeLimit(bson.MongoTimestamp(int64(5)<<32)), ShouldBeFalse)
+ })
+
+ Convey("an oplog entry with ts=4:9 should be valid", func() {
+ So(mr.TimestampBeforeLimit(bson.MongoTimestamp(int64(4)<<32|9)), ShouldBeTrue)
+ })
+
+ Convey("an oplog entry with ts=4:0 should be valid", func() {
+ So(mr.TimestampBeforeLimit(bson.MongoTimestamp(int64(4)<<32)), ShouldBeTrue)
+ })
+
+ Convey("an oplog entry with ts=0:1 should be valid", func() {
+ So(mr.TimestampBeforeLimit(bson.MongoTimestamp(1)), ShouldBeTrue)
+ })
+ })
+
+ Convey("With a MongoRestore instance with no oplogLimit", t, func() {
+ mr := &MongoRestore{}
+
+ Convey("an oplog entry with ts=1000:0 should be valid", func() {
+ So(mr.TimestampBeforeLimit(bson.MongoTimestamp(int64(1000)<<32)), ShouldBeTrue)
+ })
+
+ Convey("an oplog entry with ts=5:1 should be valid", func() {
+ So(mr.TimestampBeforeLimit(bson.MongoTimestamp(int64(5)<<32|1)), ShouldBeTrue)
+ })
+
+ Convey("an oplog entry with ts=5:0 should be valid", func() {
+ So(mr.TimestampBeforeLimit(bson.MongoTimestamp(int64(5)<<32)), ShouldBeTrue)
+ })
+ })
+
+}
diff --git a/src/mongo/gotools/mongorestore/options.go b/src/mongo/gotools/mongorestore/options.go
new file mode 100644
index 00000000000..93965cc3e2b
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/options.go
@@ -0,0 +1,68 @@
+package mongorestore
+
+// Usage describes basic usage of mongorestore
+var Usage = `<options> <directory or file to restore>
+
+Restore backups generated with mongodump to a running server.
+
+Specify a database with -d to restore a single database from the target directory,
+or use -d and -c to restore a single collection from a single .bson file.
+
+See http://docs.mongodb.org/manual/reference/program/mongorestore/ for more information.`
+
+// InputOptions defines the set of options to use in configuring the restore process.
+type InputOptions struct {
+ Objcheck bool `long:"objcheck" description:"validate all objects before inserting"`
+ OplogReplay bool `long:"oplogReplay" description:"replay oplog for point-in-time restore"`
+ OplogLimit string `long:"oplogLimit" value-name:"<seconds>[:ordinal]" description:"only include oplog entries before the provided Timestamp"`
+ OplogFile string `long:"oplogFile" value-name:"<filename>" description:"oplog file to use for replay of oplog"`
+ Archive string `long:"archive" value-name:"<filename>" optional:"true" optional-value:"-" description:"restore dump from the specified archive file. If flag is specified without a value, archive is read from stdin"`
+ RestoreDBUsersAndRoles bool `long:"restoreDbUsersAndRoles" description:"restore user and role definitions for the given database"`
+ Directory string `long:"dir" value-name:"<directory-name>" description:"input directory, use '-' for stdin"`
+ Gzip bool `long:"gzip" description:"decompress gzipped input"`
+}
+
+// Name returns a human-readable group name for input options.
+func (*InputOptions) Name() string {
+ return "input"
+}
+
+// OutputOptions defines the set of options for restoring dump data.
+type OutputOptions struct {
+ Drop bool `long:"drop" description:"drop each collection before import"`
+ DryRun bool `long:"dryRun" description:"view summary without importing anything. recommended with verbosity"`
+ WriteConcern string `long:"writeConcern" value-name:"<write-concern>" default:"majority" default-mask:"-" description:"write concern options e.g. --writeConcern majority, --writeConcern '{w: 3, wtimeout: 500, fsync: true, j: true}' (defaults to 'majority')"`
+ NoIndexRestore bool `long:"noIndexRestore" description:"don't restore indexes"`
+ NoOptionsRestore bool `long:"noOptionsRestore" description:"don't restore collection options"`
+ KeepIndexVersion bool `long:"keepIndexVersion" description:"don't update index version"`
+ MaintainInsertionOrder bool `long:"maintainInsertionOrder" description:"preserve order of documents during restoration"`
+ NumParallelCollections int `long:"numParallelCollections" short:"j" description:"number of collections to restore in parallel (4 by default)" default:"4" default-mask:"-"`
+ NumInsertionWorkers int `long:"numInsertionWorkersPerCollection" description:"number of insert operations to run concurrently per collection (1 by default)" default:"1" default-mask:"-"`
+ StopOnError bool `long:"stopOnError" description:"stop restoring if an error is encountered on insert (off by default)"`
+ BypassDocumentValidation bool `long:"bypassDocumentValidation" description:"bypass document validation"`
+ TempUsersColl string `long:"tempUsersColl" default:"tempusers" hidden:"true"`
+ TempRolesColl string `long:"tempRolesColl" default:"temproles" hidden:"true"`
+ BulkBufferSize int `long:"batchSize" default:"1000" hidden:"true"`
+}
+
+// Name returns a human-readable group name for output options.
+func (*OutputOptions) Name() string {
+ return "restore"
+}
+
+// NSOptions defines the set of options for configuring involved namespaces
+type NSOptions struct {
+ DB string `short:"d" long:"db" value-name:"<database-name>" description:"database to use when restoring from a BSON file"`
+ Collection string `short:"c" long:"collection" value-name:"<collection-name>" description:"collection to use when restoring from a BSON file"`
+ ExcludedCollections []string `long:"excludeCollection" value-name:"<collection-name>" description:"DEPRECATED; collection to skip over during restore (may be specified multiple times to exclude additional collections)"`
+ ExcludedCollectionPrefixes []string `long:"excludeCollectionsWithPrefix" value-name:"<collection-prefix>" description:"DEPRECATED; collections to skip over during restore that have the given prefix (may be specified multiple times to exclude additional prefixes)"`
+ NSExclude []string `long:"nsExclude" value-name:"<namespace-pattern>" description:"exclude matching namespaces"`
+ NSInclude []string `long:"nsInclude" value-name:"<namespace-pattern>" description:"include matching namespaces"`
+ NSFrom []string `long:"nsFrom" value-name:"<namespace-pattern>" description:"rename matching namespaces, must have matching nsTo"`
+ NSTo []string `long:"nsTo" value-name:"<namespace-pattern>" description:"rename matched namespaces, must have matching nsFrom"`
+}
+
+// Name returns a human-readable group name for namespace options.
+func (*NSOptions) Name() string {
+ return "namespace"
+}
diff --git a/src/mongo/gotools/mongorestore/restore.go b/src/mongo/gotools/mongorestore/restore.go
new file mode 100644
index 00000000000..34225f8338d
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/restore.go
@@ -0,0 +1,320 @@
+package mongorestore
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strings"
+ "time"
+
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/intents"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/progress"
+ "github.com/mongodb/mongo-tools/common/util"
+ "gopkg.in/mgo.v2/bson"
+)
+
+const (
+ progressBarLength = 24
+ progressBarWaitTime = time.Second * 3
+ insertBufferFactor = 16
+)
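+
+// insertBufferFactor is used below as the capacity of the buffered channel
+// that carries decoded documents from the reader goroutine to the insertion
+// workers in RestoreCollectionToDB.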
+
+// RestoreIntents iterates through all of the intents stored in the IntentManager, and restores them.
+func (restore *MongoRestore) RestoreIntents() error {
+ // start up the progress bar manager
+ restore.progressManager = progress.NewProgressBarManager(log.Writer(0), progressBarWaitTime)
+ restore.progressManager.Start()
+ defer restore.progressManager.Stop()
+
+ log.Logvf(log.DebugLow, "restoring up to %v collections in parallel", restore.OutputOptions.NumParallelCollections)
+
+ if restore.OutputOptions.NumParallelCollections > 0 {
+ resultChan := make(chan error)
+
+ // start a goroutine for each job thread
+ for i := 0; i < restore.OutputOptions.NumParallelCollections; i++ {
+ go func(id int) {
+ log.Logvf(log.DebugHigh, "starting restore routine with id=%v", id)
+ var ioBuf []byte
+ for {
+ intent := restore.manager.Pop()
+ if intent == nil {
+ log.Logvf(log.DebugHigh, "ending restore routine with id=%v, no more work to do", id)
+ resultChan <- nil // done
+ return
+ }
+ if fileNeedsIOBuffer, ok := intent.BSONFile.(intents.FileNeedsIOBuffer); ok {
+ if ioBuf == nil {
+ ioBuf = make([]byte, db.MaxBSONSize)
+ }
+ fileNeedsIOBuffer.TakeIOBuffer(ioBuf)
+ }
+ err := restore.RestoreIntent(intent)
+ if err != nil {
+ resultChan <- fmt.Errorf("%v: %v", intent.Namespace(), err)
+ return
+ }
+ restore.manager.Finish(intent)
+ if fileNeedsIOBuffer, ok := intent.BSONFile.(intents.FileNeedsIOBuffer); ok {
+ fileNeedsIOBuffer.ReleaseIOBuffer()
+ }
+
+ }
+ }(i)
+ }
+
+ // wait until all goroutines are done or one of them errors out
+ for i := 0; i < restore.OutputOptions.NumParallelCollections; i++ {
+ if err := <-resultChan; err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ // single-threaded
+ for {
+ intent := restore.manager.Pop()
+ if intent == nil {
+ break
+ }
+ err := restore.RestoreIntent(intent)
+ if err != nil {
+ return fmt.Errorf("%v: %v", intent.Namespace(), err)
+ }
+ restore.manager.Finish(intent)
+ }
+ return nil
+}
+
+// RestoreIntent attempts to restore a given intent into MongoDB.
+func (restore *MongoRestore) RestoreIntent(intent *intents.Intent) error {
+
+ collectionExists, err := restore.CollectionExists(intent)
+ if err != nil {
+ return fmt.Errorf("error reading database: %v", err)
+ }
+
+ if restore.safety == nil && !restore.OutputOptions.Drop && collectionExists {
+ log.Logvf(log.Always, "restoring to existing collection %v without dropping", intent.Namespace())
+ log.Logv(log.Always, "Important: restored data will be inserted without raising errors; check your server log")
+ }
+
+ if restore.OutputOptions.Drop {
+ if collectionExists {
+ if strings.HasPrefix(intent.C, "system.") {
+ log.Logvf(log.Always, "cannot drop system collection %v, skipping", intent.Namespace())
+ } else {
+ log.Logvf(log.Info, "dropping collection %v before restoring", intent.Namespace())
+ err = restore.DropCollection(intent)
+ if err != nil {
+ return err // no context needed
+ }
+ collectionExists = false
+ }
+ } else {
+ log.Logvf(log.DebugLow, "collection %v doesn't exist, skipping drop command", intent.Namespace())
+ }
+ }
+
+ var options bson.D
+ var indexes []IndexDocument
+
+ // get indexes from system.indexes dump if we have it but don't have metadata files
+ if intent.MetadataFile == nil {
+ if _, ok := restore.dbCollectionIndexes[intent.DB]; ok {
+ if indexes, ok = restore.dbCollectionIndexes[intent.DB][intent.C]; ok {
+ log.Logvf(log.Always, "no metadata; falling back to system.indexes")
+ }
+ }
+ }
+
+ logMessageSuffix := "with no metadata"
+ // first create the collection with options from the metadata file
+ if intent.MetadataFile != nil {
+ logMessageSuffix = "using options from metadata"
+ err = intent.MetadataFile.Open()
+ if err != nil {
+ return err
+ }
+ defer intent.MetadataFile.Close()
+
+ log.Logvf(log.Always, "reading metadata for %v from %v", intent.Namespace(), intent.MetadataLocation)
+ metadata, err := ioutil.ReadAll(intent.MetadataFile)
+ if err != nil {
+ return fmt.Errorf("error reading metadata from %v: %v", intent.MetadataLocation, err)
+ }
+ options, indexes, err = restore.MetadataFromJSON(metadata)
+ if err != nil {
+ return fmt.Errorf("error parsing metadata from %v: %v", intent.MetadataLocation, err)
+ }
+
+ if restore.OutputOptions.NoOptionsRestore {
+ log.Logv(log.Info, "not restoring collection options")
+ logMessageSuffix = "with no collection options"
+ options = nil
+ }
+ }
+ if !collectionExists {
+ log.Logvf(log.Info, "creating collection %v %s", intent.Namespace(), logMessageSuffix)
+ log.Logvf(log.DebugHigh, "using collection options: %#v", options)
+ err = restore.CreateCollection(intent, options)
+ if err != nil {
+ return fmt.Errorf("error creating collection %v: %v", intent.Namespace(), err)
+ }
+ } else {
+ log.Logvf(log.Info, "collection %v already exists - skipping collection create", intent.Namespace())
+ }
+
+ var documentCount int64
+ if intent.BSONFile != nil {
+ err = intent.BSONFile.Open()
+ if err != nil {
+ return err
+ }
+ defer intent.BSONFile.Close()
+
+ log.Logvf(log.Always, "restoring %v from %v", intent.Namespace(), intent.Location)
+
+ bsonSource := db.NewDecodedBSONSource(db.NewBSONSource(intent.BSONFile))
+ defer bsonSource.Close()
+
+ documentCount, err = restore.RestoreCollectionToDB(intent.DB, intent.C, bsonSource, intent.BSONFile, intent.Size)
+ if err != nil {
+ return fmt.Errorf("error restoring from %v: %v", intent.Location, err)
+ }
+ }
+
+ // finally, add indexes
+ if len(indexes) > 0 && !restore.OutputOptions.NoIndexRestore {
+ log.Logvf(log.Always, "restoring indexes for collection %v from metadata", intent.Namespace())
+ err = restore.CreateIndexes(intent, indexes)
+ if err != nil {
+ return fmt.Errorf("error creating indexes for %v: %v", intent.Namespace(), err)
+ }
+ } else {
+ log.Logv(log.Always, "no indexes to restore")
+ }
+
+ log.Logvf(log.Always, "finished restoring %v (%v %v)",
+ intent.Namespace(), documentCount, util.Pluralize(int(documentCount), "document", "documents"))
+ return nil
+}
+
+// RestoreCollectionToDB pipes the given BSON data into the database.
+// Returns the number of documents restored and any errors that occurred.
+func (restore *MongoRestore) RestoreCollectionToDB(dbName, colName string,
+ bsonSource *db.DecodedBSONSource, file PosReader, fileSize int64) (int64, error) {
+
+ var termErr error
+ session, err := restore.SessionProvider.GetSession()
+ if err != nil {
+ return int64(0), fmt.Errorf("error establishing connection: %v", err)
+ }
+ session.SetSafe(restore.safety)
+ defer session.Close()
+
+ collection := session.DB(dbName).C(colName)
+
+ documentCount := int64(0)
+ watchProgressor := progress.NewCounter(fileSize)
+ bar := &progress.Bar{
+ Name: fmt.Sprintf("%v.%v", dbName, colName),
+ Watching: watchProgressor,
+ BarLength: progressBarLength,
+ IsBytes: true,
+ }
+ restore.progressManager.Attach(bar)
+ defer restore.progressManager.Detach(bar)
+
+ maxInsertWorkers := restore.OutputOptions.NumInsertionWorkers
+ if restore.OutputOptions.MaintainInsertionOrder {
+ maxInsertWorkers = 1
+ }
+
+ docChan := make(chan bson.Raw, insertBufferFactor)
+ resultChan := make(chan error, maxInsertWorkers)
+
+ // stream documents for this collection on docChan
+ go func() {
+ doc := bson.Raw{}
+ for bsonSource.Next(&doc) {
+ select {
+ case <-restore.termChan:
+ log.Logvf(log.Always, "terminating read on %v.%v", dbName, colName)
+ termErr = util.ErrTerminated
+ close(docChan)
+ return
+ default:
+ rawBytes := make([]byte, len(doc.Data))
+ copy(rawBytes, doc.Data)
+ docChan <- bson.Raw{Data: rawBytes}
+ documentCount++
+ }
+ }
+ close(docChan)
+ }()
+
+ log.Logvf(log.DebugLow, "using %v insertion workers", maxInsertWorkers)
+
+ for i := 0; i < maxInsertWorkers; i++ {
+ go func() {
+ // get a session copy for each insert worker
+ s := session.Copy()
+ defer s.Close()
+
+ coll := collection.With(s)
+ bulk := db.NewBufferedBulkInserter(
+ coll, restore.OutputOptions.BulkBufferSize, !restore.OutputOptions.StopOnError)
+ for rawDoc := range docChan {
+ if restore.objCheck {
+ err := bson.Unmarshal(rawDoc.Data, &bson.D{})
+ if err != nil {
+ resultChan <- fmt.Errorf("invalid object: %v", err)
+ return
+ }
+ }
+ if err := bulk.Insert(rawDoc); err != nil {
+ if db.IsConnectionError(err) || restore.OutputOptions.StopOnError {
+ // Propagate this error, since it's either a fatal connection error
+ // or the user has turned on --stopOnError
+ resultChan <- err
+ } else {
+ // Otherwise just log the error but don't propagate it.
+ log.Logvf(log.Always, "error: %v", err)
+ }
+ }
+ watchProgressor.Set(file.Pos())
+ }
+ err := bulk.Flush()
+ if err != nil {
+ if !db.IsConnectionError(err) && !restore.OutputOptions.StopOnError {
+ // Suppress this error since it's not a severe connection error and
+ // the user has not specified --stopOnError
+ log.Logvf(log.Always, "error: %v", err)
+ err = nil
+ }
+ }
+ resultChan <- err
+ return
+ }()
+
+ // stagger worker start times so all threads don't begin inserting at once
+ time.Sleep(time.Duration(i) * 10 * time.Millisecond)
+ }
+
+ // wait until all insert jobs finish
+ for done := 0; done < maxInsertWorkers; done++ {
+ err := <-resultChan
+ if err != nil {
+ return int64(0), fmt.Errorf("insertion error: %v", err)
+ }
+ }
+
+ // final error check
+ if err = bsonSource.Err(); err != nil {
+ return int64(0), fmt.Errorf("reading bson input: %v", err)
+ }
+ return documentCount, termErr
+}
diff --git a/src/mongo/gotools/mongorestore/testdata/auth_version_3.bson b/src/mongo/gotools/mongorestore/testdata/auth_version_3.bson
new file mode 100644
index 00000000000..f5673ff6fb9
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/auth_version_3.bson
Binary files differ
diff --git a/src/mongo/gotools/mongorestore/testdata/auth_version_5.bson b/src/mongo/gotools/mongorestore/testdata/auth_version_5.bson
new file mode 100644
index 00000000000..925bcc1f53a
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/auth_version_5.bson
Binary files differ
diff --git a/src/mongo/gotools/mongorestore/testdata/testdirs/badfile.txt b/src/mongo/gotools/mongorestore/testdata/testdirs/badfile.txt
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/testdirs/badfile.txt
diff --git a/src/mongo/gotools/mongorestore/testdata/testdirs/db1/baddir/out.bson b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/baddir/out.bson
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/baddir/out.bson
diff --git a/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c1.bson b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c1.bson
new file mode 100644
index 00000000000..f48276ec9e7
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c1.bson
Binary files differ
diff --git a/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c1.metadata.json b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c1.metadata.json
new file mode 100644
index 00000000000..84ccb248589
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c1.metadata.json
@@ -0,0 +1 @@
+{"options":{},"indexes":[{"v":1,"key":{"_id":1},"name":"_id_","ns":"db1.c1"}]} \ No newline at end of file
diff --git a/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c2.bson b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c2.bson
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c2.bson
diff --git a/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c3.bson b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c3.bson
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c3.bson
diff --git a/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c3.metadata.json b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c3.metadata.json
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/testdirs/db1/c3.metadata.json
diff --git a/src/mongo/gotools/mongorestore/testdata/testdirs/db2/c1.bin b/src/mongo/gotools/mongorestore/testdata/testdirs/db2/c1.bin
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/testdirs/db2/c1.bin
diff --git a/src/mongo/gotools/mongorestore/testdata/testdirs/db2/c2.txt b/src/mongo/gotools/mongorestore/testdata/testdirs/db2/c2.txt
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/testdirs/db2/c2.txt
diff --git a/src/mongo/gotools/mongorestore/testdata/testdirs/oplog.bson b/src/mongo/gotools/mongorestore/testdata/testdirs/oplog.bson
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/mongorestore/testdata/testdirs/oplog.bson
diff --git a/src/mongo/gotools/mongostat/main/mongostat.go b/src/mongo/gotools/mongostat/main/mongostat.go
new file mode 100644
index 00000000000..8663248ceb6
--- /dev/null
+++ b/src/mongo/gotools/mongostat/main/mongostat.go
@@ -0,0 +1,236 @@
+// Main package for the mongostat tool.
+package main
+
+import (
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/password"
+ "github.com/mongodb/mongo-tools/common/signals"
+ "github.com/mongodb/mongo-tools/common/util"
+ "github.com/mongodb/mongo-tools/mongostat"
+ "github.com/mongodb/mongo-tools/mongostat/stat_consumer"
+ "github.com/mongodb/mongo-tools/mongostat/stat_consumer/line"
+ "github.com/mongodb/mongo-tools/mongostat/status"
+)
+
+// optionKeyNames interprets the CLI options Columns and AppendColumns into
+// the internal keyName mapping.
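+// For example (illustrative), "host,conn=connections" yields
+// {"host": "host", "conn": "connections"}.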
+func optionKeyNames(option string) map[string]string {
+ kn := make(map[string]string)
+ columns := strings.Split(option, ",")
+ for _, column := range columns {
+ naming := strings.Split(column, "=")
+ if len(naming) == 1 {
+ kn[naming[0]] = naming[0]
+ } else {
+ kn[naming[0]] = naming[1]
+ }
+ }
+ return kn
+}
+
+// optionCustomHeaders interprets the CLI options Columns and AppendColumns
+// into a list of custom headers.
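+// For the same illustrative input "host,conn=connections", this yields
+// ["host", "conn"].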
+func optionCustomHeaders(option string) (headers []string) {
+ columns := strings.Split(option, ",")
+ for _, column := range columns {
+ naming := strings.Split(column, "=")
+ headers = append(headers, naming[0])
+ }
+ return
+}
+
+func main() {
+ // initialize command-line opts
+ opts := options.New(
+ "mongostat",
+ mongostat.Usage,
+ options.EnabledOptions{Connection: true, Auth: true, Namespace: false})
+ opts.UseReadOnlyHostDescription()
+
+ // add mongostat-specific options
+ statOpts := &mongostat.StatOptions{}
+ opts.AddOptions(statOpts)
+
+ interactiveOption := opts.FindOptionByLongName("interactive")
+ if _, available := stat_consumer.FormatterConstructors["interactive"]; !available {
+ // make --interactive inaccessible
+ interactiveOption.LongName = ""
+ interactiveOption.ShortName = 0
+ }
+
+ args, err := opts.Parse()
+ if err != nil {
+ log.Logvf(log.Always, "error parsing command line options: %v", err)
+ log.Logvf(log.Always, "try 'mongostat --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ log.SetVerbosity(opts.Verbosity)
+ signals.Handle()
+
+ sleepInterval := 1
+ if len(args) > 0 {
+ if len(args) != 1 {
+ log.Logvf(log.Always, "too many positional arguments: %v", args)
+ log.Logvf(log.Always, "try 'mongostat --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+ sleepInterval, err = strconv.Atoi(args[0])
+ if err != nil {
+ log.Logvf(log.Always, "invalid sleep interval: %v", args[0])
+ os.Exit(util.ExitBadOptions)
+ }
+ if sleepInterval < 1 {
+ log.Logvf(log.Always, "sleep interval must be at least 1 second")
+ os.Exit(util.ExitBadOptions)
+ }
+ }
+
+ // print help, if specified
+ if opts.PrintHelp(false) {
+ return
+ }
+
+ // print version, if specified
+ if opts.PrintVersion() {
+ return
+ }
+
+ if opts.Auth.Username != "" && opts.Auth.Source == "" && !opts.Auth.RequiresExternalDB() {
+		log.Logvf(log.Always, "--authenticationDatabase is required when authenticating against a non-$external database")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ if statOpts.Interactive && statOpts.Json {
+ log.Logvf(log.Always, "cannot use output formats --json and --interactive together")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ if statOpts.Deprecated && !statOpts.Json {
+ log.Logvf(log.Always, "--useDeprecatedJsonKeys can only be used when --json is also specified")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ if statOpts.Columns != "" && statOpts.AppendColumns != "" {
+ log.Logvf(log.Always, "-O cannot be used if -o is also specified")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ if statOpts.HumanReadable != "true" && statOpts.HumanReadable != "false" {
+ log.Logvf(log.Always, "--humanReadable must be set to either 'true' or 'false'")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ // we have to check this here, otherwise the user will be prompted
+ // for a password for each discovered node
+ if opts.Auth.ShouldAskForPassword() {
+ opts.Auth.Password = password.Prompt()
+ }
+
+ var factory stat_consumer.FormatterConstructor
+ if statOpts.Json {
+ factory = stat_consumer.FormatterConstructors["json"]
+ } else if statOpts.Interactive {
+ factory = stat_consumer.FormatterConstructors["interactive"]
+ } else {
+ factory = stat_consumer.FormatterConstructors[""]
+ }
+ formatter := factory(statOpts.RowCount, !statOpts.NoHeaders)
+
+ cliFlags := 0
+ if statOpts.Columns == "" {
+ cliFlags = line.FlagAlways
+ if statOpts.Discover {
+ cliFlags |= line.FlagDiscover
+ cliFlags |= line.FlagHosts
+ }
+ if statOpts.All {
+ cliFlags |= line.FlagAll
+ }
+ if strings.Contains(opts.Host, ",") {
+ cliFlags |= line.FlagHosts
+ }
+ }
+
+ var customHeaders []string
+ if statOpts.Columns != "" {
+ customHeaders = optionCustomHeaders(statOpts.Columns)
+ } else if statOpts.AppendColumns != "" {
+ customHeaders = optionCustomHeaders(statOpts.AppendColumns)
+ }
+
+ var keyNames map[string]string
+ if statOpts.Deprecated {
+ keyNames = line.DeprecatedKeyMap()
+ } else if statOpts.Columns == "" {
+ keyNames = line.DefaultKeyMap()
+ } else {
+ keyNames = optionKeyNames(statOpts.Columns)
+ }
+ if statOpts.AppendColumns != "" {
+ addKN := optionKeyNames(statOpts.AppendColumns)
+ for k, v := range addKN {
+ keyNames[k] = v
+ }
+ }
+
+ readerConfig := &status.ReaderConfig{
+ HumanReadable: statOpts.HumanReadable == "true",
+ }
+ if statOpts.Json {
+ readerConfig.TimeFormat = "15:04:05"
+ }
+
+ consumer := stat_consumer.NewStatConsumer(cliFlags, customHeaders,
+ keyNames, readerConfig, formatter, os.Stdout)
+ seedHosts := util.CreateConnectionAddrs(opts.Host, opts.Port)
+ var cluster mongostat.ClusterMonitor
+ if statOpts.Discover || len(seedHosts) > 1 {
+ cluster = &mongostat.AsyncClusterMonitor{
+ ReportChan: make(chan *status.ServerStatus),
+ ErrorChan: make(chan *status.NodeError),
+ LastStatLines: map[string]*line.StatLine{},
+ Consumer: consumer,
+ }
+ } else {
+ cluster = &mongostat.SyncClusterMonitor{
+ ReportChan: make(chan *status.ServerStatus),
+ ErrorChan: make(chan *status.NodeError),
+ Consumer: consumer,
+ }
+ }
+
+ var discoverChan chan string
+ if statOpts.Discover {
+ discoverChan = make(chan string, 128)
+ }
+
+ opts.Direct = true
+ _, setName := util.ParseConnectionString(opts.Host)
+ opts.ReplicaSetName = setName
+ stat := &mongostat.MongoStat{
+ Options: opts,
+ StatOptions: statOpts,
+ Nodes: map[string]*mongostat.NodeMonitor{},
+ Discovered: discoverChan,
+ SleepInterval: time.Duration(sleepInterval) * time.Second,
+ Cluster: cluster,
+ }
+
+ for _, v := range seedHosts {
+ stat.AddNewNode(v)
+ }
+
+ // kick it off
+ err = stat.Run()
+ if err != nil {
+ log.Logvf(log.Always, "Failed: %v", err)
+ os.Exit(util.ExitError)
+ }
+}
diff --git a/src/mongo/gotools/mongostat/mongostat.go b/src/mongo/gotools/mongostat/mongostat.go
new file mode 100644
index 00000000000..2ed06310649
--- /dev/null
+++ b/src/mongo/gotools/mongostat/mongostat.go
@@ -0,0 +1,380 @@
+// Package mongostat provides an overview of the status of a currently running mongod or mongos instance.
+package mongostat
+
+import (
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/mongostat/stat_consumer"
+ "github.com/mongodb/mongo-tools/mongostat/stat_consumer/line"
+ "github.com/mongodb/mongo-tools/mongostat/status"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+)
+
+// MongoStat is a container for the user-specified options and
+// internal cluster state used for running mongostat.
+type MongoStat struct {
+ // Generic mongo tool options.
+ Options *options.ToolOptions
+
+ // Mongostat-specific output options.
+ StatOptions *StatOptions
+
+	// How long to sleep between polling the server and printing the rows.
+ SleepInterval time.Duration
+
+ // New nodes can be "discovered" by any other node by sending a hostname
+ // on this channel.
+ Discovered chan string
+
+ // A map of hostname -> NodeMonitor for all the hosts that
+ // are being monitored.
+ Nodes map[string]*NodeMonitor
+
+ // ClusterMonitor to manage collecting and printing the stats from all nodes.
+ Cluster ClusterMonitor
+
+ // Mutex to handle safe concurrent adding to or looping over discovered nodes.
+ nodesLock sync.RWMutex
+}
+
+// ConfigShard represents a shard document as it
+// appears in the config.shards collection.
+type ConfigShard struct {
+ Id string `bson:"_id"`
+ Host string `bson:"host"`
+}
+
+// NodeMonitor contains the connection pool for a single host and collects the
+// mongostat data for that host on a regular interval.
+type NodeMonitor struct {
+ host, alias string
+ sessionProvider *db.SessionProvider
+
+ // The time at which the node monitor last processed an update successfully.
+ LastUpdate time.Time
+
+ // The most recent error encountered when collecting stats for this node.
+ Err error
+}
+
+// SyncClusterMonitor is an implementation of ClusterMonitor that writes output
+// synchronized with the timing of when the polling samples are collected.
+// Only works with a single host at a time.
+type SyncClusterMonitor struct {
+ // Channel to listen for incoming stat data
+ ReportChan chan *status.ServerStatus
+
+ // Channel to listen for incoming errors
+ ErrorChan chan *status.NodeError
+
+ // Creates and consumes StatLines using ServerStatuses
+ Consumer *stat_consumer.StatConsumer
+}
+
+// ClusterMonitor maintains an internal representation of a cluster's state,
+// which can be refreshed with calls to Update(), and dumps output representing
+// this internal state on an interval.
+type ClusterMonitor interface {
+	// Monitor starts the monitoring and output-dumping loop; sleep is the
+	// interval to wait between output dumps. It returns an error if it
+	// fails, and nil when monitoring ends.
+ Monitor(sleep time.Duration) error
+
+ // Update signals the ClusterMonitor implementation to refresh its internal
+ // state using the data contained in the provided ServerStatus.
+ Update(stat *status.ServerStatus, err *status.NodeError)
+}
+
+// AsyncClusterMonitor is an implementation of ClusterMonitor that writes output
+// derived from polling samples collected asynchronously from one or more servers.
+type AsyncClusterMonitor struct {
+ Discover bool
+
+ // Channel to listen for incoming stat data
+ ReportChan chan *status.ServerStatus
+
+ // Channel to listen for incoming errors
+ ErrorChan chan *status.NodeError
+
+ // Map of hostname -> latest stat data for the host
+ LastStatLines map[string]*line.StatLine
+
+ // Mutex to protect access to LastStatLines
+ mapLock sync.RWMutex
+
+ // Creates and consumes StatLines using ServerStatuses
+ Consumer *stat_consumer.StatConsumer
+}
+
+// Update refreshes the internal state of the cluster monitor with the data
+// in the ServerStatus. SyncClusterMonitor's implementation of Update blocks
+// until it has written out its state, so that output is always dumped exactly
+// once for each poll.
+func (cluster *SyncClusterMonitor) Update(stat *status.ServerStatus, err *status.NodeError) {
+ if err != nil {
+ cluster.ErrorChan <- err
+ return
+ }
+ cluster.ReportChan <- stat
+}
+
+// Monitor waits for data on the cluster's report channel. Once new data comes
+// in, it formats and then displays it to stdout.
+func (cluster *SyncClusterMonitor) Monitor(_ time.Duration) error {
+ receivedData := false
+ for {
+ var statLine *line.StatLine
+ var ok bool
+ select {
+ case stat := <-cluster.ReportChan:
+ statLine, ok = cluster.Consumer.Update(stat)
+ if !ok {
+ continue
+ }
+ case err := <-cluster.ErrorChan:
+ if !receivedData {
+ return err
+ }
+ statLine = &line.StatLine{
+ Error: err,
+ Fields: map[string]string{"host": err.Host},
+ }
+ }
+ receivedData = true
+ if cluster.Consumer.FormatLines([]*line.StatLine{statLine}) {
+ return nil
+ }
+ }
+}
+
+// updateHostInfo updates the internal map with the given StatLine data.
+// Safe for concurrent access.
+func (cluster *AsyncClusterMonitor) updateHostInfo(stat *line.StatLine) {
+ cluster.mapLock.Lock()
+ defer cluster.mapLock.Unlock()
+ host := stat.Fields["host"]
+ cluster.LastStatLines[host] = stat
+}
+
+// printSnapshot formats and dumps the current state of all the stats collected.
+// It returns whether the program should now exit.
+func (cluster *AsyncClusterMonitor) printSnapshot() bool {
+ cluster.mapLock.RLock()
+ defer cluster.mapLock.RUnlock()
+ lines := make([]*line.StatLine, 0, len(cluster.LastStatLines))
+ for _, stat := range cluster.LastStatLines {
+ lines = append(lines, stat)
+ }
+ if len(lines) == 0 {
+ return false
+ }
+ return cluster.Consumer.FormatLines(lines)
+}
+
+// Update sends a new StatLine on the cluster's report channel.
+func (cluster *AsyncClusterMonitor) Update(stat *status.ServerStatus, err *status.NodeError) {
+ if err != nil {
+ cluster.ErrorChan <- err
+ return
+ }
+ cluster.ReportChan <- stat
+}
+
+// The Async implementation of Monitor starts a goroutine that listens for
+// incoming stat data and errors, and dumps snapshots at a regular interval.
+func (cluster *AsyncClusterMonitor) Monitor(sleep time.Duration) error {
+ select {
+ case stat := <-cluster.ReportChan:
+ cluster.Consumer.Update(stat)
+ case err := <-cluster.ErrorChan:
+ // error out if the first result is an error
+ return err
+ }
+
+ go func() {
+ for {
+ select {
+ case stat := <-cluster.ReportChan:
+ statLine, ok := cluster.Consumer.Update(stat)
+ if ok {
+ cluster.updateHostInfo(statLine)
+ }
+ case err := <-cluster.ErrorChan:
+ cluster.updateHostInfo(&line.StatLine{
+ Error: err,
+ Fields: map[string]string{"host": err.Host},
+ })
+ }
+ }
+ }()
+
+ for range time.Tick(sleep) {
+ if cluster.printSnapshot() {
+ break
+ }
+ }
+ return nil
+}
+
+// NewNodeMonitor copies the same connection settings from an instance of
+// ToolOptions, but monitors fullHost.
+func NewNodeMonitor(opts options.ToolOptions, fullHost string) (*NodeMonitor, error) {
+ optsCopy := opts
+ host, port := parseHostPort(fullHost)
+ optsCopy.Connection = &options.Connection{
+ Host: host,
+ Port: port,
+ Timeout: opts.Timeout,
+ }
+ optsCopy.Direct = true
+ sessionProvider, err := db.NewSessionProvider(optsCopy)
+ if err != nil {
+ return nil, err
+ }
+ return &NodeMonitor{
+ host: fullHost,
+ sessionProvider: sessionProvider,
+ LastUpdate: time.Now(),
+ Err: nil,
+ }, nil
+}
+
+// Poll collects the stat info for a single node and sends any discovered
+// hostnames on the "discover" channel if checkShards is true.
+func (node *NodeMonitor) Poll(discover chan string, checkShards bool) (*status.ServerStatus, error) {
+ stat := &status.ServerStatus{}
+ log.Logvf(log.DebugHigh, "getting session on server: %v", node.host)
+ s, err := node.sessionProvider.GetSession()
+ if err != nil {
+ log.Logvf(log.DebugLow, "got error getting session to server %v", node.host)
+ return nil, err
+ }
+ log.Logvf(log.DebugHigh, "got session on server: %v", node.host)
+
+ // The read pref for the session must be set to 'secondary' to enable using
+ // the driver with 'direct' connections, which disables the built-in
+ // replset discovery mechanism since we do our own node discovery here.
+ s.SetMode(mgo.Eventual, true)
+
+ // Disable the socket timeout - otherwise if db.serverStatus() takes a long time on the server
+ // side, the client will close the connection early and report an error.
+ s.SetSocketTimeout(0)
+ defer s.Close()
+
+ err = s.DB("admin").Run(bson.D{{"serverStatus", 1}, {"recordStats", 0}}, stat)
+ if err != nil {
+ log.Logvf(log.DebugLow, "got error calling serverStatus against server %v", node.host)
+ return nil, err
+ }
+ statMap := make(map[string]interface{})
+ s.DB("admin").Run(bson.D{{"serverStatus", 1}, {"recordStats", 0}}, statMap)
+ stat.Flattened = status.Flatten(statMap)
+
+ node.Err = nil
+ stat.SampleTime = time.Now()
+
+ if stat.Repl != nil && discover != nil {
+ for _, host := range stat.Repl.Hosts {
+ discover <- host
+ }
+ for _, host := range stat.Repl.Passives {
+ discover <- host
+ }
+ }
+ node.alias = stat.Host
+ stat.Host = node.host
+ if discover != nil && stat != nil && status.IsMongos(stat) && checkShards {
+ log.Logvf(log.DebugLow, "checking config database to discover shards")
+ shardCursor := s.DB("config").C("shards").Find(bson.M{}).Iter()
+ shard := ConfigShard{}
+ for shardCursor.Next(&shard) {
+ shardHosts := strings.Split(shard.Host, ",")
+ for _, shardHost := range shardHosts {
+ discover <- shardHost
+ }
+ }
+ shardCursor.Close()
+ }
+
+ return stat, nil
+}
+
+// Watch continuously collects and processes stats for a single node on a
+// regular interval. At each interval, it triggers the node's Poll function
+// with the 'discover' channel.
+func (node *NodeMonitor) Watch(sleep time.Duration, discover chan string, cluster ClusterMonitor) {
+ var cycle uint64
+ for ticker := time.Tick(sleep); ; <-ticker {
+ log.Logvf(log.DebugHigh, "polling server: %v", node.host)
+ stat, err := node.Poll(discover, cycle%10 == 0)
+
+ if stat != nil {
+ log.Logvf(log.DebugHigh, "successfully got statline from host: %v", node.host)
+ }
+ var nodeError *status.NodeError
+ if err != nil {
+ nodeError = status.NewNodeError(node.host, err)
+ }
+ cluster.Update(stat, nodeError)
+ cycle++
+ }
+}
+
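+// parseHostPort splits a host string into host and port parts, defaulting the
+// port to "27017"; e.g. "localhost:27018" yields ("localhost", "27018") and
+// "example.com" yields ("example.com", "27017").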
+func parseHostPort(fullHostName string) (string, string) {
+ if colon := strings.LastIndex(fullHostName, ":"); colon >= 0 {
+ return fullHostName[0:colon], fullHostName[colon+1:]
+ }
+ return fullHostName, "27017"
+}
+
+// AddNewNode adds a new host name to be monitored and spawns the necessary
+// goroutine to collect data from it.
+func (mstat *MongoStat) AddNewNode(fullhost string) error {
+ mstat.nodesLock.Lock()
+ defer mstat.nodesLock.Unlock()
+
+ // Remove the 'shardXX/' prefix from the hostname, if applicable
+ pieces := strings.Split(fullhost, "/")
+ fullhost = pieces[len(pieces)-1]
+
+ if _, hasKey := mstat.Nodes[fullhost]; hasKey {
+ return nil
+ }
+ for _, node := range mstat.Nodes {
+ if node.alias == fullhost {
+ return nil
+ }
+ }
+ log.Logvf(log.DebugLow, "adding new host to monitoring: %v", fullhost)
+ // Create a new node monitor for this host
+ node, err := NewNodeMonitor(*mstat.Options, fullhost)
+ if err != nil {
+ return err
+ }
+ mstat.Nodes[fullhost] = node
+ go node.Watch(mstat.SleepInterval, mstat.Discovered, mstat.Cluster)
+ return nil
+}
+
+// Run is the top-level function that starts the monitoring
+// and discovery goroutines.
+func (mstat *MongoStat) Run() error {
+ if mstat.Discovered != nil {
+ go func() {
+ for {
+ newHost := <-mstat.Discovered
+ err := mstat.AddNewNode(newHost)
+ if err != nil {
+ log.Logvf(log.Always, "can't add discovered node %v: %v", newHost, err)
+ }
+ }
+ }()
+ }
+ return mstat.Cluster.Monitor(mstat.SleepInterval)
+}
diff --git a/src/mongo/gotools/mongostat/mongostat_test.go b/src/mongo/gotools/mongostat/mongostat_test.go
new file mode 100644
index 00000000000..e4ad41a84b1
--- /dev/null
+++ b/src/mongo/gotools/mongostat/mongostat_test.go
@@ -0,0 +1,122 @@
+package mongostat
+
+import (
+ "io/ioutil"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/mongodb/mongo-tools/common/testutil"
+ "github.com/mongodb/mongo-tools/mongostat/stat_consumer/line"
+ "github.com/mongodb/mongo-tools/mongostat/status"
+ . "github.com/smartystreets/goconvey/convey"
+ "gopkg.in/mgo.v2/bson"
+)
+
+func readBSONFile(file string, t *testing.T) (stat *status.ServerStatus) {
+ stat = &status.ServerStatus{}
+ ssBSON, err := ioutil.ReadFile(file)
+ if err == nil {
+ err = bson.Unmarshal(ssBSON, stat)
+ }
+ if err != nil {
+ t.Logf("Could not load new ServerStatus BSON: %s", err)
+ t.FailNow()
+ }
+ return
+}
+
+func TestStatLine(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ defaultHeaders := make([]string, len(line.CondHeaders))
+ for i, h := range line.CondHeaders {
+ defaultHeaders[i] = h.Key
+ }
+ defaultConfig := &status.ReaderConfig{
+ HumanReadable: true,
+ }
+
+ serverStatusOld := readBSONFile("test_data/server_status_old.bson", t)
+ serverStatusNew := readBSONFile("test_data/server_status_new.bson", t)
+ serverStatusNew.ShardCursorType = nil
+ serverStatusOld.ShardCursorType = nil
+
+ Convey("StatsLine should accurately calculate opcounter diffs", t, func() {
+ statsLine := line.NewStatLine(serverStatusOld, serverStatusNew, defaultHeaders, defaultConfig)
+ So(statsLine.Fields["insert"], ShouldEqual, "10")
+ So(statsLine.Fields["query"], ShouldEqual, "5")
+ So(statsLine.Fields["update"], ShouldEqual, "7")
+ So(statsLine.Fields["delete"], ShouldEqual, "2")
+ So(statsLine.Fields["getmore"], ShouldEqual, "3")
+ command := strings.Split(statsLine.Fields["command"], "|")[0]
+ So(command, ShouldEqual, "669")
+ So(statsLine.Fields["faults"], ShouldEqual, "5")
+
+ locked := strings.Split(statsLine.Fields["locked_db"], ":")
+ So(locked[0], ShouldEqual, "test")
+ So(locked[1], ShouldEqual, "50.0%")
+ qrw := strings.Split(statsLine.Fields["qrw"], "|")
+ So(qrw[0], ShouldEqual, "3")
+ So(qrw[1], ShouldEqual, "2")
+ arw := strings.Split(statsLine.Fields["arw"], "|")
+ So(arw[0], ShouldEqual, "4")
+ So(arw[1], ShouldEqual, "6")
+ So(statsLine.Fields["net_in"], ShouldEqual, "2.00k")
+ So(statsLine.Fields["net_out"], ShouldEqual, "3.00k")
+ So(statsLine.Fields["conn"], ShouldEqual, "5")
+ })
+
+ serverStatusNew.SampleTime, _ = time.Parse("2006 Jan 02 15:04:05", "2015 Nov 30 4:25:33")
+ Convey("StatsLine with non-default interval should calculate average diffs", t, func() {
+ statsLine := line.NewStatLine(serverStatusOld, serverStatusNew, defaultHeaders, defaultConfig)
+ // Opcounters and faults are averaged over sample period
+ So(statsLine.Fields["insert"], ShouldEqual, "3")
+ So(statsLine.Fields["query"], ShouldEqual, "1")
+ So(statsLine.Fields["update"], ShouldEqual, "2")
+ delete := strings.TrimPrefix(statsLine.Fields["delete"], "*")
+ So(delete, ShouldEqual, "0")
+ So(statsLine.Fields["getmore"], ShouldEqual, "1")
+ command := strings.Split(statsLine.Fields["command"], "|")[0]
+ So(command, ShouldEqual, "223")
+ So(statsLine.Fields["faults"], ShouldEqual, "1")
+
+ locked := strings.Split(statsLine.Fields["locked_db"], ":")
+ So(locked[0], ShouldEqual, "test")
+ So(locked[1], ShouldEqual, "50.0%")
+ qrw := strings.Split(statsLine.Fields["qrw"], "|")
+ So(qrw[0], ShouldEqual, "3")
+ So(qrw[1], ShouldEqual, "2")
+ arw := strings.Split(statsLine.Fields["arw"], "|")
+ So(arw[0], ShouldEqual, "4")
+ So(arw[1], ShouldEqual, "6")
+ So(statsLine.Fields["net_in"], ShouldEqual, "666b")
+ So(statsLine.Fields["net_out"], ShouldEqual, "1.00k")
+ So(statsLine.Fields["conn"], ShouldEqual, "5")
+ })
+}
+
+func TestIsMongos(t *testing.T) {
+ testutil.VerifyTestType(t, testutil.UnitTestType)
+
+ runCheck := func(process string) bool {
+ return status.IsMongos(&status.ServerStatus{
+ Process: process,
+ })
+ }
+
+ Convey("should accept reasonable process names", t, func() {
+ So(runCheck("/mongos-prod.exe"), ShouldBeTrue)
+ So(runCheck("/mongos.exe"), ShouldBeTrue)
+ So(runCheck("mongos"), ShouldBeTrue)
+ So(runCheck("mongodb/bin/mongos"), ShouldBeTrue)
+ So(runCheck(`C:\data\mci\48de1dc1ec3c2be5dcd6a53739578de4\src\mongos.exe`), ShouldBeTrue)
+ })
+	Convey("should reject unreasonable process names", t, func() {
+ So(runCheck("mongosx/mongod"), ShouldBeFalse)
+ So(runCheck("mongostat"), ShouldBeFalse)
+ So(runCheck("mongos_stuff/mongod"), ShouldBeFalse)
+ So(runCheck("mongos.stuff/mongod"), ShouldBeFalse)
+ So(runCheck("mongodb/bin/mongod"), ShouldBeFalse)
+ })
+}
diff --git a/src/mongo/gotools/mongostat/options.go b/src/mongo/gotools/mongostat/options.go
new file mode 100644
index 00000000000..27909bd0d0c
--- /dev/null
+++ b/src/mongo/gotools/mongostat/options.go
@@ -0,0 +1,27 @@
+package mongostat
+
+var Usage = `<options> <polling interval in seconds>
+
+Monitor basic MongoDB server statistics.
+
+See http://docs.mongodb.org/manual/reference/program/mongostat/ for more information.`
+
+// StatOptions defines the set of options to use for configuring mongostat.
+type StatOptions struct {
+ Columns string `short:"o" value-name:"<field>[,<field>]*" description:"fields to show. For custom fields, use dot-syntax to index into serverStatus output, and optional methods .diff() and .rate() e.g. metrics.record.moves.diff()"`
+ AppendColumns string `short:"O" value-name:"<field>[,<field>]*" description:"like -o, but preloaded with default fields. Specified fields inserted after default output"`
+ HumanReadable string `long:"humanReadable" default:"true" description:"print sizes and time in human readable format (e.g. 1K 234M 2G). To use the more precise machine readable format, use --humanReadable=false"`
+ NoHeaders bool `long:"noheaders" description:"don't output column names"`
+ RowCount int64 `long:"rowcount" value-name:"<count>" short:"n" description:"number of stats lines to print (0 for indefinite)"`
+ Discover bool `long:"discover" description:"discover nodes and display stats for all"`
+ Http bool `long:"http" description:"use HTTP instead of raw db connection"`
+ All bool `long:"all" description:"all optional fields"`
+ Json bool `long:"json" description:"output as JSON rather than a formatted table"`
+ Deprecated bool `long:"useDeprecatedJsonKeys" description:"use old key names; only valid with the json output option."`
+ Interactive bool `short:"i" long:"interactive" description:"display stats in a non-scrolling interface"`
+}
+
+// Name returns a human-readable group name for mongostat options.
+func (*StatOptions) Name() string {
+ return "stat"
+}
diff --git a/src/mongo/gotools/mongostat/stat_consumer/formatter.go b/src/mongo/gotools/mongostat/stat_consumer/formatter.go
new file mode 100644
index 00000000000..2fb1a4c5506
--- /dev/null
+++ b/src/mongo/gotools/mongostat/stat_consumer/formatter.go
@@ -0,0 +1,34 @@
+package stat_consumer
+
+import (
+ "sync/atomic"
+
+ "github.com/mongodb/mongo-tools/mongostat/stat_consumer/line"
+)
+
+// A LineFormatter formats StatLines for printing.
+type LineFormatter interface {
+ // FormatLines returns the string representation of the StatLines that are passed in.
+ FormatLines(lines []*line.StatLine, headerKeys []string, keyNames map[string]string) string
+
+ // IsFinished returns true iff the formatter cannot print any more data
+ IsFinished() bool
+}
+
+type limitableFormatter struct {
+ // atomic operations are performed on rowCount, so these two variables
+ // should stay at the beginning for the sake of variable alignment
+ maxRows, rowCount int64
+}
+
+func (lf *limitableFormatter) increment() {
+ atomic.AddInt64(&lf.rowCount, 1)
+}
+
+func (lf *limitableFormatter) IsFinished() bool {
+ return lf.maxRows > 0 && atomic.LoadInt64(&lf.rowCount) >= lf.maxRows
+}
+
+type FormatterConstructor func(maxRows int64, includeHeader bool) LineFormatter
+
+var FormatterConstructors = map[string]FormatterConstructor{}
diff --git a/src/mongo/gotools/mongostat/stat_consumer/grid_line_formatter.go b/src/mongo/gotools/mongostat/stat_consumer/grid_line_formatter.go
new file mode 100644
index 00000000000..53a6e2d4a37
--- /dev/null
+++ b/src/mongo/gotools/mongostat/stat_consumer/grid_line_formatter.go
@@ -0,0 +1,107 @@
+package stat_consumer
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/mongodb/mongo-tools/common/text"
+ "github.com/mongodb/mongo-tools/mongostat/stat_consumer/line"
+)
+
+// GridLineFormatter uses a text.GridWriter to format the StatLines as a grid
+type GridLineFormatter struct {
+ *limitableFormatter
+ *text.GridWriter
+
+ // If true, enables printing of headers to output
+ includeHeader bool
+
+ // Counter for periodic headers
+ index int
+
+ // Tracks number of hosts so we can reprint headers when it changes
+ prevLineCount int
+}
+
+func NewGridLineFormatter(maxRows int64, includeHeader bool) LineFormatter {
+ return &GridLineFormatter{
+ limitableFormatter: &limitableFormatter{maxRows: maxRows},
+ includeHeader: includeHeader,
+ GridWriter: &text.GridWriter{ColumnPadding: 1},
+ }
+}
+
+func init() {
+ FormatterConstructors[""] = NewGridLineFormatter
+}
+
+// headerInterval is the number of snapshots printed before the header is re-printed in GridLineFormatter
+const headerInterval = 10
+
+// FormatLines formats the StatLines as a grid
+func (glf *GridLineFormatter) FormatLines(lines []*line.StatLine, headerKeys []string, keyNames map[string]string) string {
+ buf := &bytes.Buffer{}
+
+ // Sort the stat lines by hostname, so that we see the output
+ // in the same order for each snapshot
+ sort.Sort(line.StatLines(lines))
+
+ // Print the columns that are enabled
+ for _, key := range headerKeys {
+ header := keyNames[key]
+ glf.WriteCell(header)
+ }
+ glf.EndRow()
+
+ for _, l := range lines {
+ if l.Printed && l.Error == nil {
+ l.Error = fmt.Errorf("no data received")
+ }
+ l.Printed = true
+
+ if l.Error != nil {
+ glf.WriteCell(l.Fields["host"])
+ glf.Feed(l.Error.Error())
+ continue
+ }
+
+ for _, key := range headerKeys {
+ glf.WriteCell(l.Fields[key])
+ }
+ glf.EndRow()
+ }
+ glf.Flush(buf)
+
+ // clear the flushed data
+ glf.Reset()
+
+ gridLine := buf.String()
+
+ if glf.prevLineCount != len(lines) {
+ glf.index = 0
+ }
+ glf.prevLineCount = len(lines)
+
+ if !glf.includeHeader || glf.index != 0 {
+ // Strip out the first line of the formatted output,
+ // which contains the headers. They've been left in up until this point
+ // in order to force the formatting of the columns to be wide enough.
+ firstNewLinePos := strings.Index(gridLine, "\n")
+ if firstNewLinePos >= 0 {
+ gridLine = gridLine[firstNewLinePos+1:]
+ }
+ }
+ glf.index++
+ if glf.index == headerInterval {
+ glf.index = 0
+ }
+
+ if len(lines) > 1 {
+ // For multi-node stats, add an extra newline to tell each block apart
+ gridLine = fmt.Sprintf("\n%s", gridLine)
+ }
+ glf.increment()
+ return gridLine
+}
diff --git a/src/mongo/gotools/mongostat/stat_consumer/interactive_line_formatter.go b/src/mongo/gotools/mongostat/stat_consumer/interactive_line_formatter.go
new file mode 100644
index 00000000000..87cc08fef26
--- /dev/null
+++ b/src/mongo/gotools/mongostat/stat_consumer/interactive_line_formatter.go
@@ -0,0 +1,231 @@
+// +build !solaris
+
+package stat_consumer
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/mongodb/mongo-tools/mongostat/stat_consumer/line"
+ "github.com/nsf/termbox-go"
+)
+
+// InteractiveLineFormatter produces ncurses-style output
+type InteractiveLineFormatter struct {
+ *limitableFormatter
+
+ includeHeader bool
+ table []*column
+ row, col int
+ showHelp bool
+}
+
+func NewInteractiveLineFormatter(_ int64, includeHeader bool) LineFormatter {
+ ilf := &InteractiveLineFormatter{
+ limitableFormatter: &limitableFormatter{maxRows: 1},
+ includeHeader: includeHeader,
+ }
+ if err := termbox.Init(); err != nil {
+ fmt.Printf("Error setting up terminal UI: %v", err)
+ panic("could not set up interactive terminal interface")
+ }
+ go func() {
+ for {
+ ilf.handleEvent(termbox.PollEvent())
+ ilf.update()
+ }
+ }()
+ return ilf
+}
+
+func init() {
+ FormatterConstructors["interactive"] = NewInteractiveLineFormatter
+}
+
+type column struct {
+ cells []*cell
+ width int
+}
+
+type cell struct {
+ text string
+ changed bool
+ feed bool
+ selected bool
+ header bool
+}
+
+// FormatLines formats the StatLines as a table in the terminal ui
+func (ilf *InteractiveLineFormatter) FormatLines(lines []*line.StatLine, headerKeys []string, keyNames map[string]string) string {
+ // keep ordering consistent
+ sort.Sort(line.StatLines(lines))
+
+ if ilf.includeHeader {
+ headerLine := &line.StatLine{
+ Fields: keyNames,
+ }
+ lines = append([]*line.StatLine{headerLine}, lines...)
+ }
+
+ // add new rows and columns when new hosts and stats are shown
+ for len(ilf.table) < len(headerKeys) {
+ ilf.table = append(ilf.table, new(column))
+ }
+ for _, column := range ilf.table {
+ for len(column.cells) < len(lines) {
+ column.cells = append(column.cells, new(cell))
+ }
+ }
+
+ for i, column := range ilf.table {
+ key := headerKeys[i]
+ for j, cell := range column.cells {
+ // i, j <=> col, row
+ l := lines[j]
+ if l.Error != nil && i == 0 {
+ cell.text = fmt.Sprintf("%s: %s", l.Fields["host"], l.Error)
+ cell.feed = true
+ continue
+ }
+ newText := l.Fields[key]
+ cell.changed = cell.text != newText
+ cell.text = newText
+ cell.feed = false
+ cell.header = j == 0 && ilf.includeHeader
+ if w := len(cell.text); w > column.width {
+ column.width = w
+ }
+ }
+ }
+
+ ilf.update()
+ return ""
+}
+
+func (ilf *InteractiveLineFormatter) handleEvent(ev termbox.Event) {
+ if ev.Type != termbox.EventKey {
+ return
+ }
+ currSelected := ilf.table[ilf.col].cells[ilf.row].selected
+ switch {
+ case ev.Key == termbox.KeyCtrlC:
+ fallthrough
+ case ev.Key == termbox.KeyEsc:
+ fallthrough
+ case ev.Ch == 'q':
+ termbox.Close()
+ // our max rowCount is set to 1; increment to exit
+ ilf.increment()
+ case ev.Key == termbox.KeyArrowRight:
+ fallthrough
+ case ev.Ch == 'l':
+ if ilf.col+1 < len(ilf.table) {
+ ilf.col++
+ }
+ case ev.Key == termbox.KeyArrowLeft:
+ fallthrough
+ case ev.Ch == 'h':
+ if ilf.col > 0 {
+ ilf.col--
+ }
+ case ev.Key == termbox.KeyArrowDown:
+ fallthrough
+ case ev.Ch == 'j':
+ if ilf.row+1 < len(ilf.table[0].cells) {
+ ilf.row++
+ }
+ case ev.Key == termbox.KeyArrowUp:
+ fallthrough
+ case ev.Ch == 'k':
+ if ilf.row > 0 {
+ ilf.row--
+ }
+ case ev.Ch == 's':
+ cell := ilf.table[ilf.col].cells[ilf.row]
+ cell.selected = !cell.selected
+ case ev.Key == termbox.KeySpace:
+ for _, column := range ilf.table {
+ for _, cell := range column.cells {
+ cell.selected = false
+ }
+ }
+ case ev.Ch == 'c':
+ for _, cell := range ilf.table[ilf.col].cells {
+ cell.selected = !currSelected
+ }
+ case ev.Ch == 'v':
+ for _, column := range ilf.table {
+ cell := column.cells[ilf.row]
+ cell.selected = !currSelected
+ }
+ case ev.Ch == 'r':
+ termbox.Sync()
+ case ev.Ch == '?':
+ ilf.showHelp = !ilf.showHelp
+ default:
+		// output a bell on unknown inputs
+ fmt.Printf("\a")
+ }
+}
+
+const (
+ helpPrompt = `Press '?' to toggle help`
+ helpMessage = `
+Exit: 'q' or <Esc>
+Navigation: arrow keys or 'h', 'j', 'k', and 'l'
+Highlighting: 'v' to toggle row
+ 'c' to toggle column
+ 's' to toggle cell
+ <Space> to clear all highlighting
+Redraw: 'r' to fix broken-looking output`
+)
+
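+// writeString draws text into the termbox cell buffer starting at column x,
+// row y, moving down one row for each newline in the text.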
+func writeString(x, y int, text string, fg, bg termbox.Attribute) {
+ for i, str := range strings.Split(text, "\n") {
+ for j, ch := range str {
+ termbox.SetCell(x+j, y+i, ch, fg, bg)
+ }
+ }
+}
+
+func (ilf *InteractiveLineFormatter) update() {
+ termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
+ x := 0
+ for i, column := range ilf.table {
+ for j, cell := range column.cells {
+ if ilf.col == i && ilf.row == j {
+ termbox.SetCursor(x+column.width-1, j)
+ }
+ if cell.text == "" {
+ continue
+ }
+ fgAttr := termbox.ColorWhite
+ bgAttr := termbox.ColorDefault
+ if cell.selected {
+ fgAttr = termbox.ColorBlack
+ bgAttr = termbox.ColorWhite
+ }
+ if cell.changed || cell.feed {
+ fgAttr |= termbox.AttrBold
+ }
+ if cell.header {
+ fgAttr |= termbox.AttrUnderline
+ fgAttr |= termbox.AttrBold
+ }
+ padding := column.width - len(cell.text)
+ if cell.feed && padding < 0 {
+ padding = 0
+ }
+ writeString(x, j, strings.Repeat(" ", padding), termbox.ColorDefault, bgAttr)
+ writeString(x+padding, j, cell.text, fgAttr, bgAttr)
+ }
+ x += 1 + column.width
+ }
+ rowCount := len(ilf.table[0].cells)
+ writeString(0, rowCount+1, helpPrompt, termbox.ColorWhite, termbox.ColorDefault)
+ if ilf.showHelp {
+ writeString(0, rowCount+2, helpMessage, termbox.ColorWhite, termbox.ColorDefault)
+ }
+ termbox.Flush()
+}
diff --git a/src/mongo/gotools/mongostat/stat_consumer/json_line_formatter.go b/src/mongo/gotools/mongostat/stat_consumer/json_line_formatter.go
new file mode 100644
index 00000000000..0452072ad39
--- /dev/null
+++ b/src/mongo/gotools/mongostat/stat_consumer/json_line_formatter.go
@@ -0,0 +1,60 @@
+package stat_consumer
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/mongodb/mongo-tools/mongostat/stat_consumer/line"
+)
+
+// JSONLineFormatter converts the StatLines to JSON
+type JSONLineFormatter struct {
+ *limitableFormatter
+}
+
+func NewJSONLineFormatter(maxRows int64, _ bool) LineFormatter {
+ return &JSONLineFormatter{
+ limitableFormatter: &limitableFormatter{maxRows: maxRows},
+ }
+}
+
+func init() {
+ FormatterConstructors["json"] = NewJSONLineFormatter
+}
+
+// FormatLines formats the StatLines as JSON
+func (jlf *JSONLineFormatter) FormatLines(lines []*line.StatLine, headerKeys []string, keyNames map[string]string) string {
+	// middle ground between the StatLines and the JSON string to be returned
+ jsonFormat := map[string]interface{}{}
+
+ // convert each StatLine to JSON
+ for _, l := range lines {
+ lineJson := make(map[string]interface{})
+
+ if l.Printed && l.Error == nil {
+ l.Error = fmt.Errorf("no data received")
+ }
+ l.Printed = true
+
+ // check for error
+ if l.Error != nil {
+ lineJson["error"] = l.Error.Error()
+ jsonFormat[l.Fields["host"]] = lineJson
+ continue
+ }
+
+ for _, key := range headerKeys {
+ lineJson[keyNames[key]] = l.Fields[key]
+ }
+ jsonFormat[l.Fields["host"]] = lineJson
+ }
+
+ // convert the JSON format of the lines to a json string to be returned
+ linesAsJsonBytes, err := json.Marshal(jsonFormat)
+ if err != nil {
+ return fmt.Sprintf(`{"json error": "%v"}`, err.Error())
+ }
+
+ jlf.increment()
+ return fmt.Sprintf("%s\n", linesAsJsonBytes)
+}
diff --git a/src/mongo/gotools/mongostat/stat_consumer/line/line.go b/src/mongo/gotools/mongostat/stat_consumer/line/line.go
new file mode 100644
index 00000000000..e1ed6d32262
--- /dev/null
+++ b/src/mongo/gotools/mongostat/stat_consumer/line/line.go
@@ -0,0 +1,45 @@
+package line
+
+import (
+ "github.com/mongodb/mongo-tools/mongostat/status"
+)
+
+// StatLine is a wrapper for all metrics reported by mongostat for monitored hosts
+type StatLine struct {
+ Fields map[string]string
+ Error error
+ Printed bool
+}
+
+type StatLines []*StatLine
+
+func (slice StatLines) Len() int {
+ return len(slice)
+}
+
+func (slice StatLines) Less(i, j int) bool {
+ return slice[i].Fields["host"] < slice[j].Fields["host"]
+}
+
+func (slice StatLines) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// NewStatLine constructs a StatLine object from two ServerStatus objects
+func NewStatLine(oldStat, newStat *status.ServerStatus, headerKeys []string, c *status.ReaderConfig) *StatLine {
+ line := &StatLine{
+ Fields: make(map[string]string),
+ }
+ for _, key := range headerKeys {
+ _, ok := StatHeaders[key]
+ if ok {
+ line.Fields[key] = StatHeaders[key].ReadField(c, newStat, oldStat)
+ } else {
+ line.Fields[key] = status.InterpretField(key, newStat, oldStat)
+ }
+ }
+ // We always need host and storage_engine, even if they aren't being displayed
+ line.Fields["host"] = StatHeaders["host"].ReadField(c, newStat, oldStat)
+ line.Fields["storage_engine"] = StatHeaders["storage_engine"].ReadField(c, newStat, oldStat)
+ return line
+}
diff --git a/src/mongo/gotools/mongostat/stat_consumer/line/stat_headers.go b/src/mongo/gotools/mongostat/stat_consumer/line/stat_headers.go
new file mode 100644
index 00000000000..fe96ed2b022
--- /dev/null
+++ b/src/mongo/gotools/mongostat/stat_consumer/line/stat_headers.go
@@ -0,0 +1,138 @@
+package line
+
+import (
+ "github.com/mongodb/mongo-tools/mongostat/status"
+)
+
+// Flags that determine when columns should be activated or deactivated for output.
+const (
+ FlagAlways = 1 << iota // always activate the column
+ FlagHosts // only active if we may have multiple hosts
+ FlagDiscover // only active when mongostat is in discover mode
+ FlagRepl // only active if one of the nodes being monitored is in a replset
+ FlagLocks // only active if node is capable of calculating lock info
+ FlagAll // only active if mongostat was run with --all option
+ FlagMMAP // only active if node has mmap-specific fields
+ FlagWT // only active if node has wiredtiger-specific fields
+)
+
+// StatHeader describes a single column for mongostat's terminal output,
+// its formatting, and in which modes it should be displayed.
+type StatHeader struct {
+ // ReadField produces a particular field according to the StatHeader instance.
+ // Some fields are based on a diff, so both latest ServerStatuses are taken.
+ ReadField func(c *status.ReaderConfig, newStat, oldStat *status.ServerStatus) string
+}
+
+// StatHeaders are the complete set of data metrics supported by mongostat.
+var (
+ keyNames = map[string][]string{ // short, long, deprecated
+ "host": {"host", "Host", "host"},
+ "storage_engine": {"storage_engine", "Storage engine", "engine"},
+ "insert": {"insert", "Insert opcounter (diff)", "insert"},
+ "query": {"query", "Query opcounter (diff)", "query"},
+ "update": {"update", "Update opcounter (diff)", "update"},
+ "delete": {"delete", "Delete opcounter (diff)", "delete"},
+ "getmore": {"getmore", "GetMore opcounter (diff)", "getmore"},
+ "command": {"command", "Command opcounter (diff)", "command"},
+ "dirty": {"dirty", "Cache dirty (percentage)", "% dirty"},
+ "used": {"used", "Cache used (percentage)", "% used"},
+ "flushes": {"flushes", "Number of flushes (diff)", "flushes"},
+ "mapped": {"mapped", "Mapped (size)", "mapped"},
+ "vsize": {"vsize", "Virtual (size)", "vsize"},
+ "res": {"res", "Resident (size)", "res"},
+ "nonmapped": {"nonmapped", "Non-mapped (size)", "non-mapped"},
+ "faults": {"faults", "Page faults (diff)", "faults"},
+ "lrw": {"lrw", "Lock acquire count, read|write (diff percentage)", "lr|lw %"},
+ "lrwt": {"lrwt", "Lock acquire time, read|write (diff percentage)", "lrt|lwt"},
+ "locked_db": {"locked_db", "Locked db info, '(db):(percentage)'", "locked"},
+ "qrw": {"qrw", "Queued accesses, read|write", "qr|qw"},
+ "arw": {"arw", "Active accesses, read|write", "ar|aw"},
+ "net_in": {"net_in", "Network input (size)", "netIn"},
+ "net_out": {"net_out", "Network output (size)", "netOut"},
+ "conn": {"conn", "Current connection count", "conn"},
+		"set":            {"set", "Replica set name", "set"},
+		"repl":           {"repl", "Replica set type", "repl"},
+ "time": {"time", "Time of sample", "time"},
+ }
+ StatHeaders = map[string]StatHeader{
+ "host": {status.ReadHost},
+ "storage_engine": {status.ReadStorageEngine},
+ "insert": {status.ReadInsert},
+ "query": {status.ReadQuery},
+ "update": {status.ReadUpdate},
+ "delete": {status.ReadDelete},
+ "getmore": {status.ReadGetMore},
+ "command": {status.ReadCommand},
+ "dirty": {status.ReadDirty},
+ "used": {status.ReadUsed},
+ "flushes": {status.ReadFlushes},
+ "mapped": {status.ReadMapped},
+ "vsize": {status.ReadVSize},
+ "res": {status.ReadRes},
+ "nonmapped": {status.ReadNonMapped},
+ "faults": {status.ReadFaults},
+ "lrw": {status.ReadLRW},
+ "lrwt": {status.ReadLRWT},
+ "locked_db": {status.ReadLockedDB},
+ "qrw": {status.ReadQRW},
+ "arw": {status.ReadARW},
+ "net_in": {status.ReadNetIn},
+ "net_out": {status.ReadNetOut},
+ "conn": {status.ReadConn},
+ "set": {status.ReadSet},
+ "repl": {status.ReadRepl},
+ "time": {status.ReadTime},
+ }
+ CondHeaders = []struct {
+ Key string
+ Flag int
+ }{
+ {"host", FlagHosts},
+ {"insert", FlagAlways},
+ {"query", FlagAlways},
+ {"update", FlagAlways},
+ {"delete", FlagAlways},
+ {"getmore", FlagAlways},
+ {"command", FlagAlways},
+ {"dirty", FlagWT},
+ {"used", FlagWT},
+ {"flushes", FlagAlways},
+ {"mapped", FlagMMAP},
+ {"vsize", FlagAlways},
+ {"res", FlagAlways},
+ {"nonmapped", FlagMMAP | FlagAll},
+ {"faults", FlagMMAP},
+ {"lrw", FlagMMAP | FlagAll},
+ {"lrwt", FlagMMAP | FlagAll},
+ {"locked_db", FlagLocks},
+ {"qrw", FlagAlways},
+ {"arw", FlagAlways},
+ {"net_in", FlagAlways},
+ {"net_out", FlagAlways},
+ {"conn", FlagAlways},
+ {"set", FlagRepl},
+ {"repl", FlagRepl},
+ {"time", FlagAlways},
+ }
+)
+
+func defaultKeyMap(index int) map[string]string {
+ names := make(map[string]string)
+ for k, v := range keyNames {
+ names[k] = v[index]
+ }
+ return names
+}
+
+func DefaultKeyMap() map[string]string {
+ return defaultKeyMap(0)
+}
+
+func LongKeyMap() map[string]string {
+ return defaultKeyMap(1)
+}
+
+func DeprecatedKeyMap() map[string]string {
+ return defaultKeyMap(2)
+}
diff --git a/src/mongo/gotools/mongostat/stat_consumer/stat_consumer.go b/src/mongo/gotools/mongostat/stat_consumer/stat_consumer.go
new file mode 100644
index 00000000000..a1f465602c7
--- /dev/null
+++ b/src/mongo/gotools/mongostat/stat_consumer/stat_consumer.go
@@ -0,0 +1,81 @@
+package stat_consumer
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/mongodb/mongo-tools/mongostat/stat_consumer/line"
+ "github.com/mongodb/mongo-tools/mongostat/status"
+)
+
+// StatConsumer maintains the current set of headers and the most recent
+// ServerStatus for each host. It creates a StatLine when passed a new record
+// and can format and write groups of StatLines.
+type StatConsumer struct {
+ formatter LineFormatter
+ readerConfig *status.ReaderConfig
+ oldStats map[string]*status.ServerStatus
+ headers, customHeaders []string
+ keyNames map[string]string
+ writer io.Writer
+ flags int
+}
+
+// NewStatConsumer creates a new StatConsumer with no previous records
+func NewStatConsumer(flags int, customHeaders []string, keyNames map[string]string, readerConfig *status.ReaderConfig, formatter LineFormatter, writer io.Writer) (sc *StatConsumer) {
+ sc = &StatConsumer{
+ formatter: formatter,
+ readerConfig: readerConfig,
+ oldStats: make(map[string]*status.ServerStatus),
+ customHeaders: customHeaders,
+ keyNames: keyNames,
+ writer: writer,
+ flags: flags,
+ }
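+	// flags == 0 means the caller supplied a complete custom column list
+	// (-o), so the custom headers are used as the full header set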
+ if flags == 0 {
+ sc.headers = customHeaders
+ }
+ return sc
+}
+
+// Update takes in a ServerStatus and returns a StatLine if it has a previous record
+func (sc *StatConsumer) Update(newStat *status.ServerStatus) (l *line.StatLine, seen bool) {
+ oldStat, seen := sc.oldStats[newStat.Host]
+ sc.oldStats[newStat.Host] = newStat
+ if seen {
+ l = line.NewStatLine(oldStat, newStat, sc.headers, sc.readerConfig)
+ return
+ }
+
+ if sc.flags != 0 {
+ if status.IsMMAP(newStat) {
+ sc.flags |= line.FlagMMAP
+ } else if status.IsWT(newStat) {
+ sc.flags |= line.FlagWT
+ }
+ if status.IsReplSet(newStat) {
+ sc.flags |= line.FlagRepl
+ }
+ if status.HasLocks(newStat) {
+ sc.flags |= line.FlagLocks
+ }
+
+ // Modify headers
+ sc.headers = []string{}
+ for _, desc := range line.CondHeaders {
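+			// a column is included only when every flag it requires
+			// is present in the consumer's flag set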
+ if desc.Flag&sc.flags == desc.Flag {
+ sc.headers = append(sc.headers, desc.Key)
+ }
+ }
+ sc.headers = append(sc.headers, sc.customHeaders...)
+ }
+ return
+}
+
+// FormatLines consumes StatLines, formats them, and sends them to its writer.
+// It returns true if the formatter should no longer receive data.
+func (sc *StatConsumer) FormatLines(lines []*line.StatLine) bool {
+ str := sc.formatter.FormatLines(lines, sc.headers, sc.keyNames)
+ fmt.Fprintf(sc.writer, "%s", str)
+ return sc.formatter.IsFinished()
+}
diff --git a/src/mongo/gotools/mongostat/status/readers.go b/src/mongo/gotools/mongostat/status/readers.go
new file mode 100644
index 00000000000..eaf6a9949c3
--- /dev/null
+++ b/src/mongo/gotools/mongostat/status/readers.go
@@ -0,0 +1,521 @@
+package status
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "time"
+
+ "github.com/mongodb/mongo-tools/common/text"
+ "github.com/mongodb/mongo-tools/common/util"
+)
+
+type ReaderConfig struct {
+ HumanReadable bool
+ TimeFormat string
+}
+
+type LockUsage struct {
+ Namespace string
+ Reads int64
+ Writes int64
+}
+
+type lockUsages []LockUsage
+
+func (slice lockUsages) Len() int {
+ return len(slice)
+}
+
+func (slice lockUsages) Less(i, j int) bool {
+ return slice[i].Reads+slice[i].Writes < slice[j].Reads+slice[j].Writes
+}
+
+func (slice lockUsages) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+func formatBits(should bool, amt int64) string {
+ if should {
+ return text.FormatBits(amt)
+ }
+ return fmt.Sprintf("%v", amt)
+}
+
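+// formatMegabyteAmount renders an amount given in megabytes, either as a
+// human-readable size string or as a raw byte count.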
+func formatMegabyteAmount(should bool, amt int64) string {
+ if should {
+ return text.FormatMegabyteAmount(amt)
+ }
+ return fmt.Sprintf("%v", amt*1024*1024)
+}
+
+func numberToInt64(num interface{}) (int64, bool) {
+ switch n := num.(type) {
+ case int64:
+ return n, true
+ case int32:
+ return int64(n), true
+ case int:
+ return int64(n), true
+ }
+ return 0, false
+}
+
+func percentageInt64(value, outOf int64) float64 {
+ if value == 0 || outOf == 0 {
+ return 0
+ }
+ return 100 * (float64(value) / float64(outOf))
+}
+
+func averageInt64(value, outOf int64) int64 {
+ if value == 0 || outOf == 0 {
+ return 0
+ }
+ return value / outOf
+}
+
+func parseLocks(stat *ServerStatus) map[string]LockUsage {
+ returnVal := map[string]LockUsage{}
+ for namespace, lockInfo := range stat.Locks {
+ returnVal[namespace] = LockUsage{
+ namespace,
+ lockInfo.TimeLockedMicros.Read + lockInfo.TimeLockedMicros.ReadLower,
+ lockInfo.TimeLockedMicros.Write + lockInfo.TimeLockedMicros.WriteLower,
+ }
+ }
+ return returnVal
+}
+
+func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage {
+ lockUsages := lockUsages(make([]LockUsage, 0, len(curLocks)))
+ for namespace, curUsage := range curLocks {
+ prevUsage, hasKey := prevLocks[namespace]
+ if !hasKey {
+ // This namespace didn't appear in the previous batch of lock info,
+ // so we can't compute a diff for it - skip it.
+ continue
+ }
+ // Calculate diff of lock usage for this namespace and add to the list
+ lockUsages = append(lockUsages,
+ LockUsage{
+ namespace,
+ curUsage.Reads - prevUsage.Reads,
+ curUsage.Writes - prevUsage.Writes,
+ })
+ }
+ // Sort the array in order of least to most locked
+ sort.Sort(lockUsages)
+ return lockUsages
+}
+
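+// diff returns the per-second rate of change between two counter samples;
+// e.g. diff(150, 100, 5.0) returns 10.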
+func diff(newVal, oldVal int64, sampleSecs float64) int64 {
+ return int64(float64(newVal-oldVal) / sampleSecs)
+}
+
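+// diffOp computes the per-second rate for a single opcounter field. A rate
+// coming only from the replicated opcounters is prefixed with '*'; when both
+// is true, local and replicated rates are printed together as "local|repl".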
+func diffOp(newStat, oldStat *ServerStatus, f func(*OpcountStats) int64, both bool) string {
+ sampleSecs := float64(newStat.SampleTime.Sub(oldStat.SampleTime).Seconds())
+ var opcount int64
+ var opcountRepl int64
+ if newStat.Opcounters != nil && oldStat.Opcounters != nil {
+ opcount = diff(f(newStat.Opcounters), f(oldStat.Opcounters), sampleSecs)
+ }
+ if newStat.OpcountersRepl != nil && oldStat.OpcountersRepl != nil {
+ opcountRepl = diff(f(newStat.OpcountersRepl), f(oldStat.OpcountersRepl), sampleSecs)
+ }
+ switch {
+ case both || opcount > 0 && opcountRepl > 0:
+ return fmt.Sprintf("%v|%v", opcount, opcountRepl)
+ case opcount > 0:
+ return fmt.Sprintf("%v", opcount)
+ case opcountRepl > 0:
+ return fmt.Sprintf("*%v", opcountRepl)
+ default:
+ return "*0"
+ }
+}
+
+func getStorageEngine(stat *ServerStatus) string {
+ val := "mmapv1"
+ if stat.StorageEngine != nil && stat.StorageEngine["name"] != "" {
+ val = stat.StorageEngine["name"]
+ }
+ return val
+}
+
+// mongosProcessRE matches a process string containing "mongos" with no slashes between it and the next whitespace (or end of string)
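+// e.g. it matches "mongos", "mongodb/bin/mongos", and "/mongos-prod.exe",
+// but not "mongos_stuff/mongod" or "mongodb/bin/mongod"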
+var mongosProcessRE = regexp.MustCompile(`^.*\bmongos\b[^\\\/]*(\s.*)?$`)
+
+func IsMongos(stat *ServerStatus) bool {
+ return stat.ShardCursorType != nil || mongosProcessRE.MatchString(stat.Process)
+}
+
+func HasLocks(stat *ServerStatus) bool {
+ return ReadLockedDB(nil, stat, stat) != ""
+}
+
+func IsReplSet(stat *ServerStatus) (res bool) {
+ if stat.Repl != nil {
+ isReplSet, ok := stat.Repl.IsReplicaSet.(bool)
+ res = (ok && isReplSet) || len(stat.Repl.SetName) > 0
+ }
+ return
+}
+
+func IsMMAP(stat *ServerStatus) bool {
+ return getStorageEngine(stat) == "mmapv1"
+}
+
+func IsWT(stat *ServerStatus) bool {
+ return getStorageEngine(stat) == "wiredTiger"
+}
+
+func ReadHost(_ *ReaderConfig, newStat, _ *ServerStatus) string {
+ return newStat.Host
+}
+
+func ReadStorageEngine(_ *ReaderConfig, newStat, _ *ServerStatus) string {
+ return getStorageEngine(newStat)
+}
+
+func ReadInsert(_ *ReaderConfig, newStat, oldStat *ServerStatus) string {
+ return diffOp(newStat, oldStat, func(o *OpcountStats) int64 {
+ return o.Insert
+ }, false)
+}
+
+func ReadQuery(_ *ReaderConfig, newStat, oldStat *ServerStatus) string {
+ return diffOp(newStat, oldStat, func(s *OpcountStats) int64 {
+ return s.Query
+ }, false)
+}
+
+func ReadUpdate(_ *ReaderConfig, newStat, oldStat *ServerStatus) string {
+ return diffOp(newStat, oldStat, func(s *OpcountStats) int64 {
+ return s.Update
+ }, false)
+}
+
+func ReadDelete(_ *ReaderConfig, newStat, oldStat *ServerStatus) string {
+ return diffOp(newStat, oldStat, func(s *OpcountStats) int64 {
+ return s.Delete
+ }, false)
+}
+
+func ReadGetMore(_ *ReaderConfig, newStat, oldStat *ServerStatus) string {
+ sampleSecs := float64(newStat.SampleTime.Sub(oldStat.SampleTime).Seconds())
+ return fmt.Sprintf("%d", diff(newStat.Opcounters.GetMore, oldStat.Opcounters.GetMore, sampleSecs))
+}
+
+func ReadCommand(_ *ReaderConfig, newStat, oldStat *ServerStatus) string {
+ return diffOp(newStat, oldStat, func(s *OpcountStats) int64 {
+ return s.Command
+ }, true)
+}
+
+func ReadDirty(c *ReaderConfig, newStat, _ *ServerStatus) (val string) {
+ if newStat.WiredTiger != nil {
+ bytes := float64(newStat.WiredTiger.Cache.TrackedDirtyBytes)
+ max := float64(newStat.WiredTiger.Cache.MaxBytesConfigured)
+ if max != 0 {
+ val = fmt.Sprintf("%.1f", 100*bytes/max)
+ if c.HumanReadable {
+ val = val + "%"
+ }
+ }
+ }
+ return
+}
+
+func ReadUsed(c *ReaderConfig, newStat, _ *ServerStatus) (val string) {
+ if newStat.WiredTiger != nil {
+ bytes := float64(newStat.WiredTiger.Cache.CurrentCachedBytes)
+ max := float64(newStat.WiredTiger.Cache.MaxBytesConfigured)
+ if max != 0 {
+ val = fmt.Sprintf("%.1f", 100*bytes/max)
+ if c.HumanReadable {
+ val = val + "%"
+ }
+ }
+ }
+ return
+}
+
+func ReadFlushes(_ *ReaderConfig, newStat, oldStat *ServerStatus) string {
+ var val int64
+ if newStat.WiredTiger != nil && oldStat.WiredTiger != nil {
+ val = newStat.WiredTiger.Transaction.TransCheckpoints - oldStat.WiredTiger.Transaction.TransCheckpoints
+ } else if newStat.BackgroundFlushing != nil && oldStat.BackgroundFlushing != nil {
+ val = newStat.BackgroundFlushing.Flushes - oldStat.BackgroundFlushing.Flushes
+ }
+ return fmt.Sprintf("%d", val)
+}
+
+func ReadMapped(c *ReaderConfig, newStat, _ *ServerStatus) (val string) {
+ if util.IsTruthy(newStat.Mem.Supported) && !IsMongos(newStat) {
+ val = formatMegabyteAmount(c.HumanReadable, newStat.Mem.Mapped)
+ }
+ return
+}
+
+func ReadVSize(c *ReaderConfig, newStat, _ *ServerStatus) (val string) {
+ if util.IsTruthy(newStat.Mem.Supported) {
+ val = formatMegabyteAmount(c.HumanReadable, newStat.Mem.Virtual)
+ }
+ return
+}
+
+func ReadRes(c *ReaderConfig, newStat, _ *ServerStatus) (val string) {
+ if util.IsTruthy(newStat.Mem.Supported) {
+ val = formatMegabyteAmount(c.HumanReadable, newStat.Mem.Resident)
+ }
+ return
+}
+
+func ReadNonMapped(c *ReaderConfig, newStat, _ *ServerStatus) (val string) {
+ if util.IsTruthy(newStat.Mem.Supported) && !IsMongos(newStat) {
+ val = formatMegabyteAmount(c.HumanReadable, newStat.Mem.Virtual-newStat.Mem.Mapped)
+ }
+ return
+}
+
+func ReadFaults(_ *ReaderConfig, newStat, oldStat *ServerStatus) string {
+ if !IsMMAP(newStat) {
+ return "n/a"
+ }
+ var val int64 = -1
+ if oldStat.ExtraInfo != nil && newStat.ExtraInfo != nil &&
+ oldStat.ExtraInfo.PageFaults != nil && newStat.ExtraInfo.PageFaults != nil {
+ sampleSecs := float64(newStat.SampleTime.Sub(oldStat.SampleTime).Seconds())
+ val = diff(*(newStat.ExtraInfo.PageFaults), *(oldStat.ExtraInfo.PageFaults), sampleSecs)
+ }
+ return fmt.Sprintf("%d", val)
+}
+
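+// ReadLRW reports the percentage of collection lock acquisitions that had
+// to wait since the last sample, formatted as "read%|write%".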
+func ReadLRW(_ *ReaderConfig, newStat, oldStat *ServerStatus) (val string) {
+ if !IsMongos(newStat) && newStat.Locks != nil && oldStat.Locks != nil {
+ global, ok := oldStat.Locks["Global"]
+ if ok && global.AcquireCount != nil {
+ newColl, inNew := newStat.Locks["Collection"]
+ oldColl, inOld := oldStat.Locks["Collection"]
+ if inNew && inOld && newColl.AcquireWaitCount != nil && oldColl.AcquireWaitCount != nil {
+ rWait := newColl.AcquireWaitCount.Read - oldColl.AcquireWaitCount.Read
+ wWait := newColl.AcquireWaitCount.Write - oldColl.AcquireWaitCount.Write
+ rTotal := newColl.AcquireCount.Read - oldColl.AcquireCount.Read
+ wTotal := newColl.AcquireCount.Write - oldColl.AcquireCount.Write
+ r := percentageInt64(rWait, rTotal)
+ w := percentageInt64(wWait, wTotal)
+ val = fmt.Sprintf("%.1f%%|%.1f%%", r, w)
+ }
+ }
+ }
+ return
+}
+
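+// ReadLRWT reports the average time spent acquiring the collection lock per
+// waiting acquisition, formatted as "read|write".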
+func ReadLRWT(_ *ReaderConfig, newStat, oldStat *ServerStatus) (val string) {
+ if !IsMongos(newStat) && newStat.Locks != nil && oldStat.Locks != nil {
+ global, ok := oldStat.Locks["Global"]
+ if ok && global.AcquireCount != nil {
+ newColl, inNew := newStat.Locks["Collection"]
+ oldColl, inOld := oldStat.Locks["Collection"]
+ if inNew && inOld && newColl.AcquireWaitCount != nil && oldColl.AcquireWaitCount != nil {
+ rWait := newColl.AcquireWaitCount.Read - oldColl.AcquireWaitCount.Read
+ wWait := newColl.AcquireWaitCount.Write - oldColl.AcquireWaitCount.Write
+ rAcquire := newColl.TimeAcquiringMicros.Read - oldColl.TimeAcquiringMicros.Read
+ wAcquire := newColl.TimeAcquiringMicros.Write - oldColl.TimeAcquiringMicros.Write
+ r := averageInt64(rAcquire, rWait)
+ w := averageInt64(wAcquire, wWait)
+ val = fmt.Sprintf("%v|%v", r, w)
+ }
+ }
+ }
+ return
+}
+
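+// ReadLockedDB reports the namespace with the most write-lock time since the
+// last sample as "db:percent%". It is only populated for servers reporting
+// the pre-3.0 lock format (no Global acquireCount stats).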
+func ReadLockedDB(_ *ReaderConfig, newStat, oldStat *ServerStatus) (val string) {
+ if !IsMongos(newStat) && newStat.Locks != nil && oldStat.Locks != nil {
+ global, ok := oldStat.Locks["Global"]
+ if !ok || global.AcquireCount == nil {
+ prevLocks := parseLocks(oldStat)
+ curLocks := parseLocks(newStat)
+ lockdiffs := computeLockDiffs(prevLocks, curLocks)
+ db := ""
+ var percentage string
+ if len(lockdiffs) == 0 {
+ if newStat.GlobalLock != nil {
+ percentage = fmt.Sprintf("%.1f", percentageInt64(newStat.GlobalLock.LockTime, newStat.GlobalLock.TotalTime))
+ }
+ } else {
+ // Get the entry with the highest lock
+ highestLocked := lockdiffs[len(lockdiffs)-1]
+ timeDiffMillis := newStat.UptimeMillis - oldStat.UptimeMillis
+ lockToReport := highestLocked.Writes
+
+ // if the highest locked namespace is not '.'
+ if highestLocked.Namespace != "." {
+ for _, namespaceLockInfo := range lockdiffs {
+ if namespaceLockInfo.Namespace == "." {
+ lockToReport += namespaceLockInfo.Writes
+ }
+ }
+ }
+
+ // lock data is in microseconds and uptime is in milliseconds - so
+ // divide by 1000 so that the units match
+ lockToReport /= 1000
+
+ db = highestLocked.Namespace
+ percentage = fmt.Sprintf("%.1f", percentageInt64(lockToReport, timeDiffMillis))
+ }
+ if percentage != "" {
+ val = fmt.Sprintf("%s:%s%%", db, percentage)
+ }
+ }
+ }
+ return
+}
+
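+// ReadQRW reports the number of queued readers and writers as "qr|qw". When
+// WiredTiger stats are available, active clients not holding a concurrency
+// ticket are counted as queued.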
+func ReadQRW(_ *ReaderConfig, newStat, _ *ServerStatus) string {
+ var qr int64
+ var qw int64
+ gl := newStat.GlobalLock
+ if gl != nil && gl.CurrentQueue != nil {
+ // If we have wiredtiger stats, use those instead
+ if newStat.WiredTiger != nil {
+ qr = gl.CurrentQueue.Readers + gl.ActiveClients.Readers - newStat.WiredTiger.Concurrent.Read.Out
+ qw = gl.CurrentQueue.Writers + gl.ActiveClients.Writers - newStat.WiredTiger.Concurrent.Write.Out
+ if qr < 0 {
+ qr = 0
+ }
+ if qw < 0 {
+ qw = 0
+ }
+ } else {
+ qr = gl.CurrentQueue.Readers
+ qw = gl.CurrentQueue.Writers
+ }
+ }
+ return fmt.Sprintf("%v|%v", qr, qw)
+}
+
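+// ReadARW reports the number of active readers and writers as "ar|aw",
+// using WiredTiger concurrency tickets when available and the global lock's
+// active client counts otherwise.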
+func ReadARW(_ *ReaderConfig, newStat, _ *ServerStatus) string {
+ var ar int64
+ var aw int64
+ if gl := newStat.GlobalLock; gl != nil {
+ if newStat.WiredTiger != nil {
+ ar = newStat.WiredTiger.Concurrent.Read.Out
+ aw = newStat.WiredTiger.Concurrent.Write.Out
+ } else if newStat.GlobalLock.ActiveClients != nil {
+ ar = gl.ActiveClients.Readers
+ aw = gl.ActiveClients.Writers
+ }
+ }
+ return fmt.Sprintf("%v|%v", ar, aw)
+}
+
+func ReadNetIn(c *ReaderConfig, newStat, oldStat *ServerStatus) string {
+ sampleSecs := float64(newStat.SampleTime.Sub(oldStat.SampleTime).Seconds())
+ val := diff(newStat.Network.BytesIn, oldStat.Network.BytesIn, sampleSecs)
+ return formatBits(c.HumanReadable, val)
+}
+
+func ReadNetOut(c *ReaderConfig, newStat, oldStat *ServerStatus) string {
+ sampleSecs := float64(newStat.SampleTime.Sub(oldStat.SampleTime).Seconds())
+ val := diff(newStat.Network.BytesOut, oldStat.Network.BytesOut, sampleSecs)
+ return formatBits(c.HumanReadable, val)
+}
+
+func ReadConn(_ *ReaderConfig, newStat, _ *ServerStatus) string {
+ return fmt.Sprintf("%d", newStat.Connections.Current)
+}
+
+func ReadSet(_ *ReaderConfig, newStat, _ *ServerStatus) (name string) {
+ if newStat.Repl != nil {
+ name = newStat.Repl.SetName
+ }
+ return
+}
+
+func ReadRepl(_ *ReaderConfig, newStat, _ *ServerStatus) string {
+ switch {
+ case newStat.Repl == nil && IsMongos(newStat):
+ return "RTR"
+ case newStat.Repl == nil:
+ return ""
+ case util.IsTruthy(newStat.Repl.IsMaster):
+ return "PRI"
+ case util.IsTruthy(newStat.Repl.Secondary):
+ return "SEC"
+ case util.IsTruthy(newStat.Repl.IsReplicaSet):
+ return "REC"
+ case util.IsTruthy(newStat.Repl.ArbiterOnly):
+ return "ARB"
+ case util.SliceContains(newStat.Repl.Passives, newStat.Repl.Me):
+ return "PSV"
+ default:
+ if !IsReplSet(newStat) {
+ return "UNK"
+ }
+ return "SLV"
+ }
+}
+
+func ReadTime(c *ReaderConfig, newStat, _ *ServerStatus) string {
+ if c.TimeFormat != "" {
+ return newStat.SampleTime.Format(c.TimeFormat)
+ }
+ if c.HumanReadable {
+ return newStat.SampleTime.Format(time.StampMilli)
+ }
+ return newStat.SampleTime.Format(time.RFC3339)
+}
+
+func ReadStatField(field string, stat *ServerStatus) string {
+ val, ok := stat.Flattened[field]
+ if ok {
+ return fmt.Sprintf("%v", val)
+ }
+ return "INVALID"
+}
+
+func ReadStatDiff(field string, newStat, oldStat *ServerStatus) string {
+ new, validNew := newStat.Flattened[field]
+ old, validOld := oldStat.Flattened[field]
+ if validNew && validOld {
+ new, validNew := numberToInt64(new)
+ old, validOld := numberToInt64(old)
+ if validNew && validOld {
+ return fmt.Sprintf("%v", new-old)
+ }
+ }
+ return "INVALID"
+}
+
+func ReadStatRate(field string, newStat, oldStat *ServerStatus) string {
+ sampleSecs := float64(newStat.SampleTime.Sub(oldStat.SampleTime).Seconds())
+ new, validNew := newStat.Flattened[field]
+ old, validOld := oldStat.Flattened[field]
+ if validNew && validOld {
+ new, validNew := numberToInt64(new)
+ old, validOld := numberToInt64(old)
+ if validNew && validOld {
+ return fmt.Sprintf("%v", diff(new, old, sampleSecs))
+ }
+ }
+ return "INVALID"
+}
+
+var literalRE = regexp.MustCompile(`^(.*?)(\.(\w+)\(\))?$`)
+
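+// InterpretField parses a stat-field specifier of the form "a.b.c",
+// "a.b.c.diff()", or "a.b.c.rate()" and returns the field's raw value, its
+// delta between samples, or its per-second rate, respectively. For example,
+// "opcounters.insert.rate()" reports inserts per second.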
+func InterpretField(field string, newStat, oldStat *ServerStatus) string {
+ match := literalRE.FindStringSubmatch(field)
+ if len(match) == 4 {
+ switch match[3] {
+ case "diff":
+ return ReadStatDiff(match[1], newStat, oldStat)
+ case "rate":
+ return ReadStatRate(match[1], newStat, oldStat)
+ }
+ }
+ return ReadStatField(field, newStat)
+}
diff --git a/src/mongo/gotools/mongostat/status/server_status.go b/src/mongo/gotools/mongostat/status/server_status.go
new file mode 100644
index 00000000000..168e652443e
--- /dev/null
+++ b/src/mongo/gotools/mongostat/status/server_status.go
@@ -0,0 +1,231 @@
+package status
+
+import "time"
+
+type ServerStatus struct {
+ SampleTime time.Time `bson:""`
+ Flattened map[string]interface{} `bson:""`
+ Host string `bson:"host"`
+ Version string `bson:"version"`
+ Process string `bson:"process"`
+ Pid int64 `bson:"pid"`
+ Uptime int64 `bson:"uptime"`
+ UptimeMillis int64 `bson:"uptimeMillis"`
+ UptimeEstimate int64 `bson:"uptimeEstimate"`
+ LocalTime time.Time `bson:"localTime"`
+ Asserts map[string]int64 `bson:"asserts"`
+ BackgroundFlushing *FlushStats `bson:"backgroundFlushing"`
+ ExtraInfo *ExtraInfo `bson:"extra_info"`
+ Connections *ConnectionStats `bson:"connections"`
+ Dur *DurStats `bson:"dur"`
+ GlobalLock *GlobalLockStats `bson:"globalLock"`
+ Locks map[string]LockStats `bson:"locks,omitempty"`
+ Network *NetworkStats `bson:"network"`
+ Opcounters *OpcountStats `bson:"opcounters"`
+ OpcountersRepl *OpcountStats `bson:"opcountersRepl"`
+ RecordStats *DBRecordStats `bson:"recordStats"`
+ Mem *MemStats `bson:"mem"`
+ Repl *ReplStatus `bson:"repl"`
+ ShardCursorType map[string]interface{} `bson:"shardCursorType"`
+ StorageEngine map[string]string `bson:"storageEngine"`
+ WiredTiger *WiredTiger `bson:"wiredTiger"`
+}
+
+// WiredTiger stores information related to the WiredTiger storage engine.
+type WiredTiger struct {
+ Transaction TransactionStats `bson:"transaction"`
+ Concurrent ConcurrentTransactions `bson:"concurrentTransactions"`
+ Cache CacheStats `bson:"cache"`
+}
+
+type ConcurrentTransactions struct {
+ Write ConcurrentTransStats `bson:"write"`
+ Read ConcurrentTransStats `bson:"read"`
+}
+
+type ConcurrentTransStats struct {
+ Out int64 `bson:"out"`
+}
+
+// CacheStats stores cache statistics for WiredTiger.
+type CacheStats struct {
+ TrackedDirtyBytes int64 `bson:"tracked dirty bytes in the cache"`
+ CurrentCachedBytes int64 `bson:"bytes currently in the cache"`
+ MaxBytesConfigured int64 `bson:"maximum bytes configured"`
+}
+
+// TransactionStats stores transaction checkpoints in WiredTiger.
+type TransactionStats struct {
+ TransCheckpoints int64 `bson:"transaction checkpoints"`
+}
+
+// ReplStatus stores data related to replica sets.
+type ReplStatus struct {
+ SetName string `bson:"setName"`
+ IsMaster interface{} `bson:"ismaster"`
+ Secondary interface{} `bson:"secondary"`
+ IsReplicaSet interface{} `bson:"isreplicaset"`
+ ArbiterOnly interface{} `bson:"arbiterOnly"`
+ Hosts []string `bson:"hosts"`
+ Passives []string `bson:"passives"`
+ Me string `bson:"me"`
+}
+
+// DBRecordStats stores data related to memory operations across databases.
+type DBRecordStats struct {
+ AccessesNotInMemory int64 `bson:"accessesNotInMemory"`
+ PageFaultExceptionsThrown int64 `bson:"pageFaultExceptionsThrown"`
+ DBRecordAccesses map[string]RecordAccesses `bson:",inline"`
+}
+
+// RecordAccesses stores data related to memory operations scoped to a database.
+type RecordAccesses struct {
+ AccessesNotInMemory int64 `bson:"accessesNotInMemory"`
+ PageFaultExceptionsThrown int64 `bson:"pageFaultExceptionsThrown"`
+}
+
+// MemStats stores data related to memory statistics.
+type MemStats struct {
+ Bits int64 `bson:"bits"`
+ Resident int64 `bson:"resident"`
+ Virtual int64 `bson:"virtual"`
+ Supported interface{} `bson:"supported"`
+ Mapped int64 `bson:"mapped"`
+ MappedWithJournal int64 `bson:"mappedWithJournal"`
+}
+
+// FlushStats stores information about memory flushes.
+type FlushStats struct {
+ Flushes int64 `bson:"flushes"`
+ TotalMs int64 `bson:"total_ms"`
+ AverageMs float64 `bson:"average_ms"`
+ LastMs int64 `bson:"last_ms"`
+ LastFinished time.Time `bson:"last_finished"`
+}
+
+// ConnectionStats stores information related to incoming database connections.
+type ConnectionStats struct {
+ Current int64 `bson:"current"`
+ Available int64 `bson:"available"`
+ TotalCreated int64 `bson:"totalCreated"`
+}
+
+// DurTiming stores information related to journaling.
+type DurTiming struct {
+ Dt int64 `bson:"dt"`
+ PrepLogBuffer int64 `bson:"prepLogBuffer"`
+ WriteToJournal int64 `bson:"writeToJournal"`
+ WriteToDataFiles int64 `bson:"writeToDataFiles"`
+ RemapPrivateView int64 `bson:"remapPrivateView"`
+}
+
+// DurStats stores information related to journaling statistics.
+type DurStats struct {
+ Commits int64 `bson:"commits"`
+ JournaledMB int64 `bson:"journaledMB"`
+ WriteToDataFilesMB int64 `bson:"writeToDataFilesMB"`
+ Compression int64 `bson:"compression"`
+ CommitsInWriteLock int64 `bson:"commitsInWriteLock"`
+ EarlyCommits int64 `bson:"earlyCommits"`
+ TimeMs DurTiming
+}
+
+// QueueStats stores the number of queued read/write operations.
+type QueueStats struct {
+ Total int64 `bson:"total"`
+ Readers int64 `bson:"readers"`
+ Writers int64 `bson:"writers"`
+}
+
+// ClientStats stores the number of active read/write operations.
+type ClientStats struct {
+ Total int64 `bson:"total"`
+ Readers int64 `bson:"readers"`
+ Writers int64 `bson:"writers"`
+}
+
+// GlobalLockStats stores information related to locks in the MMAP storage engine.
+type GlobalLockStats struct {
+ TotalTime int64 `bson:"totalTime"`
+ LockTime int64 `bson:"lockTime"`
+ CurrentQueue *QueueStats `bson:"currentQueue"`
+ ActiveClients *ClientStats `bson:"activeClients"`
+}
+
+// NetworkStats stores information related to network traffic.
+type NetworkStats struct {
+ BytesIn int64 `bson:"bytesIn"`
+ BytesOut int64 `bson:"bytesOut"`
+ NumRequests int64 `bson:"numRequests"`
+}
+
+// OpcountStats stores information related to commands and basic CRUD operations.
+type OpcountStats struct {
+ Insert int64 `bson:"insert"`
+ Query int64 `bson:"query"`
+ Update int64 `bson:"update"`
+ Delete int64 `bson:"delete"`
+ GetMore int64 `bson:"getmore"`
+ Command int64 `bson:"command"`
+}
+
+// ReadWriteLockTimes stores time spent holding read/write locks.
+type ReadWriteLockTimes struct {
+ Read int64 `bson:"R"`
+ Write int64 `bson:"W"`
+ ReadLower int64 `bson:"r"`
+ WriteLower int64 `bson:"w"`
+}
+
+// LockStats stores information related to time spent acquiring/holding locks
+// for a given database.
+type LockStats struct {
+ TimeLockedMicros ReadWriteLockTimes `bson:"timeLockedMicros"`
+ TimeAcquiringMicros ReadWriteLockTimes `bson:"timeAcquiringMicros"`
+
+ // AcquireCount and AcquireWaitCount are lock-stats fields only populated by MongoDB 3.0 or newer.
+ // They are typed as pointers so that when they are nil, mongostat can assume the fields are not
+ // populated with real namespace data.
+ AcquireCount *ReadWriteLockTimes `bson:"acquireCount,omitempty"`
+ AcquireWaitCount *ReadWriteLockTimes `bson:"acquireWaitCount,omitempty"`
+}
+
+// ExtraInfo stores additional platform specific information.
+type ExtraInfo struct {
+ PageFaults *int64 `bson:"page_faults"`
+}
+
+// NodeError pairs an error with a hostname.
+type NodeError struct {
+ Host string
+ err error
+}
+
+func (ne *NodeError) Error() string {
+ return ne.err.Error()
+}
+
+func NewNodeError(host string, err error) *NodeError {
+ return &NodeError{
+ err: err,
+ Host: host,
+ }
+}
+
+// Flatten takes a map and returns a new one where nested maps are replaced
+// by dot-delimited keys.
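+// For example, {"mem": {"bits": 64}} flattens to {"mem.bits": 64}.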
+func Flatten(m map[string]interface{}) map[string]interface{} {
+ o := make(map[string]interface{})
+ for k, v := range m {
+ switch child := v.(type) {
+ case map[string]interface{}:
+ nm := Flatten(child)
+ for nk, nv := range nm {
+ o[k+"."+nk] = nv
+ }
+ default:
+ o[k] = v
+ }
+ }
+ return o
+}
diff --git a/src/mongo/gotools/mongostat/test_data/server_status_new.bson b/src/mongo/gotools/mongostat/test_data/server_status_new.bson
new file mode 100644
index 00000000000..c59b609f17e
--- /dev/null
+++ b/src/mongo/gotools/mongostat/test_data/server_status_new.bson
Binary files differ
diff --git a/src/mongo/gotools/mongostat/test_data/server_status_old.bson b/src/mongo/gotools/mongostat/test_data/server_status_old.bson
new file mode 100644
index 00000000000..a3ce121fa22
--- /dev/null
+++ b/src/mongo/gotools/mongostat/test_data/server_status_old.bson
Binary files differ
diff --git a/src/mongo/gotools/mongotop/command.go b/src/mongo/gotools/mongotop/command.go
new file mode 100644
index 00000000000..595668d68a0
--- /dev/null
+++ b/src/mongo/gotools/mongotop/command.go
@@ -0,0 +1,235 @@
+package mongotop
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/text"
+ "sort"
+ "time"
+)
+
+// FormattableDiff represents a diff of two samples taken by mongotop,
+// which can be printed to output in various formats.
+type FormattableDiff interface {
+ // Generate a JSON representation of the diff
+ JSON() string
+ // Generate a table-like representation which can be printed to a terminal
+ Grid() string
+}
+
+// ServerStatus represents the results of the "serverStatus" command.
+type ServerStatus struct {
+ Locks map[string]LockStats `bson:"locks,omitempty"`
+}
+
+// LockStats contains information on time spent acquiring and holding a lock.
+type LockStats struct {
+ AcquireCount *ReadWriteLockTimes `bson:"acquireCount"`
+ TimeLockedMicros ReadWriteLockTimes `bson:"timeLockedMicros"`
+ TimeAcquiringMicros ReadWriteLockTimes `bson:"timeAcquiringMicros"`
+}
+
+// ReadWriteLockTimes contains read/write lock times on a database.
+type ReadWriteLockTimes struct {
+ Read int64 `bson:"R"`
+ Write int64 `bson:"W"`
+ ReadLower int64 `bson:"r"`
+ WriteLower int64 `bson:"w"`
+}
+
+// ServerStatusDiff contains a map of the lock time differences for each database.
+type ServerStatusDiff struct {
+ // namespace -> lock times
+ Totals map[string]LockDelta `json:"totals"`
+ Time time.Time `json:"time"`
+}
+
+// LockDelta represents the differences in read/write lock times between two samples.
+type LockDelta struct {
+ Read int64 `json:"read"`
+ Write int64 `json:"write"`
+}
+
+// TopDiff contains a map of the differences between top samples for each namespace.
+type TopDiff struct {
+ // namespace -> totals
+ Totals map[string]NSTopInfo `json:"totals"`
+ Time time.Time `json:"time"`
+}
+
+// Top holds raw output of the "top" command.
+type Top struct {
+ Totals map[string]NSTopInfo `bson:"totals"`
+}
+
+// NSTopInfo holds information about a single namespace.
+type NSTopInfo struct {
+ Total TopField `bson:"total" json:"total"`
+ Read TopField `bson:"readLock" json:"read"`
+ Write TopField `bson:"writeLock" json:"write"`
+}
+
+// TopField contains the timing and counts for a single lock statistic within the "top" command.
+type TopField struct {
+ Time int `bson:"time" json:"time"`
+ Count int `bson:"count" json:"count"`
+}
+
+// struct to enable sorting of namespaces by lock time with the sort package
+type sortableTotal struct {
+ Name string
+ Total int64
+}
+
+type sortableTotals []sortableTotal
+
+func (a sortableTotals) Less(i, j int) bool {
+ if a[i].Total == a[j].Total {
+ return a[i].Name > a[j].Name
+ }
+ return a[i].Total < a[j].Total
+}
+func (a sortableTotals) Len() int { return len(a) }
+func (a sortableTotals) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// Diff takes an older Top sample, and produces a TopDiff
+// representing the deltas of each metric between the two samples.
+func (top Top) Diff(previous Top) TopDiff {
+ // The diff to eventually return
+ diff := TopDiff{
+ Totals: map[string]NSTopInfo{},
+ Time: time.Now(),
+ }
+
+ // For each namespace we are tracking, subtract the times and counts
+ // for total/read/write and build a new map containing the diffs.
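+ // "top" reports times in microseconds; the divisions by 1000 below
+ // convert them to the milliseconds shown in the output.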
+ prevTotals := previous.Totals
+ curTotals := top.Totals
+ for ns, prevNSInfo := range prevTotals {
+ if curNSInfo, ok := curTotals[ns]; ok {
+ diff.Totals[ns] = NSTopInfo{
+ Total: TopField{
+ Time: (curNSInfo.Total.Time - prevNSInfo.Total.Time) / 1000,
+ Count: curNSInfo.Total.Count - prevNSInfo.Total.Count,
+ },
+ Read: TopField{
+ Time: (curNSInfo.Read.Time - prevNSInfo.Read.Time) / 1000,
+ Count: curNSInfo.Read.Count - prevNSInfo.Read.Count,
+ },
+ Write: TopField{
+ Time: (curNSInfo.Write.Time - prevNSInfo.Write.Time) / 1000,
+ Count: curNSInfo.Write.Count - prevNSInfo.Write.Count,
+ },
+ }
+ }
+ }
+ return diff
+}
+
+// Grid returns a tabular representation of the TopDiff.
+func (td TopDiff) Grid() string {
+ buf := &bytes.Buffer{}
+ out := &text.GridWriter{ColumnPadding: 4}
+ out.WriteCells("ns", "total", "read", "write", time.Now().Format("2006-01-02T15:04:05Z07:00"))
+ out.EndRow()
+
+ // Sort by total time
+ totals := make(sortableTotals, 0, len(td.Totals))
+ for ns, diff := range td.Totals {
+ totals = append(totals, sortableTotal{ns, int64(diff.Total.Time)})
+ }
+
+ sort.Sort(sort.Reverse(totals))
+ for i, st := range totals {
+ diff := td.Totals[st.Name]
+ out.WriteCells(st.Name,
+ fmt.Sprintf("%vms", diff.Total.Time),
+ fmt.Sprintf("%vms", diff.Read.Time),
+ fmt.Sprintf("%vms", diff.Write.Time),
+ "")
+ out.EndRow()
+ if i >= 9 {
+ break
+ }
+ }
+ out.Flush(buf)
+ return buf.String()
+}
+
+// JSON returns a JSON representation of the TopDiff.
+func (td TopDiff) JSON() string {
+ bytes, err := json.Marshal(td)
+ if err != nil {
+ panic(err)
+ }
+ return string(bytes)
+}
+
+// JSON returns a JSON representation of the ServerStatusDiff.
+func (ssd ServerStatusDiff) JSON() string {
+ bytes, err := json.Marshal(ssd)
+ if err != nil {
+ panic(err)
+ }
+ return string(bytes)
+}
+
+// Grid returns a tabular representation of the ServerStatusDiff.
+func (ssd ServerStatusDiff) Grid() string {
+ buf := &bytes.Buffer{}
+ out := &text.GridWriter{ColumnPadding: 4}
+ out.WriteCells("db", "total", "read", "write", time.Now().Format("2006-01-02T15:04:05Z07:00"))
+ out.EndRow()
+
+ // Sort by total time
+ totals := make(sortableTotals, 0, len(ssd.Totals))
+ for ns, diff := range ssd.Totals {
+ totals = append(totals, sortableTotal{ns, diff.Read + diff.Write})
+ }
+
+ sort.Sort(sort.Reverse(totals))
+ for i, st := range totals {
+ diff := ssd.Totals[st.Name]
+ out.WriteCells(st.Name,
+ fmt.Sprintf("%vms", diff.Read+diff.Write),
+ fmt.Sprintf("%vms", diff.Read),
+ fmt.Sprintf("%vms", diff.Write),
+ "")
+ out.EndRow()
+ if i >= 9 {
+ break
+ }
+ }
+
+ out.Flush(buf)
+ return buf.String()
+}
+
+// Diff takes an older ServerStatus sample, and produces a ServerStatusDiff
+// representing the deltas of each metric between the two samples.
+func (ss ServerStatus) Diff(previous ServerStatus) ServerStatusDiff {
+ // the diff to eventually return
+ diff := ServerStatusDiff{
+ Totals: map[string]LockDelta{},
+ Time: time.Now(),
+ }
+
+ prevLocks := previous.Locks
+ curLocks := ss.Locks
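+ // Lock times are reported in microseconds; the deltas below are
+ // converted to milliseconds.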
+ for ns, prevNSInfo := range prevLocks {
+ if curNSInfo, ok := curLocks[ns]; ok {
+ prevTimeLocked := prevNSInfo.TimeLockedMicros
+ curTimeLocked := curNSInfo.TimeLockedMicros
+
+ diff.Totals[ns] = LockDelta{
+ Read: (curTimeLocked.Read + curTimeLocked.ReadLower -
+ (prevTimeLocked.Read + prevTimeLocked.ReadLower)) / 1000,
+ Write: (curTimeLocked.Write + curTimeLocked.WriteLower -
+ (prevTimeLocked.Write + prevTimeLocked.WriteLower)) / 1000,
+ }
+ }
+ }
+
+ return diff
+}
diff --git a/src/mongo/gotools/mongotop/main/mongotop.go b/src/mongo/gotools/mongotop/main/mongotop.go
new file mode 100644
index 00000000000..fe06f0e0d64
--- /dev/null
+++ b/src/mongo/gotools/mongotop/main/mongotop.go
@@ -0,0 +1,111 @@
+// Main package for the mongotop tool.
+package main
+
+import (
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "github.com/mongodb/mongo-tools/common/signals"
+ "github.com/mongodb/mongo-tools/common/util"
+ "github.com/mongodb/mongo-tools/mongotop"
+ "gopkg.in/mgo.v2"
+ "os"
+ "strconv"
+ "time"
+)
+
+func main() {
+ // initialize command-line opts
+ opts := options.New("mongotop", mongotop.Usage,
+ options.EnabledOptions{Auth: true, Connection: true, Namespace: false})
+ opts.UseReadOnlyHostDescription()
+
+ // add mongotop-specific options
+ outputOpts := &mongotop.Output{}
+ opts.AddOptions(outputOpts)
+
+ args, err := opts.Parse()
+ if err != nil {
+ log.Logvf(log.Always, "error parsing command line options: %v", err)
+ log.Logvf(log.Always, "try 'mongotop --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ // print help, if specified
+ if opts.PrintHelp(false) {
+ return
+ }
+
+ // print version, if specified
+ if opts.PrintVersion() {
+ return
+ }
+
+ log.SetVerbosity(opts.Verbosity)
+ signals.Handle()
+
+ if len(args) > 1 {
+ log.Logvf(log.Always, "too many positional arguments")
+ log.Logvf(log.Always, "try 'mongotop --help' for more information")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ sleeptime := 1 // default to 1 second sleep time
+ if len(args) > 0 {
+ sleeptime, err = strconv.Atoi(args[0])
+ if err != nil || sleeptime <= 0 {
+ log.Logvf(log.Always, "invalid sleep time: %v", args[0])
+ os.Exit(util.ExitBadOptions)
+ }
+ }
+ if outputOpts.RowCount < 0 {
+ log.Logvf(log.Always, "invalid value for --rowcount: %v", outputOpts.RowCount)
+ os.Exit(util.ExitBadOptions)
+ }
+
+ if opts.Auth.Username != "" && opts.Auth.Source == "" && !opts.Auth.RequiresExternalDB() {
+ log.Logvf(log.Always, "--authenticationDatabase is required when authenticating against a non $external database")
+ os.Exit(util.ExitBadOptions)
+ }
+
+ // connect directly, unless a replica set name is explicitly specified
+ _, setName := util.ParseConnectionString(opts.Host)
+ opts.Direct = (setName == "")
+ opts.ReplicaSetName = setName
+
+ // create a session provider to connect to the db
+ sessionProvider, err := db.NewSessionProvider(*opts)
+ if err != nil {
+ log.Logvf(log.Always, "error connecting to host: %v", err)
+ os.Exit(util.ExitError)
+ }
+
+ if setName == "" {
+ sessionProvider.SetReadPreference(mgo.PrimaryPreferred)
+ }
+
+ // fail fast if connecting to a mongos
+ isMongos, err := sessionProvider.IsMongos()
+ if err != nil {
+ log.Logvf(log.Always, "Failed: %v", err)
+ os.Exit(util.ExitError)
+ }
+ if isMongos {
+ log.Logvf(log.Always, "cannot run mongotop against a mongos")
+ os.Exit(util.ExitError)
+ }
+
+ // instantiate a mongotop instance
+ top := &mongotop.MongoTop{
+ Options: opts,
+ OutputOptions: outputOpts,
+ SessionProvider: sessionProvider,
+ Sleeptime: time.Duration(sleeptime) * time.Second,
+ }
+
+ // kick it off
+ if err := top.Run(); err != nil {
+ log.Logvf(log.Always, "Failed: %v", err)
+ os.Exit(util.ExitError)
+ }
+}
diff --git a/src/mongo/gotools/mongotop/mongotop.go b/src/mongo/gotools/mongotop/mongotop.go
new file mode 100644
index 00000000000..0743d25d4e0
--- /dev/null
+++ b/src/mongo/gotools/mongotop/mongotop.go
@@ -0,0 +1,125 @@
+// Package mongotop provides a method to track the amount of time a MongoDB instance spends reading and writing data.
+package mongotop
+
+import (
+ "fmt"
+ "github.com/mongodb/mongo-tools/common/db"
+ "github.com/mongodb/mongo-tools/common/log"
+ "github.com/mongodb/mongo-tools/common/options"
+ "time"
+)
+
+// MongoTop is a container for the user-specified options and
+// internal state used for running mongotop.
+type MongoTop struct {
+ // Generic mongo tool options
+ Options *options.ToolOptions
+
+ // Mongotop-specific output options
+ OutputOptions *Output
+
+ // for connecting to the db
+ SessionProvider *db.SessionProvider
+
+ // Length of time to sleep between each polling.
+ Sleeptime time.Duration
+
+ previousServerStatus *ServerStatus
+ previousTop *Top
+}
+
+func (mt *MongoTop) runDiff() (outDiff FormattableDiff, err error) {
+ session, err := mt.SessionProvider.GetSession()
+ if err != nil {
+ return nil, err
+ }
+ defer session.Close()
+ session.SetSocketTimeout(0)
+
+ var currentServerStatus ServerStatus
+ var currentTop Top
+ commandName := "top"
+ var dest interface{} = &currentTop
+ if mt.OutputOptions.Locks {
+ commandName = "serverStatus"
+ dest = &currentServerStatus
+ }
+ err = session.DB("admin").Run(commandName, dest)
+ if err != nil {
+ mt.previousServerStatus = nil
+ mt.previousTop = nil
+ return nil, err
+ }
+ if mt.OutputOptions.Locks {
+ if currentServerStatus.Locks == nil {
+ return nil, fmt.Errorf("server does not support reporting lock information")
+ }
+ for _, ns := range currentServerStatus.Locks {
+ if ns.AcquireCount != nil {
+ return nil, fmt.Errorf("server does not support reporting lock information")
+ }
+ }
+ if mt.previousServerStatus != nil {
+ serverStatusDiff := currentServerStatus.Diff(*mt.previousServerStatus)
+ outDiff = serverStatusDiff
+ }
+ mt.previousServerStatus = &currentServerStatus
+ } else {
+ if mt.previousTop != nil {
+ topDiff := currentTop.Diff(*mt.previousTop)
+ outDiff = topDiff
+ }
+ mt.previousTop = &currentTop
+ }
+ return outDiff, nil
+}
+
+// Run executes the mongotop program.
+func (mt *MongoTop) Run() error {
+
+ connURL := mt.Options.Host
+ if connURL == "" {
+ connURL = "127.0.0.1"
+ }
+ if mt.Options.Port != "" {
+ connURL = connURL + ":" + mt.Options.Port
+ }
+
+ hasData := false
+ numPrinted := 0
+
+ for {
+ if mt.OutputOptions.RowCount > 0 && numPrinted > mt.OutputOptions.RowCount {
+ return nil
+ }
+ numPrinted++
+ diff, err := mt.runDiff()
+ if err != nil {
+ // If this is the first time trying to poll the server and it fails,
+ // just stop now instead of trying over and over.
+ if !hasData {
+ return err
+ }
+
+ log.Logvf(log.Always, "Error: %v\n", err)
+ time.Sleep(mt.Sleeptime)
+ }
+
+ // if this is the first time and the connection is successful, print
+ // the connection message
+ if !hasData && !mt.OutputOptions.Json {
+ log.Logvf(log.Always, "connected to: %v\n", connURL)
+ }
+
+ hasData = true
+
+ if diff != nil {
+ if mt.OutputOptions.Json {
+ fmt.Println(diff.JSON())
+ } else {
+ fmt.Println(diff.Grid())
+ }
+ }
+ time.Sleep(mt.Sleeptime)
+ }
+}
diff --git a/src/mongo/gotools/mongotop/options.go b/src/mongo/gotools/mongotop/options.go
new file mode 100644
index 00000000000..054b522d14b
--- /dev/null
+++ b/src/mongo/gotools/mongotop/options.go
@@ -0,0 +1,19 @@
+package mongotop
+
+var Usage = `<options> <polling interval in seconds>
+
+Monitor basic usage statistics for each collection.
+
+See http://docs.mongodb.org/manual/reference/program/mongotop/ for more information.`
+
+// Output defines the set of options to use in displaying data from the server.
+type Output struct {
+ Locks bool `long:"locks" description:"report on use of per-database locks"`
+ RowCount int `long:"rowcount" value-name:"<count>" short:"n" description:"number of stats lines to print (0 for indefinite)"`
+ Json bool `long:"json" description:"format output as JSON"`
+}
+
+// Name returns a human-readable group name for output options.
+func (_ *Output) Name() string {
+ return "output"
+}
diff --git a/src/mongo/gotools/mongotop/smoke.sh b/src/mongo/gotools/mongotop/smoke.sh
new file mode 100755
index 00000000000..5f0d8980f02
--- /dev/null
+++ b/src/mongo/gotools/mongotop/smoke.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+set -e
+
+if ! [ -a mongotop ]
+then
+ echo "need a mongotop binary in the same directory as the smoke script"
+ exit 1
+fi
+
+chmod 755 mongotop
+
+./mongotop > output.out &
+mongotop_pid=$!
+
+sleep 5
+
+kill $mongotop_pid
+
+headers=( "ns" "total" "read" "write" )
+for header in "${headers[@]}"
+do
+ if [ `head -2 output.out | grep -c $header` -ne 1 ]
+ then
+ echo "header row doesn't contain $header"
+ exit 1
+ fi
+done
+
+if [ `head -5 output.out | grep -c ms` -ne 3 ]
+then
+ echo "subsequent lines don't contain ms totals"
+ exit 1
+fi
diff --git a/src/mongo/gotools/set_gopath.bat b/src/mongo/gotools/set_gopath.bat
new file mode 100755
index 00000000000..61d40b44753
--- /dev/null
+++ b/src/mongo/gotools/set_gopath.bat
@@ -0,0 +1,6 @@
+@echo off
+
+if exist "%cd%\.gopath\" rd /s /q "%cd%\.gopath\"
+md "%cd%\.gopath\src\github.com\mongodb\"
+mklink /J "%cd%\.gopath\src\github.com\mongodb\mongo-tools" "%cd%" >nul 2>&1
+set GOPATH=%cd%\.gopath;%cd%\vendor
diff --git a/src/mongo/gotools/set_gopath.ps1 b/src/mongo/gotools/set_gopath.ps1
new file mode 100644
index 00000000000..87f7dda2435
--- /dev/null
+++ b/src/mongo/gotools/set_gopath.ps1
@@ -0,0 +1,9 @@
+$goPath = "${pwd}\.gopath"
+$vendorPath = "${pwd}\vendor"
+
+# Using cmd invocation to recursively delete directories because Remove-Item -Recurse -Force
+# has a bug causing the script to fail.
+Invoke-Expression "cmd /c rd /s /q $goPath"
+New-Item $goPath\src\github.com\mongodb -ItemType Container | Out-Null
+Invoke-Expression "cmd /c mklink /J $goPath\src\github.com\mongodb\mongo-tools ${pwd}" | Out-Null
+$env:GOPATH = "$goPath;$vendorPath"
diff --git a/src/mongo/gotools/set_gopath.sh b/src/mongo/gotools/set_gopath.sh
new file mode 100755
index 00000000000..24b59a324e8
--- /dev/null
+++ b/src/mongo/gotools/set_gopath.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+TOOLS_PKG='github.com/mongodb/mongo-tools'
+
+setgopath() {
+ if [ "Windows_NT" != "$OS" ]; then
+ SOURCE_GOPATH=`pwd`/.gopath
+ VENDOR_GOPATH=`pwd`/vendor
+
+ # set up the $GOPATH to use the vendored dependencies as
+ # well as the source for the mongo tools
+ rm -rf .gopath/
+ mkdir -p .gopath/src/"$(dirname "${TOOLS_PKG}")"
+ ln -sf `pwd` .gopath/src/$TOOLS_PKG
+ export GOPATH=`pwd`/.gopath:`pwd`/vendor
+ else
+ local SOURCE_GOPATH=`pwd`/.gopath
+ local VENDOR_GOPATH=`pwd`/vendor
+ SOURCE_GOPATH=$(cygpath -w $SOURCE_GOPATH);
+ VENDOR_GOPATH=$(cygpath -w $VENDOR_GOPATH);
+
+ # set up the $GOPATH to use the vendored dependencies as
+ # well as the source for the mongo tools
+ rm -rf .gopath/
+ mkdir -p .gopath/src/"$TOOLS_PKG"
+ cp -r `pwd`/bsondump .gopath/src/$TOOLS_PKG
+ cp -r `pwd`/common .gopath/src/$TOOLS_PKG
+ cp -r `pwd`/mongodump .gopath/src/$TOOLS_PKG
+ cp -r `pwd`/mongoexport .gopath/src/$TOOLS_PKG
+ cp -r `pwd`/mongofiles .gopath/src/$TOOLS_PKG
+ cp -r `pwd`/mongoimport .gopath/src/$TOOLS_PKG
+ cp -r `pwd`/mongooplog .gopath/src/$TOOLS_PKG
+ cp -r `pwd`/mongorestore .gopath/src/$TOOLS_PKG
+ cp -r `pwd`/mongostat .gopath/src/$TOOLS_PKG
+ cp -r `pwd`/mongotop .gopath/src/$TOOLS_PKG
+ cp -r `pwd`/vendor/src/github.com/* .gopath/src/github.com
+ cp -r `pwd`/vendor/src/gopkg.in .gopath/src/
+ export GOPATH="$SOURCE_GOPATH;$VENDOR_GOPATH"
+ fi;
+}
+
+setgopath
diff --git a/src/mongo/gotools/test/legacy24/buildscripts/buildlogger.py b/src/mongo/gotools/test/legacy24/buildscripts/buildlogger.py
new file mode 100644
index 00000000000..a31b3e2dfa1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/buildscripts/buildlogger.py
@@ -0,0 +1,480 @@
+"""
+buildlogger.py
+
+Wrap a command (specified on the command line invocation of buildlogger.py)
+and send output in batches to the buildlogs web application via HTTP POST.
+
+The script configures itself from environment variables:
+
+ required env vars:
+ MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit")
+ MONGO_BUILD_NUMBER (an integer)
+ MONGO_TEST_FILENAME (not required when invoked with -g)
+
+ optional env vars:
+ MONGO_PHASE (e.g. "core", "slow nightly", etc)
+ MONGO_* (any other environment vars are passed to the web app)
+ BUILDLOGGER_CREDENTIALS (see below)
+
+This script has two modes: a "test" mode, intended to wrap the invocation of
+an individual test file, and a "global" mode, intended to wrap the mongod
+instances that run throughout the duration of a mongo test phase (the logs
+from "global" invocations are displayed interspersed with the logs of each
+test, in order to let the buildlogs web app display the full output sensibly.)
+
+If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a
+path to a valid Python file containing "username" and "password" variables,
+which should be valid credentials for authenticating to the buildlogger web
+app. For example:
+
+ username = "hello"
+ password = "world"
+
+If BUILDLOGGER_CREDENTIALS is a relative path, then the working directory
+and the directories one, two, and three levels up, are searched, in that
+order.
+"""
+
+import functools
+import os
+import os.path
+import re
+import signal
+import socket
+import subprocess
+import sys
+import time
+import traceback
+import urllib2
+import utils
+
+# suppress deprecation warnings that happen when
+# we import the 'buildbot.tac' file below
+import warnings
+warnings.simplefilter('ignore', DeprecationWarning)
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# try to load credentials from the file named by BUILDLOGGER_CREDENTIALS
+# (by default "buildbot.tac"), which will be one, two, or three
+# directories up from this file's location
+credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac')
+credentials_loc, credentials_name = os.path.split(credentials_file)
+if not credentials_loc:
+ here = os.path.abspath(os.path.dirname(__file__))
+ possible_paths = [
+ os.path.abspath(os.path.join(here, '..')),
+ os.path.abspath(os.path.join(here, '..', '..')),
+ os.path.abspath(os.path.join(here, '..', '..', '..')),
+ ]
+else:
+ possible_paths = [credentials_loc]
+
+username, password = None, None
+for path in possible_paths:
+ credentials_path = os.path.join(path, credentials_name)
+ if os.path.isfile(credentials_path):
+ credentials = {}
+ try:
+ execfile(credentials_path, credentials, credentials)
+ username = credentials.get('slavename', credentials.get('username'))
+ password = credentials.get('passwd', credentials.get('password'))
+ break
+ except:
+ pass
+
+
+URL_ROOT = os.environ.get('BUILDLOGGER_URL', 'http://buildlogs.mongodb.org/')
+TIMEOUT_SECONDS = 10
+socket.setdefaulttimeout(TIMEOUT_SECONDS)
+
+digest_handler = urllib2.HTTPDigestAuthHandler()
+digest_handler.add_password(
+ realm='buildlogs',
+ uri=URL_ROOT,
+ user=username,
+ passwd=password)
+
+# This version of HTTPErrorProcessor is copied from
+# Python 2.7, and allows REST response codes (e.g.
+# "201 Created") which are treated as errors by
+# older versions.
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
+ def http_response(self, request, response):
+ code, msg, hdrs = response.code, response.msg, response.info()
+
+ # According to RFC 2616, "2xx" code indicates that the client's
+ # request was successfully received, understood, and accepted.
+ if not (200 <= code < 300):
+ response = self.parent.error(
+ 'http', request, response, code, msg, hdrs)
+
+ return response
+
+url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor())
+
+def url(endpoint):
+ if not endpoint.endswith('/'):
+ endpoint = '%s/' % endpoint
+
+ return '%s/%s' % (URL_ROOT.rstrip('/'), endpoint)
+
+def post(endpoint, data, headers=None):
+ data = json.dumps(data, encoding='utf-8')
+
+ headers = headers or {}
+ headers.update({'Content-Type': 'application/json; charset=utf-8'})
+
+ req = urllib2.Request(url=url(endpoint), data=data, headers=headers)
+ try:
+ response = url_opener.open(req)
+ except urllib2.URLError:
+ import traceback
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ # indicate that the request did not succeed
+ return None
+
+ response_headers = dict(response.info())
+
+ # eg "Content-Type: application/json; charset=utf-8"
+ content_type = response_headers.get('content-type')
+ match = re.match(r'(?P<mimetype>[^;]+).*(?:charset=(?P<charset>[^ ]+))?$', content_type)
+ if match and match.group('mimetype') == 'application/json':
+ encoding = match.group('charset') or 'utf-8'
+ return json.load(response, encoding=encoding)
+
+ return response.read()
+
+def traceback_to_stderr(func):
+ """
+ decorator which logs any exceptions encountered to stderr
+ and returns none.
+ """
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError, err:
+ sys.stderr.write('error: HTTP code %d\n----\n' % err.code)
+ if hasattr(err, 'hdrs'):
+ for k, v in err.hdrs.items():
+ sys.stderr.write("%s: %s\n" % (k, v))
+ sys.stderr.write('\n')
+ sys.stderr.write(err.read())
+ sys.stderr.write('\n----\n')
+ sys.stderr.flush()
+ except:
+ sys.stderr.write('Traceback from buildlogger:\n')
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ return None
+ return wrapper
+
+
+@traceback_to_stderr
+def get_or_create_build(builder, buildnum, extra={}):
+ data = {'builder': builder, 'buildnum': buildnum}
+ data.update(extra)
+ response = post('build', data)
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def create_test(build_id, test_filename, test_command, test_phase):
+ response = post('build/%s/test' % build_id, {
+ 'test_filename': test_filename,
+ 'command': test_command,
+ 'phase': test_phase,
+ })
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def append_test_logs(build_id, test_id, log_lines):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def append_global_logs(build_id, log_lines):
+ """
+ "global" logs are for the mongod(s) started by smoke.py
+ that last the duration of a test phase -- since there
+ may be output in here that is important but spans individual
+ tests, the buildlogs webapp handles these logs specially.
+ """
+ response = post('build/%s' % build_id, data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def finish_test(build_id, test_id, failed=False):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=[], headers={
+ 'X-Sendlogs-Test-Done': 'true',
+ 'X-Sendlogs-Test-Failed': failed and 'true' or 'false',
+ })
+ if response is None:
+ return False
+ return True
+
+def run_and_echo(command):
+ """
+ this just calls the command, and returns its return code,
+ allowing stdout and stderr to work as normal. it is used
+ as a fallback when environment variables or python
+ dependencies cannot be configured, or when the logging
+ webapp is unavailable, etc
+ """
+ proc = subprocess.Popen(command)
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ proc.wait()
+
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+class LogAppender(object):
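+ """
+ Buffer log lines and flush them to `callback` in batches: a batch is
+ sent once send_after_lines lines have accumulated or send_after_seconds
+ have elapsed since the last send. Batches that fail to send are kept in
+ a retry buffer and resent with the next submission.
+ """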
+ def __init__(self, callback, args, send_after_lines=2000, send_after_seconds=10):
+ self.callback = callback
+ self.callback_args = args
+
+ self.send_after_lines = send_after_lines
+ self.send_after_seconds = send_after_seconds
+
+ self.buf = []
+ self.retrybuf = []
+ self.last_sent = time.time()
+
+ def __call__(self, line):
+ self.buf.append((time.time(), line))
+
+ delay = time.time() - self.last_sent
+ if len(self.buf) >= self.send_after_lines or delay >= self.send_after_seconds:
+ self.submit()
+
+ # no return value is expected
+
+ def submit(self):
+ if len(self.buf) + len(self.retrybuf) == 0:
+ return True
+
+ args = list(self.callback_args)
+ args.append(list(self.buf) + self.retrybuf)
+
+ self.last_sent = time.time()
+
+ if self.callback(*args):
+ self.buf = []
+ self.retrybuf = []
+ return True
+ else:
+ self.retrybuf += self.buf
+ self.buf = []
+ return False
+
+
+def wrap_test(command):
+ """
+ call the given command, intercept its stdout and stderr,
+ and send results in batches (by default 2000 lines or 10s) to the
+ buildlogger webapp
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ # test takes some extra info
+ phase = os.environ.get('MONGO_PHASE', 'unknown')
+ test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+ build_info.pop('MONGO_PHASE', None)
+ build_info.pop('MONGO_TEST_FILENAME', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ test_id = create_test(build_id, test_filename, ' '.join(command), phase)
+ if not test_id:
+ return run_and_echo(command)
+
+ # the peculiar formatting here matches what is printed by
+ # smoke.py when starting tests
+ output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
+ sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
+ sys.stdout.flush()
+
+ callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+ returncode = loop_and_callback(command, callback)
+ failed = bool(returncode != 0)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ tries = 5
+ while not finish_test(build_id, test_id, failed) and tries > 0:
+ sys.stderr.write('failed to mark test finished, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def wrap_global(command):
+ """
+ call the given command, intercept its stdout and stderr,
+ and send results in batches (by default 2000 lines or 10s) to the
+ buildlogger webapp. see :func:`append_global_logs` for the
+ difference between "global" and "test" log output.
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('int(os.environ["MONGO_BUILD_NUMBER"]):\n')
+ sys.stderr.write(traceback.format_exc())
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ callback = LogAppender(callback=append_global_logs, args=(build_id, ))
+ returncode = loop_and_callback(command, callback)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def loop_and_callback(command, callback):
+ """
+ run the given command (a sequence of arguments, ordinarily
+ from sys.argv), and call the given callback with each line
+ of stdout or stderr encountered. after the command is finished,
+ callback is called once more with None instead of a string.
+ """
+ proc = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+
+ # register a handler to delegate SIGTERM
+ # to the child process
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ while proc.poll() is None:
+ try:
+ line = proc.stdout.readline().strip('\r\n')
+ line = utils.unicode_dammit(line)
+ callback(line)
+ except IOError:
+ # if the signal handler is called while
+ # we're waiting for readline() to return,
+ # don't show a traceback
+ break
+
+ # There may be additional buffered output
+ for line in proc.stdout.readlines():
+ callback(line.strip('\r\n'))
+
+ # restore the original signal handler, if any
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+
+if __name__ == '__main__':
+ # argv[0] is 'buildlogger.py'
+ del sys.argv[0]
+
+ if sys.argv[0] in ('-g', '--global'):
+ # then this is wrapping a "global" command, and should
+ # submit global logs to the build, not test logs to a
+ # test within the build
+ del sys.argv[0]
+ wrapper = wrap_global
+
+ else:
+ wrapper = wrap_test
+
+ # if we are missing credentials or the json module, then
+ # we can't use buildlogger; so just echo output, but also
+ # log why we can't work.
+ if json is None:
+ sys.stderr.write('buildlogger: could not import a json module\n')
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ elif username is None or password is None:
+ sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file)
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ # otherwise wrap a test command as normal; the
+ # wrapper functions return the return code of
+ # the wrapped command, so that should be our
+ # exit code as well.
+ sys.exit(wrapper(sys.argv))
+
diff --git a/src/mongo/gotools/test/legacy24/buildscripts/cleanbb.py b/src/mongo/gotools/test/legacy24/buildscripts/cleanbb.py
new file mode 100644
index 00000000000..fee7efdc0c1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/buildscripts/cleanbb.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os, os.path
+import utils
+import time
+from optparse import OptionParser
+
+def shouldKill( c, root=None ):
+
+ if "smoke.py" in c:
+ return False
+
+ if "emr.py" in c:
+ return False
+
+ if "java" in c:
+ return False
+
+ # if root directory is provided, see if command line matches mongod process running
+ # with the same data directory
+
+ if root and re.compile("(\W|^)mongod(.exe)?\s+.*--dbpath(\s+|=)%s(\s+|$)" % root).search( c ):
+ return True
+
+ if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
+ return True
+
+ if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
+ return True
+
+ return False
+
+def killprocs( signal="", root=None ):
+ killed = 0
+
+ if sys.platform == 'win32':
+ return killed
+
+ l = utils.getprocesslist()
+ print( "num procs:" + str( len( l ) ) )
+ if len(l) == 0:
+ print( "no procs" )
+ try:
+ print( utils.execsys( "/sbin/ifconfig -a" ) )
+ except Exception,e:
+ print( "can't get interfaces" + str( e ) )
+
+ for x in l:
+ x = x.lstrip()
+ if not shouldKill( x, root=root ):
+ continue
+
+ pid = x.split( " " )[0]
+ print( "killing: " + x )
+ utils.execsys( "/bin/kill " + signal + " " + pid )
+ killed = killed + 1
+
+ return killed
+
+
+def tryToRemove(path):
+ for _ in range(60):
+ try:
+ os.remove(path)
+ return True
+ except OSError, e:
+ errno = getattr(e, 'winerror', None)
+ # check for the access denied and file in use WindowsErrors
+ if errno in (5, 32):
+ print("os.remove(%s) failed, retrying in one second." % path)
+ time.sleep(1)
+ else:
+ raise e
+ return False
+
+
+def cleanup( root , nokill ):
+ if nokill:
+ print "nokill requested, not killing anybody"
+ else:
+ if killprocs( root=root ) > 0:
+ time.sleep(3)
+ killprocs( "-9", root=root )
+
+ # delete all regular files, directories can stay
+ # NOTE: if we delete directories later, we can't delete diskfulltest
+ for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+ for x in filenames:
+ foo = dirpath + "/" + x
+ if os.path.exists(foo):
+ if not tryToRemove(foo):
+ raise Exception("Couldn't remove file '%s' after 60 seconds" % foo)
+
+if __name__ == "__main__":
+ parser = OptionParser(usage="read the script")
+ parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+ (options, args) = parser.parse_args()
+
+ root = "/data/db/"
+ if len(args) > 0:
+ root = args[0]
+
+ cleanup( root , options.nokill )
diff --git a/src/mongo/gotools/test/legacy24/buildscripts/smoke.py b/src/mongo/gotools/test/legacy24/buildscripts/smoke.py
new file mode 100755
index 00000000000..29fe6dbd712
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/buildscripts/smoke.py
@@ -0,0 +1,1314 @@
+#!/usr/bin/env python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test",
+# "perftest"), don't take arguments for the dbpath, but
+# unconditionally use "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+
+# 4 Running a separate mongo shell for each js file is slower than
+# loading js files into one mongo shell process. Maybe have runTest
+# queue up all filenames ending in ".js" and run them in one mongo
+# shell at the "end" of testing?
+
+# 5 Right now small-oplog implies master/slave replication. Maybe
+# running with replication should be an orthogonal concern. (And
+# maybe test replica set replication, too.)
+
+# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills
+# off all mongods on a box, which means you can't run two smoke.py
+# jobs on the same host at once. So something's gotta change.
+
+from datetime import datetime
+from itertools import izip
+import glob
+from optparse import OptionParser
+import os
+import pprint
+import re
+import shlex
+import socket
+import stat
+from subprocess import (PIPE, Popen, STDOUT)
+import sys
+import time
+
+from pymongo import MongoClient
+from pymongo.errors import OperationFailure
+from pymongo import ReadPreference
+
+import cleanbb
+import utils
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ from hashlib import md5 # new in 2.5
+except ImportError:
+ from md5 import md5 # deprecated in 2.5
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+
+# TODO clean this up so we don't need globals...
+mongo_repo = os.getcwd() #'./'
+failfile = os.path.join(mongo_repo, 'failfile.smoke')
+test_path = None
+mongod_executable = None
+mongod_port = None
+shell_executable = None
+continue_on_failure = None
+file_of_commands_mode = False
+start_mongod = True
+temp_path = None
+clean_every_n_tests = 1
+clean_whole_dbroot = False
+
+tests = []
+winners = []
+losers = {}
+fails = [] # like losers but in format of tests
+
+# For replication hash checking
+replicated_collections = []
+lost_in_slave = []
+lost_in_master = []
+screwy_in_slave = {}
+
+smoke_db_prefix = ''
+small_oplog = False
+small_oplog_rs = False
+
+test_report = { "results": [] }
+report_file = None
+
+# This class just implements the with statement API, for a sneaky
+# purpose below.
+class Nothing(object):
+ def __enter__(self):
+ return self
+ def __exit__(self, type, value, traceback):
+ return not isinstance(value, Exception)
+
+def buildlogger(cmd, is_global=False):
+ # if the environment variable MONGO_USE_BUILDLOGGER
+ # is set to 'true', then wrap the command with a call
+ # to buildlogger.py, which sends output to the buildlogger
+ # machine; otherwise, return as usual.
+ if os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true':
+ if is_global:
+ return [utils.find_python(), 'buildscripts/buildlogger.py', '-g'] + cmd
+ else:
+ return [utils.find_python(), 'buildscripts/buildlogger.py'] + cmd
+ return cmd
+
+
+def clean_dbroot(dbroot="", nokill=False):
+ # Clean entire /data/db dir if --with-cleanbb, else clean specific database path.
+ if clean_whole_dbroot and not small_oplog:
+ dbroot = os.path.normpath(smoke_db_prefix + "/data/db")
+ if os.path.exists(dbroot):
+ print("clean_dbroot: %s" % dbroot)
+ cleanbb.cleanup(dbroot, nokill)
+
+
+class mongod(object):
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+ self.proc = None
+ self.auth = False
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ try:
+ self.stop()
+ except Exception, e:
+ print >> sys.stderr, "error shutting down mongod"
+ print >> sys.stderr, e
+ return not isinstance(value, Exception)
+
+ def ensure_test_dirs(self):
+ utils.ensureDir(smoke_db_prefix + "/tmp/unittest/")
+ utils.ensureDir(smoke_db_prefix + "/data/")
+ utils.ensureDir(smoke_db_prefix + "/data/db/")
+
+ def check_mongo_port(self, port=27017):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", int(port)))
+ sock.close()
+
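+    # NB: the port defaults on the methods below bind the module-level
+    # mongod_port at class-definition time (still None then), so callers
+    # always pass the port explicitly.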
+ def is_mongod_up(self, port=mongod_port):
+ try:
+ self.check_mongo_port(int(port))
+ return True
+        except Exception, e:
+ print >> sys.stderr, e
+ return False
+
+ def did_mongod_start(self, port=mongod_port, timeout=300):
+ while timeout > 0:
+ time.sleep(1)
+ is_up = self.is_mongod_up(port)
+ if is_up:
+ return True
+ timeout = timeout - 1
+ print >> sys.stderr, "timeout starting mongod"
+ return False
+
+ def start(self):
+ global mongod_port
+ global mongod
+ if self.proc:
+ print >> sys.stderr, "probable bug: self.proc already set in start()"
+ return
+ self.ensure_test_dirs()
+ dir_name = smoke_db_prefix + "/data/db/sconsTests/"
+ self.port = int(mongod_port)
+ self.slave = False
+ if 'slave' in self.kwargs:
+ dir_name = smoke_db_prefix + '/data/db/sconsTestsSlave/'
+ srcport = mongod_port
+ self.port += 1
+ self.slave = True
+
+ clean_dbroot(dbroot=dir_name, nokill=self.slave)
+ utils.ensureDir(dir_name)
+
+ argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
+        # These parameters are always set for tests
+ # SERVER-9137 Added httpinterface parameter to keep previous behavior
+ argv += ['--setParameter', 'enableTestCommands=1']
+ if self.kwargs.get('small_oplog'):
+ argv += ["--master", "--oplogSize", "511"]
+ params = self.kwargs.get('set_parameters', None)
+ if params:
+ for p in params.split(','): argv += ['--setParameter', p]
+ if self.kwargs.get('small_oplog_rs'):
+ argv += ["--replSet", "foo", "--oplogSize", "511"]
+ if self.slave:
+ argv += ['--slave', '--source', 'localhost:' + str(srcport)]
+ if self.kwargs.get('no_journal'):
+ argv += ['--nojournal']
+ if self.kwargs.get('no_preallocj'):
+ argv += ['--nopreallocj']
+ if self.kwargs.get('auth'):
+ argv += ['--auth', '--setParameter', 'enableLocalhostAuthBypass=false']
+ authMechanism = self.kwargs.get('authMechanism', 'MONGODB-CR')
+ if authMechanism != 'MONGODB-CR':
+ argv += ['--setParameter', 'authenticationMechanisms=' + authMechanism]
+ self.auth = True
+ if self.kwargs.get('keyFile'):
+ argv += ['--keyFile', self.kwargs.get('keyFile')]
+ if self.kwargs.get('use_ssl') or self.kwargs.get('use_x509'):
+ argv += ['--sslMode', "requireSSL",
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation']
+ if self.kwargs.get('use_x509'):
+            argv += ['--clusterAuthMode', 'x509']
+ self.auth = True
+ print "running " + " ".join(argv)
+ self.proc = self._start(buildlogger(argv, is_global=True))
+
+ if not self.did_mongod_start(self.port):
+ raise Exception("Failed to start mongod")
+
+ if self.slave:
+ local = MongoClient(port=self.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).local
+ synced = False
+ while not synced:
+ synced = True
+ for source in local.sources.find(fields=["syncedTo"]):
+ synced = synced and "syncedTo" in source and source["syncedTo"]
+
+ def _start(self, argv):
+ """In most cases, just call subprocess.Popen(). On windows,
+ add the started process to a new Job Object, so that any
+ child processes of this process can be killed with a single
+ call to TerminateJobObject (see self.stop()).
+ """
+
+ if os.sys.platform == "win32":
+ # Create a job object with the "kill on job close"
+ # flag; this is inherited by child processes (ie
+ # the mongod started on our behalf by buildlogger)
+ # and lets us terminate the whole tree of processes
+ # rather than orphaning the mongod.
+ import win32job
+
+ # Magic number needed to allow job reassignment in Windows 7
+ # see: MSDN - Process Creation Flags - ms684863
+ CREATE_BREAKAWAY_FROM_JOB = 0x01000000
+
+ proc = Popen(argv, creationflags=CREATE_BREAKAWAY_FROM_JOB)
+
+ self.job_object = win32job.CreateJobObject(None, '')
+
+ job_info = win32job.QueryInformationJobObject(
+ self.job_object, win32job.JobObjectExtendedLimitInformation)
+ job_info['BasicLimitInformation']['LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ win32job.SetInformationJobObject(
+ self.job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ win32job.AssignProcessToJobObject(self.job_object, proc._handle)
+
+ else:
+ proc = Popen(argv)
+
+ return proc
+
+ def stop(self):
+ if not self.proc:
+ print >> sys.stderr, "probable bug: self.proc unset in stop()"
+ return
+ try:
+ if os.sys.platform == "win32":
+ import win32job
+ win32job.TerminateJobObject(self.job_object, -1)
+ import time
+ # Windows doesn't seem to kill the process immediately, so give it some time to die
+ time.sleep(5)
+ else:
+                # This function is not available in Python 2.5
+ self.proc.terminate()
+ except AttributeError:
+ from os import kill
+ kill(self.proc.pid, 15)
+ self.proc.wait()
+ sys.stderr.flush()
+ sys.stdout.flush()
+
+ def wait_for_repl(self):
+ print "Awaiting replicated (w:2, wtimeout:5min) insert (port:" + str(self.port) + ")"
+ MongoClient(port=self.port).testing.smokeWait.insert({}, w=2, wtimeout=5*60*1000)
+ print "Replicated write completed -- done wait_for_repl"
+
+class Bug(Exception):
+ def __str__(self):
+ return 'bug in smoke.py: ' + super(Bug, self).__str__()
+
+class TestFailure(Exception):
+ pass
+
+class TestExitFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+        self.status = args[1]
+
+ def __str__(self):
+ return "test %s exited with status %d" % (self.path, self.status)
+
+class TestServerFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status = -1 # this is meaningless as an exit code, but
+ # that's the point.
+ def __str__(self):
+ return 'mongod not running after executing test %s' % self.path
+
+def check_db_hashes(master, slave):
+ # Need to pause a bit so a slave might catch up...
+ if not slave.slave:
+        raise Bug("slave instance doesn't have slave attribute set")
+
+ print "waiting for slave (%s) to catch up to master (%s)" % (slave.port, master.port)
+ master.wait_for_repl()
+ print "caught up!"
+
+ # FIXME: maybe make this run dbhash on all databases?
+ for mongod in [master, slave]:
+ client = MongoClient(port=mongod.port, read_preference=ReadPreference.SECONDARY_PREFERRED)
+ mongod.dbhash = client.test.command("dbhash")
+ mongod.dict = mongod.dbhash["collections"]
+
+ global lost_in_slave, lost_in_master, screwy_in_slave, replicated_collections
+
+ replicated_collections += master.dict.keys()
+
+ for coll in replicated_collections:
+        if coll not in slave.dict and coll not in lost_in_slave:
+            lost_in_slave.append(coll)
+        if coll not in master.dict or coll not in slave.dict:
+            continue # collection missing on one side; avoid a KeyError below
+        mhash = master.dict[coll]
+        shash = slave.dict[coll]
+ if mhash != shash:
+ mTestDB = MongoClient(port=master.port).test
+ sTestDB = MongoClient(port=slave.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).test
+ mCount = mTestDB[coll].count()
+ sCount = sTestDB[coll].count()
+ stats = {'hashes': {'master': mhash, 'slave': shash},
+ 'counts':{'master': mCount, 'slave': sCount}}
+ try:
+ mDocs = list(mTestDB[coll].find().sort("_id", 1))
+ sDocs = list(sTestDB[coll].find().sort("_id", 1))
+ mDiffDocs = list()
+ sDiffDocs = list()
+ for left, right in izip(mDocs, sDocs):
+ if left != right:
+ mDiffDocs.append(left)
+ sDiffDocs.append(right)
+
+ stats["docs"] = {'master': mDiffDocs, 'slave': sDiffDocs }
+ except Exception, e:
+ stats["error-docs"] = e;
+
+ screwy_in_slave[coll] = stats
+ if mhash == "no _id _index":
+            mOplog = mTestDB.connection.local["oplog.$main"]
+ oplog_entries = list(mOplog.find({"$or": [{"ns":mTestDB[coll].full_name}, \
+ {"op":"c"}]}).sort("$natural", 1))
+ print "oplog for %s" % mTestDB[coll].full_name
+ for doc in oplog_entries:
+ pprint.pprint(doc, width=200)
+
+
+ for db in slave.dict.keys():
+ if db not in master.dict and db not in lost_in_master:
+ lost_in_master.append(db)
+
+
+def ternary( b , l="true", r="false" ):
+ if b:
+ return l
+ return r
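+# Illustrative: ternary(no_journal) yields the string "true" or "false", which
+# is how Python booleans are spliced into the shell evalString below, e.g.
+#   'TestData.noJournal = ' + ternary( no_journal ) + ";"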
+
+# Blech.
+def skipTest(path):
+ basename = os.path.basename(path)
+ parentPath = os.path.dirname(path)
+ parentDir = os.path.basename(parentPath)
+ if small_oplog: # For tests running in parallel
+ if basename in ["cursor8.js", "indexh.js", "dropdb.js", "dropdb_race.js",
+ "connections_opened.js", "opcounters.js", "dbadmin.js"]:
+ return True
+ if use_ssl:
+ # Skip tests using mongobridge since it does not support SSL
+ # TODO: Remove when SERVER-10910 has been resolved.
+ if basename in ["gridfs.js", "initial_sync3.js", "majority.js", "no_chaining.js",
+ "rollback4.js", "slavedelay3.js", "sync2.js", "tags.js"]:
+ return True
+ # TODO: For now skip tests using MongodRunner, remove when SERVER-10909 has been resolved
+ if basename in ["fastsync.js", "index_retry.js", "ttl_repl_maintenance.js",
+ "unix_socket1.js"]:
+            return True
+ if auth or keyFile or use_x509: # For tests running with auth
+ # Skip any tests that run with auth explicitly
+ if parentDir.lower() == "auth" or "auth" in basename.lower():
+ return True
+ if parentPath == mongo_repo: # Skip client tests
+ return True
+ if parentDir == "tool": # SERVER-6368
+ return True
+ if parentDir == "dur": # SERVER-7317
+ return True
+ if parentDir == "disk": # SERVER-7356
+ return True
+
+ authTestsToSkip = [("jstests", "drop2.js"), # SERVER-8589,
+ ("jstests", "killop.js"), # SERVER-10128
+ ("sharding", "sync3.js"), # SERVER-6388 for this and those below
+ ("sharding", "sync6.js"),
+ ("sharding", "parallel.js"),
+ ("jstests", "bench_test1.js"),
+ ("jstests", "bench_test2.js"),
+ ("jstests", "bench_test3.js"),
+ ("core", "bench_test1.js"),
+ ("core", "bench_test2.js"),
+ ("core", "bench_test3.js"),
+ ]
+
+ if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]:
+ return True
+
+ return False
+
+forceCommandsForDirs = ["aggregation", "auth", "core", "parallel", "replsets"]
+# look for jstests and one of the above suites separated by either posix or windows slashes
+forceCommandsRE = re.compile(r"jstests[/\\](%s)" % ('|'.join(forceCommandsForDirs)))
+def setShellWriteModeForTest(path, argv):
+ swm = shell_write_mode
+ if swm == "legacy": # change when the default changes to "commands"
+ if use_write_commands or forceCommandsRE.search(path):
+ swm = "commands"
+ argv += ["--writeMode", swm]
+
+def runTest(test, result):
+ # result is a map containing test result details, like result["url"]
+
+ # test is a tuple of ( filename , usedb<bool> )
+ # filename should be a js file to run
+ # usedb is true if the test expects a mongod to be running
+
+ (path, usedb) = test
+ (ignore, ext) = os.path.splitext(path)
+ test_mongod = mongod()
+ mongod_is_up = test_mongod.is_mongod_up(mongod_port)
+ result["mongod_running_at_start"] = mongod_is_up;
+
+ if file_of_commands_mode:
+ # smoke.py was invoked like "--mode files --from-file foo",
+ # so don't try to interpret the test path too much
+ if os.sys.platform == "win32":
+ argv = [path]
+ else:
+ argv = shlex.split(path)
+ path = argv[0]
+ # if the command is a python script, use the script name
+ if os.path.basename(path) in ('python', 'python.exe'):
+ path = argv[1]
+ elif ext == ".js":
+ argv = [shell_executable, "--port", mongod_port, '--authenticationMechanism', authMechanism]
+
+ #setShellWriteModeForTest(path, argv)
+
+ if not usedb:
+ argv += ["--nodb"]
+ if small_oplog or small_oplog_rs:
+ argv += ["--eval", 'testingReplication = true;']
+ if use_ssl:
+ argv += ["--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidCertificates"]
+ argv += [path]
+ elif ext in ["", ".exe"]:
+ # Blech.
+ if os.path.basename(path) in ["test", "test.exe", "perftest", "perftest.exe"]:
+ argv = [path]
+ # default data directory for test and perftest is /tmp/unittest
+ if smoke_db_prefix:
+ dir_name = smoke_db_prefix + '/unittests'
+ argv.extend(["--dbpath", dir_name] )
+ # more blech
+ elif os.path.basename(path) in ['mongos', 'mongos.exe']:
+ argv = [path, "--test"]
+ else:
+ argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path,
+ "--port", mongod_port]
+ else:
+ raise Bug("fell off in extension case: %s" % path)
+
+ mongo_test_filename = os.path.basename(path)
+
+ # sys.stdout.write() is more atomic than print, so using it prevents
+ # lines being interrupted by, e.g., child processes
+ sys.stdout.write(" *******************************************\n")
+ sys.stdout.write(" Test : %s ...\n" % mongo_test_filename)
+ sys.stdout.flush()
+
+ # FIXME: we don't handle the case where the subprocess
+ # hangs... that's bad.
+    if ( argv[0].endswith( 'mongo' ) or argv[0].endswith( 'mongo.exe' ) ) and '--eval' not in argv:
+ evalString = 'load("jstests/libs/use_extended_timeout.js");' + \
+ 'TestData = new Object();' + \
+ 'TestData.testPath = "' + path + '";' + \
+ 'TestData.testFile = "' + os.path.basename( path ) + '";' + \
+ 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \
+ 'TestData.setParameters = "' + ternary( set_parameters, set_parameters, "" ) + '";' + \
+ 'TestData.setParametersMongos = "' + ternary( set_parameters_mongos, set_parameters_mongos, "" ) + '";' + \
+ 'TestData.noJournal = ' + ternary( no_journal ) + ";" + \
+ 'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \
+ 'TestData.auth = ' + ternary( auth ) + ";" + \
+ 'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \
+ 'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";" + \
+ 'TestData.authMechanism = ' + ternary( authMechanism,
+ '"' + str(authMechanism) + '"', 'null') + ";" + \
+ 'TestData.useSSL = ' + ternary( use_ssl ) + ";" + \
+ 'TestData.useX509 = ' + ternary( use_x509 ) + ";"
+ # this updates the default data directory for mongod processes started through shell (src/mongo/shell/servers.js)
+ evalString += 'MongoRunner.dataDir = "' + os.path.abspath(smoke_db_prefix + '/data/db') + '";'
+ evalString += 'MongoRunner.dataPath = MongoRunner.dataDir + "/";'
+ if os.sys.platform == "win32":
+ # double quotes in the evalString on windows; this
+ # prevents the backslashes from being removed when
+ # the shell (i.e. bash) evaluates this string. yuck.
+ evalString = evalString.replace('\\', '\\\\')
+
+ if auth and usedb:
+ evalString += 'jsTest.authenticate(db.getMongo());'
+
+ argv = argv + [ '--eval', evalString]
+
+ if argv[0].endswith( 'test' ) or argv[0].endswith( 'test.exe' ):
+ if no_preallocj :
+ argv = argv + [ '--nopreallocj' ]
+ if temp_path:
+ argv = argv + [ '--tempPath', temp_path ]
+
+
+ sys.stdout.write(" Command : %s\n" % ' '.join(argv))
+ sys.stdout.write(" Date : %s\n" % datetime.now().ctime())
+ sys.stdout.flush()
+
+ os.environ['MONGO_TEST_FILENAME'] = mongo_test_filename
+ t1 = time.time()
+
+ proc = Popen(buildlogger(argv), cwd=test_path, stdout=PIPE, stderr=STDOUT, bufsize=0)
+ first_line = proc.stdout.readline() # Get suppressed output URL
+ m = re.search(r"\s*\(output suppressed; see (?P<url>.*)\)" + os.linesep, first_line)
+ if m:
+ result["url"] = m.group("url")
+ sys.stdout.write(first_line)
+ sys.stdout.flush()
+ while True:
+ # print until subprocess's stdout closed.
+ # Not using "for line in file" since that has unwanted buffering.
+ line = proc.stdout.readline()
+ if not line:
+            break
+
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+ proc.wait() # wait if stdout is closed before subprocess exits.
+ r = proc.returncode
+
+ t2 = time.time()
+ del os.environ['MONGO_TEST_FILENAME']
+
+ timediff = t2 - t1
+ # timediff is seconds by default
+ scale = 1
+ suffix = "seconds"
+ # if timediff is less than 10 seconds use ms
+ if timediff < 10:
+ scale = 1000
+ suffix = "ms"
+ # if timediff is more than 60 seconds use minutes
+ elif timediff > 60:
+ scale = 1.0 / 60.0
+ suffix = "minutes"
+ sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix))
+ sys.stdout.flush()
+
+ result["exit_code"] = r
+
+ is_mongod_still_up = test_mongod.is_mongod_up(mongod_port)
+ if not is_mongod_still_up:
+ print "mongod is not running after test"
+ result["mongod_running_at_end"] = is_mongod_still_up;
+ if start_mongod:
+ raise TestServerFailure(path)
+
+ result["mongod_running_at_end"] = is_mongod_still_up;
+
+ if r != 0:
+ raise TestExitFailure(path, r)
+
+ print ""
+
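+# run_tests returns a shell-style status: 2 when mongod was no longer running
+# after a test (TestServerFailure), 1 on a test failure without
+# --continue-on-failure, and 0 otherwise (including failures that were
+# tolerated by --continue-on-failure).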
+def run_tests(tests):
+ # FIXME: some suites of tests start their own mongod, so don't
+ # need this. (So long as there are no conflicts with port,
+ # dbpath, etc., and so long as we shut ours down properly,
+ # starting this mongod shouldn't break anything, though.)
+
+ # The reason we want to use "with" is so that we get __exit__ semantics
+ # but "with" is only supported on Python 2.5+
+
+ if start_mongod:
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509).__enter__()
+ else:
+ master = Nothing()
+ try:
+ if small_oplog:
+ slave = mongod(slave=True,
+ set_parameters=set_parameters).__enter__()
+ elif small_oplog_rs:
+ slave = mongod(slave=True,
+ small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509).__enter__()
+            primary = MongoClient(port=master.port)
+
+ primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [
+ {'_id': 0, 'host':'localhost:%s' % master.port},
+ {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}})
+
+ ismaster = False
+ while not ismaster:
+ result = primary.admin.command("ismaster");
+ ismaster = result["ismaster"]
+ time.sleep(1)
+ else:
+ slave = Nothing()
+
+ try:
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+
+ for tests_run, test in enumerate(tests):
+ tests_run += 1 # enumerate from 1, python 2.5 compatible
+ test_result = { "start": time.time() }
+
+ (test_path, use_db) = test
+
+ if test_path.startswith(mongo_repo + os.path.sep):
+ test_result["test_file"] = test_path[len(mongo_repo)+1:]
+ else:
+ # user could specify a file not in repo. leave it alone.
+ test_result["test_file"] = test_path
+
+ try:
+ if skipTest(test_path):
+ test_result["status"] = "skip"
+
+ print "skipping " + test_path
+ else:
+ fails.append(test)
+ runTest(test, test_result)
+ fails.pop()
+ winners.append(test)
+
+ test_result["status"] = "pass"
+
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_report["results"].append( test_result )
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+ # check the db_hashes
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ check_and_report_replication_dbhashes()
+
+ elif use_db: # reach inside test and see if "usedb" is true
+ if clean_every_n_tests and (tests_run % clean_every_n_tests) == 0:
+ # Restart mongod periodically to clean accumulated test data
+ # clean_dbroot() is invoked by mongod.start()
+ master.__exit__(None, None, None)
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509).__enter__()
+
+ except TestFailure, f:
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["error"] = str(f)
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+ try:
+ print f
+ # Record the failing test and re-raise.
+ losers[f.path] = f.status
+ raise f
+ except TestServerFailure, f:
+ return 2
+ except TestFailure, f:
+ if not continue_on_failure:
+ return 1
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ finally:
+ slave.__exit__(None, None, None)
+ finally:
+ master.__exit__(None, None, None)
+ return 0
+
+
+def check_and_report_replication_dbhashes():
+ def missing(lst, src, dst):
+ if lst:
+ print """The following collections were present in the %s but not the %s
+at the end of testing:""" % (src, dst)
+ for db in lst:
+ print db
+
+ missing(lost_in_slave, "master", "slave")
+ missing(lost_in_master, "slave", "master")
+ if screwy_in_slave:
+ print """The following collections has different hashes in master and slave
+at the end of testing:"""
+ for coll in screwy_in_slave.keys():
+ stats = screwy_in_slave[coll]
+ # Counts are "approx" because they are collected after the dbhash runs and may not
+ # reflect the states of the collections that were hashed. If the hashes differ, one
+ # possibility is that a test exited with writes still in-flight.
+ print "collection: %s\t (master/slave) hashes: %s/%s counts (approx): %i/%i" % (coll, stats['hashes']['master'], stats['hashes']['slave'], stats['counts']['master'], stats['counts']['slave'])
+ if "docs" in stats:
+ if (("master" in stats["docs"] and len(stats["docs"]["master"]) != 0) or
+ ("slave" in stats["docs"] and len(stats["docs"]["slave"]) != 0)):
+ print "All docs matched!"
+ else:
+ print "Different Docs"
+ print "Master docs:"
+ pprint.pprint(stats["docs"]["master"], indent=2)
+ print "Slave docs:"
+ pprint.pprint(stats["docs"]["slave"], indent=2)
+ if "error-docs" in stats:
+ print "Error getting docs to diff:"
+ pprint.pprint(stats["error-docs"])
+ return True
+
+ if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave):
+ print "replication ok for %d collections" % (len(replicated_collections))
+
+ return False
+
+
+def report():
+ print "%d tests succeeded" % len(winners)
+ num_missed = len(tests) - (len(winners) + len(losers.keys()))
+ if num_missed:
+ print "%d tests didn't get run" % num_missed
+ if losers:
+ print "The following tests failed (with exit code):"
+ for loser in losers:
+ print "%s\t%d" % (loser, losers[loser])
+
+ test_result = { "start": time.time() }
+ if check_and_report_replication_dbhashes():
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["test_file"] = "/#dbhash#"
+ test_result["error"] = "dbhash mismatch"
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report ) )
+ f.close()
+
+ if losers or lost_in_slave or lost_in_master or screwy_in_slave:
+ raise Exception("Test failures")
+
+# Keys are the suite names (passed on the command line to smoke.py)
+# Values are pairs: (filenames, <start mongod before running tests>)
+suiteGlobalConfig = {"js": ("core/*.js", True),
+ "quota": ("quota/*.js", True),
+ "jsPerf": ("perf/*.js", True),
+ "disk": ("disk/*.js", True),
+ "noPassthroughWithMongod": ("noPassthroughWithMongod/*.js", True),
+ "noPassthrough": ("noPassthrough/*.js", False),
+ "parallel": ("parallel/*.js", True),
+ "clone": ("clone/*.js", False),
+ "repl": ("repl/*.js", False),
+ "replSets": ("replsets/*.js", False),
+ "dur": ("dur/*.js", False),
+ "auth": ("auth/*.js", False),
+ "sharding": ("sharding/*.js", False),
+ "tool": ("tool/*.js", False),
+ "aggregation": ("aggregation/*.js", True),
+ "multiVersion": ("multiVersion/*.js", True),
+ "failPoint": ("fail_point/*.js", False),
+ "ssl": ("ssl/*.js", True),
+ "sslSpecial": ("sslSpecial/*.js", True),
+ "jsCore": ("core/*.js", True),
+ "gle": ("gle/*.js", True),
+ "slow1": ("slow1/*.js", True),
+ "slow2": ("slow2/*.js", True),
+ }
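+# Illustrative: expand_suites(["js"]) resolves the "js" entry above to the
+# glob jstests/core/*.js, producing ("<mongo_repo>/jstests/core/<name>.js",
+# True) tuples.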
+
+def get_module_suites():
+ """Attempts to discover and return information about module test suites
+
+ Returns a dictionary of module suites in the format:
+
+ {
+ "<suite_name>" : "<full_path_to_suite_directory/[!_]*.js>",
+ ...
+ }
+
+ This means the values of this dictionary can be used as "glob"s to match all jstests in the
+ suite directory that don't start with an underscore
+
+ The module tests should be put in 'src/mongo/db/modules/<module_name>/<suite_name>/*.js'
+
+ NOTE: This assumes that if we have more than one module the suite names don't conflict
+ """
+ modules_directory = 'src/mongo/db/modules'
+ test_suites = {}
+
+ # Return no suites if we have no modules
+ if not os.path.exists(modules_directory) or not os.path.isdir(modules_directory):
+ return {}
+
+ module_directories = os.listdir(modules_directory)
+ for module_directory in module_directories:
+
+ test_directory = os.path.join(modules_directory, module_directory, "jstests")
+
+ # Skip this module if it has no "jstests" directory
+ if not os.path.exists(test_directory) or not os.path.isdir(test_directory):
+ continue
+
+ # Get all suites for this module
+ for test_suite in os.listdir(test_directory):
+ test_suites[test_suite] = os.path.join(test_directory, test_suite, "[!_]*.js")
+
+ return test_suites
+
+def expand_suites(suites,expandUseDB=True):
+ """Takes a list of suites and expands to a list of tests according to a set of rules.
+
+ Keyword arguments:
+ suites -- list of suites specified by the user
+ expandUseDB -- expand globs (such as [!_]*.js) for tests that are run against a database
+ (default True)
+
+ This function handles expansion of globs (such as [!_]*.js), aliases (such as "client" and
+ "all"), detection of suites in the "modules" directory, and enumerating the test files in a
+ given suite. It returns a list of tests of the form (path_to_test, usedb), where the second
+ part of the tuple specifies whether the test is run against the database (see --nodb in the
+ mongo shell)
+
+ """
+ globstr = None
+ tests = []
+ module_suites = get_module_suites()
+ for suite in suites:
+ if suite == 'all':
+ return expand_suites(['test',
+ 'perf',
+ 'jsCore',
+ 'jsPerf',
+ 'noPassthroughWithMongod',
+ 'noPassthrough',
+ 'clone',
+ 'parallel',
+ 'repl',
+ 'auth',
+ 'sharding',
+ 'slow1',
+ 'slow2',
+ 'tool'],
+ expandUseDB=expandUseDB)
+ if suite == 'test':
+ if os.sys.platform == "win32":
+ program = 'test.exe'
+ else:
+ program = 'test'
+ (globstr, usedb) = (program, False)
+ elif suite == 'perf':
+ if os.sys.platform == "win32":
+ program = 'perftest.exe'
+ else:
+ program = 'perftest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'mongosTest':
+ if os.sys.platform == "win32":
+ program = 'mongos.exe'
+ else:
+ program = 'mongos'
+ tests += [(os.path.join(mongo_repo, program), False)]
+ elif os.path.exists( suite ):
+ usedb = True
+ for name in suiteGlobalConfig:
+ if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ):
+ usedb = suiteGlobalConfig[name][1]
+ break
+ tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ]
+ elif suite in module_suites:
+ # Currently we connect to a database in all module tests since there's no mechanism yet
+ # to configure it independently
+ usedb = True
+ paths = glob.glob(module_suites[suite])
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+ else:
+ try:
+ globstr, usedb = suiteGlobalConfig[suite]
+ except KeyError:
+ raise Exception('unknown test suite %s' % suite)
+
+ if globstr:
+ if usedb and not expandUseDB:
+ tests += [ (suite,False) ]
+ else:
+ if globstr.endswith('.js'):
+ loc = 'jstests/'
+ else:
+ loc = ''
+ globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr)))
+ globstr = os.path.normpath(globstr)
+ paths = glob.glob(globstr)
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+
+ return tests
+
+def add_exe(e):
+ if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ):
+ e += ".exe"
+ return e
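+# Illustrative: add_exe("mongod") returns "mongod.exe" on win32 and "mongod"
+# unchanged elsewhere.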
+
+def set_globals(options, tests):
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure
+ global small_oplog, small_oplog_rs
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj
+ global auth, authMechanism, keyFile, keyFileData, smoke_db_prefix, test_path, start_mongod
+ global use_ssl, use_x509
+ global file_of_commands_mode
+ global report_file, shell_write_mode, use_write_commands
+ global temp_path
+ global clean_every_n_tests
+ global clean_whole_dbroot
+
+ start_mongod = options.start_mongod
+ if hasattr(options, 'use_ssl'):
+ use_ssl = options.use_ssl
+ if hasattr(options, 'use_x509'):
+ use_x509 = options.use_x509
+ use_ssl = use_ssl or use_x509
+    # Careful, this can be called multiple times
+ test_path = options.test_path
+
+ mongod_executable = add_exe(options.mongod_executable)
+ if not os.path.exists(mongod_executable):
+ raise Exception("no mongod found in this directory.")
+
+ mongod_port = options.mongod_port
+
+ shell_executable = add_exe( options.shell_executable )
+ if not os.path.exists(shell_executable):
+ raise Exception("no mongo shell found in this directory.")
+
+ continue_on_failure = options.continue_on_failure
+ smoke_db_prefix = options.smoke_db_prefix
+ small_oplog = options.small_oplog
+ if hasattr(options, "small_oplog_rs"):
+ small_oplog_rs = options.small_oplog_rs
+ no_journal = options.no_journal
+ set_parameters = options.set_parameters
+ set_parameters_mongos = options.set_parameters_mongos
+ no_preallocj = options.no_preallocj
+ auth = options.auth
+ authMechanism = options.authMechanism
+ keyFile = options.keyFile
+
+ clean_every_n_tests = options.clean_every_n_tests
+ clean_whole_dbroot = options.with_cleanbb
+
+ if auth and not keyFile:
+ # if only --auth was given to smoke.py, load the
+ # default keyFile from jstests/libs/authTestsKey
+ keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey')
+
+ if keyFile:
+ f = open(keyFile, 'r')
+ keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace
+ f.close()
+ os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR)
+ else:
+ keyFileData = None
+
+ # if smoke.py is running a list of commands read from a
+ # file (or stdin) rather than running a suite of js tests
+ file_of_commands_mode = options.File and options.mode == 'files'
+ # generate json report
+ report_file = options.report_file
+ temp_path = options.temp_path
+
+ use_write_commands = options.use_write_commands
+ shell_write_mode = options.shell_write_mode
+
+def file_version():
+ return md5(open(__file__, 'r').read()).hexdigest()
+
+def clear_failfile():
+ if os.path.exists(failfile):
+ os.remove(failfile)
+
+def run_old_fails():
+ global tests
+
+ try:
+ f = open(failfile, 'r')
+ state = pickle.load(f)
+ f.close()
+ except Exception:
+ try:
+ f.close()
+ except:
+ pass
+ clear_failfile()
+ return # This counts as passing so we will run all tests
+
+ if ('version' not in state or state['version'] != file_version()):
+ print "warning: old version of failfile.smoke detected. skipping recent fails"
+ clear_failfile()
+ return
+
+ testsAndOptions = state['testsAndOptions']
+ tests = [x[0] for x in testsAndOptions]
+ passed = []
+ try:
+ for (i, (test, options)) in enumerate(testsAndOptions):
+ # SERVER-5102: until we can figure out a better way to manage
+ # dependencies of the --only-old-fails build phase, just skip
+ # tests which we can't safely run at this point
+ path, usedb = test
+
+ if not os.path.exists(path):
+ passed.append(i)
+ winners.append(test)
+ continue
+
+ filename = os.path.basename(path)
+ if filename in ('test', 'test.exe') or filename.endswith('.js'):
+ set_globals(options, [filename])
+ oldWinners = len(winners)
+ run_tests([test])
+ if len(winners) != oldWinners: # can't use return value due to continue_on_failure
+ passed.append(i)
+ finally:
+ for offset, i in enumerate(passed):
+ testsAndOptions.pop(i - offset)
+
+ if testsAndOptions:
+ f = open(failfile, 'w')
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ pickle.dump(state, f)
+ else:
+ clear_failfile()
+
+ report() # exits with failure code if there is an error
+
+def add_to_failfile(tests, options):
+ try:
+ f = open(failfile, 'r')
+ testsAndOptions = pickle.load(f)["testsAndOptions"]
+ except Exception:
+ testsAndOptions = []
+
+ for test in tests:
+ if (test, options) not in testsAndOptions:
+ testsAndOptions.append( (test, options) )
+
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ f = open(failfile, 'w')
+ pickle.dump(state, f)
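+# Illustrative failfile contents (a pickled dict):
+#   {'version': '<md5 of this file>',
+#    'testsAndOptions': [((test_path, usedb), options), ...]}
+# run_old_fails() replays those entries and prunes the ones that now pass.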
+
+
+
+def main():
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, auth
+ global keyFile, smoke_db_prefix, test_path, use_write_commands
+
+ parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
+ parser.add_option('--mode', dest='mode', default='suite',
+ help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
+ # Some of our tests hard-code pathnames e.g., to execute, so until
+ # that changes we don't have the freedom to run from anyplace.
+ # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
+ parser.add_option('--test-path', dest='test_path', default=None,
+ help="Path to the test executables to run, "
+ "currently only used for 'client' (%default)")
+ parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
+ help='Path to mongod to run (%default)')
+ parser.add_option('--port', dest='mongod_port', default="27999",
+ help='Port the mongod will bind to (%default)')
+ parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
+ help='Path to mongo, for .js test files (%default)')
+ parser.add_option('--continue-on-failure', dest='continue_on_failure',
+ action="store_true", default=False,
+ help='If supplied, continue testing even after a test fails')
+ parser.add_option('--from-file', dest='File',
+ help="Run tests/suites named in FILE, one test per line, '-' means stdin")
+ parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
+ help="Prefix to use for the mongods' dbpaths ('%default')")
+ parser.add_option('--small-oplog', dest='small_oplog', default=False,
+ action="store_true",
+ help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
+ action="store_true",
+ help='Run tests with replica set replication & use a small oplog')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
+ parser.add_option('--auth', dest='auth', default=False,
+ action="store_true",
+ help='Run standalone mongods in tests with authentication enabled')
+ parser.add_option('--use-x509', dest='use_x509', default=False,
+ action="store_true",
+ help='Use x509 auth for internal cluster authentication')
+ parser.add_option('--authMechanism', dest='authMechanism', default='MONGODB-CR',
+ help='Use the given authentication mechanism, when --auth is used.')
+ parser.add_option('--keyFile', dest='keyFile', default=None,
+ help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
+ parser.add_option('--ignore', dest='ignore_files', default=None,
+ help='Pattern of files to ignore in tests')
+ parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
+ action="store_true",
+ help='Check the failfile and only run all tests that failed last time')
+ parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
+ action="store_true",
+ help='Clear the failfile. Do this if all tests pass')
+ parser.add_option('--with-cleanbb', dest='with_cleanbb', action="store_true",
+ default=False,
+ help='Clear database files before first test')
+ parser.add_option('--clean-every', dest='clean_every_n_tests', type='int',
+ default=20,
+ help='Clear database files every N tests [default %default]')
+ parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
+ action='store_false',
+ help='Do not start mongod before commencing test running')
+ parser.add_option('--use-ssl', dest='use_ssl', default=False,
+ action='store_true',
+ help='Run mongo shell and mongod instances with SSL encryption')
+ parser.add_option('--set-parameters', dest='set_parameters', default="",
+ help='Adds --setParameter to mongod for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--set-parameters-mongos', dest='set_parameters_mongos', default="",
+ help='Adds --setParameter to mongos for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--temp-path', dest='temp_path', default=None,
+ help='If present, passed as --tempPath to unittests and dbtests')
+ # Buildlogger invocation from command line
+ parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
+ action="store", help='Set the "builder name" for buildlogger')
+ parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
+ action="store", help='Set the "build number" for buildlogger')
+ parser.add_option('--buildlogger-url', dest='buildlogger_url', default=None,
+ action="store", help='Set the url root for the buildlogger service')
+ parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
+ action="store", help='Path to Python file containing buildlogger credentials')
+ parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
+ action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')
+ parser.add_option('--report-file', dest='report_file', default=None,
+ action='store',
+ help='Path to generate detailed json report containing all test details')
+ parser.add_option('--use-write-commands', dest='use_write_commands', default=False,
+ action='store_true',
+ help='Deprecated(use --shell-write-mode): Sets the shell to use write commands by default')
+ parser.add_option('--shell-write-mode', dest='shell_write_mode', default="legacy",
+ help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default:legacy)')
+
+ global tests
+ (options, tests) = parser.parse_args()
+
+ set_globals(options, tests)
+
+ buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
+ if all(buildlogger_opts):
+ os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
+ os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
+ os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
+ os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
+ if options.buildlogger_phase:
+ os.environ['MONGO_PHASE'] = options.buildlogger_phase
+ elif any(buildlogger_opts):
+        # some but not all of the required options were set
+ raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")
+
+ if options.buildlogger_url: #optional; if None, defaults to const in buildlogger.py
+ os.environ['BUILDLOGGER_URL'] = options.buildlogger_url
+
+ if options.File:
+ if options.File == '-':
+ tests = sys.stdin.readlines()
+ else:
+ f = open(options.File)
+ tests = f.readlines()
+ tests = [t.rstrip('\n') for t in tests]
+
+ if options.only_old_fails:
+ run_old_fails()
+ return
+ elif options.reset_old_fails:
+ clear_failfile()
+ return
+
+ # If we're in suite mode, tests is a list of names of sets of tests.
+ if options.mode == 'suite':
+ tests = expand_suites(tests)
+ elif options.mode == 'files':
+ tests = [(os.path.abspath(test), start_mongod) for test in tests]
+
+    if options.ignore_files is not None:
+ ignore_patt = re.compile( options.ignore_files )
+ print "Ignoring files with pattern: ", ignore_patt
+
+ def ignore_test( test ):
+ if ignore_patt.search( test[0] ) != None:
+ print "Ignoring test ", test[0]
+ return False
+ else:
+ return True
+
+ tests = filter( ignore_test, tests )
+
+ if not tests:
+ print "warning: no tests specified"
+ return
+
+ if options.with_cleanbb:
+ clean_dbroot(nokill=True)
+
+ test_report["start"] = time.time()
+ test_report["mongod_running_at_start"] = mongod().is_mongod_up(mongod_port)
+ try:
+ run_tests(tests)
+ finally:
+ add_to_failfile(fails, options)
+
+ test_report["end"] = time.time()
+ test_report["elapsed"] = test_report["end"] - test_report["start"]
+ test_report["failures"] = len(losers.keys())
+ test_report["mongod_running_at_end"] = mongod().is_mongod_up(mongod_port)
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report, indent=4, separators=(',', ': ')) )
+ f.close()
+
+ report()
+
+if __name__ == "__main__":
+ main()
diff --git a/src/mongo/gotools/test/legacy24/buildscripts/utils.py b/src/mongo/gotools/test/legacy24/buildscripts/utils.py
new file mode 100644
index 00000000000..68273ee69c8
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/buildscripts/utils.py
@@ -0,0 +1,230 @@
+
+import codecs
+import re
+import socket
+import time
+import os
+import os.path
+import itertools
+import subprocess
+import sys
+import hashlib
+
+# various utilities that are handy
+
+def getAllSourceFiles( arr=None , prefix="." ):
+ if arr is None:
+ arr = []
+
+ if not os.path.isdir( prefix ):
+ # assume a file
+ arr.append( prefix )
+ return arr
+
+ for x in os.listdir( prefix ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
+ continue
+ full = prefix + "/" + x
+ if os.path.isdir( full ) and not os.path.islink( full ):
+ getAllSourceFiles( arr , full )
+ else:
+ if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+ full = full.replace( "//" , "/" )
+ arr.append( full )
+
+ return arr
+
+
+def getGitBranch():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return None
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version.split( "/" )
+ version = version[len(version)-1]
+ return version
+
+def getGitBranchString( prefix="" , postfix="" ):
+ t = re.compile( '[/\\\]' ).split( os.getcwd() )
+ if len(t) > 2 and t[len(t)-1] == "mongo":
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
+
+ b = getGitBranch()
+ if b == None or b == "master":
+ return ""
+ return prefix + b + postfix
+
+def getGitVersion():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return "nogitversion"
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version[5:]
+ f = ".git/" + version
+ if not os.path.exists( f ):
+ return version
+ return open( f , 'r' ).read().strip()
+
+def execsys( args ):
+    if isinstance( args , str ):
+        r = re.compile( "\s+" )
+        args = r.split( args )
+    p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE )
+    r = p.communicate()
+    return r
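+# Illustrative: execsys("echo hi") splits the string on whitespace and returns
+# the (stdout, stderr) pair from communicate(), i.e. ("hi\n", "").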
+
+def getprocesslist():
+ raw = ""
+ try:
+ raw = execsys( "/bin/ps axww" )[0]
+    except Exception, e:
+ print( "can't get processlist: " + str( e ) )
+
+ r = re.compile( "[\r\n]+" )
+ return r.split( raw )
+
+def removeIfInList( lst , thing ):
+ if thing in lst:
+ lst.remove( thing )
+
+def findVersion( root , choices ):
+ for c in choices:
+ if ( os.path.exists( root + c ) ):
+ return root + c
+ raise "can't find a version of [" + root + "] choices: " + choices
+
+def choosePathExist( choices , default=None):
+ for c in choices:
+        if c is not None and os.path.exists( c ):
+ return c
+ return default
+
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
+def ensureDir( name ):
+ d = os.path.dirname( name )
+ if not os.path.exists( d ):
+ print( "Creating dir: " + name );
+ os.makedirs( d )
+ if not os.path.exists( d ):
+ raise "Failed to create dir: " + name
+
+
+def distinctAsString( arr ):
+ s = set()
+ for x in arr:
+ s.add( str(x) )
+ return list(s)
+
+def checkMongoPort( port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", port))
+ sock.close()
+
+
+def didMongodStart( port=27017 , timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ checkMongoPort( port )
+ return True
+    except Exception, e:
+ print( e )
+ timeout = timeout - 1
+ return False
+
+def which(executable):
+ if sys.platform == 'win32':
+ paths = os.environ.get('Path', '').split(';')
+ else:
+ paths = os.environ.get('PATH', '').split(':')
+
+ for path in paths:
+ path = os.path.expandvars(path)
+ path = os.path.expanduser(path)
+ path = os.path.abspath(path)
+ executable_path = os.path.join(path, executable)
+ if os.path.exists(executable_path):
+ return executable_path
+
+ return executable
+
+def md5sum( file ):
+ #TODO error handling, etc..
+ return execsys( "md5sum " + file )[0].partition(" ")[0]
+
+def md5string( a_string ):
+ return hashlib.md5(a_string).hexdigest()
+
+def find_python(min_version=(2, 5)):
+ try:
+ if sys.version_info >= min_version:
+ return sys.executable
+ except AttributeError:
+ # In case the version of Python is somehow missing sys.version_info or sys.executable.
+ pass
+
+ version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
+ binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python')
+ for binary in binaries:
+ try:
+ out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ for stream in (out, err):
+ match = version.search(stream)
+ if match:
+ versiontuple = tuple(map(int, match.group(1).split('.')))
+ if versiontuple >= min_version:
+ return which(binary)
+        except Exception:
+ pass
+
+ raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version))
+
+def smoke_command(*args):
+ # return a list of arguments that comprises a complete
+ # invocation of smoke.py
+ here = os.path.dirname(__file__)
+ smoke_py = os.path.abspath(os.path.join(here, 'smoke.py'))
+ # the --with-cleanbb argument causes smoke.py to run
+ # buildscripts/cleanbb.py before each test phase; this
+ # prevents us from running out of disk space on slaves
+ return [find_python(), smoke_py, '--with-cleanbb'] + list(args)
+
+def run_smoke_command(*args):
+ # to run a command line script from a scons Alias (or any
+ # Action), the command sequence must be enclosed in a list,
+ # otherwise SCons treats it as a list of dependencies.
+ return [smoke_command(*args)]
+
+# unicode is a pain. some strings cannot be unicode()'d
+# but we want to just preserve the bytes in a human-readable
+# fashion. this codec error handler will substitute the
+# repr() of the offending bytes into the decoded string
+# at the position they occurred
+def replace_with_repr(unicode_error):
+ offender = unicode_error.object[unicode_error.start:unicode_error.end]
+ return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end)
+
+codecs.register_error('repr', replace_with_repr)
+
+def unicode_dammit(string, encoding='utf8'):
+ # convert a string to a unicode, using the Python
+ # representation of non-ascii bytes when necessary
+ #
+    # name inspired by BeautifulSoup's "UnicodeDammit"
+ return string.decode(encoding, 'repr')
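+# Illustrative: clean UTF-8 decodes normally, while undecodable bytes are
+# substituted with their repr() instead of raising, e.g.
+#   unicode_dammit("caf\xc3\xa9") == u"caf\xe9"
+#   unicode_dammit("\xff") == u"\\xff"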
+
diff --git a/src/mongo/gotools/test/legacy24/jstests/libs/use_extended_timeout.js b/src/mongo/gotools/test/legacy24/jstests/libs/use_extended_timeout.js
new file mode 100644
index 00000000000..7f770249214
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/libs/use_extended_timeout.js
@@ -0,0 +1,12 @@
+var _orig_runMongoProgram = runMongoProgram;
+runMongoProgram = function() {
+ var args = [];
+ for (var i in arguments) {
+ args[i] = arguments[i];
+ }
+ var progName = args[0];
+ if (progName !== "bsondump" && args.indexOf("--dialTimeout") === -1) {
+ args.push("--dialTimeout", "30");
+ }
+ return _orig_runMongoProgram.apply(null, args);
+};
diff --git a/src/mongo/gotools/test/legacy24/jstests/replsets/rslib.js b/src/mongo/gotools/test/legacy24/jstests/replsets/rslib.js
new file mode 100644
index 00000000000..fb561cc90e1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/replsets/rslib.js
@@ -0,0 +1,115 @@
+
+var count = 0;
+var w = 0;
+
+var wait = function(f,msg) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up on ' + msg );
+ sleep(1000);
+ }
+};
+
+/**
+ * Use this to do something once every 4 iterations.
+ *
+ * <pre>
+ * for (i=0; i<1000; i++) {
+ * occasionally(function() { print("4 more iterations"); });
+ * }
+ * </pre>
+ */
+var occasionally = function(f, n) {
+ var interval = n || 4;
+ if (count % interval == 0) {
+ f();
+ }
+ count++;
+};
+
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ // make this work with either dbs or connections
+ if (typeof(a.getDB) == "function") {
+ db = a.getDB('foo');
+ }
+ else {
+ db = a;
+ }
+ db.bar.stats();
+ if (jsTest.options().keyFile) { // SERVER-4241: Shell connections don't re-authenticate on reconnect
+ return jsTest.authenticate(db.getMongo());
+ }
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
+
+var getLatestOp = function(server) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ var log = server.getDB("local")['oplog.rs'];
+ var cursor = log.find({}).sort({'$natural': -1}).limit(1);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
+
+
+var waitForAllMembers = function(master, timeout) {
+ var failCount = 0;
+
+ assert.soon( function() {
+ var state = null
+ try {
+ state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+ failCount = 0;
+ } catch ( e ) {
+ // Connection can get reset on replica set failover causing a socket exception
+ print( "Calling replSetGetStatus failed" );
+ print( e );
+ return false;
+ }
+ occasionally(function() { printjson(state); }, 10);
+
+ for (var m in state.members) {
+ if (state.members[m].state != 1 && // PRIMARY
+ state.members[m].state != 2 && // SECONDARY
+ state.members[m].state != 7) { // ARBITER
+ return false;
+ }
+ }
+ printjson( state );
+ return true;
+ }, "not all members ready", timeout || 60000);
+
+ print( "All members are now in state PRIMARY, SECONDARY, or ARBITER" );
+};
+
+var reconfig = function(rs, config) {
+ var admin = rs.getPrimary().getDB("admin");
+
+ try {
+ var ok = admin.runCommand({replSetReconfig : config});
+ assert.eq(ok.ok,1);
+ }
+ catch(e) {
+ print(e);
+ }
+
+ master = rs.getPrimary().getDB("admin");
+ waitForAllMembers(master);
+
+ return master;
+};
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/csv1.js b/src/mongo/gotools/test/legacy24/jstests/tool/csv1.js
new file mode 100644
index 00000000000..5eb7ab0249a
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/csv1.js
@@ -0,0 +1,42 @@
+// csv1.js
+
+t = new ToolTest( "csv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-', f : "."};
+
+assert.eq( 0 , c.count() , "setup1" );
+c.insert( base );
+delete base._id
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e,f" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e,f" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e', f : "f"} ) , tojson( a[1] ) , "csv parse 1" );
+assert.eq( tojson( base ) , tojson(a[0]) , "csv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.eq( tojson( base ) , tojson(x) , "csv parse 2" )
+
+
+
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/csvexport1.js b/src/mongo/gotools/test/legacy24/jstests/tool/csvexport1.js
new file mode 100644
index 00000000000..2ae85e37401
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/csvexport1.js
@@ -0,0 +1,47 @@
+// csvexport1.js
+
+t = new ToolTest( "csvexport1" )
+
+c = t.startDB( "foo" );
+
+assert.eq( 0 , c.count() , "setup1" );
+
+objId = ObjectId()
+
+c.insert({ a : new NumberInt(1) , b : objId , c: [1.0, 2.0, 3.0], d : {a : "hello", b : "world"} , e: '-'})
+c.insert({ a : -2.0, c : MinKey, d : "Then he said, \"Hello World!\"", e : new NumberLong(3)})
+c.insert({ a : new BinData(0, "1234"), b : ISODate("2009-08-27"), c : new Timestamp(1234, 9876), d : /foo*\"bar\"/i, e : function foo() { print("Hello World!"); }})
+
+assert.eq( 3 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b,c,d,e")
+
+
+c.drop()
+
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+
+assert.soon ( 3 + " == c.count()", "after import");
+
+// Note: Exporting and Importing to/from CSV is not designed to be round-trippable
+expected = []
+expected.push({ a : 1, b : "ObjectId(" + objId.valueOf() + ")", c : "[1.0,2.0,3.0]", d : "{\"a\":\"hello\",\"b\":\"world\"}", e : "-"})
+expected.push({ a : -2.0, b : "", c : "$MinKey", d : "Then he said, \"Hello World!\"", e : 3})
+// "t" should be 1234, but the shell interprets the first field of timestamps as milliseconds while
+// they are stored as seconds. See SERVER-7718.
+expected.push({ a : "D76DF8", b : "2009-08-27T00:00:00.000Z", c : "{ \"$timestamp\": { \"t\": 1234, \"i\": 9876 } }", d : "/foo*\\\"bar\\\"/i", e : tojson(function foo() { print("Hello World!"); })})
+
+actual = []
+actual.push(c.find({a : 1}).toArray()[0]);
+actual.push(c.find({a : -2.0}).toArray()[0]);
+actual.push(c.find({a : "D76DF8"}).toArray()[0]);
+
+for (i = 0; i < expected.length; i++) {
+ delete actual[i]._id
+ assert.eq( expected[i], actual[i], "CSV export " + i);
+}
+
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/csvexport2.js b/src/mongo/gotools/test/legacy24/jstests/tool/csvexport2.js
new file mode 100644
index 00000000000..3e0dd2c6829
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/csvexport2.js
@@ -0,0 +1,31 @@
+// csvexport2.js
+
+t = new ToolTest( "csvexport2" )
+
+c = t.startDB( "foo" );
+
+// This test is designed to test exporting of a CodeWithScope object.
+// However, due to SERVER-3391, it is not possible to create a CodeWithScope object in the mongo shell,
+// so this test does not work. Once SERVER-3391 is resolved, this test should be uncommented.
+
+//assert.eq( 0 , c.count() , "setup1" );
+
+//c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})})
+//assert.eq( 1 , c.count() , "setup2" );
+//t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b")
+
+
+//c.drop()
+
+//assert.eq( 0 , c.count() , "after drop" )
+//t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+//assert.soon ( 1 + " == c.count()", "after import");
+
+//expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" : \"World!\" } }"};
+//actual = c.findOne()
+
+//delete actual._id;
+//assert.eq( expected, actual );
+
+
+t.stop() \ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/csvimport1.js b/src/mongo/gotools/test/legacy24/jstests/tool/csvimport1.js
new file mode 100644
index 00000000000..3bff1110cbe
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/csvimport1.js
@@ -0,0 +1,40 @@
+// csvimport1.js
+
+t = new ToolTest( "csvimport1" )
+
+c = t.startDB( "foo" );
+
+base = []
+base.push({ a : 1, b : "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma", "c" : "This has leading and trailing whitespace!" })
+base.push({a : 2, b : "When someone says something you \"put it in quotes\"", "c" : "I like embedded quotes/slashes\\backslashes" })
+base.push({a : 3, b : " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "c" : "" })
+base.push({a : 4, b : "", "c" : "How are empty entries handled?" })
+base.push({a : 5, b : "\"\"", c : "\"This string is in quotes and contains empty quotes (\"\")\""})
+base.push({ a : "a" , b : "b" , c : "c"})
+
+assert.eq( 0 , c.count() , "setup" );
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" );
+assert.soon( base.length + " == c.count()" , "after import 1 " );
+
+a = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length; i++ ) {
+ delete a[i]._id
+ assert.eq( tojson(base[i]), tojson(a[i]), "csv parse " + i)
+}
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( base.length - 1 , c.count() , "after import 2" );
+
+x = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length - 1; i++ ) {
+ delete x[i]._id
+ assert.eq( tojson(base[i]), tojson(x[i]), "csv parse with headerline " + i)
+}
+
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/data/a.tsv b/src/mongo/gotools/test/legacy24/jstests/tool/data/a.tsv
new file mode 100644
index 00000000000..1e094179a63
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/data/a.tsv
@@ -0,0 +1,2 @@
+a b c d e
+ 1 foobar 5 -6
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/data/csvimport1.csv b/src/mongo/gotools/test/legacy24/jstests/tool/data/csvimport1.csv
new file mode 100644
index 00000000000..256d40a9184
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/data/csvimport1.csv
@@ -0,0 +1,8 @@
+a,b,c
+1,"this is some text.
+This text spans multiple lines, and just for fun
+contains a comma", "This has leading and trailing whitespace!"
+2, "When someone says something you ""put it in quotes""", I like embedded quotes/slashes\backslashes
+ 3 , " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", ""
+ "4" ,, How are empty entries handled?
+"5","""""", """This string is in quotes and contains empty quotes ("""")"""
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/data/dumprestore6/foo.bson b/src/mongo/gotools/test/legacy24/jstests/tool/data/dumprestore6/foo.bson
new file mode 100644
index 00000000000..b8f8f99e6bf
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/data/dumprestore6/foo.bson
Binary files differ
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/data/dumprestore6/system.indexes.bson b/src/mongo/gotools/test/legacy24/jstests/tool/data/dumprestore6/system.indexes.bson
new file mode 100644
index 00000000000..dde25da302a
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/data/dumprestore6/system.indexes.bson
Binary files differ
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumpauth.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumpauth.js
new file mode 100644
index 00000000000..6050fc6b79b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumpauth.js
@@ -0,0 +1,29 @@
+// dumpauth.js
+// test mongodump with authentication
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_dumpauth";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", "/data/db/" + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+t = db[ baseName ];
+t.drop();
+
+for(var i = 0; i < 100; i++) {
+ t["testcol"].save({ "x": i });
+}
+
+users = db.getCollection( "system.users" );
+
+db.addUser( "testuser" , "testuser" );
+
+assert( db.auth( "testuser" , "testuser" ) , "auth failed" );
+
+x = runMongoProgram( "mongodump",
+ "--db", baseName,
+ "--authenticationDatabase=admin",
+ "-u", "testuser",
+ "-p", "testuser",
+ "-h", "127.0.0.1:"+port,
+ "--collection", "testcol" );
+assert.eq(x, 0, "mongodump should succeed with authentication");
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumpfilename1.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumpfilename1.js
new file mode 100644
index 00000000000..cb0255afefc
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumpfilename1.js
@@ -0,0 +1,13 @@
+//dumpfilename1.js
+
+// Test designed to make sure that dumping a collection whose name contains "/" fails
+
+t = new ToolTest( "dumpfilename1" );
+
+t.startDB( "foo" );
+
+c = t.db;
+c.getCollection("df/").insert({ a: 3 });
+assert(t.runTool( "dump" , "--out" , t.ext ) != 0, "dump should fail with non-zero return code")
+t.stop();
+
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore1.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore1.js
new file mode 100644
index 00000000000..fd1e8789ea6
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore1.js
@@ -0,0 +1,23 @@
+// dumprestore1.js
+
+t = new ToolTest( "dumprestore1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save( { a : 22 } );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "dump" , "--out" , t.ext );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+// ensure that --collection is used with --db. See SERVER-7721
+var ret = t.runTool( "dump" , "--collection" , "col" );
+assert.neq( ret, 0, "mongodump should return failure code" );
+t.stop();
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore10.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore10.js
new file mode 100644
index 00000000000..49f008ea591
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore10.js
@@ -0,0 +1,63 @@
+// simple test to ensure write concern functions as expected
+
+var name = "dumprestore10";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+var total = 1000;
+
+{
+ step("store data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < total; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("mongodump from replset");
+
+var data = MongoRunner.dataDir + "/dumprestore10-dump1/";
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+replTest.ports[0], "--out", data );
+
+
+{
+ step("remove data after dumping");
+ master.getDB("foo").getCollection("bar").drop();
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("try mongorestore with write concern");
+
+runMongoProgram( "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:"+replTest.ports[0], "--dir", data );
+
+var x = 0;
+
+// no waiting for replication
+x = master.getDB("foo").getCollection("bar").count();
+
+assert.eq(x, total, "mongorestore should have successfully restored the collection");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore3.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore3.js
new file mode 100644
index 00000000000..fe9f54d704c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore3.js
@@ -0,0 +1,60 @@
+// dumprestore3.js
+
+var name = "dumprestore3";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+{
+ step("populate master");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait for slaves");
+ replTest.awaitReplication();
+}
+
+{
+ step("dump & restore a db into a slave");
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+ var c = conn.getDB("foo").bar;
+ c.save({ a: 22 });
+ assert.eq(1, c.count(), "setup2");
+}
+
+step("try mongorestore to slave");
+
+var data = MongoRunner.dataDir + "/dumprestore3-other1/";
+resetDbpath(data);
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+port, "--out", data );
+
+var x = runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+replTest.ports[1], "--dir", data );
+assert.eq(x, 1, "mongorestore should exit w/ 1 on slave");
+
+step("try mongoimport to slave");
+
+dataFile = MongoRunner.dataDir + "/dumprestore3-other2.json";
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:"+port, "--out", dataFile, "--db", "foo", "--collection", "bar" );
+
+x = runMongoProgram( "mongoimport", "--host", "127.0.0.1:"+replTest.ports[1], "--file", dataFile );
+assert.eq(x, 1, "mongoreimport should exit w/ 1 on slave"); // windows return is signed
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore4.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore4.js
new file mode 100644
index 00000000000..568e196061f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore4.js
@@ -0,0 +1,42 @@
+// dumprestore4.js -- see SERVER-2186
+
+// The point of this test is to ensure that mongorestore successfully
+// constructs indexes when the database being restored into has a
+// different name than the database dumped from. There are 2
+// issues here: (1) if you dumped from database "A" and restore into
+// database "B", B should have exactly the right indexes; (2) if for
+// some reason you have another database called "A" at the time of the
+// restore, mongorestore shouldn't touch it.
+
+t = new ToolTest( "dumprestore4" );
+
+c = t.startDB( "dumprestore4" );
+
+db=t.db
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db2=db.getSisterDB( dbname2 );
+
+db.dropDatabase(); // make sure it's empty
+db2.dropDatabase(); // make sure everybody's empty
+
+assert.eq( 0 , db.system.indexes.count() , "setup1" );
+c.ensureIndex({ x : 1} );
+assert.eq( 2 , db.system.indexes.count() , "setup2" ); // _id and x_1
+
+assert.eq( 0, t.runTool( "dump" , "-d" , dbname, "--out", t.ext ), "dump")
+
+// to ensure issue (2), we have to clear out the first db.
+// By inspection, db.dropIndexes() doesn't get rid of the _id index on c,
+// so we have to drop the collection.
+c.drop();
+assert.eq( 0, t.runTool( "restore" , "--dir" , t.ext + "/" + dbname, "-d", dbname2 ), "restore" );
+
+// issue (1)
+assert.eq( 2 , db2.system.indexes.count() , "after restore 1" );
+// issue (2)
+assert.eq( 0 , db.system.indexes.count() , "after restore 2" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore6.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore6.js
new file mode 100644
index 00000000000..d8b349e9589
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore6.js
@@ -0,0 +1,27 @@
+// dumprestore6.js
+// Test restoring from a dump with an old index version
+
+t = new ToolTest( "dumprestore6" );
+
+c = t.startDB( "foo" );
+db = t.db
+assert.eq( 0 , c.count() , "setup1" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6")
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore" );
+assert.eq( 1 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't updated")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+db.dropDatabase()
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6", "--keepIndexVersion")
+
+assert.soon( "c.findOne()" , "no data after sleep2" );
+assert.eq( 1 , c.count() , "after restore2" );
+assert.eq( 0 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't maintained")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore7.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore7.js
new file mode 100644
index 00000000000..b28a056422d
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore7.js
@@ -0,0 +1,62 @@
+var name = "dumprestore7";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 1} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+{
+ step("first chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+ var time = replTest.getPrimary().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next();
+ step(time.ts.t);
+}
+
+{
+ step("second chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 30; i < 50; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+{
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+}
+
+step("try mongodump with $timestamp");
+
+var data = MongoRunner.dataDir + "/data/db/dumprestore7-dump1/";
+var query = "{\"ts\":{\"$gt\":{\"$timestamp\":{\"t\":"+ time.ts.t + ",\"i\":" + time.ts.i +"}}}}";
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+replTest.ports[0], "--db", "local", "--collection", "oplog.rs", "--query", query, "--out", data );
+
+step("try mongorestore from $timestamp");
+
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+port, "--dir", data );
+var x = 9;
+x = conn.getDB("local").getCollection("oplog.rs").count();
+
+assert.eq(x, 20, "mongorestore should only have the latter 20 entries");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
+
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore8.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore8.js
new file mode 100644
index 00000000000..4e6591738d6
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore8.js
@@ -0,0 +1,105 @@
+// dumprestore8.js
+
+// This file tests that indexes and capped collection options get properly dumped and restored.
+// It checks that this works both when doing a full database dump/restore and when doing it just for a single db or collection
+
+t = new ToolTest( "dumprestore8" );
+
+t.startDB( "foo" );
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+assert.eq( 0 , db.foo.count() , "setup1" );
+db.foo.save( { a : 1, b : 1 } );
+db.foo.ensureIndex({a:1});
+db.foo.ensureIndex({b:1, _id:-1});
+assert.eq( 1 , db.foo.count() , "setup2" );
+
+
+assert.eq( 0 , db.bar.count() , "setup3" );
+db.createCollection("bar", {capped:true, size:1000});
+
+for (var i = 0; i < 1000; i++) {
+ db.bar.save( { x : i } );
+}
+db.bar.ensureIndex({x:1});
+
+barDocCount = db.bar.count();
+assert.gt( barDocCount, 0 , "No documents inserted" );
+assert.lt( db.bar.count(), 1000 , "Capped collection didn't evict documents" );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created right" );
+
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped" );
+assert.eq( 0 , db.bar.count() , "bar not dropped" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore." );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore" );
+
+
+// Dump/restore single DB
+
+dumppath = t.ext + "singledbdump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped2" );
+assert.eq( 0 , db.bar.count() , "bar not dropped2" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped2" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname );
+
+db = db.getSiblingDB(dbname2);
+
+assert.soon( "db.foo.findOne()" , "no data after sleep 2" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo 2" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore 2." );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore 2" );
+
+
+// Dump/restore single collection
+
+dumppath = t.ext + "singlecolldump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname2, "-c", "bar", "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.bar.count() , "bar not dropped3" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped3" );
+
+t.runTool( "restore" , "-d", dbname, "-c", "baz", "--dir" , dumppath + dbname2 + "/bar.bson" );
+
+db = db.getSiblingDB(dbname);
+
+assert.soon( "db.baz.findOne()" , "no data after sleep 2" );
+assert.eq( barDocCount, db.baz.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.baz.save({x:i});
+}
+assert.eq( barDocCount, db.baz.count(), "Capped collection didn't evict documents after restore 3." );
+assert.eq( 2 , db.system.indexes.count() , "Indexes weren't created correctly by restore 3" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore9.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore9.js
new file mode 100644
index 00000000000..a4a98e8e430
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore9.js
@@ -0,0 +1,79 @@
+if (0) { // Test disabled until SERVER-3853 is finished.
+var name = "dumprestore9";
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+s = new ShardingTest( "dumprestore9a", 2, 0, 3, {chunksize:1} );
+
+step("Shard collection");
+
+s.adminCommand( { enablesharding : "aaa" } ); // Make this db alphabetically before 'config' so it gets restored first
+s.adminCommand( { shardcollection : "aaa.foo" , key : { x : 1 } } );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+step("insert data");
+
+str = 'a';
+while (str.length < 1024*512) {
+ str += str;
+}
+
+numDocs = 20;
+for (var i = 0; i < numDocs; i++) {
+ coll.insert({x:i, str:str});
+}
+
+step("Wait for balancing");
+
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "aaa" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 );
+
+assert.eq(numDocs, coll.count(), "Documents weren't inserted correctly");
+
+step("dump cluster");
+
+dumpdir = "/data/db/dumprestore9-dump1/";
+resetDbpath(dumpdir);
+runMongoProgram( "mongodump", "--host", s._mongos[0].host, "--out", dumpdir );
+
+step("Shutting down cluster");
+
+s.stop();
+
+step("Starting up clean cluster");
+s = new ShardingTest( "dumprestore9b", 2, 0, 3, {chunksize:1} );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+assert.eq(0, coll.count(), "Data wasn't cleaned up by restarting sharding test");
+
+step("Restore data and config");
+
+runMongoProgram( "mongorestore", dumpdir, "--host", s._mongos[1].host, "--restoreShardingConfig", "--forceConfigRestore");
+
+config = s.getDB("config");
+assert(config.databases.findOne({_id:'aaa'}).partitioned, "Config data wasn't restored properly");
+
+assert( s.chunkDiff( "foo" , "aaa" ) < 2, "Chunk data wasn't restored properly");
+
+assert.eq(numDocs, coll.count(), "Didn't restore all documents properly2");
+assert.eq(numDocs, coll.find().itcount(), "Didn't restore all documents properly");
+
+for (var i = 0; i < numDocs; i++) {
+ doc = coll.findOne({x:i});
+ assert.eq(i, doc.x, "Doc missing from the shard it should be on");
+}
+
+for (var i = 0; i < s._connections.length; i++) {
+ assert(s._connections[i].getDB("aaa").foo.count() > 0, "No data on shard: " + s._connections[i].host);
+}
+
+step("Stop cluster");
+s.stop();
+step("SUCCESS");
+}
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumprestoreWithNoOptions.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestoreWithNoOptions.js
new file mode 100644
index 00000000000..dd3300ad4f9
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestoreWithNoOptions.js
@@ -0,0 +1,117 @@
+// SERVER-6366
+// relates to SERVER-808
+//
+// This file tests that options are not restored upon
+// mongorestore with --noOptionsRestore
+//
+// It checks that this works both when doing a full
+// database dump/restore and when doing it just for a
+// single db or collection.
+
+t = new ToolTest( "dumprestoreWithNoOptions" );
+
+t.startDB( "foo" );
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+var options = { capped: true, size: 1000, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext , "--noOptionsRestore", "-vvv");
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+
+// Note: The 2.4 server seems to have a bug where part of the "create" command
+// itself ends up being set in the collection options object. So as a workaround,
+// we will just delete that key and make sure there are no other keys set in
+// the options object.
+opts = db.capped.exists().options;
+delete opts.create;
+assert.eq(Object.keys(opts).length, 0, "restore options not ignored");
+
+// Dump/restore single DB
+
+db.dropDatabase();
+var options = { capped: true, size: 1000, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+dumppath = t.ext + "noOptionsSingleDump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname, "--noOptionsRestore");
+
+db = db.getSiblingDB(dbname2);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+
+opts = db.capped.exists().options;
+delete opts.create;
+assert.eq(Object.keys(opts).length, 0, "restore options not ignored");
+
+// Dump/restore single collection
+
+db.dropDatabase();
+var options = { capped: true, size: 1000, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+dumppath = t.ext + "noOptionsSingleColDump/";
+mkdir(dumppath);
+dbname = db.getName();
+t.runTool( "dump" , "-d", dbname, "-c", "capped", "--out" , dumppath );
+
+db.dropDatabase();
+
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore", "-d", dbname, "--drop", "--noOptionsRestore", "--dir", dumppath + dbname );
+
+db = db.getSiblingDB(dbname);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert( true !== db.capped.stats().capped, "restore options were not ignored" );
+opts = db.capped.exists().options;
+delete opts.create;
+assert.eq(Object.keys(opts).length, 0, "restore options not ignored");
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore_auth.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore_auth.js
new file mode 100644
index 00000000000..6f0e6c0a05c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumprestore_auth.js
@@ -0,0 +1,28 @@
+// dumprestore_auth.js
+
+t = new ToolTest("dumprestore_auth", { auth : "" });
+
+c = t.startDB("foo");
+
+adminDB = c.getDB().getSiblingDB('admin');
+adminDB.addUser('admin', 'password');
+adminDB.auth('admin','password');
+
+assert.eq(0 , c.count() , "setup1");
+c.save({ a : 22 });
+assert.eq(1 , c.count() , "setup2");
+
+t.runTool("dump" , "--out" , t.ext, "--username", "admin", "--password", "password");
+
+c.drop();
+assert.eq(0 , c.count() , "after drop");
+
+t.runTool("restore" , "--dir" , t.ext); // Should fail
+assert.eq(0 , c.count() , "after restore without auth");
+
+t.runTool("restore" , "--dir" , t.ext, "--username", "admin", "--password", "password");
+assert.soon("c.findOne()" , "no data after sleep");
+assert.eq(1 , c.count() , "after restore 2");
+assert.eq(22 , c.findOne().a , "after restore 2");
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/dumpsecondary.js b/src/mongo/gotools/test/legacy24/jstests/tool/dumpsecondary.js
new file mode 100644
index 00000000000..7a641542498
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/dumpsecondary.js
@@ -0,0 +1,38 @@
+var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} );
+
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+db = master.getDB("foo")
+db.foo.save({a: 1000});
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
+
+assert.eq( 1 , db.foo.count() , "setup" );
+
+var slaves = replTest.liveNodes.slaves;
+assert( slaves.length == 1, "Expected 1 slave but length was " + slaves.length );
+slave = slaves[0];
+
+var args = ['mongodump', '-h', slave.host, '--out', MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+var authargs = ['--username', jsTest.options().authUser, '--password', jsTest.options().authPassword];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args);
+db.foo.drop()
+
+assert.eq( 0 , db.foo.count() , "after drop" );
+args = ['mongorestore', '-h', master.host, MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args)
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "after restore" );
+assert.eq( 1000 , db.foo.findOne().a , "after restore 2" );
+
+resetDbpath(MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external')
+
+replTest.stopSet(15)
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/exportimport1.js b/src/mongo/gotools/test/legacy24/jstests/tool/exportimport1.js
new file mode 100644
index 00000000000..a7a7bcee90c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/exportimport1.js
@@ -0,0 +1,66 @@
+// exportimport1.js
+
+t = new ToolTest( "exportimport1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+var arr = ["x", undefined, "y", undefined];
+c.save( { a : 22 , b : arr} );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+assert.eq( 22 , doc.a , "after restore 2" );
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+        // undefined should round-trip as { "$undefined" : true }, but it comes back as null; accept null as a workaround for SERVER-6102
+ assert.eq( null, doc.b[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.b[i] , "after restore array: "+i );
+ }
+}
+
+// now with --jsonArray
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+arr = ["a", undefined, "c"];
+c.save({a : arr});
+assert.eq( 1 , c.count() , "setup2" );
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+        // undefined should round-trip as { "$undefined" : true }, but it comes back as null; accept null as a workaround for SERVER-6102
+ assert.eq( null, doc.a[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.a[i] , "after restore array: "+i );
+ }
+}
+
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/exportimport3.js b/src/mongo/gotools/test/legacy24/jstests/tool/exportimport3.js
new file mode 100644
index 00000000000..f18ba6cbd4b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/exportimport3.js
@@ -0,0 +1,27 @@
+// exportimport3.js
+
+t = new ToolTest( "exportimport3" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save({a:1})
+c.save({a:2})
+c.save({a:3})
+c.save({a:4})
+c.save({a:5})
+
+assert.eq( 5 , c.count() , "setup2" );
+
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 5 , c.count() , "after restore 2" );
+
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/exportimport4.js b/src/mongo/gotools/test/legacy24/jstests/tool/exportimport4.js
new file mode 100644
index 00000000000..605e21b7337
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/exportimport4.js
@@ -0,0 +1,56 @@
+// exportimport4.js
+
+t = new ToolTest( "exportimport4" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, NaN, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ NaN ] } );
+ c.save( { a : [1, 2, 3, 4, NaN, NaN, 5, NaN] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+
+ assert.eq( 5 , c.count() , "setup2" );
+};
+
+// attempt to export fields without NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[NaN]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 1" );
+
+// attempt to export fields with NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:NaN}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 5 , c.count() , "after restore 3" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/exportimport5.js b/src/mongo/gotools/test/legacy24/jstests/tool/exportimport5.js
new file mode 100644
index 00000000000..427b03f0232
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/exportimport5.js
@@ -0,0 +1,81 @@
+// exportimport5.js
+
+t = new ToolTest( "exportimport5" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, Infinity, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ Infinity ] } );
+ c.save( { a : [1, 2, 3, 4, Infinity, Infinity, 5, -Infinity] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+ c.save( { a : [ -Infinity ] } );
+
+ assert.eq( 6 , c.count() , "setup2" );
+};
+
+// attempt to export fields without Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 1" );
+
+// attempt to export fields with Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export fields without -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[-Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 4 , c.count() , "after restore 3" );
+
+// attempt to export fields with -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:-Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 4" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 6 , c.count() , "after restore 5" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/files1.js b/src/mongo/gotools/test/legacy24/jstests/tool/files1.js
new file mode 100644
index 00000000000..acfcc16dcc3
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/files1.js
@@ -0,0 +1,27 @@
+// files1.js
+
+t = new ToolTest( "files1" )
+
+db = t.startDB();
+
+filename = 'mongod'
+if ( _isWindows() )
+ filename += '.exe'
+
+t.runTool( "files" , "-d" , t.baseName , "put" , filename );
+md5 = md5sumFile(filename);
+
+file_obj = db.fs.files.findOne()
+assert( file_obj , "A 0" );
+md5_stored = file_obj.md5;
+md5_computed = db.runCommand({filemd5: file_obj._id}).md5;
+assert.eq( md5 , md5_stored , "A 1" );
+assert.eq( md5 , md5_computed, "A 2" );
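+// filemd5 recomputes the hash server-side from the stored chunks, so matching
+// it against the local md5sum confirms the upload round-tripped intact.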
+
+mkdir(t.ext);
+
+t.runTool( "files" , "-d" , t.baseName , "get" , filename , '-l' , t.extFile );
+md5 = md5sumFile(t.extFile);
+assert.eq( md5 , md5_stored , "B" );
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/oplog1.js b/src/mongo/gotools/test/legacy24/jstests/tool/oplog1.js
new file mode 100644
index 00000000000..0429e6e3416
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/oplog1.js
@@ -0,0 +1,29 @@
+// oplog1.js
+
+// very basic test for mongooplog
+// need a lot more, but test that it functions at all
+
+t = new ToolTest( "oplog1" );
+
+db = t.startDB();
+
+output = db.output
+
+doc = { x : 17, _id: 5 };
+
+db.oplog.insert( { ts : new Timestamp() , "op" : "i" , "ns" : output.getFullName() , "o" : doc } );
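+// This hand-crafted entry mimics an insert ("op" : "i") in oplog format;
+// mongooplog should replay it into the output collection.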
+
+assert.eq( 0 , output.count() , "before" )
+
+t.runTool( "oplog" , "--oplogns" , db.getName() + ".oplog" , "--from" , "127.0.0.1:" + t.port , "-vv" );
+
+assert.eq( 1 , output.count() , "after" );
+
+var res = output.findOne()
+assert.eq( doc["x"], res["x"], "have same val for x after check" )
+assert.eq( doc["_id"], res["_id"], "have same val for _id after check" )
+assert.eq( Object.keys(doc).length, Object.keys(res).length, "have same amount of keys after check" )
+
+t.stop();
+
+
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/oplog_all_ops.js b/src/mongo/gotools/test/legacy24/jstests/tool/oplog_all_ops.js
new file mode 100644
index 00000000000..8f231cb233d
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/oplog_all_ops.js
@@ -0,0 +1,61 @@
+/**
+ * Performs a simple test on mongooplog by doing different types of operations
+ * that will show up in the oplog then replaying it on another replica set.
+ * Correctness is verified using the dbhash command.
+ */
+
+var repl1 = new ReplSetTest({ name: 'rs1', nodes: [{ nopreallocj: '' },
+ { arbiter: true }, { arbiter: true }]});
+
+repl1.startSet({ oplogSize: 10 });
+repl1.initiate();
+repl1.awaitSecondaryNodes();
+
+var repl1Conn = new Mongo(repl1.getURL());
+var testDB = repl1Conn.getDB('test');
+var testColl = testDB.user;
+
+// op i
+testColl.insert({ x: 1 });
+testColl.insert({ x: 2 });
+
+// op c
+testDB.dropDatabase();
+
+testColl.insert({ y: 1 });
+testColl.insert({ y: 2 });
+testColl.insert({ y: 3 });
+
+// op u
+testColl.update({}, { $inc: { z: 1 }}, true, true);
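+// update(query, obj, upsert, multi): with multi=true this touches every
+// document, producing one "u" oplog entry per matched document.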
+
+// op d
+testColl.remove({ y: 2 });
+
+// op n
+var oplogColl = repl1Conn.getCollection('local.oplog.rs');
+oplogColl.insert({ ts: new Timestamp(), op: 'n', ns: testColl.getFullName(), 'o': { x: 'noop' }});
+
+var repl2 = new ReplSetTest({ name: 'rs2', startPort: 31100, nodes: [{ nopreallocj: '' },
+ { arbiter: true }, { arbiter: true }]});
+
+repl2.startSet({ oplogSize: 10 });
+repl2.initiate();
+repl2.awaitSecondaryNodes();
+
+var srcConn = repl1.getPrimary();
+runMongoProgram('mongooplog', '--from', repl1.getPrimary().host,
+ '--host', repl2.getPrimary().host);
+
+var repl1Hash = testDB.runCommand({ dbhash: 1 });
+
+var repl2Conn = new Mongo(repl2.getURL());
+var testDB2 = repl2Conn.getDB(testDB.getName());
+var repl2Hash = testDB2.runCommand({ dbhash: 1 });
+
+assert(repl1Hash.md5);
+assert.eq(repl1Hash.md5, repl2Hash.md5);
+
+repl1.stopSet();
+repl2.stopSet();
+
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/restorewithauth.js b/src/mongo/gotools/test/legacy24/jstests/tool/restorewithauth.js
new file mode 100644
index 00000000000..a759ccf038c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/restorewithauth.js
@@ -0,0 +1,113 @@
+/* SERVER-4972
+ * Test that mongorestore against a server running with --auth does not allow
+ * restoring collections without credentials (previously possible for
+ * collections with no index)
+ */
+/*
+ * 1) Start mongo without auth.
+ * 2) Write to collection
+ * 3) Take dump of the collection using mongodump.
+ * 4) Drop the collection.
+ * 5) Stop mongod from step 1.
+ * 6) Restart mongod with auth.
+ * 7) Add admin user to kick authentication
+ * 8) Try restore without auth credentials. The restore should fail
+ * 9) Try restore with correct auth credentials. The restore should succeed this time.
+ */
+
+var port = allocatePorts(1)[0];
+baseName = "jstests_restorewithauth";
+var conn = startMongod( "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// write to ns foo.bar
+var foo = conn.getDB( "foo" );
+for( var i = 0; i < 4; i++ ) {
+ foo["bar"].save( { "x": i } );
+ foo["baz"].save({"x": i});
+}
+
+// make sure the collection exists
+assert.eq( foo.system.namespaces.count({name: "foo.bar"}), 1 )
+
+// make sure neither collection has an index other than _id (two _id indexes total)
+assert.eq(foo.system.indexes.count(), 2);
+
+foo.bar.createIndex({x:1});
+assert.eq(foo.system.indexes.count(), 3);
+
+// get data dump
+var dumpdir = MongoRunner.dataDir + "/restorewithauth-dump1/";
+resetDbpath( dumpdir );
+x = runMongoProgram("mongodump", "--db", "foo", "-h", "127.0.0.1:"+port, "--out", dumpdir);
+
+// now drop the db
+foo.dropDatabase();
+
+// stop mongod
+stopMongod( port );
+
+// start mongod with --auth
+conn = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// admin user
+var admin = conn.getDB( "admin" )
+admin.addUser( "admin" , "admin" );
+admin.auth( "admin" , "admin" );
+
+var foo = conn.getDB( "foo" )
+
+// make sure no collection with the same name exists
+assert.eq(foo.system.namespaces.count( {name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count( {name: "foo.baz"}), 0);
+
+// now try to restore dump
+x = runMongoProgram( "mongorestore", "-h", "127.0.0.1:" + port, "--dir" , dumpdir, "-vvvvv" );
+
+// make sure that the collection isn't restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0);
+
+// now try to restore dump with correct credentials
+x = runMongoProgram( "mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "--authenticationDatabase=admin",
+ "-u", "admin",
+ "-p", "admin",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1);
+
+// make sure the collection has 4 documents
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+
+foo.dropDatabase();
+
+// make sure that the collection is empty
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0);
+
+foo.addUser('user', 'password');
+
+// now try to restore dump with foo database credentials
+x = runMongoProgram("mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "-u", "user",
+ "-p", "password",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1);
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+assert.eq(foo.system.indexes.count(), 5); // _id on bar, _id on baz, x_1 on bar, plus _id and one more on system.users
+
+stopMongod( port );
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/stat1.js b/src/mongo/gotools/test/legacy24/jstests/tool/stat1.js
new file mode 100644
index 00000000000..d2e00756e75
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/stat1.js
@@ -0,0 +1,23 @@
+// stat1.js
+// test mongostat with authentication SERVER-3875
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_stat1";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", "/data/db/" + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+t = db[ baseName ];
+t.drop();
+
+users = db.getCollection( "system.users" );
+users.remove( {} );
+
+db.addUser( "eliot" , "eliot" );
+
+assert( db.auth( "eliot" , "eliot" ) , "auth failed" );
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "eliot", "--rowcount", "1", "--authenticationDatabase", "admin");
+assert.eq(x, 0, "mongostat should exit successfully with eliot:eliot");
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "wrong", "--rowcount", "1", "--authenticationDatabase", "admin");
+assert.eq(x, 1, "mongostat should exit with -1 with eliot:wrong");
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/tool_replset.js b/src/mongo/gotools/test/legacy24/jstests/tool/tool_replset.js
new file mode 100644
index 00000000000..bc50a0fd7d4
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/tool_replset.js
@@ -0,0 +1,89 @@
+/*
+ * Test to ensure that (dump/restore/export/import/oplog) works with a replica set connection string
+ * 1. Start a replica set.
+ * 2. Add data to a collection.
+ * 3. Take a dump of the database.
+ * 4. Drop the db.
+ * 5. Restore the db.
+ * 6. Export a collection.
+ * 7. Drop the collection.
+ * 8. Import the collection.
+ * 9. Add data to the oplog.rs collection.
+ * 10. Ensure that the document doesn't exist yet.
+ * 11. Now play the mongooplog tool.
+ * 12. Make sure that the oplog was played
+*/
+
+// Load utility methods for replica set tests
+load("jstests/replsets/rslib.js");
+
+print("starting the replica set")
+
+var replTest = new ReplSetTest({ name: 'tool_replset', nodes: 2, oplogSize: 5 });
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+for (var i = 0; i < 100; i++) {
+ master.getDB("foo").bar.insert({ a: i });
+}
+replTest.awaitReplication();
+
+var replSetConnString = "tool_replset/127.0.0.1:" + replTest.ports[0] +
+ ",127.0.0.1:" + replTest.ports[1];
+
+// Test with mongodump/mongorestore
+print("dump the db");
+var data = MongoRunner.dataDir + "/tool_replset-dump1/";
+runMongoProgram("mongodump", "--host", replSetConnString, "--out", data);
+
+print("db successfully dumped, dropping now");
+master.getDB("foo").dropDatabase();
+replTest.awaitReplication();
+
+print("restore the db");
+runMongoProgram("mongorestore", "--host", replSetConnString, "--dir", data);
+
+print("db successfully restored, checking count")
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongorestore should have successfully restored the collection");
+
+replTest.awaitReplication();
+
+// Test with mongoexport/mongoimport
+print("export the collection");
+var extFile = MongoRunner.dataDir + "/tool_replset/export";
+runMongoProgram("mongoexport", "--host", replSetConnString, "--out", extFile,
+ "-d", "foo", "-c", "bar");
+
+print("collection successfully exported, dropping now");
+master.getDB("foo").getCollection("bar").drop();
+replTest.awaitReplication();
+
+print("import the collection");
+runMongoProgram("mongoimport", "--host", replSetConnString, "--file", extFile,
+ "-d", "foo", "-c", "bar");
+
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongoimport should have successfully imported the collection");
+
+// Test with mongooplog
+var doc = { _id : 5, x : 17 };
+master.getDB("local").oplog.rs.insert({ ts : new Timestamp(), "op" : "i", "ns" : "foo.bar",
+ "o" : doc });
+
+assert.eq(100, master.getDB("foo").getCollection("bar").count(), "count before running mongooplog " +
+ "was not 100 as expected");
+
+runMongoProgram("mongooplog" , "--from", "127.0.0.1:" + replTest.ports[0],
+ "--host", replSetConnString);
+
+print("running mongooplog to replay the oplog")
+
+assert.eq(101, master.getDB("foo").getCollection("bar").count(), "count after running mongooplog " +
+ "was not 101 as expected")
+
+print("all tests successful, stopping replica set")
+
+replTest.stopSet();
+
+print("replica set stopped, test complete")
diff --git a/src/mongo/gotools/test/legacy24/jstests/tool/tsv1.js b/src/mongo/gotools/test/legacy24/jstests/tool/tsv1.js
new file mode 100644
index 00000000000..1b0ddbb7c9e
--- /dev/null
+++ b/src/mongo/gotools/test/legacy24/jstests/tool/tsv1.js
@@ -0,0 +1,32 @@
+// tsv1.js
+
+t = new ToolTest( "tsv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : "", b : 1 , c : "foobar" , d: 5, e: -6 };
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "-f" , "a,b,c,d,e" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+
+assert.eq( { a : "a" , b : "b" , c : "c" , d: "d", e: "e"} , a[1] , "tsv parse 1" );
+assert.eq( base , a[0] , "tsv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.eq( base , x , "tsv parse 2" )
+
+
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy26/buildscripts/buildlogger.py b/src/mongo/gotools/test/legacy26/buildscripts/buildlogger.py
new file mode 100644
index 00000000000..a31b3e2dfa1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/buildscripts/buildlogger.py
@@ -0,0 +1,480 @@
+"""
+buildlogger.py
+
+Wrap a command (specified on the command line invocation of buildlogger.py)
+and send output in batches to the buildlogs web application via HTTP POST.
+
+The script configures itself from environment variables:
+
+ required env vars:
+ MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit")
+ MONGO_BUILD_NUMBER (an integer)
+ MONGO_TEST_FILENAME (not required when invoked with -g)
+
+ optional env vars:
+ MONGO_PHASE (e.g. "core", "slow nightly", etc)
+ MONGO_* (any other environment vars are passed to the web app)
+ BUILDLOGGER_CREDENTIALS (see below)
+
+This script has two modes: a "test" mode, intended to wrap the invocation of
+an individual test file, and a "global" mode, intended to wrap the mongod
+instances that run throughout the duration of a mongo test phase (the logs
+from "global" invocations are displayed interspersed with the logs of each
+test, in order to let the buildlogs web app display the full output sensibly.)
+
+If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a
+path to a valid Python file containing "username" and "password" variables,
+which should be valid credentials for authenticating to the buildlogger web
+app. For example:
+
+ username = "hello"
+ password = "world"
+
+If BUILDLOGGER_CREDENTIALS is a relative path, then the working directory
+and the directories one, two, and three levels up, are searched, in that
+order.
+"""
+
+import functools
+import os
+import os.path
+import re
+import signal
+import socket
+import subprocess
+import sys
+import time
+import traceback
+import urllib2
+import utils
+
+# suppress deprecation warnings that happen when
+# we import the 'buildbot.tac' file below
+import warnings
+warnings.simplefilter('ignore', DeprecationWarning)
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# try to load the shared secret from settings.py
+# which will be one, two, or three directories up
+# from this file's location
+credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac')
+credentials_loc, credentials_name = os.path.split(credentials_file)
+if not credentials_loc:
+ here = os.path.abspath(os.path.dirname(__file__))
+ possible_paths = [
+ os.path.abspath(os.path.join(here, '..')),
+ os.path.abspath(os.path.join(here, '..', '..')),
+ os.path.abspath(os.path.join(here, '..', '..', '..')),
+ ]
+else:
+ possible_paths = [credentials_loc]
+
+username, password = None, None
+for path in possible_paths:
+ credentials_path = os.path.join(path, credentials_name)
+ if os.path.isfile(credentials_path):
+ credentials = {}
+ try:
+ execfile(credentials_path, credentials, credentials)
+ username = credentials.get('slavename', credentials.get('username'))
+ password = credentials.get('passwd', credentials.get('password'))
+ break
+ except:
+ pass
+
+
+URL_ROOT = os.environ.get('BUILDLOGGER_URL', 'http://buildlogs.mongodb.org/')
+TIMEOUT_SECONDS = 10
+socket.setdefaulttimeout(TIMEOUT_SECONDS)
+
+digest_handler = urllib2.HTTPDigestAuthHandler()
+digest_handler.add_password(
+ realm='buildlogs',
+ uri=URL_ROOT,
+ user=username,
+ passwd=password)
+
+# This version of HTTPErrorProcessor is copied from
+# Python 2.7, and allows REST response codes (e.g.
+# "201 Created") which are treated as errors by
+# older versions.
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
+ def http_response(self, request, response):
+ code, msg, hdrs = response.code, response.msg, response.info()
+
+ # According to RFC 2616, "2xx" code indicates that the client's
+ # request was successfully received, understood, and accepted.
+ if not (200 <= code < 300):
+ response = self.parent.error(
+ 'http', request, response, code, msg, hdrs)
+
+ return response
+
+url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor())
+
+def url(endpoint):
+ if not endpoint.endswith('/'):
+ endpoint = '%s/' % endpoint
+
+ return '%s/%s' % (URL_ROOT.rstrip('/'), endpoint)
+
+def post(endpoint, data, headers=None):
+ data = json.dumps(data, encoding='utf-8')
+
+ headers = headers or {}
+ headers.update({'Content-Type': 'application/json; charset=utf-8'})
+
+ req = urllib2.Request(url=url(endpoint), data=data, headers=headers)
+ try:
+ response = url_opener.open(req)
+ except urllib2.URLError:
+ import traceback
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ # indicate that the request did not succeed
+ return None
+
+ response_headers = dict(response.info())
+
+ # eg "Content-Type: application/json; charset=utf-8"
+ content_type = response_headers.get('content-type')
+ match = re.match(r'(?P<mimetype>[^;]+).*(?:charset=(?P<charset>[^ ]+))?$', content_type)
+ if match and match.group('mimetype') == 'application/json':
+ encoding = match.group('charset') or 'utf-8'
+ return json.load(response, encoding=encoding)
+
+ return response.read()
+
+def traceback_to_stderr(func):
+ """
+ decorator which logs any exceptions encountered to stderr
+    and returns None.
+ """
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError, err:
+ sys.stderr.write('error: HTTP code %d\n----\n' % err.code)
+ if hasattr(err, 'hdrs'):
+ for k, v in err.hdrs.items():
+ sys.stderr.write("%s: %s\n" % (k, v))
+ sys.stderr.write('\n')
+ sys.stderr.write(err.read())
+ sys.stderr.write('\n----\n')
+ sys.stderr.flush()
+ except:
+ sys.stderr.write('Traceback from buildlogger:\n')
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ return None
+ return wrapper
+
+
+@traceback_to_stderr
+def get_or_create_build(builder, buildnum, extra={}):
+ data = {'builder': builder, 'buildnum': buildnum}
+ data.update(extra)
+ response = post('build', data)
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def create_test(build_id, test_filename, test_command, test_phase):
+ response = post('build/%s/test' % build_id, {
+ 'test_filename': test_filename,
+ 'command': test_command,
+ 'phase': test_phase,
+ })
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def append_test_logs(build_id, test_id, log_lines):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def append_global_logs(build_id, log_lines):
+ """
+ "global" logs are for the mongod(s) started by smoke.py
+ that last the duration of a test phase -- since there
+ may be output in here that is important but spans individual
+ tests, the buildlogs webapp handles these logs specially.
+ """
+ response = post('build/%s' % build_id, data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def finish_test(build_id, test_id, failed=False):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=[], headers={
+ 'X-Sendlogs-Test-Done': 'true',
+ 'X-Sendlogs-Test-Failed': failed and 'true' or 'false',
+ })
+ if response is None:
+ return False
+ return True
+
+def run_and_echo(command):
+ """
+ this just calls the command, and returns its return code,
+ allowing stdout and stderr to work as normal. it is used
+ as a fallback when environment variables or python
+ dependencies cannot be configured, or when the logging
+ webapp is unavailable, etc
+ """
+ proc = subprocess.Popen(command)
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ proc.wait()
+
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+class LogAppender(object):
+ def __init__(self, callback, args, send_after_lines=2000, send_after_seconds=10):
+ self.callback = callback
+ self.callback_args = args
+
+ self.send_after_lines = send_after_lines
+ self.send_after_seconds = send_after_seconds
+
+ self.buf = []
+ self.retrybuf = []
+ self.last_sent = time.time()
+
+ def __call__(self, line):
+ self.buf.append((time.time(), line))
+
+ delay = time.time() - self.last_sent
+ if len(self.buf) >= self.send_after_lines or delay >= self.send_after_seconds:
+ self.submit()
+
+ # no return value is expected
+
+ def submit(self):
+ if len(self.buf) + len(self.retrybuf) == 0:
+ return True
+
+ args = list(self.callback_args)
+ args.append(list(self.buf) + self.retrybuf)
+
+ self.last_sent = time.time()
+
+ if self.callback(*args):
+ self.buf = []
+ self.retrybuf = []
+ return True
+ else:
+ self.retrybuf += self.buf
+ self.buf = []
+ return False
+
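+# Minimal sketch of how wrap_test/wrap_global below drive this class:
+#   appender = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+#   appender('one line of output')  # buffered as a (timestamp, line) pair
+#   appender.submit()               # flush now; a failed batch lands in retrybuf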
+
+def wrap_test(command):
+ """
+ call the given command, intercept its stdout and stderr,
+ and send results to the buildlogger webapp in batches of
+ up to 2000 lines or every 10s (see LogAppender's defaults)
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ # test takes some extra info
+ phase = os.environ.get('MONGO_PHASE', 'unknown')
+ test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+ build_info.pop('MONGO_PHASE', None)
+ build_info.pop('MONGO_TEST_FILENAME', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ test_id = create_test(build_id, test_filename, ' '.join(command), phase)
+ if not test_id:
+ return run_and_echo(command)
+
+ # the peculiar formatting here matches what is printed by
+ # smoke.py when starting tests
+ output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
+ sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
+ sys.stdout.flush()
+
+ callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+ returncode = loop_and_callback(command, callback)
+ failed = bool(returncode != 0)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ tries = 5
+ while not finish_test(build_id, test_id, failed) and tries > 0:
+ sys.stderr.write('failed to mark test finished, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def wrap_global(command):
+ """
+ call the given command, intercept its stdout and stderr,
+ and send results to the buildlogger webapp in batches of
+ up to 2000 lines or every 10s. see :func:`append_global_logs` for the
+ difference between "global" and "test" log output.
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('int(os.environ["MONGO_BUILD_NUMBER"]):\n')
+ sys.stderr.write(traceback.format_exc())
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ callback = LogAppender(callback=append_global_logs, args=(build_id, ))
+ returncode = loop_and_callback(command, callback)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def loop_and_callback(command, callback):
+ """
+ run the given command (a sequence of arguments, ordinarily
+ from sys.argv), and call the given callback with each line
+ of stdout or stderr encountered. callers flush any still-buffered
+ lines afterwards via LogAppender.submit().
+ """
+ proc = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+
+ # register a handler to delegate SIGTERM
+ # to the child process
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ while proc.poll() is None:
+ try:
+ line = proc.stdout.readline().strip('\r\n')
+ line = utils.unicode_dammit(line)
+ callback(line)
+ except IOError:
+ # if the signal handler is called while
+ # we're waiting for readline() to return,
+ # don't show a traceback
+ break
+
+ # There may be additional buffered output
+ for line in proc.stdout.readlines():
+ callback(line.strip('\r\n'))
+
+ # restore the original signal handler, if any
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+
+if __name__ == '__main__':
+ # argv[0] is 'buildlogger.py'
+ del sys.argv[0]
+
+ if sys.argv[0] in ('-g', '--global'):
+ # then this is wrapping a "global" command, and should
+ # submit global logs to the build, not test logs to a
+ # test within the build
+ del sys.argv[0]
+ wrapper = wrap_global
+
+ else:
+ wrapper = wrap_test
+
+ # if we are missing credentials or the json module, then
+ # we can't use buildlogger; so just echo output, but also
+ # log why we can't work.
+ if json is None:
+ sys.stderr.write('buildlogger: could not import a json module\n')
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ elif username is None or password is None:
+ sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file)
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ # otherwise wrap a test command as normal; the
+ # wrapper functions return the return code of
+ # the wrapped command, so that should be our
+ # exit code as well.
+ sys.exit(wrapper(sys.argv))
+
diff --git a/src/mongo/gotools/test/legacy26/buildscripts/cleanbb.py b/src/mongo/gotools/test/legacy26/buildscripts/cleanbb.py
new file mode 100644
index 00000000000..fee7efdc0c1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/buildscripts/cleanbb.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os, os.path
+import utils
+import time
+from optparse import OptionParser
+
+def shouldKill( c, root=None ):
+
+ if "smoke.py" in c:
+ return False
+
+ if "emr.py" in c:
+ return False
+
+ if "java" in c:
+ return False
+
+ # if root directory is provided, see if command line matches mongod process running
+ # with the same data directory
+
+ if root and re.compile("(\W|^)mongod(.exe)?\s+.*--dbpath(\s+|=)%s(\s+|$)" % root).search( c ):
+ return True
+
+ if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
+ return True
+
+ if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
+ return True
+
+ return False
+
+def killprocs( signal="", root=None ):
+ killed = 0
+
+ if sys.platform == 'win32':
+ return killed
+
+ l = utils.getprocesslist()
+ print( "num procs:" + str( len( l ) ) )
+ if len(l) == 0:
+ print( "no procs" )
+ try:
+ print( utils.execsys( "/sbin/ifconfig -a" ) )
+ except Exception,e:
+ print( "can't get interfaces" + str( e ) )
+
+ for x in l:
+ x = x.lstrip()
+ if not shouldKill( x, root=root ):
+ continue
+
+ pid = x.split( " " )[0]
+ print( "killing: " + x )
+ utils.execsys( "/bin/kill " + signal + " " + pid )
+ killed = killed + 1
+
+ return killed
+
+
+def tryToRemove(path):
+ for _ in range(60):
+ try:
+ os.remove(path)
+ return True
+ except OSError, e:
+ errno = getattr(e, 'winerror', None)
+ # check for the access denied and file in use WindowsErrors
+ if errno in (5, 32):
+ print("os.remove(%s) failed, retrying in one second." % path)
+ time.sleep(1)
+ else:
+ raise e
+ return False
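+
+# For example, tryToRemove('/data/db/foo.0') (path illustrative) retries for
+# up to a minute while mongod still holds the file open on Windows (winerror
+# 5 or 32), and returns False only if the handle is never released.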
+
+
+def cleanup( root , nokill ):
+ if nokill:
+ print "nokill requested, not killing anybody"
+ else:
+ if killprocs( root=root ) > 0:
+ time.sleep(3)
+ killprocs( "-9", root=root )
+
+ # delete all regular files, directories can stay
+ # NOTE: if we delete directories later, we can't delete diskfulltest
+ for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+ for x in filenames:
+ foo = dirpath + "/" + x
+ if os.path.exists(foo):
+ if not tryToRemove(foo):
+ raise Exception("Couldn't remove file '%s' after 60 seconds" % foo)
+
+if __name__ == "__main__":
+ parser = OptionParser(usage="read the script")
+ parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+ (options, args) = parser.parse_args()
+
+ root = "/data/db/"
+ if len(args) > 0:
+ root = args[0]
+
+ cleanup( root , options.nokill )
diff --git a/src/mongo/gotools/test/legacy26/buildscripts/smoke.py b/src/mongo/gotools/test/legacy26/buildscripts/smoke.py
new file mode 100755
index 00000000000..7c8da1108f9
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/buildscripts/smoke.py
@@ -0,0 +1,1314 @@
+#!/usr/bin/env python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test",
+# "perftest"), don't take arguments for the dbpath, but
+# unconditionally use "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+
+# 4 Running a separate mongo shell for each js file is slower than
+# loading js files into one mongo shell process. Maybe have runTest
+# queue up all filenames ending in ".js" and run them in one mongo
+# shell at the "end" of testing?
+
+# 5 Right now small-oplog implies master/slave replication. Maybe
+# running with replication should be an orthogonal concern. (And
+# maybe test replica set replication, too.)
+
+# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills
+# off all mongods on a box, which means you can't run two smoke.py
+# jobs on the same host at once. So something's gotta change.
+
+from datetime import datetime
+from itertools import izip
+import glob
+from optparse import OptionParser
+import os
+import pprint
+import re
+import shlex
+import socket
+import stat
+from subprocess import (PIPE, Popen, STDOUT)
+import sys
+import time
+
+from pymongo import MongoClient
+from pymongo.errors import OperationFailure
+from pymongo import ReadPreference
+
+import cleanbb
+import utils
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ from hashlib import md5 # new in 2.5
+except ImportError:
+ from md5 import md5 # deprecated in 2.5
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+
+# TODO clean this up so we don't need globals...
+mongo_repo = os.getcwd() #'./'
+failfile = os.path.join(mongo_repo, 'failfile.smoke')
+test_path = None
+mongod_executable = None
+mongod_port = None
+shell_executable = None
+continue_on_failure = None
+file_of_commands_mode = False
+start_mongod = True
+temp_path = None
+clean_every_n_tests = 1
+clean_whole_dbroot = False
+
+tests = []
+winners = []
+losers = {}
+fails = [] # like losers but in format of tests
+
+# For replication hash checking
+replicated_collections = []
+lost_in_slave = []
+lost_in_master = []
+screwy_in_slave = {}
+
+smoke_db_prefix = ''
+small_oplog = False
+small_oplog_rs = False
+
+test_report = { "results": [] }
+report_file = None
+
+# This class just implements the with statement API, for a sneaky
+# purpose below.
+class Nothing(object):
+ def __enter__(self):
+ return self
+ def __exit__(self, type, value, traceback):
+ return not isinstance(value, Exception)
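+
+# The sneaky purpose: run_tests() below can manage "no mongod" and a real
+# mongod uniformly, roughly:
+#   master = mongod(...).__enter__() if start_mongod else Nothing()
+#   ...
+#   master.__exit__(None, None, None)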
+
+def buildlogger(cmd, is_global=False):
+ # if the environment variable MONGO_USE_BUILDLOGGER
+ # is set to 'true', then wrap the command with a call
+ # to buildlogger.py, which sends output to the buildlogger
+ # machine; otherwise, return as usual.
+ if os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true':
+ if is_global:
+ return [utils.find_python(), 'buildscripts/buildlogger.py', '-g'] + cmd
+ else:
+ return [utils.find_python(), 'buildscripts/buildlogger.py'] + cmd
+ return cmd
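+
+# For example, with MONGO_USE_BUILDLOGGER=true (python path illustrative):
+#   buildlogger(['./mongod', '--port', '27999'], is_global=True)
+#   -> ['/usr/bin/python', 'buildscripts/buildlogger.py', '-g',
+#      './mongod', '--port', '27999']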
+
+
+def clean_dbroot(dbroot="", nokill=False):
+ # Clean entire /data/db dir if --with-cleanbb, else clean specific database path.
+ if clean_whole_dbroot and not small_oplog:
+ dbroot = os.path.normpath(smoke_db_prefix + "/data/db")
+ if os.path.exists(dbroot):
+ print("clean_dbroot: %s" % dbroot)
+ cleanbb.cleanup(dbroot, nokill)
+
+
+class mongod(object):
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+ self.proc = None
+ self.auth = False
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ try:
+ self.stop()
+ except Exception, e:
+ print >> sys.stderr, "error shutting down mongod"
+ print >> sys.stderr, e
+ return not isinstance(value, Exception)
+
+ def ensure_test_dirs(self):
+ utils.ensureDir(smoke_db_prefix + "/tmp/unittest/")
+ utils.ensureDir(smoke_db_prefix + "/data/")
+ utils.ensureDir(smoke_db_prefix + "/data/db/")
+
+ def check_mongo_port(self, port=27017):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", int(port)))
+ sock.close()
+
+ def is_mongod_up(self, port=mongod_port):
+ try:
+ self.check_mongo_port(int(port))
+ return True
+ except Exception,e:
+ print >> sys.stderr, e
+ return False
+
+ def did_mongod_start(self, port=mongod_port, timeout=300):
+ while timeout > 0:
+ time.sleep(1)
+ is_up = self.is_mongod_up(port)
+ if is_up:
+ return True
+ timeout = timeout - 1
+ print >> sys.stderr, "timeout starting mongod"
+ return False
+
+ def start(self):
+ global mongod_port
+ global mongod
+ if self.proc:
+ print >> sys.stderr, "probable bug: self.proc already set in start()"
+ return
+ self.ensure_test_dirs()
+ dir_name = smoke_db_prefix + "/data/db/sconsTests/"
+ self.port = int(mongod_port)
+ self.slave = False
+ if 'slave' in self.kwargs:
+ dir_name = smoke_db_prefix + '/data/db/sconsTestsSlave/'
+ srcport = mongod_port
+ self.port += 1
+ self.slave = True
+
+ clean_dbroot(dbroot=dir_name, nokill=self.slave)
+ utils.ensureDir(dir_name)
+
+ argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
+ # These parameters are always set for tests
+ # SERVER-9137 Added httpinterface parameter to keep previous behavior
+ argv += ['--setParameter', 'enableTestCommands=1', '--httpinterface']
+ if self.kwargs.get('small_oplog'):
+ argv += ["--master", "--oplogSize", "511"]
+ params = self.kwargs.get('set_parameters', None)
+ if params:
+ for p in params.split(','): argv += ['--setParameter', p]
+ if self.kwargs.get('small_oplog_rs'):
+ argv += ["--replSet", "foo", "--oplogSize", "511"]
+ if self.slave:
+ argv += ['--slave', '--source', 'localhost:' + str(srcport)]
+ if self.kwargs.get('no_journal'):
+ argv += ['--nojournal']
+ if self.kwargs.get('no_preallocj'):
+ argv += ['--nopreallocj']
+ if self.kwargs.get('auth'):
+ argv += ['--auth', '--setParameter', 'enableLocalhostAuthBypass=false']
+ authMechanism = self.kwargs.get('authMechanism', 'MONGODB-CR')
+ if authMechanism != 'MONGODB-CR':
+ argv += ['--setParameter', 'authenticationMechanisms=' + authMechanism]
+ self.auth = True
+ if self.kwargs.get('keyFile'):
+ argv += ['--keyFile', self.kwargs.get('keyFile')]
+ if self.kwargs.get('use_ssl') or self.kwargs.get('use_x509'):
+ argv += ['--sslMode', "requireSSL",
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation']
+ if self.kwargs.get('use_x509'):
+ argv += ['--clusterAuthMode','x509'];
+ self.auth = True
+ print "running " + " ".join(argv)
+ self.proc = self._start(buildlogger(argv, is_global=True))
+
+ if not self.did_mongod_start(self.port):
+ raise Exception("Failed to start mongod")
+
+ if self.slave:
+ local = MongoClient(port=self.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).local
+ synced = False
+ while not synced:
+ synced = True
+ for source in local.sources.find(fields=["syncedTo"]):
+ synced = synced and "syncedTo" in source and source["syncedTo"]
+
+ def _start(self, argv):
+ """In most cases, just call subprocess.Popen(). On windows,
+ add the started process to a new Job Object, so that any
+ child processes of this process can be killed with a single
+ call to TerminateJobObject (see self.stop()).
+ """
+
+ if os.sys.platform == "win32":
+ # Create a job object with the "kill on job close"
+ # flag; this is inherited by child processes (ie
+ # the mongod started on our behalf by buildlogger)
+ # and lets us terminate the whole tree of processes
+ # rather than orphaning the mongod.
+ import win32job
+
+ # Magic number needed to allow job reassignment in Windows 7
+ # see: MSDN - Process Creation Flags - ms684863
+ CREATE_BREAKAWAY_FROM_JOB = 0x01000000
+
+ proc = Popen(argv, creationflags=CREATE_BREAKAWAY_FROM_JOB)
+
+ self.job_object = win32job.CreateJobObject(None, '')
+
+ job_info = win32job.QueryInformationJobObject(
+ self.job_object, win32job.JobObjectExtendedLimitInformation)
+ job_info['BasicLimitInformation']['LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ win32job.SetInformationJobObject(
+ self.job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ win32job.AssignProcessToJobObject(self.job_object, proc._handle)
+
+ else:
+ proc = Popen(argv)
+
+ return proc
+
+ def stop(self):
+ if not self.proc:
+ print >> sys.stderr, "probable bug: self.proc unset in stop()"
+ return
+ try:
+ if os.sys.platform == "win32":
+ import win32job
+ win32job.TerminateJobObject(self.job_object, -1)
+ import time
+ # Windows doesn't seem to kill the process immediately, so give it some time to die
+ time.sleep(5)
+ else:
+ # This function not available in Python 2.5
+ self.proc.terminate()
+ except AttributeError:
+ from os import kill
+ kill(self.proc.pid, 15)
+ self.proc.wait()
+ sys.stderr.flush()
+ sys.stdout.flush()
+
+ def wait_for_repl(self):
+ print "Awaiting replicated (w:2, wtimeout:5min) insert (port:" + str(self.port) + ")"
+ MongoClient(port=self.port).testing.smokeWait.insert({}, w=2, wtimeout=5*60*1000)
+ print "Replicated write completed -- done wait_for_repl"
+
+class Bug(Exception):
+ def __str__(self):
+ return 'bug in smoke.py: ' + super(Bug, self).__str__()
+
+class TestFailure(Exception):
+ pass
+
+class TestExitFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status=args[1]
+
+ def __str__(self):
+ return "test %s exited with status %d" % (self.path, self.status)
+
+class TestServerFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status = -1 # this is meaningless as an exit code, but
+ # that's the point.
+ def __str__(self):
+ return 'mongod not running after executing test %s' % self.path
+
+def check_db_hashes(master, slave):
+ # Need to pause a bit so a slave might catch up...
+ if not slave.slave:
+ raise Bug("slave instance doesn't have slave attribute set")
+
+ print "waiting for slave (%s) to catch up to master (%s)" % (slave.port, master.port)
+ master.wait_for_repl()
+ print "caught up!"
+
+ # FIXME: maybe make this run dbhash on all databases?
+ for mongod in [master, slave]:
+ client = MongoClient(port=mongod.port, read_preference=ReadPreference.SECONDARY_PREFERRED)
+ mongod.dbhash = client.test.command("dbhash")
+ mongod.dict = mongod.dbhash["collections"]
+
+ global lost_in_slave, lost_in_master, screwy_in_slave, replicated_collections
+
+ replicated_collections += master.dict.keys()
+
+ for coll in replicated_collections:
+ if coll not in slave.dict and coll not in lost_in_slave:
+ lost_in_slave.append(coll)
+ mhash = master.dict[coll]
+ shash = slave.dict[coll]
+ if mhash != shash:
+ mTestDB = MongoClient(port=master.port).test
+ sTestDB = MongoClient(port=slave.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).test
+ mCount = mTestDB[coll].count()
+ sCount = sTestDB[coll].count()
+ stats = {'hashes': {'master': mhash, 'slave': shash},
+ 'counts':{'master': mCount, 'slave': sCount}}
+ try:
+ mDocs = list(mTestDB[coll].find().sort("_id", 1))
+ sDocs = list(sTestDB[coll].find().sort("_id", 1))
+ mDiffDocs = list()
+ sDiffDocs = list()
+ for left, right in izip(mDocs, sDocs):
+ if left != right:
+ mDiffDocs.append(left)
+ sDiffDocs.append(right)
+
+ stats["docs"] = {'master': mDiffDocs, 'slave': sDiffDocs }
+ except Exception, e:
+ stats["error-docs"] = e;
+
+ screwy_in_slave[coll] = stats
+ if mhash == "no _id _index":
+ mOplog = mTestDB.connection.local["oplog.$main"];
+ oplog_entries = list(mOplog.find({"$or": [{"ns":mTestDB[coll].full_name}, \
+ {"op":"c"}]}).sort("$natural", 1))
+ print "oplog for %s" % mTestDB[coll].full_name
+ for doc in oplog_entries:
+ pprint.pprint(doc, width=200)
+
+
+ for db in slave.dict.keys():
+ if db not in master.dict and db not in lost_in_master:
+ lost_in_master.append(db)
+
+
+def ternary( b , l="true", r="false" ):
+ if b:
+ return l
+ return r
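+
+# e.g. ternary(no_journal) -> "true" or "false", used when assembling the
+# shell's TestData eval string in runTest() below.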
+
+# Blech.
+def skipTest(path):
+ basename = os.path.basename(path)
+ parentPath = os.path.dirname(path)
+ parentDir = os.path.basename(parentPath)
+ if small_oplog: # For tests running in parallel
+ if basename in ["cursor8.js", "indexh.js", "dropdb.js", "dropdb_race.js",
+ "connections_opened.js", "opcounters.js", "dbadmin.js"]:
+ return True
+ if use_ssl:
+ # Skip tests using mongobridge since it does not support SSL
+ # TODO: Remove when SERVER-10910 has been resolved.
+ if basename in ["gridfs.js", "initial_sync3.js", "majority.js", "no_chaining.js",
+ "rollback4.js", "slavedelay3.js", "sync2.js", "tags.js"]:
+ return True
+ # TODO: For now skip tests using MongodRunner, remove when SERVER-10909 has been resolved
+ if basename in ["fastsync.js", "index_retry.js", "ttl_repl_maintenance.js",
+ "unix_socket1.js"]:
+ return True;
+ if auth or keyFile or use_x509: # For tests running with auth
+ # Skip any tests that run with auth explicitly
+ if parentDir.lower() == "auth" or "auth" in basename.lower():
+ return True
+ if parentPath == mongo_repo: # Skip client tests
+ return True
+ if parentDir == "tool": # SERVER-6368
+ return True
+ if parentDir == "dur": # SERVER-7317
+ return True
+ if parentDir == "disk": # SERVER-7356
+ return True
+
+ authTestsToSkip = [("jstests", "drop2.js"), # SERVER-8589,
+ ("jstests", "killop.js"), # SERVER-10128
+ ("sharding", "sync3.js"), # SERVER-6388 for this and those below
+ ("sharding", "sync6.js"),
+ ("sharding", "parallel.js"),
+ ("jstests", "bench_test1.js"),
+ ("jstests", "bench_test2.js"),
+ ("jstests", "bench_test3.js"),
+ ("core", "bench_test1.js"),
+ ("core", "bench_test2.js"),
+ ("core", "bench_test3.js"),
+ ]
+
+ if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]:
+ return True
+
+ return False
+
+forceCommandsForDirs = ["aggregation", "auth", "core", "parallel", "replsets"]
+# look for jstests and one of the above suites separated by either posix or windows slashes
+forceCommandsRE = re.compile(r"jstests[/\\](%s)" % ('|'.join(forceCommandsForDirs)))
+def setShellWriteModeForTest(path, argv):
+ swm = shell_write_mode
+ if swm == "legacy": # change when the default changes to "commands"
+ if use_write_commands or forceCommandsRE.search(path):
+ swm = "commands"
+ argv += ["--writeMode", swm]
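+
+# e.g. with shell_write_mode "legacy", a path like "jstests/core/foo.js"
+# matches forceCommandsRE and argv gains ["--writeMode", "commands"];
+# non-matching paths keep ["--writeMode", "legacy"].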
+
+def runTest(test, result):
+ # result is a map containing test result details, like result["url"]
+
+ # test is a tuple of ( filename , usedb<bool> )
+ # filename should be a js file to run
+ # usedb is true if the test expects a mongod to be running
+
+ (path, usedb) = test
+ (ignore, ext) = os.path.splitext(path)
+ test_mongod = mongod()
+ mongod_is_up = test_mongod.is_mongod_up(mongod_port)
+ result["mongod_running_at_start"] = mongod_is_up;
+
+ if file_of_commands_mode:
+ # smoke.py was invoked like "--mode files --from-file foo",
+ # so don't try to interpret the test path too much
+ if os.sys.platform == "win32":
+ argv = [path]
+ else:
+ argv = shlex.split(path)
+ path = argv[0]
+ # if the command is a python script, use the script name
+ if os.path.basename(path) in ('python', 'python.exe'):
+ path = argv[1]
+ elif ext == ".js":
+ argv = [shell_executable, "--port", mongod_port, '--authenticationMechanism', authMechanism]
+
+ setShellWriteModeForTest(path, argv)
+
+ if not usedb:
+ argv += ["--nodb"]
+ if small_oplog or small_oplog_rs:
+ argv += ["--eval", 'testingReplication = true;']
+ if use_ssl:
+ argv += ["--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidCertificates"]
+ argv += [path]
+ elif ext in ["", ".exe"]:
+ # Blech.
+ if os.path.basename(path) in ["test", "test.exe", "perftest", "perftest.exe"]:
+ argv = [path]
+ # default data directory for test and perftest is /tmp/unittest
+ if smoke_db_prefix:
+ dir_name = smoke_db_prefix + '/unittests'
+ argv.extend(["--dbpath", dir_name] )
+ # more blech
+ elif os.path.basename(path) in ['mongos', 'mongos.exe']:
+ argv = [path, "--test"]
+ else:
+ argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path,
+ "--port", mongod_port]
+ else:
+ raise Bug("fell off in extension case: %s" % path)
+
+ mongo_test_filename = os.path.basename(path)
+
+ # sys.stdout.write() is more atomic than print, so using it prevents
+ # lines being interrupted by, e.g., child processes
+ sys.stdout.write(" *******************************************\n")
+ sys.stdout.write(" Test : %s ...\n" % mongo_test_filename)
+ sys.stdout.flush()
+
+ # FIXME: we don't handle the case where the subprocess
+ # hangs... that's bad.
+ if ( argv[0].endswith( 'mongo' ) or argv[0].endswith( 'mongo.exe' ) ) and not '--eval' in argv :
+ evalString = 'load("jstests/libs/use_extended_timeout.js");' + \
+ 'TestData = new Object();' + \
+ 'TestData.testPath = "' + path + '";' + \
+ 'TestData.testFile = "' + os.path.basename( path ) + '";' + \
+ 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \
+ 'TestData.setParameters = "' + ternary( set_parameters, set_parameters, "" ) + '";' + \
+ 'TestData.setParametersMongos = "' + ternary( set_parameters_mongos, set_parameters_mongos, "" ) + '";' + \
+ 'TestData.noJournal = ' + ternary( no_journal ) + ";" + \
+ 'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \
+ 'TestData.auth = ' + ternary( auth ) + ";" + \
+ 'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \
+ 'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";" + \
+ 'TestData.authMechanism = ' + ternary( authMechanism,
+ '"' + str(authMechanism) + '"', 'null') + ";" + \
+ 'TestData.useSSL = ' + ternary( use_ssl ) + ";" + \
+ 'TestData.useX509 = ' + ternary( use_x509 ) + ";"
+ # this updates the default data directory for mongod processes started through shell (src/mongo/shell/servers.js)
+ evalString += 'MongoRunner.dataDir = "' + os.path.abspath(smoke_db_prefix + '/data/db') + '";'
+ evalString += 'MongoRunner.dataPath = MongoRunner.dataDir + "/";'
+ if os.sys.platform == "win32":
+ # double the backslashes in the evalString on windows; this
+ # prevents them from being removed when the shell
+ # (i.e. bash) evaluates this string. yuck.
+ evalString = evalString.replace('\\', '\\\\')
+
+ if auth and usedb:
+ evalString += 'jsTest.authenticate(db.getMongo());'
+
+ argv = argv + [ '--eval', evalString]
+
+ if argv[0].endswith( 'test' ) or argv[0].endswith( 'test.exe' ):
+ if no_preallocj :
+ argv = argv + [ '--nopreallocj' ]
+ if temp_path:
+ argv = argv + [ '--tempPath', temp_path ]
+
+
+ sys.stdout.write(" Command : %s\n" % ' '.join(argv))
+ sys.stdout.write(" Date : %s\n" % datetime.now().ctime())
+ sys.stdout.flush()
+
+ os.environ['MONGO_TEST_FILENAME'] = mongo_test_filename
+ t1 = time.time()
+
+ proc = Popen(buildlogger(argv), cwd=test_path, stdout=PIPE, stderr=STDOUT, bufsize=0)
+ first_line = proc.stdout.readline() # Get suppressed output URL
+ m = re.search(r"\s*\(output suppressed; see (?P<url>.*)\)" + os.linesep, first_line)
+ if m:
+ result["url"] = m.group("url")
+ sys.stdout.write(first_line)
+ sys.stdout.flush()
+ while True:
+ # print until subprocess's stdout closed.
+ # Not using "for line in file" since that has unwanted buffering.
+ line = proc.stdout.readline()
+ if not line:
+ break;
+
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+ proc.wait() # wait if stdout is closed before subprocess exits.
+ r = proc.returncode
+
+ t2 = time.time()
+ del os.environ['MONGO_TEST_FILENAME']
+
+ timediff = t2 - t1
+ # timediff is seconds by default
+ scale = 1
+ suffix = "seconds"
+ # if timediff is less than 10 seconds use ms
+ if timediff < 10:
+ scale = 1000
+ suffix = "ms"
+ # if timediff is more than 60 seconds use minutes
+ elif timediff > 60:
+ scale = 1.0 / 60.0
+ suffix = "minutes"
+ sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix))
+ sys.stdout.flush()
+
+ result["exit_code"] = r
+
+ is_mongod_still_up = test_mongod.is_mongod_up(mongod_port)
+ if not is_mongod_still_up:
+ print "mongod is not running after test"
+ result["mongod_running_at_end"] = is_mongod_still_up;
+ if start_mongod:
+ raise TestServerFailure(path)
+
+ result["mongod_running_at_end"] = is_mongod_still_up;
+
+ if r != 0:
+ raise TestExitFailure(path, r)
+
+ print ""
+
+def run_tests(tests):
+ # FIXME: some suites of tests start their own mongod, so don't
+ # need this. (So long as there are no conflicts with port,
+ # dbpath, etc., and so long as we shut ours down properly,
+ # starting this mongod shouldn't break anything, though.)
+
+ # The reason we want to use "with" is so that we get __exit__ semantics
+ # but "with" is only supported on Python 2.5+
+
+ if start_mongod:
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509).__enter__()
+ else:
+ master = Nothing()
+ try:
+ if small_oplog:
+ slave = mongod(slave=True,
+ set_parameters=set_parameters).__enter__()
+ elif small_oplog_rs:
+ slave = mongod(slave=True,
+ small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509).__enter__()
+ primary = MongoClient(port=master.port);
+
+ primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [
+ {'_id': 0, 'host':'localhost:%s' % master.port},
+ {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}})
+
+ ismaster = False
+ while not ismaster:
+ result = primary.admin.command("ismaster");
+ ismaster = result["ismaster"]
+ time.sleep(1)
+ else:
+ slave = Nothing()
+
+ try:
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+
+ for tests_run, test in enumerate(tests):
+ tests_run += 1 # enumerate from 1, python 2.5 compatible
+ test_result = { "start": time.time() }
+
+ (test_path, use_db) = test
+
+ if test_path.startswith(mongo_repo + os.path.sep):
+ test_result["test_file"] = test_path[len(mongo_repo)+1:]
+ else:
+ # user could specify a file not in repo. leave it alone.
+ test_result["test_file"] = test_path
+
+ try:
+ if skipTest(test_path):
+ test_result["status"] = "skip"
+
+ print "skipping " + test_path
+ else:
+ fails.append(test)
+ runTest(test, test_result)
+ fails.pop()
+ winners.append(test)
+
+ test_result["status"] = "pass"
+
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_report["results"].append( test_result )
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+ # check the db_hashes
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ check_and_report_replication_dbhashes()
+
+ elif use_db: # reach inside test and see if "usedb" is true
+ if clean_every_n_tests and (tests_run % clean_every_n_tests) == 0:
+ # Restart mongod periodically to clean accumulated test data
+ # clean_dbroot() is invoked by mongod.start()
+ master.__exit__(None, None, None)
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509).__enter__()
+
+ except TestFailure, f:
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["error"] = str(f)
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+ try:
+ print f
+ # Record the failing test and re-raise.
+ losers[f.path] = f.status
+ raise f
+ except TestServerFailure, f:
+ return 2
+ except TestFailure, f:
+ if not continue_on_failure:
+ return 1
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ finally:
+ slave.__exit__(None, None, None)
+ finally:
+ master.__exit__(None, None, None)
+ return 0
+
+
+def check_and_report_replication_dbhashes():
+ def missing(lst, src, dst):
+ if lst:
+ print """The following collections were present in the %s but not the %s
+at the end of testing:""" % (src, dst)
+ for db in lst:
+ print db
+
+ missing(lost_in_slave, "master", "slave")
+ missing(lost_in_master, "slave", "master")
+ if screwy_in_slave:
+ print """The following collections have different hashes in master and slave
+at the end of testing:"""
+ for coll in screwy_in_slave.keys():
+ stats = screwy_in_slave[coll]
+ # Counts are "approx" because they are collected after the dbhash runs and may not
+ # reflect the states of the collections that were hashed. If the hashes differ, one
+ # possibility is that a test exited with writes still in-flight.
+ print "collection: %s\t (master/slave) hashes: %s/%s counts (approx): %i/%i" % (coll, stats['hashes']['master'], stats['hashes']['slave'], stats['counts']['master'], stats['counts']['slave'])
+ if "docs" in stats:
+ if (("master" in stats["docs"] and len(stats["docs"]["master"]) != 0) or
+ ("slave" in stats["docs"] and len(stats["docs"]["slave"]) != 0)):
+ print "Different Docs"
+ print "Master docs:"
+ pprint.pprint(stats["docs"]["master"], indent=2)
+ print "Slave docs:"
+ pprint.pprint(stats["docs"]["slave"], indent=2)
+ else:
+ print "All docs matched!"
+ if "error-docs" in stats:
+ print "Error getting docs to diff:"
+ pprint.pprint(stats["error-docs"])
+ return True
+
+ if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave):
+ print "replication ok for %d collections" % (len(replicated_collections))
+
+ return False
+
+
+def report():
+ print "%d tests succeeded" % len(winners)
+ num_missed = len(tests) - (len(winners) + len(losers.keys()))
+ if num_missed:
+ print "%d tests didn't get run" % num_missed
+ if losers:
+ print "The following tests failed (with exit code):"
+ for loser in losers:
+ print "%s\t%d" % (loser, losers[loser])
+
+ test_result = { "start": time.time() }
+ if check_and_report_replication_dbhashes():
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["test_file"] = "/#dbhash#"
+ test_result["error"] = "dbhash mismatch"
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report ) )
+ f.close()
+
+ if losers or lost_in_slave or lost_in_master or screwy_in_slave:
+ raise Exception("Test failures")
+
+# Keys are the suite names (passed on the command line to smoke.py)
+# Values are pairs: (filenames, <start mongod before running tests>)
+suiteGlobalConfig = {"js": ("core/*.js", True),
+ "quota": ("quota/*.js", True),
+ "jsPerf": ("perf/*.js", True),
+ "disk": ("disk/*.js", True),
+ "noPassthroughWithMongod": ("noPassthroughWithMongod/*.js", True),
+ "noPassthrough": ("noPassthrough/*.js", False),
+ "parallel": ("parallel/*.js", True),
+ "clone": ("clone/*.js", False),
+ "repl": ("repl/*.js", False),
+ "replSets": ("replsets/*.js", False),
+ "dur": ("dur/*.js", False),
+ "auth": ("auth/*.js", False),
+ "sharding": ("sharding/*.js", False),
+ "tool": ("tool/*.js", False),
+ "aggregation": ("aggregation/*.js", True),
+ "multiVersion": ("multiVersion/*.js", True),
+ "failPoint": ("fail_point/*.js", False),
+ "ssl": ("ssl/*.js", True),
+ "sslSpecial": ("sslSpecial/*.js", True),
+ "jsCore": ("core/*.js", True),
+ "gle": ("gle/*.js", True),
+ "slow1": ("slow1/*.js", True),
+ "slow2": ("slow2/*.js", True),
+ }
+
+def get_module_suites():
+ """Attempts to discover and return information about module test suites
+
+ Returns a dictionary of module suites in the format:
+
+ {
+ "<suite_name>" : "<full_path_to_suite_directory/[!_]*.js>",
+ ...
+ }
+
+ This means the values of this dictionary can be used as "glob"s to match all jstests in the
+ suite directory that don't start with an underscore
+
+ The module tests should be put in 'src/mongo/db/modules/<module_name>/<suite_name>/*.js'
+
+ NOTE: This assumes that if we have more than one module the suite names don't conflict
+ """
+ modules_directory = 'src/mongo/db/modules'
+ test_suites = {}
+
+ # Return no suites if we have no modules
+ if not os.path.exists(modules_directory) or not os.path.isdir(modules_directory):
+ return {}
+
+ module_directories = os.listdir(modules_directory)
+ for module_directory in module_directories:
+
+ test_directory = os.path.join(modules_directory, module_directory, "jstests")
+
+ # Skip this module if it has no "jstests" directory
+ if not os.path.exists(test_directory) or not os.path.isdir(test_directory):
+ continue
+
+ # Get all suites for this module
+ for test_suite in os.listdir(test_directory):
+ test_suites[test_suite] = os.path.join(test_directory, test_suite, "[!_]*.js")
+
+ return test_suites
+
+def expand_suites(suites,expandUseDB=True):
+ """Takes a list of suites and expands to a list of tests according to a set of rules.
+
+ Keyword arguments:
+ suites -- list of suites specified by the user
+ expandUseDB -- expand globs (such as [!_]*.js) for tests that are run against a database
+ (default True)
+
+ This function handles expansion of globs (such as [!_]*.js), aliases (such as "client" and
+ "all"), detection of suites in the "modules" directory, and enumerating the test files in a
+ given suite. It returns a list of tests of the form (path_to_test, usedb), where the second
+ part of the tuple specifies whether the test is run against the database (see --nodb in the
+ mongo shell)
+
+ """
+ globstr = None
+ tests = []
+ module_suites = get_module_suites()
+ for suite in suites:
+ if suite == 'all':
+ return expand_suites(['test',
+ 'perf',
+ 'jsCore',
+ 'jsPerf',
+ 'noPassthroughWithMongod',
+ 'noPassthrough',
+ 'clone',
+ 'parallel',
+ 'repl',
+ 'auth',
+ 'sharding',
+ 'slow1',
+ 'slow2',
+ 'tool'],
+ expandUseDB=expandUseDB)
+ if suite == 'test':
+ if os.sys.platform == "win32":
+ program = 'test.exe'
+ else:
+ program = 'test'
+ (globstr, usedb) = (program, False)
+ elif suite == 'perf':
+ if os.sys.platform == "win32":
+ program = 'perftest.exe'
+ else:
+ program = 'perftest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'mongosTest':
+ if os.sys.platform == "win32":
+ program = 'mongos.exe'
+ else:
+ program = 'mongos'
+ tests += [(os.path.join(mongo_repo, program), False)]
+ elif os.path.exists( suite ):
+ usedb = True
+ for name in suiteGlobalConfig:
+ if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ):
+ usedb = suiteGlobalConfig[name][1]
+ break
+ tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ]
+ elif suite in module_suites:
+ # Currently we connect to a database in all module tests since there's no mechanism yet
+ # to configure it independently
+ usedb = True
+ paths = glob.glob(module_suites[suite])
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+ else:
+ try:
+ globstr, usedb = suiteGlobalConfig[suite]
+ except KeyError:
+ raise Exception('unknown test suite %s' % suite)
+
+ if globstr:
+ if usedb and not expandUseDB:
+ tests += [ (suite,False) ]
+ else:
+ if globstr.endswith('.js'):
+ loc = 'jstests/'
+ else:
+ loc = ''
+ globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr)))
+ globstr = os.path.normpath(globstr)
+ paths = glob.glob(globstr)
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+
+ return tests
+
+def add_exe(e):
+ if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ):
+ e += ".exe"
+ return e
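+
+# e.g. add_exe("mongod") -> "mongod.exe" on Windows, "mongod" elsewhere.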
+
+def set_globals(options, tests):
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure
+ global small_oplog, small_oplog_rs
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj
+ global auth, authMechanism, keyFile, keyFileData, smoke_db_prefix, test_path, start_mongod
+ global use_ssl, use_x509
+ global file_of_commands_mode
+ global report_file, shell_write_mode, use_write_commands
+ global temp_path
+ global clean_every_n_tests
+ global clean_whole_dbroot
+
+ start_mongod = options.start_mongod
+ if hasattr(options, 'use_ssl'):
+ use_ssl = options.use_ssl
+ if hasattr(options, 'use_x509'):
+ use_x509 = options.use_x509
+ use_ssl = use_ssl or use_x509
+ # Careful, this can be called multiple times
+ test_path = options.test_path
+
+ mongod_executable = add_exe(options.mongod_executable)
+ if not os.path.exists(mongod_executable):
+ raise Exception("no mongod found in this directory.")
+
+ mongod_port = options.mongod_port
+
+ shell_executable = add_exe( options.shell_executable )
+ if not os.path.exists(shell_executable):
+ raise Exception("no mongo shell found in this directory.")
+
+ continue_on_failure = options.continue_on_failure
+ smoke_db_prefix = options.smoke_db_prefix
+ small_oplog = options.small_oplog
+ if hasattr(options, "small_oplog_rs"):
+ small_oplog_rs = options.small_oplog_rs
+ no_journal = options.no_journal
+ set_parameters = options.set_parameters
+ set_parameters_mongos = options.set_parameters_mongos
+ no_preallocj = options.no_preallocj
+ auth = options.auth
+ authMechanism = options.authMechanism
+ keyFile = options.keyFile
+
+ clean_every_n_tests = options.clean_every_n_tests
+ clean_whole_dbroot = options.with_cleanbb
+
+ if auth and not keyFile:
+ # if only --auth was given to smoke.py, load the
+ # default keyFile from jstests/libs/authTestsKey
+ keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey')
+
+ if keyFile:
+ f = open(keyFile, 'r')
+ keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace
+ f.close()
+ os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR)
+ else:
+ keyFileData = None
+
+ # if smoke.py is running a list of commands read from a
+ # file (or stdin) rather than running a suite of js tests
+ file_of_commands_mode = options.File and options.mode == 'files'
+ # generate json report
+ report_file = options.report_file
+ temp_path = options.temp_path
+
+ use_write_commands = options.use_write_commands
+ shell_write_mode = options.shell_write_mode
+
+def file_version():
+ return md5(open(__file__, 'r').read()).hexdigest()
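+
+# Sketch: run_old_fails() below compares state['version'] against
+# file_version(); on a mismatch it warns about an old failfile.smoke and
+# clears it rather than replaying entries recorded by a different revision.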
+
+def clear_failfile():
+ if os.path.exists(failfile):
+ os.remove(failfile)
+
+def run_old_fails():
+ global tests
+
+ try:
+ f = open(failfile, 'r')
+ state = pickle.load(f)
+ f.close()
+ except Exception:
+ try:
+ f.close()
+ except:
+ pass
+ clear_failfile()
+ return # This counts as passing so we will run all tests
+
+ if ('version' not in state or state['version'] != file_version()):
+ print "warning: old version of failfile.smoke detected. skipping recent fails"
+ clear_failfile()
+ return
+
+ testsAndOptions = state['testsAndOptions']
+ tests = [x[0] for x in testsAndOptions]
+ passed = []
+ try:
+ for (i, (test, options)) in enumerate(testsAndOptions):
+ # SERVER-5102: until we can figure out a better way to manage
+ # dependencies of the --only-old-fails build phase, just skip
+ # tests which we can't safely run at this point
+ path, usedb = test
+
+ if not os.path.exists(path):
+ passed.append(i)
+ winners.append(test)
+ continue
+
+ filename = os.path.basename(path)
+ if filename in ('test', 'test.exe') or filename.endswith('.js'):
+ set_globals(options, [filename])
+ oldWinners = len(winners)
+ run_tests([test])
+ if len(winners) != oldWinners: # can't use return value due to continue_on_failure
+ passed.append(i)
+ finally:
+ for offset, i in enumerate(passed):
+ testsAndOptions.pop(i - offset)
+
+ if testsAndOptions:
+ f = open(failfile, 'w')
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ pickle.dump(state, f)
+ else:
+ clear_failfile()
+
+ report() # exits with failure code if there is an error
+
+def add_to_failfile(tests, options):
+ try:
+ f = open(failfile, 'r')
+ testsAndOptions = pickle.load(f)["testsAndOptions"]
+ except Exception:
+ testsAndOptions = []
+
+ for test in tests:
+ if (test, options) not in testsAndOptions:
+ testsAndOptions.append( (test, options) )
+
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ f = open(failfile, 'w')
+ pickle.dump(state, f)
+
+
+
+def main():
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, auth
+ global keyFile, smoke_db_prefix, test_path, use_write_commands
+
+ parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
+ parser.add_option('--mode', dest='mode', default='suite',
+ help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
+ # Some of our tests hard-code pathnames e.g., to execute, so until
+ # that changes we don't have the freedom to run from anyplace.
+ # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
+ parser.add_option('--test-path', dest='test_path', default=None,
+ help="Path to the test executables to run, "
+ "currently only used for 'client' (%default)")
+ parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
+ help='Path to mongod to run (%default)')
+ parser.add_option('--port', dest='mongod_port', default="27999",
+ help='Port the mongod will bind to (%default)')
+ parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
+ help='Path to mongo, for .js test files (%default)')
+ parser.add_option('--continue-on-failure', dest='continue_on_failure',
+ action="store_true", default=False,
+ help='If supplied, continue testing even after a test fails')
+ parser.add_option('--from-file', dest='File',
+ help="Run tests/suites named in FILE, one test per line, '-' means stdin")
+ parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
+ help="Prefix to use for the mongods' dbpaths ('%default')")
+ parser.add_option('--small-oplog', dest='small_oplog', default=False,
+ action="store_true",
+ help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
+ action="store_true",
+ help='Run tests with replica set replication & use a small oplog')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
+ parser.add_option('--auth', dest='auth', default=False,
+ action="store_true",
+ help='Run standalone mongods in tests with authentication enabled')
+ parser.add_option('--use-x509', dest='use_x509', default=False,
+ action="store_true",
+ help='Use x509 auth for internal cluster authentication')
+ parser.add_option('--authMechanism', dest='authMechanism', default='MONGODB-CR',
+ help='Use the given authentication mechanism, when --auth is used.')
+ parser.add_option('--keyFile', dest='keyFile', default=None,
+ help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
+ parser.add_option('--ignore', dest='ignore_files', default=None,
+ help='Pattern of files to ignore in tests')
+ parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
+ action="store_true",
+ help='Check the failfile and only run all tests that failed last time')
+ parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
+ action="store_true",
+ help='Clear the failfile. Do this if all tests pass')
+ parser.add_option('--with-cleanbb', dest='with_cleanbb', action="store_true",
+ default=False,
+ help='Clear database files before first test')
+ parser.add_option('--clean-every', dest='clean_every_n_tests', type='int',
+ default=20,
+ help='Clear database files every N tests [default %default]')
+ parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
+ action='store_false',
+ help='Do not start mongod before commencing test running')
+ parser.add_option('--use-ssl', dest='use_ssl', default=False,
+ action='store_true',
+ help='Run mongo shell and mongod instances with SSL encryption')
+ parser.add_option('--set-parameters', dest='set_parameters', default="",
+ help='Adds --setParameter to mongod for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--set-parameters-mongos', dest='set_parameters_mongos', default="",
+ help='Adds --setParameter to mongos for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--temp-path', dest='temp_path', default=None,
+ help='If present, passed as --tempPath to unittests and dbtests')
+ # Buildlogger invocation from command line
+ parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
+ action="store", help='Set the "builder name" for buildlogger')
+ parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
+ action="store", help='Set the "build number" for buildlogger')
+ parser.add_option('--buildlogger-url', dest='buildlogger_url', default=None,
+ action="store", help='Set the url root for the buildlogger service')
+ parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
+ action="store", help='Path to Python file containing buildlogger credentials')
+ parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
+ action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')
+ parser.add_option('--report-file', dest='report_file', default=None,
+ action='store',
+ help='Path to generate detailed json report containing all test details')
+ parser.add_option('--use-write-commands', dest='use_write_commands', default=False,
+ action='store_true',
+ help='Deprecated(use --shell-write-mode): Sets the shell to use write commands by default')
+ parser.add_option('--shell-write-mode', dest='shell_write_mode', default="legacy",
+ help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default:legacy)')
+
+ global tests
+ (options, tests) = parser.parse_args()
+
+ set_globals(options, tests)
+
+ buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
+ if all(buildlogger_opts):
+ os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
+ os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
+ os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
+ os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
+ if options.buildlogger_phase:
+ os.environ['MONGO_PHASE'] = options.buildlogger_phase
+ elif any(buildlogger_opts):
+ # some but not all of the required options were set
+ raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")
+
+ if options.buildlogger_url: #optional; if None, defaults to const in buildlogger.py
+ os.environ['BUILDLOGGER_URL'] = options.buildlogger_url
+
+ if options.File:
+ if options.File == '-':
+ tests = sys.stdin.readlines()
+ else:
+ f = open(options.File)
+ tests = f.readlines()
+ tests = [t.rstrip('\n') for t in tests]
+
+ if options.only_old_fails:
+ run_old_fails()
+ return
+ elif options.reset_old_fails:
+ clear_failfile()
+ return
+
+ # If we're in suite mode, tests is a list of names of sets of tests.
+ if options.mode == 'suite':
+ tests = expand_suites(tests)
+ elif options.mode == 'files':
+ tests = [(os.path.abspath(test), start_mongod) for test in tests]
+
+ if options.ignore_files != None :
+ ignore_patt = re.compile( options.ignore_files )
+ print "Ignoring files with pattern: ", ignore_patt
+
+ def ignore_test( test ):
+ if ignore_patt.search( test[0] ) != None:
+ print "Ignoring test ", test[0]
+ return False
+ else:
+ return True
+
+ tests = filter( ignore_test, tests )
+
+ if not tests:
+ print "warning: no tests specified"
+ return
+
+ if options.with_cleanbb:
+ clean_dbroot(nokill=True)
+
+ test_report["start"] = time.time()
+ test_report["mongod_running_at_start"] = mongod().is_mongod_up(mongod_port)
+ try:
+ run_tests(tests)
+ finally:
+ add_to_failfile(fails, options)
+
+ test_report["end"] = time.time()
+ test_report["elapsed"] = test_report["end"] - test_report["start"]
+ test_report["failures"] = len(losers.keys())
+ test_report["mongod_running_at_end"] = mongod().is_mongod_up(mongod_port)
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report, indent=4, separators=(',', ': ')) )
+ f.close()
+
+ report()
+
+if __name__ == "__main__":
+ main()
diff --git a/src/mongo/gotools/test/legacy26/buildscripts/utils.py b/src/mongo/gotools/test/legacy26/buildscripts/utils.py
new file mode 100644
index 00000000000..68273ee69c8
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/buildscripts/utils.py
@@ -0,0 +1,230 @@
+
+import codecs
+import re
+import socket
+import time
+import os
+import os.path
+import itertools
+import subprocess
+import sys
+import hashlib
+
+# various utilities that are handy
+
+def getAllSourceFiles( arr=None , prefix="." ):
+ if arr is None:
+ arr = []
+
+ if not os.path.isdir( prefix ):
+ # assume a file
+ arr.append( prefix )
+ return arr
+
+ for x in os.listdir( prefix ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
+ continue
+ full = prefix + "/" + x
+ if os.path.isdir( full ) and not os.path.islink( full ):
+ getAllSourceFiles( arr , full )
+ else:
+ if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+ full = full.replace( "//" , "/" )
+ arr.append( full )
+
+ return arr
+
+
+def getGitBranch():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return None
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version.split( "/" )
+ version = version[len(version)-1]
+ return version
+
+def getGitBranchString( prefix="" , postfix="" ):
+ t = re.compile( '[/\\\]' ).split( os.getcwd() )
+ if len(t) > 2 and t[len(t)-1] == "mongo":
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
+
+ b = getGitBranch()
+ if b == None or b == "master":
+ return ""
+ return prefix + b + postfix
+
+def getGitVersion():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return "nogitversion"
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version[5:]
+ f = ".git/" + version
+ if not os.path.exists( f ):
+ return version
+ return open( f , 'r' ).read().strip()
+
+def execsys( args ):
+ if isinstance( args , str ):
+ r = re.compile( "\s+" )
+ args = r.split( args )
+ p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE )
+ r = p.communicate()
+ return r
+
+def getprocesslist():
+ raw = ""
+ try:
+ raw = execsys( "/bin/ps axww" )[0]
+ except Exception,e:
+ print( "can't get processlist: " + str( e ) )
+
+ r = re.compile( "[\r\n]+" )
+ return r.split( raw )
+
+def removeIfInList( lst , thing ):
+ if thing in lst:
+ lst.remove( thing )
+
+def findVersion( root , choices ):
+ for c in choices:
+ if ( os.path.exists( root + c ) ):
+ return root + c
+ raise "can't find a version of [" + root + "] choices: " + choices
+
+def choosePathExist( choices , default=None):
+ for c in choices:
+ if c != None and os.path.exists( c ):
+ return c
+ return default
+
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
+def ensureDir( name ):
+ d = os.path.dirname( name )
+ if not os.path.exists( d ):
+ print( "Creating dir: " + name );
+ os.makedirs( d )
+ if not os.path.exists( d ):
+ raise "Failed to create dir: " + name
+
+
+def distinctAsString( arr ):
+ s = set()
+ for x in arr:
+ s.add( str(x) )
+ return list(s)
+
+def checkMongoPort( port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", port))
+ sock.close()
+
+
+def didMongodStart( port=27017 , timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ checkMongoPort( port )
+ return True
+ except Exception,e:
+ print( e )
+ timeout = timeout - 1
+ return False
+
+def which(executable):
+ if sys.platform == 'win32':
+ paths = os.environ.get('Path', '').split(';')
+ else:
+ paths = os.environ.get('PATH', '').split(':')
+
+ for path in paths:
+ path = os.path.expandvars(path)
+ path = os.path.expanduser(path)
+ path = os.path.abspath(path)
+ executable_path = os.path.join(path, executable)
+ if os.path.exists(executable_path):
+ return executable_path
+
+ return executable
+
+def md5sum( file ):
+ # TODO: error handling, etc.
+ return execsys( "md5sum " + file )[0].partition(" ")[0]
+
+def md5string( a_string ):
+ return hashlib.md5(a_string).hexdigest()
+
+def find_python(min_version=(2, 5)):
+ try:
+ if sys.version_info >= min_version:
+ return sys.executable
+ except AttributeError:
+ # In case the version of Python is somehow missing sys.version_info or sys.executable.
+ pass
+
+ version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
+ binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python')
+ for binary in binaries:
+ try:
+ out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ for stream in (out, err):
+ match = version.search(stream)
+ if match:
+ versiontuple = tuple(map(int, match.group(1).split('.')))
+ if versiontuple >= min_version:
+ return which(binary)
+ except:
+ pass
+
+ raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version))
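+
+# Illustrative only: on a machine whose default interpreter is new enough,
+# find_python() simply returns sys.executable; otherwise it probes the
+# candidate binaries above, e.g.
+#
+#   python_path = find_python()  # e.g. '/usr/bin/python2.7'
+#   subprocess.call([python_path, '-c', 'print "hello"'])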
+
+def smoke_command(*args):
+ # return a list of arguments that comprises a complete
+ # invocation of smoke.py
+ here = os.path.dirname(__file__)
+ smoke_py = os.path.abspath(os.path.join(here, 'smoke.py'))
+ # the --with-cleanbb argument causes smoke.py to run
+ # buildscripts/cleanbb.py before each test phase; this
+ # prevents us from running out of disk space on slaves
+ return [find_python(), smoke_py, '--with-cleanbb'] + list(args)
+
+def run_smoke_command(*args):
+ # to run a command line script from a scons Alias (or any
+ # Action), the command sequence must be enclosed in a list,
+ # otherwise SCons treats it as a list of dependencies.
+ return [smoke_command(*args)]
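+
+# Sketch of the SCons hookup the comment above describes (the alias name
+# and the smoke.py arguments are hypothetical):
+#
+#   env.Alias('smoke-js', [], run_smoke_command('--mode', 'files', 'jstests/foo.js'))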
+
+# unicode is a pain. some strings cannot be unicode()'d
+# but we want to just preserve the bytes in a human-readable
+# fashion. this codec error handler will substitute the
+# repr() of the offending bytes into the decoded string
+# at the position they occurred
+def replace_with_repr(unicode_error):
+ offender = unicode_error.object[unicode_error.start:unicode_error.end]
+ return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end)
+
+codecs.register_error('repr', replace_with_repr)
+
+def unicode_dammit(string, encoding='utf8'):
+ # convert a string to a unicode, using the Python
+ # representation of non-ascii bytes when necessary
+ #
+ # name inspired by BeautifulSoup's "UnicodeDammit"
+ return string.decode(encoding, 'repr')
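+
+ # For example, valid UTF-8 decodes normally while an invalid byte
+ # survives as its repr:
+ #
+ #   unicode_dammit('caf\xc3\xa9')  # -> u'caf\xe9'
+ #   unicode_dammit('\xff')         # -> u'\\xff'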
+
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/authTestsKey b/src/mongo/gotools/test/legacy26/jstests/libs/authTestsKey
new file mode 100644
index 00000000000..573898a4f05
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/authTestsKey
@@ -0,0 +1 @@
+This key is only for running the suite with authentication dont use it in any tests directly
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/ca.pem b/src/mongo/gotools/test/legacy26/jstests/libs/ca.pem
new file mode 100644
index 00000000000..f739ef0627b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/ca.pem
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICnTCCAgYCCQD4+RCKzwZr/zANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMC
+VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4w
+DAYDVQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0
+IEF1dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEz
+MDAyMzU0OVoXDTIzMTEyODAyMzU0OVowgZIxCzAJBgNVBAYTAlVTMREwDwYDVQQI
+DAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEOMAwGA1UECgwFMTBH
+ZW4xDzANBgNVBAsMBktlcm5lbDEaMBgGA1UEAwwRTXkgQ2VydCBBdXRob3JpdHkx
+GzAZBgkqhkiG9w0BCQEWDHJvb3RAbGF6YXJ1czCBnzANBgkqhkiG9w0BAQEFAAOB
+jQAwgYkCgYEA1xymeY+U/evUuQvxpun9moe4GopN80c1ptmaAHM/1Onwaq54Wt27
+nl1wUVme3dh4DdWviYY7mJ333HVEnp/QhVcT4kQhICZqdgPKPdCseQW3H+8x6Gwz
+hrNRBdz0NkSoFxDlIymfy2Q2xoQpbCGAg+EnRYUTKlHMXNpUDLFhGjcCAwEAATAN
+BgkqhkiG9w0BAQUFAAOBgQDRQB3c/9osTexEzMPHyMGTzG5nGwy8Wv77GgW3BETM
+hECoGqueXLa5ZgvealJrnMHNKdj6vrCGgBDzE0K0VdXc4dLtLmx3DRntDOAWKJdB
+2XPMvdC7Ec//Fwep/9emz0gDiJrTiEpL4p74+h+sp4Xy8cBokQ3Ss5S9NmnPXT7E
+qQ==
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/client.pem b/src/mongo/gotools/test/legacy26/jstests/libs/client.pem
new file mode 100644
index 00000000000..85ace4fd40b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/client.pem
@@ -0,0 +1,101 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 7 (0x7)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Aug 23 14:55:32 2013 GMT
+ Not After : Jan 7 14:55:32 2041 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, OU=kerneluser, CN=client
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:ba:16:42:d4:8b:3d:5e:8a:67:9e:a7:c0:cd:4a:
+ 9c:9c:fd:95:b9:83:bf:f4:cf:03:8c:2e:db:a9:c1:
+ 35:58:80:f6:e2:e9:87:28:84:e3:d0:9b:68:60:51:
+ 0e:42:84:d8:6f:e8:34:cc:18:97:79:d3:8d:d8:2f:
+ 23:11:25:6f:69:7a:38:bb:8c:b2:29:e9:91:be:79:
+ 8c:cc:1b:56:98:98:d3:83:2a:c5:f9:9c:86:0c:2c:
+ 24:0e:5c:46:3b:a9:95:44:6c:c5:e0:7c:9d:03:ae:
+ 0d:23:99:49:a4:48:dd:0e:35:a2:e5:b4:8b:86:bd:
+ c0:c8:ce:d5:ac:c4:36:f3:9e:5f:17:00:23:8d:53:
+ a1:43:1b:a3:61:96:36:80:4d:35:50:b5:8b:69:31:
+ 39:b4:63:8b:96:59:5c:d1:ea:92:eb:eb:fa:1b:35:
+ 64:44:b3:f6:f3:a6:9d:49:3a:59:e5:e1:c2:cb:98:
+ be:29:b3:22:dd:33:97:d7:50:4f:db:c2:58:64:18:
+ b5:8c:3c:6b:2d:21:f6:bd:8d:e5:d2:da:8d:79:fe:
+ a7:80:75:a8:15:b9:ee:79:7f:01:31:1d:e5:e7:15:
+ 76:53:65:f6:fe:f0:93:7d:20:3d:cc:ff:9b:ca:b2:
+ 50:2c:1b:3a:69:d5:e6:70:cf:ac:be:7e:5c:33:c4:
+ 6e:a7
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ 4A:8B:EE:22:42:E6:F8:62:4C:86:38:8D:C5:78:95:98:C1:10:05:7C
+ X509v3 Authority Key Identifier:
+ keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16
+
+ Signature Algorithm: sha1WithRSAEncryption
+ 13:13:a8:f0:de:78:c6:b1:e0:85:cc:27:e6:04:28:44:93:1d:
+ f1:ff:5e:81:69:33:1f:f3:76:e0:49:ca:d9:ad:aa:db:f5:a5:
+ f8:a6:50:bb:a1:a7:40:14:e4:2f:8d:b8:21:7f:35:04:60:db:
+ af:f0:9e:dd:a1:ca:0b:7f:03:2e:2f:19:1e:32:6e:1e:2d:87:
+ 68:e3:37:47:a8:5b:93:d1:88:41:73:da:88:21:59:27:d4:35:
+ 1c:6a:27:b5:c0:c6:17:ba:f3:87:c8:e1:f4:8f:43:12:bc:fa:
+ 8d:90:d5:86:83:df:51:a5:c9:e0:92:f0:66:d0:37:61:6f:85:
+ 24:18
+-----BEGIN CERTIFICATE-----
+MIIDdjCCAt+gAwIBAgIBBzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMDgyMzE0
+NTUzMloXDTQxMDEwNzE0NTUzMlowbjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjET
+MBEGA1UECwwKa2VybmVsdXNlcjEPMA0GA1UEAwwGY2xpZW50MIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuhZC1Is9XopnnqfAzUqcnP2VuYO/9M8DjC7b
+qcE1WID24umHKITj0JtoYFEOQoTYb+g0zBiXedON2C8jESVvaXo4u4yyKemRvnmM
+zBtWmJjTgyrF+ZyGDCwkDlxGO6mVRGzF4HydA64NI5lJpEjdDjWi5bSLhr3AyM7V
+rMQ2855fFwAjjVOhQxujYZY2gE01ULWLaTE5tGOLlllc0eqS6+v6GzVkRLP286ad
+STpZ5eHCy5i+KbMi3TOX11BP28JYZBi1jDxrLSH2vY3l0tqNef6ngHWoFbnueX8B
+MR3l5xV2U2X2/vCTfSA9zP+byrJQLBs6adXmcM+svn5cM8RupwIDAQABo3sweTAJ
+BgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0
+aWZpY2F0ZTAdBgNVHQ4EFgQUSovuIkLm+GJMhjiNxXiVmMEQBXwwHwYDVR0jBBgw
+FoAUB0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAExOo8N54
+xrHghcwn5gQoRJMd8f9egWkzH/N24EnK2a2q2/Wl+KZQu6GnQBTkL424IX81BGDb
+r/Ce3aHKC38DLi8ZHjJuHi2HaOM3R6hbk9GIQXPaiCFZJ9Q1HGontcDGF7rzh8jh
+9I9DErz6jZDVhoPfUaXJ4JLwZtA3YW+FJBg=
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6FkLUiz1eimee
+p8DNSpyc/ZW5g7/0zwOMLtupwTVYgPbi6YcohOPQm2hgUQ5ChNhv6DTMGJd5043Y
+LyMRJW9peji7jLIp6ZG+eYzMG1aYmNODKsX5nIYMLCQOXEY7qZVEbMXgfJ0Drg0j
+mUmkSN0ONaLltIuGvcDIztWsxDbznl8XACONU6FDG6NhljaATTVQtYtpMTm0Y4uW
+WVzR6pLr6/obNWREs/bzpp1JOlnl4cLLmL4psyLdM5fXUE/bwlhkGLWMPGstIfa9
+jeXS2o15/qeAdagVue55fwExHeXnFXZTZfb+8JN9ID3M/5vKslAsGzpp1eZwz6y+
+flwzxG6nAgMBAAECggEBALYw92urjAFVFxCiA8W7aEzYhtAkaztft4R3mD/C19z4
+H0CZDeig+3+RuIactY5xDIu8WHz/EseHVlg0BmxSL5ugu4z8uq8IbNaFoVFw7r7m
+2ieRKFY0ZpXiXcbllynw5iEhMjeRKhWhQmH5Qb2kTTINV5j4xKa+f9Lblx7Y2Uh4
+tsaOtlMwb98D2/KYJdTv5Nj1nyuSqRVhECsd00Cb6JUBGQBx8Ja0wFy9gEygq6kU
+w3s1XNOSnYNEo4FaVZwp5KZyCyBENcKpNUq4nXt/7ncEfVYdJck0Li3wN4Jr2J9S
+eHqRzh8QkHxc1Ro8ktcXaUSs9kFuwvVvb4rcGUpOMWkCgYEA9xxp8yDtFVgzMtc/
+vS8xgM1Wj4SrgKKYhE2wS05BJh/41oFMzfH1FpZ1GCM983r4QgYWoT71XsBgiOMC
+yN2p2IbV4V44bMGKJqaVMkB91CVCUWI6piaCQb/1CJTwaXE7zPim6dlUSxxBBnRn
+LP50NTscRLFcCZELD3Yl7jR8XFUCgYEAwMfkNFmGtBKAwlHZ3Y3XOwPWg+jCll7s
+9nhv8TU2IB9pcCRGqyOT7k1YymvYkDT2Je4JUPWEBs4cW7yD61LrQ8w8+DrE9dGo
+czzGPyjOAANSX0asG74UjkNIQThmyEOltVHIxYMaSqowjHRSPdA+R4Od9EdcDdfS
+q5SfSVFxmwsCgYBtl1thqUOcCL7EGHQ7KdfxgJ+YDMWmyfWMD4xVCYKZLurD7xop
+59nDR7zslIygE/RQC7Uzk+FsQTNO4ibVAIGX9syaI5gwm3DyjURzwehMEq4ju8W4
+9DEmicRZJvysNrzHvasA4RKiMQihnTQ43yyYgvuZd3MTBxF5rPNLfll89QKBgQC9
+SsmiOZIR+OUjaTmS2bbQBNm7Fm8TNcxZyzKn1wb5jb57VbNqUfnskVgxEqpIFyjn
+X48YRqtH/1RLI5UpGXdXUBFB8Hr7oM1VsgQ7ejakPp7AXOWcLA2FDz3AhMAvvnTU
+0KRihHPpgqk/EOy8M2Ej2XHcrcEO+q+quLmbRXRWtwKBgHacQiwci/2J+v0e9i52
+re/2AJHKP5MwNHFe1e01iNc5EEN0G+/Ut8XW19DWf6bsxqie0ChC+xN8TUst8alT
+F+tXTsHHmt/lRcjTROjT5XVuoqjtU2Q0QeVeGLgvObso+fZy3ZNeQuSJjWukdMZ3
+57rGT6p0OuM8qbrTzpv3JMrm
+-----END PRIVATE KEY-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/client_revoked.pem b/src/mongo/gotools/test/legacy26/jstests/libs/client_revoked.pem
new file mode 100644
index 00000000000..276e62644b6
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/client_revoked.pem
@@ -0,0 +1,34 @@
+-----BEGIN CERTIFICATE-----
+MIIC7jCCAlegAwIBAgIBDDANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNjE1
+MjUzMVoXDTQxMDQyMjE1MjUzMVowajELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMQ8wDQYDVQQDDAZjbGllbnQwgZ8wDQYJKoZIhvcNAQEB
+BQADgY0AMIGJAoGBALX6DqSWRJBEJJRIRqG5X3cFHzse5jGIdV8fTqikaVitvuhs
+15z1njzfqBQZMJBCEvNb4eaenXJRMBDkEOcbfy6ah+ZLLqGFy7b6OxTROfx++3fT
+gsCAjBaIWvtGKNkwdcdM7PQ2jE5bL8vN/ufbH2sX451nVd+j6oAz0dTz7RvhAgMB
+AAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9wZW5TU0wgR2VuZXJh
+dGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBTjciYidtPfd5ILsm7c2yYGV99vwjAf
+BgNVHSMEGDAWgBQHQRk6n37FtyJOt7zV3+T8CbhkFjANBgkqhkiG9w0BAQUFAAOB
+gQCgs74YrlZ6nivONRO8tNWi+gJ1TcWbQV+5yfF7Ispxo1TFxpa6GTWeZA3X4CwK
+PHmCdhb+oZoi59Qny0KECxtBj6zwdYIKLN0gIFYygaGX5J+YrRVatTjCJUHz9fco
+hZwApLEUkYg2Ldvbg+FncDwiVhi74OW685SkThNIulmPcQ==
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBALX6DqSWRJBEJJRI
+RqG5X3cFHzse5jGIdV8fTqikaVitvuhs15z1njzfqBQZMJBCEvNb4eaenXJRMBDk
+EOcbfy6ah+ZLLqGFy7b6OxTROfx++3fTgsCAjBaIWvtGKNkwdcdM7PQ2jE5bL8vN
+/ufbH2sX451nVd+j6oAz0dTz7RvhAgMBAAECgYEAmHRy+g5uSJLeNmBK1EiSIwtm
+e8hKP+s7scJvyrdbDpEZJG2zQWtA82zIynXECsdgSwOKQQRXkaNU6oG3a3bM19uY
+0CqFRb9EwOLIStp+CM5zLRGmUr73u/+JrBPUWWFJkJvINvTXt18CMnCmosTvygWB
+IBZqsuEXQ6JcejxzQ6UCQQDdVUNdE2JgHp1qrr5l8563dztcrfCxuVFtgsj6qnhd
+UrBAa388B9kn4yVAe2i55xFmtHsO9Bz3ViiDFO163SafAkEA0nq8PeZtcIlZ2c7+
+6/Vdw1uLE5APVG2H9VEZdaVvkwIIXo8WQfMwWo5MQyPjVyBhUGlDwnKa46AcuplJ
+2XMtfwJBAIDrMfKb4Ng13OEP6Yz+yvr4MxZ3plQOqlRMMn53HubUzB6pvpGbzKwE
+DWWyvDxUT/lvtKHwJJMYlz5KyUygVecCQHr50RBNmLW+2muDILiWlOD2lIyqh/pp
+QJ2Zc8mkDkuTTXaKHZQM1byjFXXI+yRFu/Xyeu+abFsAiqiPtXFCdVsCQHai+Ykv
+H3y0mUJmwBVP2fBE3GiTGlaadM0auZKu7/ad+yo7Hv8Kibacwibzrj9PjT3mFSSF
+vujX1oWOaxAMVbE=
+-----END PRIVATE KEY-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/cluster-cert.pem b/src/mongo/gotools/test/legacy26/jstests/libs/cluster-cert.pem
new file mode 100644
index 00000000000..74dc9845e3d
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/cluster-cert.pem
@@ -0,0 +1,101 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 5 (0x5)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Aug 7 17:19:17 2013 GMT
+ Not After : Dec 22 17:19:17 2040 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=clustertest
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:98:ec:01:6e:f4:ae:8e:16:c8:87:a2:44:86:a0:
+ 45:5c:ca:82:56:ba:0d:a9:60:bf:07:40:da:db:70:
+ 33:a6:c2:ec:9d:e1:f0:da:fe:b9:f9:ac:23:33:64:
+ e6:63:71:cc:a2:0d:eb:86:bc:31:32:aa:30:e6:1d:
+ 5d:6d:fd:45:f4:2f:dc:72:93:bc:92:27:f7:6a:5a:
+ 18:04:f7:64:d0:6a:3c:a9:14:f6:9e:9d:58:26:f4:
+ 16:93:7e:3d:2e:3c:9e:54:41:4d:1a:e1:bd:b4:cf:
+ d0:05:4c:4d:15:fb:5c:70:1e:0c:32:6d:d7:67:5b:
+ ec:b2:61:83:e3:f0:b1:78:aa:30:45:86:f9:6d:f5:
+ 48:1f:f1:90:06:25:db:71:ed:af:d7:0d:65:65:70:
+ 89:d4:c8:c8:23:a0:67:22:de:d9:6e:1d:44:38:cf:
+ 0f:eb:2c:fe:79:01:d7:98:15:5f:22:42:3f:ee:c9:
+ 16:eb:b9:25:08:9a:2a:11:74:47:e0:51:75:8c:ae:
+ eb:8d:b5:30:fe:48:98:0a:9e:ba:6e:a4:60:08:81:
+ c6:05:a0:97:38:70:c0:1f:b4:27:96:8e:c3:d2:c1:
+ 14:5f:34:16:91:7d:ad:4c:e9:23:07:f0:42:86:78:
+ 11:a1:1e:9d:f3:d0:41:09:06:7d:5c:89:ef:d2:0d:
+ 6c:d5
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ C9:00:3A:28:CC:6A:75:57:82:81:00:A6:25:48:6C:CE:0A:A0:4A:59
+ X509v3 Authority Key Identifier:
+ keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16
+
+ Signature Algorithm: sha1WithRSAEncryption
+ d1:55:e3:5c:43:8c:4f:d3:29:8d:74:4a:1d:23:50:17:27:b3:
+ 30:6f:c6:d7:4c:6c:96:7e:52:a0:2f:91:92:b3:f5:4c:a1:ca:
+ 88:62:31:e4:d6:64:ac:40:17:47:00:24:e8:0d:3b:7b:c7:d4:
+ 7f:3a:76:45:27:fd:9b:ae:9d:44:71:8f:ab:62:60:e5:9d:e8:
+ 59:dd:0e:25:17:14:f8:83:b0:b6:fc:5f:27:8b:69:a2:dc:31:
+ b9:17:a1:27:92:96:c1:73:bf:a3:f0:b8:97:b9:e2:fb:97:6d:
+ 44:01:b0:68:68:47:4b:84:56:3b:19:66:f8:0b:6c:1b:f5:44:
+ a9:ae
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAuCgAwIBAgIBBTANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMDgwNzE3
+MTkxN1oXDTQwMTIyMjE3MTkxN1owbzELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMRQwEgYDVQQDDAtjbHVzdGVydGVzdDCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJjsAW70ro4WyIeiRIagRVzKgla6DalgvwdA
+2ttwM6bC7J3h8Nr+ufmsIzNk5mNxzKIN64a8MTKqMOYdXW39RfQv3HKTvJIn92pa
+GAT3ZNBqPKkU9p6dWCb0FpN+PS48nlRBTRrhvbTP0AVMTRX7XHAeDDJt12db7LJh
+g+PwsXiqMEWG+W31SB/xkAYl23Htr9cNZWVwidTIyCOgZyLe2W4dRDjPD+ss/nkB
+15gVXyJCP+7JFuu5JQiaKhF0R+BRdYyu6421MP5ImAqeum6kYAiBxgWglzhwwB+0
+J5aOw9LBFF80FpF9rUzpIwfwQoZ4EaEenfPQQQkGfVyJ79INbNUCAwEAAaN7MHkw
+CQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2Vy
+dGlmaWNhdGUwHQYDVR0OBBYEFMkAOijManVXgoEApiVIbM4KoEpZMB8GA1UdIwQY
+MBaAFAdBGTqffsW3Ik63vNXf5PwJuGQWMA0GCSqGSIb3DQEBBQUAA4GBANFV41xD
+jE/TKY10Sh0jUBcnszBvxtdMbJZ+UqAvkZKz9UyhyohiMeTWZKxAF0cAJOgNO3vH
+1H86dkUn/ZuunURxj6tiYOWd6FndDiUXFPiDsLb8XyeLaaLcMbkXoSeSlsFzv6Pw
+uJe54vuXbUQBsGhoR0uEVjsZZvgLbBv1RKmu
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCY7AFu9K6OFsiH
+okSGoEVcyoJWug2pYL8HQNrbcDOmwuyd4fDa/rn5rCMzZOZjccyiDeuGvDEyqjDm
+HV1t/UX0L9xyk7ySJ/dqWhgE92TQajypFPaenVgm9BaTfj0uPJ5UQU0a4b20z9AF
+TE0V+1xwHgwybddnW+yyYYPj8LF4qjBFhvlt9Ugf8ZAGJdtx7a/XDWVlcInUyMgj
+oGci3tluHUQ4zw/rLP55AdeYFV8iQj/uyRbruSUImioRdEfgUXWMruuNtTD+SJgK
+nrpupGAIgcYFoJc4cMAftCeWjsPSwRRfNBaRfa1M6SMH8EKGeBGhHp3z0EEJBn1c
+ie/SDWzVAgMBAAECggEAfogRK5Dz+gfqByiCEO7+VagOrtolwbeWeNb2AEpXwq1Z
+Ac5Y76uDkI4ZVkYvx6r6ykBAWOzQvH5MFavIieDeiA0uF/QcPMcrFmnTpBBb74No
+C/OXmGjS7vBa2dHDp8VqsIaT2SFeSgUFt8yJoB2rP+3s47E1YYWTVYoQioO3JQJN
+f0mSuvTnvJO9lbTWiW+yWGVkQvIciCCnHkCEwU0fHht8IoFBGNFlpWZcGiMeietr
+16GdRcmAq95q8TTCeQxkgmmL+0ZJ1BrF7llG2pGYdacawXj1eVRqOHQaFIlcKe05
+RITpuXVYOWBpBpfbQsBZaCGLe7WxHJedrFxdbqm0ZQKBgQDLUQrmIl2wz43t3sI+
+WjW6y1GwMPG9EjXUT1Boq6PNHKgw04/32QNn5IMmz4cp2Mgyz7Hc0ABDU/ZATujd
+yCkxVErPbKRDKSxSl6nLXtLpLbHFmVPfKPbNKIuyFMBsOFOtoFoVbo33wI5dI7aO
+i7sTGB3ngbq4pzCJ9dVt/t81QwKBgQDAjAtBXS8WB69l9w35tx+MgYG0LJ+ykAug
+d91pwiWqSt02fZ0nr/S/76G6B4C8eqeOnYh1RzF5isLD246rLD2Y+uuFrgasvSiS
+4qSKbpG2kk02R/DRTAglAyXI0rhYIDrYKCQPWqNMWpawT/FQQwbFjTuhmz10FyXS
+hmVztZWoBwKBgQCBdnptLibghllGxViEoaai6gJ7Ib9ceHMEXPjDnb+wxPWoGZ8L
+4AjWJ+EHXpAfqmVYTX5hL6VrOdSNAHIxftoUCiuUxwYVqesKMH6y/A9q4WjYfRi1
++fyliJLjc2lPv9IwtfGGwh3uS5ObZTlCrWES+IFaP/YozHUQ9BPSdb+lxwKBgB35
+Lv9b3CqXw6why2EmKpkax/AeSjXnyoeOYT9HY8mgodMLtt0ovPbr/McSx+2PQmon
+B8kJ7h+3hB4tHYZz+prH5MYIky1svNYwxeBu2ewL1k0u4cQTC+mHFeivNNczHTXs
++cASIf2O1IpZx3zxEirKk4/StLxPpimhlkVu7P8dAoGBAJVw2U70+PagVBPtvheu
+ZDEvxSEzrn90ivIh7Y6ZIwdSOSLW04sOVL2JAzO155u4g77jdmcxV3urr1vD9LbF
+qkBGLXx7FFC/Mn/H42qerxr16Bt6RtvVpms71UIQLYxA7caab9cqoyt0wkgqJFKX
+fj0TVODnIf+zPMDCu+frpLbA
+-----END PRIVATE KEY-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/command_line/test_parsed_options.js b/src/mongo/gotools/test/legacy26/jstests/libs/command_line/test_parsed_options.js
new file mode 100644
index 00000000000..e2ca646b63a
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/command_line/test_parsed_options.js
@@ -0,0 +1,202 @@
+// Merge the two options objects. Used as a helper when we are trying to actually compare options
+// despite the fact that our test framework adds extra stuff to it. Anything set in the second
+// options object overrides the first options object. The two objects must have the same structure.
+function mergeOptions(obj1, obj2) {
+ var obj3 = {};
+ for (var attrname in obj1) {
+ if (typeof obj1[attrname] === "object" &&
+ typeof obj2[attrname] !== "undefined") {
+ if (typeof obj2[attrname] !== "object") {
+ throw "Objects being merged must have the same structure";
+ }
+ obj3[attrname] = mergeOptions(obj1[attrname], obj2[attrname]);
+ }
+ else {
+ obj3[attrname] = obj1[attrname];
+ }
+ }
+ for (var attrname in obj2) {
+ if (typeof obj2[attrname] === "object" &&
+ typeof obj1[attrname] !== "undefined") {
+ if (typeof obj1[attrname] !== "object") {
+ throw "Objects being merged must have the same structure";
+ }
+ // Already handled above
+ }
+ else {
+ obj3[attrname] = obj2[attrname];
+ }
+ }
+ return obj3;
+}
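+
+// For example, leaf values from the second object win and nested objects
+// are merged recursively:
+//
+//   mergeOptions({ a : { b : 1 }, c : 2 }, { a : { b : 3 } })
+//   // returns { a : { b : 3 }, c : 2 }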
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongod. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongod({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongod;
+function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongod using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongod = baseMongod.adminCommand("getCmdLineOpts");
+
+ // Stop the mongod we used to get the options
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongod;
+ }
+
+ if (typeof getCmdLineOptsBaseMongod === "undefined") {
+ getCmdLineOptsBaseMongod = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongod;
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsExpected.parsed.storage.dbPath;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with options
+ var mongod = MongoRunner.runMongod(mongoRunnerConfig);
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongod.adminCommand("getCmdLineOpts");
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsResult.parsed.storage.dbPath;
+ }
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ MongoRunner.stopMongod(mongod.port);
+}
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongos. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongos({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongos;
+function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongos using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Start mongos with only the configdb option
+ var baseMongos = MongoRunner.runMongos({ configdb : baseMongod.host });
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongos = baseMongos.adminCommand("getCmdLineOpts");
+
+ // Remove the configdb option
+ delete getCmdLineOptsBaseMongos.parsed.sharding.configDB;
+
+ // Stop the mongod and mongos we used to get the options
+ MongoRunner.stopMongos(baseMongos.port);
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongos;
+ }
+
+ if (typeof getCmdLineOptsBaseMongos === "undefined") {
+ getCmdLineOptsBaseMongos = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongos;
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with no options
+ var mongod = MongoRunner.runMongod();
+
+ // Add configdb option
+ mongoRunnerConfig['configdb'] = mongod.host;
+
+ // Start mongos connected to mongod
+ var mongos = MongoRunner.runMongos(mongoRunnerConfig);
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongos.adminCommand("getCmdLineOpts");
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+
+ // Remove the configdb option
+ delete getCmdLineOptsResult.parsed.sharding.configDB;
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ MongoRunner.stopMongos(mongos.port);
+ MongoRunner.stopMongod(mongod.port);
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/disable_noscripting.ini b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/disable_noscripting.ini
new file mode 100644
index 00000000000..4cfaf3395f6
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/disable_noscripting.ini
@@ -0,0 +1 @@
+noscripting=false
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_auth.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_auth.json
new file mode 100644
index 00000000000..9f9cc84d107
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_auth.json
@@ -0,0 +1,5 @@
+{
+ "security" : {
+ "authorization" : "enabled"
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_autosplit.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_autosplit.json
new file mode 100644
index 00000000000..a0d4f8af1be
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_autosplit.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "autoSplit" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_httpinterface.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_httpinterface.json
new file mode 100644
index 00000000000..c87dabe125d
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_httpinterface.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "http" : {
+ "enabled" : true
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_indexbuildretry.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_indexbuildretry.json
new file mode 100644
index 00000000000..362db08edd3
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_indexbuildretry.json
@@ -0,0 +1,5 @@
+{
+ "storage" : {
+ "indexBuildRetry" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_journal.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_journal.json
new file mode 100644
index 00000000000..d75b94ccbc7
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_journal.json
@@ -0,0 +1,7 @@
+{
+ "storage" : {
+ "journal" : {
+ "enabled" : false
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_objcheck.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_objcheck.json
new file mode 100644
index 00000000000..b52be7382ed
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_objcheck.json
@@ -0,0 +1,5 @@
+{
+ "net" : {
+ "wireObjectCheck" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_paranoia.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_paranoia.json
new file mode 100644
index 00000000000..218646b1662
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_paranoia.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "archiveMovedChunks" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_prealloc.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_prealloc.json
new file mode 100644
index 00000000000..15ecefbb546
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_prealloc.json
@@ -0,0 +1,5 @@
+{
+ "storage" : {
+ "preallocDataFiles" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_scripting.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_scripting.json
new file mode 100644
index 00000000000..e8f32f2c23c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_scripting.json
@@ -0,0 +1,5 @@
+{
+ "security" : {
+ "javascriptEnabled" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_unixsocket.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_unixsocket.json
new file mode 100644
index 00000000000..660d21eb17f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/enable_unixsocket.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "unixDomainSocket" : {
+ "enabled" : true
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_profiling.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_profiling.json
new file mode 100644
index 00000000000..944f0de1575
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_profiling.json
@@ -0,0 +1,5 @@
+{
+ "operationProfiling" : {
+ "mode" : "all"
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_replsetname.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_replsetname.json
new file mode 100644
index 00000000000..522ca2b766f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_replsetname.json
@@ -0,0 +1,5 @@
+{
+ "replication" : {
+ "replSetName" : "myconfigname"
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_shardingrole.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_shardingrole.json
new file mode 100644
index 00000000000..71f92f122db
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_shardingrole.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "clusterRole" : "configsvr"
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_verbosity.json b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_verbosity.json
new file mode 100644
index 00000000000..47a1cce1b03
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/config_files/set_verbosity.json
@@ -0,0 +1,5 @@
+{
+ "systemLog" : {
+ "verbosity" : 5
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/crl.pem b/src/mongo/gotools/test/legacy26/jstests/libs/crl.pem
new file mode 100644
index 00000000000..dce0a0fb3f1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/crl.pem
@@ -0,0 +1,10 @@
+-----BEGIN X509 CRL-----
+MIIBazCB1QIBATANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMxETAPBgNV
+BAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUx
+MEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1dGhvcml0
+eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzFw0xMjEyMTIxODQ3NDFaFw00
+MDA0MjgxODQ3NDFaoA4wDDAKBgNVHRQEAwIBCzANBgkqhkiG9w0BAQUFAAOBgQAu
+PlPDGei2q6kdkoHe8vmDuts7Hm/o9LFbBmn0XUcfHisCJCPsJTyGCsgnfIiBcXJY
+1LMKsQFnYGv28rE2ZPpFg2qNxL+6qUEzCvqaHLX9q1V0F+f8hHDxucNYu52oo/h0
+uNZxB1KPFI2PReG5d3oUYqJ2+EctKkrGtxSPzbN0gg==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/crl_client_revoked.pem b/src/mongo/gotools/test/legacy26/jstests/libs/crl_client_revoked.pem
new file mode 100644
index 00000000000..85eeaff5543
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/crl_client_revoked.pem
@@ -0,0 +1,12 @@
+-----BEGIN X509 CRL-----
+MIIBujCCASMCAQEwDQYJKoZIhvcNAQEFBQAwgZIxCzAJBgNVBAYTAlVTMREwDwYD
+VQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEOMAwGA1UECgwF
+MTBHZW4xDzANBgNVBAsMBktlcm5lbDEaMBgGA1UEAwwRTXkgQ2VydCBBdXRob3Jp
+dHkxGzAZBgkqhkiG9w0BCQEWDHJvb3RAbGF6YXJ1cxcNMTMxMjA2MTUzMzUwWhcN
+MTQwMTA1MTUzMzUwWjBMMBICAQwXDTEzMTIwNjE1MjczMFowGgIJAJGUg/wuW1KD
+Fw0xMjEyMTIxODQ4MjJaMBoCCQCRlIP8LltShRcNMTIxMjEyMTg0ODUyWqAOMAww
+CgYDVR0UBAMCAQ4wDQYJKoZIhvcNAQEFBQADgYEAERPfPdQnIafo1lYbFEx2ojrb
+eYqvWN9ykTyUGq2bKv+STYiuaKUz6daGVjELjn/safn5wHkYr9+C/kRRoCor5HYw
+N3uxHnkMpl6Xn7kgXL2b0jbdvfa44faOXdH2gbhzd8bFsOMra4QJHT6CgpYb3ei1
++ePhAd1KS7tS/dyyP4c=
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/crl_expired.pem b/src/mongo/gotools/test/legacy26/jstests/libs/crl_expired.pem
new file mode 100644
index 00000000000..88307503240
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/crl_expired.pem
@@ -0,0 +1,10 @@
+-----BEGIN X509 CRL-----
+MIIBazCB1QIBATANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMxETAPBgNV
+BAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUx
+MEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1dGhvcml0
+eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzFw0xMjEyMTIxODQwNTBaFw0x
+MzAxMTExODQwNTBaoA4wDDAKBgNVHRQEAwIBAzANBgkqhkiG9w0BAQUFAAOBgQBs
+jyvEdX8o0+PfRJsEv5oLwgp5y+YmKjRlXg2oj/ETxBDKNYtBY7B9Uu9q0chFtwTu
+XMXeEFWuxnKG+4Ovp6JmNcCKkttUwsWQuR6dGpClW6ttTk0putAWtDnqukTPlEQ2
+XU3wco7ZgrTphvuGpaIQLM1sQg9x8SfW3q6/hxYm3A==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_first.journal b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_first.journal
new file mode 100644
index 00000000000..687317844a7
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_first.journal
Binary files differ
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_last.journal b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_last.journal
new file mode 100644
index 00000000000..7dd98e2c97b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_bad_last.journal
Binary files differ
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_good.journal b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_good.journal
new file mode 100644
index 00000000000..d76790d2451
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/dur_checksum_good.journal
Binary files differ
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/fts.js b/src/mongo/gotools/test/legacy26/jstests/libs/fts.js
new file mode 100644
index 00000000000..73b7d339ba5
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/fts.js
@@ -0,0 +1,18 @@
+
+function queryIDS( coll, search, filter, extra ){
+ var cmd = { search : search }
+ if ( filter )
+ cmd.filter = filter;
+ if ( extra )
+ Object.extend( cmd, extra );
+ lastCommandResult = coll.runCommand( "text" , cmd);
+
+ return getIDS( lastCommandResult );
+}
+
+function getIDS( commandResult ){
+ if ( ! ( commandResult && commandResult.results ) )
+ return []
+
+ return commandResult.results.map( function(z){ return z.obj._id; } )
+}
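+
+// Illustrative call (collection name and search term are hypothetical):
+//
+//   var ids = queryIDS( db.foo, "coffee" ); // array of matching _id values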
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/fun.js b/src/mongo/gotools/test/legacy26/jstests/libs/fun.js
new file mode 100644
index 00000000000..276f32a8f40
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/fun.js
@@ -0,0 +1,32 @@
+// General high-order functions
+
+function forEach (action, array) {
+ for (var i = 0; i < array.length; i++)
+ action (array[i]);
+}
+
+function foldl (combine, base, array) {
+ for (var i = 0; i < array.length; i++)
+ base = combine (base, array[i]);
+ return base
+}
+
+function foldr (combine, base, array) {
+ for (var i = array.length - 1; i >= 0; i--)
+ base = combine (array[i], base);
+ return base
+}
+
+function map (func, array) {
+ var result = [];
+ for (var i = 0; i < array.length; i++)
+ result.push (func (array[i]));
+ return result
+}
+
+function filter (pred, array) {
+ var result = []
+ for (var i = 0; i < array.length; i++)
+ if (pred (array[i])) result.push (array[i]);
+ return result
+}
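+
+// For example:
+//   foldl (function (a, b) { return a + b; }, 0, [1, 2, 3]); // 6
+//   map (function (x) { return x * 2; }, [1, 2, 3]);         // [2, 4, 6]
+//   filter (function (x) { return x % 2 == 0; }, [1, 2, 3]); // [2]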
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/geo_near_random.js b/src/mongo/gotools/test/legacy26/jstests/libs/geo_near_random.js
new file mode 100644
index 00000000000..60cb7733f5d
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/geo_near_random.js
@@ -0,0 +1,99 @@
+GeoNearRandomTest = function(name) {
+ this.name = name;
+ this.t = db[name];
+ this.nPts = 0;
+
+ // reset state
+ this.t.drop();
+ Random.srand(1234);
+
+ print("starting test: " + name);
+}
+
+
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){
+ if(!indexBounds){
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+ }
+ else{
+ var range = indexBounds.max - indexBounds.min;
+ var eps = Math.pow(2, -40);
+ // Go very close to the borders but not quite there.
+ return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min];
+ }
+
+}
+
+GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
+ assert.eq(this.nPts, 0, "insertPts already called");
+ this.nPts = nPts;
+
+ for (var i=0; i<nPts; i++){
+ this.t.insert({_id: i, loc: this.mkPt(scale, indexBounds)});
+ }
+
+ if(!indexBounds)
+ this.t.ensureIndex({loc: '2d'});
+ else
+ this.t.ensureIndex({loc: '2d'}, indexBounds)
+}
+
+GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
+ for (var i=0; i < short.length; i++){
+
+ var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0]
+ var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1]
+ var dS = short[i].obj ? short[i].dis : 1
+
+ var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0]
+ var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1]
+ var dL = long[i].obj ? long[i].dis : 1
+
+ assert.eq([xS, yS, dS], [xL, yL, dL]);
+ }
+}
+
+GeoNearRandomTest.prototype.testPt = function(pt, opts) {
+ assert.neq(this.nPts, 0, "insertPts not yet called");
+
+ opts = opts || {};
+ opts['sphere'] = opts['sphere'] || 0;
+ opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 )
+
+ print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
+
+
+ var cmd = {geoNear:this.t.getName(), near: pt, num: 1, spherical:opts.sphere};
+
+ var last = db.runCommand(cmd).results;
+ for (var i=2; i <= opts.nToTest; i++){
+ //print(i); // uncomment to watch status
+ cmd.num = i
+ var ret = db.runCommand(cmd).results;
+
+ try {
+ this.assertIsPrefix(last, ret);
+ } catch (e) {
+ print("*** failed while compairing " + (i-1) + " and " + i);
+ printjson(cmd);
+ throw e; // rethrow
+ }
+
+ last = ret;
+ }
+
+
+ if (!opts.sharded){
+ last = last.map(function(x){return x.obj});
+
+ var query = {loc:{}};
+ query.loc[ opts.sphere ? '$nearSphere' : '$near' ] = pt;
+ var near = this.t.find(query).limit(opts.nToTest).toArray();
+
+ this.assertIsPrefix(last, near);
+ assert.eq(last, near);
+ }
+}
+
+
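+// Typical use, as a sketch (the collection name is hypothetical):
+//
+//   var test = new GeoNearRandomTest('geo_near_random_example');
+//   test.insertPts(50);
+//   test.testPt([0, 0]);
+//   test.testPt(test.mkPt());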
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/grid.js b/src/mongo/gotools/test/legacy26/jstests/libs/grid.js
new file mode 100644
index 00000000000..3a1253d83cd
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/grid.js
@@ -0,0 +1,171 @@
+// Grid infrastructure: Servers, ReplicaSets, ConfigSets, Shards, Routers (mongos). Convenient objects and functions on top of those in shell/servers.js -Tony
+
+load('jstests/libs/fun.js')
+load('jstests/libs/network.js')
+
+// New servers and routers take and increment port number from this.
+// A comment containing FreshPorts monad implies reading and incrementing this, IO may also read/increment this.
+var nextPort = 31000
+
+/*** Server is the spec of a mongod, ie. all its command line options.
+ To start a server call 'begin' ***/
+// new Server :: String -> FreshPorts Server
+function Server (name) {
+ this.addr = '127.0.0.1';
+ this.dirname = name + nextPort;
+ this.args = { port : nextPort++,
+ noprealloc : '',
+ smallfiles : '',
+ rest : '',
+ oplogSize : 8 }
+}
+
+// Server -> String <addr:port>
+Server.prototype.host = function() {
+ return this.addr + ':' + this.args.port
+}
+
+// Start a new server with this spec and return connection to it
+// Server -> IO Connection
+Server.prototype.begin = function() {
+ return startMongodTest(this.args.port, this.dirname, false, this.args);
+}
+
+// Stop server and remove db directory
+// Server -> IO ()
+Server.prototype.end = function() {
+ print('Stopping mongod on port ' + this.args.port)
+ stopMongod (this.args.port)
+ resetDbpath (MongoRunner.dataPath + this.dirname)
+}
+
+// Cut server from network so it is unreachable (but still alive)
+// Requires sudo access and ipfw program (Mac OS X and BSD Unix). TODO: use iptables on Linux.
+function cutServer (conn) {
+ var addrport = parseHost (conn.host)
+ cutNetwork (addrport.port)
+}
+
+// Ensure server is connected to network (undo cutServer)
+// Requires sudo access and ipfw program (Mac OS X and BSD Unix). TODO: use iptables on Linux.
+function uncutServer (conn) {
+ var iport = parseHost (conn.host)
+ restoreNetwork (iport.port)
+}
+
+// Kill server process at other end of this connection
+function killServer (conn, _signal) {
+ var signal = _signal || 15
+ var iport = parseHost (conn.host)
+ stopMongod (iport.port, signal)
+}
+
+/*** ReplicaSet is the spec of a replica set, ie. options given to ReplicaSetTest.
+ To start a replica set call 'begin' ***/
+// new ReplicaSet :: String -> Int -> FreshPorts ReplicaSet
+function ReplicaSet (name, numServers) {
+ this.name = name
+ this.host = '127.0.0.1'
+ this.nodes = numServers
+ this.startPort = nextPort
+ this.oplogSize = 40
+ nextPort += numServers
+}
+
+// Start a replica set with this spec and return ReplSetTest, which hold connections to the servers including the master server. Call ReplicaSetTest.stopSet() to end all servers
+// ReplicaSet -> IO ReplicaSetTest
+ReplicaSet.prototype.begin = function() {
+ var rs = new ReplSetTest(this)
+ rs.startSet()
+ rs.initiate()
+ rs.awaitReplication()
+ return rs
+}
+
+// Create a new server and add it to replica set
+// ReplicaSetTest -> IO Connection
+ReplSetTest.prototype.addServer = function() {
+ var conn = this.add()
+ nextPort++
+ this.reInitiate()
+ this.awaitReplication(60000)
+ assert.soon(function() {
+ var doc = conn.getDB('admin').isMaster()
+ return doc['ismaster'] || doc['secondary']
+ })
+ return conn
+}
+
+/*** ConfigSet is a set of specs (Servers) for sharding config servers.
+ Supply either the servers or the number of servers desired.
+ To start the config servers call 'begin' ***/
+// new ConfigSet :: [Server] or Int -> FreshPorts ConfigSet
+function ConfigSet (configSvrsOrNumSvrs) {
+ if (typeof configSvrsOrNumSvrs == 'number') {
+ this.configSvrs = []
+ for (var i = 0; i < configSvrsOrNumSvrs; i++)
+ this.configSvrs.push (new Server ('config'))
+ } else
+ this.configSvrs = configSvrsOrNumSvrs
+}
+
+// Start config servers, return list of connections to them
+// ConfigSet -> IO [Connection]
+ConfigSet.prototype.begin = function() {
+ return map (function(s) {return s.begin()}, this.configSvrs)
+}
+
+// Stop config servers
+// ConfigSet -> IO ()
+ConfigSet.prototype.end = function() {
+ return map (function(s) {return s.end()}, this.configSvrs)
+}
+
+/*** Router is the spec for a mongos, ie, its command line options.
+ To start a router (mongos) call 'begin' ***/
+// new Router :: ConfigSet -> FreshPorts Router
+function Router (configSet) {
+ this.args = { port : nextPort++,
+ v : 0,
+ configdb : map (function(s) {return s.host()}, configSet.configSvrs) .join(','),
+ chunkSize : 1}
+}
+
+// Start router (mongos) with this spec and return connection to it.
+// Router -> IO Connection
+Router.prototype.begin = function() {
+ return startMongos (this.args);
+}
+
+// Stop router
+// Router -> IO ()
+Router.prototype.end = function() {
+ return stopMongoProgram (this.args.port)
+}
+
+// Add shard to config via router (mongos) connection. Shard is either a replSet name (replSet.getURL()) or single server (server.host)
+// Connection -> String -> IO ()
+function addShard (routerConn, repSetOrHostName) {
+ var ack = routerConn.getDB('admin').runCommand ({addshard: repSetOrHostName})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Connection -> String -> IO ()
+function enableSharding (routerConn, dbName) {
+ var ack = routerConn.getDB('admin').runCommand ({enablesharding: dbName})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Connection -> String -> String -> String -> IO ()
+function shardCollection (routerConn, dbName, collName, shardKey) {
+ var ack = routerConn.getDB('admin').runCommand ({shardcollection: dbName + '.' + collName, key: shardKey})
+ assert (ack['ok'], tojson(ack))
+}
+
+// Move db from its current primary shard to given shard. Shard is either a replSet name (replSet.getURL()) or single server (server.host)
+// Connection -> String -> String -> IO ()
+function moveDB (routerConn, dbname, repSetOrHostName) {
+ var ack = routerConn.getDB('admin').runCommand ({moveprimary: dbname, to: repSetOrHostName})
+ printjson(ack)
+ assert (ack['ok'], tojson(ack))
+}
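+
+// A minimal usage sketch of the pieces above (port numbers come from the
+// FreshPorts counter, so exact values will vary):
+//
+//   var configs = new ConfigSet(3)
+//   configs.begin()
+//   var router = new Router(configs)
+//   var routerConn = router.begin()
+//   var shard = new Server('shard')
+//   shard.begin()
+//   addShard(routerConn, shard.host())
+//   enableSharding(routerConn, 'test')
+//   shardCollection(routerConn, 'test', 'foo', { _id : 1 })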
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/key1 b/src/mongo/gotools/test/legacy26/jstests/libs/key1
new file mode 100644
index 00000000000..b5c19e4092f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/key1
@@ -0,0 +1 @@
+foop de doop
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/key2 b/src/mongo/gotools/test/legacy26/jstests/libs/key2
new file mode 100644
index 00000000000..cbde8212841
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/key2
@@ -0,0 +1 @@
+other key
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameCN.pem b/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameCN.pem
new file mode 100644
index 00000000000..e181139b5d9
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameCN.pem
@@ -0,0 +1,101 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 8 (0x8)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Nov 6 14:31:58 2013 GMT
+ Not After : Mar 23 14:31:58 2041 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, CN=127.0.0.1
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:aa:e1:a0:6c:09:dc:fd:d0:9f:0f:b6:77:40:60:
+ f9:01:f9:9e:55:20:fe:88:04:93:c9:ab:96:93:3a:
+ ed:7e:7d:ad:e4:eb:a7:e9:07:35:ef:6e:14:64:dd:
+ 31:9b:e5:24:06:18:bb:60:67:e3:c5:49:8e:79:b6:
+ 78:07:c1:64:3f:de:c1:7d:1b:a9:96:35:d5:f9:b8:
+ b4:5e:2a:34:b7:d0:19:ad:f6:8a:00:ef:8e:b0:d5:
+ 36:1f:66:a0:7a:7d:cf:f0:98:3c:ee:0f:be:67:d2:
+ de:c3:e6:b8:79:2f:64:40:0c:39:15:97:8c:13:da:
+ 1b:db:5c:bb:a3:43:0b:74:c7:46:55:9b:ea:d7:93:
+ d5:15:2f:d1:34:ac:a9:99:3b:01:f0:c1:d7:42:89:
+ 24:bb:ab:60:99:c1:4d:9f:bf:9a:a3:92:3a:58:05:
+ e2:47:a6:8e:71:b2:0a:32:b0:c5:cc:a0:58:40:bf:
+ 09:a7:76:f5:37:ce:90:71:e0:75:89:17:ea:fb:80:
+ 24:a1:9d:6e:1b:7e:e3:44:52:d3:fe:e3:de:80:9a:
+ 8e:c3:4f:8c:bb:b4:8c:d2:a9:a9:aa:af:90:ac:b4:
+ ee:6b:d2:c5:71:1e:08:7f:4c:b6:2a:5f:13:7a:e3:
+ 29:f7:2e:bb:f7:c5:48:0a:4e:2e:1e:d4:2c:40:b3:
+ 4c:19
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ 0E:3F:54:C4:77:85:FF:93:58:A7:24:23:32:35:73:B0:BE:8C:C3:BB
+ X509v3 Authority Key Identifier:
+ keyid:07:41:19:3A:9F:7E:C5:B7:22:4E:B7:BC:D5:DF:E4:FC:09:B8:64:16
+
+ Signature Algorithm: sha1WithRSAEncryption
+ 4c:9d:31:81:b5:e9:6a:64:4c:1e:eb:91:7f:f1:66:74:46:13:
+ 19:cb:f2:3b:9a:41:f2:83:67:32:53:a6:cd:33:37:4c:92:a6:
+ 36:d4:f3:0b:56:a2:2b:66:f1:09:a7:06:36:b8:83:b7:31:70:
+ fe:bf:af:b5:3d:59:f3:f2:18:48:c7:6c:b0:90:8c:24:47:30:
+ 53:8d:c5:3e:7c:7b:33:53:15:ec:bd:8a:83:ed:05:e8:8b:21:
+ d7:65:39:69:95:c8:58:7d:4f:1b:32:51:85:2d:4d:8b:be:00:
+ 60:17:83:9b:2b:13:43:05:78:db:a4:2e:a2:cb:31:34:7e:b9:
+ 8a:72
+-----BEGIN CERTIFICATE-----
+MIIDZDCCAs2gAwIBAgIBCDANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEwNjE0
+MzE1OFoXDTQxMDMyMzE0MzE1OFowXDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjES
+MBAGA1UEAwwJMTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAquGgbAnc/dCfD7Z3QGD5AfmeVSD+iASTyauWkzrtfn2t5Oun6Qc1724UZN0x
+m+UkBhi7YGfjxUmOebZ4B8FkP97BfRupljXV+bi0Xio0t9AZrfaKAO+OsNU2H2ag
+en3P8Jg87g++Z9Lew+a4eS9kQAw5FZeME9ob21y7o0MLdMdGVZvq15PVFS/RNKyp
+mTsB8MHXQokku6tgmcFNn7+ao5I6WAXiR6aOcbIKMrDFzKBYQL8Jp3b1N86QceB1
+iRfq+4AkoZ1uG37jRFLT/uPegJqOw0+Mu7SM0qmpqq+QrLTua9LFcR4If0y2Kl8T
+euMp9y6798VICk4uHtQsQLNMGQIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG
++EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQU
+Dj9UxHeF/5NYpyQjMjVzsL6Mw7swHwYDVR0jBBgwFoAUB0EZOp9+xbciTre81d/k
+/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEATJ0xgbXpamRMHuuRf/FmdEYTGcvyO5pB
+8oNnMlOmzTM3TJKmNtTzC1aiK2bxCacGNriDtzFw/r+vtT1Z8/IYSMdssJCMJEcw
+U43FPnx7M1MV7L2Kg+0F6Ish12U5aZXIWH1PGzJRhS1Ni74AYBeDmysTQwV426Qu
+ossxNH65inI=
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCq4aBsCdz90J8P
+tndAYPkB+Z5VIP6IBJPJq5aTOu1+fa3k66fpBzXvbhRk3TGb5SQGGLtgZ+PFSY55
+tngHwWQ/3sF9G6mWNdX5uLReKjS30Bmt9ooA746w1TYfZqB6fc/wmDzuD75n0t7D
+5rh5L2RADDkVl4wT2hvbXLujQwt0x0ZVm+rXk9UVL9E0rKmZOwHwwddCiSS7q2CZ
+wU2fv5qjkjpYBeJHpo5xsgoysMXMoFhAvwmndvU3zpBx4HWJF+r7gCShnW4bfuNE
+UtP+496Amo7DT4y7tIzSqamqr5CstO5r0sVxHgh/TLYqXxN64yn3Lrv3xUgKTi4e
+1CxAs0wZAgMBAAECggEADtdh04BXzUOdTQQP/2tstRs1ATfIY4/iNhXNEiSAFAhe
+Xg+Jmdeie5UX+FqtwFh6dH0ZaRoc0jm9Qhzy99l4F4QFUhRg+kbausGsCLGpun08
+fbt36PTlc75Q4RFMxta+hKr0P8jmRKYv6tvTEdNn5ZgqLRHofKDo4nh/Y4KjMBUq
+VIMUu+VO9Ol2GPlZVRBaJec0E1+HUyzaK5JVUIFh4atcrHyXxae+rY9o6G57BBEj
+ZzlahfMI5aYj9HhXnB8RuhVBuIZBNSA41nxHmOs6JBQsatVML51RFIV4KPU+AyDR
+bdYXHJehRIUF8RL92aHjGYsvXdSxVhuUBqMIQhOwAQKBgQDUtj+p+7SHpLyQIZpU
+EQFK+42LDc6zF4uJVjq1d8fC2Hrmz8PLs0KcH36VWNbo48B3iFiPWIMID5xwLuIb
+FkLOzJ8QrbILn0zcu/hplrCiy6PZas3rpLJ+X406wLQeCikOLhQkz+cuKuQmvWkK
+eyqwBIIxg8t5dTtTAmu3w/DDgQKBgQDNqByxKduTgEND1+isUOt+L/ipR3SzXQ4m
+ZsOKiSxyXxge0/CUxPxO6WeEVGQ7bGAr5yQD9ukvJnCo3phYcuRRj+RTMrTL73Kz
+p/cyOUx2NMUIgURTsO+s3D0lC4+NmoDge0roeEDX+/lFNjqgRKJ+1LUimqbo5uNE
+EupkyTh0mQKBgGw/81ZGSjFdnLic4TU3Ejlem0HQ3Qg3S0OxJl+DfZ2jHaiowzO/
+Hn7laD4I4BXVEfXC5Y7NtKE9kJdmxJqUUZt8dta+DoXro+oRnvHdRjcS+2eB+xmY
+z12QswbbWs6OzSXyPT4er7/HBCTS78nttGOvZ7JbKAm/p1kvOjJi/PwBAoGAE7Tw
+Sum/6Lp5t56Q5TI73rOqGE6ImEdqe7ONOVE7uRnzrcCRZTAbHVSwXrXXhPo1nP9h
+LCAU6De+w+/QmWkpB8fKEU7ilEg1rZGC1oU3FnyoBNCeQ4bI8L+J/GrHLsKHZvtp
+ii07yXaTxFYV+BWbnJu1X8OCCv9U98j4PQArMMECgYEAm6uLN647vb+ZhzNBMtsX
+1wnMSgzbgGpgjhWwk6dNmw8YJNKg9CFa8sQ8N7yKXWBEF/RkU0kfzZL8iddHEb/k
+Ti1BlwrEzFfIQLlBfv47tYWOj8ZxN0ujlzUoN2VAC25LZhjcQCo3ftBk2lkrmllu
+MxjxBfRk/teUdRl80oi5R0w=
+-----END PRIVATE KEY-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameSAN.pem b/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameSAN.pem
new file mode 100644
index 00000000000..beb0bb91b61
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/localhostnameSAN.pem
@@ -0,0 +1,100 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 9 (0x9)
+ Signature Algorithm: sha1WithRSAEncryption
+ Issuer: C=US, ST=New York, L=New York City, O=10Gen, OU=Kernel, CN=My Cert Authority/emailAddress=root@lazarus
+ Validity
+ Not Before: Nov 6 14:45:13 2013 GMT
+ Not After : Mar 23 14:45:13 2041 GMT
+ Subject: C=US, ST=New York, L=New York City, O=10Gen, CN=santesthostname.com
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:c9:83:7a:75:42:cf:35:a4:95:c7:c8:d8:4d:19:
+ 0e:89:87:d5:bd:f9:2f:ee:20:2c:4c:ca:6d:0b:c1:
+ 10:5b:06:1b:c4:a1:26:12:25:06:7a:1e:d1:e6:d0:
+ 91:2b:a3:c8:74:de:95:10:d9:ff:20:03:ec:84:db:
+ 49:d9:a4:e9:c2:93:f0:d2:32:01:a6:55:db:14:bf:
+ 16:fe:88:e0:e4:46:0f:6a:bd:27:95:45:2e:8d:13:
+ e2:99:09:74:e4:2b:32:c3:6d:61:0c:86:85:eb:12:
+ f5:dc:9e:7b:d3:00:a3:ce:f4:8a:4b:51:7f:a2:c6:
+ 0b:52:a4:f1:41:d5:01:53:88:99:b9:3b:29:f8:43:
+ 5e:a4:c7:41:d9:d3:34:43:f2:c7:a6:8d:22:1c:f9:
+ b2:63:cb:df:83:9c:6f:ec:e3:b0:63:af:0b:51:c9:
+ 20:ca:c2:59:c1:2c:ec:de:37:18:76:3d:73:85:82:
+ 12:11:cd:b6:ef:2f:7b:64:cd:a3:2d:f6:7a:54:7f:
+ b3:4f:c9:38:f4:62:b6:da:00:f0:59:df:e1:d3:15:
+ ca:4b:73:6c:22:c1:9a:c1:51:c4:28:59:0f:71:2a:
+ 39:e9:17:08:9d:b0:88:61:a7:53:67:da:dc:fb:6e:
+ 38:f7:a8:cd:cd:88:ed:d9:4c:88:f4:a4:75:5e:3f:
+ 8b:ff
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Cert Type:
+ SSL Server
+ Netscape Comment:
+ OpenSSL Certificate for SSL Server
+ X509v3 Subject Alternative Name:
+ DNS:*.example.com, DNS:127.0.0.1, DNS:morefun!, IP Address:154.2.2.3, email:user@host.com
+ Signature Algorithm: sha1WithRSAEncryption
+ 0b:82:c6:7d:e0:ba:71:24:d6:a8:f4:cb:6f:0f:f6:69:28:32:
+ 98:81:e6:14:49:81:07:ff:92:dd:0a:a4:68:3c:92:00:e5:8c:
+ 43:d1:29:04:4a:5e:f2:b1:db:d2:ca:5d:7d:fc:fe:7b:f5:01:
+ 65:87:25:cd:4c:68:09:16:bd:c7:b0:a4:d2:89:5e:dd:92:44:
+ 6c:6e:7a:fe:7e:05:e2:2b:56:96:96:16:44:4a:01:87:8f:0c:
+ df:35:88:97:3e:e5:21:23:a2:af:87:ad:ee:f7:9e:05:36:f7:
+ 96:88:c8:fa:92:33:c2:60:2e:14:d9:ea:34:ab:04:a6:78:04:
+ be:da
+-----BEGIN CERTIFICATE-----
+MIIDjDCCAvWgAwIBAgIBCTANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTEwNjE0
+NDUxM1oXDTQxMDMyMzE0NDUxM1owZjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEc
+MBoGA1UEAwwTc2FudGVzdGhvc3RuYW1lLmNvbTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAMmDenVCzzWklcfI2E0ZDomH1b35L+4gLEzKbQvBEFsGG8Sh
+JhIlBnoe0ebQkSujyHTelRDZ/yAD7ITbSdmk6cKT8NIyAaZV2xS/Fv6I4ORGD2q9
+J5VFLo0T4pkJdOQrMsNtYQyGhesS9dyee9MAo870iktRf6LGC1Kk8UHVAVOImbk7
+KfhDXqTHQdnTNEPyx6aNIhz5smPL34Ocb+zjsGOvC1HJIMrCWcEs7N43GHY9c4WC
+EhHNtu8ve2TNoy32elR/s0/JOPRittoA8Fnf4dMVyktzbCLBmsFRxChZD3EqOekX
+CJ2wiGGnU2fa3PtuOPeozc2I7dlMiPSkdV4/i/8CAwEAAaOBmDCBlTAJBgNVHRME
+AjAAMBEGCWCGSAGG+EIBAQQEAwIGQDAxBglghkgBhvhCAQ0EJBYiT3BlblNTTCBD
+ZXJ0aWZpY2F0ZSBmb3IgU1NMIFNlcnZlcjBCBgNVHREEOzA5gg0qLmV4YW1wbGUu
+Y29tggkxMjcuMC4wLjGCCG1vcmVmdW4hhwSaAgIDgQ11c2VyQGhvc3QuY29tMA0G
+CSqGSIb3DQEBBQUAA4GBAAuCxn3gunEk1qj0y28P9mkoMpiB5hRJgQf/kt0KpGg8
+kgDljEPRKQRKXvKx29LKXX38/nv1AWWHJc1MaAkWvcewpNKJXt2SRGxuev5+BeIr
+VpaWFkRKAYePDN81iJc+5SEjoq+Hre73ngU295aIyPqSM8JgLhTZ6jSrBKZ4BL7a
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJg3p1Qs81pJXH
+yNhNGQ6Jh9W9+S/uICxMym0LwRBbBhvEoSYSJQZ6HtHm0JEro8h03pUQ2f8gA+yE
+20nZpOnCk/DSMgGmVdsUvxb+iODkRg9qvSeVRS6NE+KZCXTkKzLDbWEMhoXrEvXc
+nnvTAKPO9IpLUX+ixgtSpPFB1QFTiJm5Oyn4Q16kx0HZ0zRD8semjSIc+bJjy9+D
+nG/s47BjrwtRySDKwlnBLOzeNxh2PXOFghIRzbbvL3tkzaMt9npUf7NPyTj0Yrba
+APBZ3+HTFcpLc2wiwZrBUcQoWQ9xKjnpFwidsIhhp1Nn2tz7bjj3qM3NiO3ZTIj0
+pHVeP4v/AgMBAAECggEAbaQ12ttQ9rToMd2bosdBW58mssiERaIHuHhjQIP5LC10
+qlWr6y9uCMAAIP/WHNJuXPhGTvbtkzPPWrIdymeqMI5h91vx/di07OLT1gYPpuRf
+uwnUIamUnHn3TqEQkpzWb/JxXWlMMA0O7MzmPnYYqp/vJu/e7Geo/Xx1MAZ/RD0U
+YUvrjAyHcor01VVa/eV69jL+6x9ExFNmRYRbmjmK/f10R4o86nIfqhXbM8qKsT6x
+1U/S2I4oModm0x12PgiMDMDzVD+cNE/h8lSnFtBTNEY3xRe7CZnhMV4nBVGjWi9D
+XjcIBA0kGd4G10ploiF+37J/PQbyodLA/Y30BIYCkQKBgQD6XvEzd4DbBa08pcCa
+CYZd5pyAHur1GzJ4rTQNqB84hzuyG6dKkk0rPXjExrj/GAtGWg2ohggmC5OPInKM
+WdpMC56Q0aZYMId3Be/Wg4kRgFO0YOsrx0dRVi5nwbRXkMjXbfewSopwbzP5hIo1
+7rfOhdhbjXx6W269FPE4Epmj1QKBgQDOC1QjGeEzwEgSq3LuojRLHFo31pWYr7UU
+sxhpoWMB6ImPMVjXaEsRKfc7Gulpee1KVQLVmzbkqrHArVNXEpuG4egRwZ10UJ0L
+v4PqrElyHKxgAvllflkkMSX4rx791T+AZMq6W5VX1fKiojfvSLzmEFaI6VmS43GZ
+KCz9RFbegwKBgHSE4vP01b8YsTrcWPpXHHVu8b6epPJVKfQHh4YjjAQey6VkQULv
+O4K4JRBO+6GcawLeviSD3B74nD+s5Gp1Fqb1cWIsb6HzU9gMp0XKCWxfsJTt1gSV
+xZcQ6J/ZAjkOZKn9v5wH1M3msuWYzUm0Q06V888H1bqL+sl8iZZy8ZXRAoGBALf6
+GZh2BUYGTNSOzkMSBouCt3PgYRdC3PesqwG2nwcXMazwLRm6AD1FMYJPF1edDSow
+GiXNQAiR+cHHggDflourr2IbdZJkYLYavZmPWM1RmQDp5vKfDM1qLTOOeqe//8GP
+Pg2EtScG3G4nVraMRk9PC1WYtuiXudk9rF5A5SgtAoGBAL1oVSnQpi5tzBNJqhzM
+mQIF7ct5WNj2b1lKqqsXUTd2pcgMCRrryatqH+gLz1rAjtbVfx2FAYkutH5TFgqP
+c4uomUH3so1EjEA8GtFS9SSkLn5nIr4TnVy4+Qsr1svOo8mhtztORXz+xOTxR6ud
+p7rd/YEbc5GhNSXlcW+apZW+
+-----END PRIVATE KEY-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/mockkrb5.conf b/src/mongo/gotools/test/legacy26/jstests/libs/mockkrb5.conf
new file mode 100644
index 00000000000..0f004f2de8a
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/mockkrb5.conf
@@ -0,0 +1,13 @@
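+# Minimal krb5 client configuration pointing at the mock KDC used by the
+# Kerberos tests. The 10GEN.ME realm and kdc.10gen.me hosts are test
+# fixtures, not real infrastructure.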
+[libdefaults]
+ default_realm = 10GEN.ME
+
+[realms]
+ 10GEN.ME = {
+ kdc = kdc.10gen.me
+ admin_server = kdc.10gen.me
+ default_domain = 10gen.me
+ }
+
+[domain_realm]
+ .10gen.me = 10GEN.ME
+ 10gen.me = 10GEN.ME
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/mockservice.keytab b/src/mongo/gotools/test/legacy26/jstests/libs/mockservice.keytab
new file mode 100644
index 00000000000..3529d5fcbc6
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/mockservice.keytab
Binary files differ
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/mockuser.keytab b/src/mongo/gotools/test/legacy26/jstests/libs/mockuser.keytab
new file mode 100644
index 00000000000..35fd2ff06e7
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/mockuser.keytab
Binary files differ
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/network.js b/src/mongo/gotools/test/legacy26/jstests/libs/network.js
new file mode 100644
index 00000000000..e5b33f3219e
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/network.js
@@ -0,0 +1,37 @@
+
+// Parse "127.0.0.1:300" into {addr: "127.0.0.1", port: 300},
+// and "127.0.0.1" into {addr: "127.0.0.1", port: undefined}
+function parseHost (hostString) {
+ var items = hostString.match(/(\d+.\d+.\d+.\d+)(:(\d+))?/)
+ return {addr: items[1], port: parseInt(items[3])}
+}
+
+
+/* Network traffic shaping (packet dropping) to simulate network problems
+ Currently works on BSD Unix and Mac OS X only (using ipfw).
+ Requires sudo access.
+ TODO: make it work on Linux too (using iptables). */
+
+var nextRuleNum = 100 // this grows indefinitely but can't exceed 65534, so can't call routines below indefinitely
+var portRuleNum = {}
+
+// Cut network connection to local port by dropping packets using ipfw
+function cutNetwork (port) {
+ portRuleNum[port] = nextRuleNum
+ runProgram ('sudo', 'ipfw', 'add ' + nextRuleNum++ + ' deny tcp from any to any ' + port)
+ runProgram ('sudo', 'ipfw', 'add ' + nextRuleNum++ + ' deny tcp from any ' + port + ' to any')
+ //TODO: confirm it worked (since sudo may not work)
+ runProgram ('sudo', 'ipfw', 'show')
+}
+
+// Restore network connection to local port by removing the packet-dropping ipfw rules
+function restoreNetwork (port) {
+ var ruleNum = portRuleNum[port]
+ if (ruleNum) {
+ runProgram ('sudo', 'ipfw', 'delete ' + ruleNum++)
+ runProgram ('sudo', 'ipfw', 'delete ' + ruleNum)
+ delete portRuleNum[port]
+ }
+ //TODO: confirm it worked (since sudo may not work)
+ runProgram ('sudo', 'ipfw', 'show')
+}
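+
+// Usage sketch (illustrative only, not part of the original file; assumes a
+// mongod listening on port 27017 and working sudo/ipfw):
+// cutNetwork(27017); // start dropping packets to and from the port
+// ... exercise the failure-handling code under test ...
+// restoreNetwork(27017); // delete the ipfw rules again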
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/parallelTester.js b/src/mongo/gotools/test/legacy26/jstests/libs/parallelTester.js
new file mode 100644
index 00000000000..d5cb5346abe
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/parallelTester.js
@@ -0,0 +1,259 @@
+/**
+ * The ParallelTester class is used to run more than one test concurrently
+ */
+
+
+if ( typeof _threadInject != "undefined" ){
+ //print( "fork() available!" );
+
+ Thread = function(){
+ this.init.apply( this, arguments );
+ }
+ _threadInject( Thread.prototype );
+
+ ScopedThread = function() {
+ this.init.apply( this, arguments );
+ }
+ ScopedThread.prototype = new Thread( function() {} );
+ _scopedThreadInject( ScopedThread.prototype );
+
+ fork = function() {
+ var t = new Thread( function() {} );
+ Thread.apply( t, arguments );
+ return t;
+ }
+
+ // Helper class to generate a list of events which may be executed by a ParallelTester
+ EventGenerator = function( me, collectionName, mean, host ) {
+ this.mean = mean;
+ if (host == undefined) host = db.getMongo().host;
+ this.events = new Array( me, collectionName, host );
+ }
+
+ EventGenerator.prototype._add = function( action ) {
+ this.events.push( [ Random.genExp( this.mean ), action ] );
+ }
+
+ EventGenerator.prototype.addInsert = function( obj ) {
+ this._add( "t.insert( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addRemove = function( obj ) {
+ this._add( "t.remove( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addUpdate = function( objOld, objNew ) {
+ this._add( "t.update( " + tojson( objOld ) + ", " + tojson( objNew ) + " )" );
+ }
+
+ EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {
+ query = query || {};
+ shouldPrint = shouldPrint || false;
+ checkQuery = checkQuery || false;
+ var action = "assert.eq( " + count + ", t.count( " + tojson( query ) + " ) );"
+ if ( checkQuery ) {
+ action += " assert.eq( " + count + ", t.find( " + tojson( query ) + " ).toArray().length );"
+ }
+ if ( shouldPrint ) {
+ action += " print( me + ' ' + " + count + " );";
+ }
+ this._add( action );
+ }
+
+ EventGenerator.prototype.getEvents = function() {
+ return this.events;
+ }
+
+ EventGenerator.dispatch = function() {
+ var args = argumentsToArray( arguments );
+ var me = args.shift();
+ var collectionName = args.shift();
+ var host = args.shift();
+ var m = new Mongo( host );
+ var t = m.getDB( "test" )[ collectionName ];
+ for( var i in args ) {
+ sleep( args[ i ][ 0 ] );
+ eval( args[ i ][ 1 ] );
+ }
+ }
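+
+ // Usage sketch (illustrative only): build a random event stream and hand it
+ // to a ParallelTester; the collection name and counts are placeholders, and
+ // Random.setRandomSeed() must have been called for Random.genExp to work.
+ // var eg = new EventGenerator(0, "pcoll", 20 /* mean ms between events */);
+ // eg.addInsert({a: 1});
+ // eg.addCheckCount(1, {a: 1});
+ // var pt = new ParallelTester();
+ // pt.add(EventGenerator.dispatch, eg.getEvents());
+ // pt.run("event stream failed");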
+
+ // Helper class for running tests in parallel. It assembles a set of tests
+ // and then calls assert.parallelTests to run them.
+ ParallelTester = function() {
+ assert.neq(db.getMongo().writeMode(), "legacy", "wrong shell write mode")
+ this.params = new Array();
+ }
+
+ ParallelTester.prototype.add = function( fun, args ) {
+ args = args || [];
+ args.unshift( fun );
+ this.params.push( args );
+ }
+
+ ParallelTester.prototype.run = function( msg, newScopes ) {
+ newScopes = newScopes || false;
+ assert.parallelTests( this.params, msg, newScopes );
+ }
+
+ // creates lists of tests from jstests dir in a format suitable for use by
+ // ParallelTester.fileTester. The lists will be in random order.
+ // n: number of lists to split these tests into
+ ParallelTester.createJstestsLists = function( n ) {
+ var params = new Array();
+ for( var i = 0; i < n; ++i ) {
+ params.push( [] );
+ }
+
+ var makeKeys = function( a ) {
+ var ret = {};
+ for( var i in a ) {
+ ret[ a[ i ] ] = 1;
+ }
+ return ret;
+ }
+
+ // some tests can't run in parallel with most others
+ var skipTests = makeKeys([ "dbadmin.js",
+ "repair.js",
+ "cursor8.js",
+ "recstore.js",
+ "extent.js",
+ "indexb.js",
+
+ // tests turn on profiling
+ "profile1.js",
+ "profile3.js",
+ "profile4.js",
+ "profile5.js",
+
+ "mr_drop.js",
+ "mr3.js",
+ "indexh.js",
+ "apitest_db.js",
+ "evalb.js",
+ "evald.js",
+ "evalf.js",
+ "killop.js",
+ "run_program1.js",
+ "notablescan.js",
+ "drop2.js",
+ "dropdb_race.js",
+ "fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed.
+ "bench_test1.js",
+ "padding.js",
+ "queryoptimizera.js",
+ "loglong.js",// log might overflow before
+ // this has a chance to see the message
+ "connections_opened.js", // counts connections, globally
+ "opcounters.js",
+ "currentop.js", // SERVER-8673, plus rwlock yielding issues
+ "set_param1.js", // changes global state
+ "geo_update_btree2.js", // SERVER-11132 test disables table scans
+ "update_setOnInsert.js", // SERVER-9982
+ ] );
+
+ var parallelFilesDir = "jstests/core";
+
+ // some tests can't be run in parallel with each other
+ var serialTestsArr = [ parallelFilesDir + "/fsync.js",
+ parallelFilesDir + "/auth1.js",
+
+ // These tests expect the profiler to be on or off at specific points
+ // during the test run.
+ parallelFilesDir + "/cursor6.js",
+ parallelFilesDir + "/profile2.js",
+ parallelFilesDir + "/updatee.js"
+ ];
+ var serialTests = makeKeys( serialTestsArr );
+
+ // prefix the first thread with the serialTests
+ // (which we will exclude from the rest of the threads below)
+ params[ 0 ] = serialTestsArr;
+ var files = listFiles( parallelFilesDir );
+ files = Array.shuffle( files );
+
+ var i = 0;
+ files.forEach(
+ function(x) {
+ if ( ( /[\/\\]_/.test(x.name) ) ||
+ ( ! /\.js$/.test(x.name) ) ||
+ ( x.name.match(parallelFilesDir + "/(.*\.js)")[1] in skipTests ) || //
+ ( x.name in serialTests )) {
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+ // add the test to run in one of the threads.
+ params[ i % n ].push( x.name );
+ ++i;
+ }
+ );
+
+ // randomize ordering of the serialTests
+ params[ 0 ] = Array.shuffle( params[ 0 ] );
+
+ for( var i in params ) {
+ params[ i ].unshift( i );
+ }
+
+ return params;
+ }
+
+ // runs a set of test files
+ // first argument is an identifier for this tester, remaining arguments are file names
+ ParallelTester.fileTester = function() {
+ var args = argumentsToArray( arguments );
+ var suite = args.shift();
+ args.forEach(
+ function( x ) {
+ print(" S" + suite + " Test : " + x + " ...");
+ var time = Date.timeFunc( function() { load(x); }, 1);
+ print(" S" + suite + " Test : " + x + " " + time + "ms" );
+ }
+ );
+ }
+
+ // params: array of arrays, each element of which consists of a function followed
+ // by zero or more arguments to that function. Each function and its arguments will
+ // be called in a separate thread.
+ // msg: failure message
+ // newScopes: if true, each thread starts in a fresh scope
+ assert.parallelTests = function( params, msg, newScopes ) {
+ newScopes = newScopes || false;
+ var wrapper = function( fun, argv ) {
+ eval (
+ "var z = function() {" +
+ "var __parallelTests__fun = " + fun.toString() + ";" +
+ "var __parallelTests__argv = " + tojson( argv ) + ";" +
+ "var __parallelTests__passed = false;" +
+ "try {" +
+ "__parallelTests__fun.apply( 0, __parallelTests__argv );" +
+ "__parallelTests__passed = true;" +
+ "} catch ( e ) {" +
+ "print('');" +
+ "print( '********** Parallel Test FAILED: ' + tojson(e) );" +
+ "print('');" +
+ "}" +
+ "return __parallelTests__passed;" +
+ "}"
+ );
+ return z;
+ }
+ var runners = new Array();
+ for( var i in params ) {
+ var param = params[ i ];
+ var test = param.shift();
+ var t;
+ if ( newScopes )
+ t = new ScopedThread( wrapper( test, param ) );
+ else
+ t = new Thread( wrapper( test, param ) );
+ runners.push( t );
+ }
+
+ runners.forEach( function( x ) { x.start(); } );
+ var nFailed = 0;
+ // v8 doesn't like it if we exit before all threads are joined (SERVER-529)
+ runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );
+ assert.eq( 0, nFailed, msg );
+ }
+}
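+
+// Usage sketch (illustrative only, not part of the original file): run the
+// core suite split across 4 concurrent threads.
+// var params = ParallelTester.createJstestsLists(4);
+// var pt = new ParallelTester();
+// for (var i in params) pt.add(ParallelTester.fileTester, params[i]);
+// pt.run("one or more tests failed");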
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/password_protected.pem b/src/mongo/gotools/test/legacy26/jstests/libs/password_protected.pem
new file mode 100644
index 00000000000..87976e7a574
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/password_protected.pem
@@ -0,0 +1,51 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIgWTIkEmBBfoCAggA
+MBQGCCqGSIb3DQMHBAjzL6xrCrEygwSCBMihG8kg3nTnTtWAbB+d1D+HJxriqm37
+7rwjkfa+T5w5ZBRGpsTt3QB5ep0maX72H55ns6ukkeMoDBSadhDWrGWcLQ2IOGt3
+E14KU6vMFe3gQkfF1fupp7F+3ma58/VNUKa4X5pzZ7OCf8inlLWejp8BRqbrPWqw
+Errgw1kNN3gWfQMr7JtIt1yI1xIMEB2Z976Jn0gaGnJAtzIW4thqjkDdb8b33S9f
+cb7N1Fq4cly22f9HdqNcLgVTi1zIlPXc/f/6mtsGTsJv/rMPthJ7c3Smvh3Fce2G
+w8e+ypfey+9QG3fk7RslaFRe8ShgqfdR8CAalp2UzwNbX91Agyuim3TA6s4jM8N9
+cF6CXlqEaA4sKhiOJmw69DfTC7QRee/gi2A8bz17pX85nKrGiLYn+Od8CEhTFxVk
+lNgBLv4+RcYHVqxWlbJMdDliMN53E+hYbh0y+GDLjteEXbrxRo1aSgd/9PGiSl97
+KY4F7b/OwRzRZh1F+cXY+uP5ZQMbx5EMMkhzuj3Hiy/AVlQrW2B1lXtcf11YFFJj
+xWq6YcpmEjL+xRq1PgoU7ahl6K0A3ScedQA5b1rLdPE8+bkRAfoN+0r8HVkIL7M+
+PorrwuWnvUmovZ0yDvm153HVvRnKZKHcelklphuUWfXvcRNITG/Rx6ssj+MVjqjb
+Xy7t7wgIrk10TFWNEcunGjSSjPDkjYPazJ2dasI0rODzhlQzrnlWM+El9P5zSu2z
+1Bvet44nmAKi2WLMda5YKbJcLSNbpBFB+rTwDt/D+dfwsJeC0sjpzzatKGXNJLJQ
+7x9BZfAbBn0QrIZYGMkaxWvcpJcaVUbCKiST4DK5ze584ptrlH+Bqw4u4xLcVrdk
+hu/8IBNybLrl4zahIz7bRRNmw5wo9zUVXPXEtuYak+MK+gmD3TzJ12OUKAlAj3Go
+Fj3NFQoxBJJjuXM3zZRvHp+/AAOUANBYIyV2WssF6C+SH4o+jKyxWC/GawPFvx/B
+gy55kdEt+ORdcOfV8L5Q2xI8Qpck6E3odmaHCvjz1bUVUWqhJcTuoewHRBfWiWgc
+UCXBS/YgendUQroBOPyYIwTtk4XY9fhhKGI4LhWcx4LfzntBnM9FGmDOwhu3HqEd
+HOs8p+HhB8LPjGRot63m7gkJ1T6AswSi9hTeZeSgXuSgL23zqwPGbGTwO3AmFs/M
+8luXQ4My9bk74K3d9lFdJPaxeTpeeWNodnBItbioT5aImptU+pkKWLTVmXi4V+JE
+1ootg+DSbz+bKp4A/LLOBO4Rsx5FCGAbBMnKc/n8lF86LjKq2PLRfgdPCaVfBrcd
+TnOkBZYU0HwJAc++4AZQJvA/KRB4UPUzMe2atjVxcrr6r6vL8G04+7TBFoynpzJ+
+4KZPCJz0Avb4wYKu/IHkdKL7UY8WEGz1mMDbAu4/xCriLg49D2f1eY3FTEjBotBI
+J9hE4ccmwqlxtl4qCVRezh0C+viJ6q2tCji2SPQviaVMNWiis9cZ52J+F9TC2p9R
+PdatJg0rjuVzfoPFE8Rq8V6+zf818b19vQ4F31J+VXTz7sF8it9IO0w/3MbtfBNE
+pKmMZ9h5RdSw1kXRWXbROR9XItS7gE1wkXAxw11z7jqNSNvhotkJXH/A5qGpTFBl
+Z8A=
+-----END ENCRYPTED PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDczCCAtygAwIBAgIBCzANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNjE1
+MTgxMFoXDTQxMDQyMjE1MTgxMFowazELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMRAwDgYDVQQDDAdsYXphcnVzMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA0+uq+UcogTSS+BLNTwwsBU7/HnNNhNgLKnk8pdUC
+UFOzAjXnXlXEravmbhWeIj5TsCElc5FPE66OvmiixFU6l27Z5P8gopjokxll7e1B
+ujeJOXgy5h+K76xdeQ90JmQX4OO0K5rLXvNH3ufuhGr2NObrBz6kbF5Wdr3urPl6
+pFSLH02zPLqPHhhUvO8jcbUD3RrS/5ZGHqE++F+QRMuYeCXTjECA8iLDvQsiqvT6
+qK1y04V/8K0BYJd/yE31H3cvRLUu7mRAkN87lY1Aj0i3dKM/l2RAa3tsy2/kSDH3
+VeUaqjoPN8PTfJaoMZz7xV7C+Zha+JZh3E7pq6viMR6bkwIDAQABo3sweTAJBgNV
+HRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZp
+Y2F0ZTAdBgNVHQ4EFgQUbw3OWXLJpkDMpGnLWM4vxSbwUSAwHwYDVR0jBBgwFoAU
+B0EZOp9+xbciTre81d/k/Am4ZBYwDQYJKoZIhvcNAQEFBQADgYEAL+OC9x0P7Ql+
+8NbONrIeOIoJD++K5rUM0vI+u9RDAxTm9TO6cP7Cl6H4zzvlzJ3w9DL66c2r+ZTy
+BxzFO1wtDKUo5RJKneC0tMz0rJQIWTqo45fDLs8UIDB5t4xp6zed34nvct+wIRaV
+hCjHBaVmILlBWb6OF9/kl1JhLtElyDs=
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/server.pem b/src/mongo/gotools/test/legacy26/jstests/libs/server.pem
new file mode 100644
index 00000000000..e5980d4856e
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/server.pem
@@ -0,0 +1,34 @@
+-----BEGIN PRIVATE KEY-----
+MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAK53miP9GczBWXnq
+NxHwQkgVqsDuesjwJbWilMK4gf3fjnf2PN3qDpnGbZbPD0ij8975pIKtSPoDycFm
+A8Mogip0yU2Lv2lL56CWthSBftOFDL2CWIsmuuURFXZPiVLtLytfI9oLASZFlywW
+Cs83qEDTvdW8VoVhVsxV1JFDnpXLAgMBAAECgYBoGBgxrMt97UazhNkCrPT/CV5t
+6lv8E7yMGMrlOyzkCkR4ssQyK3o2qbutJTGbR6czvIM5LKbD9Qqlh3ZrNHokWmTR
+VQQpJxt8HwP5boQvwRHg9+KSGr4JvRko1qxFs9C7Bzjt4r9VxdjhwZPdy0McGI/z
+yPXyQHjqBayrHV1EwQJBANorfCKeIxLhH3LAeUZuRS8ACldJ2N1kL6Ov43/v+0S/
+OprQeBTODuTds3sv7FCT1aYDTOe6JLNOwN2i4YVOMBsCQQDMuCozrwqftD17D06P
+9+lRXUekY5kFBs5j28Xnl8t8jnuxsXtQUTru660LD0QrmDNSauhpEmlpJknicnGt
+hmwRAkEA12MI6bBPlir0/jgxQqxI1w7mJqj8Vg27zpEuO7dzzLoyJHddpcSNBbwu
+npaAakiZK42klj26T9+XHvjYRuAbMwJBAJ5WnwWEkGH/pUHGEAyYQdSVojDKe/MA
+Vae0tzguFswK5C8GyArSGRPsItYYA7D4MlG/sGx8Oh2C6MiFndkJzBECQDcP1y4r
+Qsek151t1zArLKH4gG5dQAeZ0Lc2VeC4nLMUqVwrHcZDdd1RzLlSaH3j1MekFVfT
+6v6rrcNLEVbeuk4=
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIC7jCCAlegAwIBAgIBCjANBgkqhkiG9w0BAQUFADCBkjELMAkGA1UEBhMCVVMx
+ETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYD
+VQQKDAUxMEdlbjEPMA0GA1UECwwGS2VybmVsMRowGAYDVQQDDBFNeSBDZXJ0IEF1
+dGhvcml0eTEbMBkGCSqGSIb3DQEJARYMcm9vdEBsYXphcnVzMB4XDTEzMTIwNTEz
+MjU0MFoXDTQxMDQyMTEzMjU0MFowajELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5l
+dyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ4wDAYDVQQKDAUxMEdlbjEP
+MA0GA1UECwwGS2VybmVsMQ8wDQYDVQQDDAZzZXJ2ZXIwgZ8wDQYJKoZIhvcNAQEB
+BQADgY0AMIGJAoGBAK53miP9GczBWXnqNxHwQkgVqsDuesjwJbWilMK4gf3fjnf2
+PN3qDpnGbZbPD0ij8975pIKtSPoDycFmA8Mogip0yU2Lv2lL56CWthSBftOFDL2C
+WIsmuuURFXZPiVLtLytfI9oLASZFlywWCs83qEDTvdW8VoVhVsxV1JFDnpXLAgMB
+AAGjezB5MAkGA1UdEwQCMAAwLAYJYIZIAYb4QgENBB8WHU9wZW5TU0wgR2VuZXJh
+dGVkIENlcnRpZmljYXRlMB0GA1UdDgQWBBQgCkKiZhUV9/Zo7RwYYwm2cNK6tzAf
+BgNVHSMEGDAWgBQHQRk6n37FtyJOt7zV3+T8CbhkFjANBgkqhkiG9w0BAQUFAAOB
+gQCbsfr+Q4pty4Fy38lSxoCgnbB4pX6+Ex3xyw5zxDYR3xUlb/uHBiNZ1dBrXBxU
+ekU8dEvf+hx4iRDSW/C5N6BGnBBhCHcrPabo2bEEWKVsbUC3xchTB5rNGkvnMt9t
+G9ol7vanuzjL3S8/2PB33OshkBH570CxqqPflQbdjwt9dg==
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/slow_weekly_util.js b/src/mongo/gotools/test/legacy26/jstests/libs/slow_weekly_util.js
new file mode 100644
index 00000000000..1e2c7391cb1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/slow_weekly_util.js
@@ -0,0 +1,20 @@
+
+SlowWeeklyMongod = function( name ) {
+ this.name = name;
+ this.port = 30201;
+
+ this.start = new Date();
+
+ this.conn = startMongodEmpty("--port", this.port, "--dbpath", MongoRunner.dataPath + this.name , "--smallfiles", "--nojournal" );
+};
+
+SlowWeeklyMongod.prototype.getDB = function( name ) {
+ return this.conn.getDB( name );
+}
+
+SlowWeeklyMongod.prototype.stop = function(){
+ stopMongod( this.port );
+ var end = new Date();
+ print( "slowWeekly test: " + this.name + " completed succesfully in " + ( ( end.getTime() - this.start.getTime() ) / 1000 ) + " seconds" );
+};
+
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/smoke.pem b/src/mongo/gotools/test/legacy26/jstests/libs/smoke.pem
new file mode 100644
index 00000000000..0f6deb368c5
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/smoke.pem
@@ -0,0 +1,50 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDLSU04xAL7eZ/Y
+J3euMDP/Uq7+a65zEIk7wzD2K5Htosbdysn67l8OzVlF2/IcB0/2SLuHHyC7+4pv
+O2+ndtvi6hr9zF4S8Bz0In/UUb+WzhFHuZd0YLl2arhnYMoDUkyLheVqEcDbECgi
+a6i5SNpAff2eUy29FVGwsaUl7+iEHqYxS9Ibmw1CeQYLEOGyhkTI9BjfO/3HwQyW
+FmOJp/IAJUFRCXTgluaMHptaonX5GmRK64wlF8Reu+uyQRdWM0cK9b3AxbBWAAyT
+SLQto+PW1J7QQ95Kn+aJ8nH1Jj80iUAjx2yAGchl1wfSHf5yAAo4OJNXgKUrQHIs
+dofsw/KTAgMBAAECggEBAItF+SX/BJwNw7lvsMsiMz2mBEZCuA4VMjBDlnPRffT1
+JJInsSG91lppzdPS0JjrWZk+U1xLsz2XJEz4x5JQGG3qPfvL3FfVMcEBMdrg9wX2
+wFgHiwAslGPQ0e3hngWQiOi+H2MALsTm2NhcMghfJUgyCWRDUH7O8FzCGIdZSk/Z
+Bx4CvBad+k+OFvUt03gwGtoCn7XneMRVGt04EU/srg0h6C3810k7+OLC1xZc8jaE
+5UAZwKO4pqJn/w0s9T2eAC+b+1YNuUTLvMTdhfH6ZkANxgcfQHWok14iGxCyXMeQ
+dBHeyNTIYKnfpwjFz85LgEvl4gsUTaa/IM0DfGPDOkECgYEA5z8Px0Sh0DSRr6PW
+3Ki9sDtJP5f+x0ARaebOfkscOJ5YvDejIxVNVBi5PYRtfCyLT78AKpRfxtBDQtW1
+w02xqkh/RR/GZm8hLyh/KzroTA3+GQvMqnE1irkJCKEOWwUjZNAFt+kgZIQWCfbn
+V1CjeK9xnEt00Icn7sh1CKubvakCgYEA4QwKZ2zj10i90NqlAAJlj6NTK/h+bHHw
+6VkUUO93GJZ1cC++dVZRhPTqBRdACJSey4nCMFdO3PLwy2gBG9LwU4rcN0Euo2bm
+J2uBBJVoXySE1250vem9I7KAramtTzQuHtIEvYhB3DHY+oYv4Eg6NSB4zAdtDKiV
+iiP23IN0+9sCgYA0KHconQRab+EEWtIVx0GxxE2LOH9Q9dR3rIWa2tossxqUqX/0
+Y9OjSkhN5dbEEVAC1rP05q6Lq2Hga0+qE5YlMGD0eGxJons7pci5OXo33VgY0h6B
+uzM2bPHqrlkMkqYfEQSZLM4PnfNSoAwiF6Anknrvo91fQ3zwUOqE4CAqsQKBgGX2
+a5xShKRcy8ud1JY9f8BlkmBgtP7zXOCMwJyu8nnMaacLqrJFCqg/wuvNjfCVTaEQ
+aFA4rn2DAMBX/fCaUNK5Hm9WdAgKrgp8Nbda7i/1Ps7Qt8n35f8PeCe2sdQp4x+J
+riYlXxmh6BoRxA1NDDpX3QMr9id/FknBY66jTNRzAoGBALab2GqBYInkmPj1nGDA
+f9+VQWFzl98k0PbLQcvKgbWuxLDf/Pz9lBi9tPzhNuTRt9RLuCMc5ZbpPbHPNWI0
+6+zofHTHoW0+prDdtZqpEE/TKmr8emjYMf4CBIKwW3CwbBRLr9C8G01ClTaan2Ge
+LMUhIseBsaQhmkL8n1AyauGL
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDlzCCAn+gAwIBAgIJAJDxQ4ilLvoVMA0GCSqGSIb3DQEBBQUAMGIxCzAJBgNV
+BAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazERMA8GA1UEBwwITmV3IFlvcmsxDjAM
+BgNVBAoMBTEwZ2VuMR0wGwYJKoZIhvcNAQkBFg50ZXN0QDEwZ2VuLmNvbTAeFw0x
+MjEyMDQxNTA0MDJaFw0xODA1MjcxNTA0MDJaMGIxCzAJBgNVBAYTAlVTMREwDwYD
+VQQIDAhOZXcgWW9yazERMA8GA1UEBwwITmV3IFlvcmsxDjAMBgNVBAoMBTEwZ2Vu
+MR0wGwYJKoZIhvcNAQkBFg50ZXN0QDEwZ2VuLmNvbTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMtJTTjEAvt5n9gnd64wM/9Srv5rrnMQiTvDMPYrke2i
+xt3KyfruXw7NWUXb8hwHT/ZIu4cfILv7im87b6d22+LqGv3MXhLwHPQif9RRv5bO
+EUe5l3RguXZquGdgygNSTIuF5WoRwNsQKCJrqLlI2kB9/Z5TLb0VUbCxpSXv6IQe
+pjFL0hubDUJ5BgsQ4bKGRMj0GN87/cfBDJYWY4mn8gAlQVEJdOCW5owem1qidfka
+ZErrjCUXxF6767JBF1YzRwr1vcDFsFYADJNItC2j49bUntBD3kqf5onycfUmPzSJ
+QCPHbIAZyGXXB9Id/nIACjg4k1eApStAcix2h+zD8pMCAwEAAaNQME4wHQYDVR0O
+BBYEFO6qoBUb1CN4lCkGhaatcjUBKwWmMB8GA1UdIwQYMBaAFO6qoBUb1CN4lCkG
+haatcjUBKwWmMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAGcJdsiI
+JdhJDPkZksOhHZUMMRHLHfWubMGAvuml6hs+SL850DRc+vRP43eF/yz+WbEydkFz
+3qXkQQSG8A2bLOtg0c6Gyi5snUOX0CKcOl3jitgwVkHcdX/v6vbiwALk+r8kJExv
+vpiWIp3nxgLtYVJP/XPoEomEwmu5zWaw28MWXM4XrEjPYmK5ZL16VXXD+lfO0cnT
+2vjkbNK8g7fKaIYYX+cr8GLZi19kO+jUYfhtxQbn8nxUfSjHseAy9BbOLUbGTdAV
+MbGRQveOnFW0eDLjiZffwqCtn91EtYy+vBuYHT/C7Ws4hNwd9lTvmg0SHAm01vi1
+b4fBFFjNvg1wCrU=
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/test_background_ops.js b/src/mongo/gotools/test/legacy26/jstests/libs/test_background_ops.js
new file mode 100644
index 00000000000..91f50aaa362
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/test_background_ops.js
@@ -0,0 +1,340 @@
+//
+// Utilities for running background operations while the main test operations are in progress
+//
+
+/**
+ * Allows synchronization between background ops and the test operations
+ */
+var waitForLock = function( mongo, name ){
+
+ var ts = new ObjectId()
+ var lockColl = mongo.getCollection( "config.testLocks" )
+
+ lockColl.update({ _id : name, state : 0 }, { $set : { state : 0 } }, true)
+
+ //
+ // Wait until we can set the state to 1 with our id
+ //
+
+ var startTime = new Date().getTime()
+
+ assert.soon( function() {
+ lockColl.update({ _id : name, state : 0 }, { $set : { ts : ts, state : 1 } })
+ var gleObj = lockColl.getDB().getLastErrorObj()
+
+ if( new Date().getTime() - startTime > 20 * 1000 ){
+ print( "Waiting for..." )
+ printjson( gleObj )
+ printjson( lockColl.findOne() )
+ printjson( ts )
+ }
+
+ return gleObj.n == 1 || gleObj.updatedExisting
+ }, "could not acquire lock", 30 * 1000, 100 )
+
+ print( "Acquired lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+
+ // Set the state back to 0
+ var unlock = function(){
+ print( "Releasing lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+ lockColl.update({ _id : name, ts : ts }, { $set : { state : 0 } })
+ }
+
+ // Return an object we can invoke unlock on
+ return { unlock : unlock }
+}
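+
+// Usage sketch (illustrative only; "balancer" is a placeholder lock name):
+// var lock = waitForLock(db.getMongo(), "balancer");
+// ... critical section coordinated with background ops ...
+// lock.unlock();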
+
+/**
+ * Allows a test or background op to say it's finished
+ */
+var setFinished = function( mongo, name, finished ){
+ if( finished || finished == undefined )
+ mongo.getCollection( "config.testFinished" ).update({ _id : name }, { _id : name }, true )
+ else
+ mongo.getCollection( "config.testFinished" ).remove({ _id : name })
+}
+
+/**
+ * Checks whether a test or background op is finished
+ */
+var isFinished = function( mongo, name ){
+ return mongo.getCollection( "config.testFinished" ).findOne({ _id : name }) != null
+}
+
+/**
+ * Sets the result of a background op
+ */
+var setResult = function( mongo, name, result, err ){
+ mongo.getCollection( "config.testResult" ).update({ _id : name }, { _id : name, result : result, err : err }, true )
+}
+
+/**
+ * Gets the result for a background op
+ */
+var getResult = function( mongo, name ){
+ return mongo.getCollection( "config.testResult" ).findOne({ _id : name })
+}
+
+/**
+ * Overrides the parallel shell code in mongo
+ */
+function startParallelShell( jsCode, port ){
+
+ var x;
+ if ( port ) {
+ x = startMongoProgramNoConnect( "mongo" , "--port" , port , "--eval" , jsCode );
+ } else {
+ x = startMongoProgramNoConnect( "mongo" , "--eval" , jsCode , db ? db.getMongo().host : null );
+ }
+
+ return function(){
+ jsTestLog( "Waiting for shell " + x + "..." )
+ waitProgram( x );
+ jsTestLog( "Shell " + x + " finished." )
+ };
+}
+
+startParallelOps = function( mongo, proc, args, context ){
+
+ var procName = proc.name + "-" + new ObjectId()
+ var seed = new ObjectId( new ObjectId().valueOf().split("").reverse().join("") )
+ .getTimestamp().getTime()
+
+ // Make sure we aren't finished before we start
+ setFinished( mongo, procName, false )
+ setResult( mongo, procName, undefined, undefined )
+
+ // TODO: Make this a context of its own
+ var procContext = { procName : procName,
+ seed : seed,
+ waitForLock : waitForLock,
+ setFinished : setFinished,
+ isFinished : isFinished,
+ setResult : setResult,
+
+ setup : function( context, stored ){
+
+ waitForLock = function(){
+ return context.waitForLock( db.getMongo(), context.procName )
+ }
+ setFinished = function( finished ){
+ return context.setFinished( db.getMongo(), context.procName, finished )
+ }
+ isFinished = function(){
+ return context.isFinished( db.getMongo(), context.procName )
+ }
+ setResult = function( result, err ){
+ return context.setResult( db.getMongo(), context.procName, result, err )
+ }
+ }}
+
+ var bootstrapper = function( stored ){
+
+ var procContext = stored.procContext
+ procContext.setup( procContext, stored )
+
+ var contexts = stored.contexts
+ eval( "contexts = " + contexts )
+
+ for( var i = 0; i < contexts.length; i++ ){
+ if( typeof( contexts[i] ) != "undefined" ){
+ // Evaluate all contexts
+ contexts[i]( procContext )
+ }
+ }
+
+ var operation = stored.operation
+ eval( "operation = " + operation )
+
+ var args = stored.args
+ eval( "args = " + args )
+
+ result = undefined
+ err = undefined
+
+ try{
+ result = operation.apply( null, args )
+ }
+ catch( e ){
+ err = e
+ }
+
+ setResult( result, err )
+ }
+
+ var contexts = [ RandomFunctionContext, context ]
+
+ var testDataColl = mongo.getCollection( "config.parallelTest" )
+
+ testDataColl.insert({ _id : procName,
+ bootstrapper : tojson( bootstrapper ),
+ operation : tojson( proc ),
+ args : tojson( args ),
+ procContext : procContext,
+ contexts : tojson( contexts ) })
+
+ assert.eq( null, testDataColl.getDB().getLastError() )
+
+ var bootstrapStartup =
+ "{ var procName = '" + procName + "'; " +
+ "var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
+ ".findOne({ _id : procName }); " +
+ "var bootstrapper = stored.bootstrapper; " +
+ "eval( 'bootstrapper = ' + bootstrapper ); " +
+ "bootstrapper( stored ); " +
+ "}"
+
+
+ var oldDB = db
+ db = mongo.getDB( "test" )
+
+ jsTest.log( "Starting " + proc.name + " operations..." )
+
+ var rawJoin = startParallelShell( bootstrapStartup )
+
+ db = oldDB
+
+
+ var join = function(){
+ setFinished( mongo, procName, true )
+
+ rawJoin();
+ result = getResult( mongo, procName )
+
+ assert.neq( result, null )
+
+ if( result.err ) throw "Error in parallel ops " + procName + " : "
+ + tojson( result.err )
+
+ else return result.result
+ }
+
+ join.isFinished = function(){
+ return isFinished( mongo, procName )
+ }
+
+ join.setFinished = function( finished ){
+ return setFinished( mongo, procName, finished )
+ }
+
+ join.waitForLock = function( name ){
+ return waitForLock( mongo, name )
+ }
+
+ return join
+}
+
+var RandomFunctionContext = function( context ){
+
+ Random.srand( context.seed );
+
+ Random.randBool = function(){ return Random.rand() > 0.5 }
+
+ Random.randInt = function( min, max ){
+
+ if( max == undefined ){
+ max = min
+ min = 0
+ }
+
+ return min + Math.floor( Random.rand() * max )
+ }
+
+ Random.randShardKey = function(){
+
+ var numFields = 2 //Random.randInt(1, 3)
+
+ var key = {}
+ for( var i = 0; i < numFields; i++ ){
+ var field = String.fromCharCode( "a".charCodeAt() + i )
+ key[ field ] = 1
+ }
+
+ return key
+ }
+
+ Random.randShardKeyValue = function( shardKey ){
+
+ var keyValue = {}
+ for( field in shardKey ){
+ keyValue[ field ] = Random.randInt(1, 100)
+ }
+
+ return keyValue
+ }
+
+ Random.randCluster = function(){
+
+ var numShards = 2 //Random.randInt( 1, 10 )
+ var rs = false //Random.randBool()
+ var st = new ShardingTest({ shards : numShards,
+ mongos : 4,
+ other : { separateConfig : true, rs : rs } })
+
+ return st
+ }
+}
+
+
+//
+// Some utility operations
+//
+
+function moveOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ while( ! isFinished() ){
+
+ var findKey = Random.randShardKeyValue( shardKey )
+ var toShard = shards[ Random.randInt( shards.length ) ]._id
+
+ try {
+ printjson( admin.runCommand({ moveChunk : collName,
+ find : findKey,
+ to : toShard }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping moveOps..." )
+}
+
+function splitOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ while( ! isFinished() ){
+
+ var middleKey = Random.randShardKeyValue( shardKey )
+
+ try {
+ printjson( admin.runCommand({ split : collName,
+ middle : middleKey }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping splitOps..." )
+}
+
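+// Usage sketch (illustrative only; assumes a ShardingTest "st" with a sharded
+// collection "test.foo"):
+// var join = startParallelOps(st.s, moveOps, ["test.foo"]);
+// ... run the foreground workload here ...
+// join(); // marks the op finished, waits for the shell, rethrows any error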
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/testconfig b/src/mongo/gotools/test/legacy26/jstests/libs/testconfig
new file mode 100644
index 00000000000..0c1fc871d61
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/testconfig
@@ -0,0 +1,4 @@
+fastsync = true
+#comment line
+#commentedflagwithan = false
+version = false
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/testconfig.json b/src/mongo/gotools/test/legacy26/jstests/libs/testconfig.json
new file mode 100644
index 00000000000..5af32aad7d3
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/testconfig.json
@@ -0,0 +1,4 @@
+{
+ "fastsync" : true,
+ "version" : false
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/trace_missing_docs.js b/src/mongo/gotools/test/legacy26/jstests/libs/trace_missing_docs.js
new file mode 100644
index 00000000000..3faf50b4606
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/trace_missing_docs.js
@@ -0,0 +1,90 @@
+
+//
+// On an error inserting a document, traces back through the shards' oplogs and shows where the document was dropped
+//
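+// Usage sketch (illustrative only; "st" is an assumed ShardingTest):
+//   var ops = traceMissingDoc(st.s.getCollection("test.foo"), {_id: 12, a: 3});
+// returns the matching oplog entries across all shards, sorted by timestamp.
+//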
+
+function traceMissingDoc( coll, doc, mongos ) {
+
+ if (mongos) coll = mongos.getCollection(coll + "");
+ else mongos = coll.getMongo();
+
+ var config = mongos.getDB( "config" );
+ var shards = config.shards.find().toArray();
+ for ( var i = 0; i < shards.length; i++ ) {
+ shards[i].conn = new Mongo( shards[i].host );
+ }
+
+ var shardKeyPatt = config.collections.findOne({ _id : coll + "" }).key;
+
+ // Project out the shard key
+ var shardKey = {};
+ for ( var k in shardKeyPatt ) {
+ if ( doc[k] == undefined ) {
+ jsTest.log( "Shard key " + tojson( shardKey ) +
+ " not found in doc " + tojson( doc ) +
+ ", falling back to _id search..." );
+ shardKeyPatt = { _id : 1 };
+ shardKey = { _id : doc['_id'] };
+ break;
+ }
+ shardKey[k] = doc[k];
+ }
+
+ if ( doc['_id'] == undefined ) {
+ jsTest.log( "Id not found in doc " + tojson( doc ) + " cannot trace oplog entries." );
+ return;
+ }
+
+ jsTest.log( "Using shard key : " + tojson( shardKey ) );
+
+ var allOps = [];
+ for ( var i = 0; i < shards.length; i++ ) {
+
+ var oplog = shards[i].conn.getCollection( "local.oplog.rs" );
+ if ( !oplog.findOne() ) {
+ oplog = shards[i].conn.getCollection( "local.oplog.$main" );
+ }
+
+ if ( !oplog.findOne() ) {
+ jsTest.log( "No oplog was found on shard " + shards[i]._id );
+ continue;
+ }
+
+ var addKeyQuery = function( query, prefix ) {
+ for ( var k in shardKey ) {
+ query[prefix + '.' + k] = shardKey[k];
+ }
+ return query;
+ };
+
+ var addToOps = function( cursor ) {
+ cursor.forEach( function( doc ) {
+ doc.shard = shards[i]._id;
+ doc.realTime = new Date( doc.ts.getTime() * 1000 );
+ allOps.push( doc );
+ });
+ };
+
+ // Find ops
+ addToOps( oplog.find( addKeyQuery( { op : 'i' }, 'o' ) ) );
+ var updateQuery = { $or : [ addKeyQuery( { op : 'u' }, 'o2' ),
+ { op : 'u', 'o2._id' : doc['_id'] } ] };
+ addToOps( oplog.find( updateQuery ) );
+ addToOps( oplog.find({ op : 'd', 'o._id' : doc['_id'] }) );
+ }
+
+ var compareOps = function( opA, opB ) {
+ if ( opA.ts < opB.ts ) return -1;
+ if ( opB.ts < opA.ts ) return 1;
+ else return 0;
+ }
+
+ allOps.sort( compareOps );
+
+ print( "Ops found for doc " + tojson( doc ) + " on each shard:\n" );
+ for ( var i = 0; i < allOps.length; i++ ) {
+ printjson( allOps[i] );
+ }
+
+ return allOps;
+}
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy26/jstests/libs/use_extended_timeout.js b/src/mongo/gotools/test/legacy26/jstests/libs/use_extended_timeout.js
new file mode 100644
index 00000000000..7f770249214
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/libs/use_extended_timeout.js
@@ -0,0 +1,12 @@
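+// Wraps runMongoProgram so that every tool invocation gets a 30-second
+// --dialTimeout by default (bsondump is skipped since it never dials a server).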
+var _orig_runMongoProgram = runMongoProgram;
+runMongoProgram = function() {
+ var args = [];
+ for (var i in arguments) {
+ args[i] = arguments[i];
+ }
+ var progName = args[0];
+ if (progName !== "bsondump" && args.indexOf("--dialTimeout") === -1) {
+ args.push("--dialTimeout", "30");
+ }
+ return _orig_runMongoProgram.apply(null, args);
+};
diff --git a/src/mongo/gotools/test/legacy26/jstests/misc/biginsert.js b/src/mongo/gotools/test/legacy26/jstests/misc/biginsert.js
new file mode 100755
index 00000000000..ebbdc18ba3e
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/misc/biginsert.js
@@ -0,0 +1,18 @@
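+// Builds a ~300-character string by repeated doubling (the bare "o + o;" is a
+// no-op), then measures raw insert throughput, printing inserts/sec every B docs.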
+o = "xxxxxxxxxxxxxxxxxxx";
+o = o + o;
+o + o;
+o = o + o;
+o = o + o;
+o = o + o;
+
+var B = 40000;
+var last = new Date();
+for (i = 0; i < 30000000; i++) {
+ db.foo.insert({ o: o });
+ if (i % B == 0) {
+ var n = new Date();
+ print(i);
+ print("per sec: " + B*1000 / (n - last));
+ last = n;
+ }
+}
diff --git a/src/mongo/gotools/test/legacy26/jstests/replsets/rslib.js b/src/mongo/gotools/test/legacy26/jstests/replsets/rslib.js
new file mode 100644
index 00000000000..6a16db232e4
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/replsets/rslib.js
@@ -0,0 +1,115 @@
+
+var count = 0;
+var w = 0;
+
+var wait = function(f,msg) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up on ' + msg );
+ sleep(1000);
+ }
+};
+
+/**
+ * Use this to do something once every 4 iterations.
+ *
+ * <pre>
+ * for (i=0; i<1000; i++) {
+ * occasionally(function() { print("4 more iterations"); });
+ * }
+ * </pre>
+ */
+var occasionally = function(f, n) {
+ var interval = n || 4;
+ if (count % interval == 0) {
+ f();
+ }
+ count++;
+};
+
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ // make this work with either dbs or connections
+ if (typeof(a.getDB) == "function") {
+ db = a.getDB('foo');
+ }
+ else {
+ db = a;
+ }
+ db.bar.stats();
+ if (jsTest.options().keyFile || jsTest.options().useX509) { // SERVER-4241: Shell connections don't re-authenticate on reconnect
+ return jsTest.authenticate(db.getMongo());
+ }
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
+
+var getLatestOp = function(server) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ var log = server.getDB("local")['oplog.rs'];
+ var cursor = log.find({}).sort({'$natural': -1}).limit(1);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
+
+
+var waitForAllMembers = function(master, timeout) {
+ var failCount = 0;
+
+ assert.soon( function() {
+ var state = null
+ try {
+ state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+ failCount = 0;
+ } catch ( e ) {
+ // Connection can get reset on replica set failover causing a socket exception
+ print( "Calling replSetGetStatus failed" );
+ print( e );
+ return false;
+ }
+ occasionally(function() { printjson(state); }, 10);
+
+ for (var m in state.members) {
+ if (state.members[m].state != 1 && // PRIMARY
+ state.members[m].state != 2 && // SECONDARY
+ state.members[m].state != 7) { // ARBITER
+ return false;
+ }
+ }
+ printjson( state );
+ return true;
+ }, "not all members ready", timeout || 60000);
+
+ print( "All members are now in state PRIMARY, SECONDARY, or ARBITER" );
+};
+
+var reconfig = function(rs, config) {
+ var admin = rs.getPrimary().getDB("admin");
+
+ try {
+ var ok = admin.runCommand({replSetReconfig : config});
+ assert.eq(ok.ok,1);
+ }
+ catch(e) {
+ print(e);
+ }
+
+ master = rs.getPrimary().getDB("admin");
+ waitForAllMembers(master);
+
+ return master;
+};
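+
+// Usage sketch (illustrative only; "rt" is an assumed, initiated ReplSetTest):
+// var cfg = rt.getPrimary().getDB("local").system.replset.findOne();
+// cfg.version++; // replSetReconfig requires a bumped version number
+// reconfig(rt, cfg);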
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/csv1.js b/src/mongo/gotools/test/legacy26/jstests/tool/csv1.js
new file mode 100644
index 00000000000..5eb7ab0249a
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/csv1.js
@@ -0,0 +1,42 @@
+// csv1.js
+
+t = new ToolTest( "csv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-', f : "."};
+
+assert.eq( 0 , c.count() , "setup1" );
+c.insert( base );
+delete base._id
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e,f" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e,f" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+assert.eq( tojson( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e', f : "f"} ) , tojson( a[1] ) , "csv parse 1" );
+assert.eq( tojson( base ) , tojson(a[0]) , "csv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.eq( tojson( base ) , tojson(x) , "csv parse 2" )
+
+
+
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/csvexport1.js b/src/mongo/gotools/test/legacy26/jstests/tool/csvexport1.js
new file mode 100644
index 00000000000..2cd3c9c0447
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/csvexport1.js
@@ -0,0 +1,65 @@
+// csvexport1.js
+
+
+t = new ToolTest( "csvexport1" )
+
+c = t.startDB( "foo" );
+
+assert.eq( 0 , c.count() , "setup1" );
+
+objId = ObjectId()
+
+c.insert({ a : new NumberInt(1) , b : objId , c: [1, 2, 3], d : {a : "hello", b : "world"} , e: '-'})
+c.insert({ a : -2.0, c : MinKey, d : "Then he said, \"Hello World!\"", e : new NumberLong(3)})
+c.insert({ a : new BinData(0, "1234"), b : ISODate("2009-08-27T12:34:56.789"),
+ c : new Timestamp(1234, 9876), d : /foo*\"bar\"/i,
+ e : function foo() { print("Hello World!"); }})
+
+assert.eq( 3 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b,c,d,e")
+
+
+c.drop()
+
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+
+assert.soon ( 3 + " == c.count()", "after import");
+
+// Note: exporting to and importing from CSV is not designed to be round-trippable
+expected = []
+expected.push({ a : 1, b : "ObjectId(" + objId.valueOf() + ")", c : [ 1, 2, 3 ], d : { "a" : "hello", "b" : "world" }, e : "-"})
+expected.push({ a : -2.0, b : "", c : "$MinKey", d : "Then he said, \"Hello World!\"", e : 3})
+// "t" should be 1234, but the shell interprets the first field of timestamps as milliseconds while
+// they are stored as seconds. See SERVER-7718.
+expected.push({ a : "D76DF8", b : "2009-08-27T12:34:56.789Z",
+ c : { "$timestamp" : { "t" : 1234, "i" : 9876 } },
+ d : "/foo*\\\"bar\\\"/i", e : tojson(function foo() { print("Hello World!"); })})
+
+actual = []
+actual.push(c.find({a : 1}).toArray()[0]);
+actual.push(c.find({a : -2.0}).toArray()[0]);
+actual.push(c.find({a : "D76DF8"}).toArray()[0]);
+
+for (i = 0; i < expected.length; i++) {
+ delete actual[i]._id
+ assert.eq(Object.keys(expected[i]).length, Object.keys(actual[i]).length)
+ keys = Object.keys(expected[i])
+ for(var j=0;j<keys.length;j++){
+ expectedVal = expected[i][keys[j]]
+ if((typeof expectedVal)== "object"){
+ // For fields which contain arrays or objects, they have been
+ // exported as JSON - parse the JSON in the output and verify
+ // that it matches the original document's value
+ assert.docEq(expectedVal, JSON.parse(actual[i][keys[j]]), "CSV export " + i)
+ }else{
+ // Otherwise just compare the values directly
+ assert.eq(expectedVal, actual[i][keys[j]], "CSV export " + i)
+ }
+ }
+}
+
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/csvexport2.js b/src/mongo/gotools/test/legacy26/jstests/tool/csvexport2.js
new file mode 100644
index 00000000000..3e0dd2c6829
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/csvexport2.js
@@ -0,0 +1,31 @@
+// csvexport2.js
+
+t = new ToolTest( "csvexport2" )
+
+c = t.startDB( "foo" );
+
+// This test is designed to test exporting of a CodeWithScope object.
+// However, due to SERVER-3391 it is not possible to create a CodeWithScope object in the mongo shell,
+// so this test does not work. Once SERVER-3391 is resolved, this test should be uncommented.
+
+//assert.eq( 0 , c.count() , "setup1" );
+
+//c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})})
+//assert.eq( 1 , c.count() , "setup2" );
+//t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b")
+
+
+//c.drop()
+
+//assert.eq( 0 , c.count() , "after drop" )
+//t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+//assert.soon ( 1 + " == c.count()", "after import");
+
+//expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" : \"World!\" } }"};
+//actual = c.findOne()
+
+//delete actual._id;
+//assert.eq( expected, actual );
+
+
+t.stop()
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/csvimport1.js b/src/mongo/gotools/test/legacy26/jstests/tool/csvimport1.js
new file mode 100644
index 00000000000..3bff1110cbe
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/csvimport1.js
@@ -0,0 +1,40 @@
+// csvimport1.js
+
+t = new ToolTest( "csvimport1" )
+
+c = t.startDB( "foo" );
+
+base = []
+base.push({ a : 1, b : "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma", "c" : "This has leading and trailing whitespace!" })
+base.push({a : 2, b : "When someone says something you \"put it in quotes\"", "c" : "I like embedded quotes/slashes\\backslashes" })
+base.push({a : 3, b : " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "c" : "" })
+base.push({a : 4, b : "", "c" : "How are empty entries handled?" })
+base.push({a : 5, b : "\"\"", c : "\"This string is in quotes and contains empty quotes (\"\")\""})
+base.push({ a : "a" , b : "b" , c : "c"})
+
+assert.eq( 0 , c.count() , "setup" );
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" );
+assert.soon( base.length + " == c.count()" , "after import 1 " );
+
+a = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length; i++ ) {
+ delete a[i]._id
+ assert.eq( tojson(base[i]), tojson(a[i]), "csv parse " + i)
+}
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( base.length - 1 , c.count() , "after import 2" );
+
+x = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length - 1; i++ ) {
+ delete x[i]._id
+ assert.eq( tojson(base[i]), tojson(x[i]), "csv parse with headerline " + i)
+}
+
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/data/a.tsv b/src/mongo/gotools/test/legacy26/jstests/tool/data/a.tsv
new file mode 100644
index 00000000000..1e094179a63
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/data/a.tsv
@@ -0,0 +1,2 @@
+a b c d e
+ 1 foobar 5 -6
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/data/csvimport1.csv b/src/mongo/gotools/test/legacy26/jstests/tool/data/csvimport1.csv
new file mode 100644
index 00000000000..256d40a9184
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/data/csvimport1.csv
@@ -0,0 +1,8 @@
+a,b,c
+1,"this is some text.
+This text spans multiple lines, and just for fun
+contains a comma", "This has leading and trailing whitespace!"
+2, "When someone says something you ""put it in quotes""", I like embedded quotes/slashes\backslashes
+ 3 , " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", ""
+ "4" ,, How are empty entries handled?
+"5","""""", """This string is in quotes and contains empty quotes ("""")"""
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/foo.bson b/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/foo.bson
new file mode 100644
index 00000000000..b8f8f99e6bf
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/foo.bson
Binary files differ
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/system.indexes.bson b/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/system.indexes.bson
new file mode 100644
index 00000000000..dde25da302a
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/data/dumprestore6/system.indexes.bson
Binary files differ
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumpauth.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumpauth.js
new file mode 100644
index 00000000000..2a2d613b708
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumpauth.js
@@ -0,0 +1,27 @@
+// dumpauth.js
+// test mongodump with authentication
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_dumpauth";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+t = db[ baseName ];
+t.drop();
+
+for(var i = 0; i < 100; i++) {
+ t["testcol"].save({ "x": i });
+}
+
+db.createUser({user: "testuser" , pwd: "testuser", roles: jsTest.adminUserRoles});
+
+assert( db.auth( "testuser" , "testuser" ) , "auth failed" );
+
+x = runMongoProgram( "mongodump",
+ "--db", baseName,
+ "--authenticationDatabase=admin",
+ "-u", "testuser",
+ "-p", "testuser",
+ "-h", "127.0.0.1:"+port,
+ "--collection", "testcol" );
+assert.eq(x, 0, "mongodump should succeed with authentication");
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumpfilename1.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumpfilename1.js
new file mode 100644
index 00000000000..fbe24551929
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumpfilename1.js
@@ -0,0 +1,14 @@
+//dumpfilename1.js
+
+// Test designed to make sure that dumping a collection whose name contains "/" fails
+
+t = new ToolTest( "dumpfilename1" );
+
+t.startDB( "foo" );
+
+c = t.db;
+c.getCollection("df/").insert({ a: 3 })
+assert(c.getCollection("df/").count() > 0) // check write worked
+assert(t.runTool( "dump" , "--out" , t.ext ) != 0, "dump should fail with non-zero return code")
+t.stop();
+
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore1.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore1.js
new file mode 100644
index 00000000000..fd1e8789ea6
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore1.js
@@ -0,0 +1,23 @@
+// dumprestore1.js
+
+t = new ToolTest( "dumprestore1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save( { a : 22 } );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "dump" , "--out" , t.ext );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+// ensure that --collection is used with --db. See SERVER-7721
+var ret = t.runTool( "dump" , "--collection" , "col" );
+assert.neq( ret, 0, "mongodump should return failure code" );
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore10.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore10.js
new file mode 100644
index 00000000000..49f008ea591
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore10.js
@@ -0,0 +1,63 @@
+// simple test to ensure write concern functions as expected
+
+var name = "dumprestore10";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+var total = 1000;
+
+{
+ step("store data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < total; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("mongodump from replset");
+
+var data = MongoRunner.dataDir + "/dumprestore10-dump1/";
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+replTest.ports[0], "--out", data );
+
+
+{
+ step("remove data after dumping");
+ master.getDB("foo").getCollection("bar").drop();
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("try mongorestore with write concern");
+
+runMongoProgram( "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:"+replTest.ports[0], "--dir", data );
+
+var x = 0;
+
+// no waiting for replication
+x = master.getDB("foo").getCollection("bar").count();
+
+assert.eq(x, total, "mongorestore should have successfully restored the collection");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore3.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore3.js
new file mode 100644
index 00000000000..f1e5941cbd0
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore3.js
@@ -0,0 +1,60 @@
+// dumprestore3.js
+
+var name = "dumprestore3";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+{
+ step("populate master");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait for slaves");
+ replTest.awaitReplication();
+}
+
+{
+ step("dump & restore a db into a slave");
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+ var c = conn.getDB("foo").bar;
+ c.save({ a: 22 });
+ assert.eq(1, c.count(), "setup2");
+}
+
+step("try mongorestore to slave");
+
+var data = MongoRunner.dataDir + "/dumprestore3-other1/";
+resetDbpath(data);
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+port, "--out", data );
+
+var x = runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+replTest.ports[1], "--dir", data );
+assert.eq(x, 1, "mongorestore should exit w/ -1 on slave");
+
+step("try mongoimport to slave");
+
+dataFile = MongoRunner.dataDir + "/dumprestore3-other2.json";
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:"+port, "--out", dataFile, "--db", "foo", "--collection", "bar" );
+
+x = runMongoProgram( "mongoimport", "--host", "127.0.0.1:"+replTest.ports[1], "--file", dataFile );
+assert.eq(x, 1, "mongoreimport should exit w/ 1 on slave"); // windows return is signed
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore4.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore4.js
new file mode 100644
index 00000000000..568e196061f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore4.js
@@ -0,0 +1,42 @@
+// dumprestore4.js -- see SERVER-2186
+
+// The point of this test is to ensure that mongorestore successfully
+// constructs indexes when the database being restored into has a
+// different name than the database dumped from. There are 2
+// issues here: (1) if you dumped from database "A" and restore into
+// database "B", B should have exactly the right indexes; (2) if for
+// some reason you have another database called "A" at the time of the
+// restore, mongorestore shouldn't touch it.
+
+t = new ToolTest( "dumprestore4" );
+
+c = t.startDB( "dumprestore4" );
+
+db=t.db
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db2=db.getSisterDB( dbname2 );
+
+db.dropDatabase(); // make sure it's empty
+db2.dropDatabase(); // make sure everybody's empty
+
+assert.eq( 0 , db.system.indexes.count() , "setup1" );
+c.ensureIndex({ x : 1} );
+assert.eq( 2 , db.system.indexes.count() , "setup2" ); // _id and x_1
+
+assert.eq( 0, t.runTool( "dump" , "-d" , dbname, "--out", t.ext ), "dump")
+
+// to ensure issue (2), we have to clear out the first db.
+// By inspection, db.dropIndexes() doesn't get rid of the _id index on c,
+// so we have to drop the collection.
+c.drop();
+assert.eq( 0, t.runTool( "restore" , "--dir" , t.ext + "/" + dbname, "-d", dbname2 ), "restore" );
+
+// issue (1)
+assert.eq( 2 , db2.system.indexes.count() , "after restore 1" );
+// issue (2)
+assert.eq( 0 , db.system.indexes.count() , "after restore 2" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore6.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore6.js
new file mode 100644
index 00000000000..d8b349e9589
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore6.js
@@ -0,0 +1,27 @@
+// dumprestore6.js
+// Test restoring from a dump with an old index version
+
+t = new ToolTest( "dumprestore6" );
+
+c = t.startDB( "foo" );
+db = t.db
+assert.eq( 0 , c.count() , "setup1" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6")
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore" );
+assert.eq( 1 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't updated")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+db.dropDatabase()
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6", "--keepIndexVersion")
+
+assert.soon( "c.findOne()" , "no data after sleep2" );
+assert.eq( 1 , c.count() , "after restore2" );
+assert.eq( 0 , db.system.indexes.findOne({name:'a_1'}).v, "index version wasn't maintained")
+assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore7.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore7.js
new file mode 100644
index 00000000000..a71725f434b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore7.js
@@ -0,0 +1,68 @@
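+// dumprestore7.js
+// Tests mongodump's --query support for extended JSON $timestamp: dump only
+// the oplog entries newer than a recorded timestamp, then restore them.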
+var name = "dumprestore7";
+
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 1} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+{
+ step("first chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+ var time = replTest.getPrimary().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next();
+ step(time.ts.t);
+}
+
+{
+ step("second chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 30; i < 50; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+{
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+}
+
+step("try mongodump with $timestamp");
+
+var data = MongoRunner.dataDir + "/dumprestore7-dump1/";
+var query = "{\"ts\":{\"$gt\":{\"$timestamp\":{\"t\":"+ time.ts.t + ",\"i\":" + time.ts.i +"}}}}";
+
+MongoRunner.runMongoTool( "mongodump",
+ { "host": "127.0.0.1:"+replTest.ports[0],
+ "db": "local", "collection": "oplog.rs",
+ "query": query, "out": data });
+
+step("try mongorestore from $timestamp");
+
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+port, "--dir", data );
+var x = conn.getDB("local").getCollection("oplog.rs").count();
+
+assert.eq(x, 20, "mongorestore should only have the latter 20 entries");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
+
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore8.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore8.js
new file mode 100644
index 00000000000..4e6591738d6
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore8.js
@@ -0,0 +1,105 @@
+// dumprestore8.js
+
+// This file tests that indexes and capped collection options get properly dumped and restored.
+// It checks that this works both when doing a full database dump/restore and when doing it just for a single db or collection
+
+t = new ToolTest( "dumprestore8" );
+
+t.startDB( "foo" );
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+assert.eq( 0 , db.foo.count() , "setup1" );
+db.foo.save( { a : 1, b : 1 } );
+db.foo.ensureIndex({a:1});
+db.foo.ensureIndex({b:1, _id:-1});
+assert.eq( 1 , db.foo.count() , "setup2" );
+
+
+assert.eq( 0 , db.bar.count() , "setup3" );
+db.createCollection("bar", {capped:true, size:1000});
+
+for (var i = 0; i < 1000; i++) {
+ db.bar.save( { x : i } );
+}
+db.bar.ensureIndex({x:1});
+
+barDocCount = db.bar.count();
+assert.gt( barDocCount, 0 , "No documents inserted" );
+assert.lt( db.bar.count(), 1000 , "Capped collection didn't evict documents" );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created right" );
+
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped" );
+assert.eq( 0 , db.bar.count() , "bar not dropped" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore." );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore" );
+
+
+// Dump/restore single DB
+
+dumppath = t.ext + "singledbdump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped2" );
+assert.eq( 0 , db.bar.count() , "bar not dropped2" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped2" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname );
+
+db = db.getSiblingDB(dbname2);
+
+assert.soon( "db.foo.findOne()" , "no data after sleep 2" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo 2" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore 2." );
+assert.eq( 5 , db.system.indexes.count() , "Indexes weren't created correctly by restore 2" );
+
+
+// Dump/restore single collection
+
+dumppath = t.ext + "singlecolldump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname2, "-c", "bar", "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.bar.count() , "bar not dropped3" );
+assert.eq( 0 , db.system.indexes.count() , "indexes not dropped3" );
+
+t.runTool( "restore" , "-d", dbname, "-c", "baz", "--dir" , dumppath + dbname2 + "/bar.bson" );
+
+db = db.getSiblingDB(dbname);
+
+assert.soon( "db.baz.findOne()" , "no data after sleep 2" );
+assert.eq( barDocCount, db.baz.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.baz.save({x:i});
+}
+assert.eq( barDocCount, db.baz.count(), "Capped collection didn't evict documents after restore 3." );
+assert.eq( 2 , db.system.indexes.count() , "Indexes weren't created correctly by restore 3" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore9.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore9.js
new file mode 100644
index 00000000000..4bbb2fc18b1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore9.js
@@ -0,0 +1,81 @@
+if (0) { // Test disabled until SERVER-3853 is finished.
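+// Dumps an entire sharded cluster through mongos and restores it into a
+// fresh cluster, including the sharding metadata in the config database.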
+var name = "dumprestore9";
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+s = new ShardingTest( "dumprestore9a", 2, 0, 3, {chunksize:1} );
+
+step("Shard collection");
+
+s.adminCommand( { enablesharding : "aaa" } ); // Make this db alphabetically before 'config' so it gets restored first
+s.adminCommand( { shardcollection : "aaa.foo" , key : { x : 1 } } );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+step("insert data");
+
+str = 'a';
+while (str.length < 1024*512) {
+ str += str;
+}
+
+numDocs = 20;
+for (var i = 0; i < numDocs; i++) {
+ coll.insert({x:i, str:str});
+}
+
+step("Wait for balancing");
+
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "aaa" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 );
+
+assert.eq(numDocs, coll.count(), "Documents weren't inserted correctly");
+
+step("dump cluster");
+
+dumpdir = MongoRunner.dataDir + "/dumprestore9-dump1/";
+resetDbpath(dumpdir);
+runMongoProgram( "mongodump", "--host", s._mongos[0].host, "--out", dumpdir );
+
+step("Shutting down cluster");
+
+s.stop();
+
+step("Starting up clean cluster");
+s = new ShardingTest( "dumprestore9b", 2, 0, 3, {chunksize:1} );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+assert.eq(0, coll.count(), "Data wasn't cleaned up by restarting sharding test");
+
+step("Restore data and config");
+
+runMongoProgram( "mongorestore", dumpdir, "--host", s._mongos[1].host, "--restoreShardingConfig", "--forceConfigRestore");
+
+config = s.getDB("config");
+assert(config.databases.findOne({_id:'aaa'}).partitioned, "Config data wasn't restored properly");
+
+assert( s.chunkDiff( "foo" , "aaa" ) < 2, "Chunk data wasn't restored properly");
+
+assert.eq(numDocs, coll.count(), "Didn't restore all documents properly2");
+assert.eq(numDocs, coll.find().itcount(), "Didn't restore all documents properly");
+
+for (var i = 0; i < numDocs; i++) {
+ doc = coll.findOne({x:i});
+ assert.eq(i, doc.x, "Doc missing from the shard it should be on");
+}
+
+for (var i = 0; i < s._connections.length; i++) {
+ assert(s._connections[i].getDB("aaa").foo.count() > 0, "No data on shard: " + s._connections[i].host);
+}
+
+step("Stop cluster");
+s.stop();
+step("SUCCESS");
+} \ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js
new file mode 100644
index 00000000000..d6b87ffe70c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestoreWithNoOptions.js
@@ -0,0 +1,107 @@
+// SERVER-6366
+// relates to SERVER-808
+//
+// This file tests that options are not restored upon
+// mongorestore with --noOptionsRestore
+//
+// It checks that this works both when doing a full
+// database dump/restore and when doing it just for a
+// single db or collection.
+
+t = new ToolTest( "dumprestoreWithNoOptions" );
+
+t.startDB( "foo" );
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt],
+ 'invalid option:' + tojson(options) + " " + tojson(cappedOptions));
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext , "--noOptionsRestore");
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert(undefined === db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
+
+// Dump/restore single DB
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+dumppath = t.ext + "noOptionsSingleDump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname, "--noOptionsRestore");
+
+db = db.getSiblingDB(dbname2);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert(undefined === db.capped.exists().options, "restore options not ignored");
+
+// Dump/restore single collection
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.system.indexes.count(), "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+db.capped.insert({ x: 1 });
+db.getLastError()
+
+dumppath = t.ext + "noOptionsSingleColDump/";
+mkdir(dumppath);
+dbname = db.getName();
+t.runTool( "dump" , "-d", dbname, "-c", "capped", "--out" , dumppath );
+
+db.dropDatabase();
+
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.system.indexes.count(), "indexes not dropped" );
+
+t.runTool( "restore", "-d", dbname, "--drop", "--noOptionsRestore", dumppath + dbname );
+
+db = db.getSiblingDB(dbname);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert( true !== db.capped.stats().capped, "restore options were not ignored" );
+assert( undefined === db.capped.exists().options );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth.js
new file mode 100644
index 00000000000..f99b5d0405c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth.js
@@ -0,0 +1,35 @@
+// dumprestore_auth.js
+
+t = new ToolTest("dumprestore_auth", { auth : "" });
+
+c = t.startDB("foo");
+
+adminDB = c.getDB().getSiblingDB('admin');
+adminDB.createUser({user: 'admin', pwd: 'password', roles: ['root']});
+adminDB.auth('admin','password');
+adminDB.createUser({user: 'backup', pwd: 'password', roles: ['backup']});
+adminDB.createUser({user: 'restore', pwd: 'password', roles: ['restore']});
+
+assert.eq(0 , c.count() , "setup1");
+c.save({ a : 22 });
+assert.eq(1 , c.count() , "setup2");
+
+assert.commandWorked(c.runCommand("collMod", {usePowerOf2Sizes: false}));
+assert.eq(0, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags);
+
+t.runTool("dump" , "--out" , t.ext, "--username", "backup", "--password", "password");
+
+c.drop();
+assert.eq(0 , c.count() , "after drop");
+
+t.runTool("restore" , "--dir" , t.ext, "--writeConcern", "0"); // Should fail
+assert.eq(0 , c.count() , "after restore without auth");
+
+t.runTool("restore" , "--dir" , t.ext, "--username", "restore", "--password", "password", "--writeConcern", "0");
+assert.soon("c.findOne()" , "no data after sleep");
+assert.eq(1 , c.count() , "after restore 2");
+assert.eq(22 , c.findOne().a , "after restore 2");
+assert.eq(0, c.getDB().system.namespaces.findOne({name: c.getFullName()}).options.flags);
+assert.eq(3, adminDB.system.users.count());
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth2.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth2.js
new file mode 100644
index 00000000000..fd7d9a034d3
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth2.js
@@ -0,0 +1,96 @@
+// dumprestore_auth2.js
+// Tests that mongodump and mongorestore properly handle access control information
+// Tests that the default auth roles of backup and restore work properly.
+
+t = new ToolTest("dumprestore_auth2", {auth: ""});
+
+coll = t.startDB("foo");
+admindb = coll.getDB().getSiblingDB("admin")
+
+// Create the relevant users and roles.
+admindb.createUser({user: "root", pwd: "pass", roles: ["root"]});
+admindb.auth("root", "pass");
+
+admindb.createUser({user: "backup", pwd: "pass", roles: ["backup"]});
+admindb.createUser({user: "restore", pwd: "pass", roles: ["restore"]});
+
+admindb.createRole({role: "customRole",
+ privileges:[{resource: {db: "jstests_tool_dumprestore_auth2",
+ collection: "foo"},
+ actions: ["find"]}],
+ roles:[]});
+admindb.createUser({user: "test", pwd: "pass", roles: ["customRole"]});
+
+coll.insert({word: "tomato"});
+assert.eq(1, coll.count());
+
+assert.eq(4, admindb.system.users.count(), "setup users")
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}),
+ "setup2: " + tojson( admindb.system.users.getIndexes() ) );
+assert.eq(1, admindb.system.roles.count(), "setup3")
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}), "setup4")
+assert.eq(1, admindb.system.version.count());
+var versionDoc = admindb.system.version.findOne();
+
+// Logout root user.
+admindb.logout();
+
+// Verify that the custom role works as expected.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+// Dump the database.
+t.runTool("dump", "--out", t.ext, "--username", "backup", "--password", "pass");
+
+// Drop the relevant data in the database.
+admindb.auth("root", "pass");
+coll.getDB().dropDatabase();
+admindb.dropUser("backup");
+admindb.dropUser("test");
+admindb.dropRole("customRole");
+
+assert.eq(2, admindb.system.users.count(), "didn't drop backup and test users");
+assert.eq(0, admindb.system.roles.count(), "didn't drop roles");
+assert.eq(0, coll.count(), "didn't drop foo coll");
+
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--writeConcern", "0");
+
+assert.soon("admindb.system.users.findOne()", "no data after restore");
+assert.eq(4, admindb.system.users.count(), "didn't restore users");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}),
+ "didn't restore user indexes");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}),
+ "didn't restore role indexes");
+
+admindb.logout();
+
+// Login as user with customRole to verify privileges are restored.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+admindb.auth("root", "pass");
+admindb.createUser({user: "root2", pwd: "pass", roles: ["root"]});
+admindb.dropRole("customRole");
+admindb.createRole({role: "customRole2", roles: [], privileges:[]});
+admindb.dropUser("root");
+admindb.logout();
+
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--drop", "--writeConcern", "0");
+
+admindb.auth("root", "pass");
+assert.soon("1 == admindb.system.users.find({user:'root'}).count()", "didn't restore users 2");
+assert.eq(0, admindb.system.users.find({user:'root2'}).count(), "didn't drop users");
+assert.eq(0, admindb.system.roles.find({role:'customRole2'}).count(), "didn't drop roles");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.users"}),
+ "didn't maintain user indexes");
+assert.eq(2, admindb.system.indexes.count({ns: "admin.system.roles"}),
+ "didn't maintain role indexes");
+assert.eq(1, admindb.system.version.count(), "didn't restore version");
+assert.docEq(versionDoc, admindb.system.version.findOne(), "version doc wasn't restored properly");
+admindb.logout();
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth3.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth3.js
new file mode 100644
index 00000000000..b87418ed176
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumprestore_auth3.js
@@ -0,0 +1,199 @@
+// dumprestore_auth3.js
+// Tests that mongodump and mongorestore properly handle access control information when doing
+// single-db dumps and restores
+
+
+// Runs the tool with the given name against the given mongod.
+function runTool(toolName, mongod, options) {
+ var opts = {host: mongod.host};
+ Object.extend(opts, options);
+ MongoRunner.runMongoTool(toolName, opts);
+}
+
+var mongod = MongoRunner.runMongod();
+var admindb = mongod.getDB("admin");
+var db = mongod.getDB("foo");
+
+jsTestLog("Creating Admin user & initial data");
+admindb.createUser({user: 'root', pwd: 'pass', roles: ['root']});
+admindb.createUser({user: 'backup', pwd: 'pass', roles: ['backup']});
+admindb.createUser({user: 'restore', pwd: 'pass', roles: ['restore']});
+admindb.createRole({role: "dummyRole", roles: [], privileges:[]});
+db.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+var backupActions = ['find'];
+db.createRole({role: 'backupFooChester',
+ privileges: [{resource: {db: 'foo', collection: 'chester'}, actions: backupActions}],
+ roles: []});
+db.createUser({user: 'backupFooChester', pwd: 'pass', roles: ['backupFooChester']});
+
+var userCount = db.getUsers().length;
+var rolesCount = db.getRoles().length;
+var adminUsersCount = admindb.getUsers().length;
+var adminRolesCount = admindb.getRoles().length;
+var systemUsersCount = admindb.system.users.count();
+var systemVersionCount = admindb.system.version.count();
+
+db.bar.insert({a:1});
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "setup");
+assert.eq(rolesCount, db.getRoles().length, "setup2");
+assert.eq(adminUsersCount, admindb.getUsers().length, "setup3");
+assert.eq(adminRolesCount, admindb.getRoles().length, "setup4");
+assert.eq(systemUsersCount, admindb.system.users.count(), "setup5");
+assert.eq(systemVersionCount, admindb.system.version.count(),"system version");
+assert.eq(1, admindb.system.users.count({user: "restore"}), "Restore user is missing");
+assert.eq(1, admindb.system.users.count({user: "backup"}), "Backup user is missing");
+var versionDoc = admindb.system.version.findOne();
+
+jsTestLog("Dump foo database without dumping user data");
+var dumpDir = MongoRunner.getAndPrepareDumpDirectory("dumprestore_auth3");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo"});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+jsTestLog("Restore foo database from dump that doesn't contain user data ");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restore created users somehow");
+assert.eq(0, db.getRoles().length, "Restore created roles somehow");
+
+// Re-create user data
+db.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+userCount = 1;
+rolesCount = 1;
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't create user");
+assert.eq(rolesCount, db.getRoles().length, "didn't create role");
+
+jsTestLog("Dump foo database *with* user data");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo", dumpDbUsersAndRoles: ""});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore foo database without restoring user data, even though it's in the dump");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restored users even though it shouldn't have");
+assert.eq(0, db.getRoles().length, "Restored roles even though it shouldn't have");
+
+jsTestLog("Restore foo database *with* user data");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq(1, admindb.system.users.count({user: "restore", db: "admin"}), "Restore user is missing");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Make modifications to user data that should be overridden by the restore");
+db.dropUser('user')
+db.createUser({user: 'user2', pwd: 'password2', roles: jsTest.basicUserRoles});
+db.dropRole('role')
+db.createRole({role: 'role2', roles: [], privileges:[]});
+
+jsTestLog("Restore foo database (and user data) with --drop so it overrides the changes made");
+// Restore with --drop to override the changes to user data
+runTool("mongorestore", mongod,
+ {dir: dumpDir + "foo/", db: 'foo', drop: "", restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(adminUsersCount, admindb.getUsers().length, "Admin users were dropped");
+assert.eq(adminRolesCount, admindb.getRoles().length, "Admin roles were dropped");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't update user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't update role");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+
+jsTestLog("Dump just the admin database. User data should be dumped by default");
+// Make a user in another database to make sure it is properly captured
+db.getSiblingDB('bar').createUser({user: "user", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').createUser({user: "user", pwd: 'pwd', roles: []});
+adminUsersCount += 1;
+runTool("mongodump", mongod, {out: dumpDir, db: "admin"});
+db = mongod.getDB('foo');
+
+// Change user data a bit.
+db.dropAllUsers();
+db.getSiblingDB('bar').createUser({user: "user2", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').dropAllUsers();
+
+jsTestLog("Restore just the admin database. User data should be restored by default");
+runTool("mongorestore", mongod, {dir: dumpDir + "admin/", db: 'admin', drop: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+var otherdb = db.getSiblingDB('bar');
+var admindb = db.getSiblingDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't restore user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't restore role");
+assert.eq(1, otherdb.getUsers().length, "didn't restore users for bar database");
+assert.eq("user", otherdb.getUsers()[0].user, "didn't restore user for bar database");
+assert.eq(adminUsersCount, admindb.getUsers().length, "didn't restore users for admin database");
+assert.eq("user", admindb.getUser("user").user, "didn't restore user for admin database");
+assert.eq(6, admindb.system.users.count(), "has the wrong # of users for the whole server");
+assert.eq(2, admindb.system.roles.count(), "has the wrong # of roles for the whole server");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Dump all databases");
+runTool("mongodump", mongod, {out: dumpDir});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore all databases");
+runTool("mongorestore", mongod, {dir: dumpDir, writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(1, db.getUsers().length, "didn't restore users");
+assert.eq(1, db.getRoles().length, "didn't restore roles");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+MongoRunner.stopMongod(mongod);
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/dumpsecondary.js b/src/mongo/gotools/test/legacy26/jstests/tool/dumpsecondary.js
new file mode 100644
index 00000000000..7a641542498
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/dumpsecondary.js
@@ -0,0 +1,41 @@
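+// dumpsecondary.js
+// Tests that mongodump can read from a secondary node and that the
+// resulting dump restores cleanly through the primary.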
+var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} );
+
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+db = master.getDB("foo")
+db.foo.save({a: 1000});
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
+
+assert.eq( 1 , db.foo.count() , "setup" );
+
+var slaves = replTest.liveNodes.slaves;
+assert( slaves.length == 1, "Expected 1 slave but length was " + slaves.length );
+slave = slaves[0];
+
+var args = ['mongodump', '-h', slave.host, '--out', MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+var authargs = ['--username', jsTest.options().authUser, '--password', jsTest.options().authPassword];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args);
+db.foo.drop()
+
+assert.eq( 0 , db.foo.count() , "after drop" );
+args = ['mongorestore', '-h', master.host, MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args)
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "after restore" );
+assert.eq( 1000 , db.foo.findOne().a , "after restore 2" );
+
+resetDbpath(MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external')
+
+replTest.stopSet(15)
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport1.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport1.js
new file mode 100644
index 00000000000..a7a7bcee90c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport1.js
@@ -0,0 +1,68 @@
+// exportimport1.js
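+// Round-trips documents through mongoexport/mongoimport, including arrays
+// that contain undefined values (see the SERVER-6102 workaround below).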
+
+t = new ToolTest( "exportimport1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+var arr = ["x", undefined, "y", undefined];
+c.save( { a : 22 , b : arr} );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+assert.eq( 22 , doc.a , "after restore 2" );
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+ // null should be { "$undefined" : true }, but this is a workaround for SERVER-6102
+ assert.eq( null, doc.b[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.b[i] , "after restore array: "+i );
+ }
+}
+
+// now with --jsonArray
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+arr = ["a", undefined, "c"];
+c.save({a : arr});
+assert.eq( 1 , c.count() , "setup2" );
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+ // null should be { "$undefined" : true }, but this is a workaround for SERVER-6102
+ assert.eq( null, doc.a[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.a[i] , "after restore array: "+i );
+ }
+}
+
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport3.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport3.js
new file mode 100644
index 00000000000..f18ba6cbd4b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport3.js
@@ -0,0 +1,28 @@
+// exportimport3.js
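+// Round-trips a small collection through mongoexport/mongoimport --jsonArray.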
+
+t = new ToolTest( "exportimport3" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save({a:1})
+c.save({a:2})
+c.save({a:3})
+c.save({a:4})
+c.save({a:5})
+
+assert.eq( 5 , c.count() , "setup2" );
+
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 5 , c.count() , "after restore 2" );
+
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport4.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport4.js
new file mode 100644
index 00000000000..c0d82a135bc
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport4.js
@@ -0,0 +1,59 @@
+// exportimport4.js
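+// Tests that NaN values inside arrays survive the mongoexport/mongoimport
+// round trip and can be filtered with -q queries.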
+
+
+t = new ToolTest( "exportimport4" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, NaN, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ NaN ] } );
+ c.save( { a : [1, 2, 3, 4, NaN, NaN, 5, NaN] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+
+ assert.eq( 5 , c.count() , "setup2" );
+};
+
+// attempt to export fields without NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[NaN]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 1" );
+
+// attempt to export fields with NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:NaN}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 5 , c.count() , "after restore 3" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport5.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport5.js
new file mode 100644
index 00000000000..47dd98c2553
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport5.js
@@ -0,0 +1,83 @@
+// exportimport5.js
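+// Same as exportimport4.js, but exercising Infinity and -Infinity values.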
+
+
+t = new ToolTest( "exportimport5" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, Infinity, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ Infinity ] } );
+ c.save( { a : [1, 2, 3, 4, Infinity, Infinity, 5, -Infinity] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+ c.save( { a : [ -Infinity ] } );
+
+ assert.eq( 6 , c.count() , "setup2" );
+};
+
+// attempt to export fields without Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 1" );
+
+// attempt to export fields with Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export fields without -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[-Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 4 , c.count() , "after restore 3" );
+
+// attempt to export fields with -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:-Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 4" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 6 , c.count() , "after restore 5" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport6.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport6.js
new file mode 100644
index 00000000000..a01d49a9c8b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport6.js
@@ -0,0 +1,26 @@
+// exportimport6.js
+// test export with skip, limit and sort
+
+t = new ToolTest("exportimport6");
+
+c = t.startDB("foo");
+assert.eq(0, c.count(), "setup1");
+c.save({a:1, b:1})
+c.save({a:1, b:2})
+c.save({a:2, b:3})
+c.save({a:2, b:3})
+c.save({a:3, b:4})
+c.save({a:3, b:5})
+
+assert.eq(6, c.count(), "setup2");
+
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo",
+ "--sort", "{a:1, b:-1}", "--skip", "4", "--limit", "1");
+
+c.drop();
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo");
+assert.eq(1, c.count(), "count should be 1");
+assert.eq(5, c.findOne().b, tojson(c.findOne()));
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_bigarray.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_bigarray.js
new file mode 100644
index 00000000000..43a209b8453
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_bigarray.js
@@ -0,0 +1,64 @@
+// Test importing collections represented as a single line array above the maximum document size
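+// The export below produces a single JSON array larger than the 16MB BSON
+// document limit; mongoimport must still be able to parse and insert it.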
+var tt = new ToolTest('exportimport_bigarray_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+// Calculate the number of documents it takes to get above 16MB (here using 20MB just to be safe)
+var bigString = new Array(1025).toString();
+var doc = {_id: new ObjectId(), x:bigString};
+var docSize = Object.bsonsize(doc);
+var numDocs = Math.floor(20*1024*1024 / docSize);
+
+print('Size of one document: ' + docSize)
+print('Number of documents to exceed maximum BSON size: ' + numDocs)
+
+print('About to insert ' + numDocs + ' documents into ' +
+ exportimport_db.getName() + '.' + src.getName());
+var i;
+for (i = 0; i < numDocs; ++i) {
+ src.insert({ x : bigString });
+}
+var lastError = exportimport_db.getLastError();
+if (lastError == null) {
+ print('Finished inserting ' + numDocs + ' documents');
+}
+else {
+ doassert('Insertion failed: ' + lastError);
+}
+
+data = 'data/exportimport_array_test.json';
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + data);
+tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName(),
+ '--jsonArray');
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + data);
+tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName(),
+ '--jsonArray');
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_date.js b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_date.js
new file mode 100644
index 00000000000..57a860ca1a8
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/exportimport_date.js
@@ -0,0 +1,51 @@
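+// Tests that Date values, including dates too large to format as ISODate
+// strings, survive the mongoexport/mongoimport round trip.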
+var tt = new ToolTest('exportimport_date_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+// Insert a date that we can format
+var formatable = ISODate("1970-01-02T05:00:00Z");
+assert.eq(formatable.valueOf(), 104400000);
+src.insert({ "_id" : formatable });
+
+// Insert a date that we cannot format as an ISODate string
+var nonformatable = ISODate("3001-01-01T00:00:00Z");
+assert.eq(nonformatable.valueOf(), 32535216000000);
+src.insert({ "_id" : nonformatable });
+
+// Verify number of documents inserted
+assert.eq(2, src.find().itcount());
+
+data = 'data/exportimport_date_test.json';
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + data);
+tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName());
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + data);
+tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName());
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/files1.js b/src/mongo/gotools/test/legacy26/jstests/tool/files1.js
new file mode 100644
index 00000000000..acfcc16dcc3
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/files1.js
@@ -0,0 +1,29 @@
+// files1.js
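+// Round-trips a file through mongofiles (put, then get) and verifies its
+// md5 checksum after each step.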
+
+t = new ToolTest( "files1" )
+
+db = t.startDB();
+
+filename = 'mongod'
+if ( _isWindows() )
+ filename += '.exe'
+
+t.runTool( "files" , "-d" , t.baseName , "put" , filename );
+md5 = md5sumFile(filename);
+
+file_obj = db.fs.files.findOne()
+assert( file_obj , "A 0" );
+md5_stored = file_obj.md5;
+md5_computed = db.runCommand({filemd5: file_obj._id}).md5;
+assert.eq( md5 , md5_stored , "A 1" );
+assert.eq( md5 , md5_computed, "A 2" );
+
+mkdir(t.ext);
+
+t.runTool( "files" , "-d" , t.baseName , "get" , filename , '-l' , t.extFile );
+md5 = md5sumFile(t.extFile);
+assert.eq( md5 , md5_stored , "B" );
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/oplog1.js b/src/mongo/gotools/test/legacy26/jstests/tool/oplog1.js
new file mode 100644
index 00000000000..e9a002bfb65
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/oplog1.js
@@ -0,0 +1,26 @@
+// oplog1.js
+
+// Very basic test for mongooplog: needs a lot more coverage, but checks
+// that the tool functions at all.
+
+t = new ToolTest( "oplog1" );
+
+db = t.startDB();
+
+output = db.output
+
+doc = { _id : 5 , x : 17 };
+
+db.oplog.insert( { ts : new Timestamp() , "op" : "i" , "ns" : output.getFullName() , "o" : doc } );
+
+assert.eq( 0 , output.count() , "before" )
+
+t.runTool( "oplog" , "--oplogns" , db.getName() + ".oplog" , "--from" , "127.0.0.1:" + t.port , "-vv" );
+
+assert.eq( 1 , output.count() , "after" );
+
+assert.eq( doc , output.findOne() , "after check" );
+
+t.stop();
+
+
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/oplog_all_ops.js b/src/mongo/gotools/test/legacy26/jstests/tool/oplog_all_ops.js
new file mode 100644
index 00000000000..8f231cb233d
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/oplog_all_ops.js
@@ -0,0 +1,61 @@
+/**
+ * Performs a simple test on mongooplog by doing different types of operations
+ * that will show up in the oplog then replaying it on another replica set.
+ * Correctness is verified using the dbhash command.
+ */
+
+var repl1 = new ReplSetTest({ name: 'rs1', nodes: [{ nopreallocj: '' },
+ { arbiter: true }, { arbiter: true }]});
+
+repl1.startSet({ oplogSize: 10 });
+repl1.initiate();
+repl1.awaitSecondaryNodes();
+
+var repl1Conn = new Mongo(repl1.getURL());
+var testDB = repl1Conn.getDB('test');
+var testColl = testDB.user;
+
+// op i
+testColl.insert({ x: 1 });
+testColl.insert({ x: 2 });
+
+// op c
+testDB.dropDatabase();
+
+testColl.insert({ y: 1 });
+testColl.insert({ y: 2 });
+testColl.insert({ y: 3 });
+
+// op u
+testColl.update({}, { $inc: { z: 1 }}, true, true);
+
+// op d
+testColl.remove({ y: 2 });
+
+// op n
+var oplogColl = repl1Conn.getCollection('local.oplog.rs');
+oplogColl.insert({ ts: new Timestamp(), op: 'n', ns: testColl.getFullName(), 'o': { x: 'noop' }});
+
+var repl2 = new ReplSetTest({ name: 'rs2', startPort: 31100, nodes: [{ nopreallocj: '' },
+ { arbiter: true }, { arbiter: true }]});
+
+repl2.startSet({ oplogSize: 10 });
+repl2.initiate();
+repl2.awaitSecondaryNodes();
+
+var srcConn = repl1.getPrimary();
+runMongoProgram('mongooplog', '--from', srcConn.host,
+                '--host', repl2.getPrimary().host);
+
+var repl1Hash = testDB.runCommand({ dbhash: 1 });
+
+var repl2Conn = new Mongo(repl2.getURL());
+var testDB2 = repl2Conn.getDB(testDB.getName());
+var repl2Hash = testDB2.runCommand({ dbhash: 1 });
+
+assert(repl1Hash.md5);
+assert.eq(repl1Hash.md5, repl2Hash.md5);
+
+repl1.stopSet();
+repl2.stopSet();
+
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/restorewithauth.js b/src/mongo/gotools/test/legacy26/jstests/tool/restorewithauth.js
new file mode 100644
index 00000000000..ac9e7bc756b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/restorewithauth.js
@@ -0,0 +1,113 @@
+/* SERVER-4972
+ * Test that mongorestore against a server started with --auth fails without
+ * credentials, even for collections that have no indexes.
+ */
+/*
+ * 1) Start mongo without auth.
+ * 2) Write to collection
+ * 3) Take dump of the collection using mongodump.
+ * 4) Drop the collection.
+ * 5) Stop mongod from step 1.
+ * 6) Restart mongod with auth.
+ * 7) Add admin user to kick authentication
+ * 8) Try restore without auth credentials. The restore should fail
+ * 9) Try restore with correct auth credentials. The restore should succeed this time.
+ */
+
+var port = allocatePorts(1)[0];
+baseName = "jstests_restorewithauth";
+var conn = startMongod( "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// write to ns foo.bar
+var foo = conn.getDB( "foo" );
+for( var i = 0; i < 4; i++ ) {
+ foo["bar"].save( { "x": i } );
+ foo["baz"].save({"x": i});
+}
+
+// make sure the collection exists
+assert.eq( foo.system.namespaces.count({name: "foo.bar"}), 1 )
+
+// make sure the only indexes are the default _id indexes (one per collection)
+assert.eq(foo.system.indexes.count(), 2);
+
+foo.bar.createIndex({x:1});
+assert.eq(foo.system.indexes.count(), 3);
+
+// get data dump
+var dumpdir = MongoRunner.dataDir + "/restorewithauth-dump1/";
+resetDbpath( dumpdir );
+x = runMongoProgram("mongodump", "--db", "foo", "-h", "127.0.0.1:"+port, "--out", dumpdir);
+
+// now drop the db
+foo.dropDatabase();
+
+// stop mongod
+stopMongod( port );
+
+// start mongod with --auth
+conn = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// admin user
+var admin = conn.getDB( "admin" )
+admin.createUser({user: "admin" , pwd: "admin", roles: jsTest.adminUserRoles});
+admin.auth( "admin" , "admin" );
+
+var foo = conn.getDB( "foo" )
+
+// make sure no collection with the same name exists
+assert.eq(foo.system.namespaces.count( {name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count( {name: "foo.baz"}), 0);
+
+// now try to restore dump
+x = runMongoProgram( "mongorestore", "-h", "127.0.0.1:" + port, "--dir" , dumpdir, "-vvvvv" );
+
+// make sure that the collection isn't restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0);
+
+// now try to restore dump with correct credentials
+x = runMongoProgram( "mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "--authenticationDatabase=admin",
+ "-u", "admin",
+ "-p", "admin",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1);
+
+// make sure the collection has 4 documents
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+
+foo.dropDatabase();
+
+// make sure that the collection is empty
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 0);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 0);
+
+foo.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
+
+// now try to restore dump with foo database credentials
+x = runMongoProgram("mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "-u", "user",
+ "-p", "password",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+assert.eq(foo.system.namespaces.count({name: "foo.bar"}), 1);
+assert.eq(foo.system.namespaces.count({name: "foo.baz"}), 1);
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+assert.eq(foo.system.indexes.count(), 3); // _id on bar, _id on baz, x_1 on bar
+
+stopMongod( port );
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/stat1.js b/src/mongo/gotools/test/legacy26/jstests/tool/stat1.js
new file mode 100644
index 00000000000..539827e1704
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/stat1.js
@@ -0,0 +1,22 @@
+// stat1.js
+// test mongostat with authentication SERVER-3875
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_stat1";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+t = db[ baseName ];
+t.drop();
+
+db.dropAllUsers();
+
+db.createUser({user: "eliot" , pwd: "eliot", roles: jsTest.adminUserRoles});
+
+assert( db.auth( "eliot" , "eliot" ) , "auth failed" );
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "eliot", "--rowcount", "1", "--authenticationDatabase=admin" );
+assert.eq(x, 0, "mongostat should exit successfully with eliot:eliot");
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "wrong", "--rowcount", "1", "--authenticationDatabase=admin" );
+assert.eq(x, 1, "mongostat should exit with 1 with eliot:wrong");
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/tool1.js b/src/mongo/gotools/test/legacy26/jstests/tool/tool1.js
new file mode 100644
index 00000000000..f7c6f769e72
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/tool1.js
@@ -0,0 +1,44 @@
+// mongo tool tests, very basic to start with
+
+
+baseName = "jstests_tool_tool1";
+dbPath = MongoRunner.dataPath + baseName + "/";
+externalPath = MongoRunner.dataPath + baseName + "_external/";
+externalBaseName = "export.json";
+externalFile = externalPath + externalBaseName;
+
+function fileSize(){
+ var l = listFiles( externalPath );
+ for ( var i=0; i<l.length; i++ ){
+ if ( l[i].baseName == externalBaseName )
+ return l[i].size;
+ }
+ return -1;
+}
+
+
+port = allocatePorts( 1 )[ 0 ];
+resetDbpath( externalPath );
+
+m = startMongod( "--port", port, "--dbpath", dbPath, "--nohttpinterface", "--noprealloc" , "--bind_ip", "127.0.0.1" );
+c = m.getDB( baseName ).getCollection( baseName );
+c.save( { a: 1 } );
+assert( c.findOne() );
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:" + port, "--out", externalPath );
+c.drop();
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:" + port, "--dir", externalPath );
+assert.soon( "c.findOne()" , "mongodump then restore has no data w/sleep" );
+assert( c.findOne() , "mongodump then restore has no data" );
+assert.eq( 1 , c.findOne().a , "mongodump then restore has no broken data" );
+
+resetDbpath( externalPath );
+
+assert.eq( -1 , fileSize() , "mongoexport prep invalid" );
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--out", externalFile );
+assert.lt( 10 , fileSize() , "file size changed" );
+
+c.drop();
+runMongoProgram( "mongoimport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--file", externalFile );
+assert.soon( "c.findOne()" , "mongo import json A" );
+assert( c.findOne() && 1 == c.findOne().a , "mongo import json B" );
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/tool_replset.js b/src/mongo/gotools/test/legacy26/jstests/tool/tool_replset.js
new file mode 100644
index 00000000000..bc50a0fd7d4
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/tool_replset.js
@@ -0,0 +1,89 @@
+/*
+ * Test to ensure that (dump/restore/export/import/oplog) works with a replica set connection string
+ * 1. Start a replica set.
+ * 2. Add data to a collection.
+ * 3. Take a dump of the database.
+ * 4. Drop the db.
+ * 5. Restore the db.
+ * 6. Export a collection.
+ * 7. Drop the collection.
+ * 8. Import the collection.
+ * 9. Add data to the oplog.rs collection.
+ * 10. Ensure that the document doesn't exist yet.
+ * 11. Now play the mongooplog tool.
+ * 12. Make sure that the oplog was played
+*/
+
+// Load utility methods for replica set tests
+load("jstests/replsets/rslib.js");
+
+print("starting the replica set")
+
+var replTest = new ReplSetTest({ name: 'tool_replset', nodes: 2, oplogSize: 5 });
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+for (var i = 0; i < 100; i++) {
+ master.getDB("foo").bar.insert({ a: i });
+}
+replTest.awaitReplication();
+
+var replSetConnString = "tool_replset/127.0.0.1:" + replTest.ports[0] +
+ ",127.0.0.1:" + replTest.ports[1];
+
+// Test with mongodump/mongorestore
+print("dump the db");
+var data = MongoRunner.dataDir + "/tool_replset-dump1/";
+runMongoProgram("mongodump", "--host", replSetConnString, "--out", data);
+
+print("db successfully dumped, dropping now");
+master.getDB("foo").dropDatabase();
+replTest.awaitReplication();
+
+print("restore the db");
+runMongoProgram("mongorestore", "--host", replSetConnString, "--dir", data);
+
+print("db successfully restored, checking count")
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongorestore should have successfully restored the collection");
+
+replTest.awaitReplication();
+
+// Test with mongoexport/mongoimport
+print("export the collection");
+var extFile = MongoRunner.dataDir + "/tool_replset/export";
+runMongoProgram("mongoexport", "--host", replSetConnString, "--out", extFile,
+ "-d", "foo", "-c", "bar");
+
+print("collection successfully exported, dropping now");
+master.getDB("foo").getCollection("bar").drop();
+replTest.awaitReplication();
+
+print("import the collection");
+runMongoProgram("mongoimport", "--host", replSetConnString, "--file", extFile,
+ "-d", "foo", "-c", "bar");
+
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongoimport should have successfully imported the collection");
+
+// Test with mongooplog
+var doc = { _id : 5, x : 17 };
+master.getDB("local").oplog.rs.insert({ ts : new Timestamp(), "op" : "i", "ns" : "foo.bar",
+ "o" : doc });
+
+assert.eq(100, master.getDB("foo").getCollection("bar").count(), "count before running mongooplog " +
+ "was not 100 as expected");
+
+runMongoProgram("mongooplog" , "--from", "127.0.0.1:" + replTest.ports[0],
+ "--host", replSetConnString);
+
+print("running mongooplog to replay the oplog")
+
+assert.eq(101, master.getDB("foo").getCollection("bar").count(), "count after running mongooplog " +
+ "was not 101 as expected")
+
+print("all tests successful, stopping replica set")
+
+replTest.stopSet();
+
+print("replica set stopped, test complete")
diff --git a/src/mongo/gotools/test/legacy26/jstests/tool/tsv1.js b/src/mongo/gotools/test/legacy26/jstests/tool/tsv1.js
new file mode 100644
index 00000000000..1b0ddbb7c9e
--- /dev/null
+++ b/src/mongo/gotools/test/legacy26/jstests/tool/tsv1.js
@@ -0,0 +1,30 @@
+// tsv1.js
+
+t = new ToolTest( "tsv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : "", b : 1 , c : "foobar" , d: 5, e: -6 };
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "-f" , "a,b,c,d,e" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id;
+delete a[1]._id;
+
+assert.eq( { a : "a" , b : "b" , c : "c" , d: "d", e: "e" } , a[1] , "tsv parse 1" );
+assert.eq( base , a[0] , "tsv parse 0" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop 2" );
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "--headerline" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne();
+delete x._id;
+assert.eq( base , x , "tsv parse 2" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/buildscripts/buildlogger.py b/src/mongo/gotools/test/legacy28/buildscripts/buildlogger.py
new file mode 100644
index 00000000000..d2466e495c0
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/buildscripts/buildlogger.py
@@ -0,0 +1,492 @@
+"""
+buildlogger.py
+
+Wrap a command (specified on the command line invocation of buildlogger.py)
+and send output in batches to the buildlogs web application via HTTP POST.
+
+The script configures itself from environment variables:
+
+ required env vars:
+ MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit")
+ MONGO_BUILD_NUMBER (an integer)
+ MONGO_TEST_FILENAME (not required when invoked with -g)
+
+ optional env vars:
+ MONGO_PHASE (e.g. "core", "slow nightly", etc)
+ MONGO_* (any other environment vars are passed to the web app)
+ BUILDLOGGER_CREDENTIALS (see below)
+
+This script has two modes: a "test" mode, intended to wrap the invocation of
+an individual test file, and a "global" mode, intended to wrap the mongod
+instances that run throughout the duration of a mongo test phase (the logs
+from "global" invocations are displayed interspersed with the logs of each
+test, in order to let the buildlogs web app display the full output sensibly.)
+
+If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a
+path to a valid Python file containing "username" and "password" variables,
+which should be valid credentials for authenticating to the buildlogger web
+app. For example:
+
+ username = "hello"
+ password = "world"
+
+If BUILDLOGGER_CREDENTIALS is a relative path, then the directories one,
+two, and three levels up from this script's location are searched, in
+that order.
+"""
+
+import functools
+import os
+import os.path
+import re
+import signal
+import socket
+import subprocess
+import sys
+import time
+import traceback
+import urllib2
+import utils
+
+# suppress deprecation warnings that happen when
+# we import the 'buildbot.tac' file below
+import warnings
+warnings.simplefilter('ignore', DeprecationWarning)
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# try to load the shared secret from the credentials file
+# (buildbot.tac by default), which may be one, two, or three
+# directories up from this file's location
+credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac')
+credentials_loc, credentials_name = os.path.split(credentials_file)
+if not credentials_loc:
+ here = os.path.abspath(os.path.dirname(__file__))
+ possible_paths = [
+ os.path.abspath(os.path.join(here, '..')),
+ os.path.abspath(os.path.join(here, '..', '..')),
+ os.path.abspath(os.path.join(here, '..', '..', '..')),
+ ]
+else:
+ possible_paths = [credentials_loc]
+
+username, password = None, None
+for path in possible_paths:
+ credentials_path = os.path.join(path, credentials_name)
+ if os.path.isfile(credentials_path):
+ credentials = {}
+ try:
+ execfile(credentials_path, credentials, credentials)
+ username = credentials.get('slavename', credentials.get('username'))
+ password = credentials.get('passwd', credentials.get('password'))
+ break
+ except:
+ pass
+
+
+URL_ROOT = os.environ.get('BUILDLOGGER_URL', 'http://buildlogs.mongodb.org/')
+TIMEOUT_SECONDS = 10
+socket.setdefaulttimeout(TIMEOUT_SECONDS)
+
+digest_handler = urllib2.HTTPDigestAuthHandler()
+digest_handler.add_password(
+ realm='buildlogs',
+ uri=URL_ROOT,
+ user=username,
+ passwd=password)
+
+# This version of HTTPErrorProcessor is copied from
+# Python 2.7, and allows REST response codes (e.g.
+# "201 Created") which are treated as errors by
+# older versions.
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
+ def http_response(self, request, response):
+ code, msg, hdrs = response.code, response.msg, response.info()
+
+ # According to RFC 2616, "2xx" code indicates that the client's
+ # request was successfully received, understood, and accepted.
+ if not (200 <= code < 300):
+ response = self.parent.error(
+ 'http', request, response, code, msg, hdrs)
+
+ return response
+
+url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor())
+
+def url(endpoint):
+ if not endpoint.endswith('/'):
+ endpoint = '%s/' % endpoint
+
+ return '%s/%s' % (URL_ROOT.rstrip('/'), endpoint)
+
+def post(endpoint, data, headers=None):
+ data = json.dumps(data, encoding='utf-8')
+
+ headers = headers or {}
+ headers.update({'Content-Type': 'application/json; charset=utf-8'})
+
+ req = urllib2.Request(url=url(endpoint), data=data, headers=headers)
+ try:
+ response = url_opener.open(req)
+ except urllib2.URLError:
+ import traceback
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ # indicate that the request did not succeed
+ return None
+
+ response_headers = dict(response.info())
+
+    # e.g. "Content-Type: application/json; charset=utf-8"
+    content_type = response_headers.get('content-type')
+    match = re.match(r'(?P<mimetype>[^;]+)(?:.*charset=(?P<charset>[^ ]+))?$', content_type)
+ if match and match.group('mimetype') == 'application/json':
+ encoding = match.group('charset') or 'utf-8'
+ return json.load(response, encoding=encoding)
+
+ return response.read()
+
+def traceback_to_stderr(func):
+ """
+ decorator which logs any exceptions encountered to stderr
+    and returns None.
+ """
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError, err:
+ sys.stderr.write('error: HTTP code %d\n----\n' % err.code)
+ if hasattr(err, 'hdrs'):
+ for k, v in err.hdrs.items():
+ sys.stderr.write("%s: %s\n" % (k, v))
+ sys.stderr.write('\n')
+ sys.stderr.write(err.read())
+ sys.stderr.write('\n----\n')
+ sys.stderr.flush()
+ except:
+ sys.stderr.write('Traceback from buildlogger:\n')
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ return None
+ return wrapper
+
+
+@traceback_to_stderr
+def get_or_create_build(builder, buildnum, extra={}):
+ data = {'builder': builder, 'buildnum': buildnum}
+ data.update(extra)
+ response = post('build', data)
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def create_test(build_id, test_filename, test_command, test_phase):
+ response = post('build/%s/test' % build_id, {
+ 'test_filename': test_filename,
+ 'command': test_command,
+ 'phase': test_phase,
+ })
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def append_test_logs(build_id, test_id, log_lines):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def append_global_logs(build_id, log_lines):
+ """
+ "global" logs are for the mongod(s) started by smoke.py
+ that last the duration of a test phase -- since there
+ may be output in here that is important but spans individual
+ tests, the buildlogs webapp handles these logs specially.
+ """
+ response = post('build/%s' % build_id, data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def finish_test(build_id, test_id, failed=False):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=[], headers={
+ 'X-Sendlogs-Test-Done': 'true',
+ 'X-Sendlogs-Test-Failed': failed and 'true' or 'false',
+ })
+ if response is None:
+ return False
+ return True
+
+def run_and_echo(command):
+ """
+ this just calls the command, and returns its return code,
+ allowing stdout and stderr to work as normal. it is used
+ as a fallback when environment variables or python
+ dependencies cannot be configured, or when the logging
+ webapp is unavailable, etc
+ """
+ proc = subprocess.Popen(command)
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ proc.wait()
+
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+class LogAppender(object):
+ def __init__(self, callback, args, send_after_lines=2000, send_after_seconds=10):
+ self.callback = callback
+ self.callback_args = args
+
+ self.send_after_lines = send_after_lines
+ self.send_after_seconds = send_after_seconds
+
+ self.buf = []
+ self.retrybuf = []
+ self.last_sent = time.time()
+
+ def __call__(self, line):
+ self.buf.append((time.time(), line))
+
+ delay = time.time() - self.last_sent
+ if len(self.buf) >= self.send_after_lines or delay >= self.send_after_seconds:
+ self.submit()
+
+ # no return value is expected
+
+ def submit(self):
+ if len(self.buf) + len(self.retrybuf) == 0:
+ return True
+
+ args = list(self.callback_args)
+ args.append(list(self.buf) + self.retrybuf)
+
+ self.last_sent = time.time()
+
+ if self.callback(*args):
+ self.buf = []
+ self.retrybuf = []
+ return True
+ else:
+ self.retrybuf += self.buf
+ self.buf = []
+ return False
+
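+# A minimal usage sketch (hypothetical callback; the real callers below pass
+# append_test_logs / append_global_logs plus their ids via `args`):
+#
+#   appender = LogAppender(callback=lambda lines: True, args=())
+#   appender('a line of output')   # buffers a (timestamp, line) pair
+#   appender.submit()              # flushes the buffer via the callback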
+
+def wrap_test(command):
+ """
+ call the given command, intercept its stdout and stderr,
+    and send results in batches of 2000 lines or 10s to the
+ buildlogger webapp
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ # test takes some extra info
+ phase = os.environ.get('MONGO_PHASE', 'unknown')
+ test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+ build_info.pop('MONGO_PHASE', None)
+ build_info.pop('MONGO_TEST_FILENAME', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ test_id = create_test(build_id, test_filename, ' '.join(command), phase)
+ if not test_id:
+ return run_and_echo(command)
+
+ # the peculiar formatting here matches what is printed by
+ # smoke.py when starting tests
+ output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
+ sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
+ sys.stdout.flush()
+
+ callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+ returncode = loop_and_callback(command, callback)
+ failed = bool(returncode != 0)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ tries = 5
+    while not finish_test(build_id, test_id, failed) and tries > 0:
+ sys.stderr.write('failed to mark test finished, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def wrap_global(command):
+ """
+ call the given command, intercept its stdout and stderr,
+    and send results in batches of 2000 lines or 10s to the
+ buildlogger webapp. see :func:`append_global_logs` for the
+ difference between "global" and "test" log output.
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('int(os.environ["MONGO_BUILD_NUMBER"]):\n')
+ sys.stderr.write(traceback.format_exc())
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ callback = LogAppender(callback=append_global_logs, args=(build_id, ))
+ returncode = loop_and_callback(command, callback)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def loop_and_callback(command, callback):
+ """
+ run the given command (a sequence of arguments, ordinarily
+ from sys.argv), and call the given callback with each line
+ of stdout or stderr encountered. after the command is finished,
+ callback is called once more with None instead of a string.
+ """
+ proc = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+
+ # register a handler to delegate SIGTERM
+ # to the child process
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ while proc.poll() is None:
+ try:
+ line = proc.stdout.readline().strip('\r\n')
+ line = utils.unicode_dammit(line)
+ callback(line)
+ except IOError:
+ # if the signal handler is called while
+ # we're waiting for readline() to return,
+ # don't show a traceback
+ break
+
+ # There may be additional buffered output
+ for line in proc.stdout.readlines():
+ callback(line.strip('\r\n'))
+
+ # restore the original signal handler, if any
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+
+if __name__ == '__main__':
+ # argv[0] is 'buildlogger.py'
+ del sys.argv[0]
+
+ if sys.argv[0] in ('-g', '--global'):
+ # then this is wrapping a "global" command, and should
+ # submit global logs to the build, not test logs to a
+ # test within the build
+ del sys.argv[0]
+ wrapper = wrap_global
+
+ else:
+ wrapper = wrap_test
+
+ # if we are missing credentials or the json module, then
+ # we can't use buildlogger; so just echo output, but also
+ # log why we can't work.
+ if json is None:
+ sys.stderr.write('buildlogger: could not import a json module\n')
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ elif username is None or password is None:
+ sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file)
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ # otherwise wrap a test command as normal; the
+ # wrapper functions return the return code of
+ # the wrapped command, so that should be our
+ # exit code as well.
+ sys.exit(wrapper(sys.argv))
diff --git a/src/mongo/gotools/test/legacy28/buildscripts/cleanbb.py b/src/mongo/gotools/test/legacy28/buildscripts/cleanbb.py
new file mode 100644
index 00000000000..fee7efdc0c1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/buildscripts/cleanbb.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os, os.path
+import utils
+import time
+from optparse import OptionParser
+
+def shouldKill( c, root=None ):
+
+ if "smoke.py" in c:
+ return False
+
+ if "emr.py" in c:
+ return False
+
+ if "java" in c:
+ return False
+
+ # if root directory is provided, see if command line matches mongod process running
+ # with the same data directory
+
+    if root and re.compile(r"(\W|^)mongod(.exe)?\s+.*--dbpath(\s+|=)%s(\s+|$)" % root).search( c ):
+ return True
+
+ if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
+ return True
+
+ if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
+ return True
+
+ return False
+
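+# Illustrative: with root="/data/db/", a process line such as
+#   "mongod --port 27017 --dbpath /data/db/"
+# matches the dbpath regex above and gets killed; smoke.py, emr.py,
+# and java processes are always spared.
+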
+def killprocs( signal="", root=None ):
+ killed = 0
+
+ if sys.platform == 'win32':
+ return killed
+
+ l = utils.getprocesslist()
+ print( "num procs:" + str( len( l ) ) )
+ if len(l) == 0:
+ print( "no procs" )
+ try:
+            print( utils.execsys( "/sbin/ifconfig -a" ) )
+ except Exception,e:
+ print( "can't get interfaces" + str( e ) )
+
+ for x in l:
+ x = x.lstrip()
+ if not shouldKill( x, root=root ):
+ continue
+
+ pid = x.split( " " )[0]
+ print( "killing: " + x )
+ utils.execsys( "/bin/kill " + signal + " " + pid )
+ killed = killed + 1
+
+ return killed
+
+
+def tryToRemove(path):
+ for _ in range(60):
+ try:
+ os.remove(path)
+ return True
+ except OSError, e:
+ errno = getattr(e, 'winerror', None)
+ # check for the access denied and file in use WindowsErrors
+ if errno in (5, 32):
+ print("os.remove(%s) failed, retrying in one second." % path)
+ time.sleep(1)
+ else:
+ raise e
+ return False
+
+
+def cleanup( root , nokill ):
+ if nokill:
+ print "nokill requested, not killing anybody"
+ else:
+ if killprocs( root=root ) > 0:
+ time.sleep(3)
+ killprocs( "-9", root=root )
+
+ # delete all regular files, directories can stay
+ # NOTE: if we delete directories later, we can't delete diskfulltest
+ for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+ for x in filenames:
+ foo = dirpath + "/" + x
+ if os.path.exists(foo):
+ if not tryToRemove(foo):
+ raise Exception("Couldn't remove file '%s' after 60 seconds" % foo)
+
+if __name__ == "__main__":
+ parser = OptionParser(usage="read the script")
+ parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+ (options, args) = parser.parse_args()
+
+ root = "/data/db/"
+ if len(args) > 0:
+ root = args[0]
+
+ cleanup( root , options.nokill )
diff --git a/src/mongo/gotools/test/legacy28/buildscripts/smoke.py b/src/mongo/gotools/test/legacy28/buildscripts/smoke.py
new file mode 100755
index 00000000000..cd06b315664
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/buildscripts/smoke.py
@@ -0,0 +1,1447 @@
+#!/usr/bin/env python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test"),
+# don't take arguments for the dbpath, but unconditionally use
+# "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+
+# 4 Running a separate mongo shell for each js file is slower than
+# loading js files into one mongo shell process. Maybe have runTest
+# queue up all filenames ending in ".js" and run them in one mongo
+# shell at the "end" of testing?
+
+# 5 Right now small-oplog implies master/slave replication. Maybe
+# running with replication should be an orthogonal concern. (And
+# maybe test replica set replication, too.)
+
+# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills
+# off all mongods on a box, which means you can't run two smoke.py
+# jobs on the same host at once. So something's gotta change.
+
+from datetime import datetime
+from itertools import izip
+import glob
+from optparse import OptionParser
+import os
+import pprint
+import re
+import shlex
+import signal
+import socket
+import stat
+from subprocess import (PIPE, Popen, STDOUT)
+import sys
+import time
+import threading
+import traceback
+
+from pymongo import MongoClient
+from pymongo.errors import OperationFailure
+from pymongo import ReadPreference
+
+import cleanbb
+import smoke
+import utils
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ from hashlib import md5 # new in 2.5
+except ImportError:
+ from md5 import md5 # deprecated in 2.5
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+
+# TODO clean this up so we don't need globals...
+mongo_repo = os.getcwd() #'./'
+failfile = os.path.join(mongo_repo, 'failfile.smoke')
+test_path = None
+mongod_executable = None
+mongod_port = None
+shell_executable = None
+continue_on_failure = None
+file_of_commands_mode = False
+start_mongod = True
+temp_path = None
+clean_every_n_tests = 1
+clean_whole_dbroot = False
+
+tests = []
+winners = []
+losers = {}
+fails = [] # like losers but in format of tests
+
+# For replication hash checking
+replicated_collections = []
+lost_in_slave = []
+lost_in_master = []
+screwy_in_slave = {}
+
+smoke_db_prefix = ''
+small_oplog = False
+small_oplog_rs = False
+
+test_report = { "results": [] }
+report_file = None
+
+# This class just implements the with statement API
+class NullMongod(object):
+ def start(self):
+ pass
+
+ def stop(self):
+ pass
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.stop()
+ return not isinstance(value, Exception)
+
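+# Suites that manage their own mongod get a NullMongod, so run_tests() below
+# can call start()/stop() unconditionally; both are harmless no-ops here.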
+
+def dump_stacks(signal, frame):
+ print "======================================"
+ print "DUMPING STACKS due to SIGUSR1 signal"
+ print "======================================"
+ threads = threading.enumerate();
+
+ print "Total Threads: " + str(len(threads))
+
+ for id, stack in sys._current_frames().items():
+ print "Thread %d" % (id)
+ print "".join(traceback.format_stack(stack))
+ print "======================================"
+
+
+def buildlogger(cmd, is_global=False):
+ # if the environment variable MONGO_USE_BUILDLOGGER
+ # is set to 'true', then wrap the command with a call
+    # to buildlogger.py, which sends output to the buildlogger
+ # machine; otherwise, return as usual.
+ if os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true':
+ if is_global:
+ return [utils.find_python(), 'buildscripts/buildlogger.py', '-g'] + cmd
+ else:
+ return [utils.find_python(), 'buildscripts/buildlogger.py'] + cmd
+ return cmd
+
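+# Illustrative: exporting MONGO_USE_BUILDLOGGER=true before invoking smoke.py
+# routes test and mongod output through buildscripts/buildlogger.py; any other
+# value leaves commands unwrapped.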
+
+def clean_dbroot(dbroot="", nokill=False):
+ # Clean entire /data/db dir if --with-cleanbb, else clean specific database path.
+ if clean_whole_dbroot and not small_oplog:
+ dbroot = os.path.normpath(smoke_db_prefix + "/data/db")
+ if os.path.exists(dbroot):
+ print("clean_dbroot: %s" % dbroot)
+ cleanbb.cleanup(dbroot, nokill)
+
+
+class mongod(NullMongod):
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+ self.proc = None
+ self.auth = False
+
+ def ensure_test_dirs(self):
+ utils.ensureDir(smoke_db_prefix + "/tmp/unittest/")
+ utils.ensureDir(smoke_db_prefix + "/data/")
+ utils.ensureDir(smoke_db_prefix + "/data/db/")
+
+ def check_mongo_port(self, port=27017):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", int(port)))
+ sock.close()
+
+ def is_mongod_up(self, port=mongod_port):
+ if not start_mongod:
+ return False
+ try:
+ self.check_mongo_port(int(port))
+ return True
+ except Exception,e:
+ print >> sys.stderr, e
+ return False
+
+ def did_mongod_start(self, port=mongod_port, timeout=300):
+ while timeout > 0:
+ time.sleep(1)
+ is_up = self.is_mongod_up(port)
+ if is_up:
+ return True
+ timeout = timeout - 1
+ print >> sys.stderr, "timeout starting mongod"
+ return False
+
+ def start(self):
+ global mongod_port
+ global mongod
+ if self.proc:
+ print >> sys.stderr, "probable bug: self.proc already set in start()"
+ return
+ self.ensure_test_dirs()
+ dir_name = smoke_db_prefix + "/data/db/sconsTests/"
+ self.port = int(mongod_port)
+ self.slave = False
+ if 'slave' in self.kwargs:
+ dir_name = smoke_db_prefix + '/data/db/sconsTestsSlave/'
+ srcport = mongod_port
+ self.port += 1
+ self.slave = True
+
+ clean_dbroot(dbroot=dir_name, nokill=self.slave)
+ utils.ensureDir(dir_name)
+
+ argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
+ # These parameters are always set for tests
+ # SERVER-9137 Added httpinterface parameter to keep previous behavior
+ argv += ['--setParameter', 'enableTestCommands=1', '--httpinterface']
+ if self.kwargs.get('small_oplog'):
+ argv += ["--master", "--oplogSize", "511"]
+ if self.kwargs.get('storage_engine'):
+ argv += ["--storageEngine", self.kwargs.get('storage_engine')]
+ if self.kwargs.get('wiredtiger_engine_config'):
+ argv += ["--wiredTigerEngineConfig", self.kwargs.get('wiredtiger_engine_config')]
+ if self.kwargs.get('wiredtiger_collection_config'):
+ argv += ["--wiredTigerCollectionConfig", self.kwargs.get('wiredtiger_collection_config')]
+ if self.kwargs.get('wiredtiger_index_config'):
+ argv += ["--wiredTigerIndexConfig", self.kwargs.get('wiredtiger_index_config')]
+ params = self.kwargs.get('set_parameters', None)
+ if params:
+ for p in params.split(','): argv += ['--setParameter', p]
+ if self.kwargs.get('small_oplog_rs'):
+ argv += ["--replSet", "foo", "--oplogSize", "511"]
+ if self.slave:
+ argv += ['--slave', '--source', 'localhost:' + str(srcport)]
+ if self.kwargs.get('no_journal'):
+ argv += ['--nojournal']
+ if self.kwargs.get('no_preallocj'):
+ argv += ['--nopreallocj']
+ if self.kwargs.get('auth'):
+ argv += ['--auth', '--setParameter', 'enableLocalhostAuthBypass=false']
+ authMechanism = self.kwargs.get('authMechanism', 'SCRAM-SHA-1')
+ if authMechanism != 'SCRAM-SHA-1':
+ argv += ['--setParameter', 'authenticationMechanisms=' + authMechanism]
+ self.auth = True
+ if self.kwargs.get('keyFile'):
+ argv += ['--keyFile', self.kwargs.get('keyFile')]
+ if self.kwargs.get('use_ssl') or self.kwargs.get('use_x509'):
+ argv += ['--sslMode', "requireSSL",
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation']
+ if self.kwargs.get('use_x509'):
+ argv += ['--clusterAuthMode','x509'];
+ self.auth = True
+ print "running " + " ".join(argv)
+ self.proc = self._start(buildlogger(argv, is_global=True))
+
+ if not self.did_mongod_start(self.port):
+ raise Exception("Failed to start mongod")
+
+ if self.slave:
+ local = MongoClient(port=self.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).local
+ synced = False
+ while not synced:
+ synced = True
+ for source in local.sources.find(fields=["syncedTo"]):
+ synced = synced and "syncedTo" in source and source["syncedTo"]
+
+ def _start(self, argv):
+ """In most cases, just call subprocess.Popen(). On windows,
+ add the started process to a new Job Object, so that any
+ child processes of this process can be killed with a single
+ call to TerminateJobObject (see self.stop()).
+ """
+
+ if os.sys.platform == "win32":
+ # Create a job object with the "kill on job close"
+ # flag; this is inherited by child processes (ie
+ # the mongod started on our behalf by buildlogger)
+ # and lets us terminate the whole tree of processes
+ # rather than orphaning the mongod.
+ import win32job
+
+ # Magic number needed to allow job reassignment in Windows 7
+ # see: MSDN - Process Creation Flags - ms684863
+ CREATE_BREAKAWAY_FROM_JOB = 0x01000000
+
+ proc = Popen(argv, creationflags=CREATE_BREAKAWAY_FROM_JOB)
+
+ self.job_object = win32job.CreateJobObject(None, '')
+
+ job_info = win32job.QueryInformationJobObject(
+ self.job_object, win32job.JobObjectExtendedLimitInformation)
+ job_info['BasicLimitInformation']['LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ win32job.SetInformationJobObject(
+ self.job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ win32job.AssignProcessToJobObject(self.job_object, proc._handle)
+
+ else:
+ proc = Popen(argv)
+
+ return proc
+
+ def stop(self):
+ if not self.proc:
+ print >> sys.stderr, "probable bug: self.proc unset in stop()"
+ return
+ try:
+ if os.sys.platform == "win32":
+ import win32job
+ win32job.TerminateJobObject(self.job_object, -1)
+ # Windows doesn't seem to kill the process immediately, so give it some time to die
+ time.sleep(5)
+ elif hasattr(self.proc, "terminate"):
+ # This method added in Python 2.6
+ self.proc.terminate()
+ else:
+ os.kill(self.proc.pid, 15)
+ except Exception, e:
+ print >> sys.stderr, "error shutting down mongod"
+ print >> sys.stderr, e
+ self.proc.wait()
+ sys.stderr.flush()
+ sys.stdout.flush()
+
+ # Fail hard if mongod terminates with an error. That might indicate that an
+ # instrumented build (e.g. LSAN) has detected an error. For now we aren't doing this on
+ # windows because the exit code seems to be unpredictable. We don't have LSAN there
+ # anyway.
+ retcode = self.proc.returncode
+ if os.sys.platform != "win32" and retcode != 0:
+ raise(Exception('mongod process exited with non-zero code %d' % retcode))
+
+ def wait_for_repl(self):
+ print "Awaiting replicated (w:2, wtimeout:5min) insert (port:" + str(self.port) + ")"
+ MongoClient(port=self.port).testing.smokeWait.insert({}, w=2, wtimeout=5*60*1000)
+ print "Replicated write completed -- done wait_for_repl"
+
+class Bug(Exception):
+ def __str__(self):
+ return 'bug in smoke.py: ' + super(Bug, self).__str__()
+
+class TestFailure(Exception):
+ pass
+
+class TestExitFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status=args[1]
+
+ def __str__(self):
+ return "test %s exited with status %d" % (self.path, self.status)
+
+class TestServerFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status = -1 # this is meaningless as an exit code, but
+ # that's the point.
+ def __str__(self):
+ return 'mongod not running after executing test %s' % self.path
+
+def check_db_hashes(master, slave):
+ # Need to pause a bit so a slave might catch up...
+ if not slave.slave:
+ raise(Bug("slave instance doesn't have slave attribute set"))
+
+ print "waiting for slave (%s) to catch up to master (%s)" % (slave.port, master.port)
+ master.wait_for_repl()
+ print "caught up!"
+
+ # FIXME: maybe make this run dbhash on all databases?
+ for mongod in [master, slave]:
+ client = MongoClient(port=mongod.port, read_preference=ReadPreference.SECONDARY_PREFERRED)
+ mongod.dbhash = client.test.command("dbhash")
+ mongod.dict = mongod.dbhash["collections"]
+
+ global lost_in_slave, lost_in_master, screwy_in_slave, replicated_collections
+
+ replicated_collections += master.dict.keys()
+
+    for coll in replicated_collections:
+        if coll not in slave.dict:
+            if coll not in lost_in_slave:
+                lost_in_slave.append(coll)
+            continue
+        mhash = master.dict[coll]
+        shash = slave.dict[coll]
+ if mhash != shash:
+ mTestDB = MongoClient(port=master.port).test
+ sTestDB = MongoClient(port=slave.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).test
+ mCount = mTestDB[coll].count()
+ sCount = sTestDB[coll].count()
+ stats = {'hashes': {'master': mhash, 'slave': shash},
+ 'counts':{'master': mCount, 'slave': sCount}}
+ try:
+ mDocs = list(mTestDB[coll].find().sort("_id", 1))
+ sDocs = list(sTestDB[coll].find().sort("_id", 1))
+ mDiffDocs = list()
+ sDiffDocs = list()
+ for left, right in izip(mDocs, sDocs):
+ if left != right:
+ mDiffDocs.append(left)
+ sDiffDocs.append(right)
+
+ stats["docs"] = {'master': mDiffDocs, 'slave': sDiffDocs }
+ except Exception, e:
+ stats["error-docs"] = e;
+
+ screwy_in_slave[coll] = stats
+ if mhash == "no _id _index":
+ mOplog = mTestDB.connection.local["oplog.$main"];
+ oplog_entries = list(mOplog.find({"$or": [{"ns":mTestDB[coll].full_name}, \
+ {"op":"c"}]}).sort("$natural", 1))
+ print "oplog for %s" % mTestDB[coll].full_name
+ for doc in oplog_entries:
+ pprint.pprint(doc, width=200)
+
+
+ for db in slave.dict.keys():
+ if db not in master.dict and db not in lost_in_master:
+ lost_in_master.append(db)
+
+
+def ternary( b , l="true", r="false" ):
+ if b:
+ return l
+ return r
+
+# Blech.
+def skipTest(path):
+ basename = os.path.basename(path)
+ parentPath = os.path.dirname(path)
+ parentDir = os.path.basename(parentPath)
+ if small_oplog: # For tests running in parallel
+ if basename in ["cursor8.js", "indexh.js", "dropdb.js", "dropdb_race.js",
+ "connections_opened.js", "opcounters_write_cmd.js", "dbadmin.js"]:
+ return True
+ if use_ssl:
+ # Skip tests using mongobridge since it does not support SSL
+ # TODO: Remove when SERVER-10910 has been resolved.
+ if basename in ["gridfs.js", "initial_sync3.js", "majority.js", "no_chaining.js",
+ "rollback4.js", "slavedelay3.js", "sync2.js", "tags.js"]:
+ return True
+ # TODO: For now skip tests using MongodRunner, remove when SERVER-10909 has been resolved
+ if basename in ["fastsync.js", "index_retry.js", "ttl_repl_maintenance.js",
+ "unix_socket1.js"]:
+ return True;
+ if auth or keyFile or use_x509: # For tests running with auth
+ # Skip any tests that run with auth explicitly
+ if parentDir.lower() == "auth" or "auth" in basename.lower():
+ return True
+ if parentPath == mongo_repo: # Skip client tests
+ return True
+ if parentDir == "tool": # SERVER-6368
+ return True
+ if parentDir == "dur": # SERVER-7317
+ return True
+ if parentDir == "disk": # SERVER-7356
+ return True
+
+ authTestsToSkip = [("jstests", "drop2.js"), # SERVER-8589,
+ ("jstests", "killop.js"), # SERVER-10128
+ ("sharding", "sync3.js"), # SERVER-6388 for this and those below
+ ("sharding", "sync6.js"),
+ ("sharding", "parallel.js"),
+ ("sharding", "copydb_from_mongos.js"), # SERVER-13080
+ ("jstests", "bench_test1.js"),
+ ("jstests", "bench_test2.js"),
+ ("jstests", "bench_test3.js"),
+ ("core", "bench_test1.js"),
+ ("core", "bench_test2.js"),
+ ("core", "bench_test3.js"),
+ ]
+
+ if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]:
+ return True
+
+ return False
+
+legacyWriteRE = re.compile(r"jstests[/\\]multiVersion")
+def setShellWriteModeForTest(path, argv):
+ swm = shell_write_mode
+ if legacyWriteRE.search(path):
+ swm = "legacy"
+ argv += ["--writeMode", swm]
+
+def runTest(test, result):
+ # result is a map containing test result details, like result["url"]
+
+ # test is a tuple of ( filename , usedb<bool> )
+ # filename should be a js file to run
+ # usedb is true if the test expects a mongod to be running
+
+ (path, usedb) = test
+ (ignore, ext) = os.path.splitext(path)
+ test_mongod = mongod()
+ mongod_is_up = test_mongod.is_mongod_up(mongod_port)
+ result["mongod_running_at_start"] = mongod_is_up;
+
+ if file_of_commands_mode:
+ # smoke.py was invoked like "--mode files --from-file foo",
+ # so don't try to interpret the test path too much
+ if os.sys.platform == "win32":
+ argv = [path]
+ else:
+ argv = shlex.split(path)
+ path = argv[0]
+ # if the command is a python script, use the script name
+ if os.path.basename(path) in ('python', 'python.exe'):
+ path = argv[1]
+ elif ext == ".js":
+ argv = [shell_executable, "--port", mongod_port, '--authenticationMechanism', authMechanism]
+
+ setShellWriteModeForTest(path, argv)
+
+ if not usedb:
+ argv += ["--nodb"]
+ if small_oplog or small_oplog_rs:
+ argv += ["--eval", 'testingReplication = true;']
+ if use_ssl:
+ argv += ["--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidCertificates"]
+ argv += [path]
+ elif ext in ["", ".exe"]:
+ # Blech.
+ if os.path.basename(path) in ["dbtest", "dbtest.exe"]:
+ argv = [path]
+ # default data directory for dbtest is /tmp/unittest
+ if smoke_db_prefix:
+ dir_name = smoke_db_prefix + '/unittests'
+ argv.extend(["--dbpath", dir_name] )
+
+ if storage_engine:
+ argv.extend(["--storageEngine", storage_engine])
+ if wiredtiger_engine_config:
+ argv.extend(["--wiredTigerEngineConfig", wiredtiger_engine_config])
+ if wiredtiger_collection_config:
+ argv.extend(["--wiredTigerCollectionConfig", wiredtiger_collection_config])
+ if wiredtiger_index_config:
+ argv.extend(["--wiredTigerIndexConfig", wiredtiger_index_config])
+
+ # more blech
+ elif os.path.basename(path) in ['mongos', 'mongos.exe']:
+ argv = [path, "--test"]
+ else:
+ argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path,
+ "--port", mongod_port]
+ else:
+ raise Bug("fell off in extension case: %s" % path)
+
+ mongo_test_filename = os.path.basename(path)
+
+ # sys.stdout.write() is more atomic than print, so using it prevents
+ # lines being interrupted by, e.g., child processes
+ sys.stdout.write(" *******************************************\n")
+ sys.stdout.write(" Test : %s ...\n" % mongo_test_filename)
+ sys.stdout.flush()
+
+ # FIXME: we don't handle the case where the subprocess
+ # hangs... that's bad.
+ if ( argv[0].endswith( 'mongo' ) or argv[0].endswith( 'mongo.exe' ) ) and not '--eval' in argv :
+ evalString = 'load("jstests/libs/servers.js");load("jstests/libs/servers_misc.js");' +\
+ 'TestData = new Object();' + \
+ 'TestData.storageEngine = "' + ternary( storage_engine, storage_engine, "" ) + '";' + \
+ 'TestData.wiredTigerEngineConfig = "' + ternary( wiredtiger_engine_config, wiredtiger_engine_config, "" ) + '";' + \
+ 'TestData.wiredTigerCollectionConfig = "' + ternary( wiredtiger_collection_config, wiredtiger_collection_config, "" ) + '";' + \
+ 'TestData.wiredTigerIndexConfig = "' + ternary( wiredtiger_index_config, wiredtiger_index_config, "" ) + '";' + \
+ 'TestData.testPath = "' + path + '";' + \
+ 'TestData.testFile = "' + os.path.basename( path ) + '";' + \
+ 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \
+ 'TestData.setParameters = "' + ternary( set_parameters, set_parameters, "" ) + '";' + \
+ 'TestData.setParametersMongos = "' + ternary( set_parameters_mongos, set_parameters_mongos, "" ) + '";' + \
+ 'TestData.noJournal = ' + ternary( no_journal ) + ";" + \
+ 'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \
+ 'TestData.auth = ' + ternary( auth ) + ";" + \
+ 'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \
+ 'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";" + \
+ 'TestData.authMechanism = ' + ternary( authMechanism,
+ '"' + str(authMechanism) + '"', 'null') + ";" + \
+ 'TestData.useSSL = ' + ternary( use_ssl ) + ";" + \
+ 'TestData.useX509 = ' + ternary( use_x509 ) + ";"
+ # this updates the default data directory for mongod processes started through shell (src/mongo/shell/servers.js)
+ evalString += 'MongoRunner.dataDir = "' + os.path.abspath(smoke_db_prefix + '/data/db') + '";'
+ evalString += 'MongoRunner.dataPath = MongoRunner.dataDir + "/";'
+ if temp_path:
+ evalString += 'TestData.tmpPath = "' + temp_path + '";'
+ if os.sys.platform == "win32":
+ # double quotes in the evalString on windows; this
+ # prevents the backslashes from being removed when
+ # the shell (i.e. bash) evaluates this string. yuck.
+ evalString = evalString.replace('\\', '\\\\')
+
+ if auth and usedb:
+ evalString += 'jsTest.authenticate(db.getMongo());'
+
+ if os.getenv('SMOKE_EVAL') is not None:
+ evalString += os.getenv('SMOKE_EVAL')
+
+ argv = argv + [ '--eval', evalString]
+
+
+ if argv[0].endswith( 'dbtest' ) or argv[0].endswith( 'dbtest.exe' ):
+ if no_preallocj :
+ argv = argv + [ '--nopreallocj' ]
+ if temp_path:
+ argv = argv + [ '--tempPath', temp_path ]
+
+
+ sys.stdout.write(" Command : %s\n" % ' '.join(argv))
+ sys.stdout.write(" Date : %s\n" % datetime.now().ctime())
+ sys.stdout.flush()
+
+ os.environ['MONGO_TEST_FILENAME'] = mongo_test_filename
+ t1 = time.time()
+
+ proc = Popen(buildlogger(argv), cwd=test_path, stdout=PIPE, stderr=STDOUT, bufsize=0)
+ first_line = proc.stdout.readline() # Get suppressed output URL
+ m = re.search(r"\s*\(output suppressed; see (?P<url>.*)\)" + os.linesep, first_line)
+ if m:
+ result["url"] = m.group("url")
+ sys.stdout.write(first_line)
+ sys.stdout.flush()
+ while True:
+ # print until subprocess's stdout closed.
+ # Not using "for line in file" since that has unwanted buffering.
+ line = proc.stdout.readline()
+ if not line:
+ break;
+
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+ proc.wait() # wait if stdout is closed before subprocess exits.
+ r = proc.returncode
+
+ t2 = time.time()
+ del os.environ['MONGO_TEST_FILENAME']
+
+ timediff = t2 - t1
+ # timediff is seconds by default
+ scale = 1
+ suffix = "seconds"
+ # if timediff is less than 10 seconds use ms
+ if timediff < 10:
+ scale = 1000
+ suffix = "ms"
+ # if timediff is more than 60 seconds use minutes
+ elif timediff > 60:
+ scale = 1.0 / 60.0
+ suffix = "minutes"
+ sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix))
+ sys.stdout.flush()
+
+ result["exit_code"] = r
+
+
+ is_mongod_still_up = test_mongod.is_mongod_up(mongod_port)
+ if start_mongod and not is_mongod_still_up:
+ print "mongod is not running after test"
+ result["mongod_running_at_end"] = is_mongod_still_up;
+ raise TestServerFailure(path)
+
+ result["mongod_running_at_end"] = is_mongod_still_up;
+
+ if r != 0:
+ raise TestExitFailure(path, r)
+
+ print ""
+
+def run_tests(tests):
+ # FIXME: some suites of tests start their own mongod, so don't
+ # need this. (So long as there are no conflicts with port,
+ # dbpath, etc., and so long as we shut ours down properly,
+ # starting this mongod shouldn't break anything, though.)
+
+ # The reason we want to use "with" is so that we get __exit__ semantics
+ # but "with" is only supported on Python 2.5+
+
+ master = NullMongod()
+ slave = NullMongod()
+
+ try:
+ if start_mongod:
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config=wiredtiger_engine_config,
+ wiredtiger_collection_config=wiredtiger_collection_config,
+ wiredtiger_index_config=wiredtiger_index_config,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ if small_oplog:
+ slave = mongod(slave=True,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config=wiredtiger_engine_config,
+ wiredtiger_collection_config=wiredtiger_collection_config,
+ wiredtiger_index_config=wiredtiger_index_config,
+ set_parameters=set_parameters)
+ slave.start()
+ elif small_oplog_rs:
+ slave = mongod(slave=True,
+ small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config=wiredtiger_engine_config,
+ wiredtiger_collection_config=wiredtiger_collection_config,
+ wiredtiger_index_config=wiredtiger_index_config,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ slave.start()
+ primary = MongoClient(port=master.port);
+
+ primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [
+ {'_id': 0, 'host':'localhost:%s' % master.port},
+ {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}})
+
+ ismaster = False
+ while not ismaster:
+ result = primary.admin.command("ismaster");
+ ismaster = result["ismaster"]
+ time.sleep(1)
+
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+
+ for tests_run, test in enumerate(tests):
+ tests_run += 1 # enumerate from 1, python 2.5 compatible
+ test_result = { "start": time.time() }
+
+ (test_path, use_db) = test
+
+ if test_path.startswith(mongo_repo + os.path.sep):
+ test_result["test_file"] = test_path[len(mongo_repo)+1:]
+ else:
+ # user could specify a file not in repo. leave it alone.
+ test_result["test_file"] = test_path
+
+ try:
+ if skipTest(test_path):
+ test_result["status"] = "skip"
+
+ print "skipping " + test_path
+ else:
+ fails.append(test)
+ runTest(test, test_result)
+ fails.pop()
+ winners.append(test)
+
+ test_result["status"] = "pass"
+
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_report["results"].append( test_result )
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+ # check the db_hashes
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ check_and_report_replication_dbhashes()
+
+ elif use_db: # reach inside test and see if "usedb" is true
+ if clean_every_n_tests and (tests_run % clean_every_n_tests) == 0:
+ # Restart mongod periodically to clean accumulated test data
+ # clean_dbroot() is invoked by mongod.start()
+ master.stop()
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config=wiredtiger_engine_config,
+ wiredtiger_collection_config=wiredtiger_collection_config,
+ wiredtiger_index_config=wiredtiger_index_config,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ except TestFailure, f:
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["error"] = str(f)
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+ try:
+ print f
+ # Record the failing test and re-raise.
+ losers[f.path] = f.status
+ raise f
+ except TestServerFailure, f:
+ return 2
+ except TestFailure, f:
+ if not continue_on_failure:
+ return 1
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+
+ finally:
+ slave.stop()
+ master.stop()
+ return 0
+
+
+def check_and_report_replication_dbhashes():
+ def missing(lst, src, dst):
+ if lst:
+ print """The following collections were present in the %s but not the %s
+at the end of testing:""" % (src, dst)
+ for db in lst:
+ print db
+
+ missing(lost_in_slave, "master", "slave")
+ missing(lost_in_master, "slave", "master")
+ if screwy_in_slave:
+        print """The following collections have different hashes in master and slave
+at the end of testing:"""
+ for coll in screwy_in_slave.keys():
+ stats = screwy_in_slave[coll]
+ # Counts are "approx" because they are collected after the dbhash runs and may not
+ # reflect the states of the collections that were hashed. If the hashes differ, one
+ # possibility is that a test exited with writes still in-flight.
+ print "collection: %s\t (master/slave) hashes: %s/%s counts (approx): %i/%i" % (coll, stats['hashes']['master'], stats['hashes']['slave'], stats['counts']['master'], stats['counts']['slave'])
+ if "docs" in stats:
+                if (("master" in stats["docs"] and len(stats["docs"]["master"]) != 0) or
+                    ("slave" in stats["docs"] and len(stats["docs"]["slave"]) != 0)):
+                    print "Different Docs"
+                    print "Master docs:"
+                    pprint.pprint(stats["docs"]["master"], indent=2)
+                    print "Slave docs:"
+                    pprint.pprint(stats["docs"]["slave"], indent=2)
+                else:
+                    print "All docs matched!"
+ if "error-docs" in stats:
+ print "Error getting docs to diff:"
+ pprint.pprint(stats["error-docs"])
+ return True
+
+ if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave):
+ print "replication ok for %d collections" % (len(replicated_collections))
+
+ return False
+
+
+def report():
+ print "%d tests succeeded" % len(winners)
+ num_missed = len(tests) - (len(winners) + len(losers.keys()))
+ if num_missed:
+ print "%d tests didn't get run" % num_missed
+ if losers:
+ print "The following tests failed (with exit code):"
+ for loser in losers:
+ print "%s\t%d" % (loser, losers[loser])
+
+ test_result = { "start": time.time() }
+ if check_and_report_replication_dbhashes():
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["test_file"] = "/#dbhash#"
+ test_result["error"] = "dbhash mismatch"
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report ) )
+ f.close()
+
+ if losers or lost_in_slave or lost_in_master or screwy_in_slave:
+ raise Exception("Test failures")
+
+# Keys are the suite names (passed on the command line to smoke.py)
+# Values are pairs: (filenames, <start mongod before running tests>)
+suiteGlobalConfig = {"js": ("core/*.js", True),
+ "quota": ("quota/*.js", True),
+ "jsPerf": ("perf/*.js", True),
+ "disk": ("disk/*.js", True),
+ "noPassthroughWithMongod": ("noPassthroughWithMongod/*.js", True),
+ "noPassthrough": ("noPassthrough/*.js", False),
+ "parallel": ("parallel/*.js", True),
+ "clone": ("clone/*.js", False),
+ "repl": ("repl/*.js", False),
+ "replSets": ("replsets/*.js", False),
+ "dur": ("dur/*.js", False),
+ "auth": ("auth/*.js", False),
+ "sharding": ("sharding/*.js", False),
+ "tool": ("tool/*.js", False),
+ "aggregation": ("aggregation/*.js", True),
+ "multiVersion": ("multiVersion/*.js", True),
+ "failPoint": ("fail_point/*.js", False),
+ "ssl": ("ssl/*.js", True),
+ "sslSpecial": ("sslSpecial/*.js", True),
+ "jsCore": ("core/*.js", True),
+ "mmap_v1": ("mmap_v1/*.js", True),
+ "gle": ("gle/*.js", True),
+ "rocksDB": ("rocksDB/*.js", True),
+ "slow1": ("slow1/*.js", True),
+ "slow2": ("slow2/*.js", True),
+ }
+
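+# For example, "smoke.py jsCore" expands to jstests/core/*.js and starts a
+# mongod for the suite, whereas "repl" tests are expected to start their own.
+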
+def get_module_suites():
+ """Attempts to discover and return information about module test suites
+
+ Returns a dictionary of module suites in the format:
+
+ {
+ "<suite_name>" : "<full_path_to_suite_directory/[!_]*.js>",
+ ...
+ }
+
+ This means the values of this dictionary can be used as "glob"s to match all jstests in the
+ suite directory that don't start with an underscore
+
+ The module tests should be put in 'src/mongo/db/modules/<module_name>/<suite_name>/*.js'
+
+ NOTE: This assumes that if we have more than one module the suite names don't conflict
+ """
+ modules_directory = 'src/mongo/db/modules'
+ test_suites = {}
+
+ # Return no suites if we have no modules
+ if not os.path.exists(modules_directory) or not os.path.isdir(modules_directory):
+ return {}
+
+ module_directories = os.listdir(modules_directory)
+ for module_directory in module_directories:
+
+ test_directory = os.path.join(modules_directory, module_directory, "jstests")
+
+ # Skip this module if it has no "jstests" directory
+ if not os.path.exists(test_directory) or not os.path.isdir(test_directory):
+ continue
+
+ # Get all suites for this module
+ for test_suite in os.listdir(test_directory):
+ test_suites[test_suite] = os.path.join(test_directory, test_suite, "[!_]*.js")
+
+ return test_suites
+
+def expand_suites(suites,expandUseDB=True):
+ """Takes a list of suites and expands to a list of tests according to a set of rules.
+
+ Keyword arguments:
+ suites -- list of suites specified by the user
+ expandUseDB -- expand globs (such as [!_]*.js) for tests that are run against a database
+ (default True)
+
+ This function handles expansion of globs (such as [!_]*.js), aliases (such as "client" and
+ "all"), detection of suites in the "modules" directory, and enumerating the test files in a
+ given suite. It returns a list of tests of the form (path_to_test, usedb), where the second
+ part of the tuple specifies whether the test is run against the database (see --nodb in the
+ mongo shell)
+
+ """
+ globstr = None
+ tests = []
+ module_suites = get_module_suites()
+ for suite in suites:
+ if suite == 'all':
+ return expand_suites(['dbtest',
+ 'jsCore',
+ 'jsPerf',
+ 'mmap_v1',
+ 'noPassthroughWithMongod',
+ 'noPassthrough',
+ 'clone',
+ 'parallel',
+ 'repl',
+ 'auth',
+ 'sharding',
+ 'slow1',
+ 'slow2',
+ 'tool'],
+ expandUseDB=expandUseDB)
+ if suite == 'dbtest' or suite == 'test':
+ if os.sys.platform == "win32":
+ program = 'dbtest.exe'
+ else:
+ program = 'dbtest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'mongosTest':
+ if os.sys.platform == "win32":
+ program = 'mongos.exe'
+ else:
+ program = 'mongos'
+ tests += [(os.path.join(mongo_repo, program), False)]
+ elif os.path.exists( suite ):
+ usedb = True
+ for name in suiteGlobalConfig:
+ if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ):
+ usedb = suiteGlobalConfig[name][1]
+ break
+ tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ]
+ elif suite in module_suites:
+ # Currently we connect to a database in all module tests since there's no mechanism yet
+ # to configure it independently
+ usedb = True
+ paths = glob.glob(module_suites[suite])
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+ else:
+ try:
+ globstr, usedb = suiteGlobalConfig[suite]
+ except KeyError:
+ raise Exception('unknown test suite %s' % suite)
+
+ if globstr:
+ if usedb and not expandUseDB:
+ tests += [ (suite,False) ]
+ else:
+ if globstr.endswith('.js'):
+ loc = 'jstests/'
+ else:
+ loc = ''
+ globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr)))
+ globstr = os.path.normpath(globstr)
+ paths = glob.glob(globstr)
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+
+ return tests
+
+
+def filter_tests_by_tag(tests, tag_query):
+ """Selects tests from a list based on a query over the tags in the tests."""
+
+ test_map = {}
+ roots = []
+ for test in tests:
+ root = os.path.abspath(test[0])
+ roots.append(root)
+ test_map[root] = test
+
+ new_style_tests = smoke.tests.build_tests(roots, extract_metadata=True)
+ new_style_tests = smoke.suites.build_suite(new_style_tests, tag_query)
+
+ print "\nTag query matches %s tests out of %s.\n" % (len(new_style_tests),
+ len(tests))
+
+ tests = []
+ for new_style_test in new_style_tests:
+ tests.append(test_map[os.path.abspath(new_style_test.filename)])
+
+ return tests
+
+
+def add_exe(e):
+ if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ):
+ e += ".exe"
+ return e
+
+
+def set_globals(options, tests):
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure
+ global small_oplog, small_oplog_rs
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, storage_engine, wiredtiger_engine_config, wiredtiger_collection_config, wiredtiger_index_config
+ global auth, authMechanism, keyFile, keyFileData, smoke_db_prefix, test_path, start_mongod
+ global use_ssl, use_x509
+ global file_of_commands_mode
+ global report_file, shell_write_mode, use_write_commands
+ global temp_path
+ global clean_every_n_tests
+ global clean_whole_dbroot
+
+ start_mongod = options.start_mongod
+ if hasattr(options, 'use_ssl'):
+ use_ssl = options.use_ssl
+ if hasattr(options, 'use_x509'):
+ use_x509 = options.use_x509
+ use_ssl = use_ssl or use_x509
+    # Careful, this can be called multiple times.
+ test_path = options.test_path
+
+ mongod_executable = add_exe(options.mongod_executable)
+ if not os.path.exists(mongod_executable):
+ raise Exception("no mongod found in this directory.")
+
+ mongod_port = options.mongod_port
+
+ shell_executable = add_exe( options.shell_executable )
+ if not os.path.exists(shell_executable):
+ raise Exception("no mongo shell found in this directory.")
+
+ continue_on_failure = options.continue_on_failure
+ smoke_db_prefix = options.smoke_db_prefix
+ small_oplog = options.small_oplog
+ if hasattr(options, "small_oplog_rs"):
+ small_oplog_rs = options.small_oplog_rs
+ no_journal = options.no_journal
+ storage_engine = options.storage_engine
+ wiredtiger_engine_config = options.wiredtiger_engine_config
+ wiredtiger_collection_config = options.wiredtiger_collection_config
+ wiredtiger_index_config = options.wiredtiger_index_config
+ set_parameters = options.set_parameters
+ set_parameters_mongos = options.set_parameters_mongos
+ no_preallocj = options.no_preallocj
+ auth = options.auth
+ authMechanism = options.authMechanism
+ keyFile = options.keyFile
+
+ clean_every_n_tests = options.clean_every_n_tests
+ clean_whole_dbroot = options.with_cleanbb
+
+ if auth and not keyFile:
+ # if only --auth was given to smoke.py, load the
+ # default keyFile from jstests/libs/authTestsKey
+ keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey')
+
+ if keyFile:
+ f = open(keyFile, 'r')
+ keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace
+ f.close()
+ os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR)
+ else:
+ keyFileData = None
+
+ # if smoke.py is running a list of commands read from a
+ # file (or stdin) rather than running a suite of js tests
+ file_of_commands_mode = options.File and options.mode == 'files'
+ # generate json report
+ report_file = options.report_file
+ temp_path = options.temp_path
+
+ use_write_commands = options.use_write_commands
+ shell_write_mode = options.shell_write_mode
+
+def file_version():
+ return md5(open(__file__, 'r').read()).hexdigest()
+
+def clear_failfile():
+ if os.path.exists(failfile):
+ os.remove(failfile)
+
+def run_old_fails():
+ global tests
+
+ try:
+ f = open(failfile, 'r')
+ state = pickle.load(f)
+ f.close()
+ except Exception:
+ try:
+ f.close()
+ except:
+ pass
+ clear_failfile()
+ return # This counts as passing so we will run all tests
+
+ if ('version' not in state or state['version'] != file_version()):
+ print "warning: old version of failfile.smoke detected. skipping recent fails"
+ clear_failfile()
+ return
+
+ testsAndOptions = state['testsAndOptions']
+ tests = [x[0] for x in testsAndOptions]
+ passed = []
+ try:
+ for (i, (test, options)) in enumerate(testsAndOptions):
+ # SERVER-5102: until we can figure out a better way to manage
+ # dependencies of the --only-old-fails build phase, just skip
+ # tests which we can't safely run at this point
+ path, usedb = test
+
+ if not os.path.exists(path):
+ passed.append(i)
+ winners.append(test)
+ continue
+
+ filename = os.path.basename(path)
+ if filename in ('dbtest', 'dbtest.exe') or filename.endswith('.js'):
+ set_globals(options, [filename])
+ oldWinners = len(winners)
+ run_tests([test])
+ if len(winners) != oldWinners: # can't use return value due to continue_on_failure
+ passed.append(i)
+ finally:
+ for offset, i in enumerate(passed):
+ testsAndOptions.pop(i - offset)
+
+ if testsAndOptions:
+ f = open(failfile, 'w')
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ pickle.dump(state, f)
+ else:
+ clear_failfile()
+
+ report() # exits with failure code if there is an error
+
+def add_to_failfile(tests, options):
+ try:
+ f = open(failfile, 'r')
+ testsAndOptions = pickle.load(f)["testsAndOptions"]
+ except Exception:
+ testsAndOptions = []
+
+ for test in tests:
+ if (test, options) not in testsAndOptions:
+ testsAndOptions.append( (test, options) )
+
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ f = open(failfile, 'w')
+ pickle.dump(state, f)
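+
+# Sketch of the pickled failfile state written above (hypothetical path):
+#
+#   {'version': '<md5 of this smoke.py>',
+#    'testsAndOptions': [(('jstests/core/foo.js', True), <options object>), ...]}
+#
+# run_old_fails() discards the file whenever 'version' differs from
+# file_version(), so a failfile from an older smoke.py is never reused.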
+
+
+
+def main():
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, auth, storage_engine, wiredtiger_engine_config, wiredtiger_collection_config, wiredtiger_index_config
+ global keyFile, smoke_db_prefix, test_path, use_write_commands
+
+ try:
+ signal.signal(signal.SIGUSR1, dump_stacks)
+ except AttributeError:
+ print "Cannot catch signals on Windows"
+
+ parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
+ parser.add_option('--mode', dest='mode', default='suite',
+ help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
+    # Some of our tests hard-code pathnames (e.g., of files to execute), so until
+    # that changes we don't have the freedom to run from anywhere.
+ # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
+ parser.add_option('--test-path', dest='test_path', default=None,
+ help="Path to the test executables to run, "
+ "currently only used for 'client' (%default)")
+ parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
+ help='Path to mongod to run (%default)')
+ parser.add_option('--port', dest='mongod_port', default="27999",
+ help='Port the mongod will bind to (%default)')
+ parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
+ help='Path to mongo, for .js test files (%default)')
+ parser.add_option('--continue-on-failure', dest='continue_on_failure',
+ action="store_true", default=False,
+ help='If supplied, continue testing even after a test fails')
+ parser.add_option('--from-file', dest='File',
+ help="Run tests/suites named in FILE, one test per line, '-' means stdin")
+ parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
+ help="Prefix to use for the mongods' dbpaths ('%default')")
+ parser.add_option('--small-oplog', dest='small_oplog', default=False,
+ action="store_true",
+ help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
+ action="store_true",
+ help='Run tests with replica set replication & use a small oplog')
+ parser.add_option('--storageEngine', dest='storage_engine', default=None,
+ help='What storage engine to start mongod with')
+ parser.add_option('--wiredTigerEngineConfig', dest='wiredtiger_engine_config', default=None,
+ help='Wired Tiger configuration to pass through to mongod')
+ parser.add_option('--wiredTigerCollectionConfig', dest='wiredtiger_collection_config', default=None,
+ help='Wired Tiger collection configuration to pass through to mongod')
+ parser.add_option('--wiredTigerIndexConfig', dest='wiredtiger_index_config', default=None,
+ help='Wired Tiger index configuration to pass through to mongod')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
+ parser.add_option('--auth', dest='auth', default=False,
+ action="store_true",
+ help='Run standalone mongods in tests with authentication enabled')
+ parser.add_option('--use-x509', dest='use_x509', default=False,
+ action="store_true",
+ help='Use x509 auth for internal cluster authentication')
+ parser.add_option('--authMechanism', dest='authMechanism', default='SCRAM-SHA-1',
+ help='Use the given authentication mechanism, when --auth is used.')
+ parser.add_option('--keyFile', dest='keyFile', default=None,
+ help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
+ parser.add_option('--ignore', dest='ignore_files', default=None,
+ help='Pattern of files to ignore in tests')
+ parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
+ action="store_true",
+                      help='Check the failfile and run only the tests that failed last time')
+ parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
+ action="store_true",
+ help='Clear the failfile. Do this if all tests pass')
+ parser.add_option('--with-cleanbb', dest='with_cleanbb', action="store_true",
+ default=False,
+ help='Clear database files before first test')
+ parser.add_option('--clean-every', dest='clean_every_n_tests', type='int',
+ default=(1 if 'detect_leaks=1' in os.getenv("ASAN_OPTIONS", "") else 20),
+ help='Clear database files every N tests [default %default]')
+ parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
+ action='store_false',
+                      help='Do not start mongod before running the tests')
+ parser.add_option('--use-ssl', dest='use_ssl', default=False,
+ action='store_true',
+ help='Run mongo shell and mongod instances with SSL encryption')
+    parser.add_option('--set-parameters', dest='set_parameters', default="",
+                      help='Adds --setParameter to mongod for each item in the comma-separated list, e.g. "param1=1,param2=foo"')
+    parser.add_option('--set-parameters-mongos', dest='set_parameters_mongos', default="",
+                      help='Adds --setParameter to mongos for each item in the comma-separated list, e.g. "param1=1,param2=foo"')
+ parser.add_option('--temp-path', dest='temp_path', default=None,
+ help='If present, passed as --tempPath to unittests and dbtests or TestData.tmpPath to mongo')
+ # Buildlogger invocation from command line
+ parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
+ action="store", help='Set the "builder name" for buildlogger')
+ parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
+ action="store", help='Set the "build number" for buildlogger')
+ parser.add_option('--buildlogger-url', dest='buildlogger_url', default=None,
+ action="store", help='Set the url root for the buildlogger service')
+ parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
+ action="store", help='Path to Python file containing buildlogger credentials')
+ parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
+ action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')
+ parser.add_option('--report-file', dest='report_file', default=None,
+ action='store',
+ help='Path to generate detailed json report containing all test details')
+ parser.add_option('--use-write-commands', dest='use_write_commands', default=False,
+ action='store_true',
+                      help='Deprecated (use --shell-write-mode): sets the shell to use write commands by default')
+ parser.add_option('--shell-write-mode', dest='shell_write_mode', default="commands",
+                      help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default: %default)')
+
+ parser.add_option('--include-tags', dest='include_tags', default="", action='store',
+                      help='Filter the jstests to run by tag regex(es): at least one tag in the test must match. ' +
+                      'Specify a single regex string or a JSON array.')
+
+ parser.add_option('--exclude-tags', dest='exclude_tags', default="", action='store',
+                      help='Filter the jstests to run by tag regex(es): no tag in the test may match. ' +
+                      'Specify a single regex string or a JSON array.')
+
+ global tests
+ (options, tests) = parser.parse_args()
+
+ set_globals(options, tests)
+
+ buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
+ if all(buildlogger_opts):
+ os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
+ os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
+ os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
+ os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
+ if options.buildlogger_phase:
+ os.environ['MONGO_PHASE'] = options.buildlogger_phase
+ elif any(buildlogger_opts):
+        # some but not all of the required options were set
+ raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")
+
+ if options.buildlogger_url: #optional; if None, defaults to const in buildlogger.py
+ os.environ['BUILDLOGGER_URL'] = options.buildlogger_url
+
+ if options.File:
+ if options.File == '-':
+ tests = sys.stdin.readlines()
+ else:
+ f = open(options.File)
+ tests = f.readlines()
+ tests = [t.rstrip('\n') for t in tests]
+
+ if options.only_old_fails:
+ run_old_fails()
+ return
+ elif options.reset_old_fails:
+ clear_failfile()
+ return
+
+ # If we're in suite mode, tests is a list of names of sets of tests.
+ if options.mode == 'suite':
+ tests = expand_suites(tests)
+ elif options.mode == 'files':
+ tests = [(os.path.abspath(test), start_mongod) for test in tests]
+
+    if options.ignore_files is not None:
+ ignore_patt = re.compile( options.ignore_files )
+ print "Ignoring files with pattern: ", ignore_patt
+
+ def ignore_test( test ):
+ if ignore_patt.search( test[0] ) != None:
+ print "Ignoring test ", test[0]
+ return False
+ else:
+ return True
+
+ tests = filter( ignore_test, tests )
+
+ if options.include_tags or options.exclude_tags:
+
+ def to_regex_array(tags_option):
+ if not tags_option:
+ return []
+
+ tags_list = smoke.json_options.json_coerce(tags_option)
+ if isinstance(tags_list, basestring):
+ tags_list = [tags_list]
+
+ return map(re.compile, tags_list)
+
+ tests = filter_tests_by_tag(tests,
+ smoke.suites.RegexQuery(include_res=to_regex_array(options.include_tags),
+ exclude_res=to_regex_array(options.exclude_tags)))
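+        # Example invocation (hypothetical tags, for illustration only):
+        #   python smoke.py --include-tags '["^auth$"]' --exclude-tags slow jsCore
+        # A bare string is coerced into a one-element list before compilation,
+        # so both a single regex and a JSON array are accepted.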
+
+ if not tests:
+ print "warning: no tests specified"
+ return
+
+ if options.with_cleanbb:
+ clean_dbroot(nokill=True)
+
+ test_report["start"] = time.time()
+ test_report["mongod_running_at_start"] = mongod().is_mongod_up(mongod_port)
+ try:
+ run_tests(tests)
+ finally:
+ add_to_failfile(fails, options)
+
+ test_report["end"] = time.time()
+ test_report["elapsed"] = test_report["end"] - test_report["start"]
+ test_report["failures"] = len(losers.keys())
+ test_report["mongod_running_at_end"] = mongod().is_mongod_up(mongod_port)
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report, indent=4, separators=(',', ': ')) )
+ f.close()
+
+ report()
+
+if __name__ == "__main__":
+ main()
diff --git a/src/mongo/gotools/test/legacy28/buildscripts/utils.py b/src/mongo/gotools/test/legacy28/buildscripts/utils.py
new file mode 100644
index 00000000000..0a46ef440d4
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/buildscripts/utils.py
@@ -0,0 +1,235 @@
+
+import codecs
+import re
+import socket
+import time
+import os
+import os.path
+import itertools
+import subprocess
+import sys
+import hashlib
+
+# various utilities that are handy
+
+def getAllSourceFiles( arr=None , prefix="." ):
+ if arr is None:
+ arr = []
+
+ if not os.path.isdir( prefix ):
+ # assume a file
+ arr.append( prefix )
+ return arr
+
+ for x in os.listdir( prefix ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
+ continue
+ # XXX: Avoid conflict between v8 and v8-3.25 source files in
+ # src/mongo/scripting
+ # Remove after v8-3.25 migration.
+ if x.find("v8-3.25") != -1:
+ continue
+ full = prefix + "/" + x
+ if os.path.isdir( full ) and not os.path.islink( full ):
+ getAllSourceFiles( arr , full )
+ else:
+ if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+ full = full.replace( "//" , "/" )
+ arr.append( full )
+
+ return arr
+
+
+def getGitBranch():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return None
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version.split( "/" )
+ version = version[len(version)-1]
+ return version
+
+def getGitBranchString( prefix="" , postfix="" ):
+ t = re.compile( '[/\\\]' ).split( os.getcwd() )
+ if len(t) > 2 and t[len(t)-1] == "mongo":
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
+    b = getGitBranch()
+    if b is None or b == "master":
+ return ""
+ return prefix + b + postfix
+
+def getGitVersion():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return "nogitversion"
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version[5:]
+ f = ".git/" + version
+ if not os.path.exists( f ):
+ return version
+ return open( f , 'r' ).read().strip()
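+# Sketch of what getGitVersion() resolves: on a branch, .git/HEAD contains
+# "ref: refs/heads/<branch>" and the SHA is read from .git/refs/heads/<branch>;
+# in detached-HEAD state, .git/HEAD already holds the SHA and is returned
+# directly. Packed refs are not consulted, so a fully packed repository gets
+# the ref path back instead of a SHA.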
+
+def execsys( args ):
+    if isinstance( args , str ):
+        r = re.compile( "\s+" )
+        args = r.split( args )
+    p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE )
+    r = p.communicate()
+    return r
+
+def getprocesslist():
+ raw = ""
+ try:
+ raw = execsys( "/bin/ps axww" )[0]
+ except Exception,e:
+ print( "can't get processlist: " + str( e ) )
+
+ r = re.compile( "[\r\n]+" )
+ return r.split( raw )
+
+def removeIfInList( lst , thing ):
+ if thing in lst:
+ lst.remove( thing )
+
+def findVersion( root , choices ):
+    for c in choices:
+        if os.path.exists( root + c ):
+            return root + c
+    raise Exception( "can't find a version of [" + root + "] choices: " + str( choices ) )
+
+def choosePathExist( choices , default=None):
+ for c in choices:
+ if c != None and os.path.exists( c ):
+ return c
+ return default
+
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
+def ensureDir( name ):
+    d = os.path.dirname( name )
+    if not os.path.exists( d ):
+        print( "Creating dir: " + d )
+        os.makedirs( d )
+    if not os.path.exists( d ):
+        raise Exception( "Failed to create dir: " + d )
+
+
+def distinctAsString( arr ):
+ s = set()
+ for x in arr:
+ s.add( str(x) )
+ return list(s)
+
+def checkMongoPort( port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", port))
+ sock.close()
+
+
+def didMongodStart( port=27017 , timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ checkMongoPort( port )
+ return True
+ except Exception,e:
+ print( e )
+ timeout = timeout - 1
+ return False
+
+def which(executable):
+ if sys.platform == 'win32':
+ paths = os.environ.get('Path', '').split(';')
+ else:
+ paths = os.environ.get('PATH', '').split(':')
+
+ for path in paths:
+ path = os.path.expandvars(path)
+ path = os.path.expanduser(path)
+ path = os.path.abspath(path)
+ executable_path = os.path.join(path, executable)
+ if os.path.exists(executable_path):
+ return executable_path
+
+ return executable
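+# which() mimics the shell's lookup: it splits the search path ('Path' on
+# Windows, split on ';'; 'PATH' elsewhere, split on ':'), expands variables and
+# '~', and returns the first existing candidate as an absolute path, falling
+# back to the bare name when nothing matches.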
+
+def md5sum( file ):
+ #TODO error handling, etc..
+ return execsys( "md5sum " + file )[0].partition(" ")[0]
+
+def md5string( a_string ):
+ return hashlib.md5(a_string).hexdigest()
+
+def find_python(min_version=(2, 5)):
+ try:
+ if sys.version_info >= min_version:
+ return sys.executable
+ except AttributeError:
+ # In case the version of Python is somehow missing sys.version_info or sys.executable.
+ pass
+
+ version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
+ binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python')
+ for binary in binaries:
+ try:
+ out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ for stream in (out, err):
+ match = version.search(stream)
+ if match:
+ versiontuple = tuple(map(int, match.group(1).split('.')))
+ if versiontuple >= min_version:
+ return which(binary)
+ except:
+ pass
+
+ raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version))
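+# Usage sketch: find_python() prefers the interpreter running this script when
+# it meets min_version, and otherwise probes the candidate binaries with
+# '<binary> -V', parsing the version banner (which Python 2 prints to stderr,
+# hence checking both streams). find_python((2, 6)) might, for example, return
+# '/usr/bin/python2.7' (hypothetical path).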
+
+def smoke_command(*args):
+ # return a list of arguments that comprises a complete
+ # invocation of smoke.py
+ here = os.path.dirname(__file__)
+ smoke_py = os.path.abspath(os.path.join(here, 'smoke.py'))
+ # the --with-cleanbb argument causes smoke.py to run
+ # buildscripts/cleanbb.py before each test phase; this
+ # prevents us from running out of disk space on slaves
+ return [find_python(), smoke_py, '--with-cleanbb'] + list(args)
+
+def run_smoke_command(*args):
+ # to run a command line script from a scons Alias (or any
+ # Action), the command sequence must be enclosed in a list,
+ # otherwise SCons treats it as a list of dependencies.
+ return [smoke_command(*args)]
+
+# unicode is a pain. some strings cannot be unicode()'d
+# but we want to just preserve the bytes in a human-readable
+# fashion. this codec error handler will substitute the
+# repr() of the offending bytes into the decoded string
+# at the position they occurred
+def replace_with_repr(unicode_error):
+ offender = unicode_error.object[unicode_error.start:unicode_error.end]
+ return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end)
+
+codecs.register_error('repr', replace_with_repr)
+
+def unicode_dammit(string, encoding='utf8'):
+ # convert a string to a unicode, using the Python
+ # representation of non-ascii bytes when necessary
+ #
+    # name inspired by BeautifulSoup's "UnicodeDammit"
+ return string.decode(encoding, 'repr')
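+
+# Usage sketch: unicode_dammit('caf\xc3\xa9 \xff') decodes the valid UTF-8
+# prefix normally and substitutes the repr() of the stray 0xff byte instead of
+# raising UnicodeDecodeError, producing u'caf\xe9 \\xff'.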
+
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/analyze_plan.js b/src/mongo/gotools/test/legacy28/jstests/libs/analyze_plan.js
new file mode 100644
index 00000000000..9c2ebffd890
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/analyze_plan.js
@@ -0,0 +1,80 @@
+// Contains helpers for checking, based on the explain output, properties of a
+// plan. For instance, there are helpers for checking whether a plan is a collection
+// scan or whether the plan is covered (index only).
+
+/**
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan has a stage called 'stage'.
+ */
+function planHasStage(root, stage) {
+ if (root.stage === stage) {
+ return true;
+ }
+ else if ("inputStage" in root) {
+ return planHasStage(root.inputStage, stage);
+ }
+ else if ("inputStages" in root) {
+ for (var i = 0; i < root.inputStages.length; i++) {
+ if (planHasStage(root.inputStages[i], stage)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/**
+ * A query is covered iff it does *not* have a FETCH stage or a COLLSCAN.
+ *
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan is index only. Otherwise returns false.
+ */
+function isIndexOnly(root) {
+ return !planHasStage(root, "FETCH") && !planHasStage(root, "COLLSCAN");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * an index scan, and false otherwise.
+ */
+function isIxscan(root) {
+ return planHasStage(root, "IXSCAN");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * the idhack fast path, and false otherwise.
+ */
+function isIdhack(root) {
+ return planHasStage(root, "IDHACK");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * a collection scan, and false otherwise.
+ */
+function isCollscan(root) {
+ return planHasStage(root, "COLLSCAN");
+}
+
+/**
+ * Get the number of chunk skips for the BSON exec stats tree rooted at 'root'.
+ */
+function getChunkSkips(root) {
+ if (root.stage === "SHARDING_FILTER") {
+ return root.chunkSkips;
+ }
+ else if ("inputStage" in root) {
+ return getChunkSkips(root.inputStage);
+ }
+ else if ("inputStages" in root) {
+ var skips = 0;
+ for (var i = 0; i < root.inputStages.length; i++) {
+            skips += getChunkSkips(root.inputStages[i]);
+ }
+ return skips;
+ }
+
+ return 0;
+}
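+
+// Usage sketch (hypothetical explain output): given
+//   var explain = db.foo.find({a: 1}).explain("executionStats");
+// the helpers can be applied to the winning plan, e.g.
+//   isIxscan(explain.queryPlanner.winningPlan);    // true if an IXSCAN stage exists
+//   isIndexOnly(explain.queryPlanner.winningPlan); // true for covered queries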
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/authTestsKey b/src/mongo/gotools/test/legacy28/jstests/libs/authTestsKey
new file mode 100644
index 00000000000..573898a4f05
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/authTestsKey
@@ -0,0 +1 @@
+This key is only for running the suite with authentication dont use it in any tests directly
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/badSAN.pem b/src/mongo/gotools/test/legacy28/jstests/libs/badSAN.pem
new file mode 100644
index 00000000000..d8e362731e0
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/badSAN.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgIDAYKXMA0GCSqGSIb3DQEBBQUAMHQxFzAVBgNVBAMTDktl
+cm5lbCBUZXN0IENBMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIx
+FjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYD
+VQQGEwJVUzAeFw0xNDA5MjMxNTE3MjNaFw0zNDA5MjMxNTE3MjNaMG8xEjAQBgNV
+BAMTCTEyNy4wLjAuMTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RC
+MRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkG
+A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCDB/lxuzeU
+OHR5nnOTJM0fHz0WeicnuUfGG5wP89Mbkd3Y+BNS0ozbnkW+NAGhD+ehNBjogISZ
+jLCd+uaYu7TLWpkgki+1+gM99Ro0vv7dIc8vD7ToILKMbM8xQmLbSxDT2tCUoXlc
+m7ccgDZl9oW1scQYQ8gWHjmk3yK8sCoGa/uwr49u74aVM7673tLsK41m8oYPzt/q
+VGT+mXpBJQcGXkTNQtIPxBtD25jr+aPietS3u70zrVPY6ZDsGE7DofEeRl97kVoF
+NcpaQmVEwEo8KCWaT6OaPaUUUjAMwzqiZaHNZ6mL1pCr65bLXP6T9tiMtWLw5+SG
+3E09fhQuWod5AgMBAAGjFTATMBEGA1UdEQQKMAiCBmJhZFNBTjANBgkqhkiG9w0B
+AQUFAAOCAQEAQzlibJvlUpJG3vc5JppdrudpXoVAP3wtpzvnkrY0GTWIUE52mCIf
+MJ5sARvjzs/uMhV5GLnjqTcT+DFkihqKyFo1tKBD7LSuSjfDvjmggG9lq0/xDvVU
+uczAuNtI1T7N+6P7LyTG4HqniYouPMDWyCKBOmzzNsk+r1OJb6cxU7QQwmSWw1n1
+ztNcF6JzCQVcd9Isau9AEXZ9q0M0sjD9mL67Qo3Dh3Mvf4UkJKqm3KOQOupUHZLU
+vJwfsS2u+gfHY1Plywzq3AuT7ygbksR3Pqfs8LFPnuRAH+41sFTGUM52hiU7mNPj
+ebl8s1tjK7WQ+a8GTABJV0hDNeWd3Sr+Og==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAgwf5cbs3lDh0eZ5zkyTNHx89FnonJ7lHxhucD/PTG5Hd2PgT
+UtKM255FvjQBoQ/noTQY6ICEmYywnfrmmLu0y1qZIJIvtfoDPfUaNL7+3SHPLw+0
+6CCyjGzPMUJi20sQ09rQlKF5XJu3HIA2ZfaFtbHEGEPIFh45pN8ivLAqBmv7sK+P
+bu+GlTO+u97S7CuNZvKGD87f6lRk/pl6QSUHBl5EzULSD8QbQ9uY6/mj4nrUt7u9
+M61T2OmQ7BhOw6HxHkZfe5FaBTXKWkJlRMBKPCglmk+jmj2lFFIwDMM6omWhzWep
+i9aQq+uWy1z+k/bYjLVi8OfkhtxNPX4ULlqHeQIDAQABAoIBAC4Bx8jyJmKpq+Pk
+CcqZelg6HLXesA7XlGbv3M0RHIeqoM2E1SwYd5LJMM3G7ueBcR/97dz8+xH6/yyJ
+Ixxvk9xu9CMmkRABN9AyVkA867nzHA73Idr7WBXMQreWCqXa5o6sXt5BEB6/If0k
+23TTqUERqLuoWQHDHRRRsJ218RuNmbvBe8TGXcfunC0eeDVKDeqAXol6bD5lztdu
+B6jkdLt5UZSQ7X8OmClbeDlac90B8usNi+pUE9q1p7X462vAw8LohkxLY2nyIcmU
+feNdTNHP+lklv+E+p9w/Az7Hf6zxm525tw90QVI048fr9SL3ftLHOt4FhucSCn0Z
+CjylP4ECgYEA+nQrNVdVwmxcWCVn69LR1grNXUSz+fLHCo+QKma4IyC1kuuZ+BBo
+Iwdf9t/S1tgtTYru3uxzCpQg7J1iDeEFEsMHl0rc6U1MmIE+6OvACVG3yotqoOqE
+852pi1OWIe94yTk2ZmNXJ8gpUE/gtMprbcSWOb7IzzrXy2lDcaEMuGkCgYEAhe7L
+ZvYI4LEvu6GSPp97qBzDH9m5UrHaTZIJk/Nu7ie919Sdg62LTfphsaK+pSyA55XQ
+8L9P7wNUPC44NnE+7CIJZsIuKdYqR5QI6No9RdTyij0Hgljfc7KuH2b8lf8EjvuH
+qZAf5zL3pIOQs8E8/MYHlGIqmTkYK41eCAcS9JECgYEADnra6KmU9rmnGR2IhZTZ
+tuNG/kZzlVbY9R5ZumnX6YgBl23xp+ri6muJu88y9GLpM5t9tfu7pvfrc2KiAaVp
+0qzd6nxUi1SBwituxK6kmqVT1+z5jDYi26bY34pEms+qjw+0unSx3EXxRYhouGsf
+jOgZu1rxZzHCuirq0E38W0kCgYBzOK16RX37t9OFywlioJekWCIxu4BouSNCirl8
+s/eiIUR8cqiUCPAIRLhZNtZmiTPYiBW5mAyvZiDIqUao56InSVznL3TBf0LeU2ea
+023VLs79yGU2aTjLc1PDJjl03XDRhWj/okMgBsPvn1QUoNDT8ZXBvPZC3VCC31qe
+818GUQKBgQDBUP2BC/Th/0dErOQ5lWkY3YbmzrTp2pDsHGZJRD+OdQ5B8FUvCP8m
+JESk/0ATn7niUqawnOy/2KlKIkeBBV2XL1rjIGEhCkBUuhCiInNDqz1AGdXzIKaT
+myoZ4PhIsH1D643e6iLhyAZuUAA4yB31E2a3l7EMyhV3vKbdWWygGQ==
+-----END RSA PRIVATE KEY-----
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/ca.pem b/src/mongo/gotools/test/legacy28/jstests/libs/ca.pem
new file mode 100644
index 00000000000..d1a5689cf0f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/ca.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgIBATANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB0MRcwFQYDVQQD
+Ew5LZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25n
+b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL
+MAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCBxSXj
+qA5y2EMQkcmvLDNikE88Og3+spJ3ex60HWVPk8EeXN68jyfbKLYsoCcBE2rBAE/N
+shVBJa8irh0o/UTh1XNW4iGCsfMvYamXiHnaOjmGVKjfBoj6pzQH0uK0X5olm3Sa
+zZPkLLCR81yxsK6woJZMFTvrlEjxj/SmDZ9tVXW692bC4i6nGvOCSpgv9kms85xO
+Ed2xbuCLXFDXKafXZd5AK+iegkDs3ah7VXMEE8sbqGnlqC1nsy5bpCnb7aC+3af7
+SV2XEFlSQT5kwTmk9CvTDzM9O78SO8nNhEOFBLQEdGDGd3BShE8dCdh2JTy3zKsb
+WeE+mxy0mEwxNfGfAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
+BQADggEBAANwbvhM5K/Jcl6yzUSqAawvyAypT5aWBob7rt9KFq/aemjMN0gY2nsS
+8WTGd9jiXlxGc/TzrK6MOsJ904UAFE1L9uR//G1gIBa9dNbYoiii2Fc8b1xDVJEP
+b23rl/+GAT6UTSY+YgEjeA4Jk6H9zotO07lSw06rbCQam5SdA5UiMvuLHWCo3BHY
+8WzqLiW/uHlb4K5prF9yuTUBEIgkRvvvyOKXlRvm1Ed5UopT2hmwA86mffAfgJc2
+vSbm9/8Q00fYwO7mluB6mbEcnbquaqRLoB83k+WbwUAZ2yjWHXuXVMPwyaysazcp
+nOjaLwQJQgKejY62PiNcw7xC/nIxBeI=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAgcUl46gOcthDEJHJrywzYpBPPDoN/rKSd3setB1lT5PBHlze
+vI8n2yi2LKAnARNqwQBPzbIVQSWvIq4dKP1E4dVzVuIhgrHzL2Gpl4h52jo5hlSo
+3waI+qc0B9LitF+aJZt0ms2T5CywkfNcsbCusKCWTBU765RI8Y/0pg2fbVV1uvdm
+wuIupxrzgkqYL/ZJrPOcThHdsW7gi1xQ1ymn12XeQCvonoJA7N2oe1VzBBPLG6hp
+5agtZ7MuW6Qp2+2gvt2n+0ldlxBZUkE+ZME5pPQr0w8zPTu/EjvJzYRDhQS0BHRg
+xndwUoRPHQnYdiU8t8yrG1nhPpsctJhMMTXxnwIDAQABAoIBAD5iGOnM800wO2Uu
+wGbOd9FNEFoiinHDRHfdnw/1BavwmqjO+mBo7T8E3jarsrRosiwfyz1V+7O6uuuQ
+CgKXZlKuOuksgfGDPCWt7EolWHaZAOhbsGaujJD6ah/MuCD/yGmFxtNYOl05QpSX
+Cht9lSzhtf7TQl/og/xkOLbO27JB540ck/OCSOczXg9Z/O8AmIUyDn7AKb6G1Zhk
+2IN//HQoAvDUMZLWrzy+L7YGbA8pBR3yiPsYBH0rX2Oc9INpiGA+B9Nf1HDDsxeZ
+/o+5xLbRDDfIDtlYO0cekJ053W0zUQLrMEIn9991EpG2O/fPgs10NlKJtaFH8CmT
+ExgVA9ECgYEA+6AjtUdxZ0BL3Wk773nmhesNH5/5unWFaGgWpMEaEM7Ou7i6QApL
+KAbzOYItV3NNCbkcrejq7jsDGEmiwUOdXeQx6XN7/Gb2Byc/wezy5ALi0kcUwaur
+6s9+Ah+T4vcU2AjfuCWXIpe46KLEbwORmCRQGwkCBCwRhHGt5sGGxTkCgYEAhAaw
+voHI6Cb+4z3PNAKRnf2rExBYRyCz1KF16ksuwJyQSLzFleXRyRWFUEwLuVRL0+EZ
+JXhMbtrILrc23dJGEsB8kOCFehSH/IuL5eB0QfKpDFA+e6pimsbVeggx/rZhcERB
+WkcV3jN4O82gSL3EnIgvAT1/nwhmbmjvDhFJhZcCgYBaW4E3IbaZaz9S/O0m69Fa
+GbQWvS3CRV1oxqgK9cTUcE9Qnd9UC949O3GwHw0FMERjz3N7B/8FGW/dEuQ9Hniu
+NLmvqWbGlnqWywNcMihutJKbDCdp/Km5olUPkiNbB3sWsOkViXoiU/V0pK6BZvir
+d67EZpGwydpogyH9kVVCEQKBgGHXc3Q7SmCBRbOyQrQQk0m6i+V8328W1S5m2bPg
+M62aWXMOMn976ZRT1pBDSwz1Y5yJ3NDf7gTZLjEwpgCNrFCJRcc4HLL0NDL8V5js
+VjvpUU5GyYdsJdb+M4ZUPHi/QEaqzqPQumwJSLlJEdfWirZWVj9dDA8XcpGwQjjy
+psHRAoGBAJUTgeJYhjK7k5sgfh+PRqiRJP0msIH8FK7SenBGRUkelWrW6td2Riey
+EcOCMFkRWBeDgnZN5xDyWLBgrzpw9iHQQIUyyBaFknQcRUYKHkCx+k+fr0KHHCUb
+X2Kvf0rbeMucb4y/h7950HkBBq83AYKMAoI8Ql3cx7pKmyOLXRov
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/client.pem b/src/mongo/gotools/test/legacy28/jstests/libs/client.pem
new file mode 100644
index 00000000000..50a64e41728
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/client.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDXTCCAkWgAwIBAgIBAzANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBwMQ8wDQYDVQQD
+EwZjbGllbnQxEzARBgNVBAsTCktlcm5lbFVzZXIxEDAOBgNVBAoTB01vbmdvREIx
+FjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYD
+VQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJIFboAk9Fdi
+DY5Xld2iw36vB3IpHEfgWIimd+l1HX4jyp35i6xoqkZZHJUL/NMbUFJ6+44EfFJ5
+biB1y1Twr6GqpYp/3R30jKQU4PowO7DSal38MR34yiRFYPG4ZPPXXfwPSuwKrSNo
+bjqa0/DRJRVQlnGwzJkPsWxIgCjc8KNO/dSHv/CGymc9TjiFAI0VVOhMok1CBNvc
+ifwWjGBg5V1s3ItMw9x5qk+b9ff5hiOAGxPiCrr8R0C7RoeXg7ZG8K/TqXbsOZEG
+AOQPRGcrmqG3t4RNBJpZugarPWW6lr11zMpiPLFTrbq3ZNYB9akdsps4R43TKI4J
+AOtGMJmK430CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAA+nPgVT4addi13yB6mjW
++UhdUkFwtb1Wcg0sLtnNucopHZLlCj5FfDdp1RQxe3CyMonxyHTKkrWtQmVtUyvf
+C/fjpIKt9A9kAmveMHBiu9FTNTc0sbiXcrEBeHF5cD7N+Uwfoc/4rJm0WjEGNkAd
+pYLCCLVZXPVr3bnc3ZLY1dFZPsJrdH3nJGMjLgUmoNsKnaGozcjiKiXqm6doFzkg
+0Le5yD4C/QTaie2ycFa1X5bJfrgoMP7NqKko05h4l0B0+DnjpoTJN+zRreNTMKvE
+ETGvpUu0IYGxe8ZVAFnlEO/lUeMrPFvH+nDmJYsxO1Sjpds2hi1M1JoeyrTQPwXj
+2Q==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAkgVugCT0V2INjleV3aLDfq8HcikcR+BYiKZ36XUdfiPKnfmL
+rGiqRlkclQv80xtQUnr7jgR8UnluIHXLVPCvoaqlin/dHfSMpBTg+jA7sNJqXfwx
+HfjKJEVg8bhk89dd/A9K7AqtI2huOprT8NElFVCWcbDMmQ+xbEiAKNzwo0791Ie/
+8IbKZz1OOIUAjRVU6EyiTUIE29yJ/BaMYGDlXWzci0zD3HmqT5v19/mGI4AbE+IK
+uvxHQLtGh5eDtkbwr9Opduw5kQYA5A9EZyuaobe3hE0Emlm6Bqs9ZbqWvXXMymI8
+sVOturdk1gH1qR2ymzhHjdMojgkA60YwmYrjfQIDAQABAoIBAB249VEoNIRE9TVw
+JpVCuEBlKELYk2UeCWdnWykuKZ6vcmLNlNy3QVGoeeTs172w5ZykY+f4icXP6da5
+o3XauCVUMvYKKNwcFzSe+1xxzPSlH/mZh/Xt2left6f8PLBVuk/AXSPG2I9Ihodv
+VIzERaQdD0J9FmhhhV/hMhUfQ+w5rTCaDpq1KVGU61ks+JAtlQ46g+cvPF9c80cI
+TEC875n2LqWKmLRN43JUnctV3uGTmolIqCRMHPAs/egl+lG2RXJjqXSQ2uFLOvC/
+PXtBb597yadSs2BWPnTu/r7LbLGBAExzlQK1uFsTvuKsBPb3qrvUux0L68qwPuiv
+W24N8BECgYEAydtAvVB7OymQEX3mck2j7ixDN01wc1ZaCLBDvYPYS/Pvzq4MBiAD
+lHRtbIa6HPGA5jskbccPqQn8WGnJWCaYvCQryvgaA+BBgo1UTLfQJUo/7N5517vv
+KvbUa6NF0nj3VwfDV1vvy+amoWi9NOVn6qOh0K84PF4gwagb1EVy9MsCgYEAuTAt
+KCWdZ/aNcKgJc4NCUqBpLPF7EQypX14teixrbF/IRNS1YC9S20hpkG25HMBXjpBe
+tVg/MJe8R8CKzYjCt3z5Ff1bUQ2bzivbAtgjcaO0Groo8WWjnamQlrIQcvWM7vBf
+dnIflQ0slxbHfCi3XEe8tj2T69R7wJZ8L7PxR9cCgYEACgwNtt6Qo6s37obzt3DB
+3hL57YC/Ph5oMNKFLKOpWm5z2zeyhYOGahc5cxNppBMpNUxwTb6AuwsyMjxhty+E
+nqi2PU4IDXVWDWd3cLIdfB2r/OA99Ez4ZI0QmaLw0L8QoJZUVL7QurdqR9JsyHs6
+puUqIrb195s/yiPR7sjeJe0CgYEAuJviKEd3JxCN52RcJ58OGrh2oKsJ9/EbV0rX
+Ixfs7th9GMDDHuOOQbNqKOR4yMSlhCU/hKA4PgTFWPIEbOiM08XtuZIb2i0qyNjH
+N4qnqr166bny3tJnzOAgl1ljNHa8y+UsBTO3cCr17Jh0vL0KLSAGa9XvBAWKaG6b
+1iIXwXkCgYAVz+DA1yy0qfXdS1pgPiCJGlGZXpbBcFnqvbpGSclKWyUG4obYCbrb
+p5VKVfoK7uU0ly60w9+PNIRsX/VN/6SVcoOzKx40qQBMuYfJ72DQrsPjPYvNg/Nb
+4SK94Qhp9TlAyXbqKJ02DjtuDim44sGZ8g7b+k3FfoK4OtzNsqdVdQ==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/client_revoked.pem b/src/mongo/gotools/test/legacy28/jstests/libs/client_revoked.pem
new file mode 100644
index 00000000000..03db67deb50
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/client_revoked.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDZTCCAk2gAwIBAgIBAjANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB4MRcwFQYDVQQD
+Ew5jbGllbnRfcmV2b2tlZDETMBEGA1UECxMKS2VybmVsVXNlcjEQMA4GA1UEChMH
+TW9uZ29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlv
+cmsxCzAJBgNVBAYTAlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+lJog+grPpvDFKFn9mxxToLgkx1uM+LmuRf1XG707TIccGfSKyZMGCcqlYVQmqT/J
+Fnq2rvtXGG9yxPhHoBwKw4x9rfQEy8Z93BAMNRoIlbt505HaWv7b+M3qksRHDLpw
+/Ix0Yay+fjT9DGmcrahC9h8c8SVtyHoODvxdmR7P+p7e6F6/6vcUkg/aU/50MRUN
+qGUSMkm/kpcjFNmDqRSLQoDPE8G2UOIolG/m95uaCOkGCT4lQjrCpR36fkr5a+vI
+UbDJP8M26Kp2fFnvObKzoSFSEVOMGeBM9p4qa88I4hwfthNFvGDOCNBKZK5ZxLZ3
+gGFcR6kL6u11y4zoLrZ6xwIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQB8WQMn/cjh
+9qFtr7JL4VPIz/+96QaBmkHxMqiYL/iMg5Vko3GllLc1mgfWQfaWOvyRJClKj395
+595L2u8wBKon3DXUPAkinc6+VOwDWsxFLNtWl+jhigat5UDzGm8ZKFhl0WwNhqzZ
+dlNPrh2LJZzPFfimfGyVkhPHYYdELvn+bnEMT8ae1jw2yQEeVFzHe7ZdlV5nMOE7
+Gx6ZZhYlS+jgpIxez5aiKqit/0azq5GGkpCv2H8/EXxkR4gLZGYnIqGuZP3r34NY
+Lkh5J3Qnpyhdopa/34yOCa8mY1wW7vEro0fb/Dh21bpyEOz6tBk3C1QRaGD+XQOM
+cedxtUjYmWqn
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAlJog+grPpvDFKFn9mxxToLgkx1uM+LmuRf1XG707TIccGfSK
+yZMGCcqlYVQmqT/JFnq2rvtXGG9yxPhHoBwKw4x9rfQEy8Z93BAMNRoIlbt505Ha
+Wv7b+M3qksRHDLpw/Ix0Yay+fjT9DGmcrahC9h8c8SVtyHoODvxdmR7P+p7e6F6/
+6vcUkg/aU/50MRUNqGUSMkm/kpcjFNmDqRSLQoDPE8G2UOIolG/m95uaCOkGCT4l
+QjrCpR36fkr5a+vIUbDJP8M26Kp2fFnvObKzoSFSEVOMGeBM9p4qa88I4hwfthNF
+vGDOCNBKZK5ZxLZ3gGFcR6kL6u11y4zoLrZ6xwIDAQABAoIBAFlu0T3q18Iu0VlR
+n5GEYMgvSuM4NAVVKo8wwwYMwu1xuvXb+NMLyuyFqzaCQKpHmywOOnfhCC/KkxX8
+Ho87kTbTDKhuXZyOHx0cA1zKCDSlGdK8yt9M1vJMa0pdGi2M34b+uOQ35IVsOocH
+4KWayIH7g52V2xZ2bpOSSnpm0uCPZSBTgClCgTUYepOT2wbLn/8V0NtVpZhDsBqg
+fORuEHkiurrbLa8yjQsvbR+hsR/XbGhre8sTQapj4EITXvkEuOL/vwbRebhOFHgh
+8sipsXZ9CMaJkBpVoLZTxTKQID/9006cczJK2MGKFhn6mvP6AeFuJAM3xqLGZTc4
+xxpfJyECgYEA0+iKxy5r1WUpBHR8jTh7WjLc6r5MFJQlGgLPjdQW6gCIe/PZc+b9
+x5vDp27EQ1cAEePEu0glQ/yk19yfxbxrqHsRjRrgwoiYTXjGI5zZSjXKArHyEgBj
+XOyo5leO5XMFnk2AShPlh+/RhAW3NhxcWkBEAsCD6QyC3BPvP6aaAXkCgYEAs4WH
+dTuweTdnyquHQm59ijatvBeP8h4tBozSupflQjB9WxJeW5uEa8lNQ3lSz1F4TV3M
+xvGdDSqwftLRS2mWGho/1jaCeAzjsiUQ2WUHChxprt0+QU7XkJbaBY9eF+6THZFw
+sDG688TiolxqoD8OYi8EtxmIvbQhXHmXnrk3jj8CgYBSi74rkrisuqg8tQejl0Ht
+w+xsgM5wIblGJZwmOlzmsGh6KGYnkO6Ap/uSKELJnIVJcrk63wKtNigccjPGufwR
++EbA+ZxeCwmQ/B/q1XmLP+K+JAUQ4BfUpdexSqA+XwzsOnJj6NY7mr65t+RDbs7G
+1Uvo6oc37Ai5pAZJfCN3uQKBgQAJr5qvaJkM8UBYXwjdPLjpTCnzjBHoLlifkdmM
+18U23QbmcwdESg/LAQF6MoGVTf//rJ/v2/ltTHBZZ2aDex7uKZxoImjHsWpXokhW
+cmz+zqmlFarWOzrGQl1hD2s0P1sQrVg3KXe8z1KrD/Fw0/Yitga7GlWWZrGmG6li
+lvu4YQKBgQANODQYEaz739IoPNnMfTpTqAoQIOR4PNdMfCXSQrCB8i0Hh4z48E4F
+DEAd1xIYyxI8pu7r52dQlBk7yrILOTG0gmgLJd5xKdtCTrasYAICI3hsRLtP8dVA
+8WeykXY4Wf1bYQ+VzKVImkwL/SBm2ik5woyxCzT8JSjyoAwRrQp9Vw==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/cluster_cert.pem b/src/mongo/gotools/test/legacy28/jstests/libs/cluster_cert.pem
new file mode 100644
index 00000000000..a8623ab67ef
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/cluster_cert.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDXjCCAkagAwIBAgIBBDANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBxMRQwEgYDVQQD
+EwtjbHVzdGVydGVzdDEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RC
+MRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkG
+A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCX42ZTwADG
+sEkS7ijfADlDQaJpbdgrnQKa5ssMQK3oRGSqXfTp0ThsJiVBbYZ8ZZRpPMgJdowa
+pFCGHQJh6VOdKelR0f/uNVpBGVz1yD4E4AtkA6UYcIJq6ywcj+W7Pli1Ed8VUN3Q
+tBU+HvHiEdMj74kLJb4ID1cP3gehvRv/0szkN8/ODFKCgYb1619BdFb9gRn8eily
+Wcg1m1gXz2xSfqRZkFEcEYet3BeOEGZBhaufJFzinvQjocH+kWFKlZf0+2DEFFbH
+NRqmabMmqMBUke629EUn8a7PBWBYNLld9afoNHwNY68wpONf5IqR2mNar5bVz8/d
+4g7BuVNvEFdJAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAA3U2O+cE/ZS8SDBw/sr
+BVFf0uaoME7+XX2jdTi4RUpWPfQ6uTkhKnXKzTzGrQtKwA96slGp4c3mxGBaAbC5
+IuTS97mLCju9NFvJVtazIajO4eNlG6dJSk0pQzjc0RAeLYksX/9NRNKZ+lQ5QVS2
+NVLce70QZBIvujjVJZ5hqDdjPV0JGOOUzNGyyUhzgY7s9MQagNnBSu5HO4CK1onc
+goOkizulq/5WF+JtqW8VKKx+/CH6SnTkS4b3qbjgKRmHZcOshH/d4KqhoLya7sfH
+pedmm7WgO9p8umXXqNj+04ehuPKTnD8tLMhj+GbJ9eIChPCBf1XnIzOXYep+fq9j
+n/g=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAl+NmU8AAxrBJEu4o3wA5Q0GiaW3YK50CmubLDECt6ERkql30
+6dE4bCYlQW2GfGWUaTzICXaMGqRQhh0CYelTnSnpUdH/7jVaQRlc9cg+BOALZAOl
+GHCCaussHI/luz5YtRHfFVDd0LQVPh7x4hHTI++JCyW+CA9XD94Hob0b/9LM5DfP
+zgxSgoGG9etfQXRW/YEZ/HopclnINZtYF89sUn6kWZBRHBGHrdwXjhBmQYWrnyRc
+4p70I6HB/pFhSpWX9PtgxBRWxzUapmmzJqjAVJHutvRFJ/GuzwVgWDS5XfWn6DR8
+DWOvMKTjX+SKkdpjWq+W1c/P3eIOwblTbxBXSQIDAQABAoIBAHhjNFMDZ1oUlgbc
+ICcI/VoyprFb8DA5ZbwzXBMdHPpxYzyp9hpxy3/lCDiAwEzPEsAK/h6KCOiA/pYe
+XioPD0gN1TIV+f3r6dqZaNYi3g1tK3odbXkejDCEsFT/NT6hXxw9yw0RKI9ofUHc
+synVqP3duUjNpH6s8fvQp0nqI0wzoNm1kklpTWVjZmbtSZF9m/xfv7NGwQEYUL2V
+f5YvX6aHPVDtUXAqyPBgv6SGuogSSjwRTsNTef3aY6Se5MlP3YIfRqdad8+ORkKu
+WSrO+GjQccV4sztD8Sn3LR7qe6Lmid4yopHSS4EFq0Sc8LznTeflWcRAsBLezRp5
+xZB/blECgYEA8yrEzFA247AOXbhL1CdqMyPs523oy5+dmByyovjYjEhjUCRlAa9D
+ApvID4TfAkA4n0rUdICCtwbZlFrBZbn6rXNvJ362ufZjvaFIucQm90YkG1J6Ldek
+8ohJfLyyLLWzVHJIS7WxFqqsGmDhYUTErFbJZjI8tNSglrc81jUWT7UCgYEAn+dw
+ICyc09f6+xm3nFZIOq2Gtpw8lrOJlwZugn1AqY2D5Ko2gq1Fx2oZWpVaBivjH3gU
+ONlnPuealE0RJHvCm/+axy7Rcj65IwTrN5V+j6rg1tuEdi70PvNKmN6XQqRvEjOX
+HOh3gQYP6EFAoVINZZqUkwJzqpv4tnOSpEHXncUCgYB3+Z8Vq3IZjtDXvslzCGtm
+hhAp81mLtdocpfQhYqP9Ou39KafIV/+49sGTnpwlUShet53xSUK1KSULBGgtV8Bt
++ela1DM1t3Joqn3mYfhTwoCoFl5/5cjVfRa8+6DxXEj5nlU7PY79PwIhFbG9ux9K
+ZJuD17+J/Oqq0gerLJAwjQKBgAS4AbkRV/dwcjmiwqZcbXk90bHl3mvcFH1edTho
+ldXrFS9UTpOApYSC/wiLS8LO3L76/i3HTKKwlwE1XQIknNOZsWmbWhby/uenp4FW
+agu3UTdF9xy9uft5loP4XaJb0+NHnnf97DjkgueptUyNbVPIQgYsllk8jRRlSLiM
+MN65AoGAUPLlh8ok/iNirO5YKqc5/3FKA1o1V1KSTHYVUK+Y+vuVJxQZeO3LMybe
+7AJ1cLHEWc8V4B27e6g33rfGGAW+/+RJ7/uHxuYCuKhstbq/x+rf9i4nl93emlMV
+PC3yuZsCmpk9Uypzi2+PT10yVgXkXRYtLpuUpoABWRzVXGnEsXo=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/command_line/test_parsed_options.js b/src/mongo/gotools/test/legacy28/jstests/libs/command_line/test_parsed_options.js
new file mode 100644
index 00000000000..f194b73ce7f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/command_line/test_parsed_options.js
@@ -0,0 +1,214 @@
+// Merge the two options objects. Used as a helper when we are trying to actually compare options
+// despite the fact that our test framework adds extra stuff to it. Anything set in the second
+// options object overrides the first options object. The two objects must have the same structure.
+function mergeOptions(obj1, obj2) {
+ var obj3 = {};
+ for (var attrname in obj1) {
+ if (typeof obj1[attrname] === "object" &&
+ typeof obj2[attrname] !== "undefined") {
+ if (typeof obj2[attrname] !== "object") {
+ throw Error("Objects being merged must have the same structure");
+ }
+ obj3[attrname] = mergeOptions(obj1[attrname], obj2[attrname]);
+ }
+ else {
+ obj3[attrname] = obj1[attrname];
+ }
+ }
+ for (var attrname in obj2) {
+ if (typeof obj2[attrname] === "object" &&
+ typeof obj1[attrname] !== "undefined") {
+ if (typeof obj1[attrname] !== "object") {
+ throw Error("Objects being merged must have the same structure");
+ }
+ // Already handled above
+ }
+ else {
+ obj3[attrname] = obj2[attrname];
+ }
+ }
+ return obj3;
+}
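+
+// Sketch of the merge semantics (hypothetical values): with
+//   var obj1 = { net : { port : 27017 }, verbose : true };
+//   var obj2 = { net : { port : 10000 } };
+// mergeOptions(obj1, obj2) returns { net : { port : 10000 }, verbose : true }:
+// leaves set in obj2 win, while keys present only in obj1 are preserved.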
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongod. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongod({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongod;
+function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongod using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongod = baseMongod.adminCommand("getCmdLineOpts");
+
+ // Stop the mongod we used to get the options
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongod;
+ }
+
+ if (typeof getCmdLineOptsBaseMongod === "undefined") {
+ getCmdLineOptsBaseMongod = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongod;
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsExpected.parsed.storage.dbPath;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with options
+ var mongod = MongoRunner.runMongod(mongoRunnerConfig);
+
+ // Create and authenticate high-privilege user in case mongod is running with authorization.
+ // Try/catch is necessary in case this is being run on an uninitiated replset, by a test
+ // such as repl_options.js for example.
+    try {
+        mongod.getDB("admin").createUser({user: "root", pwd: "pass", roles: ["root"]});
+        mongod.getDB("admin").auth("root", "pass");
+    }
+    catch (ex) {
+    }
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongod.adminCommand("getCmdLineOpts");
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsResult.parsed.storage.dbPath;
+ }
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ mongod.getDB("admin").logout();
+ MongoRunner.stopMongod(mongod.port);
+}
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongos. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongos({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongos;
+function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongos using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Start mongos with only the configdb option
+ var baseMongos = MongoRunner.runMongos({ configdb : baseMongod.host });
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongos = baseMongos.adminCommand("getCmdLineOpts");
+
+ // Remove the configdb option
+ delete getCmdLineOptsBaseMongos.parsed.sharding.configDB;
+
+ // Stop the mongod and mongos we used to get the options
+ MongoRunner.stopMongos(baseMongos.port);
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongos;
+ }
+
+ if (typeof getCmdLineOptsBaseMongos === "undefined") {
+ getCmdLineOptsBaseMongos = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongos;
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with no options
+ var mongod = MongoRunner.runMongod();
+
+ // Add configdb option
+ mongoRunnerConfig['configdb'] = mongod.host;
+
+ // Start mongos connected to mongod
+ var mongos = MongoRunner.runMongos(mongoRunnerConfig);
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongos.adminCommand("getCmdLineOpts");
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+
+ // Remove the configdb option
+ delete getCmdLineOptsResult.parsed.sharding.configDB;
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ MongoRunner.stopMongos(mongos.port);
+ MongoRunner.stopMongod(mongod.port);
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_auth.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_auth.ini
new file mode 100644
index 00000000000..c1193be1b03
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_auth.ini
@@ -0,0 +1 @@
+auth=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_dur.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_dur.ini
new file mode 100644
index 00000000000..8f83f3ae5a7
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_dur.ini
@@ -0,0 +1 @@
+dur=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_httpinterface.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_httpinterface.ini
new file mode 100644
index 00000000000..fc839a98a76
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_httpinterface.ini
@@ -0,0 +1 @@
+httpinterface=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_ipv6.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_ipv6.ini
new file mode 100644
index 00000000000..a091421022d
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_ipv6.ini
@@ -0,0 +1 @@
+ipv6=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_journal.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_journal.ini
new file mode 100644
index 00000000000..d0010a86906
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_journal.ini
@@ -0,0 +1 @@
+journal=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_jsonp.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_jsonp.ini
new file mode 100644
index 00000000000..82847f50b2b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_jsonp.ini
@@ -0,0 +1 @@
+jsonp=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_jsonp.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_jsonp.json
new file mode 100644
index 00000000000..4d5477a8547
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_jsonp.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "http" : {
+ "JSONPEnabled" : false
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_moveparanoia.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_moveparanoia.ini
new file mode 100644
index 00000000000..f21b50f9513
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_moveparanoia.ini
@@ -0,0 +1 @@
+moveParanoia=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noauth.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noauth.ini
new file mode 100644
index 00000000000..a65f909baf3
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noauth.ini
@@ -0,0 +1 @@
+noauth=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noautosplit.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noautosplit.ini
new file mode 100644
index 00000000000..b490f9038dd
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noautosplit.ini
@@ -0,0 +1 @@
+noAutoSplit=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nodur.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nodur.ini
new file mode 100644
index 00000000000..b0c73a48b30
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nodur.ini
@@ -0,0 +1 @@
+nodur=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nohttpinterface.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nohttpinterface.ini
new file mode 100644
index 00000000000..52c4958da6e
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nohttpinterface.ini
@@ -0,0 +1 @@
+nohttpinterface=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noindexbuildretry.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noindexbuildretry.ini
new file mode 100644
index 00000000000..79e428c492f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noindexbuildretry.ini
@@ -0,0 +1 @@
+noIndexBuildRetry=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nojournal.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nojournal.ini
new file mode 100644
index 00000000000..17172363d25
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nojournal.ini
@@ -0,0 +1 @@
+nojournal=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nomoveparanoia.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nomoveparanoia.ini
new file mode 100644
index 00000000000..4696304134f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nomoveparanoia.ini
@@ -0,0 +1 @@
+noMoveParanoia=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noobjcheck.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noobjcheck.ini
new file mode 100644
index 00000000000..471e83c3172
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noobjcheck.ini
@@ -0,0 +1 @@
+noobjcheck=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noprealloc.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noprealloc.ini
new file mode 100644
index 00000000000..08c78be3507
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noprealloc.ini
@@ -0,0 +1 @@
+noprealloc=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noscripting.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noscripting.ini
new file mode 100644
index 00000000000..4cfaf3395f6
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_noscripting.ini
@@ -0,0 +1 @@
+noscripting=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nounixsocket.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nounixsocket.ini
new file mode 100644
index 00000000000..66da9f08391
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_nounixsocket.ini
@@ -0,0 +1 @@
+nounixsocket=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_objcheck.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_objcheck.ini
new file mode 100644
index 00000000000..bd19d026bbf
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_objcheck.ini
@@ -0,0 +1 @@
+objcheck=false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_rest_interface.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_rest_interface.json
new file mode 100644
index 00000000000..f9ad93a4f5d
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/disable_rest_interface.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "http" : {
+ "RESTInterfaceEnabled" : false
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_auth.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_auth.json
new file mode 100644
index 00000000000..9f9cc84d107
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_auth.json
@@ -0,0 +1,5 @@
+{
+ "security" : {
+ "authorization" : "enabled"
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_autosplit.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_autosplit.json
new file mode 100644
index 00000000000..a0d4f8af1be
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_autosplit.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "autoSplit" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_httpinterface.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_httpinterface.json
new file mode 100644
index 00000000000..c87dabe125d
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_httpinterface.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "http" : {
+ "enabled" : true
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_indexbuildretry.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_indexbuildretry.json
new file mode 100644
index 00000000000..362db08edd3
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_indexbuildretry.json
@@ -0,0 +1,5 @@
+{
+ "storage" : {
+ "indexBuildRetry" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_journal.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_journal.json
new file mode 100644
index 00000000000..d75b94ccbc7
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_journal.json
@@ -0,0 +1,7 @@
+{
+ "storage" : {
+ "journal" : {
+ "enabled" : false
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_objcheck.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_objcheck.json
new file mode 100644
index 00000000000..b52be7382ed
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_objcheck.json
@@ -0,0 +1,5 @@
+{
+ "net" : {
+ "wireObjectCheck" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_paranoia.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_paranoia.json
new file mode 100644
index 00000000000..218646b1662
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_paranoia.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "archiveMovedChunks" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_prealloc.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_prealloc.json
new file mode 100644
index 00000000000..15ecefbb546
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_prealloc.json
@@ -0,0 +1,5 @@
+{
+ "storage" : {
+ "preallocDataFiles" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_scripting.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_scripting.json
new file mode 100644
index 00000000000..e8f32f2c23c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_scripting.json
@@ -0,0 +1,5 @@
+{
+ "security" : {
+ "javascriptEnabled" : true
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_unixsocket.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_unixsocket.json
new file mode 100644
index 00000000000..660d21eb17f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/enable_unixsocket.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "unixDomainSocket" : {
+ "enabled" : true
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_dur.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_dur.ini
new file mode 100644
index 00000000000..43495fbd0bd
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_dur.ini
@@ -0,0 +1 @@
+dur=
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_journal.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_journal.ini
new file mode 100644
index 00000000000..f750ac2e185
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_journal.ini
@@ -0,0 +1 @@
+journal=
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_nodur.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_nodur.ini
new file mode 100644
index 00000000000..f1046df16a9
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_nodur.ini
@@ -0,0 +1 @@
+nodur=
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_nojournal.ini b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_nojournal.ini
new file mode 100644
index 00000000000..737e5c28029
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/implicitly_enable_nojournal.ini
@@ -0,0 +1 @@
+nojournal=
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_component_verbosity.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_component_verbosity.json
new file mode 100644
index 00000000000..69c200834a1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_component_verbosity.json
@@ -0,0 +1,16 @@
+{
+ "systemLog" : {
+ "verbosity" : 2,
+ "component" : {
+ "accessControl" : {
+ "verbosity" : 0
+ },
+ "storage" : {
+ "verbosity" : 3,
+ "journaling" : {
+ "verbosity" : 5
+ }
+ }
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_profiling.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_profiling.json
new file mode 100644
index 00000000000..944f0de1575
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_profiling.json
@@ -0,0 +1,5 @@
+{
+ "operationProfiling" : {
+ "mode" : "all"
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_replsetname.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_replsetname.json
new file mode 100644
index 00000000000..522ca2b766f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_replsetname.json
@@ -0,0 +1,5 @@
+{
+ "replication" : {
+ "replSetName" : "myconfigname"
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_shardingrole.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_shardingrole.json
new file mode 100644
index 00000000000..71f92f122db
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_shardingrole.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "clusterRole" : "configsvr"
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_verbosity.json b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_verbosity.json
new file mode 100644
index 00000000000..47a1cce1b03
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/config_files/set_verbosity.json
@@ -0,0 +1,5 @@
+{
+ "systemLog" : {
+ "verbosity" : 5
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/crl.pem b/src/mongo/gotools/test/legacy28/jstests/libs/crl.pem
new file mode 100644
index 00000000000..275c9e2d91c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/crl.pem
@@ -0,0 +1,38 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Aug 21 13:56:28 2014 GMT
+ Next Update: Aug 18 13:56:28 2024 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+No Revoked Certificates.
+ Signature Algorithm: sha256WithRSAEncryption
+ 48:1b:0b:b1:89:f5:6f:af:3c:dd:2a:a0:e5:55:04:80:16:b4:
+ 23:98:39:bb:9f:16:c9:25:73:72:c6:a6:73:21:1d:1a:b6:99:
+ fc:47:5e:bc:af:64:29:02:9c:a5:db:15:8a:65:48:3c:4f:a6:
+ cd:35:47:aa:c6:c0:39:f5:a6:88:8f:1b:6c:26:61:4e:10:d7:
+ e2:b0:20:3a:64:92:c1:d3:2a:11:3e:03:e2:50:fd:4e:3c:de:
+ e2:e5:78:dc:8e:07:a5:69:55:13:2b:8f:ae:21:00:42:85:ff:
+ b6:b1:2b:69:08:40:5a:25:8c:fe:57:7f:b1:06:b0:72:ff:61:
+ de:21:59:05:a8:1b:9e:c7:8a:08:ab:f5:bc:51:b3:36:68:0f:
+ 54:65:3c:8d:b7:80:d0:27:01:3e:43:97:89:19:89:0e:c5:01:
+ 2c:55:9f:b6:e4:c8:0b:35:f8:52:45:d3:b4:09:ce:df:73:98:
+ f5:4c:e4:5a:06:ac:63:4c:f8:4d:9c:af:88:fc:19:f7:77:ea:
+ ee:56:18:49:16:ce:62:66:d1:1b:8d:66:33:b5:dc:b1:25:b3:
+ 6c:81:e9:d0:8a:1d:83:61:49:0e:d9:94:6a:46:80:41:d6:b6:
+ 59:a9:30:55:3d:5b:d3:5b:f1:37:ec:2b:76:d0:3a:ac:b2:c8:
+ 7c:77:04:78
+-----BEGIN X509 CRL-----
+MIIBzjCBtwIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDgyMTEzNTYyOFoXDTI0MDgxODEzNTYyOFqgDzANMAsGA1UdFAQEAgIQADAN
+BgkqhkiG9w0BAQsFAAOCAQEASBsLsYn1b6883Sqg5VUEgBa0I5g5u58WySVzcsam
+cyEdGraZ/EdevK9kKQKcpdsVimVIPE+mzTVHqsbAOfWmiI8bbCZhThDX4rAgOmSS
+wdMqET4D4lD9Tjze4uV43I4HpWlVEyuPriEAQoX/trEraQhAWiWM/ld/sQawcv9h
+3iFZBagbnseKCKv1vFGzNmgPVGU8jbeA0CcBPkOXiRmJDsUBLFWftuTICzX4UkXT
+tAnO33OY9UzkWgasY0z4TZyviPwZ93fq7lYYSRbOYmbRG41mM7XcsSWzbIHp0Iod
+g2FJDtmUakaAQda2WakwVT1b01vxN+wrdtA6rLLIfHcEeA==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/crl_client_revoked.pem b/src/mongo/gotools/test/legacy28/jstests/libs/crl_client_revoked.pem
new file mode 100644
index 00000000000..0b99d56936e
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/crl_client_revoked.pem
@@ -0,0 +1,41 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Aug 21 13:43:27 2014 GMT
+ Next Update: Aug 18 13:43:27 2024 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+Revoked Certificates:
+ Serial Number: 02
+ Revocation Date: Aug 21 13:43:27 2014 GMT
+ Signature Algorithm: sha256WithRSAEncryption
+ 24:86:73:8d:7f:55:15:d0:d6:8a:47:53:cf:97:f7:e5:3d:0b:
+ 4a:ea:fb:02:6a:2e:79:c6:b1:38:b2:ac:f0:c0:64:47:b0:3e:
+ ad:4e:2e:94:e6:64:ed:79:34:bd:74:c0:d4:3d:b9:a1:bb:38:
+ 89:5c:02:6a:ad:6b:dc:3b:64:34:6a:2d:4c:90:36:82:95:0c:
+ 19:88:e2:a3:bf:8e:1b:56:98:37:32:87:ed:f0:bd:dd:e2:0d:
+ f9:80:dc:f2:a5:b4:ee:d9:bb:83:fe:b8:3a:13:e0:da:fc:04:
+ 77:fb:ce:f9:c5:2a:54:a7:f0:34:09:2a:b2:3d:46:1b:48:e6:
+ e8:16:c7:a1:3c:88:8c:72:cd:cc:53:dc:f8:54:63:1f:b9:8b:
+ ea:2c:e5:26:c5:b4:a4:9f:8b:e1:6c:85:9b:c6:63:6f:2f:ae:
+ 18:c5:6a:23:f0:58:27:85:5c:0f:01:04:da:d2:8b:de:9e:ab:
+ 46:00:22:07:28:e1:ef:46:91:90:06:58:95:05:68:67:58:6e:
+ 67:a8:0b:06:1a:73:d9:04:18:c9:a3:e4:e3:d6:94:a3:e1:5c:
+ e5:08:1b:b3:9d:ab:3e:ea:20:b1:04:e5:90:e1:42:54:b2:58:
+ bb:51:1a:48:87:60:b0:95:4a:2e:ce:a0:4f:8c:17:6d:6b:4c:
+ 37:aa:4d:d7
+-----BEGIN X509 CRL-----
+MIIB5DCBzQIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDgyMTEzNDMyN1oXDTI0MDgxODEzNDMyN1owFDASAgECFw0xNDA4MjExMzQz
+MjdaoA8wDTALBgNVHRQEBAICEAAwDQYJKoZIhvcNAQELBQADggEBACSGc41/VRXQ
+1opHU8+X9+U9C0rq+wJqLnnGsTiyrPDAZEewPq1OLpTmZO15NL10wNQ9uaG7OIlc
+Amqta9w7ZDRqLUyQNoKVDBmI4qO/jhtWmDcyh+3wvd3iDfmA3PKltO7Zu4P+uDoT
+4Nr8BHf7zvnFKlSn8DQJKrI9RhtI5ugWx6E8iIxyzcxT3PhUYx+5i+os5SbFtKSf
+i+FshZvGY28vrhjFaiPwWCeFXA8BBNrSi96eq0YAIgco4e9GkZAGWJUFaGdYbmeo
+CwYac9kEGMmj5OPWlKPhXOUIG7Odqz7qILEE5ZDhQlSyWLtRGkiHYLCVSi7OoE+M
+F21rTDeqTdc=
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/crl_expired.pem b/src/mongo/gotools/test/legacy28/jstests/libs/crl_expired.pem
new file mode 100644
index 00000000000..c9b3abb05a7
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/crl_expired.pem
@@ -0,0 +1,38 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Jul 21 19:45:56 2014 GMT
+ Next Update: Jul 21 20:45:56 2014 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+No Revoked Certificates.
+ Signature Algorithm: sha256WithRSAEncryption
+ 14:e8:6d:51:fc:0e:66:08:22:b2:4d:fb:da:7a:5f:4d:d1:a0:
+ 80:f0:18:f3:c5:ca:c7:05:6c:70:59:fa:d5:96:68:fa:c7:1d:
+ 7e:fb:53:3b:4a:8f:ed:bb:51:04:e8:fb:db:d7:b8:96:d9:e2:
+ 8d:bb:54:cc:11:60:c8:20:ea:81:28:5f:e1:eb:d6:8c:94:bf:
+ 42:e0:7f:a3:13:0c:76:05:f2:f0:34:98:a3:e8:64:74:4c:cb:
+ bf:39:bb:fa:d5:2d:72:02:d1:fa:56:15:59:12:b7:ff:a3:cc:
+ c9:d6:14:ca:4a:1e:0b:b4:47:cf:58:b0:e5:24:d2:21:71:0d:
+ 2d:09:77:5c:2f:ef:40:f8:74:90:03:cc:37:2e:ea:6a:25:59:
+ c0:bf:48:90:00:55:9c:db:bf:1f:f0:7b:b6:5a:90:94:b6:8d:
+ 7c:7d:bb:2d:11:5f:0c:f5:4a:9b:c5:ed:ab:e3:fd:35:c8:76:
+ 3b:2e:41:cb:df:76:b5:f4:e9:05:72:f6:56:7a:fc:34:07:d6:
+ a2:55:eb:7c:58:33:5b:9d:3e:b2:03:89:01:c6:d1:54:75:1a:
+ 5c:73:3f:5e:2e:fd:3b:38:ed:d4:e1:fa:ec:ff:84:f0:55:ee:
+ 83:e0:f0:13:97:e7:f0:55:8c:00:a3:1a:31:e4:31:9e:68:d0:
+ 6d:3e:81:b0
+-----BEGIN X509 CRL-----
+MIIBzjCBtwIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDcyMTE5NDU1NloXDTE0MDcyMTIwNDU1NlqgDzANMAsGA1UdFAQEAgIQADAN
+BgkqhkiG9w0BAQsFAAOCAQEAFOhtUfwOZggisk372npfTdGggPAY88XKxwVscFn6
+1ZZo+scdfvtTO0qP7btRBOj729e4ltnijbtUzBFgyCDqgShf4evWjJS/QuB/oxMM
+dgXy8DSYo+hkdEzLvzm7+tUtcgLR+lYVWRK3/6PMydYUykoeC7RHz1iw5STSIXEN
+LQl3XC/vQPh0kAPMNy7qaiVZwL9IkABVnNu/H/B7tlqQlLaNfH27LRFfDPVKm8Xt
+q+P9Nch2Oy5By992tfTpBXL2Vnr8NAfWolXrfFgzW50+sgOJAcbRVHUaXHM/Xi79
+Ozjt1OH67P+E8FXug+DwE5fn8FWMAKMaMeQxnmjQbT6BsA==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_bad_first.journal b/src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_bad_first.journal
new file mode 100644
index 00000000000..687317844a7
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_bad_first.journal
Binary files differ
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_bad_last.journal b/src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_bad_last.journal
new file mode 100644
index 00000000000..7dd98e2c97b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_bad_last.journal
Binary files differ
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_good.journal b/src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_good.journal
new file mode 100644
index 00000000000..d76790d2451
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/dur_checksum_good.journal
Binary files differ
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/expired.pem b/src/mongo/gotools/test/legacy28/jstests/libs/expired.pem
new file mode 100644
index 00000000000..e1d2ceb8de8
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/expired.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDfzCCAmegAwIBAgIBEDANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzEwMTYwMDAwWhcNMTQwNzE2MTYwMDAwWjBtMRAwDgYDVQQD
+EwdleHBpcmVkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIxFjAU
+BgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQG
+EwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPFSQZlHvJpi3dmA
+1X5U1qaUN/O/EQy5IZ5Rw+cfFHWOZ84EsLZxehWyqDZRH49Rg06xSYdO2WZOopP8
+OnUVCLGL819K83ikZ5sCbvB/gKCSCenwveEN992gJfs70HaZfiJNC7/cFigSb5Jg
+5G77E1/Uml4hIThfYG2NbCsTuP/P4JLwuzCkfgEUWRbCioMPEpIpxQw2LCx5DCy6
+Llhct0Hp14N9dZ4nA1h1621wOckgGJHw9DXdt9rGzulY1UgOOPczyqT08CdpaVxK
+VzrJCcUxfUjhO4ukHz+LBFQY+ZEm+tVboDbinbiHxY24urP46/u+BwRvBvjOovJi
+NVUh5GsCAwEAAaMjMCEwHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcuMC4wLjEw
+DQYJKoZIhvcNAQEFBQADggEBAG3rRSFCSG3hilGK9SMtTpFnrquJNlL+yG0TP8VG
+1qVt1JGaDJ8YUc5HXXtKBeLnRYii7EUx1wZIKn78RHRdITo5OJvlmcwwh0bt+/eK
+u9XFgR3z35w5UPr/YktgoX39SOzAZUoorgNw500pfxfneqCZtcRufVvjtk8TUdlN
+lcd2HfIxtUHWJeTcVM18g0JdHMYdMBXDKuXOW9VWLIBC2G6nAL/8SZJtUaDllPb4
+NisuIGjfjGgNxMpEXn+sQjFTupAoJru21OtAgERWFJhKQ0hbO0kucEPKEfxHDBVG
+dKSRIl6b0XSDLfxEXPv5ZhdrK4KEw1dYYXySvIVXtn0Ys38=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA8VJBmUe8mmLd2YDVflTWppQ3878RDLkhnlHD5x8UdY5nzgSw
+tnF6FbKoNlEfj1GDTrFJh07ZZk6ik/w6dRUIsYvzX0rzeKRnmwJu8H+AoJIJ6fC9
+4Q333aAl+zvQdpl+Ik0Lv9wWKBJvkmDkbvsTX9SaXiEhOF9gbY1sKxO4/8/gkvC7
+MKR+ARRZFsKKgw8SkinFDDYsLHkMLLouWFy3QenXg311nicDWHXrbXA5ySAYkfD0
+Nd232sbO6VjVSA449zPKpPTwJ2lpXEpXOskJxTF9SOE7i6QfP4sEVBj5kSb61Vug
+NuKduIfFjbi6s/jr+74HBG8G+M6i8mI1VSHkawIDAQABAoIBAGAO1QvVkU6HAjX8
+4X6a+KJwJ2F/8aJ14trpQyixp2wv1kQce9bzjpwqdGjCm+RplvHxAgq5KTJfJLnx
+UbefOsmpoqOQ6x9fmdoK+uwCZMoFt6qGaJ63960hfVzm71D2Qk4XCxFA4xTqWb0T
+knpWuNyRfSzw1Q9ib7jL7X2sKRyx9ZP+1a41ia/Ko6iYPUUnRb1Ewo10alYVWVIE
+upeIlWqv+1DGfda9f34pGVh3ldIDh1LHqaAZhdn6sKtcgIUGcWatZRmQiA5kSflP
+VBpOI2c2tkQv0j5cPGwD7GGaJ2aKayHG0EwnoNmxCeR0Ay3MO0vBAsxn7Wy6yqrS
+EfkYhFkCgYEA/OA2AHFIH7mE0nrMwegXrEy7BZUgLRCRFWTjxwnCKFQj2Uo2dtYD
+2QQKuQWeiP+LD2nHj4n1KXuSJiB1GtmEF3JkYV4Wd7mPWEVNDHa0G8ZndquPK40s
+YSjh9u0KesUegncBFfIiwzxsk9724iaXq3aXOexc0btQB2xltRzj6/0CgYEA9E2A
+QU6pnCOzGDyOV7+TFr0ha7TXaMOb5aIVz6tJ7r5Nb7oZP9T9UCdUnw2Tls5Ce5tI
+J23O7JqwT4CudnWnk5ZtVtGBYA23mUryrgf/Utfg08hU2uRyq9LOxVaVqfV/AipN
+62GmfuxkK4PatOcAOhKqmS/zGfZqIg7V6rtX2ocCgYEAlY1ogpR8ij6mvfBgPmGr
+9nues+uBDwXYOCXlzCYKTN2OIgkQ8vEZb3RDfy9CllVDgccWfd6iPnlVcvUJLOrt
+gwxlL2x8ryvwCc1ahv+A/1g0gmtuDdy9HW0XTnjcFMWViKUm4DrGsl5+/GkF67PV
+SVOmllwifOthpjJGaHmAlmUCgYB6EFMZzlzud+PfIzqX20952Avfzd6nKL03EjJF
+rbbmA82bGmfNPfVHXC9qvRTWD76mFeMKWFJAY9XeE1SYOZb+JfYBn/I9dP0cKZdx
+nutSkCx0hK7pI6Wr9kt7zBRBdDj+cva1ufe/iQtPtrTLGHRDj9oPaibT/Qvwcmst
+umdd9wKBgQDM7j6Rh7v8AeLy2bw73Qtk0ORaHqRBHSQw87srOLwtfQzE92zSGMj+
+FVt/BdPgzyaddegKvJ9AFCPAxbA8Glnmc89FO7pcXn9Wcy+ZoZIF6YwgUPhPCp/4
+r9bKuXuQiutFbKyes/5PTXqbJ/7xKRZIpQCvxg2syrW3hxx8LIx/kQ==
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/fts.js b/src/mongo/gotools/test/legacy28/jstests/libs/fts.js
new file mode 100644
index 00000000000..73b7d339ba5
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/fts.js
@@ -0,0 +1,21 @@
+
+function queryIDS( coll, search, filter, extra ){
+ var cmd = { search : search }
+ if ( filter )
+ cmd.filter = filter;
+ if ( extra )
+ Object.extend( cmd, extra );
+ lastCommandResult = coll.runCommand( "text" , cmd);
+
+ return getIDS( lastCommandResult );
+}
+
+function getIDS( commandResult ){
+ if ( ! ( commandResult && commandResult.results ) )
+ return []
+
+ return commandResult.results.map( function(z){ return z.obj._id; } )
+}
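+
+// Example usage (hypothetical collection with a legacy "text" index):
+//   var ids = queryIDS(db.articles, "mongodb", { tag: "news" });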
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/geo_near_random.js b/src/mongo/gotools/test/legacy28/jstests/libs/geo_near_random.js
new file mode 100644
index 00000000000..248f5e49a6c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/geo_near_random.js
@@ -0,0 +1,105 @@
+GeoNearRandomTest = function(name) {
+ this.name = name;
+ this.t = db[name];
+ this.nPts = 0;
+
+ // reset state
+ this.t.drop();
+ Random.srand(1234);
+
+ print("starting test: " + name);
+}
+
+
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds){
+ if(!indexBounds){
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+ }
+ else{
+ var range = indexBounds.max - indexBounds.min;
+ var eps = Math.pow(2, -40);
+ // Go very close to the borders but not quite there.
+ return [( Random.rand() * (range - eps) + eps) + indexBounds.min, ( Random.rand() * (range - eps) + eps ) + indexBounds.min];
+ }
+
+}
+
+GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
+ assert.eq(this.nPts, 0, "insertPts already called");
+ this.nPts = nPts;
+
+ var bulk = this.t.initializeUnorderedBulkOp();
+ for (var i=0; i<nPts; i++){
+ bulk.insert({ _id: i, loc: this.mkPt(scale, indexBounds) });
+ }
+ assert.writeOK(bulk.execute());
+
+ if(!indexBounds)
+ this.t.ensureIndex({loc: '2d'});
+ else
+ this.t.ensureIndex({loc: '2d'}, indexBounds)
+}
+
+GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
+ for (var i=0; i < short.length; i++){
+
+ var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0]
+ var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1]
+ var dS = short[i].obj ? short[i].dis : 1
+
+ var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0]
+ var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1]
+ var dL = long[i].obj ? long[i].dis : 1
+
+ assert.eq([xS, yS, dS], [xL, yL, dL]);
+ }
+}
+
+GeoNearRandomTest.prototype.testPt = function(pt, opts) {
+ assert.neq(this.nPts, 0, "insertPts not yet called");
+
+ opts = opts || {};
+ opts['sphere'] = opts['sphere'] || 0;
+ opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O( N^2 )
+
+ print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
+
+
+ var cmd = {geoNear:this.t.getName(), near: pt, num: 1, spherical:opts.sphere};
+
+ var last = db.runCommand(cmd).results;
+ for (var i=2; i <= opts.nToTest; i++){
+ //print(i); // uncomment to watch status
+ cmd.num = i
+ var ret = db.runCommand(cmd).results;
+
+ try {
+ this.assertIsPrefix(last, ret);
+ } catch (e) {
+ print("*** failed while compairing " + (i-1) + " and " + i);
+ printjson(cmd);
+ throw e; // rethrow
+ }
+
+ last = ret;
+ }
+
+
+ if (!opts.sharded){
+ last = last.map(function(x){return x.obj});
+
+ var query = {loc:{}};
+ query.loc[ opts.sphere ? '$nearSphere' : '$near' ] = pt;
+ var near = this.t.find(query).limit(opts.nToTest).toArray();
+
+ this.assertIsPrefix(last, near);
+ assert.eq(last, near);
+ }
+}
+
+
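+// Example usage (hypothetical):
+//   var t = new GeoNearRandomTest("geo_near_random_example");
+//   t.insertPts(50);
+//   t.testPt(t.mkPt());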
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/host_ipaddr.js b/src/mongo/gotools/test/legacy28/jstests/libs/host_ipaddr.js
new file mode 100644
index 00000000000..7db1417e977
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/host_ipaddr.js
@@ -0,0 +1,41 @@
+// Returns non-localhost ipaddr of host running the mongo shell process
+function get_ipaddr() {
+ // set temp path, if it exists
+ var path = "";
+ try {
+ path = TestData.tmpPath;
+ if (typeof path == "undefined") {
+ path = "";
+ } else if (path.slice(-1) != "/") {
+ // Terminate path with / if defined
+ path += "/";
+ }
+ }
+ catch (err) {}
+
+ var ipFile = path+"ipaddr.log";
+ var windowsCmd = "ipconfig > "+ipFile;
+ var unixCmd = "/sbin/ifconfig | grep inet | grep -v '127.0.0.1' > "+ipFile;
+ var ipAddr = null;
+ var hostType = null;
+
+ try {
+ hostType = getBuildInfo().sysInfo.split(' ')[0];
+
+ // os-specific methods
+ if (hostType == "windows") {
+ runProgram('cmd.exe', '/c', windowsCmd);
+ ipAddr = cat(ipFile).match(/IPv4.*: (.*)/)[1];
+ } else {
+ runProgram('bash', '-c', unixCmd);
+ ipAddr = cat(ipFile).replace(/addr:/g, "").match(/inet (.[^ ]*) /)[1];
+ }
+ }
+ finally {
+ removeFile(ipFile);
+ }
+ return ipAddr;
+}
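+
+// Example (hypothetical):
+//   var ip = get_ipaddr();  // e.g. "192.168.1.10"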
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/key1 b/src/mongo/gotools/test/legacy28/jstests/libs/key1
new file mode 100644
index 00000000000..b5c19e4092f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/key1
@@ -0,0 +1 @@
+foop de doop
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/key2 b/src/mongo/gotools/test/legacy28/jstests/libs/key2
new file mode 100644
index 00000000000..cbde8212841
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/key2
@@ -0,0 +1 @@
+other key
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/localhostnameCN.pem b/src/mongo/gotools/test/legacy28/jstests/libs/localhostnameCN.pem
new file mode 100644
index 00000000000..e6aca6a217d
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/localhostnameCN.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDgTCCAmmgAwIBAgIBBTANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBvMRIwEAYDVQQD
+EwkxMjcuMC4wLjExDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29EQjEW
+MBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJBgNV
+BAYTAlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAiqQNGgQggL8S
+LlxRgpM3qoktt3J9Pd3WXkknz7OjQr4dyj9wd0VNuD6+Ycg1mBbopO+M/K/ZWv8c
+7WDMM13DDZtpWjm+Q6uFc+vlI9Q9bLHgsZF9kVGRb2bNAfV2bC5/UnO1ulQdHoIB
+p3e/Jtko2WmruzVQFGVMBGCY7RlMRyxe3R8rwNNFvooMRg8yq8tq0qBkVhh85kct
+HHPggXD4/uM2Yc/Q94t5MhDFabewdzsFOLqvI/Y5eIeZLhdIzAv37kolr8AuyqIR
+qcJKztXIrFbLgEZBjoCNkOYZOQE+l8iwwiZxoZN9Jv7x5LyiA+ijtQ+5aI/kMPqG
+nox+/bNFCQIDAQABoyMwITAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAu
+MTANBgkqhkiG9w0BAQUFAAOCAQEAVJJNuUVzMRaft17NH6AzMSTiJxMFWoafmYgx
+jZnzA42XDPoPROuN7Bst6WVYDNpPb1AhPDco9qDylSZl0d341nHAuZNc84fD0omN
+Mbqieu8WseRQ300cbnS8p11c9aYpO/fNQ5iaYhGsRT7pnLs9MIgR468KVjY2xt49
+V0rshG6RxZj83KKuJd0T4X+5UeYz4B677y+SR0aoK2I2Sh+cffrMX2LotHc2I+JI
+Y9SDLvQT7chD9GzaWz634kmy3EEY0LreMm6AxhMOsr0lbZx5O8wLTScSjKARJ6OH
+nPxM1gYT07mkNmfyEnl1ChAN0MPgcLHQqEfe7x7ZQSbAv2gWfA==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAiqQNGgQggL8SLlxRgpM3qoktt3J9Pd3WXkknz7OjQr4dyj9w
+d0VNuD6+Ycg1mBbopO+M/K/ZWv8c7WDMM13DDZtpWjm+Q6uFc+vlI9Q9bLHgsZF9
+kVGRb2bNAfV2bC5/UnO1ulQdHoIBp3e/Jtko2WmruzVQFGVMBGCY7RlMRyxe3R8r
+wNNFvooMRg8yq8tq0qBkVhh85kctHHPggXD4/uM2Yc/Q94t5MhDFabewdzsFOLqv
+I/Y5eIeZLhdIzAv37kolr8AuyqIRqcJKztXIrFbLgEZBjoCNkOYZOQE+l8iwwiZx
+oZN9Jv7x5LyiA+ijtQ+5aI/kMPqGnox+/bNFCQIDAQABAoIBAQAMiUT+Az2FJsHY
+G1Trf7Ba5UiS+/FDNNn7cJX++/lZQaOj9BSRVFzwuguw/8+Izxl+QIL5HlWDGupc
+tJICWwoWIuVl2S7RI6NPlhcEJF7hgzwUElnOWBfUgPEsqitpINM2e2wFSzHO3maT
+5AoO0zgUYK+8n9d74KT9CFcLqWvyS3iksK/FXfCZt0T1EoJ4LsDjeCTfVKqrku2U
++fCnZZYNkrgUI7Hku94EJfOh462V4KQAUGsvllwb1lfmR5NR86G6VX6oyMGctL5e
+1M6XQv+JQGEmAe6uULtCUGh32fzwJ9Un3j2GXOHT0LWrVc5iLuXwwzQvCGaMYtKm
+FAIDpPxhAoGBAMtwzpRyhf2op/REzZn+0aV5FWKjeq69Yxd62RaOf2EetcPwvUOs
+yQXcP0KZv15VWU/XhZUmTkPf52f0YHV/b1Sm6wUOiMNQ4XpnRj2THf0N7RS4idMm
+VwtMf1pxqttxQVKPpOvPEiTyIh2Nx/juyfD4CWkOVNTvOCd1w+av6ukNAoGBAK51
+gIXDuwJ2e5h3IJyewN/HOZqlgPKyMjnACaeXQ5wPJSrz4+UkJkuXT2dYKhv6u7K/
+GtucTdvBIJeq61+LjjkYk7OVDzoqP/uWU7p1y7gU9LZq+7tgq7r8cgeaC3IBQe7X
+jdFPEy1+zAEBh6MfFjnLZ2Kop9qbH3cNih/g9pTtAoGBAJ8dmdUtRXNByCsa7Rv2
+243qiDlf14J4CdrBcK1dwm75j/yye7VEnO2Cd8/lZHGpm3MBBC/FiA06QElkL1V2
+2GKDMun/liP9TH1p7NwYBqp3i+ha9SE6qXXi3PCmWpXLnOWwB7OPf4d6AgjPbYpb
+aYKY3PNYDC2G9IqYZyI0kSy5AoGBAJ5Fe5PfPom9c+OeL7fnTpO16kyiWZnUkDxU
+PG4OjQfHtbCCEv6PDS8G1sKq+Yjor+A5/+O8qeX0D92I8oB720txQI5rbKUYL3PP
+raY7t9YJLPlRlY8o5KN+4vSCjF+hRG+qnr6FPqDHp8xB1wvl6AQGxIR8/csVcDZR
+0j2ZmhsBAoGAO1Cpk/hWXOLAhSj8P8Q/+3439HEctTZheVBd8q/TtdwXocaZMLi8
+MXURuVTw0GtS9TmdqOFXzloFeaMhJx6TQzZ2aPcxu95b7RjEDtVHus3ed2cSJ2El
+AuRvFT2RCVvTu1mM0Ti7id+d8QBcpbIpPjNjK2Wxir/19gtEawlqlkA=
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/localhostnameSAN.pem b/src/mongo/gotools/test/legacy28/jstests/libs/localhostnameSAN.pem
new file mode 100644
index 00000000000..480300f29e1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/localhostnameSAN.pem
@@ -0,0 +1,49 @@
+-----BEGIN CERTIFICATE-----
+MIIDpDCCAoygAwIBAgIBBjANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB5MRwwGgYDVQQD
+ExNzYW50ZXN0aG9zdG5hbWUuY29tMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoT
+B01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZ
+b3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AJKOLTNEPv08IVmhfkv6Xq1dT6pki76ggpJ7UpwdUSsTsWDKO2o1c7wnzEjfhYQ+
+CtlEvbYyL3O7f8AaO15WJdi53SMuWS+QfCKs6b0symYbinSXlZGb4oZYFSrodSxH
++G8u+TUxyeaXgTHowMWArmTRi2LgtIwXwwHJawfhFDxji3cSmLAr5YQMAaXUynq3
+g0DEAGMaeOlyn1PkJ2ZfJsX2di+sceKb+KK1xT+2vUSsvnIumBCYqMhU6y3WjBWK
+6WrmOcsldWo4IcgyzwVRlZiuuYoe6ZsxZ4nMyTdYebALPqgkt8QVXqkgcjWK8F18
+nuqWIAn1ISTjj73H4cnzYv0CAwEAAaM8MDowOAYDVR0RBDEwL4INKi5leGFtcGxl
+LmNvbYIJMTI3LjAuMC4xgghtb3JlZnVuIYIJbG9jYWxob3N0MA0GCSqGSIb3DQEB
+BQUAA4IBAQA5M3U4wvQYI3jz/+Eh4POrJAs9eSRGkUhz1lP7D6Fcyp+BbbXB1fa9
+5qpD4bp1ZoDP2R2zca2uwwfd3DTWPbmwFMNqs2D7d0hgX71Vg9DCAwExFjoeRo44
+cCE9kakZtE3kT/tiH6SpYpnBa3dizxTmiY48z212Pw813SSXSPMN1myx5sMJof5I
+whJNQhSQOw6WHw5swZJZT4FkzxjQMrTWdF6r0d5EU9K2WWk5DTwq4QaysplB5l0H
+8qm+fnC6xI+2qgqMO9xqc6qMtHHICXtdUOup6wj/bdeo7bAQdVDyKlFKiYivDXvO
+RJNp2cwsBgxU+qdrtOLp7/j/0R3tUqWb
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAko4tM0Q+/TwhWaF+S/perV1PqmSLvqCCkntSnB1RKxOxYMo7
+ajVzvCfMSN+FhD4K2US9tjIvc7t/wBo7XlYl2LndIy5ZL5B8IqzpvSzKZhuKdJeV
+kZvihlgVKuh1LEf4by75NTHJ5peBMejAxYCuZNGLYuC0jBfDAclrB+EUPGOLdxKY
+sCvlhAwBpdTKereDQMQAYxp46XKfU+QnZl8mxfZ2L6xx4pv4orXFP7a9RKy+ci6Y
+EJioyFTrLdaMFYrpauY5yyV1ajghyDLPBVGVmK65ih7pmzFniczJN1h5sAs+qCS3
+xBVeqSByNYrwXXye6pYgCfUhJOOPvcfhyfNi/QIDAQABAoIBADqGMkClwS2pJHxB
+hEjc+4/pklWt/ywpttq+CpgzEOXN4GiRebaJD+WUUvzK3euYRwbKb6PhWJezyWky
+UID0j/qDBm71JEJdRWUnfdPAnja2Ss0Sd3UFNimF5TYUTC5ZszjbHkOC1WiTGdGP
+a+Oy5nF2SF4883x6RLJi963W0Rjn3jIW9LoLeTgm9bjWXg3iqonCo3AjREdkR/SG
+BZaCvulGEWl/A3a7NmW5EGGNUMvzZOxrqQz4EX+VnYdb7SPrH3pmQJyJpAqUlvD5
+y7pO01fI0wg9kOWiIR0vd3Gbm9NaFmlH9Gr2oyan3CWt1h1gPzkH/V17rZzVYb5L
+RnjLdyECgYEA6X16A5Gpb5rOVR/SK/JZGd+3z52+hRR8je4WhXkZqRZmbn2deKha
+LKZi1eVl11t8zitLg/OSN1uZ/873iESKtp/R6vcGcriUCd87cDh7KTyW/7ZW5jdj
+o6Y3Liai3Xrf6dL+V2xYw964Map9oK9qatYw/L+Ke6b9wbGi+hduf1kCgYEAoK8n
+pzctajS3Ntmk147n4ZVtcv78nWItBNH2B8UaofdkBlSRyUURsEY9nA34zLNWI0f3
+k59+cR13iofkQ0rKqJw1HbTTncrSsFqptyEDt23iWSmmaU3/9Us8lcNGqRm7a35V
+Km0XBFLnE0mGFGFoTpNt8oiR4WGASJPi482xkEUCgYEAwPmQn2SDCheDEr2zAdlR
+pN3O2EwCi5DMBK3TdUsKV0KJNCajwHY72Q1HQItQ6XXWp7sGta7YmOIfXFodIUWs
+85URdMXnUWeWCrayNGSp/gHytrNoDOuYcUfN8VnDX5PPfjyBM5X7ox7vUzUakXSJ
+WnVelXZlKR9yOOTs0xAMpjkCgYAbF61N6mXD5IOHwgajObsrM/CyVP/u4WDJ0UT0
+Zm1pJbc9wgCauQSUfiNhLpHmoc5CQJ4jy96b3+YJ+4OnPPMSntPt4FFV557CkWbQ
+M8bWpLZnZjhixP4FM9xRPA2r8WTCaRifAKnC1t+TRvBOe2YE6aK+I/zEzZW9pwG4
+ezQXKQKBgQAIBSJLa6xWbfbzqyPsvmRNgiEjIamF7wcb1sRjgqWM6sCzYwYv8f5v
+9C4YhNXEn+c5V2KevgYeg6iPSQuzEAfJx64QV7JD8kEBf5GNETnuW45Yg7KwKPD6
+ZCealfpy/o9iiNqbWqDNND91pj2/g5oZnac3misJg5tGCJbJsBFXag==
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/mockkrb5.conf b/src/mongo/gotools/test/legacy28/jstests/libs/mockkrb5.conf
new file mode 100644
index 00000000000..0f004f2de8a
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/mockkrb5.conf
@@ -0,0 +1,13 @@
+[libdefaults]
+ default_realm = 10GEN.ME
+
+[realms]
+ 10GEN.ME = {
+ kdc = kdc.10gen.me
+ admin_server = kdc.10gen.me
+ default_domain = 10gen.me
+ }
+
+[domain_realm]
+ .10gen.me = 10GEN.ME
+ 10gen.me = 10GEN.ME
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/mockservice.keytab b/src/mongo/gotools/test/legacy28/jstests/libs/mockservice.keytab
new file mode 100644
index 00000000000..3529d5fcbc6
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/mockservice.keytab
Binary files differ
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/mockuser.keytab b/src/mongo/gotools/test/legacy28/jstests/libs/mockuser.keytab
new file mode 100644
index 00000000000..35fd2ff06e7
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/mockuser.keytab
Binary files differ
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/not_yet_valid.pem b/src/mongo/gotools/test/legacy28/jstests/libs/not_yet_valid.pem
new file mode 100644
index 00000000000..7c021c0becd
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/not_yet_valid.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDhTCCAm2gAwIBAgIBETANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMjAwNzE3MTYwMDAwWhcNMjUwNzE3MTYwMDAwWjBzMRYwFAYDVQQD
+Ew1ub3RfeWV0X3ZhbGlkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdv
+REIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQsw
+CQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM2gF+Fo
+CeBKVlPyDAaEA7cjK75CxnzQy+oqw1j/vcfe/CfKL9MvDDXauR/9v1RRlww5zlxQ
+XJJtcMJtxN1EpP21cHrHCpJ/fRsCdMfJdD9MO6gcnclEI0Odwy5YI/57rAgxEuDC
+7z4d+M6z7PLq8DIwvRuhAZVTszeyTsCCkwfTJ/pisD2Ace75pS37t/ttQp+kQ+Vl
+QrfccHYxrScQ9i0JqBfrTULDl6ST76aINOaFKWqrLLkRUvE6pEkL/iP6xXUSKOsm
+uyc0yb0PK5Y/IVdrzwWUkabWEM27RAMH+CAx2iobk6REj0fsGySBzT2CaETZPjck
+vn/LYKqr+CvYjc8CAwEAAaMjMCEwHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcu
+MC4wLjEwDQYJKoZIhvcNAQEFBQADggEBADw37jpmhj/fgCZdF1NrDKLmWxb4hovQ
+Y9PRe6GsBOc1wH8Gbe4UkYAE41WUuT3xW9YpfCHLXxC7da6dhaBISWryX7n72abM
+xbfAghV3je5JAmC0E/OzQz8tTgENxJN/c4oqCQ9nVOOLjwWiim5kF0/NY8HCc/Sg
+OG9IdseRX72CavDaPxcqR9/5KKY/pxARMeyy3/D0FIB1Fwu5h9vjHEi5fGOqcizf
+S1KHfzAmTxVtjw6HWRGKmkPX0W0/lURWVkKRxvC8KkJIeKx3fl9U1PqCw0AVi5d/
+whYn4qHNFFp4OiVzXq3b5YoBy0dlHUePCIPT2GkGlV4NQKosZMJUkKo=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAzaAX4WgJ4EpWU/IMBoQDtyMrvkLGfNDL6irDWP+9x978J8ov
+0y8MNdq5H/2/VFGXDDnOXFBckm1wwm3E3USk/bVwescKkn99GwJ0x8l0P0w7qByd
+yUQjQ53DLlgj/nusCDES4MLvPh34zrPs8urwMjC9G6EBlVOzN7JOwIKTB9Mn+mKw
+PYBx7vmlLfu3+21Cn6RD5WVCt9xwdjGtJxD2LQmoF+tNQsOXpJPvpog05oUpaqss
+uRFS8TqkSQv+I/rFdRIo6ya7JzTJvQ8rlj8hV2vPBZSRptYQzbtEAwf4IDHaKhuT
+pESPR+wbJIHNPYJoRNk+NyS+f8tgqqv4K9iNzwIDAQABAoIBAFWTmjyyOuIArhrz
+snOHv7AZUBw32DmcADGtqG1Cyi4DrHe22t6ORwumwsMArP8fkbiB2lNrEovSRkp0
+uqjH5867E1vVuJ2tt1hlVkrLmbi6Nl3JwxU/aVm7r7566kgAGmGyYsPt/PmiKamF
+Ekkq49pPlHSKNol6My0r5UCTVzO6uwW7dAa4GOQRI7bM7PVlxRVVeNzPH3yOsTzk
+smrkRgf8HbjtY7m/EHG281gu14ZQRCqzLshO2BtWbkx9dMXnNU5dRRaZ8Pe8XN0Z
+umsStcX6So6VFAqlwknZTi1/sqyIuQLfE+S9DocVQkvKFUgKpFddK8Nmqc8xPCKt
+UwR9hEECgYEA9kZ5KmUbzxQrF8Kn9G18AbZ/Cf6rE9fhs/J8OGcuuJ9QTjPO7pxV
+T7lGrIOX3dVu3+iHrYXZUZv+UTOePWx+ghqJ8ML7RdVsxAWMqh+1J0eBJKIdc9mt
+0hGkLEyyBbAlfNmvw8JugTUeZH2gA+VK9HoMTAjD+LvH164rrktauKECgYEA1b6z
+lZypAbAqnuCndcetcgatdd/bYNH5WWTgdZHqInt3k94EsUEHFNMQUbO+FNkOJ4qJ
+Jp7xrqkOUX+MPrzV5XYVapamlht9gvUtyxGq7DYndlq4mIsN5kReH++lqONBnWoG
+ZlbxvadkvPo+bK003hsl+E4F8X7xUssGGLvygG8CgYEAm/yLJkUgVgsqOER86R6n
+mtYipQv/A/SK6tU9xOPl/d46mS3LderjRjnN/9rhyAo1zfCUb14GBeDONlSBd9pO
+Ts3MbQiy6sqBt67kJ6UpspVhwPhFu2k25YVy/PQfFec591hSMaXnJEOm2nOPdKg4
+z5y2STqMFfGqZHvXAvCLp8ECgYA8oVGTmNKf9fbBBny5/iAG/jnp+8vg1O7kGqdI
+8lD14wvyV8IA/a8iixRP+Kpsg31uXe+1ktR/dNjo6UNA8JPD+RDuITmzzqx1n1KU
+DbjsNBhRjD5cluUkcjQ43uOg2oXcPxz9nqAH6hm7OUjHzwH2FsFYg9lPvXB6ybg6
+/+Uz5QKBgBxvTtLsZ3Cvvb3qezn4DdpLjlsrT6HWaTGqwEx8NYVBTFX/lT8P04tv
+NqFuQsDJ4gw0AZF7HqF49qdpnHEJ8tdHgBc/xDLFUMuKjON4IZtr0/j407K6V530
+m4q3ziHOu/lORDcZTz/YUjEzT8r7Qiv7QusWncvIWEiLSCC2dvvb
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/parallelTester.js b/src/mongo/gotools/test/legacy28/jstests/libs/parallelTester.js
new file mode 100644
index 00000000000..8c44d2df553
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/parallelTester.js
@@ -0,0 +1,270 @@
+/**
+ * The ParallelTester class is used to run more than one test concurrently
+ */
+
+
+if ( typeof _threadInject != "undefined" ){
+ //print( "fork() available!" );
+
+ Thread = function(){
+ this.init.apply( this, arguments );
+ }
+ _threadInject( Thread.prototype );
+
+ ScopedThread = function() {
+ this.init.apply( this, arguments );
+ }
+ ScopedThread.prototype = new Thread( function() {} );
+ _scopedThreadInject( ScopedThread.prototype );
+
+ fork = function() {
+ var t = new Thread( function() {} );
+ Thread.apply( t, arguments );
+ return t;
+ }
+
+ // Helper class to generate a list of events which may be executed by a ParallelTester
+ EventGenerator = function( me, collectionName, mean, host ) {
+ this.mean = mean;
+ if (host == undefined) host = db.getMongo().host;
+ this.events = new Array( me, collectionName, host );
+ }
+
+ EventGenerator.prototype._add = function( action ) {
+ this.events.push( [ Random.genExp( this.mean ), action ] );
+ }
+
+ EventGenerator.prototype.addInsert = function( obj ) {
+ this._add( "t.insert( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addRemove = function( obj ) {
+ this._add( "t.remove( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addUpdate = function( objOld, objNew ) {
+ this._add( "t.update( " + tojson( objOld ) + ", " + tojson( objNew ) + " )" );
+ }
+
+ EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {
+ query = query || {};
+ shouldPrint = shouldPrint || false;
+ checkQuery = checkQuery || false;
+ var action = "assert.eq( " + count + ", t.count( " + tojson( query ) + " ) );"
+ if ( checkQuery ) {
+ action += " assert.eq( " + count + ", t.find( " + tojson( query ) + " ).toArray().length );"
+ }
+ if ( shouldPrint ) {
+ action += " print( me + ' ' + " + count + " );";
+ }
+ this._add( action );
+ }
+
+ EventGenerator.prototype.getEvents = function() {
+ return this.events;
+ }
+
+ EventGenerator.dispatch = function() {
+ var args = argumentsToArray( arguments );
+ var me = args.shift();
+ var collectionName = args.shift();
+ var host = args.shift();
+ var m = new Mongo( host );
+ var t = m.getDB( "test" )[ collectionName ];
+ for( var i in args ) {
+ sleep( args[ i ][ 0 ] );
+ eval( args[ i ][ 1 ] );
+ }
+ }
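+
+ // Example (hypothetical): build a small timed workload; the resulting
+ // events can be dispatched via pt.add(EventGenerator.dispatch, gen.getEvents()):
+ //   var gen = new EventGenerator("worker1", "mycoll", 100);
+ //   gen.addInsert({ _id: 1 });
+ //   gen.addCheckCount(1, { _id: 1 });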
+
+ // Helper class for running tests in parallel. It assembles a set of tests
+ // and then calls assert.parallelTests to run them.
+ ParallelTester = function() {
+ assert.neq(db.getMongo().writeMode(), "legacy", "wrong shell write mode")
+ this.params = new Array();
+ }
+
+ ParallelTester.prototype.add = function( fun, args ) {
+ args = args || [];
+ args.unshift( fun );
+ this.params.push( args );
+ }
+
+ ParallelTester.prototype.run = function( msg, newScopes ) {
+ newScopes = newScopes || false;
+ assert.parallelTests( this.params, msg, newScopes );
+ }
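+
+ // Example (hypothetical):
+ //   var pt = new ParallelTester();
+ //   pt.add(function(n) { assert.eq(n, 1); }, [1]);
+ //   pt.run("parallel smoke test");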
+
+ // creates lists of tests from jstests dir in a format suitable for use by
+ // ParallelTester.fileTester. The lists will be in random order.
+ // n: number of lists to split these tests into
+ ParallelTester.createJstestsLists = function( n ) {
+ var params = new Array();
+ for( var i = 0; i < n; ++i ) {
+ params.push( [] );
+ }
+
+ var makeKeys = function( a ) {
+ var ret = {};
+ for( var i in a ) {
+ ret[ a[ i ] ] = 1;
+ }
+ return ret;
+ }
+
+ // some tests can't run in parallel with most others
+ var skipTests = makeKeys([ "dbadmin.js",
+ "repair.js",
+ "cursor8.js",
+ "recstore.js",
+ "extent.js",
+ "indexb.js",
+
+ // tests turn on profiling
+ "profile1.js",
+ "profile3.js",
+ "profile4.js",
+ "profile5.js",
+
+ "mr_drop.js",
+ "mr3.js",
+ "indexh.js",
+ "apitest_db.js",
+ "evalb.js",
+ "evald.js",
+ "evalf.js",
+ "killop.js",
+ "run_program1.js",
+ "notablescan.js",
+ "drop2.js",
+ "dropdb_race.js",
+ "fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed.
+ "bench_test1.js",
+ "padding.js",
+ "queryoptimizera.js",
+ "loglong.js",// log might overflow before
+ // this has a chance to see the message
+ "connections_opened.js", // counts connections, globally
+ "opcounters_write_cmd.js",
+ "currentop.js", // SERVER-8673, plus rwlock yielding issues
+ "set_param1.js", // changes global state
+ "geo_update_btree2.js", // SERVER-11132 test disables table scans
+ "update_setOnInsert.js", // SERVER-9982
+ ] );
+
+ var parallelFilesDir = "jstests/core";
+
+ // some tests can't be run in parallel with each other
+ var serialTestsArr = [ parallelFilesDir + "/fsync.js",
+ parallelFilesDir + "/auth1.js",
+
+ // These tests expect the profiler to be on or off at specific points
+ // during the test run.
+ parallelFilesDir + "/cursor6.js",
+ parallelFilesDir + "/profile2.js",
+ parallelFilesDir + "/updatee.js"
+ ];
+ var serialTests = makeKeys( serialTestsArr );
+
+ // prefix the first thread with the serialTests
+ // (which we will exclude from the rest of the threads below)
+ params[ 0 ] = serialTestsArr;
+ var files = listFiles( parallelFilesDir );
+ files = Array.shuffle( files );
+
+ var i = 0;
+ files.forEach(
+ function(x) {
+ if ( ( /[\/\\]_/.test(x.name) ) ||
+ ( ! /\.js$/.test(x.name) ) ||
+ ( x.name.match(parallelFilesDir + "/(.*\.js)")[1] in skipTests ) || //
+ ( x.name in serialTests )) {
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+ // add the test to run in one of the threads.
+ params[ i % n ].push( x.name );
+ ++i;
+ }
+ );
+
+ // randomize ordering of the serialTests
+ params[ 0 ] = Array.shuffle( params[ 0 ] );
+
+ for( var i in params ) {
+ params[ i ].unshift( i );
+ }
+
+ return params;
+ }
+
+ // runs a set of test files
+ // first argument is an identifier for this tester, remaining arguments are file names
+ ParallelTester.fileTester = function() {
+ var args = argumentsToArray( arguments );
+ var suite = args.shift();
+ args.forEach(
+ function( x ) {
+ print(" S" + suite + " Test : " + x + " ...");
+ var time = Date.timeFunc( function() { load(x); }, 1);
+ print(" S" + suite + " Test : " + x + " " + time + "ms" );
+ }
+ );
+ }
+
+ // params: array of arrays, each element of which consists of a function followed
+ // by zero or more arguments to that function. Each function and its arguments will
+ // be called in a separate thread.
+ // msg: failure message
+ // newScopes: if true, each thread starts in a fresh scope
+ assert.parallelTests = function( params, msg, newScopes ) {
+ newScopes = newScopes || false;
+ var wrapper = function( fun, argv ) {
+ eval (
+ "var z = function() {" +
+ "var __parallelTests__fun = " + fun.toString() + ";" +
+ "var __parallelTests__argv = " + tojson( argv ) + ";" +
+ "var __parallelTests__passed = false;" +
+ "try {" +
+ "__parallelTests__fun.apply( 0, __parallelTests__argv );" +
+ "__parallelTests__passed = true;" +
+ "} catch ( e ) {" +
+ "print('');" +
+ "print( '********** Parallel Test FAILED: ' + tojson(e) );" +
+ "print('');" +
+ "}" +
+ "return __parallelTests__passed;" +
+ "}"
+ );
+ return z;
+ }
+ var runners = new Array();
+ for( var i in params ) {
+ var param = params[ i ];
+ var test = param.shift();
+ var t;
+ if ( newScopes )
+ t = new ScopedThread( wrapper( test, param ) );
+ else
+ t = new Thread( wrapper( test, param ) );
+ runners.push( t );
+ }
+
+ runners.forEach( function( x ) { x.start(); } );
+ var nFailed = 0;
+ // v8 doesn't like it if we exit before all threads are joined (SERVER-529)
+ runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );
+ assert.eq( 0, nFailed, msg );
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/password_protected.pem b/src/mongo/gotools/test/legacy28/jstests/libs/password_protected.pem
new file mode 100644
index 00000000000..25e47bc2402
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/password_protected.pem
@@ -0,0 +1,51 @@
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBCTANBgkqhkiG9w0BAQUFADB4MRswGQYDVQQDExJwYXNz
+d29yZF9wcm90ZWN0ZWQxDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29E
+QjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJ
+BgNVBAYTAlVTMB4XDTE0MDcxNzE2MDAwMFoXDTIwMDcxNzE2MDAwMFoweDEbMBkG
+A1UEAxMScGFzc3dvcmRfcHJvdGVjdGVkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNV
+BAoTB01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5l
+dyBZb3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBALT4r3Hcou2auIOHeihBSjk4bKQTVqI6r/stnkul359SRfKuzVA9gMQaRRDi
+MJoxczHJzS2FX+wElzBt2EUhfu3qpUJ4gJw7H4WjLx+mNnj/+6b4HUO4eRzH5hTE
+A+qgDH40qYjFDEjiARvybWo3IlDLeI/uFwlyUj5PZBUBc1LBBzNtCBfJ2MmHLhIx
+jzTFhkJZll673LL6BPHtJclXCazqKUZDLqObW4Ei6X4hdBOdC8v8Q6GMgC4BxLe0
+wsOpKYYeM3il4BtfiqDQB5ZPG0lgo1Y7OOyFHFXBA7oNkK8lykhdyH4iLt5L9mWo
+VKyZ79VqSODFuCqWo8n8kUTgA/0CAwEAAaMQMA4wDAYDVR0TBAUwAwEB/zANBgkq
+hkiG9w0BAQUFAAOCAQEAntxk8a0HcuPG8Fdjckp6WL+HKQQnUKdvSk06rPF0SHpN
+Ma4eZcaumROdtAYnPKvtbpq3DRCQlj59dlWPksEcYiXqf56TgcyAz5K5g5z9RbFi
+ArvAXJNRcDz1080NWGBUTPYyiKWR3PhtlYhJZ4r7fQIWLv4mifXHViw2roXXhsAY
+ubk9HOtrqE7x6NJXgR24aybxqI6TfAKfM+LJNtMwMFrPC+GHnhqMOs/jHJS38NIB
+TrKA63TdpYUroVu23/tGLQaJz352qgF4Di91RkUfnI528goj57pX78H8KRsSNVvs
+KHVNrxtZIez+pxxjBPnyfCH81swkiAPG9fdX+Hcu5A==
+-----END CERTIFICATE-----
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQI6y3b7IxcANECAggA
+MB0GCWCGSAFlAwQBAgQQrTslOKC0GZZwq48v7niXYQSCBNBNKsTN7fyw60/EEDH0
+JUgxL83Wfb7pNP97/lV5qiclY1mwcKz44kXQaesFTzhiwzAMOpbI/ijEtsNU25wV
+wtTgjAC3Em/+5/ygrmAu7hgacIRssspovmsgw029E9iOkyBd1VIrDVMi7HLHf0iU
+2Zq18QF20az2pXNMDipmVJkpc9NvjSdqka5+375pJuisspEWCDBd11K10jzCWqB5
+q3Rm1IIeq+mql6KT1rJcUoeE0facDc9GDYBiF/MfIKQ3FrZy/psqheCfL1UDUMyc
+mnm9GJO5+bCuHkg8ni0Zo5XXsf2VEFt0yt6lSucoOP43flucQaHnFKcn+5DHjDXv
+S6Eb5wEG9qWtzwWy/9DfRbkj6FxUgT3SFgizo/uLmdqFCJCnYkHUD1OuYCDmoIXP
+VTinwgK4lO/vrGfoPQrgJmdlnwHRWYjlB8edMCbmItaj2Esh3FBS12y976+UT0Sk
+8n5HsZAEYScDyNArVhrLUZRgF+r+bgZ28TDFO0MISPCAbZjhvq6lygS3dEmdTUW3
+cFDe1deNknWxZcv4UpJW4Nq6ckxwXBfTB1VFzjp7/vXrK/Sd9t8zi6vKTO8OTqc4
+KrlLXBgz0ouP/cxhYDykUrKXE2Eb0TjeAN1txZWo3fIFzXUvDZCphQEZNUqsFUxH
+86V2lwqVzKrFq6UpTgKrfTw/2ePQn9dQgd7iFWDTWjRkbzA5aAgTSVP8xQRoIOeQ
+epXtP9202kEz3h28SZYK7QBOTTX9xNmV/dzDTsi9nXZ6KtsP/aGFE5hh95jvESx/
+wlOBAPW4HR33rSYalvQPE7RjjLZHOKuYIllUBGlTOfgdA+WUXR3KxiLNPdslPBPV
++O6aDyerhWoQwE7TFwhP/FpxL/46hOu4iq4fgqfjddBTq8z5jG3c3zzogDjoDzBF
+LEQDcbenUCGbEQ7zxXsXtr3QinJ+aAejDO38hp1h9ROb5LF53/9H2j/16nby/jPX
+7kp2weRSKGJ0B6AVuS9pTsQz4+E3icsIgBWSU6qtcUz2GO2QxnFuvT9LEVnyMNN2
+IKMIEKi2FsUMddHGXLULTANlzUMocdHrd5j81eqcFPhMOFOiHpgwiwxqZyBYOLRl
+Fe7x5dLVWoLgjJagZj8uYnJbExDsfFLjEx8p4Z+rejJIC5CqZLbz9sDgCtIL+92k
++x4mlT1Rfmz9pU+RQqik83nFFRBGWxeW9iWWEgocWtmezvnK6E241v78zkqxNkvF
+JJo7BsBw7DiEHEfLhBZYuqV2q6+kwqgYrzyGIwAJkBGrkYfalVzgR+3/uN04h005
+M3jQRpSkDVGYr3JKEAlh3Sc+JD9VPbu6/RXNwy5mY67UCgWGaFwRqJE3DC9aKfNC
+OET8m8+8oQgFzhw3pNpENsgwR+Sx3K4q0GI3YwxT02pieBFNQaw53O3B3TtoCjkk
+UsuyIWqcLonwo4I3z0kjU3gEFN+0m4E4/A1DNt0J3rsKN+toCk1FqbxQg9xTZzXu
+hYmA3HMMwugzXmCanqBhmMsniPg+dRxCIfiHZhLuEpjKxZWcMWcW4M6l/wbM+LbE
+oDcTuI9ezfPTZ3xA8hNIHBT3MhuI7EJQnvKKvJDJeyX5sAtmSsSFqhEr8QZD8RgV
+5H9eOyUdfcWxLlstcq982V0oGg==
+-----END ENCRYPTED PRIVATE KEY-----
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/server.pem b/src/mongo/gotools/test/legacy28/jstests/libs/server.pem
new file mode 100644
index 00000000000..df2b49163d6
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/server.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDfjCCAmagAwIBAgIBBzANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBsMQ8wDQYDVQQD
+EwZzZXJ2ZXIxDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29EQjEWMBQG
+A1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJBgNVBAYT
+AlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp76KJeDczBqjSPJj
+5f8DHdtrWpQDK9AWNDlslWpi6+pL8hMqwbX0D7hC2r3kAgccMyFoNIudPqIXfXVd
+1LOh6vyY+jveRvqjKW/UZVzZeiL4Gy4bhke6R8JRC3O5aMKIAbaiQUAI1Nd8LxIt
+LGvH+ia/DFza1whgB8ym/uzVQB6igOifJ1qHWJbTtIhDKaW8gvjOhv5R3jzjfLEb
+R9r5Q0ZyE0lrO27kTkqgBnHKPmu54GSzU/r0HM3B+Sc/6UN+xNhNbuR+LZ+EvJHm
+r4de8jhW8wivmjTIvte33jlLibQ5nYIHrlpDLEwlzvDGaIio+OfWcgs2WuPk98MU
+tht0IQIDAQABoyMwITAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAuMTAN
+BgkqhkiG9w0BAQUFAAOCAQEANoYxvVFsIol09BQA0fwryAye/Z4dYItvKhmwB9VS
+t99DsmJcyx0P5meB3Ed8SnwkD0NGCm5TkUY/YLacPP9uJ4SkbPkNZ1fRISyShCCn
+SGgQUJWHbCbcIEj+vssFb91c5RFJbvnenDkQokRvD2VJWspwioeLzuwtARUoMH3Y
+qg0k0Mn7Bx1bW1Y6xQJHeVlnZtzxfeueoFO55ZRkZ0ceAD/q7q1ohTXi0vMydYgu
+1CB6VkDuibGlv56NdjbttPJm2iQoPaez8tZGpBo76N/Z1ydan0ow2pVjDXVOR84Y
+2HSZgbHOGBiycNw2W3vfw7uK0OmiPRTFpJCmewDjYwZ/6w==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAp76KJeDczBqjSPJj5f8DHdtrWpQDK9AWNDlslWpi6+pL8hMq
+wbX0D7hC2r3kAgccMyFoNIudPqIXfXVd1LOh6vyY+jveRvqjKW/UZVzZeiL4Gy4b
+hke6R8JRC3O5aMKIAbaiQUAI1Nd8LxItLGvH+ia/DFza1whgB8ym/uzVQB6igOif
+J1qHWJbTtIhDKaW8gvjOhv5R3jzjfLEbR9r5Q0ZyE0lrO27kTkqgBnHKPmu54GSz
+U/r0HM3B+Sc/6UN+xNhNbuR+LZ+EvJHmr4de8jhW8wivmjTIvte33jlLibQ5nYIH
+rlpDLEwlzvDGaIio+OfWcgs2WuPk98MUtht0IQIDAQABAoIBACgi1ilECXCouwMc
+RDzm7Jb7Rk+Q9MVJ79YlG08Q+oRaNjvAzE03PSN5wj1WjDTUALJXPvi7oy82V4qE
+R6Q6Kvbv46aUJpYzKFEk2dw7ACpSLa1LNfjGNtMusnecA/QF/8bxLReRu8s5mBQn
+NDnZvCqllLbfjNlAvsF+/UIn5sqFZpAZPMtPwkTAeh5ge8H9JvrG8y8aXsiFGAhV
+Z7tMZyn8wPCUrRi14NLvVB4hxM66G/tuTp8r9AmeTU+PV+qbCnKXd+v0IS52hvX9
+z75OPfAc66nm4bbPCapb6Yx7WaewPXXU0HDxeaT0BeQ/YfoNa5OT+ZOX1KndSfHa
+VhtmEsECgYEA3m86yYMsNOo+dkhqctNVRw2N+8gTO28GmWxNV9AC+fy1epW9+FNR
+yTQXpBkRrR7qrd5mF7WBc7vAIiSfVs021RMofzn5B1x7jzkH34VZtlviNdE3TZhx
+lPinqo0Yy3UEksgsCBJFIofuCmeTLk4ZtqoiZnXr35RYibaZoQdUT4kCgYEAwQ6Y
+xsKFYFks1+HYl29kR0qUkXFlVbKOhQIlj/dPm0JjZ0xYkUxmzoXD68HrOWgz7hc2
+hZaQTgWf+8cRaZNfh7oL+Iglczc2UXuwuUYguYssD/G6/ZPY15PhItgCghaU5Ewy
+hMwIJ81NENY2EQTgk/Z1KZitXdVJfHl/IPMQgdkCgYASdqkqkPjaa5dDuj8byO8L
+NtTSUYlHJbAmjBbfcyTMG230/vkF4+SmDuznci1FcYuJYyyWSzqzoKISM3gGfIJQ
+rYZvCSDiu4qGGPXOWANaX8YnMXalukGzW/CO96dXPB9lD7iX8uxKMX5Q3sgYz+LS
+hszUNHWf2XB//ehCtZkKAQKBgQCxL2luepeZHx82H9T+38BkYgHLHw0HQzLkxlyd
+LjlE4QCEjSB4cmukvkZbuYXfEVEgAvQKVW6p/SWhGkpT4Gt8EXftKV9dyF21GVXQ
+JZnhUOcm1xBsrWYGLXYi2agrpvgONBTlprERfq5tdnz2z8giZL+RZswu45Nnh8bz
+AcKzuQKBgQCGOQvKvNL5XKKmws/KRkfJbXgsyRT2ubO6pVL9jGQG5wntkeIRaEpT
+oxFtWMdPx3b3cxtgSP2ojllEiISk87SFIN1zEhHZy/JpTF0GlU1qg3VIaA78M1p2
+ZdpUsuqJzYmc3dDbQMepIaqdW4xMoTtZFyenUJyoezz6eWy/NlZ/XQ==
+-----END RSA PRIVATE KEY-----
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/servers.js b/src/mongo/gotools/test/legacy28/jstests/libs/servers.js
new file mode 100755
index 00000000000..b752b820eae
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/servers.js
@@ -0,0 +1,982 @@
+// Wrap whole file in a function to avoid polluting the global namespace
+(function() {
+
+_parsePath = function() {
+ var dbpath = "";
+ for( var i = 0; i < arguments.length; ++i )
+ if ( arguments[ i ] == "--dbpath" )
+ dbpath = arguments[ i + 1 ];
+
+ if ( dbpath == "" )
+ throw Error("No dbpath specified");
+
+ return dbpath;
+}
+
+_parsePort = function() {
+ var port = "";
+ for( var i = 0; i < arguments.length; ++i )
+ if ( arguments[ i ] == "--port" )
+ port = arguments[ i + 1 ];
+
+ if ( port == "" )
+ throw Error("No port specified");
+ return port;
+}
+
+connectionURLTheSame = function( a , b ){
+
+ if ( a == b )
+ return true;
+
+ if ( ! a || ! b )
+ return false;
+
+ if( a.host ) return connectionURLTheSame( a.host, b )
+ if( b.host ) return connectionURLTheSame( a, b.host )
+
+ if( a.name ) return connectionURLTheSame( a.name, b )
+ if( b.name ) return connectionURLTheSame( a, b.name )
+
+ if( a.indexOf( "/" ) < 0 && b.indexOf( "/" ) < 0 ){
+ a = a.split( ":" )
+ b = b.split( ":" )
+
+ if( a.length != b.length ) return false
+
+ if( a.length == 2 && a[1] != b[1] ) return false
+
+ if( a[0] == "localhost" || a[0] == "127.0.0.1" ) a[0] = getHostName()
+ if( b[0] == "localhost" || b[0] == "127.0.0.1" ) b[0] = getHostName()
+
+ return a[0] == b[0]
+ }
+ else {
+ var a0 = a.split( "/" )[0]
+ var b0 = b.split( "/" )[0]
+ return a0 == b0
+ }
+}
+
+assert( connectionURLTheSame( "foo" , "foo" ) )
+assert( ! connectionURLTheSame( "foo" , "bar" ) )
+
+assert( connectionURLTheSame( "foo/a,b" , "foo/b,a" ) )
+assert( ! connectionURLTheSame( "foo/a,b" , "bar/a,b" ) )
+
+createMongoArgs = function( binaryName , args ){
+ var fullArgs = [ binaryName ];
+
+ if ( args.length == 1 && isObject( args[0] ) ){
+ var o = args[0];
+ for ( var k in o ){
+ if ( o.hasOwnProperty(k) ){
+ if ( k == "v" && isNumber( o[k] ) ){
+ var n = o[k];
+ if ( n > 0 ){
+ if ( n > 10 ) n = 10;
+ var temp = "-";
+ while ( n-- > 0 ) temp += "v";
+ fullArgs.push( temp );
+ }
+ }
+ else {
+ fullArgs.push( "--" + k );
+ if ( o[k] != "" )
+ fullArgs.push( "" + o[k] );
+ }
+ }
+ }
+ }
+ else {
+ for ( var i=0; i<args.length; i++ )
+ fullArgs.push( args[i] )
+ }
+
+ return fullArgs;
+}
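+
+// Example (hypothetical):
+//   createMongoArgs("mongod", [{ port: 27017, v: 2 }])
+//   returns ["mongod", "--port", "27017", "-vv"]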
+
+
+MongoRunner = function(){}
+
+MongoRunner.dataDir = "/data/db"
+MongoRunner.dataPath = "/data/db/"
+MongoRunner.usedPortMap = {}
+
+MongoRunner.VersionSub = function(regex, version) {
+ this.regex = regex;
+ this.version = version;
+}
+
+// These patterns allow substituting the binary versions used for each
+// version string to support the dev/stable MongoDB release cycle.
+MongoRunner.binVersionSubs = [ new MongoRunner.VersionSub(/^latest$/, ""),
+ new MongoRunner.VersionSub(/^oldest-supported$/, "1.8"),
+ // To-be-updated when 2.8 becomes available
+ new MongoRunner.VersionSub(/^last-stable$/, "2.6"),
+ // Latest unstable and next stable are effectively the
+ // same release
+ new MongoRunner.VersionSub(/^2\.7(\..*){0,1}/, ""),
+ new MongoRunner.VersionSub(/^2\.8(\..*){0,1}/, "") ];
+
+MongoRunner.getBinVersionFor = function(version) {
+
+ // If this is a version iterator, iterate the version via toString()
+ if (version instanceof MongoRunner.versionIterator.iterator) {
+ version = version.toString();
+ }
+
+ // No version set means we use no suffix; this is *different* from "latest"
+ // since latest may be mapped to a different version.
+ if (version == null) version = "";
+ version = version.trim();
+ if (version === "") return "";
+
+ // See if this version is affected by version substitutions
+ for (var i = 0; i < MongoRunner.binVersionSubs.length; i++) {
+ var sub = MongoRunner.binVersionSubs[i];
+ if (sub.regex.test(version)) {
+ version = sub.version;
+ }
+ }
+
+ return version;
+}
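+
+// Examples (hypothetical):
+//   MongoRunner.getBinVersionFor("last-stable")  // "2.6"
+//   MongoRunner.getBinVersionFor("latest")       // "" (no suffix)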
+
+MongoRunner.areBinVersionsTheSame = function(versionA, versionB) {
+
+ versionA = MongoRunner.getBinVersionFor(versionA);
+ versionB = MongoRunner.getBinVersionFor(versionB);
+
+ if (versionA === "" || versionB === "") {
+ return versionA === versionB;
+ }
+
+ return versionA.startsWith(versionB) ||
+ versionB.startsWith(versionA);
+}
+
+MongoRunner.logicalOptions = { runId : true,
+ pathOpts : true,
+ remember : true,
+ noRemember : true,
+ appendOptions : true,
+ restart : true,
+ noCleanData : true,
+ cleanData : true,
+ startClean : true,
+ forceLock : true,
+ useLogFiles : true,
+ logFile : true,
+ useHostName : true,
+ useHostname : true,
+ noReplSet : true,
+ forgetPort : true,
+ arbiter : true,
+ noJournalPrealloc : true,
+ noJournal : true,
+ binVersion : true,
+ waitForConnect : true }
+
+MongoRunner.toRealPath = function( path, pathOpts ){
+
+ // Replace all $pathOptions with actual values
+ pathOpts = pathOpts || {}
+ path = path.replace( /\$dataPath/g, MongoRunner.dataPath )
+ path = path.replace( /\$dataDir/g, MongoRunner.dataDir )
+ for( key in pathOpts ){
+ path = path.replace( RegExp( "\\$" + RegExp.escape(key), "g" ), pathOpts[ key ] )
+ }
+
+ // Relative path
+ // Detect Unix and Windows absolute paths
+ // as well as Windows drive letters
+ // Also captures Windows UNC paths
+
+ if( ! path.match( /^(\/|\\|[A-Za-z]:)/ ) ){
+ if( path != "" && ! path.endsWith( "/" ) )
+ path += "/"
+
+ path = MongoRunner.dataPath + path
+ }
+
+ return path
+
+}
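+
+// Example (hypothetical):
+//   MongoRunner.toRealPath("$dataDir/rs-$node", { node: 0 })  // "/data/db/rs-0"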
+
+MongoRunner.toRealDir = function( path, pathOpts ){
+
+ path = MongoRunner.toRealPath( path, pathOpts )
+
+ if( path.endsWith( "/" ) )
+ path = path.substring( 0, path.length - 1 )
+
+ return path
+}
+
+MongoRunner.toRealFile = MongoRunner.toRealDir
+
+MongoRunner.nextOpenPort = function(){
+
+ var i = 0;
+ while( MongoRunner.usedPortMap[ "" + ( 27000 + i ) ] ) i++;
+ MongoRunner.usedPortMap[ "" + ( 27000 + i ) ] = true
+
+ return 27000 + i
+
+}
+
+/**
+ * Returns an iterator object which yields successive versions on toString(), starting from a
+ * random initial position, from an array of versions.
+ *
+ * If passed a single version string or an already-existing version iterator, just returns the
+ * object itself, since it will yield correctly on toString()
+ *
+ * @param {Array.<String>}|{String}|{versionIterator}
+ */
+MongoRunner.versionIterator = function( arr, isRandom ){
+
+ // If this isn't an array of versions, or is already an iterator, just use it
+ if( typeof arr == "string" ) return arr
+ if( arr.isVersionIterator ) return arr
+
+ if (isRandom == undefined) isRandom = false;
+
+ // Starting pos
+ var i = isRandom ? parseInt( Random.rand() * arr.length ) : 0;
+
+ return new MongoRunner.versionIterator.iterator(i, arr);
+}
+
+MongoRunner.versionIterator.iterator = function(i, arr) {
+
+ this.toString = function() {
+ i = ( i + 1 ) % arr.length
+ print( "Returning next version : " + i +
+ " (" + arr[i] + ") from " + tojson( arr ) + "..." );
+ return arr[ i ]
+ }
+
+ this.isVersionIterator = true;
+
+}
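+
+// Example (hypothetical):
+//   var vi = MongoRunner.versionIterator(["2.6", "2.8"]);
+//   "" + vi;  // "2.8" -- each toString() call yields the next version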
+
+/**
+ * Converts the args object by pairing all keys with their value and appending
+ * dash-dash (--) to the keys. The only exceptions to this rule are keys
+ * defined in MongoRunner.logicalOptions, which are ignored.
+ *
+ * @param {string} binaryName
+ * @param {Object} args
+ *
+ * @return {Array.<String>} an array of parameter strings that can be passed
+ * to the binary.
+ */
+MongoRunner.arrOptions = function( binaryName , args ){
+
+ var fullArgs = [ "" ]
+
+ // isObject returns true even if "args" is an array, so the else branch of this statement is
+ // dead code. See SERVER-14220.
+ if ( isObject( args ) || ( args.length == 1 && isObject( args[0] ) ) ){
+
+ var o = isObject( args ) ? args : args[0]
+
+ // If we've specified a particular binary version, use that
+ if (o.binVersion && o.binVersion != "") {
+ binaryName += "-" + o.binVersion;
+ }
+
+ // Manage legacy options
+ var isValidOptionForBinary = function( option, value ){
+
+ if( ! o.binVersion ) return true
+
+ // Version 1.x options
+ if( o.binVersion.startsWith( "1." ) ){
+
+ return [ "nopreallocj" ].indexOf( option ) < 0
+ }
+
+ return true
+ }
+
+ for ( var k in o ){
+
+ // Make sure our logical option should be added to the array of options
+ if( ! o.hasOwnProperty( k ) ||
+ k in MongoRunner.logicalOptions ||
+ ! isValidOptionForBinary( k, o[k] ) ) continue
+
+ if ( ( k == "v" || k == "verbose" ) && isNumber( o[k] ) ){
+ var n = o[k]
+ if ( n > 0 ){
+ if ( n > 10 ) n = 10
+ var temp = "-"
+ while ( n-- > 0 ) temp += "v"
+ fullArgs.push( temp )
+ }
+ }
+ else {
+ if( o[k] == undefined || o[k] == null ) continue
+ fullArgs.push( "--" + k )
+ if ( o[k] != "" )
+ fullArgs.push( "" + o[k] )
+ }
+ }
+ }
+ else {
+ for ( var i=0; i<args.length; i++ )
+ fullArgs.push( args[i] )
+ }
+
+ fullArgs[ 0 ] = binaryName
+ return fullArgs
+}
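+
+// For illustration (hypothetical options): logical options are skipped and the
+// rest become dash-dash flags, with v/verbose expanded to repeated -v's:
+//   MongoRunner.arrOptions("mongod", { port: 27017, nojournal: "", v: 2 })
+//   // => [ "mongod", "--port", "27017", "--nojournal", "-vv" ]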
+
+MongoRunner.arrToOpts = function( arr ){
+
+ var opts = {}
+ for( var i = 1; i < arr.length; i++ ){
+ if( arr[i].startsWith( "-" ) ){
+ var opt = arr[i].replace( /^-/, "" ).replace( /^-/, "" )
+
+ if( arr.length > i + 1 && ! arr[ i + 1 ].startsWith( "-" ) ){
+ opts[ opt ] = arr[ i + 1 ]
+ i++
+ }
+ else{
+ opts[ opt ] = ""
+ }
+
+ if( opt.replace( /v/g, "" ) == "" ){
+ opts[ "verbose" ] = opt.length
+ }
+ }
+ }
+
+ return opts
+}
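+
+// arrToOpts is roughly the inverse of arrOptions; e.g. (hypothetical argv):
+//   MongoRunner.arrToOpts([ "mongod", "--port", "27017", "-vv" ])
+//   // => { port: "27017", vv: "", verbose: 2 }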
+
+MongoRunner.savedOptions = {}
+
+MongoRunner.mongoOptions = function( opts ){
+
+ // Don't remember waitForConnect
+ var waitForConnect = opts.waitForConnect;
+ delete opts.waitForConnect;
+
+ // If we're a mongo object
+ if( opts.getDB ){
+ opts = { restart : opts.runId }
+ }
+
+ // Initialize and create a copy of the opts
+ opts = Object.merge( opts || {}, {} )
+
+ if( ! opts.restart ) opts.restart = false
+
+ // RunId can come from a number of places
+ // If restart is passed as an old connection
+ if( opts.restart && opts.restart.getDB ){
+ opts.runId = opts.restart.runId
+ opts.restart = true
+ }
+ // If it's the runId itself
+ else if( isObject( opts.restart ) ){
+ opts.runId = opts.restart
+ opts.restart = true
+ }
+
+ if( isObject( opts.remember ) ){
+ opts.runId = opts.remember
+ opts.remember = true
+ }
+ else if( opts.remember == undefined ){
+ // Remember by default if we're restarting
+ opts.remember = opts.restart
+ }
+
+ // If we passed in restart : <conn> or runId : <conn>
+ if( isObject( opts.runId ) && opts.runId.runId ) opts.runId = opts.runId.runId
+
+ if( opts.restart && opts.remember ) opts = Object.merge( MongoRunner.savedOptions[ opts.runId ], opts )
+
+ // Create a new runId
+ opts.runId = opts.runId || ObjectId()
+
+ // Save the port if required
+ if( ! opts.forgetPort ) opts.port = opts.port || MongoRunner.nextOpenPort()
+
+ var shouldRemember = ( ! opts.restart && ! opts.noRemember ) || ( opts.restart && opts.appendOptions )
+
+ // Normalize and get the binary version to use
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+
+ if ( shouldRemember ){
+ MongoRunner.savedOptions[ opts.runId ] = Object.merge( opts, {} )
+ }
+
+ // Default for waitForConnect is true
+ opts.waitForConnect = (waitForConnect == undefined || waitForConnect == null) ?
+ true : waitForConnect;
+
+ if( jsTestOptions().useSSL ) {
+ if (!opts.sslMode) opts.sslMode = "requireSSL";
+ if (!opts.sslPEMKeyFile) opts.sslPEMKeyFile = "jstests/libs/server.pem";
+ if (!opts.sslCAFile) opts.sslCAFile = "jstests/libs/ca.pem";
+
+        // Needed for jstests/ssl/upgrade_to_ssl.js
+ opts.sslWeakCertificateValidation = "";
+
+        // Needed for jstests/ssl/ssl_hostname_validation.js
+ opts.sslAllowInvalidHostnames = "";
+ }
+
+ if ( jsTestOptions().useX509 && !opts.clusterAuthMode ) {
+ opts.clusterAuthMode = "x509";
+ }
+
+ opts.port = opts.port || MongoRunner.nextOpenPort()
+ MongoRunner.usedPortMap[ "" + parseInt( opts.port ) ] = true
+
+ opts.pathOpts = Object.merge( opts.pathOpts || {}, { port : "" + opts.port, runId : "" + opts.runId } )
+
+ return opts
+}
+
+/**
+ * @option {object} opts
+ *
+ * {
+ * dbpath {string}
+ * useLogFiles {boolean}: use with logFile option.
+ * logFile {string}: path to the log file. If not specified and useLogFiles
+ * is true, automatically creates a log file inside dbpath.
+ * noJournalPrealloc {boolean}
+ * noJournal {boolean}
+ * keyFile
+ * replSet
+ * oplogSize
+ * }
+ */
+MongoRunner.mongodOptions = function( opts ){
+
+ opts = MongoRunner.mongoOptions( opts )
+
+ opts.dbpath = MongoRunner.toRealDir( opts.dbpath || "$dataDir/mongod-$port",
+ opts.pathOpts )
+
+ opts.pathOpts = Object.merge( opts.pathOpts, { dbpath : opts.dbpath } )
+
+ if( ! opts.logFile && opts.useLogFiles ){
+ opts.logFile = opts.dbpath + "/mongod.log"
+ }
+ else if( opts.logFile ){
+ opts.logFile = MongoRunner.toRealFile( opts.logFile, opts.pathOpts )
+ }
+
+ if ( opts.logFile !== undefined ) {
+ opts.logpath = opts.logFile;
+ }
+
+ if( jsTestOptions().noJournalPrealloc || opts.noJournalPrealloc )
+ opts.nopreallocj = ""
+
+ if( jsTestOptions().noJournal || opts.noJournal )
+ opts.nojournal = ""
+
+ if( jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile
+ }
+
+ if( jsTestOptions().useSSL ) {
+ if (!opts.sslMode) opts.sslMode = "requireSSL";
+ if (!opts.sslPEMKeyFile) opts.sslPEMKeyFile = "jstests/libs/server.pem";
+ if (!opts.sslCAFile) opts.sslCAFile = "jstests/libs/ca.pem";
+
+        // Needed for jstests/ssl/upgrade_to_ssl.js
+ opts.sslWeakCertificateValidation = "";
+
+        // Needed for jstests/ssl/ssl_hostname_validation.js
+ opts.sslAllowInvalidHostnames = "";
+ }
+
+ if ( jsTestOptions().useX509 && !opts.clusterAuthMode ) {
+ opts.clusterAuthMode = "x509";
+ }
+
+ if( opts.noReplSet ) opts.replSet = null
+ if( opts.arbiter ) opts.oplogSize = 1
+
+ return opts
+}
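+
+// For illustration (hypothetical port): when no dbpath is given, a per-port
+// default is resolved from "$dataDir/mongod-$port", e.g.:
+//   MongoRunner.mongodOptions({ port: 27017 }).dbpath
+//   // => MongoRunner.dataDir + "/mongod-27017"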
+
+MongoRunner.mongosOptions = function( opts ){
+
+ opts = MongoRunner.mongoOptions( opts )
+
+ // Normalize configdb option to be host string if currently a host
+ if( opts.configdb && opts.configdb.getDB ){
+ opts.configdb = opts.configdb.host
+ }
+
+ opts.pathOpts = Object.merge( opts.pathOpts,
+ { configdb : opts.configdb.replace( /:|,/g, "-" ) } )
+
+ if( ! opts.logFile && opts.useLogFiles ){
+ opts.logFile = MongoRunner.toRealFile( "$dataDir/mongos-$configdb-$port.log",
+ opts.pathOpts )
+ }
+ else if( opts.logFile ){
+ opts.logFile = MongoRunner.toRealFile( opts.logFile, opts.pathOpts )
+ }
+
+ if ( opts.logFile !== undefined ){
+ opts.logpath = opts.logFile;
+ }
+
+ if( jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile
+ }
+
+ return opts
+}
+
+/**
+ * Starts a mongod instance.
+ *
+ * @param {Object} opts
+ *
+ * {
+ * useHostName {boolean}: Uses hostname of machine if true
+ * forceLock {boolean}: Deletes the lock file if set to true
+ * dbpath {string}: location of db files
+ * cleanData {boolean}: Removes all files in dbpath if true
+ * startClean {boolean}: same as cleanData
+ * noCleanData {boolean}: Do not clean files (cleanData takes priority)
+ *
+ * @see MongoRunner.mongodOptions for other options
+ * }
+ *
+ * @return {Mongo} connection object to the started mongod instance.
+ *
+ * @see MongoRunner.arrOptions
+ */
+MongoRunner.runMongod = function( opts ){
+
+ opts = opts || {}
+ var useHostName = false;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if( isObject( opts ) ) {
+
+ opts = MongoRunner.mongodOptions( opts );
+ fullOptions = opts;
+
+ useHostName = opts.useHostName || opts.useHostname;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
+
+ if( opts.forceLock ) removeFile( opts.dbpath + "/mongod.lock" )
+ if( ( opts.cleanData || opts.startClean ) || ( ! opts.restart && ! opts.noCleanData ) ){
+ print( "Resetting db path '" + opts.dbpath + "'" )
+ resetDbpath( opts.dbpath )
+ }
+
+ opts = MongoRunner.arrOptions( "mongod", opts )
+ }
+
+ var mongod = MongoRunner.startWithArgs(opts, waitForConnect);
+    if (!waitForConnect) mongod = {};
+ if (!mongod) return null;
+
+ mongod.commandLine = MongoRunner.arrToOpts( opts )
+ mongod.name = (useHostName ? getHostName() : "localhost") + ":" + mongod.commandLine.port
+ mongod.host = mongod.name
+ mongod.port = parseInt( mongod.commandLine.port )
+ mongod.runId = runId || ObjectId()
+ mongod.savedOptions = MongoRunner.savedOptions[ mongod.runId ];
+ mongod.fullOptions = fullOptions;
+
+ return mongod
+}
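+
+// Usage sketch (hypothetical options): a started connection can be passed back
+// via restart to reuse its remembered options, port, and dbpath:
+//   var conn = MongoRunner.runMongod({});
+//   MongoRunner.stopMongod(conn.port);
+//   conn = MongoRunner.runMongod({ restart: conn });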
+
+MongoRunner.runMongos = function( opts ){
+
+ opts = opts || {}
+ var useHostName = false;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if( isObject( opts ) ) {
+
+ opts = MongoRunner.mongosOptions( opts );
+ fullOptions = opts;
+
+ useHostName = opts.useHostName || opts.useHostname;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
+
+ opts = MongoRunner.arrOptions( "mongos", opts )
+ }
+
+ var mongos = MongoRunner.startWithArgs(opts, waitForConnect);
+ if (!waitForConnect) mongos = {};
+ if (!mongos) return null;
+
+ mongos.commandLine = MongoRunner.arrToOpts( opts )
+ mongos.name = (useHostName ? getHostName() : "localhost") + ":" + mongos.commandLine.port
+ mongos.host = mongos.name
+ mongos.port = parseInt( mongos.commandLine.port )
+ mongos.runId = runId || ObjectId()
+ mongos.savedOptions = MongoRunner.savedOptions[ mongos.runId ]
+ mongos.fullOptions = fullOptions;
+
+ return mongos
+}
+
+/**
+ * Kills a mongod process.
+ *
+ * @param {number} port the port of the process to kill
+ * @param {number} signal The signal number to use for killing
+ * @param {Object} opts Additional options. Format:
+ * {
+ * auth: {
+ * user {string}: admin user name
+ * pwd {string}: admin password
+ * }
+ * }
+ *
+ * Note: The auth option is required for an authenticated mongod running on Windows,
+ * since stopping it uses the shutdown command, which requires admin credentials.
+ */
+MongoRunner.stopMongod = function( port, signal, opts ){
+
+ if( ! port ) {
+ print( "Cannot stop mongo process " + port )
+ return
+ }
+
+ signal = signal || 15
+
+ if( port.port )
+ port = parseInt( port.port )
+
+ if( port instanceof ObjectId ){
+        var opts = MongoRunner.savedOptions[ port ]
+ if( opts ) port = parseInt( opts.port )
+ }
+
+ var exitCode = stopMongod( parseInt( port ), parseInt( signal ), opts )
+
+ delete MongoRunner.usedPortMap[ "" + parseInt( port ) ]
+
+ return exitCode
+}
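+
+// Usage sketch (hypothetical values): the first argument may be a port number
+// or a connection object with a .port property:
+//   MongoRunner.stopMongod(27017);    // default signal 15 (SIGTERM)
+//   MongoRunner.stopMongod(conn, 9);  // SIGKILL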
+
+MongoRunner.stopMongos = MongoRunner.stopMongod
+
+MongoRunner.isStopped = function( port ){
+
+ if( ! port ) {
+ print( "Cannot detect if process " + port + " is stopped." )
+ return
+ }
+
+ if( port.port )
+ port = parseInt( port.port )
+
+ if( port instanceof ObjectId ){
+        var opts = MongoRunner.savedOptions[ port ]
+ if( opts ) port = parseInt( opts.port )
+ }
+
+ return MongoRunner.usedPortMap[ "" + parseInt( port ) ] ? false : true
+}
+
+/**
+ * Starts an instance of the specified mongo tool
+ *
+ * @param {String} binaryName The name of the tool to run
+ * @param {Object} opts options to pass to the tool
+ * {
+ * binVersion {string}: version of tool to run
+ * }
+ *
+ * @see MongoRunner.arrOptions
+ */
+MongoRunner.runMongoTool = function( binaryName, opts ){
+
+ var opts = opts || {}
+ // Normalize and get the binary version to use
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+
+ var argsArray = MongoRunner.arrOptions(binaryName, opts)
+
+ return runMongoProgram.apply(null, argsArray);
+
+}
+
+// Given a test name, figures out a directory for that test to use for dump files and makes
+// sure that directory exists and is empty.
+MongoRunner.getAndPrepareDumpDirectory = function(testName) {
+ var dir = MongoRunner.dataPath + testName + "_external/";
+ resetDbpath(dir);
+ return dir;
+}
+
+// Start a mongod instance and return a 'Mongo' object connected to it.
+// This function's arguments are passed as command line arguments to mongod.
+// The specified 'dbpath' is cleared if it exists, created if not.
+// var conn = startMongodEmpty("--port", 30000, "--dbpath", "asdf");
+startMongodEmpty = function () {
+ var args = createMongoArgs("mongod", arguments);
+
+ var dbpath = _parsePath.apply(null, args);
+ resetDbpath(dbpath);
+
+ return startMongoProgram.apply(null, args);
+}
+startMongod = function () {
+ print("startMongod WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING ONLY");
+ return startMongodEmpty.apply(null, arguments);
+}
+startMongodNoReset = function(){
+ var args = createMongoArgs( "mongod" , arguments );
+ return startMongoProgram.apply( null, args );
+}
+
+startMongos = function(args){
+ return MongoRunner.runMongos(args);
+}
+
+/**
+ * Returns a new argArray with any test-specific arguments added.
+ */
+function appendSetParameterArgs(argArray) {
+ var programName = argArray[0];
+ if (programName.endsWith('mongod') || programName.endsWith('mongos')) {
+ if (jsTest.options().enableTestCommands) {
+ argArray.push.apply(argArray, ['--setParameter', "enableTestCommands=1"]);
+ }
+ if (jsTest.options().authMechanism && jsTest.options().authMechanism != "SCRAM-SHA-1") {
+ var hasAuthMechs = false;
+            for (var i in argArray) {
+ if (typeof argArray[i] === 'string' &&
+ argArray[i].indexOf('authenticationMechanisms') != -1) {
+ hasAuthMechs = true;
+ break;
+ }
+ }
+ if (!hasAuthMechs) {
+ argArray.push.apply(argArray,
+ ['--setParameter',
+ "authenticationMechanisms=" + jsTest.options().authMechanism]);
+ }
+ }
+ if (jsTest.options().auth) {
+ argArray.push.apply(argArray, ['--setParameter', "enableLocalhostAuthBypass=false"]);
+ }
+
+ if ( jsTestOptions().useSSL ) {
+ if ( argArray.indexOf('--sslMode') < 0 ) {
+ argArray.push.apply(argArray, [ '--sslMode', 'requireSSL', '--sslPEMKeyFile', 'jstests/libs/server.pem', '--sslCAFile', 'jstests/libs/ca.pem', '--sslWeakCertificateValidation' ] );
+ }
+ }
+
+ // mongos only options
+ if (programName.endsWith('mongos')) {
+ // apply setParameters for mongos
+ if (jsTest.options().setParametersMongos) {
+ var params = jsTest.options().setParametersMongos.split(",");
+ if (params && params.length > 0) {
+ params.forEach(function(p) {
+ if (p) argArray.push.apply(argArray, ['--setParameter', p])
+ });
+ }
+ }
+ }
+ // mongod only options
+ else if (programName.endsWith('mongod')) {
+ // set storageEngine for mongod
+ if (jsTest.options().storageEngine) {
+ argArray.push.apply(argArray, ['--storageEngine', jsTest.options().storageEngine]);
+ }
+ // apply setParameters for mongod
+ if (jsTest.options().setParameters) {
+ var params = jsTest.options().setParameters.split(",");
+ if (params && params.length > 0) {
+ params.forEach(function(p) {
+ if (p) argArray.push.apply(argArray, ['--setParameter', p])
+ });
+ }
+ }
+ }
+ }
+ return argArray;
+};
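+
+// For illustration (hypothetical argv): with enableTestCommands set, a mongod
+// invocation is augmented roughly as follows (plus any further test parameters):
+//   appendSetParameterArgs([ "mongod", "--port", "27017" ])
+//   // => [ "mongod", "--port", "27017", "--setParameter", "enableTestCommands=1" ]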
+
+/**
+ * Start a mongo process with a particular argument array. If we aren't waiting for connect,
+ * return null.
+ */
+MongoRunner.startWithArgs = function(argArray, waitForConnect) {
+ // TODO: Make there only be one codepath for starting mongo processes
+
+ argArray = appendSetParameterArgs(argArray);
+ var port = _parsePort.apply(null, argArray);
+ var pid = _startMongoProgram.apply(null, argArray);
+
+ var conn = null;
+ if (waitForConnect) {
+ assert.soon( function() {
+ try {
+ conn = new Mongo("127.0.0.1:" + port);
+ return true;
+ } catch( e ) {
+ if (!checkProgram(pid)) {
+
+ print("Could not start mongo program at " + port + ", process ended")
+
+ // Break out
+ return true;
+ }
+ }
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000);
+ }
+
+ return conn;
+}
+
+/**
+ * DEPRECATED
+ *
+ * Start mongod or mongos and return a Mongo() object connected to it.
+ * This function's first argument is the "mongod" or "mongos" program name,
+ * and subsequent arguments are passed as command line arguments to the program.
+ */
+startMongoProgram = function(){
+ var port = _parsePort.apply( null, arguments );
+
+ // Enable test commands.
+ // TODO: Make this work better with multi-version testing so that we can support
+ // enabling this on 2.4 when testing 2.6
+ var args = argumentsToArray( arguments );
+ args = appendSetParameterArgs(args);
+ var pid = _startMongoProgram.apply( null, args );
+
+ var m;
+ assert.soon
+ ( function() {
+ try {
+ m = new Mongo( "127.0.0.1:" + port );
+ return true;
+ } catch( e ) {
+ if (!checkProgram(pid)) {
+
+ print("Could not start mongo program at " + port + ", process ended")
+
+ // Break out
+ m = null;
+ return true;
+ }
+ }
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000 );
+
+ return m;
+}
+
+runMongoProgram = function() {
+ var args = argumentsToArray( arguments );
+ var progName = args[0];
+
+ if ( jsTestOptions().auth ) {
+ args = args.slice(1);
+ args.unshift( progName,
+ '-u', jsTestOptions().authUser,
+ '-p', jsTestOptions().authPassword,
+ '--authenticationMechanism', DB.prototype._defaultAuthenticationMechanism,
+ '--authenticationDatabase=admin'
+ );
+ }
+
+ if (progName !== "bsondump" && args.indexOf("--dialTimeout") === -1) {
+ args.push("--dialTimeout", "30");
+ }
+
+ if ( jsTestOptions().useSSL ) {
+ args.push("--ssl", "--sslPEMKeyFile", "jstests/libs/server.pem", "--sslCAFile", "jstests/libs/ca.pem", "--sslAllowInvalidHostnames");
+ }
+
+ if (progName == 'mongo' && !_useWriteCommandsDefault()) {
+ progName = args[0];
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
+
+ return _runMongoProgram.apply( null, args );
+}
+
+// Start a mongo program instance. This function's first argument is the
+// program name, and subsequent arguments to this function are passed as
+// command line arguments to the program. Returns pid of the spawned program.
+startMongoProgramNoConnect = function() {
+ var args = argumentsToArray( arguments );
+ var progName = args[0];
+
+ if ( jsTestOptions().auth ) {
+ args = args.slice(1);
+ args.unshift(progName,
+ '-u', jsTestOptions().authUser,
+ '-p', jsTestOptions().authPassword,
+ '--authenticationMechanism', DB.prototype._defaultAuthenticationMechanism,
+ '--authenticationDatabase=admin');
+ }
+
+ if (progName == 'mongo' && !_useWriteCommandsDefault()) {
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
+
+ return _startMongoProgram.apply( null, args );
+}
+
+myPort = function() {
+ var m = db.getMongo();
+ if ( m.host.match( /:/ ) )
+ return m.host.match( /:(.*)/ )[ 1 ];
+ else
+ return 27017;
+}
+
+}());
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/servers_misc.js b/src/mongo/gotools/test/legacy28/jstests/libs/servers_misc.js
new file mode 100644
index 00000000000..4f6d3f9b9ef
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/servers_misc.js
@@ -0,0 +1,357 @@
+/**
+ * Run a mongod process.
+ *
+ * After initializing a MongodRunner, you must call start() on it.
+ * @param {int} port port to run db on; use allocatePorts(num) to requisition one
+ * @param {string} dbpath path to use
+ * @param {boolean} peer pass in false (DEPRECATED, was used for replica pair host)
+ * @param {boolean} arbiter pass in false (DEPRECATED, was used for replica pair host)
+ * @param {array} extraArgs other arguments for the command line
+ * @param {object} options other options; set no_bind to avoid binding to 127.0.0.1
+ *     (necessary for replica set testing)
+ */
+MongodRunner = function( port, dbpath, peer, arbiter, extraArgs, options ) {
+ this.port_ = port;
+ this.dbpath_ = dbpath;
+ this.peer_ = peer;
+ this.arbiter_ = arbiter;
+ this.extraArgs_ = extraArgs;
+ this.options_ = options ? options : {};
+};
+
+/**
+ * Start this mongod process.
+ *
+ * @param {boolean} reuseData If the data directory should be left intact (default is to wipe it)
+ */
+MongodRunner.prototype.start = function( reuseData ) {
+ var args = [];
+ if ( reuseData ) {
+ args.push( "mongod" );
+ }
+ args.push( "--port" );
+ args.push( this.port_ );
+ args.push( "--dbpath" );
+ args.push( this.dbpath_ );
+ args.push( "--nohttpinterface" );
+ args.push( "--noprealloc" );
+ args.push( "--smallfiles" );
+ if (!this.options_.no_bind) {
+ args.push( "--bind_ip" );
+ args.push( "127.0.0.1" );
+ }
+ if ( this.extraArgs_ ) {
+ args = args.concat( this.extraArgs_ );
+ }
+ removeFile( this.dbpath_ + "/mongod.lock" );
+ if ( reuseData ) {
+ return startMongoProgram.apply( null, args );
+ } else {
+ return startMongod.apply( null, args );
+ }
+}
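+
+// Usage sketch (hypothetical values): construct a runner, then start it; pass
+// reuseData=true on a later start to keep the data directory intact:
+//   var runner = new MongodRunner(27017, MongoRunner.dataPath + "example", false, false);
+//   var conn = runner.start();   // fresh start, wipes the dbpath
+//   // a later runner.start(true) would restart reusing the data directory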
+
+MongodRunner.prototype.port = function() { return this.port_; }
+
+MongodRunner.prototype.toString = function() { return [ this.port_, this.dbpath_, this.peer_, this.arbiter_ ].toString(); }
+
+ToolTest = function( name, extraOptions ){
+ this.name = name;
+ this.options = extraOptions;
+ this.port = allocatePorts(1)[0];
+ this.baseName = "jstests_tool_" + name;
+ this.root = MongoRunner.dataPath + this.baseName;
+ this.dbpath = this.root + "/";
+ this.ext = this.root + "_external/";
+ this.extFile = this.root + "_external/a";
+ this.useSSL = jsTestOptions().useSSL
+ resetDbpath( this.dbpath );
+ resetDbpath( this.ext );
+}
+
+ToolTest.prototype.startDB = function( coll ){
+ assert( ! this.m , "db already running" );
+
+ var options = {port : this.port,
+ dbpath : this.dbpath,
+ nohttpinterface : "",
+ noprealloc : "",
+ smallfiles : "",
+ bind_ip : "127.0.0.1"};
+
+ Object.extend(options, this.options);
+
+ if ( this.useSSL ) {
+ Object.extend(options, { sslMode: "requireSSL", sslPEMKeyFile: "jstests/libs/server.pem", sslCAFile: "jstests/libs/ca.pem", sslWeakCertificateValidation: "" } );
+ }
+
+ this.m = startMongoProgram.apply(null, MongoRunner.arrOptions("mongod", options));
+ this.db = this.m.getDB( this.baseName );
+ if ( coll )
+ return this.db.getCollection( coll );
+ return this.db;
+}
+
+ToolTest.prototype.stop = function(){
+ if ( ! this.m )
+ return;
+ stopMongod( this.port );
+ this.m = null;
+ this.db = null;
+
+ print('*** ' + this.name + " completed successfully ***");
+}
+
+ToolTest.prototype.runTool = function(){
+ var a = [ "mongo" + arguments[0] ];
+
+ var hasdbpath = false;
+
+ for ( var i=1; i<arguments.length; i++ ){
+ a.push( arguments[i] );
+ if ( arguments[i] == "--dbpath" )
+ hasdbpath = true;
+ }
+
+ if ( this.useSSL ) {
+ a = a.concat(["--ssl", "--sslPEMKeyFile", "jstests/libs/server.pem", "--sslCAFile", "jstests/libs/ca.pem", "--sslAllowInvalidHostnames"]);
+ }
+
+ if ( ! hasdbpath ){
+ a.push( "--host" );
+ a.push( "127.0.0.1:" + this.port );
+ }
+
+ return runMongoProgram.apply( null , a );
+}
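+
+// Typical ToolTest flow (hypothetical names), as used by the tool tests below:
+//   var t = new ToolTest("example");
+//   var coll = t.startDB("foo");
+//   coll.insert({ a: 1 });
+//   t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo");
+//   t.stop();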
+
+
+ReplTest = function( name, ports ){
+ this.name = name;
+ this.ports = ports || allocatePorts( 2 );
+}
+
+ReplTest.prototype.getPort = function( master ){
+ if ( master )
+ return this.ports[ 0 ];
+ return this.ports[ 1 ]
+}
+
+ReplTest.prototype.getPath = function( master ){
+ var p = MongoRunner.dataPath + this.name + "-";
+ if ( master )
+ p += "master";
+ else
+ p += "slave"
+ return p;
+}
+
+ReplTest.prototype.getOptions = function( master , extra , putBinaryFirst, norepl ){
+
+ if ( ! extra )
+ extra = {};
+
+ if ( ! extra.oplogSize )
+ extra.oplogSize = "40";
+
+ var a = []
+ if ( putBinaryFirst )
+ a.push( "mongod" )
+ a.push( "--nohttpinterface", "--noprealloc", "--bind_ip" , "127.0.0.1" , "--smallfiles" );
+
+ a.push( "--port" );
+ a.push( this.getPort( master ) );
+
+ a.push( "--dbpath" );
+ a.push( this.getPath( master ) );
+
+ if( jsTestOptions().noJournal ) a.push( "--nojournal" )
+ if( jsTestOptions().noJournalPrealloc ) a.push( "--nopreallocj" )
+ if( jsTestOptions().keyFile ) {
+ a.push( "--keyFile" )
+ a.push( jsTestOptions().keyFile )
+ }
+
+ if( jsTestOptions().useSSL ) {
+ if (!a.contains("--sslMode")) {
+ a.push( "--sslMode" )
+ a.push( "requireSSL" )
+ }
+ if (!a.contains("--sslPEMKeyFile")) {
+ a.push( "--sslPEMKeyFile" )
+ a.push( "jstests/libs/server.pem" )
+ }
+ if (!a.contains("--sslCAFile")) {
+ a.push( "--sslCAFile" )
+ a.push( "jstests/libs/ca.pem" )
+ }
+ a.push( "--sslWeakCertificateValidation" )
+ }
+ if( jsTestOptions().useX509 && !a.contains("--clusterAuthMode")) {
+ a.push( "--clusterAuthMode" )
+ a.push( "x509" )
+ }
+
+ if ( !norepl ) {
+ if ( master ){
+ a.push( "--master" );
+ }
+ else {
+ a.push( "--slave" );
+ a.push( "--source" );
+ a.push( "127.0.0.1:" + this.ports[0] );
+ }
+ }
+
+ for ( var k in extra ){
+ var v = extra[k];
+ if( k in MongoRunner.logicalOptions ) continue
+ a.push( "--" + k );
+ if ( v != null )
+ a.push( v );
+ }
+
+ return a;
+}
+
+ReplTest.prototype.start = function( master , options , restart, norepl ){
+ var lockFile = this.getPath( master ) + "/mongod.lock";
+ removeFile( lockFile );
+ var o = this.getOptions( master , options , restart, norepl );
+
+ if (restart) {
+ return startMongoProgram.apply(null, o);
+ } else {
+ var conn = startMongod.apply(null, o);
+ if (jsTestOptions().keyFile || jsTestOptions().auth || jsTestOptions().useX509) {
+ jsTest.authenticate(conn);
+ }
+ return conn;
+ }
+}
+
+ReplTest.prototype.stop = function( master , signal ){
+ if ( arguments.length == 0 ){
+ this.stop( true );
+ this.stop( false );
+ return;
+ }
+
+ print('*** ' + this.name + " completed successfully ***");
+ return stopMongod( this.getPort( master ) , signal || 15 );
+}
+
+allocatePorts = function( n , startPort ) {
+ var ret = [];
+ var start = startPort || 31000;
+ for( var i = start; i < start + n; ++i )
+ ret.push( i );
+ return ret;
+}
+
+
+SyncCCTest = function( testName , extraMongodOptions ){
+ this._testName = testName;
+ this._connections = [];
+
+ for ( var i=0; i<3; i++ ){
+ this._connections.push( startMongodTest( 30000 + i , testName + i , false, extraMongodOptions ) );
+ }
+
+ this.url = this._connections.map( function(z){ return z.name; } ).join( "," );
+ this.conn = new Mongo( this.url );
+}
+
+SyncCCTest.prototype.stop = function(){
+ for ( var i=0; i<this._connections.length; i++){
+ stopMongod( 30000 + i );
+ }
+
+ print('*** ' + this._testName + " completed successfully ***");
+}
+
+SyncCCTest.prototype.checkHashes = function( dbname , msg ){
+ var hashes = this._connections.map(
+ function(z){
+ return z.getDB( dbname ).runCommand( "dbhash" );
+ }
+ );
+
+ for ( var i=1; i<hashes.length; i++ ){
+ assert.eq( hashes[0].md5 , hashes[i].md5 , "checkHash on " + dbname + " " + msg + "\n" + tojson( hashes ) )
+ }
+}
+
+SyncCCTest.prototype.tempKill = function( num ){
+ num = num || 0;
+ stopMongod( 30000 + num );
+}
+
+SyncCCTest.prototype.tempStart = function( num ){
+ num = num || 0;
+ this._connections[num] = startMongodTest( 30000 + num , this._testName + num , true );
+}
+
+
+function startParallelShell( jsCode, port, noConnect ){
+ var x;
+
+ var args = ["mongo"];
+
+ // Convert function into call-string
+ if (typeof(jsCode) == "function") {
+ var id = Math.floor(Math.random() * 100000);
+ jsCode = "var f" + id + " = " + jsCode.toString() + ";f" + id + "();";
+ }
+ else if(typeof(jsCode) == "string") {}
+ // do nothing
+ else {
+ throw Error("bad first argument to startParallelShell");
+ }
+
+ if (noConnect) {
+ args.push("--nodb");
+ } else if (typeof(db) == "object") {
+ jsCode = "db = db.getSiblingDB('" + db.getName() + "');" + jsCode;
+ }
+
+ if (TestData) {
+ jsCode = "TestData = " + tojson(TestData) + ";" + jsCode;
+ }
+
+ args.push("--eval", jsCode);
+
+ if (typeof db == "object") {
+ var hostAndPort = db.getMongo().host.split(':');
+ var host = hostAndPort[0];
+ args.push("--host", host);
+        if (!port && hostAndPort.length >= 2) {
+            port = hostAndPort[1];
+        }
+ }
+ if (port) {
+ args.push("--port", port);
+ }
+
+ if( jsTestOptions().useSSL ) {
+ args.push( "--ssl" )
+ args.push( "--sslPEMKeyFile" )
+ args.push( "jstests/libs/client.pem" )
+ args.push( "--sslCAFile" )
+ args.push( "jstests/libs/ca.pem" )
+ }
+
+ x = startMongoProgramNoConnect.apply(null, args);
+ return function(){
+ waitProgram( x );
+ };
+}
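+
+// Usage sketch (hypothetical snippet): the return value is a join function
+// that blocks until the spawned shell exits:
+//   var join = startParallelShell("db.foo.insert({ x: 1 });");
+//   join();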
+
+var testingReplication = false;
+
+function skipIfTestingReplication(){
+ if (testingReplication) {
+ print("skipIfTestingReplication skipping");
+ quit(0);
+ }
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/slow_weekly_util.js b/src/mongo/gotools/test/legacy28/jstests/libs/slow_weekly_util.js
new file mode 100644
index 00000000000..f5f89643f16
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/slow_weekly_util.js
@@ -0,0 +1,20 @@
+
+SlowWeeklyMongod = function( name ) {
+ this.name = name;
+ this.port = 30201;
+
+ this.start = new Date();
+
+ this.conn = startMongodEmpty("--port", this.port, "--dbpath", MongoRunner.dataPath + this.name , "--smallfiles", "--nojournal" );
+};
+
+SlowWeeklyMongod.prototype.getDB = function( name ) {
+ return this.conn.getDB( name );
+}
+
+SlowWeeklyMongod.prototype.stop = function(){
+ stopMongod( this.port );
+ var end = new Date();
+ print( "slowWeekly test: " + this.name + " completed successfully in " + ( ( end.getTime() - this.start.getTime() ) / 1000 ) + " seconds" );
+};
+
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/smoke.pem b/src/mongo/gotools/test/legacy28/jstests/libs/smoke.pem
new file mode 100644
index 00000000000..7dddf222386
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/smoke.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDYTCCAkmgAwIBAgIBCDANBgkqhkiG9w0BAQUFADBrMQ4wDAYDVQQDEwVzbW9r
+ZTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1O
+ZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwHhcN
+MTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBrMQ4wDAYDVQQDEwVzbW9rZTEP
+MA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1OZXcg
+WW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb4fOWDomCPyYesh42pQ/bEHdK7r73
+06x1hdku9i+nytCSxhhuITGC1FA4ZIbYdQC/jgfzC0D+SDFKCCyNZA/2Pxam9y3F
+QHrueNtD9bw/OB98D6hC2fCow5OxUqWDkee2hQRTwLKDzec+H72AkwURh8oTfJsl
+LL/1YITZs9kfs59r8HG2YAT7QBbg3xBmK0wZvL4V/FY/OeeR92pIgjUU/6xm/1LU
+bhNHl5JTrXQxPpmvDb1ysiI0mMLeUz7UI+Pe/9mn91dHwgkprWyFi6VnV3/aW7DC
+nW/DklOPD8vMWu2A6iYU0fZbcj4vGM607vst5QLDMoD5Y2ilrKLiTRa5AgMBAAGj
+EDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAJc64d76+eyNGX6C
+5r4IdFF3zJjkLs/NcSMReUTEv4zAdJCn7c1FNRkQBS3Ky2CeSGmiyYOhWZ7usv7x
+EvprmHouWsrQXV+o5EIW366e5wzg0c5KWO3oBIRjx4hDkRSQSjJjy5NFrc8fAW9x
+eeaHFWdqk3CHvqBhd32QYEs4+7v8hBYM3PBkj8qghXta4ZZS89cTMSjhu5s4Opje
+qUzGzoHat2VBdYzIpVOorYMFXObwCeQkCAXO5epuGZ0QhML66hc7FuOsW75kI9aW
+QXVoM/z2Gb1wbBYnwHOXtClK783S3RdV0uJun/pVj+VeHb6fyIQRmC5d0eJ0C8mY
+X+acnvA=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA2+Hzlg6Jgj8mHrIeNqUP2xB3Su6+99OsdYXZLvYvp8rQksYY
+biExgtRQOGSG2HUAv44H8wtA/kgxSggsjWQP9j8WpvctxUB67njbQ/W8PzgffA+o
+QtnwqMOTsVKlg5HntoUEU8Cyg83nPh+9gJMFEYfKE3ybJSy/9WCE2bPZH7Ofa/Bx
+tmAE+0AW4N8QZitMGby+FfxWPznnkfdqSII1FP+sZv9S1G4TR5eSU610MT6Zrw29
+crIiNJjC3lM+1CPj3v/Zp/dXR8IJKa1shYulZ1d/2luwwp1vw5JTjw/LzFrtgOom
+FNH2W3I+LxjOtO77LeUCwzKA+WNopayi4k0WuQIDAQABAoIBAQDRFgAaDcLGfqQS
+Bk/iqHz2U6cMMxCW+sqAioGmPWW9iYdiOkra1meNP7T0mur7A+9tN3LpsybfZeiw
+vCsZXDAteXph1KPKcPE0uOnPqumRuB2ATCc1Qqas5CUaNju7a8/J6Jzfw1o9KVud
+4HLDw4nLTLNkalXhOLdkbp6FoZZypAgc8OnSdw7z9Kri6VndkddX3fWv4t203XwT
+AvBxvy4Qfblz6VKYRnjj2CPvo/kD+ncFEg+S6u8/LkghTX7CYeMHdTC0P9jOcEK2
+PMm3kS3sX7VkypsAirYK5QtBWxur+mINxfOBDtRlA2RaJQnikRiGb14bMkLx8Liy
+JNjEHSLdAoGBAP9+KpjniozZIbrcS79wdRrW+ARyDp1Plzyd4nQxfWmQ//nsnK5T
+EYCFXWTR/ldkAoHpD+bGGU02p1+1u4vmWqw/x+Qy56Gh/eylhe0RvYEjkVLyreuc
+bXu0BFlKVgRlBq1ZyXnr2lz3bAIZxvZs13lZn6qVPMt7w2/JTCal9jw7AoGBANxR
+sGik9mq/678nzLiNlf/LcwIz7siuyISoWDOaVEVva0uorqctVqL95w0f+3FXqBO/
+5BiJRFo5D8SfzRjkNkJ7V+rm+7/CjtsjEw2Ue+ZJYPlm+Wr545GYmhU9QH9NLZIN
+JBwTVWjLgdsyQyi0Gc+xMraBwEwoyS8cO17uHO2bAoGBANRmO91/6BPt0ve4epR9
+Vi1o9yki9PlcmHtBOmikWAFyFQvd4+eckVlKBflyBkL6locPjTOqDpC9VengeDj2
+2PyHzZLtqtkZhbK9bJhIfkWknwTZUTMliXMkldTxUo82uZVVpoRgSdmtq7IXYeut
+UnjExFMY3EDB9BizvUYIBKvPAoGAViQ6bS/SiPpxGlRdXus88r6BQSM9AYoVLIkF
+s2dr+5oMwZA6eXLopOHRLPiMP0yekto8PLuu1ffpil9QuaLA9E11moqlc9yGLngQ
+QwcDSo72M41nh8Qcjhi0ZgmE5kEuyCQLMk783fRz2VhVmdyRGvuVcHZa0WxA/QJ0
+1DEVbnECgYEA3i2PGHUvU2TIFNvubw3qdH5y7FXafF+O0ulQ8e6r/CbVAG14Z6xP
+RHLc7/JIYK9CG1PWCbkjiHZ4MsKFuRWFrUMrwSj8M3euCaEIxa/Co60qQ/CnZiZ6
+geleTtUcTZ2T0pqGLnrHwlzhLpCkPJPyjcfQjjEZRwd0bVFX6b3C/rw=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/test_background_ops.js b/src/mongo/gotools/test/legacy28/jstests/libs/test_background_ops.js
new file mode 100644
index 00000000000..b3f6f593947
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/test_background_ops.js
@@ -0,0 +1,340 @@
+//
+// Utilities for running background operations while other test operations are in progress
+//
+
+/**
+ * Allows synchronization between background ops and the test operations
+ */
+var waitForLock = function( mongo, name ){
+
+ var ts = new ObjectId()
+ var lockColl = mongo.getCollection( "config.testLocks" )
+
+ lockColl.update({ _id : name, state : 0 }, { $set : { state : 0 } }, true)
+
+ //
+ // Wait until we can set the state to 1 with our id
+ //
+
+ var startTime = new Date().getTime()
+
+ assert.soon( function() {
+ lockColl.update({ _id : name, state : 0 }, { $set : { ts : ts, state : 1 } })
+ var gleObj = lockColl.getDB().getLastErrorObj()
+
+ if( new Date().getTime() - startTime > 20 * 1000 ){
+ print( "Waiting for..." )
+ printjson( gleObj )
+ printjson( lockColl.findOne() )
+ printjson( ts )
+ }
+
+ return gleObj.n == 1 || gleObj.updatedExisting
+ }, "could not acquire lock", 30 * 1000, 100 )
+
+ print( "Acquired lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+
+ // Set the state back to 0
+ var unlock = function(){
+ print( "Releasing lock " + tojson( { _id : name, ts : ts } ) + " curr : " +
+ tojson( lockColl.findOne({ _id : name }) ) )
+ lockColl.update({ _id : name, ts : ts }, { $set : { state : 0 } })
+ }
+
+ // Return an object we can invoke unlock on
+ return { unlock : unlock }
+}
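+
+// Usage sketch (hypothetical lock name): acquire, do critical work, release:
+//   var lock = waitForLock(db.getMongo(), "myLock");
+//   try { /* critical section */ } finally { lock.unlock(); }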
+
+/**
+ * Allows a test or background op to say it's finished
+ */
+var setFinished = function( mongo, name, finished ){
+ if( finished || finished == undefined )
+ mongo.getCollection( "config.testFinished" ).update({ _id : name }, { _id : name }, true )
+ else
+ mongo.getCollection( "config.testFinished" ).remove({ _id : name })
+}
+
+/**
+ * Checks whether a test or background op is finished
+ */
+var isFinished = function( mongo, name ){
+ return mongo.getCollection( "config.testFinished" ).findOne({ _id : name }) != null
+}
+
+/**
+ * Sets the result of a background op
+ */
+var setResult = function( mongo, name, result, err ){
+ mongo.getCollection( "config.testResult" ).update({ _id : name }, { _id : name, result : result, err : err }, true )
+}
+
+/**
+ * Gets the result for a background op
+ */
+var getResult = function( mongo, name ){
+ return mongo.getCollection( "config.testResult" ).findOne({ _id : name })
+}
+
+/**
+ * Overrides the parallel shell code in mongo
+ */
+function startParallelShell( jsCode, port ){
+
+ var x;
+ if ( port ) {
+ x = startMongoProgramNoConnect( "mongo" , "--port" , port , "--eval" , jsCode );
+ } else {
+ x = startMongoProgramNoConnect( "mongo" , "--eval" , jsCode , db ? db.getMongo().host : null );
+ }
+
+ return function(){
+ jsTestLog( "Waiting for shell " + x + "..." )
+ waitProgram( x );
+ jsTestLog( "Shell " + x + " finished." )
+ };
+}
+
+startParallelOps = function( mongo, proc, args, context ){
+
+ var procName = proc.name + "-" + new ObjectId()
+ var seed = new ObjectId( new ObjectId().valueOf().split("").reverse().join("") )
+ .getTimestamp().getTime()
+
+ // Make sure we aren't finished before we start
+ setFinished( mongo, procName, false )
+ setResult( mongo, procName, undefined, undefined )
+
+ // TODO: Make this a context of its own
+ var procContext = { procName : procName,
+ seed : seed,
+ waitForLock : waitForLock,
+ setFinished : setFinished,
+ isFinished : isFinished,
+ setResult : setResult,
+
+ setup : function( context, stored ){
+
+ waitForLock = function(){
+ return context.waitForLock( db.getMongo(), context.procName )
+ }
+ setFinished = function( finished ){
+ return context.setFinished( db.getMongo(), context.procName, finished )
+ }
+ isFinished = function(){
+ return context.isFinished( db.getMongo(), context.procName )
+ }
+ setResult = function( result, err ){
+ return context.setResult( db.getMongo(), context.procName, result, err )
+ }
+ }}
+
+ var bootstrapper = function( stored ){
+
+ var procContext = stored.procContext
+ procContext.setup( procContext, stored )
+
+ var contexts = stored.contexts
+ eval( "contexts = " + contexts )
+
+ for( var i = 0; i < contexts.length; i++ ){
+ if( typeof( contexts[i] ) != "undefined" ){
+ // Evaluate all contexts
+ contexts[i]( procContext )
+ }
+ }
+
+ var operation = stored.operation
+ eval( "operation = " + operation )
+
+ var args = stored.args
+ eval( "args = " + args )
+
+ result = undefined
+ err = undefined
+
+ try{
+ result = operation.apply( null, args )
+ }
+ catch( e ){
+ err = e
+ }
+
+ setResult( result, err )
+ }
+
+ var contexts = [ RandomFunctionContext, context ]
+
+ var testDataColl = mongo.getCollection( "config.parallelTest" )
+
+ testDataColl.insert({ _id : procName,
+ bootstrapper : tojson( bootstrapper ),
+ operation : tojson( proc ),
+ args : tojson( args ),
+ procContext : procContext,
+ contexts : tojson( contexts ) })
+
+ assert.eq( null, testDataColl.getDB().getLastError() )
+
+ var bootstrapStartup =
+ "{ var procName = '" + procName + "'; " +
+ "var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
+ ".findOne({ _id : procName }); " +
+ "var bootstrapper = stored.bootstrapper; " +
+ "eval( 'bootstrapper = ' + bootstrapper ); " +
+ "bootstrapper( stored ); " +
+ "}"
+
+
+ var oldDB = db
+ db = mongo.getDB( "test" )
+
+ jsTest.log( "Starting " + proc.name + " operations..." )
+
+ var rawJoin = startParallelShell( bootstrapStartup )
+
+ db = oldDB
+
+
+ var join = function(){
+ setFinished( mongo, procName, true )
+
+ rawJoin();
+ result = getResult( mongo, procName )
+
+ assert.neq( result, null )
+
+ if( result.err ) throw Error("Error in parallel ops " + procName + " : "
+ + tojson( result.err ) )
+
+ else return result.result
+ }
+
+ join.isFinished = function(){
+ return isFinished( mongo, procName )
+ }
+
+ join.setFinished = function( finished ){
+ return setFinished( mongo, procName, finished )
+ }
+
+ join.waitForLock = function( name ){
+ return waitForLock( mongo, name )
+ }
+
+ return join
+}
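+
+// Minimal sketch (hypothetical operation): run a named function with args in a
+// parallel shell and join on its result:
+//   var join = startParallelOps(db.getMongo(),
+//                               function incr(n) { return n + 1; },
+//                               [ 41 ]);
+//   assert.eq(42, join());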
+
+var RandomFunctionContext = function( context ){
+
+ Random.srand( context.seed );
+
+ Random.randBool = function(){ return Random.rand() > 0.5 }
+
+ Random.randInt = function( min, max ){
+
+ if( max == undefined ){
+ max = min
+ min = 0
+ }
+
+ return min + Math.floor( Random.rand() * max )
+ }
+
+ Random.randShardKey = function(){
+
+ var numFields = 2 //Random.randInt(1, 3)
+
+ var key = {}
+ for( var i = 0; i < numFields; i++ ){
+ var field = String.fromCharCode( "a".charCodeAt() + i )
+ key[ field ] = 1
+ }
+
+ return key
+ }
+
+ Random.randShardKeyValue = function( shardKey ){
+
+ var keyValue = {}
+ for( field in shardKey ){
+ keyValue[ field ] = Random.randInt(1, 100)
+ }
+
+ return keyValue
+ }
+
+ Random.randCluster = function(){
+
+ var numShards = 2 //Random.randInt( 1, 10 )
+ var rs = false //Random.randBool()
+ var st = new ShardingTest({ shards : numShards,
+ mongos : 4,
+ other : { separateConfig : true, rs : rs } })
+
+ return st
+ }
+}
+
+
+//
+// Some utility operations
+//
+
+function moveOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ while( ! isFinished() ){
+
+ var findKey = Random.randShardKeyValue( shardKey )
+ var toShard = shards[ Random.randInt( shards.length ) ]._id
+
+ try {
+ printjson( admin.runCommand({ moveChunk : collName,
+ find : findKey,
+ to : toShard }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping moveOps..." )
+}
+
+function splitOps( collName, options ){
+
+ options = options || {}
+
+ var admin = db.getMongo().getDB( "admin" )
+ var config = db.getMongo().getDB( "config" )
+ var shards = config.shards.find().toArray()
+ var shardKey = config.collections.findOne({ _id : collName }).key
+
+ while( ! isFinished() ){
+
+ var middleKey = Random.randShardKeyValue( shardKey )
+
+ try {
+ printjson( admin.runCommand({ split : collName,
+ middle : middleKey }) )
+ }
+ catch( e ){
+ printjson( e )
+ }
+
+ sleep( 1000 )
+ }
+
+ jsTest.log( "Stopping splitOps..." )
+}
+
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/testconfig b/src/mongo/gotools/test/legacy28/jstests/libs/testconfig
new file mode 100644
index 00000000000..4b09f37ad13
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/testconfig
@@ -0,0 +1,6 @@
+fastsync = true
+#comment line
+#commentedflagwithan = false
+version = false
+help = false
+sysinfo = false
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/testconfig.json b/src/mongo/gotools/test/legacy28/jstests/libs/testconfig.json
new file mode 100644
index 00000000000..5af32aad7d3
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/testconfig.json
@@ -0,0 +1,4 @@
+{
+ "fastsync" : true,
+ "version" : false
+}
diff --git a/src/mongo/gotools/test/legacy28/jstests/libs/trace_missing_docs.js b/src/mongo/gotools/test/legacy28/jstests/libs/trace_missing_docs.js
new file mode 100644
index 00000000000..3faf50b4606
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/libs/trace_missing_docs.js
@@ -0,0 +1,90 @@
+
+//
+// On error inserting documents, traces back and shows where the document was dropped
+//
+
+function traceMissingDoc( coll, doc, mongos ) {
+
+ if (mongos) coll = mongos.getCollection(coll + "");
+ else mongos = coll.getMongo();
+
+ var config = mongos.getDB( "config" );
+ var shards = config.shards.find().toArray();
+ for ( var i = 0; i < shards.length; i++ ) {
+ shards[i].conn = new Mongo( shards[i].host );
+ }
+
+ var shardKeyPatt = config.collections.findOne({ _id : coll + "" }).key;
+
+ // Project out the shard key
+ var shardKey = {};
+ for ( var k in shardKeyPatt ) {
+ if ( doc[k] == undefined ) {
+ jsTest.log( "Shard key " + tojson( shardKey ) +
+ " not found in doc " + tojson( doc ) +
+ ", falling back to _id search..." );
+ shardKeyPatt = { _id : 1 };
+ shardKey = { _id : doc['_id'] };
+ break;
+ }
+ shardKey[k] = doc[k];
+ }
+
+ if ( doc['_id'] == undefined ) {
+ jsTest.log( "Id not found in doc " + tojson( doc ) + " cannot trace oplog entries." );
+ return;
+ }
+
+ jsTest.log( "Using shard key : " + tojson( shardKey ) );
+
+ var allOps = [];
+ for ( var i = 0; i < shards.length; i++ ) {
+
+ var oplog = shards[i].conn.getCollection( "local.oplog.rs" );
+ if ( !oplog.findOne() ) {
+ oplog = shards[i].conn.getCollection( "local.oplog.$main" );
+ }
+
+ if ( !oplog.findOne() ) {
+ jsTest.log( "No oplog was found on shard " + shards[i]._id );
+ continue;
+ }
+
+ var addKeyQuery = function( query, prefix ) {
+ for ( var k in shardKey ) {
+ query[prefix + '.' + k] = shardKey[k];
+ }
+ return query;
+ };
+
+ var addToOps = function( cursor ) {
+ cursor.forEach( function( doc ) {
+ doc.shard = shards[i]._id;
+ doc.realTime = new Date( doc.ts.getTime() * 1000 );
+ allOps.push( doc );
+ });
+ };
+
+ // Find ops
+ addToOps( oplog.find( addKeyQuery( { op : 'i' }, 'o' ) ) );
+ var updateQuery = { $or : [ addKeyQuery( { op : 'u' }, 'o2' ),
+ { op : 'u', 'o2._id' : doc['_id'] } ] };
+ addToOps( oplog.find( updateQuery ) );
+ addToOps( oplog.find({ op : 'd', 'o._id' : doc['_id'] }) );
+ }
+
+ var compareOps = function( opA, opB ) {
+ if ( opA.ts < opB.ts ) return -1;
+ if ( opB.ts < opA.ts ) return 1;
+ else return 0;
+ }
+
+ allOps.sort( compareOps );
+
+ print( "Ops found for doc " + tojson( doc ) + " on each shard:\n" );
+ for ( var i = 0; i < allOps.length; i++ ) {
+ printjson( allOps[i] );
+ }
+
+ return allOps;
+} \ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/replsets/rslib.js b/src/mongo/gotools/test/legacy28/jstests/replsets/rslib.js
new file mode 100644
index 00000000000..6a16db232e4
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/replsets/rslib.js
@@ -0,0 +1,115 @@
+
+var count = 0;
+var w = 0;
+
+var wait = function(f,msg) {
+ w++;
+ var n = 0;
+ while (!f()) {
+ if( n % 4 == 0 )
+ print("waiting " + w);
+ if (++n == 4) {
+ print("" + f);
+ }
+ assert(n < 200, 'tried 200 times, giving up on ' + msg );
+ sleep(1000);
+ }
+};
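+
+// Usage sketch (hypothetical predicate): poll until a condition holds or give up:
+//   wait(function() { return conn.getDB("admin").isMaster().ismaster; },
+//        "node never became primary");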
+
+/**
+ * Use this to do something once every 4 iterations.
+ *
+ * <pre>
+ * for (i=0; i<1000; i++) {
+ * occasionally(function() { print("4 more iterations"); });
+ * }
+ * </pre>
+ */
+var occasionally = function(f, n) {
+ var interval = n || 4;
+ if (count % interval == 0) {
+ f();
+ }
+ count++;
+};
+
+var reconnect = function(a) {
+ wait(function() {
+ try {
+ // make this work with either dbs or connections
+ if (typeof(a.getDB) == "function") {
+ db = a.getDB('foo');
+ }
+ else {
+ db = a;
+ }
+ db.bar.stats();
+ if (jsTest.options().keyFile || jsTest.options().useX509) { // SERVER-4241: Shell connections don't re-authenticate on reconnect
+ return jsTest.authenticate(db.getMongo());
+ }
+ return true;
+ } catch(e) {
+ print(e);
+ return false;
+ }
+ });
+};
+
+
+var getLatestOp = function(server) {
+ server.getDB("admin").getMongo().setSlaveOk();
+ var log = server.getDB("local")['oplog.rs'];
+ var cursor = log.find({}).sort({'$natural': -1}).limit(1);
+ if (cursor.hasNext()) {
+ return cursor.next();
+ }
+ return null;
+};
+
+
+var waitForAllMembers = function(master, timeout) {
+ var failCount = 0;
+
+ assert.soon( function() {
+ var state = null
+ try {
+ state = master.getSisterDB("admin").runCommand({replSetGetStatus:1});
+ failCount = 0;
+ } catch ( e ) {
+ // Connection can get reset on replica set failover causing a socket exception
+ print( "Calling replSetGetStatus failed" );
+ print( e );
+ return false;
+ }
+ occasionally(function() { printjson(state); }, 10);
+
+ for (var m in state.members) {
+ if (state.members[m].state != 1 && // PRIMARY
+ state.members[m].state != 2 && // SECONDARY
+ state.members[m].state != 7) { // ARBITER
+ return false;
+ }
+ }
+ printjson( state );
+ return true;
+ }, "not all members ready", timeout || 60000);
+
+ print( "All members are now in state PRIMARY, SECONDARY, or ARBITER" );
+};
+
+var reconfig = function(rs, config) {
+ var admin = rs.getPrimary().getDB("admin");
+
+ try {
+ var ok = admin.runCommand({replSetReconfig : config});
+ assert.eq(ok.ok,1);
+ }
+ catch(e) {
+ print(e);
+ }
+
+ master = rs.getPrimary().getDB("admin");
+ waitForAllMembers(master);
+
+ return master;
+};
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/csv1.js b/src/mongo/gotools/test/legacy28/jstests/tool/csv1.js
new file mode 100644
index 00000000000..e95d8aa8b41
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/csv1.js
@@ -0,0 +1,43 @@
+// csv1.js
+
+
+t = new ToolTest( "csv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : 1 , b : "foo,bar\"baz,qux" , c: 5, 'd d': -6 , e: '-', f : "."};
+
+assert.eq( 0 , c.count() , "setup1" );
+c.insert( base );
+delete base._id
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv" , "-f" , "a,b,c,d d,e,f" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c,d d,e,f" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id
+delete a[1]._id
+assert.docEq( { a : "a" , b : "b" , c : "c" , 'd d': "d d", e: 'e', f : "f"}, a[1], "csv parse 1" );
+assert.docEq( base, a[0], "csv parse 0" )
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop 2" )
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne()
+delete x._id;
+assert.docEq( base, x, "csv parse 2" )
+
+
+
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/csvexport1.js b/src/mongo/gotools/test/legacy28/jstests/tool/csvexport1.js
new file mode 100644
index 00000000000..2cd3c9c0447
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/csvexport1.js
@@ -0,0 +1,65 @@
+// csvexport1.js
+
+
+t = new ToolTest( "csvexport1" )
+
+c = t.startDB( "foo" );
+
+assert.eq( 0 , c.count() , "setup1" );
+
+objId = ObjectId()
+
+c.insert({ a : new NumberInt(1) , b : objId , c: [1, 2, 3], d : {a : "hello", b : "world"} , e: '-'})
+c.insert({ a : -2.0, c : MinKey, d : "Then he said, \"Hello World!\"", e : new NumberLong(3)})
+c.insert({ a : new BinData(0, "1234"), b : ISODate("2009-08-27T12:34:56.789"),
+ c : new Timestamp(1234, 9876), d : /foo*\"bar\"/i,
+ e : function foo() { print("Hello World!"); }})
+
+assert.eq( 3 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b,c,d,e")
+
+
+c.drop()
+
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+
+assert.soon ( 3 + " == c.count()", "after import");
+
+// Note: Exporting and Importing to/from CSV is not designed to be round-trippable
+expected = []
+expected.push({ a : 1, b : "ObjectId(" + objId.valueOf() + ")", c : [ 1, 2, 3 ], d : { "a" : "hello", "b" : "world" }, e : "-"})
+expected.push({ a : -2.0, b : "", c : "$MinKey", d : "Then he said, \"Hello World!\"", e : 3})
+// "t" should be 1234, but the shell interprets the first field of timestamps as milliseconds while
+// they are stored as seconds. See SERVER-7718.
+expected.push({ a : "D76DF8", b : "2009-08-27T12:34:56.789Z",
+ c : { "$timestamp" : { "t" : 1234, "i" : 9876 } },
+ d : "/foo*\\\"bar\\\"/i", e : tojson(function foo() { print("Hello World!"); })})
+
+actual = []
+actual.push(c.find({a : 1}).toArray()[0]);
+actual.push(c.find({a : -2.0}).toArray()[0]);
+actual.push(c.find({a : "D76DF8"}).toArray()[0]);
+
+for (i = 0; i < expected.length; i++) {
+ delete actual[i]._id
+ assert.eq(Object.keys(expected[i]).length, Object.keys(actual[i]).length)
+ keys = Object.keys(expected[i])
+ for(var j=0;j<keys.length;j++){
+ expectedVal = expected[i][keys[j]]
+ if((typeof expectedVal)== "object"){
+ // For fields which contain arrays or objects, they have been
+ // exported as JSON - parse the JSON in the output and verify
+ // that it matches the original document's value
+ assert.docEq(expectedVal, JSON.parse(actual[i][keys[j]]), "CSV export " + i)
+ }else{
+ // Otherwise just compare the values directly
+ assert.eq(expectedVal, actual[i][keys[j]], "CSV export " + i)
+ }
+ }
+}
+
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/csvexport2.js b/src/mongo/gotools/test/legacy28/jstests/tool/csvexport2.js
new file mode 100644
index 00000000000..2dc87b3c641
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/csvexport2.js
@@ -0,0 +1,32 @@
+// csvexport2.js
+
+
+t = new ToolTest( "csvexport2" )
+
+c = t.startDB( "foo" );
+
+// This test is designed to test exporting of a CodeWithScope object.
+// However, due to SERVER-3391, it is not possible to create a CodeWithScope object in the
+// mongo shell, so this test does not work. Once SERVER-3391 is resolved, it should be uncommented.
+
+//assert.eq( 0 , c.count() , "setup1" );
+
+//c.insert({ a : 1 , b : Code("print(\"Hello \" + x);", {"x" : "World!"})})
+//assert.eq( 1 , c.count() , "setup2" );
+//t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" , "--csv", "-f", "a,b")
+
+
+//c.drop()
+
+//assert.eq( 0 , c.count() , "after drop" )
+//t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo", "--type", "csv", "--headerline");
+//assert.soon ( 1 + " == c.count()", "after import");
+
+//expected = { a : 1, b : "\"{ \"$code\" : print(\"Hello \" + x); , \"$scope\" : { \"x\" : \"World!\" } }"};
+//actual = c.findOne()
+
+//delete actual._id;
+//assert.eq( expected, actual );
+
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/csvimport1.js b/src/mongo/gotools/test/legacy28/jstests/tool/csvimport1.js
new file mode 100644
index 00000000000..87320afec87
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/csvimport1.js
@@ -0,0 +1,41 @@
+// csvimport1.js
+
+
+t = new ToolTest( "csvimport1" )
+
+c = t.startDB( "foo" );
+
+base = []
+base.push({ a : 1, b : "this is some text.\nThis text spans multiple lines, and just for fun\ncontains a comma", "c" : "This has leading and trailing whitespace!" })
+base.push({a : 2, b : "When someone says something you \"put it in quotes\"", "c" : "I like embedded quotes/slashes\\backslashes" })
+base.push({a : 3, b : " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", "c" : "" })
+base.push({a : 4, b : "", "c" : "How are empty entries handled?" })
+base.push({a : 5, b : "\"\"", c : "\"This string is in quotes and contains empty quotes (\"\")\""})
+base.push({ a : "a" , b : "b" , c : "c"})
+
+assert.eq( 0 , c.count() , "setup" );
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "-f" , "a,b,c" );
+assert.soon( base.length + " == c.count()" , "after import 1 " );
+
+a = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length; i++ ) {
+ delete a[i]._id
+ assert.docEq( base[i], a[i], "csv parse " + i)
+}
+
+c.drop()
+assert.eq( 0 , c.count() , "after drop" )
+
+t.runTool( "import" , "--file" , "jstests/tool/data/csvimport1.csv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "csv" , "--headerline" )
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( base.length - 1 , c.count() , "after import 2" );
+
+x = c.find().sort( { a : 1 } ).toArray();
+for (i = 0; i < base.length - 1; i++ ) {
+ delete x[i]._id
+ assert.docEq( base[i], x[i], "csv parse with headerline " + i)
+}
+
+
+t.stop()
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/data/a.tsv b/src/mongo/gotools/test/legacy28/jstests/tool/data/a.tsv
new file mode 100644
index 00000000000..1e094179a63
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/data/a.tsv
@@ -0,0 +1,2 @@
+a b c d e
+ 1 foobar 5 -6
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/data/csvimport1.csv b/src/mongo/gotools/test/legacy28/jstests/tool/data/csvimport1.csv
new file mode 100644
index 00000000000..256d40a9184
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/data/csvimport1.csv
@@ -0,0 +1,8 @@
+a,b,c
+1,"this is some text.
+This text spans multiple lines, and just for fun
+contains a comma", "This has leading and trailing whitespace!"
+2, "When someone says something you ""put it in quotes""", I like embedded quotes/slashes\backslashes
+ 3 , " This line contains the empty string and has leading and trailing whitespace inside the quotes! ", ""
+ "4" ,, How are empty entries handled?
+"5","""""", """This string is in quotes and contains empty quotes ("""")"""
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/data/dumprestore6/foo.bson b/src/mongo/gotools/test/legacy28/jstests/tool/data/dumprestore6/foo.bson
new file mode 100644
index 00000000000..b8f8f99e6bf
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/data/dumprestore6/foo.bson
Binary files differ
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/data/dumprestore6/system.indexes.bson b/src/mongo/gotools/test/legacy28/jstests/tool/data/dumprestore6/system.indexes.bson
new file mode 100644
index 00000000000..dde25da302a
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/data/dumprestore6/system.indexes.bson
Binary files differ
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumpauth.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumpauth.js
new file mode 100644
index 00000000000..af6706d107d
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumpauth.js
@@ -0,0 +1,29 @@
+// dumpauth.js
+// test mongodump with authentication
+
+
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_dumpauth";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+db.createUser({user: "testuser" , pwd: "testuser", roles: jsTest.adminUserRoles});
+assert( db.auth( "testuser" , "testuser" ) , "auth failed" );
+
+t = db[ baseName ];
+t.drop();
+
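+// t["testcol"] below resolves to the shell's dotted sub-collection namespace
+// "tool_dumpauth.testcol" -- the same namespace passed to --collection later.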
+for(var i = 0; i < 100; i++) {
+ t["testcol"].save({ "x": i });
+}
+
+x = runMongoProgram( "mongodump",
+ "--db", "admin",
+ "--authenticationDatabase=admin",
+ "-u", "testuser",
+ "-p", "testuser",
+ "-h", "127.0.0.1:"+port,
+ "-vvv",
+ "--collection", baseName+".testcol" );
+assert.eq(x, 0, "mongodump should succeed with authentication");
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumpfilename1.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumpfilename1.js
new file mode 100644
index 00000000000..38b430896bf
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumpfilename1.js
@@ -0,0 +1,13 @@
+// dumpfilename1.js
+
+// Test designed to make sure that dumping a collection whose name contains "/" fails.
+
+t = new ToolTest( "dumpfilename1" );
+
+t.startDB( "foo" );
+
+c = t.db;
+assert.writeOK(c.getCollection("df/").insert({ a: 3 }));
+assert(t.runTool( "dump" , "--out" , t.ext ) != 0, "dump should fail with non-zero return code")
+t.stop();
+
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore1.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore1.js
new file mode 100644
index 00000000000..a0f6f844d9e
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore1.js
@@ -0,0 +1,32 @@
+// dumprestore1.js
+
+
+t = new ToolTest( "dumprestore1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save( { a : 22 } );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "dump" , "--out" , t.ext );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+// ensure that --collection is used with --db. See SERVER-7721
+var ret = t.runTool( "dump" , "--collection" , "col" );
+assert.neq( ret, 0, "mongodump should return failure code" );
+t.stop();
+
+// Ensure that --db and --collection are provided when filename is "-" (stdin).
+ret = t.runTool( "restore" , "--collection" , "coll", "--dir", "-" );
+assert.neq( ret, 0, "mongorestore should return failure code" );
+t.stop();
+ret = t.runTool( "restore" , "--db" , "db", "--dir", "-" );
+assert.neq( ret, 0, "mongorestore should return failure code" );
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore10.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore10.js
new file mode 100644
index 00000000000..5a9426dd7c4
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore10.js
@@ -0,0 +1,64 @@
+// simple test to ensure write concern functions as expected
+
+
+var name = "dumprestore10";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+var total = 1000;
+
+{
+ step("store data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < total; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("mongodump from replset");
+
+var data = MongoRunner.dataDir + "/dumprestore10-dump1/";
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+replTest.ports[0], "--out", data );
+
+
+{
+ step("remove data after dumping");
+ master.getDB("foo").getCollection("bar").drop();
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+}
+
+step("try mongorestore with write concern");
+
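+// --writeConcern 2 makes the tool wait for acknowledgement from two replica set
+// members per write; a rough shell-side equivalent (illustrative only) would be:
+//   db.bar.insert({ x: 0 }, { writeConcern: { w: 2 } });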
+runMongoProgram( "mongorestore", "--writeConcern", "2", "--host", "127.0.0.1:"+replTest.ports[0], "--dir", data );
+
+// no waiting for replication
+var x = master.getDB("foo").getCollection("bar").count();
+
+assert.eq(x, total, "mongorestore should have successfully restored the collection");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore3.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore3.js
new file mode 100644
index 00000000000..45067c7ff06
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore3.js
@@ -0,0 +1,61 @@
+// dumprestore3.js
+
+
+var name = "dumprestore3";
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 2} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+{
+ step("populate master");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait for slaves");
+ replTest.awaitReplication();
+}
+
+{
+ step("dump & restore a db into a slave");
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+ var c = conn.getDB("foo").bar;
+ c.save({ a: 22 });
+ assert.eq(1, c.count(), "setup2");
+}
+
+step("try mongorestore to slave");
+
+var data = MongoRunner.dataDir + "/dumprestore3-other1/";
+resetDbpath(data);
+runMongoProgram( "mongodump", "--host", "127.0.0.1:"+port, "--out", data );
+
+var x = runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+replTest.ports[1], "--dir", data );
+assert.eq(x, 1, "mongorestore should exit w/ 1 on slave");
+
+step("try mongoimport to slave");
+
+dataFile = MongoRunner.dataDir + "/dumprestore3-other2.json";
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:"+port, "--out", dataFile, "--db", "foo", "--collection", "bar" );
+
+x = runMongoProgram( "mongoimport", "--host", "127.0.0.1:"+replTest.ports[1], "--file", dataFile );
+assert.eq(x, 1, "mongoreimport should exit w/ 1 on slave"); // windows return is signed
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore4.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore4.js
new file mode 100644
index 00000000000..337d9c34265
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore4.js
@@ -0,0 +1,43 @@
+// dumprestore4.js -- see SERVER-2186
+
+
+// The point of this test is to ensure that mongorestore successfully
+// constructs indexes when the database being restored into has a
+// different name than the database dumped from. There are 2
+// issues here: (1) if you dumped from database "A" and restore into
+// database "B", B should have exactly the right indexes; (2) if for
+// some reason you have another database called "A" at the time of the
+// restore, mongorestore shouldn't touch it.
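+// In outline (database names A/B are illustrative), the flow under test is:
+//   mongodump -d A --out dump/        # dump carries A's index definitions
+//   mongorestore -d B --dir dump/A    # B must get exactly A's indexes,
+//                                     # while any existing db named A is untouched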
+
+t = new ToolTest( "dumprestore4" );
+
+c = t.startDB( "dumprestore4" );
+
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db2 = db.getSisterDB( dbname2 );
+
+db.dropDatabase(); // make sure it's empty
+db2.dropDatabase(); // make sure everybody's empty
+
+assert.eq( 0 , c.getIndexes().length , "setup1" );
+c.ensureIndex({ x : 1} );
+assert.eq( 2 , c.getIndexes().length , "setup2" ); // _id and x_1
+
+assert.eq( 0, t.runTool( "dump" , "-d" , dbname, "--out", t.ext ), "dump")
+
+// to ensure issue (2), we have to clear out the first db.
+// By inspection, db.dropIndexes() doesn't get rid of the _id index on c,
+// so we have to drop the collection.
+c.drop();
+assert.eq( 0, t.runTool( "restore" , "--dir" , t.ext + "/" + dbname, "-d", dbname2 ), "restore" );
+
+// issue (1)
+assert.eq( 2 , db2.dumprestore4.getIndexes().length , "after restore 1" );
+// issue (2)
+assert.eq( 0 , db.dumprestore4.getIndexes().length , "after restore 2" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore6.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore6.js
new file mode 100644
index 00000000000..1ea55e40f5c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore6.js
@@ -0,0 +1,54 @@
+// dumprestore6.js
+// Test restoring from a dump with an old index version
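+// The fixture dump under jstests/tool/data/dumprestore6 was written with index
+// format v:0; a plain restore should upgrade the index to v:1, while
+// --keepIndexVersion must leave it at v:0 (asserted below).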
+
+
+t = new ToolTest( "dumprestore6" );
+
+c = t.startDB( "foo" );
+db = t.db;
+if (db.serverStatus().storageEngine.name == "mmapv1") {
+ assert.eq( 0 , c.count() , "setup1" );
+
+ t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6")
+
+ assert.soon( "c.findOne()" , "no data after sleep" );
+ assert.eq( 1 , c.count() , "after restore" );
+
+ var indexes = c.getIndexes();
+ assert.eq( 2, indexes.length, "there aren't the correct number of indexes" );
+ var aIndex = null;
+ indexes.forEach(function(index) {
+ if (index.name === "a_1") {
+ aIndex = index;
+ }
+ });
+ assert.neq(null, aIndex, "index doesn't exist" );
+ assert.eq( 1 , aIndex.v, "index version wasn't updated");
+
+ assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+
+ db.dropDatabase()
+ assert.eq( 0 , c.count() , "after drop" );
+
+ t.runTool("restore", "--dir", "jstests/tool/data/dumprestore6", "--db", "jstests_tool_dumprestore6", "--keepIndexVersion")
+
+ assert.soon( "c.findOne()" , "no data after sleep2" );
+ assert.eq( 1 , c.count() , "after restore2" );
+
+ indexes = c.getIndexes();
+ assert.eq( 2, indexes.length, "there aren't the correct number of indexes" );
+ aIndex = null;
+ indexes.forEach(function(index) {
+ if (index.name === "a_1") {
+ aIndex = index;
+ }
+ });
+ assert.neq(null, aIndex, "index doesn't exist" );
+ assert.eq( 0 , aIndex.v, "index version wasn't maintained")
+
+ assert.eq( 1, c.count({v:0}), "dropped the 'v' field from a non-index collection")
+} else {
+ print("skipping index version test on non-mmapv1 storage engine")
+}
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore7.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore7.js
new file mode 100644
index 00000000000..a71725f434b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore7.js
@@ -0,0 +1,66 @@
+var name = "dumprestore7";
+
+
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+step();
+
+var replTest = new ReplSetTest( {name: name, nodes: 1} );
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+
+{
+ step("first chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 0; i < 20; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+
+{
+ step("wait");
+ replTest.awaitReplication();
+ var time = replTest.getPrimary().getDB("local").getCollection("oplog.rs").find().limit(1).sort({$natural:-1}).next();
+ step(time.ts.t);
+}
+
+{
+ step("second chunk of data");
+ var foo = master.getDB("foo");
+ for (i = 30; i < 50; i++) {
+ foo.bar.insert({ x: i, y: "abc" });
+ }
+}
+{
+ var port = 30020;
+ var conn = startMongodTest(port, name + "-other");
+}
+
+step("try mongodump with $timestamp");
+
+var data = MongoRunner.dataDir + "/dumprestore7-dump1/";
+var query = "{\"ts\":{\"$gt\":{\"$timestamp\":{\"t\":"+ time.ts.t + ",\"i\":" + time.ts.i +"}}}}";
+
+MongoRunner.runMongoTool( "mongodump",
+ { "host": "127.0.0.1:"+replTest.ports[0],
+ "db": "local", "collection": "oplog.rs",
+ "query": query, "out": data });
+
+step("try mongorestore from $timestamp");
+
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:"+port, "--dir", data );
+var x = conn.getDB("local").getCollection("oplog.rs").count();
+
+assert.eq(x, 20, "mongorestore should only have the latter 20 entries");
+
+step("stopSet");
+replTest.stopSet();
+
+step("SUCCESS");
+
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore8.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore8.js
new file mode 100644
index 00000000000..edc1a874343
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore8.js
@@ -0,0 +1,107 @@
+// dumprestore8.js
+
+
+// This file tests that indexes and capped collection options get properly dumped and restored.
+// It checks that this works both when doing a full database dump/restore and when doing it just for a single db or collection
+
+t = new ToolTest( "dumprestore8" );
+
+t.startDB( "foo" );
+db = t.db;
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+assert.eq( 0 , db.foo.count() , "setup1" );
+db.foo.save( { a : 1, b : 1 } );
+db.foo.ensureIndex({a:1});
+db.foo.ensureIndex({b:1, _id:-1});
+assert.eq( 1 , db.foo.count() , "setup2" );
+
+
+assert.eq( 0 , db.bar.count() , "setup3" );
+db.createCollection("bar", {capped:true, size:1000, max:10});
+
+for (var i = 0; i < 1000; i++) {
+ db.bar.save( { x : i } );
+}
+db.bar.ensureIndex({x:1});
+
+barDocCount = db.bar.count();
+assert.gt( barDocCount, 0 , "No documents inserted" );
+assert.lt( db.bar.count(), 1000 , "Capped collection didn't evict documents" );
+assert.eq( 5 , db.foo.getIndexes().length + db.bar.getIndexes().length, "Indexes weren't created right" );
+
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped" );
+assert.eq( 0 , db.bar.count() , "bar not dropped" );
+assert.eq( 0 , db.bar.getIndexes().length , "indexes on bar not dropped" );
+assert.eq( 0 , db.foo.getIndexes().length , "indexes on foo not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext );
+
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore." );
+assert.eq( 5 , db.foo.getIndexes().length + db.bar.getIndexes().length, "Indexes weren't created correctly by restore");
+
+// Dump/restore single DB
+
+dumppath = t.ext + "singledbdump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.foo.count() , "foo not dropped2" );
+assert.eq( 0 , db.bar.count() , "bar not dropped2" );
+assert.eq( 0 , db.foo.getIndexes().length , "indexes on foo not dropped2" );
+assert.eq( 0 , db.bar.getIndexes().length , "indexes on bar not dropped2" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname );
+
+db = db.getSiblingDB(dbname2);
+
+assert.soon( "db.foo.findOne()" , "no data after sleep 2" );
+assert.eq( 1 , db.foo.count() , "wrong number of docs restored to foo 2" );
+assert.eq( barDocCount, db.bar.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.bar.save({x:i});
+}
+assert.eq( barDocCount, db.bar.count(), "Capped collection didn't evict documents after restore 2." );
+assert.eq( 5 , db.foo.getIndexes().length + db.bar.getIndexes().length, "Indexes weren't created correctly by restore 2");
+
+
+// Dump/restore single collection
+
+dumppath = t.ext + "singlecolldump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname2, "-c", "bar", "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0 , db.bar.count() , "bar not dropped3" );
+assert.eq( 0 , db.bar.getIndexes().length , "indexes not dropped3" );
+
+t.runTool( "restore" , "-d", dbname, "-c", "baz", "--dir" , dumppath + dbname2 + "/bar.bson" );
+
+db = db.getSiblingDB(dbname);
+
+assert.soon( "db.baz.findOne()" , "no data after sleep 2" );
+assert.eq( barDocCount, db.baz.count(), "wrong number of docs restored to bar 2" );
+for (var i = 0; i < 10; i++) {
+ db.baz.save({x:i});
+}
+assert.eq( barDocCount, db.baz.count(), "Capped collection didn't evict documents after restore 3." );
+assert.eq( 2 , db.baz.getIndexes().length , "Indexes weren't created correctly by restore 3" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore9.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore9.js
new file mode 100644
index 00000000000..cef9a623cf1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore9.js
@@ -0,0 +1,79 @@
+if (0) { // Test disabled until SERVER-3853 is finished.
+var name = "dumprestore9";
+function step(msg) {
+ msg = msg || "";
+ this.x = (this.x || 0) + 1;
+ print('\n' + name + ".js step " + this.x + ' ' + msg);
+}
+
+s = new ShardingTest( "dumprestore9a", 2, 0, 3, { chunksize : 1, enableBalancer : 1 } );
+
+step("Shard collection");
+
+s.adminCommand( { enablesharding : "aaa" } ); // Make this db alphabetically before 'config' so it gets restored first
+s.adminCommand( { shardcollection : "aaa.foo" , key : { x : 1 } } );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+step("insert data");
+
+str = 'a';
+while (str.length < 1024*512) {
+ str += str;
+}
+
+numDocs = 20;
+for (var i = 0; i < numDocs; i++) {
+ coll.insert({x:i, str:str});
+}
+
+step("Wait for balancing");
+
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "aaa" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 );
+
+assert.eq(numDocs, coll.count(), "Documents weren't inserted correctly");
+
+step("dump cluster");
+
+dumpdir = MongoRunner.dataDir + "/dumprestore9-dump1/";
+resetDbpath(dumpdir);
+runMongoProgram( "mongodump", "--host", s._mongos[0].host, "--out", dumpdir );
+
+step("Shutting down cluster");
+
+s.stop();
+
+step("Starting up clean cluster");
+s = new ShardingTest( "dumprestore9b", 2, 0, 3, {chunksize:1} );
+
+db = s.getDB( "aaa" );
+coll = db.foo;
+
+assert.eq(0, coll.count(), "Data wasn't cleaned up by restarting sharding test");
+
+step("Restore data and config");
+
+runMongoProgram( "mongorestore", dumpdir, "--host", s._mongos[1].host, "--restoreShardingConfig", "--forceConfigRestore");
+
+config = s.getDB("config");
+assert(config.databases.findOne({_id:'aaa'}).partitioned, "Config data wasn't restored properly");
+
+assert( s.chunkDiff( "foo" , "aaa" ) < 2, "Chunk data wasn't restored properly");
+
+assert.eq(numDocs, coll.count(), "Didn't restore all documents properly2");
+assert.eq(numDocs, coll.find().itcount(), "Didn't restore all documents properly");
+
+for (var i = 0; i < numDocs; i++) {
+ doc = coll.findOne({x:i});
+ assert.eq(i, doc.x, "Doc missing from the shard it should be on");
+}
+
+for (var i = 0; i < s._connections.length; i++) {
+ assert(s._connections[i].getDB("aaa").foo.count() > 0, "No data on shard: " + s._connections[i].host);
+}
+
+step("Stop cluster");
+s.stop();
+step("SUCCESS");
+}
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestoreWithNoOptions.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestoreWithNoOptions.js
new file mode 100644
index 00000000000..bfd6f4fa579
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestoreWithNoOptions.js
@@ -0,0 +1,114 @@
+// SERVER-6366
+// relates to SERVER-808
+//
+// This file tests that options are not restored upon
+// mongorestore with --noOptionsRestore
+//
+// It checks that this works both when doing a full
+// database dump/restore and when doing it just for a
+// single db or collection.
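+// A minimal sketch of the flag's effect (paths illustrative):
+//   mongodump -d <db> --out dump/
+//   mongorestore --noOptionsRestore --dir dump/   # collections are recreated
+//                                                 # with default options (not capped)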
+
+
+t = new ToolTest( "dumprestoreWithNoOptions" );
+
+t.startDB( "foo" );
+db = t.db;
+
+// We turn this off to prevent the server from touching the 'options' field in system.namespaces.
+// This is important because we check exact values of the 'options' field in this test.
+db.adminCommand({setParameter:1, newCollectionsUsePowerOf2Sizes: false});
+
+dbname = db.getName();
+dbname2 = "NOT_"+dbname;
+
+db.dropDatabase();
+
+var defaultFlags = {}
+
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.capped.getIndexes().length, "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt],
+ 'invalid option:' + tojson(options) + " " + tojson(cappedOptions));
+}
+assert.writeOK(db.capped.insert({ x: 1 }));
+
+// Full dump/restore
+
+t.runTool( "dump" , "--out" , t.ext );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.capped.getIndexes().length, "indexes not dropped" );
+
+t.runTool( "restore" , "--dir" , t.ext , "--noOptionsRestore");
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert.eq( defaultFlags, db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
+
+// Dump/restore single DB
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.capped.getIndexes().length, "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+assert.writeOK(db.capped.insert({ x: 1 }));
+
+dumppath = t.ext + "noOptionsSingleDump/";
+mkdir(dumppath);
+t.runTool( "dump" , "-d", dbname, "--out" , dumppath );
+
+db.dropDatabase();
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.capped.getIndexes().length, "indexes not dropped" );
+
+t.runTool( "restore" , "-d", dbname2, "--dir" , dumppath + dbname, "--noOptionsRestore");
+
+db = db.getSiblingDB(dbname2);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert(true !== db.capped.stats().capped, "restore options were not ignored");
+assert.eq( defaultFlags, db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
+
+// Dump/restore single collection
+
+db.dropDatabase();
+var options = { capped: true, size: 4096, autoIndexId: true };
+db.createCollection('capped', options);
+assert.eq( 1, db.capped.getIndexes().length, "auto index not created" );
+var cappedOptions = db.capped.exists().options;
+for ( var opt in options ) {
+ assert.eq(options[opt], cappedOptions[opt], 'invalid option')
+}
+
+assert.writeOK(db.capped.insert({ x: 1 }));
+
+dumppath = t.ext + "noOptionsSingleColDump/";
+mkdir(dumppath);
+dbname = db.getName();
+t.runTool( "dump" , "-d", dbname, "-c", "capped", "--out" , dumppath );
+
+db.dropDatabase();
+
+assert.eq( 0, db.capped.count(), "capped not dropped");
+assert.eq( 0, db.capped.getIndexes().length, "indexes not dropped" );
+
+t.runTool( "restore", "-d", dbname, "--drop", "--noOptionsRestore", dumppath + dbname );
+
+db = db.getSiblingDB(dbname);
+
+assert.eq( 1, db.capped.count() , "wrong number of docs restored to capped" );
+assert( true !== db.capped.stats().capped, "restore options were not ignored" );
+assert.eq( defaultFlags, db.capped.exists().options,
+ "restore options not ignored: " + tojson( db.capped.exists() ) );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth.js
new file mode 100644
index 00000000000..4bda54a5bdc
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth.js
@@ -0,0 +1,117 @@
+// dumprestore_auth.js
+
+
+t = new ToolTest("dumprestore_auth", { auth : "" });
+
+c = t.startDB("foo");
+var dbName = c.getDB().toString();
+print("DB is ",dbName);
+
+adminDB = c.getDB().getSiblingDB('admin');
+adminDB.createUser({user: 'admin', pwd: 'password', roles: ['root']});
+adminDB.auth('admin','password');
+adminDB.createUser({user: 'backup', pwd: 'password', roles: ['backup']});
+adminDB.createUser({user: 'restore', pwd: 'password', roles: ['restore']});
+
+// Add user defined roles & users with those roles
+var testUserAdmin = c.getDB().getSiblingDB(dbName);
+var backupActions = ["find","listCollections", "listIndexes"];
+testUserAdmin.createRole({role: "backupFoo",
+ privileges: [{resource: {db: dbName, collection: "foo"}, actions:backupActions},
+ {resource: {db: dbName, collection: "" },
+ actions: backupActions}],
+ roles: []});
+testUserAdmin.createUser({user: 'backupFoo', pwd: 'password', roles: ['backupFoo']});
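+// backupFoo is read-only (find/listCollections/listIndexes), which is enough to
+// dump but not to restore; the restore* roles below add the write actions.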
+
+var restoreActions = ["collMod", "createCollection","createIndex","dropCollection","insert","listCollections","listIndexes"];
+restoreActions.push("find");
+testUserAdmin.createRole({role: "restoreChester",
+ privileges: [{resource: {db: dbName, collection: "chester"}, actions: restoreActions},
+ {resource: {db: dbName, collection: ""}, actions:["listCollections","listIndexes"]},
+ ],
+ roles: []});
+testUserAdmin.createRole({role: "restoreFoo",
+ privileges: [{resource: {db: dbName, collection: "foo"}, actions:restoreActions},
+ {resource: {db: dbName, collection: ""}, actions:["listCollections","listIndexes"]},
+ ],
+ roles: []});
+testUserAdmin.createUser({user: 'restoreChester', pwd: 'password', roles: ['restoreChester']});
+testUserAdmin.createUser({user: 'restoreFoo', pwd: 'password', roles: ['restoreFoo']});
+
+var sysUsers = adminDB.system.users.count();
+assert.eq(0 , c.count() , "setup1");
+c.save({ a : 22 });
+assert.eq(1 , c.count() , "setup2");
+
+assert.commandWorked(c.runCommand("collMod", {usePowerOf2Sizes: false}));
+
+var collections = c.getDB().getCollectionInfos();
+var fooColl = null;
+collections.forEach(function(coll) {
+ if (coll.name === "foo") {
+ fooColl = coll;
+ }
+});
+assert.neq(null, fooColl, "foo collection doesn't exist");
+assert(!fooColl.options.flags, "find namespaces 1");
+
+t.runTool("dump" , "--out" , t.ext, "--username", "backup", "--password", "password");
+
+c.drop();
+assert.eq(0 , c.count() , "after drop");
+
+// Restore should fail without user & pass
+t.runTool("restore" , "--dir" , t.ext, "--writeConcern" ,"0");
+assert.eq(0 , c.count() , "after restore without auth");
+
+// Restore should pass with authorized user
+t.runTool("restore" , "--dir" , t.ext, "--username", "restore", "--password", "password", "--writeConcern", "0");
+assert.soon("c.findOne()" , "no data after sleep");
+assert.eq(1 , c.count() , "after restore 2");
+assert.eq(22 , c.findOne().a , "after restore 2");
+
+collections = c.getDB().getCollectionInfos();
+fooColl = null;
+collections.forEach(function(coll) {
+ if (coll.name === "foo") {
+ fooColl = coll;
+ }
+});
+assert.neq(null, fooColl, "foo collection doesn't exist");
+assert(!fooColl.options.flags, "find namespaces 2");
+
+assert.eq(sysUsers, adminDB.system.users.count());
+
+// Dump & restore DB/colection with user defined roles
+t.runTool("dump" , "--out" , t.ext, "--username", "backupFoo", "--password", "password",
+ "--db", dbName, "--collection", "foo");
+
+c.drop();
+assert.eq(0 , c.count() , "after drop");
+
+// Restore with wrong user
+t.runTool("restore" , "--username", "restoreChester", "--password", "password",
+ "--db", dbName, "--collection", "foo", t.ext+dbName+"/foo.bson", "--writeConcern", "0");
+assert.eq(0 , c.count() , "after restore with wrong user");
+
+// Restore with proper user
+t.runTool("restore" , "--username", "restoreFoo", "--password", "password",
+ "--db", dbName, "--collection", "foo", t.ext+dbName+"/foo.bson", "--writeConcern", "0");
+assert.soon("c.findOne()" , "no data after sleep");
+assert.eq(1 , c.count() , "after restore 3");
+assert.eq(22 , c.findOne().a , "after restore 3");
+
+collections = c.getDB().getCollectionInfos();
+fooColl = null;
+collections.forEach(function(coll) {
+ if (coll.name === "foo") {
+ fooColl = coll;
+ }
+});
+assert.neq(null, fooColl, "foo collection doesn't exist");
+assert(!fooColl.options.flags, "find namespaces 3");
+
+assert.eq(sysUsers, adminDB.system.users.count());
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth2.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth2.js
new file mode 100644
index 00000000000..0392d1be3db
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth2.js
@@ -0,0 +1,98 @@
+// dumprestore_auth2.js
+// Tests that mongodump and mongorestore properly handle access control information
+// Tests that the default auth roles of backup and restore work properly.
+
+t = new ToolTest("dumprestore_auth2", {auth: ""});
+
+coll = t.startDB("foo");
+admindb = coll.getDB().getSiblingDB("admin")
+
+// Create the relevant users and roles.
+admindb.createUser({user: "root", pwd: "pass", roles: ["root"]});
+admindb.auth("root", "pass");
+
+admindb.createUser({user: "backup", pwd: "pass", roles: ["backup"]});
+admindb.createUser({user: "restore", pwd: "pass", roles: ["restore"]});
+
+admindb.createRole({role: "customRole",
+ privileges:[{resource: {db: "jstests_tool_dumprestore_auth2",
+ collection: "foo"},
+ actions: ["find"]}],
+ roles:[]});
+admindb.createUser({user: "test", pwd: "pass", roles: ["customRole"]});
+
+coll.insert({word: "tomato"});
+assert.eq(1, coll.count());
+
+assert.eq(4, admindb.system.users.count(), "setup users")
+assert.eq(2, admindb.system.users.getIndexes().length,
+ "setup2: " + tojson( admindb.system.users.getIndexes() ) );
+assert.eq(1, admindb.system.roles.count(), "setup3")
+assert.eq(2, admindb.system.roles.getIndexes().length, "setup4")
+assert.eq(1, admindb.system.version.count());
+var versionDoc = admindb.system.version.findOne();
+
+// Logout root user.
+admindb.logout();
+
+// Verify that the custom role works as expected.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+// Dump the database.
+t.runTool("dump", "--out", t.ext, "--username", "backup", "--password", "pass");
+
+// Drop the relevant data in the database.
+admindb.auth("root", "pass");
+coll.getDB().dropDatabase();
+admindb.dropUser("backup");
+admindb.dropUser("test");
+admindb.dropRole("customRole");
+
+assert.eq(2, admindb.system.users.count(), "didn't drop backup and test users");
+assert.eq(0, admindb.system.roles.count(), "didn't drop roles");
+assert.eq(0, coll.count(), "didn't drop foo coll");
+
+// This test depends on W=0 to mask unique index violations.
+// This should be fixed once we implement TOOLS-341
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--writeConcern", "0");
+
+assert.soon("admindb.system.users.findOne()", "no data after restore");
+assert.eq(4, admindb.system.users.count(), "didn't restore users");
+assert.eq(2, admindb.system.users.getIndexes().length,
+ "didn't restore user indexes");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.roles.getIndexes().length,
+ "didn't restore role indexes");
+
+admindb.logout();
+
+// Login as user with customRole to verify privileges are restored.
+admindb.auth("test", "pass");
+assert.eq("tomato", coll.findOne().word);
+admindb.logout();
+
+admindb.auth("root", "pass");
+admindb.createUser({user: "root2", pwd: "pass", roles: ["root"]});
+admindb.dropRole("customRole");
+admindb.createRole({role: "customRole2", roles: [], privileges:[]});
+admindb.dropUser("root");
+admindb.logout();
+
+t.runTool("restore", "--dir", t.ext, "--username", "restore", "--password", "pass", "--drop", "--writeConcern", "0");
+
+admindb.auth("root", "pass");
+assert.soon("1 == admindb.system.users.find({user:'root'}).count()", "didn't restore users 2");
+assert.eq(0, admindb.system.users.find({user:'root2'}).count(), "didn't drop users");
+assert.eq(0, admindb.system.roles.find({role:'customRole2'}).count(), "didn't drop roles");
+assert.eq(1, admindb.system.roles.find({role:'customRole'}).count(), "didn't restore roles");
+assert.eq(2, admindb.system.users.getIndexes().length,
+ "didn't maintain user indexes");
+assert.eq(2, admindb.system.roles.getIndexes().length,
+ "didn't maintain role indexes");
+assert.eq(1, admindb.system.version.count(), "didn't restore version");
+assert.docEq(versionDoc, admindb.system.version.findOne(), "version doc wasn't restored properly");
+admindb.logout();
+
+t.stop();
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth3.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth3.js
new file mode 100644
index 00000000000..f65bed7abff
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_auth3.js
@@ -0,0 +1,200 @@
+// dumprestore_auth3.js
+// Tests that mongodump and mongorestore properly handle access control information when doing
+// single-db dumps and restores
+
+// Runs the tool with the given name against the given mongod.
+function runTool(toolName, mongod, options) {
+ var opts = {host: mongod.host};
+ Object.extend(opts, options);
+ MongoRunner.runMongoTool(toolName, opts);
+}
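+// e.g. runTool("mongodump", mongod, {out: dumpDir, db: "foo"}) expands to
+// MongoRunner.runMongoTool("mongodump", {host: mongod.host, out: dumpDir, db: "foo"}).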
+
+var mongod = MongoRunner.runMongod();
+var admindb = mongod.getDB("admin");
+var db = mongod.getDB("foo");
+
+jsTestLog("Creating Admin user & initial data");
+admindb.createUser({user: 'root', pwd: 'pass', roles: ['root']});
+admindb.createUser({user: 'backup', pwd: 'pass', roles: ['backup']});
+admindb.createUser({user: 'restore', pwd: 'pass', roles: ['restore']});
+admindb.createRole({role: "dummyRole", roles: [], privileges:[]});
+db.createUser({user: 'user', pwd: 'pass', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+var backupActions = ['find'];
+db.createRole({role: 'backupFooChester',
+ privileges: [{resource: {db: 'foo', collection: 'chester'}, actions: backupActions}],
+ roles: []});
+db.createUser({user: 'backupFooChester', pwd: 'pass', roles: ['backupFooChester']});
+
+var userCount = db.getUsers().length;
+var rolesCount = db.getRoles().length;
+var adminUsersCount = admindb.getUsers().length;
+var adminRolesCount = admindb.getRoles().length;
+var systemUsersCount = admindb.system.users.count();
+var systemVersionCount = admindb.system.version.count();
+
+db.bar.insert({a:1});
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "setup");
+assert.eq(rolesCount, db.getRoles().length, "setup2");
+assert.eq(adminUsersCount, admindb.getUsers().length, "setup3");
+assert.eq(adminRolesCount, admindb.getRoles().length, "setup4");
+assert.eq(systemUsersCount, admindb.system.users.count(), "setup5");
+assert.eq(systemVersionCount, admindb.system.version.count(),"system version");
+assert.eq(1, admindb.system.users.count({user: "restore"}), "Restore user is missing");
+assert.eq(1, admindb.system.users.count({user: "backup"}), "Backup user is missing");
+var versionDoc = admindb.system.version.findOne();
+
+jsTestLog("Dump foo database without dumping user data");
+var dumpDir = MongoRunner.getAndPrepareDumpDirectory("dumprestore_auth3");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo"});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+jsTestLog("Restore foo database from dump that doesn't contain user data ");
+// This test depends on W=0 to mask unique index violations.
+// This should be fixed once we implement TOOLS-341
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restore created users somehow");
+assert.eq(0, db.getRoles().length, "Restore created roles somehow");
+
+// Re-create user data
+db.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
+db.createRole({role: 'role', roles: [], privileges:[]});
+userCount = 1;
+rolesCount = 1;
+
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't create user");
+assert.eq(rolesCount, db.getRoles().length, "didn't create role");
+
+jsTestLog("Dump foo database *with* user data");
+runTool("mongodump", mongod, {out: dumpDir, db: "foo", dumpDbUsersAndRoles: ""});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore foo database without restoring user data, even though it's in the dump");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(0, db.getUsers().length, "Restored users even though it shouldn't have");
+assert.eq(0, db.getRoles().length, "Restored roles even though it shouldn't have");
+
+jsTestLog("Restore foo database *with* user data");
+runTool("mongorestore", mongod, {dir: dumpDir + "foo/", db: 'foo', restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq(1, admindb.system.users.count({user: "restore", db: "admin"}), "Restore user is missing");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Make modifications to user data that should be overridden by the restore");
+db.dropUser('user')
+db.createUser({user: 'user2', pwd: 'password2', roles: jsTest.basicUserRoles});
+db.dropRole('role')
+db.createRole({role: 'role2', roles: [], privileges:[]});
+
+jsTestLog("Restore foo database (and user data) with --drop so it overrides the changes made");
+// Restore with --drop to override the changes to user data
+runTool("mongorestore", mongod,
+ {dir: dumpDir + "foo/", db: 'foo', drop: "", restoreDbUsersAndRoles: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+admindb = mongod.getDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(adminUsersCount, admindb.getUsers().length, "Admin users were dropped");
+assert.eq(adminRolesCount, admindb.getRoles().length, "Admin roles were dropped");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't update user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't update role");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+
+jsTestLog("Dump just the admin database. User data should be dumped by default");
+// Make a user in another database to make sure it is properly captured
+db.getSiblingDB('bar').createUser({user: "user", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').createUser({user: "user", pwd: 'pwd', roles: []});
+adminUsersCount += 1;
+runTool("mongodump", mongod, {out: dumpDir, db: "admin"});
+db = mongod.getDB('foo');
+
+// Change user data a bit.
+db.dropAllUsers();
+db.getSiblingDB('bar').createUser({user: "user2", pwd: 'pwd', roles: []});
+db.getSiblingDB('admin').dropAllUsers();
+
+jsTestLog("Restore just the admin database. User data should be restored by default");
+runTool("mongorestore", mongod, {dir: dumpDir + "admin/", db: 'admin', drop: "", writeConcern: "0"});
+db = mongod.getDB('foo');
+var otherdb = db.getSiblingDB('bar');
+var admindb = db.getSiblingDB('admin');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(userCount, db.getUsers().length, "didn't restore users");
+assert.eq("user", db.getUser('user').user, "didn't restore user");
+assert.eq(rolesCount, db.getRoles().length, "didn't restore roles");
+assert.eq("role", db.getRole('role').role, "didn't restore role");
+assert.eq(1, otherdb.getUsers().length, "didn't restore users for bar database");
+assert.eq("user", otherdb.getUsers()[0].user, "didn't restore user for bar database");
+assert.eq(adminUsersCount, admindb.getUsers().length, "didn't restore users for admin database");
+assert.eq("user", admindb.getUser("user").user, "didn't restore user for admin database");
+assert.eq(6, admindb.system.users.count(), "has the wrong # of users for the whole server");
+assert.eq(2, admindb.system.roles.count(), "has the wrong # of roles for the whole server");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+jsTestLog("Dump all databases");
+runTool("mongodump", mongod, {out: dumpDir});
+db = mongod.getDB('foo');
+
+db.dropDatabase();
+db.dropAllUsers();
+db.dropAllRoles();
+
+assert.eq(0, db.getUsers().length, "didn't drop users");
+assert.eq(0, db.getRoles().length, "didn't drop roles");
+assert.eq(0, db.bar.count(), "didn't drop 'bar' collection");
+
+jsTestLog("Restore all databases");
+runTool("mongorestore", mongod, {dir: dumpDir, writeConcern: "0"});
+db = mongod.getDB('foo');
+
+assert.soon(function() { return db.bar.findOne(); }, "no data after restore");
+assert.eq(1, db.bar.findOne().a);
+assert.eq(1, db.getUsers().length, "didn't restore users");
+assert.eq(1, db.getRoles().length, "didn't restore roles");
+assert.docEq(versionDoc,
+ db.getSiblingDB('admin').system.version.findOne(),
+ "version doc was changed by restore");
+
+MongoRunner.stopMongod(mongod);
\ No newline at end of file
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_excludecollections.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_excludecollections.js
new file mode 100644
index 00000000000..dcfab742053
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumprestore_excludecollections.js
@@ -0,0 +1,112 @@
+// Tests for mongodump options for excluding collections
+
+
+var testBaseName = "jstests_tool_dumprestore_excludecollections";
+
+var dumpDir = MongoRunner.dataPath + testBaseName + "_dump_external/";
+
+var mongodSource = MongoRunner.runMongod();
+var sourceDB = mongodSource.getDB(testBaseName);
+var mongodDest = MongoRunner.runMongod();
+var destDB = mongodDest.getDB(testBaseName);
+
+jsTest.log("Inserting documents into source mongod");
+sourceDB.test.insert({x:1});
+sourceDB.test2.insert({x:2});
+sourceDB.test3.insert({x:3});
+sourceDB.foo.insert({f:1});
+sourceDB.foo2.insert({f:2});
+
+jsTest.log("Testing incompabible option combinations");
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ excludeCollection : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollection but no --db option");
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ collection : "foo",
+ excludeCollection : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollection and --collection");
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollectionsWithPrefix but " +
+ "no --db option");
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ collection : "foo",
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+assert.neq(ret, 0, "mongodump started successfully with --excludeCollectionsWithPrefix and " +
+ "--collection");
+
+jsTest.log("Testing proper behavior of collection exclusion");
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ excludeCollection : "test",
+ host : mongodSource.host });
+
+ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+assert.eq(ret, 0, "failed to run mongodump on expected successful call");
+assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.test2.findOne().x, 2, "Wrong value in document");
+assert.eq(destDB.test3.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.test3.findOne().x, 3, "Wrong value in document");
+assert.eq(destDB.foo.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo.findOne().f, 1, "Wrong value in document");
+assert.eq(destDB.foo2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
+destDB.dropDatabase();
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+
+ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+assert.eq(ret, 0, "failed to run mongodump on expected successful call");
+assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test2.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test3.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.foo.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo.findOne().f, 1, "Wrong value in document");
+assert.eq(destDB.foo2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
+destDB.dropDatabase();
+
+resetDbpath(dumpDir);
+ret = MongoRunner.runMongoTool("mongodump", { out : dumpDir,
+ db : testBaseName,
+ excludeCollection : "foo",
+ excludeCollectionsWithPrefix : "test",
+ host : mongodSource.host });
+
+ret = MongoRunner.runMongoTool("mongorestore", { dir : dumpDir, host : mongodDest.host });
+assert.eq(ret, 0, "failed to run mongodump on expected successful call");
+assert.eq(destDB.test.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test2.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.test3.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.foo.count(), 0, "Found documents in collection that we excluded");
+assert.eq(destDB.foo2.count(), 1, "Did not find document in collection that we did not exclude");
+assert.eq(destDB.foo2.findOne().f, 2, "Wrong value in document");
+destDB.dropDatabase();
+
+// The --excludeCollection and --excludeCollectionsWithPrefix options can be specified multiple
+// times, but that is not tested here because right now MongoRunners can only be configured using
+// javascript objects which do not allow duplicate keys. See SERVER-14220.
+
+MongoRunner.stopMongod(mongodDest.port);
+MongoRunner.stopMongod(mongodSource.port);
+
+print(testBaseName + " success!");
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/dumpsecondary.js b/src/mongo/gotools/test/legacy28/jstests/tool/dumpsecondary.js
new file mode 100644
index 00000000000..92cd6b9fff1
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/dumpsecondary.js
@@ -0,0 +1,39 @@
+
+var replTest = new ReplSetTest( {name: 'testSet', nodes: 2} );
+
+var nodes = replTest.startSet();
+replTest.initiate();
+
+var master = replTest.getPrimary();
+db = master.getDB("foo")
+db.foo.save({a: 1000});
+replTest.awaitReplication();
+replTest.awaitSecondaryNodes();
+
+assert.eq( 1 , db.foo.count() , "setup" );
+
+var slaves = replTest.liveNodes.slaves;
+assert( slaves.length == 1, "Expected 1 slave but length was " + slaves.length );
+slave = slaves[0];
+
+var args = ['mongodump', '-h', slave.host, '--out', MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+var authargs = ['--username', jsTest.options().authUser, '--password', jsTest.options().authPassword];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args);
+db.foo.drop()
+
+assert.eq( 0 , db.foo.count() , "after drop" );
+args = ['mongorestore', '-h', master.host, MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external/'];
+if (jsTest.options().keyFile) {
+ args = args.concat(authargs);
+}
+runMongoProgram.apply(null, args)
+assert.soon( "db.foo.findOne()" , "no data after sleep" );
+assert.eq( 1 , db.foo.count() , "after restore" );
+assert.eq( 1000 , db.foo.findOne().a , "after restore 2" );
+
+resetDbpath(MongoRunner.dataDir + '/jstests_tool_dumpsecondary_external')
+
+replTest.stopSet(15)
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/exportimport1.js b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport1.js
new file mode 100644
index 00000000000..5e206d8c40b
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport1.js
@@ -0,0 +1,67 @@
+// exportimport1.js
+
+
+t = new ToolTest( "exportimport1" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+var arr = ["x", undefined, "y", undefined];
+c.save( { a : 22 , b : arr} );
+assert.eq( 1 , c.count() , "setup2" );
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+assert.eq( 22 , doc.a , "after restore 2" );
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+ // null should be { "$undefined" : true }, but this is a workaround for SERVER-6102
+ assert.eq( null, doc.b[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.b[i] , "after restore array: "+i );
+ }
+}
+
+// now with --jsonArray
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+assert.eq( 22 , c.findOne().a , "after restore 2" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+arr = ["a", undefined, "c"];
+c.save({a : arr});
+assert.eq( 1 , c.count() , "setup2" );
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+var doc = c.findOne();
+for (var i=0; i<arr.length; i++) {
+ if (typeof arr[i] == 'undefined') {
+ // null should be { "$undefined" : true }, but this is a workaround for SERVER-6102
+ assert.eq( null, doc.a[i] , "after restore array: "+i );
+ } else {
+ assert.eq( arr[i], doc.a[i] , "after restore array: "+i );
+ }
+}
+
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/exportimport3.js b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport3.js
new file mode 100644
index 00000000000..4f0fdd46609
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport3.js
@@ -0,0 +1,28 @@
+// exportimport3.js
+
+
+t = new ToolTest( "exportimport3" );
+
+c = t.startDB( "foo" );
+assert.eq( 0 , c.count() , "setup1" );
+c.save({a:1})
+c.save({a:2})
+c.save({a:3})
+c.save({a:4})
+c.save({a:5})
+
+assert.eq( 5 , c.count() , "setup2" );
+
+
+t.runTool( "export" , "--jsonArray" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );;
+
+t.runTool( "import" , "--jsonArray" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 5 , c.count() , "after restore 2" );
+
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/exportimport4.js b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport4.js
new file mode 100644
index 00000000000..c0d82a135bc
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport4.js
@@ -0,0 +1,57 @@
+// exportimport4.js
+
+
+t = new ToolTest( "exportimport4" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, NaN, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ NaN ] } );
+ c.save( { a : [1, 2, 3, 4, NaN, NaN, 5, NaN] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+
+ assert.eq( 5 , c.count() , "setup2" );
+};
+
+// attempt to export fields without NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[NaN]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 1" );
+
+// attempt to export fields with NaN
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:NaN}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 5 , c.count() , "after restore 3" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/exportimport5.js b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport5.js
new file mode 100644
index 00000000000..47dd98c2553
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport5.js
@@ -0,0 +1,82 @@
+// exportimport5.js
+
+
+t = new ToolTest( "exportimport5" );
+c = t.startDB( "foo" );
+
+install_test_data = function() {
+ c.drop();
+
+ assert.eq( 0 , c.count() , "setup1" );
+
+ c.save( { a : [1, 2, 3, Infinity, 4, null, 5] } );
+ c.save( { a : [1, 2, 3, 4, 5] } );
+ c.save( { a : [ Infinity ] } );
+ c.save( { a : [1, 2, 3, 4, Infinity, Infinity, 5, -Infinity] } );
+ c.save( { a : [1, 2, 3, 4, null, null, 5, null] } );
+ c.save( { a : [ -Infinity ] } );
+
+ assert.eq( 6 , c.count() , "setup2" );
+};
+
+// attempt to export fields without Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 1" );
+
+// attempt to export fields with Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 3 , c.count() , "after restore 2" );
+
+// attempt to export fields without -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:{\"$nin\":[-Infinity]}}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 4 , c.count() , "after restore 3" );
+
+// attempt to export fields with -Infinity
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo", "-q", "{a:-Infinity}" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 2 , c.count() , "after restore 4" );
+
+// attempt to export everything
+install_test_data();
+
+t.runTool( "export" , "--out" , t.extFile , "-d" , t.baseName , "-c" , "foo" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop" , "-d" , t.baseName , "-c" , "foo" );
+
+t.runTool( "import" , "--file" , t.extFile , "-d" , t.baseName , "-c" , "foo", "--drop" );
+
+assert.eq( 6 , c.count() , "after restore 5" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/exportimport6.js b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport6.js
new file mode 100644
index 00000000000..a6406dfa880
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport6.js
@@ -0,0 +1,27 @@
+// exportimport6.js
+// test export with skip, limit and sort
+
+
+t = new ToolTest("exportimport6");
+
+c = t.startDB("foo");
+assert.eq(0, c.count(), "setup1");
+c.save({a:1, b:1});
+c.save({a:1, b:2});
+c.save({a:2, b:3});
+c.save({a:2, b:3});
+c.save({a:3, b:4});
+c.save({a:3, b:5});
+
+assert.eq(6, c.count(), "setup2");
+
+t.runTool("export", "--out", t.extFile, "-d", t.baseName, "-c", "foo",
+ "--sort", "{a:1, b:-1}", "--skip", "4", "--limit", "1");
+
+c.drop();
+assert.eq(0, c.count(), "after drop", "-d", t.baseName, "-c", "foo");
+t.runTool("import", "--file", t.extFile, "-d", t.baseName, "-c", "foo");
+assert.eq(1, c.count(), "count should be 1");
+assert.eq(5, c.findOne().b, printjson(c.findOne()));
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/exportimport_bigarray.js b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport_bigarray.js
new file mode 100644
index 00000000000..e8bd4a468b4
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport_bigarray.js
@@ -0,0 +1,59 @@
+// Test importing a collection exported as a single-line JSON array that exceeds the maximum BSON document size
+
+
+var tt = new ToolTest('exportimport_bigarray_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+// Calculate the number of documents it takes to get above 16MB (here using 20MB just to be safe)
+var bigString = new Array(1025).toString();
+var doc = {_id: new ObjectId(), x:bigString};
+var docSize = Object.bsonsize(doc);
+var numDocs = Math.floor(20*1024*1024 / docSize);
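+// new Array(1025).toString() yields a string of 1024 commas, so each document
+// is roughly 1KB and numDocs works out to roughly 20,000.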
+
+print('Size of one document: ' + docSize);
+print('Number of documents to exceed maximum BSON size: ' + numDocs);
+
+print('About to insert ' + numDocs + ' documents into ' +
+ exportimport_db.getName() + '.' + src.getName());
+var i;
+var bulk = src.initializeUnorderedBulkOp();
+for (i = 0; i < numDocs; ++i) {
+ bulk.insert({ x: bigString });
+}
+assert.writeOK(bulk.execute());
+
+data = 'data/exportimport_array_test.json';
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + data);
+tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName(),
+ '--jsonArray');
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + data);
+tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName(),
+ '--jsonArray');
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/exportimport_date.js b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport_date.js
new file mode 100644
index 00000000000..9dc6c275a96
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport_date.js
@@ -0,0 +1,50 @@
+
+var tt = new ToolTest('exportimport_date_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+// Insert a date that we can format
+var formatable = ISODate("1970-01-02T05:00:00Z");
+assert.eq(formatable.valueOf(), 104400000);
+src.insert({ "_id" : formatable });
+
+// Insert a date that we cannot format as an ISODate string
+var nonformatable = ISODate("3001-01-01T00:00:00Z");
+assert.eq(nonformatable.valueOf(), 32535216000000);
+src.insert({ "_id" : nonformatable });
+
+// Verify number of documents inserted
+assert.eq(2, src.find().itcount());
+
+data = 'data/exportimport_date_test.json';
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + data);
+tt.runTool('export', '--out' , data, '-d', exportimport_db.getName(), '-c', src.getName());
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + data);
+tt.runTool('import', '--file', data, '-d', exportimport_db.getName(), '-c', dst.getName());
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/exportimport_minkey_maxkey.js b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport_minkey_maxkey.js
new file mode 100644
index 00000000000..a4705dc3ceb
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/exportimport_minkey_maxkey.js
@@ -0,0 +1,38 @@
+
+var tt = new ToolTest('exportimport_minkey_maxkey_test');
+
+var exportimport_db = tt.startDB();
+
+var src = exportimport_db.src;
+var dst = exportimport_db.dst;
+
+src.drop();
+dst.drop();
+
+src.insert({ "_id" : MaxKey });
+src.insert({ "_id" : MinKey });
+
+print('About to call mongoexport on: ' + exportimport_db.getName() + '.' + src.getName() +
+ ' with file: ' + tt.extFile);
+tt.runTool('export', '--out' , tt.extFile, '-d', exportimport_db.getName(), '-c', src.getName());
+
+print('About to call mongoimport on: ' + exportimport_db.getName() + '.' + dst.getName() +
+ ' with file: ' + tt.extFile);
+tt.runTool('import', '--file', tt.extFile, '-d', exportimport_db.getName(), '-c', dst.getName());
+
+print('About to verify that source and destination collections match');
+
+src_cursor = src.find().sort({ _id : 1 });
+dst_cursor = dst.find().sort({ _id : 1 });
+
+var documentCount = 0;
+while (src_cursor.hasNext()) {
+ assert(dst_cursor.hasNext(), 'Source has more documents than destination. ' +
+ 'Destination has ' + documentCount + ' documents.');
+ assert.eq(src_cursor.next(), dst_cursor.next(), 'Mismatch on document ' + documentCount);
+ ++documentCount;
+}
+assert(!dst_cursor.hasNext(), 'Destination has more documents than source. ' +
+ 'Source has ' + documentCount + ' documents.');
+
+print('Verified that source and destination collections match');
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/files1.js b/src/mongo/gotools/test/legacy28/jstests/tool/files1.js
new file mode 100644
index 00000000000..3db783df19f
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/files1.js
@@ -0,0 +1,28 @@
+// files1.js
+
+
+t = new ToolTest( "files1" )
+
+db = t.startDB();
+
+filename = 'mongod';
+if ( _isWindows() )
+    filename += '.exe';
+
+t.runTool( "files" , "-d" , t.baseName , "put" , filename );
+md5 = md5sumFile(filename);
+
+file_obj = db.fs.files.findOne();
+assert( file_obj , "A 0" );
+md5_stored = file_obj.md5;
+md5_computed = db.runCommand({filemd5: file_obj._id}).md5;
+assert.eq( md5 , md5_stored , "A 1" );
+assert.eq( md5 , md5_computed, "A 2" );
+
+mkdir(t.ext);
+
+t.runTool( "files" , "-d" , t.baseName , "get" , filename , '-l' , t.extFile );
+md5 = md5sumFile(t.extFile);
+assert.eq( md5 , md5_stored , "B" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/oplog1.js b/src/mongo/gotools/test/legacy28/jstests/tool/oplog1.js
new file mode 100644
index 00000000000..5beb1d697e0
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/oplog1.js
@@ -0,0 +1,29 @@
+// oplog1.js
+
+
+// very basic test for mongooplog
+// need a lot more, but test that it functions at all
+
+t = new ToolTest( "oplog1" );
+
+db = t.startDB();
+
+output = db.output;
+
+doc = { _id : 5 , x : 17 };
+
+assert.commandWorked(db.createCollection(output.getName()));
+
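+// Minimal oplog entry: "ts" is the operation timestamp, op "i" is an insert,
+// "ns" is the target namespace, and "o" is the document to apply.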
+db.oplog.insert( { ts : new Timestamp() , "op" : "i" , "ns" : output.getFullName() , "o" : doc } );
+
+assert.eq( 0 , output.count() , "before" )
+
+t.runTool( "oplog" , "--oplogns" , db.getName() + ".oplog" , "--from" , "127.0.0.1:" + t.port , "-vv" );
+
+assert.eq( 1 , output.count() , "after" );
+
+assert.eq( doc , output.findOne() , "after check" );
+
+t.stop();
+
+
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/oplog_all_ops.js b/src/mongo/gotools/test/legacy28/jstests/tool/oplog_all_ops.js
new file mode 100644
index 00000000000..a0eb3e34dc9
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/oplog_all_ops.js
@@ -0,0 +1,62 @@
+/**
+ * Performs a simple test on mongooplog by doing different types of operations
+ * that will show up in the oplog then replaying it on another replica set.
+ * Correctness is verified using the dbhash command.
+ */
+
+
+var repl1 = new ReplSetTest({ name: 'rs1', nodes: [{ nopreallocj: '' },
+ { arbiter: true }, { arbiter: true }]});
+
+repl1.startSet({ oplogSize: 10 });
+repl1.initiate();
+repl1.awaitSecondaryNodes();
+
+var repl1Conn = new Mongo(repl1.getURL());
+var testDB = repl1Conn.getDB('test');
+var testColl = testDB.user;
+
+// op i
+testColl.insert({ x: 1 });
+testColl.insert({ x: 2 });
+
+// op c
+testDB.dropDatabase();
+
+testColl.insert({ y: 1 });
+testColl.insert({ y: 2 });
+testColl.insert({ y: 3 });
+
+// op u
+testColl.update({}, { $inc: { z: 1 }}, true, true);
+
+// op d
+testColl.remove({ y: 2 });
+
+// op n
+var oplogColl = repl1Conn.getCollection('local.oplog.rs');
+oplogColl.insert({ ts: new Timestamp(), op: 'n', ns: testColl.getFullName(), 'o': { x: 'noop' }});
+
+var repl2 = new ReplSetTest({ name: 'rs2', startPort: 31100, nodes: [{ nopreallocj: '' },
+ { arbiter: true }, { arbiter: true }]});
+
+repl2.startSet({ oplogSize: 10 });
+repl2.initiate();
+repl2.awaitSecondaryNodes();
+
+var srcConn = repl1.getPrimary();
+runMongoProgram('mongooplog', '--from', srcConn.host,
+ '--host', repl2.getPrimary().host);
+
+var repl1Hash = testDB.runCommand({ dbhash: 1 });
+
+var repl2Conn = new Mongo(repl2.getURL());
+var testDB2 = repl2Conn.getDB(testDB.getName());
+var repl2Hash = testDB2.runCommand({ dbhash: 1 });
+
+assert(repl1Hash.md5);
+assert.eq(repl1Hash.md5, repl2Hash.md5);
+
+repl1.stopSet();
+repl2.stopSet();
+
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/restorewithauth.js b/src/mongo/gotools/test/legacy28/jstests/tool/restorewithauth.js
new file mode 100644
index 00000000000..d17769cf396
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/restorewithauth.js
@@ -0,0 +1,117 @@
+/* SERVER-4972
+ * Regression test: mongorestore against a server running with --auth used to allow
+ * restoring collections that had no indexes without supplying credentials.
+ */
+/*
+ * 1) Start mongo without auth.
+ * 2) Write to collection
+ * 3) Take dump of the collection using mongodump.
+ * 4) Drop the collection.
+ * 5) Stop mongod from step 1.
+ * 6) Restart mongod with auth.
+ * 7) Add admin user to kick authentication
+ * 8) Try restore without auth credentials. The restore should fail
+ * 9) Try restore with correct auth credentials. The restore should succeed this time.
+ */
+
+
+var port = allocatePorts(1)[0];
+baseName = "jstests_restorewithauth";
+var conn = startMongod( "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// write to ns foo.bar
+var foo = conn.getDB( "foo" );
+for( var i = 0; i < 4; i++ ) {
+ foo["bar"].save( { "x": i } );
+ foo["baz"].save({"x": i});
+}
+
+// make sure the collection exists
+var collNames = foo.getCollectionNames();
+assert.neq(-1, collNames.indexOf("bar"), "bar collection doesn't exist");
+
+//make sure it has no index except _id
+assert.eq(foo.bar.getIndexes().length, 1);
+assert.eq(foo.baz.getIndexes().length, 1);
+
+foo.bar.createIndex({x:1});
+assert.eq(foo.bar.getIndexes().length, 2);
+assert.eq(foo.baz.getIndexes().length, 1);
+
+// get data dump
+var dumpdir = MongoRunner.dataDir + "/restorewithauth-dump1/";
+resetDbpath( dumpdir );
+x = runMongoProgram("mongodump", "--db", "foo", "-h", "127.0.0.1:"+port, "--out", dumpdir);
+
+// now drop the db
+foo.dropDatabase();
+
+// stop mongod
+stopMongod( port );
+
+// start mongod with --auth
+conn = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface",
+ "--nojournal", "--bind_ip", "127.0.0.1" );
+
+// admin user
+var admin = conn.getDB( "admin" )
+admin.createUser({user: "admin" , pwd: "admin", roles: jsTest.adminUserRoles});
+admin.auth( "admin" , "admin" );
+
+var foo = conn.getDB( "foo" )
+
+// make sure no collection with the same name exists
+collNames = foo.getCollectionNames();
+assert.eq(-1, collNames.indexOf("bar"), "bar collection already exists");
+assert.eq(-1, collNames.indexOf("baz"), "baz collection already exists");
+
+// now try to restore dump
+x = runMongoProgram( "mongorestore", "-h", "127.0.0.1:" + port, "--dir" , dumpdir, "-vvvvv" );
+
+// make sure that the collection isn't restored
+collNames = foo.getCollectionNames();
+assert.eq(-1, collNames.indexOf("bar"), "bar collection was restored");
+assert.eq(-1, collNames.indexOf("baz"), "baz collection was restored");
+
+// now try to restore dump with correct credentials
+x = runMongoProgram( "mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "--authenticationDatabase=admin",
+ "-u", "admin",
+ "-p", "admin",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+collNames = foo.getCollectionNames();
+assert.neq(-1, collNames.indexOf("bar"), "bar collection was not restored");
+assert.neq(-1, collNames.indexOf("baz"), "baz collection was not restored");
+
+// make sure the collection has 4 documents
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+
+foo.dropDatabase();
+
+foo.createUser({user: 'user', pwd: 'password', roles: jsTest.basicUserRoles});
+
+// now try to restore dump with foo database credentials
+x = runMongoProgram("mongorestore",
+ "-h", "127.0.0.1:" + port,
+ "-d", "foo",
+ "-u", "user",
+ "-p", "password",
+ "--dir", dumpdir + "foo/",
+ "-vvvvv");
+
+// make sure that the collection was restored
+collNames = foo.getCollectionNames();
+assert.neq(-1, collNames.indexOf("bar"), "bar collection was not restored");
+assert.neq(-1, collNames.indexOf("baz"), "baz collection was not restored");
+assert.eq(foo.bar.count(), 4);
+assert.eq(foo.baz.count(), 4);
+assert.eq(foo.bar.getIndexes().length + foo.baz.getIndexes().length, 3); // _id on bar, _id on baz, x on bar
+
+stopMongod( port );
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/stat1.js b/src/mongo/gotools/test/legacy28/jstests/tool/stat1.js
new file mode 100644
index 00000000000..3855d6c13c6
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/stat1.js
@@ -0,0 +1,18 @@
+// stat1.js
+// test mongostat with authentication SERVER-3875
+
+
+port = allocatePorts( 1 )[ 0 ];
+baseName = "tool_stat1";
+
+m = startMongod( "--auth", "--port", port, "--dbpath", MongoRunner.dataPath + baseName, "--nohttpinterface", "--bind_ip", "127.0.0.1" );
+db = m.getDB( "admin" );
+
+db.createUser({user: "eliot" , pwd: "eliot", roles: jsTest.adminUserRoles});
+assert( db.auth( "eliot" , "eliot" ) , "auth failed" );
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "eliot", "--rowcount", "1", "--authenticationDatabase=admin" );
+assert.eq(x, 0, "mongostat should exit successfully with eliot:eliot");
+
+x = runMongoProgram( "mongostat", "--host", "127.0.0.1:"+port, "--username", "eliot", "--password", "wrong", "--rowcount", "1", "--authenticationDatabase=admin" );
+assert.eq(x, 1, "mongostat should exit with -1 with eliot:wrong");
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/tool1.js b/src/mongo/gotools/test/legacy28/jstests/tool/tool1.js
new file mode 100644
index 00000000000..f7c6f769e72
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/tool1.js
@@ -0,0 +1,44 @@
+// mongo tool tests, very basic to start with
+
+
+baseName = "jstests_tool_tool1";
+dbPath = MongoRunner.dataPath + baseName + "/";
+externalPath = MongoRunner.dataPath + baseName + "_external/";
+externalBaseName = "export.json";
+externalFile = externalPath + externalBaseName;
+
+function fileSize(){
+ var l = listFiles( externalPath );
+ for ( var i=0; i<l.length; i++ ){
+ if ( l[i].baseName == externalBaseName )
+ return l[i].size;
+ }
+ return -1;
+}
+
+
+port = allocatePorts( 1 )[ 0 ];
+resetDbpath( externalPath );
+
+m = startMongod( "--port", port, "--dbpath", dbPath, "--nohttpinterface", "--noprealloc" , "--bind_ip", "127.0.0.1" );
+c = m.getDB( baseName ).getCollection( baseName );
+c.save( { a: 1 } );
+assert( c.findOne() );
+
+runMongoProgram( "mongodump", "--host", "127.0.0.1:" + port, "--out", externalPath );
+c.drop();
+runMongoProgram( "mongorestore", "--host", "127.0.0.1:" + port, "--dir", externalPath );
+assert.soon( "c.findOne()" , "mongodump then restore has no data w/sleep" );
+assert( c.findOne() , "mongodump then restore has no data" );
+assert.eq( 1 , c.findOne().a , "mongodump then restore has no broken data" );
+
+resetDbpath( externalPath );
+
+assert.eq( -1 , fileSize() , "mongoexport prep invalid" );
+runMongoProgram( "mongoexport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--out", externalFile );
+assert.lt( 10 , fileSize() , "file size changed" );
+
+c.drop();
+runMongoProgram( "mongoimport", "--host", "127.0.0.1:" + port, "-d", baseName, "-c", baseName, "--file", externalFile );
+assert.soon( "c.findOne()" , "mongo import json A" );
+assert( c.findOne() && 1 == c.findOne().a , "mongo import json B" );
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/tool_replset.js b/src/mongo/gotools/test/legacy28/jstests/tool/tool_replset.js
new file mode 100644
index 00000000000..934b380c464
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/tool_replset.js
@@ -0,0 +1,89 @@
+/*
+ * Test to ensure that (dump/restore/export/import/oplog) works with a replica set connection string
+ * 1. Start a replica set.
+ * 2. Add data to a collection.
+ * 3. Take a dump of the database.
+ * 4. Drop the db.
+ * 5. Restore the db.
+ * 6. Export a collection.
+ * 7. Drop the collection.
+ * 8. Import the collection.
+ * 9. Add data to the oplog.rs collection.
+ * 10. Ensure that the document doesn't exist yet.
+ * 11. Now play the mongooplog tool.
+ * 12. Make sure that the oplog was played
+*/
+
+// Load utility methods for replica set tests
+load("jstests/replsets/rslib.js");
+
+print("starting the replica set")
+
+var replTest = new ReplSetTest({ name: 'tool_replset', nodes: 2, oplogSize: 5 });
+var nodes = replTest.startSet();
+replTest.initiate();
+var master = replTest.getPrimary();
+for (var i = 0; i < 100; i++) {
+ master.getDB("foo").bar.insert({ a: i });
+}
+replTest.awaitReplication();
+
+var replSetConnString = "tool_replset/127.0.0.1:" + replTest.ports[0] +
+ ",127.0.0.1:" + replTest.ports[1];
+
+// Test with mongodump/mongorestore
+print("dump the db");
+var data = MongoRunner.dataDir + "/tool_replset-dump1/";
+runMongoProgram("mongodump", "--host", replSetConnString, "--out", data);
+
+print("db successfully dumped, dropping now");
+master.getDB("foo").dropDatabase();
+replTest.awaitReplication();
+
+print("restore the db");
+runMongoProgram("mongorestore", "--host", replSetConnString, "--dir", data);
+
+print("db successfully restored, checking count")
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongorestore should have successfully restored the collection");
+
+replTest.awaitReplication();
+
+// Test with mongoexport/mongoimport
+print("export the collection");
+var extFile = MongoRunner.dataDir + "/tool_replset/export";
+runMongoProgram("mongoexport", "--host", replSetConnString, "--out", extFile,
+ "-d", "foo", "-c", "bar");
+
+print("collection successfully exported, dropping now");
+master.getDB("foo").getCollection("bar").drop();
+replTest.awaitReplication();
+
+print("import the collection");
+runMongoProgram("mongoimport", "--host", replSetConnString, "--file", extFile,
+ "-d", "foo", "-c", "bar");
+
+var x = master.getDB("foo").getCollection("bar").count();
+assert.eq(x, 100, "mongoimport should have successfully imported the collection");
+
+// Test with mongooplog
+var doc = { _id : 5, x : 17 };
+master.getDB("local").oplog.rs.insert({ ts : new Timestamp(), "op" : "i", "ns" : "foo.bar",
+ "o" : doc, "v" : NumberInt(2) });
+
+assert.eq(100, master.getDB("foo").getCollection("bar").count(), "count before running mongooplog " +
+ "was not 100 as expected");
+
+runMongoProgram("mongooplog" , "--from", "127.0.0.1:" + replTest.ports[0],
+ "--host", replSetConnString);
+
+print("running mongooplog to replay the oplog")
+
+assert.eq(101, master.getDB("foo").getCollection("bar").count(), "count after running mongooplog " +
+ "was not 101 as expected")
+
+print("all tests successful, stopping replica set")
+
+replTest.stopSet();
+
+print("replica set stopped, test complete")
diff --git a/src/mongo/gotools/test/legacy28/jstests/tool/tsv1.js b/src/mongo/gotools/test/legacy28/jstests/tool/tsv1.js
new file mode 100644
index 00000000000..677bec2af9c
--- /dev/null
+++ b/src/mongo/gotools/test/legacy28/jstests/tool/tsv1.js
@@ -0,0 +1,33 @@
+// tsv1.js
+
+
+t = new ToolTest( "tsv1" )
+
+c = t.startDB( "foo" );
+
+base = { a : "", b : 1 , c : "foobar" , d: 5, e: -6 };
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "-f" , "a,b,c,d,e" );
+assert.soon( "2 == c.count()" , "restore 2" );
+
+a = c.find().sort( { a : 1 } ).toArray();
+delete a[0]._id;
+delete a[1]._id;
+
+assert.docEq( { a : "a" , b : "b" , c : "c" , d: "d", e: "e"} , a[1] , "tsv parse 1" );
+assert.docEq( base , a[0] , "tsv parse 0" );
+
+c.drop();
+assert.eq( 0 , c.count() , "after drop 2" );
+
+t.runTool( "import" , "--file" , "jstests/tool/data/a.tsv" , "-d" , t.baseName , "-c" , "foo" , "--type" , "tsv" , "--headerline" );
+assert.soon( "c.findOne()" , "no data after sleep" );
+assert.eq( 1 , c.count() , "after restore 2" );
+
+x = c.findOne();
+delete x._id;
+assert.docEq( base , x , "tsv parse 2" );
+
+t.stop();
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/__init__.py b/src/mongo/gotools/test/qa-tests/buildscripts/__init__.py
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/__init__.py
@@ -0,0 +1 @@
+
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/buildlogger.py b/src/mongo/gotools/test/qa-tests/buildscripts/buildlogger.py
new file mode 100644
index 00000000000..d2466e495c0
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/buildlogger.py
@@ -0,0 +1,479 @@
+"""
+buildlogger.py
+
+Wrap a command (specified on the command line invocation of buildlogger.py)
+and send output in batches to the buildlogs web application via HTTP POST.
+
+The script configures itself from environment variables:
+
+ required env vars:
+ MONGO_BUILDER_NAME (e.g. "Nightly Linux 64-bit")
+ MONGO_BUILD_NUMBER (an integer)
+ MONGO_TEST_FILENAME (not required when invoked with -g)
+
+ optional env vars:
+ MONGO_PHASE (e.g. "core", "slow nightly", etc)
+ MONGO_* (any other environment vars are passed to the web app)
+ BUILDLOGGER_CREDENTIALS (see below)
+
+This script has two modes: a "test" mode, intended to wrap the invocation of
+an individual test file, and a "global" mode, intended to wrap the mongod
+instances that run throughout the duration of a mongo test phase (the logs
+from "global" invocations are displayed interspersed with the logs of each
+test, in order to let the buildlogs web app display the full output sensibly.)
+
+If the BUILDLOGGER_CREDENTIALS environment variable is set, it should be a
+path to a valid Python file containing "username" and "password" variables,
+which should be valid credentials for authenticating to the buildlogger web
+app. For example:
+
+ username = "hello"
+ password = "world"
+
+If BUILDLOGGER_CREDENTIALS is a relative path, then the working directory
+and the directories one, two, and three levels up, are searched, in that
+order.
+"""
+
+import functools
+import os
+import os.path
+import re
+import signal
+import socket
+import subprocess
+import sys
+import time
+import traceback
+import urllib2
+import utils
+
+# suppress deprecation warnings that happen when
+# we import the 'buildbot.tac' file below
+import warnings
+warnings.simplefilter('ignore', DeprecationWarning)
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+# try to load the buildlogger credentials from the file named by
+# BUILDLOGGER_CREDENTIALS (default "buildbot.tac"), which will be
+# one, two, or three directories up from this file's location
+credentials_file = os.environ.get('BUILDLOGGER_CREDENTIALS', 'buildbot.tac')
+credentials_loc, credentials_name = os.path.split(credentials_file)
+if not credentials_loc:
+ here = os.path.abspath(os.path.dirname(__file__))
+ possible_paths = [
+ os.path.abspath(os.path.join(here, '..')),
+ os.path.abspath(os.path.join(here, '..', '..')),
+ os.path.abspath(os.path.join(here, '..', '..', '..')),
+ ]
+else:
+ possible_paths = [credentials_loc]
+
+username, password = None, None
+for path in possible_paths:
+ credentials_path = os.path.join(path, credentials_name)
+ if os.path.isfile(credentials_path):
+ credentials = {}
+ try:
+ execfile(credentials_path, credentials, credentials)
+ username = credentials.get('slavename', credentials.get('username'))
+ password = credentials.get('passwd', credentials.get('password'))
+ break
+ except:
+ pass
+
+
+URL_ROOT = os.environ.get('BUILDLOGGER_URL', 'http://buildlogs.mongodb.org/')
+TIMEOUT_SECONDS = 10
+socket.setdefaulttimeout(TIMEOUT_SECONDS)
+
+digest_handler = urllib2.HTTPDigestAuthHandler()
+digest_handler.add_password(
+ realm='buildlogs',
+ uri=URL_ROOT,
+ user=username,
+ passwd=password)
+
+# This version of HTTPErrorProcessor is copied from
+# Python 2.7, and allows REST response codes (e.g.
+# "201 Created") which are treated as errors by
+# older versions.
+class HTTPErrorProcessor(urllib2.HTTPErrorProcessor):
+ def http_response(self, request, response):
+ code, msg, hdrs = response.code, response.msg, response.info()
+
+ # According to RFC 2616, "2xx" code indicates that the client's
+ # request was successfully received, understood, and accepted.
+ if not (200 <= code < 300):
+ response = self.parent.error(
+ 'http', request, response, code, msg, hdrs)
+
+ return response
+
+url_opener = urllib2.build_opener(digest_handler, HTTPErrorProcessor())
+
+def url(endpoint):
+ if not endpoint.endswith('/'):
+ endpoint = '%s/' % endpoint
+
+ return '%s/%s' % (URL_ROOT.rstrip('/'), endpoint)
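+
+# e.g. with the default URL_ROOT above, url('build') returns
+# 'http://buildlogs.mongodb.org/build/'.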
+
+def post(endpoint, data, headers=None):
+ data = json.dumps(data, encoding='utf-8')
+
+ headers = headers or {}
+ headers.update({'Content-Type': 'application/json; charset=utf-8'})
+
+ req = urllib2.Request(url=url(endpoint), data=data, headers=headers)
+ try:
+ response = url_opener.open(req)
+ except urllib2.URLError:
+        traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ # indicate that the request did not succeed
+ return None
+
+ response_headers = dict(response.info())
+
+ # eg "Content-Type: application/json; charset=utf-8"
+ content_type = response_headers.get('content-type')
+ match = re.match(r'(?P<mimetype>[^;]+).*(?:charset=(?P<charset>[^ ]+))?$', content_type)
+ if match and match.group('mimetype') == 'application/json':
+ encoding = match.group('charset') or 'utf-8'
+ return json.load(response, encoding=encoding)
+
+ return response.read()
+
+def traceback_to_stderr(func):
+ """
+ decorator which logs any exceptions encountered to stderr
+ and returns none.
+ """
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError, err:
+ sys.stderr.write('error: HTTP code %d\n----\n' % err.code)
+ if hasattr(err, 'hdrs'):
+ for k, v in err.hdrs.items():
+ sys.stderr.write("%s: %s\n" % (k, v))
+ sys.stderr.write('\n')
+ sys.stderr.write(err.read())
+ sys.stderr.write('\n----\n')
+ sys.stderr.flush()
+ except:
+ sys.stderr.write('Traceback from buildlogger:\n')
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.flush()
+ return None
+ return wrapper
+
+
+@traceback_to_stderr
+def get_or_create_build(builder, buildnum, extra={}):
+ data = {'builder': builder, 'buildnum': buildnum}
+ data.update(extra)
+ response = post('build', data)
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def create_test(build_id, test_filename, test_command, test_phase):
+ response = post('build/%s/test' % build_id, {
+ 'test_filename': test_filename,
+ 'command': test_command,
+ 'phase': test_phase,
+ })
+ if response is None:
+ return None
+ return response['id']
+
+@traceback_to_stderr
+def append_test_logs(build_id, test_id, log_lines):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def append_global_logs(build_id, log_lines):
+ """
+ "global" logs are for the mongod(s) started by smoke.py
+ that last the duration of a test phase -- since there
+ may be output in here that is important but spans individual
+ tests, the buildlogs webapp handles these logs specially.
+ """
+ response = post('build/%s' % build_id, data=log_lines)
+ if response is None:
+ return False
+ return True
+
+@traceback_to_stderr
+def finish_test(build_id, test_id, failed=False):
+ response = post('build/%s/test/%s' % (build_id, test_id), data=[], headers={
+ 'X-Sendlogs-Test-Done': 'true',
+ 'X-Sendlogs-Test-Failed': failed and 'true' or 'false',
+ })
+ if response is None:
+ return False
+ return True
+
+def run_and_echo(command):
+ """
+ this just calls the command, and returns its return code,
+ allowing stdout and stderr to work as normal. it is used
+ as a fallback when environment variables or python
+ dependencies cannot be configured, or when the logging
+ webapp is unavailable, etc
+ """
+ proc = subprocess.Popen(command)
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ proc.wait()
+
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+class LogAppender(object):
+ def __init__(self, callback, args, send_after_lines=2000, send_after_seconds=10):
+ self.callback = callback
+ self.callback_args = args
+
+ self.send_after_lines = send_after_lines
+ self.send_after_seconds = send_after_seconds
+
+ self.buf = []
+ self.retrybuf = []
+ self.last_sent = time.time()
+
+ def __call__(self, line):
+ self.buf.append((time.time(), line))
+
+ delay = time.time() - self.last_sent
+ if len(self.buf) >= self.send_after_lines or delay >= self.send_after_seconds:
+ self.submit()
+
+ # no return value is expected
+
+ def submit(self):
+ if len(self.buf) + len(self.retrybuf) == 0:
+ return True
+
+ args = list(self.callback_args)
+ args.append(list(self.buf) + self.retrybuf)
+
+ self.last_sent = time.time()
+
+ if self.callback(*args):
+ self.buf = []
+ self.retrybuf = []
+ return True
+ else:
+ self.retrybuf += self.buf
+ self.buf = []
+ return False
+
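+# Usage sketch (build_id/test_id are placeholders): each call buffers a line and
+# a batch is submitted once send_after_lines or send_after_seconds is reached;
+# submit() flushes whatever remains.
+#
+#   appender = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+#   appender('connection accepted')   # buffered, possibly submitted in a batch
+#   appender.submit()                 # flush any remaining buffered lines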
+
+def wrap_test(command):
+ """
+ call the given command, intercept its stdout and stderr,
+    and send results to the buildlogger webapp in batches
+    (every 2000 lines or 10 seconds by default; see LogAppender)
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('buildlogger: build number ("%s") was not an int\n' % buildnum)
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ # test takes some extra info
+ phase = os.environ.get('MONGO_PHASE', 'unknown')
+ test_filename = os.environ.get('MONGO_TEST_FILENAME', 'unknown')
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+ build_info.pop('MONGO_PHASE', None)
+ build_info.pop('MONGO_TEST_FILENAME', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ test_id = create_test(build_id, test_filename, ' '.join(command), phase)
+ if not test_id:
+ return run_and_echo(command)
+
+ # the peculiar formatting here matches what is printed by
+ # smoke.py when starting tests
+ output_url = '%s/build/%s/test/%s/' % (URL_ROOT.rstrip('/'), build_id, test_id)
+ sys.stdout.write(' (output suppressed; see %s)\n' % output_url)
+ sys.stdout.flush()
+
+ callback = LogAppender(callback=append_test_logs, args=(build_id, test_id))
+ returncode = loop_and_callback(command, callback)
+ failed = bool(returncode != 0)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending test logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ tries = 5
+    while not finish_test(build_id, test_id, failed) and tries > 0:
+ sys.stderr.write('failed to mark test finished, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def wrap_global(command):
+ """
+ call the given command, intercept its stdout and stderr,
+ and send results in batches of 100 lines or 10s to the
+ buildlogger webapp. see :func:`append_global_logs` for the
+ difference between "global" and "test" log output.
+ """
+
+ # get builder name and build number from environment
+ builder = os.environ.get('MONGO_BUILDER_NAME')
+ buildnum = os.environ.get('MONGO_BUILD_NUMBER')
+
+ if builder is None or buildnum is None:
+ return run_and_echo(command)
+
+ try:
+ buildnum = int(buildnum)
+ except ValueError:
+ sys.stderr.write('int(os.environ["MONGO_BUILD_NUMBER"]):\n')
+ sys.stderr.write(traceback.format_exc())
+ sys.stderr.flush()
+ return run_and_echo(command)
+
+ build_info = dict((k, v) for k, v in os.environ.items() if k.startswith('MONGO_'))
+ build_info.pop('MONGO_BUILDER_NAME', None)
+ build_info.pop('MONGO_BUILD_NUMBER', None)
+
+ build_id = get_or_create_build(builder, buildnum, extra=build_info)
+ if not build_id:
+ return run_and_echo(command)
+
+ callback = LogAppender(callback=append_global_logs, args=(build_id, ))
+ returncode = loop_and_callback(command, callback)
+
+ # this will append any remaining unsubmitted logs, or
+ # return True if there are none left to submit
+ tries = 5
+ while not callback.submit() and tries > 0:
+ sys.stderr.write('failed to finish sending global logs, retrying in 1s\n')
+ sys.stderr.flush()
+ time.sleep(1)
+ tries -= 1
+
+ return returncode
+
+def loop_and_callback(command, callback):
+ """
+ run the given command (a sequence of arguments, ordinarily
+ from sys.argv), and call the given callback with each line
+ of stdout or stderr encountered. after the command is finished,
+ callback is called once more with None instead of a string.
+ """
+ proc = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ )
+
+ def handle_sigterm(signum, frame):
+ try:
+ proc.send_signal(signum)
+ except AttributeError:
+ os.kill(proc.pid, signum)
+
+ # register a handler to delegate SIGTERM
+ # to the child process
+ orig_handler = signal.signal(signal.SIGTERM, handle_sigterm)
+
+ while proc.poll() is None:
+ try:
+ line = proc.stdout.readline().strip('\r\n')
+ line = utils.unicode_dammit(line)
+ callback(line)
+ except IOError:
+ # if the signal handler is called while
+ # we're waiting for readline() to return,
+ # don't show a traceback
+ break
+
+ # There may be additional buffered output
+ for line in proc.stdout.readlines():
+ callback(line.strip('\r\n'))
+
+ # restore the original signal handler, if any
+ signal.signal(signal.SIGTERM, orig_handler)
+ return proc.returncode
+
+
+if __name__ == '__main__':
+ # argv[0] is 'buildlogger.py'
+ del sys.argv[0]
+
+ if sys.argv[0] in ('-g', '--global'):
+ # then this is wrapping a "global" command, and should
+ # submit global logs to the build, not test logs to a
+ # test within the build
+ del sys.argv[0]
+ wrapper = wrap_global
+
+ else:
+ wrapper = wrap_test
+
+ # if we are missing credentials or the json module, then
+ # we can't use buildlogger; so just echo output, but also
+ # log why we can't work.
+ if json is None:
+ sys.stderr.write('buildlogger: could not import a json module\n')
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ elif username is None or password is None:
+ sys.stderr.write('buildlogger: could not find or import %s for authentication\n' % credentials_file)
+ sys.stderr.flush()
+ wrapper = run_and_echo
+
+ # otherwise wrap a test command as normal; the
+ # wrapper functions return the return code of
+ # the wrapped command, so that should be our
+ # exit code as well.
+ sys.exit(wrapper(sys.argv))
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/cleanbb.py b/src/mongo/gotools/test/qa-tests/buildscripts/cleanbb.py
new file mode 100644
index 00000000000..fee7efdc0c1
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/cleanbb.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+import re
+import sys
+import os, os.path
+import utils
+import time
+from optparse import OptionParser
+
+def shouldKill( c, root=None ):
+
+ if "smoke.py" in c:
+ return False
+
+ if "emr.py" in c:
+ return False
+
+ if "java" in c:
+ return False
+
+ # if root directory is provided, see if command line matches mongod process running
+ # with the same data directory
+
+    if root and re.compile(r"(\W|^)mongod(\.exe)?\s+.*--dbpath(\s+|=)%s(\s+|$)" % root).search( c ):
+ return True
+
+ if ( c.find( "buildbot" ) >= 0 or c.find( "slave" ) >= 0 ) and c.find( "/mongo/" ) >= 0:
+ return True
+
+ if c.find( "xml-data/build-dir" ) >= 0: # for bamboo
+ return True
+
+ return False
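+
+# For example, with root="/data/db/" a command line such as
+#   "mongod --dbpath /data/db/ --port 27017"
+# matches and is killed, while smoke.py, emr.py and java processes are spared.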
+
+def killprocs( signal="", root=None ):
+ killed = 0
+
+ if sys.platform == 'win32':
+ return killed
+
+ l = utils.getprocesslist()
+ print( "num procs:" + str( len( l ) ) )
+ if len(l) == 0:
+ print( "no procs" )
+ try:
+            print( utils.execsys( "/sbin/ifconfig -a" ) )
+ except Exception,e:
+ print( "can't get interfaces" + str( e ) )
+
+ for x in l:
+ x = x.lstrip()
+ if not shouldKill( x, root=root ):
+ continue
+
+ pid = x.split( " " )[0]
+ print( "killing: " + x )
+ utils.execsys( "/bin/kill " + signal + " " + pid )
+ killed = killed + 1
+
+ return killed
+
+
+def tryToRemove(path):
+ for _ in range(60):
+ try:
+ os.remove(path)
+ return True
+ except OSError, e:
+ errno = getattr(e, 'winerror', None)
+ # check for the access denied and file in use WindowsErrors
+ if errno in (5, 32):
+ print("os.remove(%s) failed, retrying in one second." % path)
+ time.sleep(1)
+ else:
+ raise e
+ return False
+
+
+def cleanup( root , nokill ):
+ if nokill:
+ print "nokill requested, not killing anybody"
+ else:
+ if killprocs( root=root ) > 0:
+ time.sleep(3)
+ killprocs( "-9", root=root )
+
+ # delete all regular files, directories can stay
+ # NOTE: if we delete directories later, we can't delete diskfulltest
+ for ( dirpath , dirnames , filenames ) in os.walk( root , topdown=False ):
+ for x in filenames:
+ foo = dirpath + "/" + x
+ if os.path.exists(foo):
+ if not tryToRemove(foo):
+ raise Exception("Couldn't remove file '%s' after 60 seconds" % foo)
+
+if __name__ == "__main__":
+ parser = OptionParser(usage="read the script")
+ parser.add_option("--nokill", dest='nokill', default=False, action='store_true')
+ (options, args) = parser.parse_args()
+
+ root = "/data/db/"
+ if len(args) > 0:
+ root = args[0]
+
+ cleanup( root , options.nokill )
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmoke.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmoke.py
new file mode 100755
index 00000000000..a6cb03cb620
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmoke.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python
+
+"""
+Command line utility for executing MongoDB tests of all kinds.
+"""
+
+from __future__ import absolute_import
+
+import json
+import os.path
+import random
+import signal
+import sys
+import time
+import traceback
+
+# Get relative imports to work when the package is not installed on the PYTHONPATH.
+if __name__ == "__main__" and __package__ is None:
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+ from buildscripts import resmokelib
+
+
+def _execute_suite(suite, logging_config):
+ """
+ Executes each test group of 'suite', failing fast if requested.
+
+ Returns true if the execution of the suite was interrupted by the
+ user, and false otherwise.
+ """
+
+ logger = resmokelib.logging.loggers.EXECUTOR
+
+ for group in suite.test_groups:
+ if resmokelib.config.SHUFFLE:
+ logger.info("Shuffling order of tests for %ss in suite %s. The seed is %d.",
+ group.test_kind, suite.get_name(), resmokelib.config.RANDOM_SEED)
+ random.seed(resmokelib.config.RANDOM_SEED)
+ random.shuffle(group.tests)
+
+ if resmokelib.config.DRY_RUN == "tests":
+ sb = []
+ sb.append("Tests that would be run for %ss in suite %s:"
+ % (group.test_kind, suite.get_name()))
+ if len(group.tests) > 0:
+ for test in group.tests:
+ sb.append(test)
+ else:
+ sb.append("(no tests)")
+ logger.info("\n".join(sb))
+
+ # Set a successful return code on the test group because we want to output the tests
+ # that would get run by any other suites the user specified.
+ group.return_code = 0
+ continue
+
+ if len(group.tests) == 0:
+ logger.info("Skipping %ss, no tests to run", group.test_kind)
+ continue
+
+ group_config = suite.get_executor_config().get(group.test_kind, {})
+ executor = resmokelib.testing.executor.TestGroupExecutor(logger,
+ group,
+ logging_config,
+ **group_config)
+
+ try:
+ executor.run()
+ if resmokelib.config.FAIL_FAST and group.return_code != 0:
+ suite.return_code = group.return_code
+ return False
+ except resmokelib.errors.UserInterrupt:
+ suite.return_code = 130 # Simulate SIGINT as exit code.
+ return True
+ except:
+ logger.exception("Encountered an error when running %ss of suite %s.",
+ group.test_kind, suite.get_name())
+ suite.return_code = 2
+ return False
+
+
+def _log_summary(logger, suites, time_taken):
+ if len(suites) > 1:
+ sb = []
+ sb.append("Summary of all suites: %d suites ran in %0.2f seconds"
+ % (len(suites), time_taken))
+ for suite in suites:
+ suite_sb = []
+ suite.summarize(suite_sb)
+ sb.append(" %s: %s" % (suite.get_name(), "\n ".join(suite_sb)))
+
+ logger.info("=" * 80)
+ logger.info("\n".join(sb))
+
+
+def _summarize_suite(suite):
+ sb = []
+ suite.summarize(sb)
+ return "\n".join(sb)
+
+
+def _dump_suite_config(suite, logging_config):
+ """
+ Returns a string that represents the YAML configuration of a suite.
+
+ TODO: include the "options" key in the result
+ """
+
+ sb = []
+ sb.append("YAML configuration of suite %s" % (suite.get_name()))
+ sb.append(resmokelib.utils.dump_yaml({"selector": suite.get_selector_config()}))
+ sb.append("")
+ sb.append(resmokelib.utils.dump_yaml({"executor": suite.get_executor_config()}))
+ sb.append("")
+ sb.append(resmokelib.utils.dump_yaml({"logging": logging_config}))
+ return "\n".join(sb)
+
+
+def _write_report_file(suites, pathname):
+ """
+ Writes the report.json file if requested.
+ """
+
+ reports = []
+ for suite in suites:
+ for group in suite.test_groups:
+ reports.extend(group.get_reports())
+
+ combined_report_dict = resmokelib.testing.report.TestReport.combine(*reports).as_dict()
+ with open(pathname, "w") as fp:
+ json.dump(combined_report_dict, fp)
+
+
+def main():
+ start_time = time.time()
+
+ values, args = resmokelib.parser.parse_command_line()
+
+ logging_config = resmokelib.parser.get_logging_config(values)
+ resmokelib.logging.config.apply_config(logging_config)
+ resmokelib.logging.flush.start_thread()
+
+ resmokelib.parser.update_config_vars(values)
+
+ exec_logger = resmokelib.logging.loggers.EXECUTOR
+ resmoke_logger = resmokelib.logging.loggers.new_logger("resmoke", parent=exec_logger)
+
+ if values.list_suites:
+ suite_names = resmokelib.parser.get_named_suites()
+ resmoke_logger.info("Suites available to execute:\n%s", "\n".join(suite_names))
+ sys.exit(0)
+
+ interrupted = False
+ suites = resmokelib.parser.get_suites(values, args)
+ try:
+ for suite in suites:
+ resmoke_logger.info(_dump_suite_config(suite, logging_config))
+
+ suite.record_start()
+ interrupted = _execute_suite(suite, logging_config)
+ suite.record_end()
+
+ resmoke_logger.info("=" * 80)
+ resmoke_logger.info("Summary of %s suite: %s",
+ suite.get_name(), _summarize_suite(suite))
+
+ if interrupted or (resmokelib.config.FAIL_FAST and suite.return_code != 0):
+ time_taken = time.time() - start_time
+ _log_summary(resmoke_logger, suites, time_taken)
+ sys.exit(suite.return_code)
+
+ time_taken = time.time() - start_time
+ _log_summary(resmoke_logger, suites, time_taken)
+
+ # Exit with a nonzero code if any of the suites failed.
+ exit_code = max(suite.return_code for suite in suites)
+ sys.exit(exit_code)
+ finally:
+ if not interrupted:
+ resmokelib.logging.flush.stop_thread()
+
+ if resmokelib.config.REPORT_FILE is not None:
+ _write_report_file(suites, resmokelib.config.REPORT_FILE)
+
+
+if __name__ == "__main__":
+
+ def _dump_stacks(signum, frame):
+ """
+ Signal handler that will dump the stacks of all threads.
+ """
+
+ header_msg = "Dumping stacks due to SIGUSR1 signal"
+
+ sb = []
+ sb.append("=" * len(header_msg))
+ sb.append(header_msg)
+ sb.append("=" * len(header_msg))
+
+ frames = sys._current_frames()
+ sb.append("Total threads: %d" % (len(frames)))
+ sb.append("")
+
+ for thread_id in frames:
+ stack = frames[thread_id]
+ sb.append("Thread %d:" % (thread_id))
+ sb.append("".join(traceback.format_stack(stack)))
+
+ sb.append("=" * len(header_msg))
+ print "\n".join(sb)
+
+ try:
+ signal.signal(signal.SIGUSR1, _dump_stacks)
+ except AttributeError:
+ print "Cannot catch signals on Windows"
+
+ main()
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/__init__.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/__init__.py
new file mode 100644
index 00000000000..37f5a889956
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/__init__.py
@@ -0,0 +1,4 @@
+from __future__ import absolute_import
+
+from .suites import NAMED_SUITES
+from .loggers import NAMED_LOGGERS
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py
new file mode 100644
index 00000000000..6511d496364
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/__init__.py
@@ -0,0 +1,36 @@
+"""
+Defines a mapping of shortened names for logger configuration files to
+their full path.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+
+
+def _get_named_loggers():
+ """
+ Explores this directory for any YAML configuration files.
+
+ Returns a mapping of basenames without the file extension to their
+ full path.
+ """
+
+ dirname = os.path.dirname(__file__)
+ named_loggers = {}
+
+ try:
+ (root, _dirs, files) = os.walk(dirname).next()
+ for filename in files:
+ (short_name, ext) = os.path.splitext(filename)
+ if ext in (".yml", ".yaml"):
+ pathname = os.path.join(root, filename)
+ named_loggers[short_name] = os.path.relpath(pathname)
+ except StopIteration:
+ # 'dirname' does not exist, which should be impossible because it contains __file__.
+ raise IOError("Directory '%s' does not exist" % (dirname))
+
+ return named_loggers
+
+NAMED_LOGGERS = _get_named_loggers()
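+
+# With the files in this directory, NAMED_LOGGERS maps e.g. "buildlogger",
+# "console", "file" and "suppress" to the paths of their .yml files.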
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml
new file mode 100644
index 00000000000..302d2677491
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/buildlogger.yml
@@ -0,0 +1,13 @@
+logging:
+ executor:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.StreamHandler
+ fixture:
+ format: '[%(name)s] %(message)s'
+ handlers:
+ - class: buildlogger
+ tests:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: buildlogger
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml
new file mode 100644
index 00000000000..b233de409b3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/console.yml
@@ -0,0 +1,13 @@
+logging:
+ executor:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.StreamHandler
+ fixture:
+ format: '[%(name)s] %(message)s'
+ handlers:
+ - class: logging.StreamHandler
+ tests:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.StreamHandler
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml
new file mode 100644
index 00000000000..3d2d15cd5bc
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/file.yml
@@ -0,0 +1,19 @@
+logging:
+ executor:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.FileHandler
+ filename: executor.log
+ mode: w
+ fixture:
+ format: '[%(name)s] %(message)s'
+ handlers:
+ - class: logging.FileHandler
+ filename: fixture.log
+ mode: w
+ tests:
+ format: '[%(name)s] %(asctime)s %(message)s'
+ handlers:
+ - class: logging.FileHandler
+ filename: tests.log
+ mode: w
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml
new file mode 100644
index 00000000000..c69bb793b0b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/loggers/suppress.yml
@@ -0,0 +1,10 @@
+logging:
+ executor:
+ handlers:
+ - class: logging.NullHandler
+ fixture:
+ handlers:
+ - class: logging.NullHandler
+ tests:
+ handlers:
+ - class: logging.NullHandler
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py
new file mode 100644
index 00000000000..e075dd22e0d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/__init__.py
@@ -0,0 +1,36 @@
+"""
+Defines a mapping of shortened names for suite configuration files to
+their full path.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+
+
+def _get_named_suites():
+ """
+ Explores this directory for any YAML configuration files.
+
+ Returns a mapping of basenames without the file extension to their
+ full path.
+ """
+
+ dirname = os.path.dirname(__file__)
+ named_suites = {}
+
+ try:
+ (root, _dirs, files) = os.walk(dirname).next()
+ for filename in files:
+ (short_name, ext) = os.path.splitext(filename)
+ if ext in (".yml", ".yaml"):
+ pathname = os.path.join(root, filename)
+ named_suites[short_name] = os.path.relpath(pathname)
+ except StopIteration:
+ # 'dirname' does not exist, which should be impossible because it contains __file__.
+ raise IOError("Directory '%s' does not exist" % (dirname))
+
+ return named_suites
+
+NAMED_SUITES = _get_named_suites()
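+
+# With the files in this directory, NAMED_SUITES maps e.g. "core", "core_ssl",
+# "restore_archive" and "restore_gzip" to the paths of their .yml files.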
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/core.yml b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/core.yml
new file mode 100644
index 00000000000..bc094c1f549
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/core.yml
@@ -0,0 +1,27 @@
+selector:
+ js_test:
+ roots:
+ - jstests/bson/*.js
+ - jstests/dump/*.js
+ - jstests/export/*.js
+ - jstests/files/*.js
+ - jstests/import/*.js
+ - jstests/oplog/*.js
+ - jstests/restore/*.js
+ - jstests/stat/*.js
+ - jstests/top/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ readMode: commands
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml
new file mode 100644
index 00000000000..2a9330e2856
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/core_ssl.yml
@@ -0,0 +1,38 @@
+selector:
+ js_test:
+ roots:
+ - jstests/bson/*.js
+ - jstests/dump/*.js
+ - jstests/export/*.js
+ - jstests/files/*.js
+ - jstests/import/*.js
+ - jstests/oplog/*.js
+ - jstests/restore/*.js
+ - jstests/stat/*.js
+ - jstests/top/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ global_vars:
+ TestData:
+ useSSL: true
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ readMode: commands
+ ssl: ''
+ sslAllowInvalidCertificates: ''
+ sslCAFile: jstests/libs/ca.pem
+ sslPEMKeyFile: jstests/libs/client.pem
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+ sslMode: allowSSL
+ sslPEMKeyFile: jstests/libs/server.pem
+ sslCAFile: jstests/libs/ca.pem
+ sslWeakCertificateValidation: ''
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml
new file mode 100644
index 00000000000..8c51a3b2f46
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_archive.yml
@@ -0,0 +1,23 @@
+selector:
+ js_test:
+ roots:
+ - jstests/dump/*.js
+ - jstests/restore/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ eval: "load('jstests/configs/archive_targets.js');"
+ readMode: commands
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+
+
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml
new file mode 100644
index 00000000000..768b88ca6dd
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokeconfig/suites/restore_gzip.yml
@@ -0,0 +1,21 @@
+selector:
+ js_test:
+ roots:
+ - jstests/dump/*.js
+ - jstests/restore/*.js
+
+executor:
+ js_test:
+ config:
+ shell_options:
+ eval_prepend: "load('jstests/libs/servers.js'); load('jstests/libs/servers_misc.js');"
+ eval: "load('jstests/configs/gzip_targets.js');"
+ hooks:
+ - class: CleanEveryN
+ n: 20
+ fixture:
+ class: MongoDFixture
+ mongod_options:
+ set_parameters:
+ enableTestCommands: 1
+
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/__init__.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/__init__.py
new file mode 100644
index 00000000000..06b0539e25b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/__init__.py
@@ -0,0 +1,7 @@
+from __future__ import absolute_import
+
+from . import errors
+from . import logging
+from . import parser
+from . import testing
+from . import utils
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/config.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/config.py
new file mode 100644
index 00000000000..ecb7fec7fa3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/config.py
@@ -0,0 +1,165 @@
+"""
+Configuration options for resmoke.py.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import time
+
+
+##
+# Default values.
+##
+
+# Default path for where to look for executables.
+DEFAULT_DBTEST_EXECUTABLE = os.path.join(os.curdir, "dbtest")
+DEFAULT_MONGO_EXECUTABLE = os.path.join(os.curdir, "mongo")
+DEFAULT_MONGOD_EXECUTABLE = os.path.join(os.curdir, "mongod")
+DEFAULT_MONGOS_EXECUTABLE = os.path.join(os.curdir, "mongos")
+
+# Default root directory for where resmoke.py puts directories containing data files of mongod's it
+# starts, as well as those started by individual tests.
+DEFAULT_DBPATH_PREFIX = os.path.normpath("/data/db")
+
+# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
+# by resmoke.py.
+FIXTURE_SUBDIR = "resmoke"
+
+# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
+# by individual tests.
+MONGO_RUNNER_SUBDIR = "mongorunner"
+
+# Names below correspond to how they are specified via the command line or in the options YAML file.
+DEFAULTS = {
+ "basePort": 20000,
+ "buildloggerUrl": "https://logkeeper.mongodb.org",
+ "continueOnFailure": False,
+ "dbpathPrefix": None,
+ "dbtest": None,
+ "dryRun": None,
+ "excludeWithAllTags": None,
+ "excludeWithAnyTags": None,
+ "includeWithAllTags": None,
+ "includeWithAnyTags": None,
+ "jobs": 1,
+ "mongo": None,
+ "mongod": None,
+ "mongodSetParameters": None,
+ "mongos": None,
+ "mongosSetParameters": None,
+ "nojournal": False,
+ "repeat": 1,
+ "reportFile": None,
+ "seed": long(time.time() * 256), # Taken from random.py code in Python 2.7.
+ "shellReadMode": None,
+ "shellWriteMode": None,
+ "shuffle": False,
+ "storageEngine": None,
+ "wiredTigerCollectionConfigString": None,
+ "wiredTigerEngineConfigString": None,
+ "wiredTigerIndexConfigString": None
+}
+
+
+##
+# Variables that are set by the user at the command line or with --options.
+##
+
+# The starting port number to use for mongod and mongos processes spawned by resmoke.py and the
+# mongo shell.
+BASE_PORT = None
+
+# The root url of the buildlogger server.
+BUILDLOGGER_URL = None
+
+# Root directory for where resmoke.py puts directories containing data files of mongod's it starts,
+# as well as those started by individual tests.
+DBPATH_PREFIX = None
+
+# The path to the dbtest executable used by resmoke.py.
+DBTEST_EXECUTABLE = None
+
+# If set to "tests", then resmoke.py will output the tests that would be run by each suite (without
+# actually running them).
+DRY_RUN = None
+
+# If set, then any jstests that have all of the specified tags will be excluded from the suite(s).
+EXCLUDE_WITH_ALL_TAGS = None
+
+# If set, then any jstests that have any of the specified tags will be excluded from the suite(s).
+EXCLUDE_WITH_ANY_TAGS = None
+
+# If true, then a test failure or error will cause resmoke.py to exit and not run any more tests.
+FAIL_FAST = None
+
+# If set, then only jstests that have all of the specified tags will be run during the jstest
+# portion of the suite(s).
+INCLUDE_WITH_ALL_TAGS = None
+
+# If set, then only jstests that have at least one of the specified tags will be run during the
+# jstest portion of the suite(s).
+INCLUDE_WITH_ANY_TAGS = None
+
+# If set, then resmoke.py starts the specified number of Job instances to run tests.
+JOBS = None
+
+# The path to the mongo executable used by resmoke.py.
+MONGO_EXECUTABLE = None
+
+# The path to the mongod executable used by resmoke.py.
+MONGOD_EXECUTABLE = None
+
+# The --setParameter options passed to mongod.
+MONGOD_SET_PARAMETERS = None
+
+# The path to the mongos executable used by resmoke.py.
+MONGOS_EXECUTABLE = None
+
+# The --setParameter options passed to mongos.
+MONGOS_SET_PARAMETERS = None
+
+# If true, then all mongod's started by resmoke.py and by the mongo shell will not have journaling
+# enabled.
+NO_JOURNAL = None
+
+# If true, then all mongod's started by resmoke.py and by the mongo shell will not preallocate
+# journal files.
+NO_PREALLOC_JOURNAL = None
+
+# If set, then the RNG is seeded with the specified value. Otherwise uses a seed based on the time
+# this module was loaded.
+RANDOM_SEED = None
+
+# If set, then each suite is repeated the specified number of times.
+REPEAT = None
+
+# If set, then resmoke.py will write out a report file with the status of each test that ran.
+REPORT_FILE = None
+
+# If set, then mongo shells started by resmoke.py will use the specified read mode.
+SHELL_READ_MODE = None
+
+# If set, then mongo shells started by resmoke.py will use the specified write mode.
+SHELL_WRITE_MODE = None
+
+# If true, then the order the tests run in is randomized. Otherwise the tests will run in
+# alphabetical (case-insensitive) order.
+SHUFFLE = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# storage engine.
+STORAGE_ENGINE = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# WiredTiger collection configuration settings.
+WT_COLL_CONFIG = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# WiredTiger storage engine configuration settings.
+WT_ENGINE_CONFIG = None
+
+# If set, then all mongod's started by resmoke.py and by the mongo shell will use the specified
+# WiredTiger index configuration settings.
+WT_INDEX_CONFIG = None
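A minimal sketch of how these DEFAULTS are typically folded into the module-level variables above (the real wiring is done by the parser module added later in this change; the helper name here is illustrative):

    # Illustrative only: a parsed command line value wins; otherwise the DEFAULTS entry is used.
    def _resolve(parsed_value, config_key):
        return parsed_value if parsed_value is not None else DEFAULTS[config_key]

    # e.g. BASE_PORT = _resolve(options.base_port, "basePort")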
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/__init__.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/__init__.py
new file mode 100644
index 00000000000..29a19a52500
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+
+from . import process
+from . import programs
+from . import network
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/network.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/network.py
new file mode 100644
index 00000000000..44e54667a67
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/network.py
@@ -0,0 +1,114 @@
+"""
+Class used to allocate ports for use by various mongod and mongos
+processes involved in running the tests.
+"""
+
+from __future__ import absolute_import
+
+import collections
+import functools
+import threading
+
+from .. import config
+from .. import errors
+
+
+def _check_port(func):
+ """
+ A decorator that verifies the port returned by the wrapped function
+ is in the valid range.
+
+ Returns the port if it is valid, and raises a PortAllocationError
+ otherwise.
+ """
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ port = func(*args, **kwargs)
+
+ if port < 0:
+ raise errors.PortAllocationError("Attempted to use a negative port")
+
+ if port > PortAllocator.MAX_PORT:
+ raise errors.PortAllocationError("Exhausted all available ports. Consider decreasing"
+ " the number of jobs, or using a lower base port")
+
+ return port
+
+ return wrapper
+
+
+class PortAllocator(object):
+ """
+ This class is responsible for allocating ranges of ports.
+
+ It reserves a range of ports for each job with the first part of
+ that range used for the fixture started by that job, and the second
+ part of the range used for mongod and mongos processes started by
+ tests run by that job.
+ """
+
+ # A PortAllocator will not return any port greater than this number.
+ MAX_PORT = 2 ** 16 - 1
+
+ # Each job gets a contiguous range of _PORTS_PER_JOB ports, with job 0 getting the first block
+ # of ports, job 1 getting the second block, and so on.
+ _PORTS_PER_JOB = 250
+
+ # The first _PORTS_PER_FIXTURE ports of each range are reserved for the fixtures, the remainder
+ # of the port range is used by tests.
+ _PORTS_PER_FIXTURE = 10
+
+ _NUM_USED_PORTS_LOCK = threading.Lock()
+
+ # Used to keep track of how many ports a fixture has allocated.
+ _NUM_USED_PORTS = collections.defaultdict(int)
+
+ @classmethod
+ @_check_port
+ def next_fixture_port(cls, job_num):
+ """
+ Returns the next port for a fixture to use.
+
+ Raises a PortAllocationError if the fixture has requested more
+ ports than are reserved per job, or if the next port is not a
+ valid port number.
+ """
+ with cls._NUM_USED_PORTS_LOCK:
+ start_port = config.BASE_PORT + (job_num * cls._PORTS_PER_JOB)
+ num_used_ports = cls._NUM_USED_PORTS[job_num]
+ next_port = start_port + num_used_ports
+
+ cls._NUM_USED_PORTS[job_num] += 1
+
+ if next_port >= start_port + cls._PORTS_PER_FIXTURE:
+ raise errors.PortAllocationError(
+ "Fixture has requested more than the %d ports reserved per fixture"
+ % cls._PORTS_PER_FIXTURE)
+
+ return next_port
+
+ @classmethod
+ @_check_port
+ def min_test_port(cls, job_num):
+ """
+ For the given job, returns the lowest port that is reserved for
+ use by tests.
+
+ Raises a PortAllocationError if that port is higher than the
+ maximum port.
+ """
+ return config.BASE_PORT + (job_num * cls._PORTS_PER_JOB) + cls._PORTS_PER_FIXTURE
+
+ @classmethod
+ @_check_port
+ def max_test_port(cls, job_num):
+ """
+ For the given job, returns the highest port that is reserved
+ for use by tests.
+
+ Raises a PortAllocationError if that port is higher than the
+ maximum port.
+ """
+ next_range_start = config.BASE_PORT + ((job_num + 1) * cls._PORTS_PER_JOB)
+ return next_range_start - 1
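To make the arithmetic concrete, a worked example with the defaults above (base port 20000, 250 ports per job, the first 10 reserved for the fixture):

    # Worked example of the port math in PortAllocator (BASE_PORT = 20000).
    BASE_PORT, PORTS_PER_JOB, PORTS_PER_FIXTURE = 20000, 250, 10
    for job_num in (0, 1):
        start = BASE_PORT + job_num * PORTS_PER_JOB
        print job_num, start, start + PORTS_PER_FIXTURE, start + PORTS_PER_JOB - 1
    # job 0: fixture ports 20000-20009, test ports 20010-20249
    # job 1: fixture ports 20250-20259, test ports 20260-20499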
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/pipe.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/pipe.py
new file mode 100644
index 00000000000..bb080721b2d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/pipe.py
@@ -0,0 +1,87 @@
+"""
+Helper class to read output of a subprocess. Used to avoid deadlocks
+from the pipe buffer filling up and blocking the subprocess while it's
+being waited on.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+
+class LoggerPipe(threading.Thread):
+ """
+ Asynchronously reads the output of a subprocess and sends it to a
+ logger.
+ """
+
+ # The start() and join() methods are not intended to be called directly on the LoggerPipe
+ # instance. Since we override them to raise NotImplementedError, the superclass's versions are preserved here.
+ __start = threading.Thread.start
+ __join = threading.Thread.join
+
+ def __init__(self, logger, level, pipe_out):
+ """
+ Initializes the LoggerPipe with the specified logger, logging
+ level to use, and pipe to read from.
+ """
+
+ threading.Thread.__init__(self)
+ # Main thread should not call join() when exiting
+ self.daemon = True
+
+ self.__logger = logger
+ self.__level = level
+ self.__pipe_out = pipe_out
+
+ self.__lock = threading.Lock()
+ self.__condition = threading.Condition(self.__lock)
+
+ self.__started = False
+ self.__finished = False
+
+ LoggerPipe.__start(self)
+
+ def start(self):
+ raise NotImplementedError("start should not be called directly")
+
+ def run(self):
+ """
+ Reads the output from 'pipe_out' and logs each line to 'logger'.
+ """
+
+ with self.__lock:
+ self.__started = True
+ self.__condition.notify_all()
+
+ # Close the pipe when finished reading all of the output.
+ with self.__pipe_out:
+ # Avoid buffering the output from the pipe.
+ for line in iter(self.__pipe_out.readline, b""):
+ # Convert the output of the process from a bytestring to a UTF-8 string, and replace
+ # any characters that cannot be decoded with the official Unicode replacement
+ # character, U+FFFD. The log messages of MongoDB processes are not always valid
+ # UTF-8 sequences. See SERVER-7506.
+ line = line.decode("utf-8", "replace")
+ self.__logger.log(self.__level, line.rstrip())
+
+ with self.__lock:
+ self.__finished = True
+ self.__condition.notify_all()
+
+ def join(self, timeout=None):
+ raise NotImplementedError("join should not be called directly")
+
+ def wait_until_started(self):
+ with self.__lock:
+ while not self.__started:
+ self.__condition.wait()
+
+ def wait_until_finished(self):
+ with self.__lock:
+ while not self.__finished:
+ self.__condition.wait()
+
+ # No need to pass a timeout to join() because the thread should already be done after
+ # notifying us it has finished reading output from the pipe.
+ LoggerPipe.__join(self) # Tidy up the started thread.
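A minimal usage sketch (the subprocess and logger are illustrative; process.py below is the real consumer):

    # Sketch: drain a child's stdout through a logger without blocking the child.
    import logging
    import subprocess
    proc = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE)
    out_pipe = LoggerPipe(logging.getLogger("demo"), logging.INFO, proc.stdout)
    out_pipe.wait_until_started()
    proc.wait()
    out_pipe.wait_until_finished()  # joins the reader thread and closes the pipe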
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/process.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/process.py
new file mode 100644
index 00000000000..f54b0f0a640
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/process.py
@@ -0,0 +1,234 @@
+"""
+A more reliable way to create and destroy processes.
+
+Uses job objects when running on Windows to ensure that all created
+processes are terminated.
+"""
+
+from __future__ import absolute_import
+
+import atexit
+import logging
+import os
+import os.path
+import sys
+import threading
+
+# The subprocess32 module resolves the thread-safety issues of the subprocess module in Python 2.x
+# when the _posixsubprocess C extension module is also available. Additionally, the _posixsubprocess
+# C extension module avoids triggering invalid free() calls on Python's internal data structure for
+# thread-local storage by skipping the PyOS_AfterFork() call when the 'preexec_fn' parameter isn't
+# specified to subprocess.Popen(). See SERVER-22219 for more details.
+#
+# The subprocess32 module is untested on Windows and thus isn't recommended for use, even when it's
+# installed. See https://github.com/google/python-subprocess32/blob/3.2.7/README.md#usage.
+if os.name == "posix" and sys.version_info[0] == 2:
+ try:
+ import subprocess32 as subprocess
+ except ImportError:
+ import warnings
+ warnings.warn(("Falling back to using the subprocess module because subprocess32 isn't"
+ " available. When using the subprocess module, a child process may trigger"
+ " an invalid free(). See SERVER-22219 for more details."),
+ RuntimeWarning)
+ import subprocess
+else:
+ import subprocess
+
+from . import pipe
+from .. import utils
+
+# Attempt to avoid race conditions (e.g. hangs caused by a file descriptor being left open) when
+# starting subprocesses concurrently from multiple threads by guarding calls to subprocess.Popen()
+# with a lock. See https://bugs.python.org/issue2320 and https://bugs.python.org/issue12739 as
+# reports of such hangs.
+#
+# This lock probably isn't necessary when both the subprocess32 module and its _posixsubprocess C
+# extension module are available because either
+# (a) the pipe2() syscall is available on the platform we're using, so pipes are atomically
+# created with the FD_CLOEXEC flag set on them, or
+# (b) the pipe2() syscall isn't available, but the GIL isn't released during the
+# _posixsubprocess.fork_exec() call or the _posixsubprocess.cloexec_pipe() call.
+# See https://bugs.python.org/issue7213 for more details.
+_POPEN_LOCK = threading.Lock()
+
+# Job objects are the only reliable way to ensure that processes are terminated on Windows.
+if sys.platform == "win32":
+ import win32api
+ import win32con
+ import win32job
+ import win32process
+ import winerror
+
+ def _init_job_object():
+ job_object = win32job.CreateJobObject(None, "")
+
+ # Get the limit and job state information of the newly-created job object.
+ job_info = win32job.QueryInformationJobObject(job_object,
+ win32job.JobObjectExtendedLimitInformation)
+
+ # Set up the job object so that closing the last handle to the job object
+ # will terminate all associated processes and destroy the job object itself.
+ job_info["BasicLimitInformation"]["LimitFlags"] |= \
+ win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+
+ # Update the limits of the job object.
+ win32job.SetInformationJobObject(job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ return job_object
+
+ # Don't create a job object if the current process is already inside one.
+ if win32job.IsProcessInJob(win32process.GetCurrentProcess(), None):
+ _JOB_OBJECT = None
+ else:
+ _JOB_OBJECT = _init_job_object()
+ atexit.register(win32api.CloseHandle, _JOB_OBJECT)
+
+
+class Process(object):
+ """
+ Wrapper around subprocess.Popen class.
+ """
+
+ def __init__(self, logger, args, env=None, env_vars=None):
+ """
+ Initializes the process with the specified logger, arguments,
+ and environment.
+ """
+
+ # Ensure that executable files on Windows have a ".exe" extension.
+ if sys.platform == "win32" and os.path.splitext(args[0])[1] != ".exe":
+ args[0] += ".exe"
+
+ self.logger = logger
+ self.args = args
+ self.env = utils.default_if_none(env, os.environ.copy())
+ if env_vars is not None:
+ self.env.update(env_vars)
+
+ self.pid = None
+
+ self._process = None
+ self._stdout_pipe = None
+ self._stderr_pipe = None
+
+ def start(self):
+ """
+ Starts the process and the logger pipes for its stdout and
+ stderr.
+ """
+
+ creation_flags = 0
+ if sys.platform == "win32" and _JOB_OBJECT is not None:
+ creation_flags |= win32process.CREATE_BREAKAWAY_FROM_JOB
+
+ # Use unbuffered I/O pipes to avoid adding delay between when the subprocess writes output
+ # and when the LoggerPipe thread reads it.
+ buffer_size = 0
+
+ # Close file descriptors in the child process before executing the program. This prevents
+ # file descriptors that were inherited due to multiple calls to fork() -- either within one
+ # thread, or concurrently from multiple threads -- from causing another subprocess to wait
+ # for the completion of the newly spawned child process. Closing other file descriptors
+ # isn't supported on Windows when stdout and stderr are redirected.
+ close_fds = (sys.platform != "win32")
+
+ with _POPEN_LOCK:
+ self._process = subprocess.Popen(self.args,
+ bufsize=buffer_size,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ close_fds=close_fds,
+ env=self.env,
+ creationflags=creation_flags)
+ self.pid = self._process.pid
+
+ self._stdout_pipe = pipe.LoggerPipe(self.logger, logging.INFO, self._process.stdout)
+ self._stderr_pipe = pipe.LoggerPipe(self.logger, logging.ERROR, self._process.stderr)
+
+ self._stdout_pipe.wait_until_started()
+ self._stderr_pipe.wait_until_started()
+
+ if sys.platform == "win32" and _JOB_OBJECT is not None:
+ try:
+ win32job.AssignProcessToJobObject(_JOB_OBJECT, self._process._handle)
+ except win32job.error as err:
+ # ERROR_ACCESS_DENIED (winerror=5) is received when the process has already died.
+ if err.winerror != winerror.ERROR_ACCESS_DENIED:
+ raise
+ return_code = win32process.GetExitCodeProcess(self._process._handle)
+ if return_code == win32con.STILL_ACTIVE:
+ raise
+
+ def stop(self):
+ """
+ Terminates the process.
+ """
+
+ if sys.platform == "win32":
+ # Adapted from implementation of Popen.terminate() in subprocess.py of Python 2.7
+ # because earlier versions do not catch exceptions.
+ try:
+ # Have the process exit with code 0 if it is terminated by us to simplify the
+ # success-checking logic later on.
+ win32process.TerminateProcess(self._process._handle, 0)
+ except win32process.error as err:
+ # ERROR_ACCESS_DENIED (winerror=5) is received when the process
+ # has already died.
+ if err.winerror != winerror.ERROR_ACCESS_DENIED:
+ raise
+ return_code = win32process.GetExitCodeProcess(self._process._handle)
+ if return_code == win32con.STILL_ACTIVE:
+ raise
+ else:
+ try:
+ self._process.terminate()
+ except OSError as err:
+ # ESRCH (errno=3) is received when the process has already died.
+ if err.errno != 3:
+ raise
+
+ def poll(self):
+ return self._process.poll()
+
+ def wait(self):
+ """
+ Waits until the process has terminated and all output has been
+ consumed by the logger pipes.
+ """
+
+ return_code = self._process.wait()
+
+ if self._stdout_pipe:
+ self._stdout_pipe.wait_until_finished()
+ if self._stderr_pipe:
+ self._stderr_pipe.wait_until_finished()
+
+ return return_code
+
+ def as_command(self):
+ """
+ Returns an equivalent command line invocation of the process.
+ """
+
+ default_env = os.environ
+ env_diff = self.env.copy()
+
+ # Remove environment variables that appear in both 'os.environ' and 'self.env'.
+ for env_var in default_env:
+ if env_var in env_diff and env_diff[env_var] == default_env[env_var]:
+ del env_diff[env_var]
+
+ sb = [] # String builder.
+ for env_var in env_diff:
+ sb.append("%s=%s" % (env_var, env_diff[env_var]))
+ sb.extend(self.args)
+
+ return " ".join(sb)
+
+ def __str__(self):
+ if self.pid is None:
+ return self.as_command()
+ return "%s (%d)" % (self.as_command(), self.pid)
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/programs.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/programs.py
new file mode 100644
index 00000000000..cdffcdf7bca
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/core/programs.py
@@ -0,0 +1,311 @@
+"""
+Utility functions to create MongoDB processes.
+
+Handles all the nitty-gritty parameter conversion.
+"""
+
+from __future__ import absolute_import
+
+import json
+import os
+import os.path
+import stat
+
+from . import process as _process
+from .. import utils
+from .. import config
+
+
+def mongod_program(logger, executable=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a mongod executable with
+ arguments constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_MONGOD_EXECUTABLE)
+ args = [executable]
+
+ # Apply the --setParameter command line argument. Command line options to resmoke.py override
+ # the YAML configuration.
+ suite_set_parameters = kwargs.pop("set_parameters", {})
+
+ if config.MONGOD_SET_PARAMETERS is not None:
+ suite_set_parameters.update(utils.load_yaml(config.MONGOD_SET_PARAMETERS))
+
+ _apply_set_parameters(args, suite_set_parameters)
+
+ shortcut_opts = {
+ "nojournal": config.NO_JOURNAL,
+ "nopreallocj": config.NO_PREALLOC_JOURNAL,
+ "storageEngine": config.STORAGE_ENGINE,
+ "wiredTigerCollectionConfigString": config.WT_COLL_CONFIG,
+ "wiredTigerEngineConfigString": config.WT_ENGINE_CONFIG,
+ "wiredTigerIndexConfigString": config.WT_INDEX_CONFIG,
+ }
+
+ # These options are just flags, so they should not take a value.
+ opts_without_vals = ("nojournal", "nopreallocj")
+
+ # Have the --nojournal command line argument to resmoke.py unset the journal option.
+ if shortcut_opts["nojournal"] and "journal" in kwargs:
+ del kwargs["journal"]
+
+ # Ensure that config servers run with journaling enabled.
+ if "configsvr" in kwargs:
+ shortcut_opts["nojournal"] = False
+ kwargs["journal"] = ""
+
+ # Command line options override the YAML configuration.
+ for opt_name in shortcut_opts:
+ opt_value = shortcut_opts[opt_name]
+ if opt_name in opts_without_vals:
+ # Options that are specified as --flag on the command line are represented by a boolean
+ # value where True indicates that the flag should be included in 'kwargs'.
+ if opt_value:
+ kwargs[opt_name] = ""
+ else:
+ # Options that are specified as --key=value on the command line are represented by a
+ # value where None indicates that the key-value pair shouldn't be included in 'kwargs'.
+ if opt_value is not None:
+ kwargs[opt_name] = opt_value
+
+ # Override the storage engine specified on the command line with "wiredTiger" if running a
+ # config server replica set.
+ if "replSet" in kwargs and "configsvr" in kwargs:
+ kwargs["storageEngine"] = "wiredTiger"
+
+ # Apply the rest of the command line arguments.
+ _apply_kwargs(args, kwargs)
+
+ _set_keyfile_permissions(kwargs)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def mongos_program(logger, executable=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a mongos executable with
+ arguments constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_MONGOS_EXECUTABLE)
+ args = [executable]
+
+ # Apply the --setParameter command line argument. Command line options to resmoke.py override
+ # the YAML configuration.
+ suite_set_parameters = kwargs.pop("set_parameters", {})
+
+ if config.MONGOS_SET_PARAMETERS is not None:
+ suite_set_parameters.update(utils.load_yaml(config.MONGOS_SET_PARAMETERS))
+
+ _apply_set_parameters(args, suite_set_parameters)
+
+ # Apply the rest of the command line arguments.
+ _apply_kwargs(args, kwargs)
+
+ _set_keyfile_permissions(kwargs)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a mongo shell with arguments
+ constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_MONGO_EXECUTABLE)
+ args = [executable]
+
+ eval_sb = [] # String builder.
+ global_vars = kwargs.pop("global_vars", {}).copy()
+
+ shortcut_opts = {
+ "noJournal": (config.NO_JOURNAL, False),
+ "noJournalPrealloc": (config.NO_PREALLOC_JOURNAL, False),
+ "storageEngine": (config.STORAGE_ENGINE, ""),
+ "testName": (os.path.splitext(os.path.basename(filename))[0], ""),
+ "wiredTigerCollectionConfigString": (config.WT_COLL_CONFIG, ""),
+ "wiredTigerEngineConfigString": (config.WT_ENGINE_CONFIG, ""),
+ "wiredTigerIndexConfigString": (config.WT_INDEX_CONFIG, ""),
+ }
+
+ test_data = global_vars.get("TestData", {}).copy()
+ for opt_name in shortcut_opts:
+ (opt_value, opt_default) = shortcut_opts[opt_name]
+ if opt_value is not None:
+ test_data[opt_name] = opt_value
+ elif opt_name not in test_data:
+ # Only use 'opt_default' if the property wasn't set in the YAML configuration.
+ test_data[opt_name] = opt_default
+ global_vars["TestData"] = test_data
+
+ # Pass setParameters for mongos and mongod through TestData. The setParameter parsing in
+ # servers.js is very primitive (just splits on commas), so this may break for non-scalar
+ # setParameter values.
+ if config.MONGOD_SET_PARAMETERS is not None:
+ if "setParameters" in test_data:
+ raise ValueError("setParameters passed via TestData can only be set from either the"
+ " command line or the suite YAML, not both")
+ mongod_set_parameters = utils.load_yaml(config.MONGOD_SET_PARAMETERS)
+ test_data["setParameters"] = _format_test_data_set_parameters(mongod_set_parameters)
+
+ if config.MONGOS_SET_PARAMETERS is not None:
+ if "setParametersMongos" in test_data:
+ raise ValueError("setParametersMongos passed via TestData can only be set from either"
+ " the command line or the suite YAML, not both")
+ mongos_set_parameters = utils.load_yaml(config.MONGOS_SET_PARAMETERS)
+ test_data["setParametersMongos"] = _format_test_data_set_parameters(mongos_set_parameters)
+
+ if "eval_prepend" in kwargs:
+ eval_sb.append(str(kwargs.pop("eval_prepend")))
+
+ for var_name in global_vars:
+ _format_shell_vars(eval_sb, var_name, global_vars[var_name])
+
+ if "eval" in kwargs:
+ eval_sb.append(str(kwargs.pop("eval")))
+
+ eval_str = "; ".join(eval_sb)
+ args.append("--eval")
+ args.append(eval_str)
+
+ if config.SHELL_READ_MODE is not None:
+ kwargs["readMode"] = config.SHELL_READ_MODE
+
+ if config.SHELL_WRITE_MODE is not None:
+ kwargs["writeMode"] = config.SHELL_WRITE_MODE
+
+ # Apply the rest of the command line arguments.
+ _apply_kwargs(args, kwargs)
+
+ # Have the mongo shell run the specified file.
+ args.append(filename)
+
+ _set_keyfile_permissions(test_data)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def _format_shell_vars(sb, path, value):
+ """
+ Formats 'value' in a way that can be passed to --eval.
+
+ If 'value' is a dictionary, then it is unrolled into the creation of
+ a new JSON object with properties assigned for each key of the
+ dictionary.
+ """
+
+ # Only need to do special handling for JSON objects.
+ if not isinstance(value, dict):
+ sb.append("%s = %s" % (path, json.dumps(value)))
+ return
+
+ # Avoid including curly braces and colons in output so that the command invocation can be
+ # copied and run through bash.
+ sb.append("%s = new Object()" % (path))
+ for subkey in value:
+ _format_shell_vars(sb, ".".join((path, subkey)), value[subkey])
+
+
+def dbtest_program(logger, executable=None, suites=None, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts a dbtest executable with
+ arguments constructed from 'kwargs'.
+ """
+
+ executable = utils.default_if_none(executable, config.DEFAULT_DBTEST_EXECUTABLE)
+ args = [executable]
+
+ if suites is not None:
+ args.extend(suites)
+
+ if config.STORAGE_ENGINE is not None:
+ kwargs["storageEngine"] = config.STORAGE_ENGINE
+
+ return generic_program(logger, args, process_kwargs=process_kwargs, **kwargs)
+
+def generic_program(logger, args, process_kwargs=None, **kwargs):
+ """
+ Returns a Process instance that starts an arbitrary executable with
+ arguments constructed from 'kwargs'. The args parameter is an array
+ of strings containing the command to execute.
+ """
+
+ if not utils.is_string_list(args):
+ raise ValueError("The args parameter must be a list of command arguments")
+
+ _apply_kwargs(args, kwargs)
+
+ process_kwargs = utils.default_if_none(process_kwargs, {})
+ return _process.Process(logger, args, **process_kwargs)
+
+
+def _format_test_data_set_parameters(set_parameters):
+ """
+ Converts key-value pairs from 'set_parameters' into the comma
+ delimited list format expected by the parser in servers.js.
+
+ WARNING: the parsing logic in servers.js is very primitive.
+ Non-scalar options such as logComponentVerbosity will not work
+ correctly.
+ """
+ params = []
+ for param_name in set_parameters:
+ param_value = set_parameters[param_name]
+ if isinstance(param_value, bool):
+ # Boolean valued setParameters are specified as lowercase strings.
+ param_value = "true" if param_value else "false"
+ elif isinstance(param_value, dict):
+ raise TypeError("Non-scalar setParameter values are not currently supported.")
+ params.append("%s=%s" % (param_name, param_value))
+ return ",".join(params)
+
+def _apply_set_parameters(args, set_parameter):
+ """
+ Converts key-value pairs from 'set_parameter' into --setParameter key=value
+ arguments to an executable and appends them to 'args'.
+ """
+
+ for param_name in set_parameter:
+ param_value = set_parameter[param_name]
+ # --setParameter takes boolean values as lowercase strings.
+ if isinstance(param_value, bool):
+ param_value = "true" if param_value else "false"
+ args.append("--setParameter")
+ args.append("%s=%s" % (param_name, param_value))
+
+
+def _apply_kwargs(args, kwargs):
+ """
+ Converts key-value pairs from 'kwargs' into --key value arguments
+ to an executable and appends them to 'args'.
+
+ A --flag without a value is represented with the empty string.
+ """
+
+ for arg_name in kwargs:
+ arg_value = str(kwargs[arg_name])
+ args.append("--%s" % (arg_name))
+ if arg_value:
+ args.append(arg_value)
+
+
+def _set_keyfile_permissions(opts):
+ """
+ Change the permissions of keyfiles in 'opts' to 600, i.e. only the
+ user can read and write the file.
+
+ This is necessary to avoid having the mongod/mongos fail to start up
+ because "permissions on the keyfiles are too open".
+
+ We can't permanently set the keyfile permissions because git is not
+ aware of them.
+ """
+ if "keyFile" in opts:
+ os.chmod(opts["keyFile"], stat.S_IRUSR | stat.S_IWUSR)
+ if "encryptionKeyFile" in opts:
+ os.chmod(opts["encryptionKeyFile"], stat.S_IRUSR | stat.S_IWUSR)
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/errors.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/errors.py
new file mode 100644
index 00000000000..6d2a704e390
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/errors.py
@@ -0,0 +1,52 @@
+"""
+Exceptions raised by resmoke.py.
+"""
+
+
+class ResmokeError(Exception):
+ """
+ Base class for all resmoke.py exceptions.
+ """
+ pass
+
+
+class StopExecution(ResmokeError):
+ """
+ Exception that is raised when resmoke.py should stop executing tests
+ if failing fast is enabled.
+ """
+ pass
+
+
+class UserInterrupt(StopExecution):
+ """
+ Exception that is raised when a user signals resmoke.py to
+ unconditionally stop executing tests.
+ """
+ pass
+
+
+class TestFailure(ResmokeError):
+ """
+ Exception that is raised by a hook in the after_test method if it
+ determines that the previous test should be marked as a failure.
+ """
+ pass
+
+
+class ServerFailure(TestFailure):
+ """
+ Exception that is raised by a hook in the after_test method if it
+ detects that the fixture did not exit cleanly and should be marked
+ as a failure.
+ """
+ pass
+
+
+class PortAllocationError(ResmokeError):
+ """
+ Exception that is raised by the PortAllocator if a port is requested
+ outside of the range of valid ports, or if a fixture requests more
+ ports than were reserved for that job.
+ """
+ pass
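To show how these layer in practice, a hedged sketch of a fail-fast loop (run_test and report_failure are hypothetical names, not part of this change):

    # Illustrative only: a TestFailure escalates to StopExecution when failing fast.
    try:
        run_test(test)  # hypothetical
    except errors.TestFailure:
        report_failure(test)  # hypothetical
        if fail_fast:
            raise errors.StopExecution("stopping after first test failure")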
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/__init__.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/__init__.py
new file mode 100644
index 00000000000..54609ad861f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/__init__.py
@@ -0,0 +1,14 @@
+"""
+Extension to the logging package to support buildlogger.
+"""
+
+from __future__ import absolute_import
+
+# Alias the built-in logging.Logger class for type checking arguments. Those interested in
+# constructing a new Logger instance should use the loggers.new_logger() function instead.
+from logging import Logger
+
+from . import config
+from . import buildlogger
+from . import flush
+from . import loggers
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py
new file mode 100644
index 00000000000..c5f5d40401b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/buildlogger.py
@@ -0,0 +1,284 @@
+"""
+Defines handlers for communicating with a buildlogger server.
+"""
+
+from __future__ import absolute_import
+
+import functools
+import urllib2
+
+from . import handlers
+from . import loggers
+from .. import config as _config
+
+
+CREATE_BUILD_ENDPOINT = "/build"
+APPEND_GLOBAL_LOGS_ENDPOINT = "/build/%(build_id)s"
+CREATE_TEST_ENDPOINT = "/build/%(build_id)s/test"
+APPEND_TEST_LOGS_ENDPOINT = "/build/%(build_id)s/test/%(test_id)s"
+
+_BUILDLOGGER_REALM = "buildlogs"
+_BUILDLOGGER_CONFIG = "mci.buildlogger"
+
+_SEND_AFTER_LINES = 2000
+_SEND_AFTER_SECS = 10
+
+
+def _log_on_error(func):
+ """
+ A decorator that causes any exceptions to be logged by the
+ "buildlogger" Logger instance.
+
+ Returns the wrapped function's return value, or None if an error
+ was encountered.
+ """
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except urllib2.HTTPError as err:
+ sb = [] # String builder.
+ sb.append("HTTP Error %s: %s" % (err.code, err.msg))
+ sb.append("POST %s" % (err.filename))
+
+ for name in err.hdrs:
+ value = err.hdrs[name]
+ sb.append(" %s: %s" % (name, value))
+
+ # Try to read the response back from the server.
+ if hasattr(err, "read"):
+ sb.append(err.read())
+
+ loggers._BUILDLOGGER_FALLBACK.exception("\n".join(sb))
+ except:
+ loggers._BUILDLOGGER_FALLBACK.exception("Encountered an error.")
+ return None
+
+ return wrapper
+
+@_log_on_error
+def get_config():
+ """
+ Returns the buildlogger configuration as evaluated from the
+ _BUILDLOGGER_CONFIG file.
+ """
+
+ tmp_globals = {} # Avoid conflicts with variables defined in the config file.
+ config = {}
+ execfile(_BUILDLOGGER_CONFIG, tmp_globals, config)
+
+ # Rename "slavename" to "username" if present.
+ if "slavename" in config and "username" not in config:
+ config["username"] = config["slavename"]
+ del config["slavename"]
+ # Rename "passwd" to "password" if present.
+ if "passwd" in config and "password" not in config:
+ config["password"] = config["passwd"]
+ del config["passwd"]
+
+ return config
+
+@_log_on_error
+def new_build_id(config):
+ """
+ Returns a new build id for sending global logs to.
+ """
+
+ if config is None:
+ return None
+
+ username = config["username"]
+ password = config["password"]
+ builder = config["builder"]
+ build_num = int(config["build_num"])
+
+ handler = handlers.HTTPHandler(
+ realm=_BUILDLOGGER_REALM,
+ url_root=_config.BUILDLOGGER_URL,
+ username=username,
+ password=password)
+
+ response = handler.post(CREATE_BUILD_ENDPOINT, data={
+ "builder": builder,
+ "buildnum": build_num,
+ })
+
+ return response["id"]
+
+@_log_on_error
+def new_test_id(build_id, build_config, test_filename, test_command):
+ """
+ Returns a new test id for sending test logs to.
+ """
+
+ if build_id is None or build_config is None:
+ return None
+
+ handler = handlers.HTTPHandler(
+ realm=_BUILDLOGGER_REALM,
+ url_root=_config.BUILDLOGGER_URL,
+ username=build_config["username"],
+ password=build_config["password"])
+
+ endpoint = CREATE_TEST_ENDPOINT % {"build_id": build_id}
+ response = handler.post(endpoint, data={
+ "test_filename": test_filename,
+ "command": test_command,
+ "phase": build_config.get("build_phase", "unknown"),
+ })
+
+ return response["id"]
+
+
+class _BaseBuildloggerHandler(handlers.BufferedHandler):
+ """
+ Base class of the buildlogger handler for the global logs and the
+ handler for the test logs.
+ """
+
+ def __init__(self,
+ build_id,
+ build_config,
+ capacity=_SEND_AFTER_LINES,
+ interval_secs=_SEND_AFTER_SECS):
+ """
+ Initializes the buildlogger handler with the build id and
+ credentials.
+ """
+
+ handlers.BufferedHandler.__init__(self, capacity, interval_secs)
+
+ username = build_config["username"]
+ password = build_config["password"]
+
+ self.http_handler = handlers.HTTPHandler(_BUILDLOGGER_REALM,
+ _config.BUILDLOGGER_URL,
+ username,
+ password)
+
+ self.build_id = build_id
+ self.retry_buffer = []
+
+ def process_record(self, record):
+ """
+ Returns a tuple of the time the log record was created, and the
+ message because the buildlogger expects the log messages
+ formatted in JSON as:
+
+ [ [ <log-time-1>, <log-message-1> ],
+ [ <log-time-2>, <log-message-2> ],
+ ... ]
+ """
+ msg = self.format(record)
+ return (record.created, msg)
+
+ def post(self, *args, **kwargs):
+ """
+ Convenience method for subclasses to use when making POST requests.
+ """
+
+ return self.http_handler.post(*args, **kwargs)
+
+ def _append_logs(self, log_lines):
+ raise NotImplementedError("_append_logs must be implemented by _BaseBuildloggerHandler"
+ " subclasses")
+
+ def flush_with_lock(self, close_called):
+ """
+ Ensures all logging output has been flushed to the buildlogger
+ server.
+
+ If _append_logs() returns false, then the log messages are added
+ to a separate buffer and retried the next time flush() is
+ called.
+ """
+
+ self.retry_buffer.extend(self.buffer)
+
+ if self._append_logs(self.retry_buffer):
+ self.retry_buffer = []
+ elif close_called:
+ # Request to the buildlogger server returned an error, so use the fallback logger to
+ # avoid losing the log messages entirely.
+ for (_, message) in self.retry_buffer:
+ # TODO: construct a LogRecord instance equivalent to the one passed to the
+ # process_record() method if we ever decide to log the time when the
+ # LogRecord was created, e.g. using %(asctime)s in
+ # _fallback_buildlogger_handler().
+ loggers._BUILDLOGGER_FALLBACK.info(message)
+ self.retry_buffer = []
+
+ self.buffer = []
+
+
+class BuildloggerTestHandler(_BaseBuildloggerHandler):
+ """
+ Buildlogger handler for the test logs.
+ """
+
+ def __init__(self, build_id, build_config, test_id, **kwargs):
+ """
+ Initializes the buildlogger handler with the build id, test id,
+ and credentials.
+ """
+
+ _BaseBuildloggerHandler.__init__(self, build_id, build_config, **kwargs)
+
+ self.test_id = test_id
+
+ @_log_on_error
+ def _append_logs(self, log_lines):
+ """
+ Sends a POST request to the APPEND_TEST_LOGS_ENDPOINT with the
+ logs that have been captured.
+ """
+ endpoint = APPEND_TEST_LOGS_ENDPOINT % {
+ "build_id": self.build_id,
+ "test_id": self.test_id,
+ }
+
+ response = self.post(endpoint, data=log_lines)
+ return response is not None
+
+ @_log_on_error
+ def _finish_test(self, failed=False):
+ """
+ Sends a POST request to the APPEND_TEST_LOGS_ENDPOINT with the
+ test status.
+ """
+ endpoint = APPEND_TEST_LOGS_ENDPOINT % {
+ "build_id": self.build_id,
+ "test_id": self.test_id,
+ }
+
+ self.post(endpoint, headers={
+ "X-Sendlogs-Test-Done": "true",
+ "X-Sendlogs-Test-Failed": "true" if failed else "false",
+ })
+
+ def close(self):
+ """
+ Closes the buildlogger handler.
+ """
+
+ _BaseBuildloggerHandler.close(self)
+
+ # TODO: pass the test status (success/failure) to this method
+ self._finish_test()
+
+
+class BuildloggerGlobalHandler(_BaseBuildloggerHandler):
+ """
+ Buildlogger handler for the global logs.
+ """
+
+ @_log_on_error
+ def _append_logs(self, log_lines):
+ """
+ Sends a POST request to the APPEND_GLOBAL_LOGS_ENDPOINT with
+ the logs that have been captured.
+ """
+ endpoint = APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": self.build_id}
+ response = self.post(endpoint, data=log_lines)
+ return response is not None
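For reference, the request body that _append_logs() ends up POSTing is the nested-array form described in process_record(); a tiny illustration:

    # Each buffered record becomes a [created-time, formatted-message] pair:
    log_lines = [
        (1467821573.42, "[fixture] starting mongod"),  # values illustrative
        (1467821574.01, "[fixture] waiting for port 20000"),
    ]
    # POSTed as JSON: [[1467821573.42, "..."], [1467821574.01, "..."]]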
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/config.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/config.py
new file mode 100644
index 00000000000..c3960bbafd3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/config.py
@@ -0,0 +1,161 @@
+"""
+Configuration functions for the logging package.
+"""
+
+from __future__ import absolute_import
+
+import logging
+import sys
+
+from . import buildlogger
+from . import formatters
+from . import loggers
+
+
+_DEFAULT_FORMAT = "[%(name)s] %(message)s"
+
+
+def using_buildlogger(logging_config):
+ """
+ Returns true if buildlogger is set as a handler on the "fixture" or
+ "tests" loggers, and false otherwise.
+ """
+ for logger_name in (loggers.FIXTURE_LOGGER_NAME, loggers.TESTS_LOGGER_NAME):
+ logger_info = logging_config[logger_name]
+ if _get_buildlogger_handler_info(logger_info) is not None:
+ return True
+ return False
+
+
+def apply_config(logging_config):
+ """
+ Adds all handlers specified by the configuration to the "executor",
+ "fixture", and "tests" loggers.
+ """
+
+ logging_components = (loggers.EXECUTOR_LOGGER_NAME,
+ loggers.FIXTURE_LOGGER_NAME,
+ loggers.TESTS_LOGGER_NAME)
+
+ if not all(component in logging_config for component in logging_components):
+ raise ValueError("Logging configuration should contain %s, %s, and %s components"
+ % logging_components)
+
+ # Configure the executor, fixture, and tests loggers.
+ for component in logging_components:
+ logger = loggers.LOGGERS_BY_NAME[component]
+ logger_info = logging_config[component]
+ _configure_logger(logger, logger_info)
+
+ # Configure the buildlogger logger.
+ loggers._BUILDLOGGER_FALLBACK.addHandler(_fallback_buildlogger_handler())
+
+
+def apply_buildlogger_global_handler(logger, logging_config, build_id=None, build_config=None):
+ """
+ Adds a buildlogger.BuildloggerGlobalHandler to 'logger' if specified
+ to do so by the configuration.
+ """
+
+ logger_info = logging_config[loggers.FIXTURE_LOGGER_NAME]
+ handler_info = _get_buildlogger_handler_info(logger_info)
+ if handler_info is None:
+ # Not configured to use buildlogger.
+ return
+
+ if all(x is not None for x in (build_id, build_config)):
+ log_format = logger_info.get("format", _DEFAULT_FORMAT)
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ handler = buildlogger.BuildloggerGlobalHandler(build_id,
+ build_config,
+ **handler_info)
+ handler.setFormatter(formatter)
+ else:
+ handler = _fallback_buildlogger_handler()
+ # Fallback handler already has formatting configured.
+
+ logger.addHandler(handler)
+
+
+def apply_buildlogger_test_handler(logger,
+ logging_config,
+ build_id=None,
+ build_config=None,
+ test_id=None):
+ """
+ Adds a buildlogger.BuildloggerTestHandler to 'logger' if specified
+ to do so by the configuration.
+ """
+
+ logger_info = logging_config[loggers.TESTS_LOGGER_NAME]
+ handler_info = _get_buildlogger_handler_info(logger_info)
+ if handler_info is None:
+ # Not configured to use buildlogger.
+ return
+
+ if all(x is not None for x in (build_id, build_config, test_id)):
+ log_format = logger_info.get("format", _DEFAULT_FORMAT)
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ handler = buildlogger.BuildloggerTestHandler(build_id,
+ build_config,
+ test_id,
+ **handler_info)
+ handler.setFormatter(formatter)
+ else:
+ handler = _fallback_buildlogger_handler()
+ # Fallback handler already has formatting configured.
+
+ logger.addHandler(handler)
+
+
+def _configure_logger(logger, logger_info):
+ """
+ Adds the handlers specified by the configuration to 'logger'.
+ """
+
+ log_format = logger_info.get("format", _DEFAULT_FORMAT)
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ for handler_info in logger_info.get("handlers", []):
+ handler_class = handler_info["class"]
+ if handler_class == "logging.FileHandler":
+ handler = logging.FileHandler(filename=handler_info["filename"],
+ mode=handler_info.get("mode", "w"))
+ elif handler_class == "logging.NullHandler":
+ handler = logging.NullHandler()
+ elif handler_class == "logging.StreamHandler":
+ handler = logging.StreamHandler(sys.stdout)
+ elif handler_class == "buildlogger":
+ continue # Buildlogger handlers are applied when running tests.
+ else:
+ raise ValueError("Unknown handler class '%s'" % (handler_class))
+ handler.setFormatter(formatter)
+ logger.addHandler(handler)
+
+
+def _fallback_buildlogger_handler():
+ """
+ Returns a handler that writes to stderr.
+ """
+
+ log_format = "[buildlogger:%(name)s] %(message)s"
+ formatter = formatters.ISO8601Formatter(fmt=log_format)
+
+ handler = logging.StreamHandler(sys.stderr)
+ handler.setFormatter(formatter)
+
+ return handler
+
+def _get_buildlogger_handler_info(logger_info):
+ """
+ Returns the buildlogger handler information if it exists, and None
+ otherwise.
+ """
+
+ for handler_info in logger_info["handlers"]:
+ handler_info = handler_info.copy()
+ if handler_info.pop("class") == "buildlogger":
+ return handler_info
+ return None
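For orientation, a sketch of the configuration shape apply_config() expects (values illustrative; the handler classes are the ones recognized by _configure_logger above):

    # Each top-level component lists its handlers; "buildlogger" entries are
    # deferred until tests actually run (see _configure_logger above).
    logging_config = {
        "executor": {"format": "[%(name)s] %(message)s",
                     "handlers": [{"class": "logging.StreamHandler"}]},
        "fixture":  {"handlers": [{"class": "logging.FileHandler", "filename": "fixture.log"}]},
        "tests":    {"handlers": [{"class": "buildlogger"}]},
    }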
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/flush.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/flush.py
new file mode 100644
index 00000000000..c45533f1e13
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/flush.py
@@ -0,0 +1,97 @@
+"""
+Workaround to avoid having too many threads running on 32-bit systems when
+logging to buildlogger, while still allowing messages to be flushed
+periodically to the buildlogger server.
+
+This is because a utils.timer.AlarmClock instance is used for each
+buildlogger.BuildloggerTestHandler, but only dismiss()ed when the Python
+process is about to exit.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+from ..utils import queue
+
+
+_LOGGER_QUEUE = queue.Queue()
+
+_FLUSH_THREAD_LOCK = threading.Lock()
+_FLUSH_THREAD = None
+
+
+def start_thread():
+ """
+ Starts the flush thread.
+ """
+
+ global _FLUSH_THREAD
+ with _FLUSH_THREAD_LOCK:
+ if _FLUSH_THREAD is not None:
+ raise ValueError("FlushThread has already been started")
+
+ _FLUSH_THREAD = _FlushThread()
+ _FLUSH_THREAD.start()
+
+
+def stop_thread():
+ """
+ Signals the flush thread to stop and waits until it does.
+ """
+
+ with _FLUSH_THREAD_LOCK:
+ if _FLUSH_THREAD is None:
+ raise ValueError("FlushThread hasn't been started")
+
+ # Add sentinel value to indicate when there are no more loggers to process.
+ _LOGGER_QUEUE.put(None)
+ _FLUSH_THREAD.join()
+
+
+def close_later(logger):
+ """
+ Adds 'logger' to the queue so that it is closed later by the flush
+ thread.
+ """
+ _LOGGER_QUEUE.put(logger)
+
+
+class _FlushThread(threading.Thread):
+ """
+ Asynchronously flushes and closes logging handlers.
+ """
+
+ def __init__(self):
+ """
+ Initializes the flush thread.
+ """
+
+ threading.Thread.__init__(self, name="FlushThread")
+ # Do not wait to flush the logs if interrupted by the user.
+ self.daemon = True
+
+ def run(self):
+ """
+ Continuously shuts down loggers from the queue.
+ """
+
+ while True:
+ logger = _LOGGER_QUEUE.get()
+ try:
+ if logger is None:
+ # Sentinel value received, so exit.
+ break
+ _FlushThread._shutdown_logger(logger)
+ finally:
+ _LOGGER_QUEUE.task_done()
+
+ @staticmethod
+ def _shutdown_logger(logger):
+ """
+ Flushes and closes all handlers of 'logger'.
+ """
+
+ for handler in logger.handlers:
+ handler.flush()
+ handler.close()
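Usage is intentionally small; a sketch:

    # Sketch: hand loggers off to the single flush thread instead of one timer each.
    start_thread()
    close_later(some_logger)  # some_logger's handlers are flushed/closed asynchronously
    stop_thread()             # enqueue the sentinel and wait for the thread to drain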
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/formatters.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/formatters.py
new file mode 100644
index 00000000000..4cc36da32d4
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/formatters.py
@@ -0,0 +1,50 @@
+"""
+Custom formatters for the logging handlers.
+"""
+
+from __future__ import absolute_import
+
+import logging
+import sys
+import time
+
+
+class ISO8601Formatter(logging.Formatter):
+ """
+ An ISO 8601 compliant formatter for log messages. It formats the
+ timezone as an hour/minute offset and uses a period as the
+ millisecond separator in order to match the log messages of MongoDB.
+ """
+
+ def formatTime(self, record, datefmt=None):
+ converted_time = self.converter(record.created)
+
+ if datefmt is not None:
+ return time.strftime(datefmt, converted_time)
+
+ formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", converted_time)
+ timezone = ISO8601Formatter._format_timezone_offset(converted_time)
+ return "%s.%03d%s" % (formatted_time, record.msecs, timezone)
+
+ @staticmethod
+ def _format_timezone_offset(converted_time):
+ """
+ Returns the timezone as an hour/minute offset in the form
+ "+HHMM" or "-HHMM".
+ """
+
+ # Windows treats %z in the format string as %Z, so we compute the hour/minute offset
+ # manually.
+ if converted_time.tm_isdst == 1 and time.daylight:
+ utc_offset_secs = time.altzone
+ else:
+ utc_offset_secs = time.timezone
+
+ # The offset is positive if the local timezone is behind (east of) UTC, and negative if it
+ # is ahead (west) of UTC.
+ utc_offset_prefix = "-" if utc_offset_secs > 0 else "+"
+ utc_offset_secs = abs(utc_offset_secs)
+
+ utc_offset_mins = (utc_offset_secs / 60) % 60
+ utc_offset_hours = utc_offset_secs / 3600
+ return "%s%02d%02d" % (utc_offset_prefix, utc_offset_hours, utc_offset_mins)
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/handlers.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/handlers.py
new file mode 100644
index 00000000000..b688a1da68a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/handlers.py
@@ -0,0 +1,178 @@
+"""
+Additional handlers that are used as the base classes of the buildlogger
+handler.
+"""
+
+from __future__ import absolute_import
+
+import json
+import logging
+import threading
+import urllib2
+
+from .. import utils
+from ..utils import timer
+
+_TIMEOUT_SECS = 10
+
+class BufferedHandler(logging.Handler):
+ """
+ A handler class that buffers logging records in memory. Whenever a
+ record is added to the buffer, a check is made to see if the
+ buffer should be flushed. If it should, then flush() is expected to
+ do what's needed.
+ """
+
+ def __init__(self, capacity, interval_secs):
+ """
+ Initializes the handler with the buffer size and timeout after
+ which the buffer is flushed regardless.
+ """
+
+ logging.Handler.__init__(self)
+
+ if not isinstance(capacity, int):
+ raise TypeError("capacity must be an integer")
+ elif capacity <= 0:
+ raise ValueError("capacity must be a positive integer")
+
+ if not isinstance(interval_secs, (int, float)):
+ raise TypeError("interval_secs must be a number")
+ elif interval_secs <= 0.0:
+ raise ValueError("interval_secs must be a positive number")
+
+ self.capacity = capacity
+ self.interval_secs = interval_secs
+ self.buffer = []
+
+ self._lock = threading.Lock()
+ self._timer = None # Defer creation until we actually begin to log messages.
+
+ def _new_timer(self):
+ """
+ Returns a new timer.AlarmClock instance that will call the
+ flush() method after 'interval_secs' seconds.
+ """
+
+ return timer.AlarmClock(self.interval_secs, self.flush, args=[self])
+
+ def process_record(self, record):
+ """
+ Applies a transformation to the record before it gets added to
+ the buffer.
+
+ The default implementation returns 'record' unmodified.
+ """
+
+ return record
+
+ def emit(self, record):
+ """
+ Emits a record.
+
+ Append the record to the buffer after it has been transformed by
+ process_record(). If the length of the buffer is greater than or
+ equal to its capacity, then flush() is called to process the
+ buffer.
+
+ After flushing the buffer, the timer is restarted so that it
+ will expire after another 'interval_secs' seconds.
+ """
+
+ with self._lock:
+ self.buffer.append(self.process_record(record))
+ if len(self.buffer) >= self.capacity:
+ if self._timer is not None:
+ self._timer.snooze()
+ self.flush_with_lock(False)
+ if self._timer is not None:
+ self._timer.reset()
+
+ if self._timer is None:
+ self._timer = self._new_timer()
+ self._timer.start()
+
+ def flush(self, close_called=False):
+ """
+ Ensures all logging output has been flushed.
+ """
+
+ with self._lock:
+ if self.buffer:
+ self.flush_with_lock(close_called)
+
+ def flush_with_lock(self, close_called):
+ """
+ Ensures all logging output has been flushed.
+
+ This version resets the buffer back to an empty list and is
+ intended to be overridden by subclasses.
+ """
+
+ self.buffer = []
+
+ def close(self):
+ """
+ Tidies up any resources used by the handler.
+
+ Stops the timer and flushes the buffer.
+ """
+
+ if self._timer is not None:
+ self._timer.dismiss()
+ self.flush(close_called=True)
+
+ logging.Handler.close(self)
+
+
+class HTTPHandler(object):
+ """
+ A class which sends data to a web server using POST requests.
+ """
+
+ def __init__(self, realm, url_root, username, password):
+ """
+ Initializes the handler with the necessary authentication
+ credentials.
+ """
+
+ digest_handler = urllib2.HTTPDigestAuthHandler()
+ digest_handler.add_password(
+ realm=realm,
+ uri=url_root,
+ user=username,
+ passwd=password)
+
+ self.url_root = url_root
+ self.url_opener = urllib2.build_opener(digest_handler, urllib2.HTTPErrorProcessor())
+
+ def _make_url(self, endpoint):
+ return "%s/%s/" % (self.url_root.rstrip("/"), endpoint.strip("/"))
+
+ def post(self, endpoint, data=None, headers=None, timeout_secs=_TIMEOUT_SECS):
+ """
+ Sends a POST request to the specified endpoint with the supplied
+ data.
+
+ Returns the response, either as a string or a JSON object based
+ on the content type.
+ """
+
+ data = utils.default_if_none(data, [])
+ data = json.dumps(data, encoding="utf-8")
+
+ headers = utils.default_if_none(headers, {})
+ headers["Content-Type"] = "application/json; charset=utf-8"
+
+ url = self._make_url(endpoint)
+ request = urllib2.Request(url=url, data=data, headers=headers)
+
+ response = self.url_opener.open(request, timeout=timeout_secs)
+ headers = response.info()
+
+ content_type = headers.gettype()
+ if content_type == "application/json":
+ encoding = headers.getparam("charset") or "utf-8"
+ return json.load(response, encoding=encoding)
+
+ return response.read()
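A hedged sketch of driving HTTPHandler directly (URL and credentials are placeholders, not real endpoints):

    # Sketch: digest-authenticated JSON POST, mirroring what the buildlogger handlers do.
    handler = HTTPHandler(realm="buildlogs",
                          url_root="https://logkeeper.example.org",  # hypothetical
                          username="builder",
                          password="secret")
    response = handler.post("/build", data={"builder": "linux-64", "buildnum": 42})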
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/loggers.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/loggers.py
new file mode 100644
index 00000000000..35f41512425
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/logging/loggers.py
@@ -0,0 +1,37 @@
+"""
+Module to hold the logger instances themselves.
+"""
+
+from __future__ import absolute_import
+
+import logging
+
+EXECUTOR_LOGGER_NAME = "executor"
+FIXTURE_LOGGER_NAME = "fixture"
+TESTS_LOGGER_NAME = "tests"
+
+def new_logger(logger_name, parent=None):
+ """
+ Returns a new logging.Logger instance with the specified name.
+ """
+
+ # Set up the logger to handle all messages it receives.
+ logger = logging.Logger(logger_name, level=logging.DEBUG)
+
+ if parent is not None:
+ logger.parent = parent
+ logger.propagate = True
+
+ return logger
+
+EXECUTOR = new_logger(EXECUTOR_LOGGER_NAME)
+FIXTURE = new_logger(FIXTURE_LOGGER_NAME)
+TESTS = new_logger(TESTS_LOGGER_NAME)
+
+LOGGERS_BY_NAME = {
+ EXECUTOR_LOGGER_NAME: EXECUTOR,
+ FIXTURE_LOGGER_NAME: FIXTURE,
+ TESTS_LOGGER_NAME: TESTS,
+}
+
+_BUILDLOGGER_FALLBACK = new_logger("fallback")
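Since new_logger() accepts a parent, per-test child loggers can be hung off the fixed ones; a one-line sketch:

    # Sketch: a child logger that propagates records up to the shared TESTS logger.
    test_logger = new_logger("tests:jstests/restore/basic.js", parent=TESTS)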
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/parser.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/parser.py
new file mode 100644
index 00000000000..4bcc7bfb137
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/parser.py
@@ -0,0 +1,368 @@
+"""
+Parser for command line arguments.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import optparse
+
+from . import config as _config
+from . import testing
+from . import utils
+from .. import resmokeconfig
+
+
+# Maps each parsed argument's attribute name (its optparse "dest") to the corresponding key in
+# the options YAML configuration file. Most entries simply convert from snake_case to camelCase.
+DEST_TO_CONFIG = {
+ "base_port": "basePort",
+ "buildlogger_url": "buildloggerUrl",
+ "continue_on_failure": "continueOnFailure",
+ "dbpath_prefix": "dbpathPrefix",
+ "dbtest_executable": "dbtest",
+ "dry_run": "dryRun",
+ "exclude_with_all_tags": "excludeWithAllTags",
+ "exclude_with_any_tags": "excludeWithAnyTags",
+ "include_with_all_tags": "includeWithAllTags",
+ "include_with_any_tags": "includeWithAnyTags",
+ "jobs": "jobs",
+ "mongo_executable": "mongo",
+ "mongod_executable": "mongod",
+ "mongod_parameters": "mongodSetParameters",
+ "mongos_executable": "mongos",
+ "mongos_parameters": "mongosSetParameters",
+ "no_journal": "nojournal",
+ "prealloc_journal": "preallocJournal",
+ "repeat": "repeat",
+ "report_file": "reportFile",
+ "seed": "seed",
+ "shell_read_mode": "shellReadMode",
+ "shell_write_mode": "shellWriteMode",
+ "shuffle": "shuffle",
+ "storage_engine": "storageEngine",
+ "wt_coll_config": "wiredTigerCollectionConfigString",
+ "wt_engine_config": "wiredTigerEngineConfigString",
+ "wt_index_config": "wiredTigerIndexConfigString"
+}
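+
+# For example, "--basePort 20000" is parsed into values.base_port and, through
+# this mapping, overrides the "basePort" key from the options YAML file.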
+
+
+def parse_command_line():
+ """
+ Parses the command line arguments passed to resmoke.py.
+ """
+
+ parser = optparse.OptionParser()
+
+ parser.add_option("--suites", dest="suite_files", metavar="SUITE1,SUITE2",
+ help=("Comma separated list of YAML files that each specify the configuration"
+ " of a suite. If the file is located in the resmokeconfig/suites/"
+ " directory, then the basename without the .yml extension can be"
+ " specified, e.g. 'core'."))
+
+ parser.add_option("--executor", dest="executor_file", metavar="EXECUTOR",
+ help=("A YAML file that specifies the executor configuration. If the file is"
+ " located in the resmokeconfig/suites/ directory, then the basename"
+ " without the .yml extension can be specified, e.g. 'core_small_oplog'."
+ " If specified in combination with the --suites option, then the suite"
+ " configuration takes precedence."))
+
+ parser.add_option("--log", dest="logger_file", metavar="LOGGER",
+ help=("A YAML file that specifies the logging configuration. If the file is"
+ " located in the resmokeconfig/suites/ directory, then the basename"
+ " without the .yml extension can be specified, e.g. 'console'."))
+
+ parser.add_option("--options", dest="options_file", metavar="OPTIONS",
+ help="A YAML file that specifies global options to resmoke.py.")
+
+ parser.add_option("--basePort", dest="base_port", metavar="PORT",
+ help=("The starting port number to use for mongod and mongos processes"
+ " spawned by resmoke.py or the tests themselves. Each fixture and Job"
+ " allocates a contiguous range of ports."))
+
+ parser.add_option("--buildloggerUrl", action="store", dest="buildlogger_url", metavar="URL",
+ help="The root url of the buildlogger server.")
+
+ parser.add_option("--continueOnFailure", action="store_true", dest="continue_on_failure",
+ help="Executes all tests in all suites, even if some of them fail.")
+
+ parser.add_option("--dbpathPrefix", dest="dbpath_prefix", metavar="PATH",
+ help=("The directory which will contain the dbpaths of any mongod's started"
+ " by resmoke.py or the tests themselves."))
+
+ parser.add_option("--dbtest", dest="dbtest_executable", metavar="PATH",
+ help="The path to the dbtest executable for resmoke to use.")
+
+ parser.add_option("--excludeWithAllTags", dest="exclude_with_all_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. Any jstest that contains all of the"
+ " specified tags will be excluded from any suites that are run."))
+
+ parser.add_option("--excludeWithAnyTags", dest="exclude_with_any_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. Any jstest that contains any of the"
+ " specified tags will be excluded from any suites that are run."))
+
+ parser.add_option("--includeWithAllTags", dest="include_with_all_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. For the jstest portion of the suite(s),"
+ " only tests which have all of the specified tags will be run."))
+
+ parser.add_option("--includeWithAnyTags", dest="include_with_any_tags", metavar="TAG1,TAG2",
+ help=("Comma separated list of tags. For the jstest portion of the suite(s),"
+ " only tests which have at least one of the specified tags will be"
+ " run."))
+
+ parser.add_option("-n", action="store_const", const="tests", dest="dry_run",
+ help=("Output the tests that would be run."))
+
+ # TODO: add support for --dryRun=commands
+ parser.add_option("--dryRun", type="choice", action="store", dest="dry_run",
+ choices=("off", "tests"), metavar="MODE",
+ help=("Instead of running the tests, output the tests that would be run"
+ " (if MODE=tests). Defaults to MODE=%default."))
+
+ parser.add_option("-j", "--jobs", type="int", dest="jobs", metavar="JOBS",
+ help=("The number of Job instances to use. Each instance will receive its own"
+ " MongoDB deployment to dispatch tests to."))
+
+ parser.add_option("-l", "--listSuites", action="store_true", dest="list_suites",
+ help="List the names of the suites available to execute.")
+
+ parser.add_option("--mongo", dest="mongo_executable", metavar="PATH",
+ help="The path to the mongo shell executable for resmoke.py to use.")
+
+ parser.add_option("--mongod", dest="mongod_executable", metavar="PATH",
+ help="The path to the mongod executable for resmoke.py to use.")
+
+ parser.add_option("--mongodSetParameters", dest="mongod_parameters",
+ metavar="{key1: value1, key2: value2, ..., keyN: valueN}",
+ help=("Pass one or more --setParameter options to all mongod processes"
+ " started by resmoke.py. The argument is specified as bracketed YAML -"
+ " i.e. JSON with support for single quoted and unquoted keys."))
+
+ parser.add_option("--mongos", dest="mongos_executable", metavar="PATH",
+ help="The path to the mongos executable for resmoke.py to use.")
+
+ parser.add_option("--mongosSetParameters", dest="mongos_parameters",
+ metavar="{key1: value1, key2: value2, ..., keyN: valueN}",
+ help=("Pass one or more --setParameter options to all mongos processes"
+ " started by resmoke.py. The argument is specified as bracketed YAML -"
+ " i.e. JSON with support for single quoted and unquoted keys."))
+
+ parser.add_option("--nojournal", action="store_true", dest="no_journal",
+ help="Disable journaling for all mongod's.")
+
+ parser.add_option("--nopreallocj", action="store_const", const="off", dest="prealloc_journal",
+ help="Disable preallocation of journal files for all mongod processes.")
+
+ parser.add_option("--preallocJournal", type="choice", action="store", dest="prealloc_journal",
+ choices=("on", "off"), metavar="ON|OFF",
+ help=("Enable or disable preallocation of journal files for all mongod"
+ " processes. Defaults to %default."))
+
+ parser.add_option("--repeat", type="int", dest="repeat", metavar="N",
+ help="Repeat the given suite(s) N times, or until one fails.")
+
+ parser.add_option("--reportFile", dest="report_file", metavar="REPORT",
+ help="Write a JSON file with test status and timing information.")
+
+ parser.add_option("--seed", type="int", dest="seed", metavar="SEED",
+ help=("Seed for the random number generator. Useful in combination with the"
+ " --shuffle option for producing a consistent test execution order."))
+
+ parser.add_option("--shellReadMode", type="choice", action="store", dest="shell_read_mode",
+ choices=("commands", "compatibility", "legacy"), metavar="READ_MODE",
+ help="The read mode used by the mongo shell.")
+
+ parser.add_option("--shellWriteMode", type="choice", action="store", dest="shell_write_mode",
+ choices=("commands", "compatibility", "legacy"), metavar="WRITE_MODE",
+ help="The write mode used by the mongo shell.")
+
+ parser.add_option("--shuffle", action="store_true", dest="shuffle",
+ help="Randomize the order in which tests are executed.")
+
+ parser.add_option("--storageEngine", dest="storage_engine", metavar="ENGINE",
+ help="The storage engine used by dbtests and jstests.")
+
+ parser.add_option("--wiredTigerCollectionConfigString", dest="wt_coll_config", metavar="CONFIG",
+ help="Set the WiredTiger collection configuration setting for all mongod's.")
+
+ parser.add_option("--wiredTigerEngineConfigString", dest="wt_engine_config", metavar="CONFIG",
+ help="Set the WiredTiger engine configuration setting for all mongod's.")
+
+ parser.add_option("--wiredTigerIndexConfigString", dest="wt_index_config", metavar="CONFIG",
+ help="Set the WiredTiger index configuration setting for all mongod's.")
+
+ parser.set_defaults(executor_file="with_server",
+ logger_file="console",
+ dry_run="off",
+ list_suites=False,
+ prealloc_journal="off")
+
+ return parser.parse_args()
+
+
+def get_logging_config(values):
+ return _get_logging_config(values.logger_file)
+
+
+def update_config_vars(values):
+ options = _get_options_config(values.options_file)
+
+ config = _config.DEFAULTS.copy()
+ config.update(options)
+
+ values = vars(values)
+ for dest in values:
+ if dest not in DEST_TO_CONFIG:
+ continue
+ config_var = DEST_TO_CONFIG[dest]
+ if values[dest] is not None:
+ config[config_var] = values[dest]
+
+ _config.BASE_PORT = int(config.pop("basePort"))
+ _config.BUILDLOGGER_URL = config.pop("buildloggerUrl")
+ _config.DBPATH_PREFIX = _expand_user(config.pop("dbpathPrefix"))
+ _config.DBTEST_EXECUTABLE = _expand_user(config.pop("dbtest"))
+ _config.DRY_RUN = config.pop("dryRun")
+ _config.EXCLUDE_WITH_ALL_TAGS = config.pop("excludeWithAllTags")
+ _config.EXCLUDE_WITH_ANY_TAGS = config.pop("excludeWithAnyTags")
+ _config.FAIL_FAST = not config.pop("continueOnFailure")
+ _config.INCLUDE_WITH_ALL_TAGS = config.pop("includeWithAllTags")
+ _config.INCLUDE_WITH_ANY_TAGS = config.pop("includeWithAnyTags")
+ _config.JOBS = config.pop("jobs")
+ _config.MONGO_EXECUTABLE = _expand_user(config.pop("mongo"))
+ _config.MONGOD_EXECUTABLE = _expand_user(config.pop("mongod"))
+ _config.MONGOD_SET_PARAMETERS = config.pop("mongodSetParameters")
+ _config.MONGOS_EXECUTABLE = _expand_user(config.pop("mongos"))
+ _config.MONGOS_SET_PARAMETERS = config.pop("mongosSetParameters")
+ _config.NO_JOURNAL = config.pop("nojournal")
+ _config.NO_PREALLOC_JOURNAL = config.pop("preallocJournal") == "off"
+ _config.RANDOM_SEED = config.pop("seed")
+ _config.REPEAT = config.pop("repeat")
+ _config.REPORT_FILE = config.pop("reportFile")
+ _config.SHELL_READ_MODE = config.pop("shellReadMode")
+ _config.SHELL_WRITE_MODE = config.pop("shellWriteMode")
+ _config.SHUFFLE = config.pop("shuffle")
+ _config.STORAGE_ENGINE = config.pop("storageEngine")
+ _config.WT_COLL_CONFIG = config.pop("wiredTigerCollectionConfigString")
+ _config.WT_ENGINE_CONFIG = config.pop("wiredTigerEngineConfigString")
+ _config.WT_INDEX_CONFIG = config.pop("wiredTigerIndexConfigString")
+
+ if config:
+ raise optparse.OptionValueError("Unknown option(s): %s" % (config.keys()))
+
+
+def get_suites(values, args):
+ if (values.suite_files is None and not args) or (values.suite_files is not None and args):
+ raise optparse.OptionValueError("Must specify either --suites or a list of tests")
+
+ # If there are no suites specified, but there are args, assume they are jstests.
+ if args:
+ # No suite config was specified, so build one from the test files and fall back to
+ # the default logging and executor configuration.
+ suite_config = _make_jstests_config(args)
+ _ensure_executor(suite_config, values.executor_file)
+ suite = testing.suite.Suite("<jstests>", suite_config)
+ return [suite]
+
+ suite_files = values.suite_files.split(",")
+
+ suites = []
+ for suite_filename in suite_files:
+ suite_config = _get_suite_config(suite_filename)
+ _ensure_executor(suite_config, values.executor_file)
+ suite = testing.suite.Suite(suite_filename, suite_config)
+ suites.append(suite)
+ return suites
+
+
+def get_named_suites():
+ """
+ Returns the list of suites available to execute.
+ """
+
+ # Skip "with_server" and "no_server" because they do not define any test files to run.
+ executor_only = set(["with_server", "no_server"])
+ suite_names = [suite for suite in resmokeconfig.NAMED_SUITES if suite not in executor_only]
+ suite_names.sort()
+ return suite_names
+
+
+def _get_logging_config(pathname):
+ """
+ Attempts to read a YAML configuration from 'pathname' that describes
+ how resmoke.py should log the tests and fixtures.
+ """
+
+ # Named loggers are specified as the basename of the file, without the .yml extension.
+ if not utils.is_yaml_file(pathname) and not os.path.dirname(pathname):
+ if pathname not in resmokeconfig.NAMED_LOGGERS:
+ raise optparse.OptionValueError("Unknown logger '%s'" % (pathname))
+ pathname = resmokeconfig.NAMED_LOGGERS[pathname] # Expand 'pathname' to full path.
+
+ if not utils.is_yaml_file(pathname) or not os.path.isfile(pathname):
+ raise optparse.OptionValueError("Expected a logger YAML config, but got '%s'" % (pathname))
+
+ return utils.load_yaml_file(pathname).pop("logging")
+
+
+def _get_options_config(pathname):
+ """
+ Attempts to read a YAML configuration from 'pathname' that describes
+ any modifications to global options.
+ """
+
+ if pathname is None:
+ return {}
+
+ return utils.load_yaml_file(pathname).pop("options")
+
+
+def _get_suite_config(pathname):
+ """
+ Attempts to read a YAML configuration from 'pathname' that describes
+ what tests to run and how to run them.
+ """
+
+ # Named suites are specified as the basename of the file, without the .yml extension.
+ if not utils.is_yaml_file(pathname) and not os.path.dirname(pathname):
+ if pathname not in resmokeconfig.NAMED_SUITES:
+ raise optparse.OptionValueError("Unknown suite '%s'" % (pathname))
+ pathname = resmokeconfig.NAMED_SUITES[pathname] # Expand 'pathname' to full path.
+
+ if not utils.is_yaml_file(pathname) or not os.path.isfile(pathname):
+ raise optparse.OptionValueError("Expected a suite YAML config, but got '%s'" % (pathname))
+
+ return utils.load_yaml_file(pathname)
+
+
+def _make_jstests_config(js_files):
+ for pathname in js_files:
+ if not utils.is_js_file(pathname) or not os.path.isfile(pathname):
+ raise optparse.OptionValueError("Expected a list of JS files, but got '%s'"
+ % (pathname))
+
+ return {"selector": {"js_test": {"roots": js_files}}}
+
+
+def _ensure_executor(suite_config, executor_pathname):
+ if "executor" not in suite_config:
+ # Named executors are specified as the basename of the file, without the .yml extension.
+ if not utils.is_yaml_file(executor_pathname) and not os.path.dirname(executor_pathname):
+ if executor_pathname not in resmokeconfig.NAMED_SUITES:
+ raise optparse.OptionValueError("Unknown executor '%s'" % (executor_pathname))
+ executor_pathname = resmokeconfig.NAMED_SUITES[executor_pathname]
+
+ if not utils.is_yaml_file(executor_pathname) or not os.path.isfile(executor_pathname):
+ raise optparse.OptionValueError("Expected an executor YAML config, but got '%s'"
+ % (executor_pathname))
+
+ suite_config["executor"] = utils.load_yaml_file(executor_pathname).pop("executor")
+
+
+def _expand_user(pathname):
+ """
+ Wrapper around os.path.expanduser() to do nothing when given None.
+ """
+ if pathname is None:
+ return None
+ return os.path.expanduser(pathname)
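+
+# Illustrative invocation exercising this parser (the suite name and flag
+# values are examples only):
+#
+#   python buildscripts/resmoke.py --suites=core --storageEngine=wiredTiger \
+#       -j 4 --mongodSetParameters="{enableTestCommands: 1}"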
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/selector.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/selector.py
new file mode 100644
index 00000000000..c2dc0fca41b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/selector.py
@@ -0,0 +1,291 @@
+"""
+Test selection utility.
+
+Defines filtering rules for what tests to include in a suite depending
+on whether they apply to C++ unit tests, dbtests, or JS tests.
+"""
+
+from __future__ import absolute_import
+
+import fnmatch
+import os.path
+import subprocess
+import sys
+
+from . import config
+from . import errors
+from . import utils
+from .utils import globstar
+from .utils import jscomment
+
+def _filter_cpp_tests(kind, root, include_files, exclude_files):
+ """
+ Generic filtering logic for C++ tests that are sourced from a list
+ of test executables.
+ """
+ include_files = utils.default_if_none(include_files, [])
+ exclude_files = utils.default_if_none(exclude_files, [])
+
+ tests = []
+ with open(root, "r") as fp:
+ for test_path in fp:
+ test_path = test_path.rstrip()
+ tests.append(test_path)
+
+ (remaining, included, _) = _filter_by_filename(kind,
+ tests,
+ include_files,
+ exclude_files)
+
+ if include_files:
+ return list(included)
+ elif exclude_files:
+ return list(remaining)
+ return tests
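+
+# For example, with a root file listing one test executable per line,
+# include_files=["build/**/sock_test"] would keep only the matching binaries
+# (the pattern is hypothetical).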
+
+def filter_cpp_unit_tests(root="build/unittests.txt", include_files=None, exclude_files=None):
+ """
+ Filters out what C++ unit tests to run.
+ """
+ return _filter_cpp_tests("C++ unit test", root, include_files, exclude_files)
+
+
+def filter_cpp_integration_tests(root="build/integration_tests.txt",
+ include_files=None,
+ exclude_files=None):
+ """
+ Filters out what C++ integration tests to run.
+ """
+ return _filter_cpp_tests("C++ integration test", root, include_files, exclude_files)
+
+
+def filter_dbtests(binary=None, include_suites=None):
+ """
+ Filters out what dbtests to run.
+ """
+
+ # Command line option overrides the YAML configuration.
+ binary = utils.default_if_none(config.DBTEST_EXECUTABLE, binary)
+ # Use the default if nothing specified.
+ binary = utils.default_if_none(binary, config.DEFAULT_DBTEST_EXECUTABLE)
+
+ include_suites = utils.default_if_none(include_suites, [])
+
+ if not utils.is_string_list(include_suites):
+ raise TypeError("include_suites must be a list of strings")
+
+ # Ensure that executable files on Windows have a ".exe" extension.
+ if sys.platform == "win32" and os.path.splitext(binary)[1] != ".exe":
+ binary += ".exe"
+
+ program = subprocess.Popen([binary, "--list"], stdout=subprocess.PIPE)
+ stdout = program.communicate()[0]
+
+ if program.returncode != 0:
+ raise errors.ResmokeError("Getting list of dbtest suites failed")
+
+ dbtests = stdout.splitlines()
+
+ if not include_suites:
+ return dbtests
+
+ dbtests = set(dbtests)
+
+ (verbatim, globbed) = _partition(include_suites, normpath=False)
+ included = _pop_all("dbtest suite", dbtests, verbatim)
+
+ for suite_pattern in globbed:
+ for suite_name in dbtests:
+ if fnmatch.fnmatchcase(suite_name, suite_pattern):
+ included.add(suite_name)
+
+ return list(included)
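+
+# For example, include_suites=["repl*"] keeps every suite name reported by
+# "dbtest --list" that matches the glob, while plain names must match a
+# listed suite exactly (and are validated by _pop_all).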
+
+
+def filter_jstests(roots,
+ include_files=None,
+ include_with_all_tags=None,
+ include_with_any_tags=None,
+ exclude_files=None,
+ exclude_with_all_tags=None,
+ exclude_with_any_tags=None):
+ """
+ Filters out what jstests to run.
+ """
+
+ include_files = utils.default_if_none(include_files, [])
+ exclude_files = utils.default_if_none(exclude_files, [])
+
+ # Command line options override the YAML options, and all should be defaulted to an empty list
+ # if not specified.
+ tags = {
+ "exclude_with_all_tags": exclude_with_all_tags,
+ "exclude_with_any_tags": exclude_with_any_tags,
+ "include_with_all_tags": include_with_all_tags,
+ "include_with_any_tags": include_with_any_tags,
+ }
+ cmd_line_values = (
+ ("exclude_with_all_tags", config.EXCLUDE_WITH_ALL_TAGS),
+ ("exclude_with_any_tags", config.EXCLUDE_WITH_ANY_TAGS),
+ ("include_with_all_tags", config.INCLUDE_WITH_ALL_TAGS),
+ ("include_with_any_tags", config.INCLUDE_WITH_ANY_TAGS),
+ )
+ for (tag_category, cmd_line_val) in cmd_line_values:
+ if cmd_line_val is not None:
+ # Ignore the empty string when it is used as a tag. Specifying an empty string on the
+ # command line allows a user to unset the list of tags specified in the YAML
+ # configuration.
+ tags[tag_category] = set([tag for tag in cmd_line_val.split(",") if tag != ""])
+ else:
+ tags[tag_category] = set(utils.default_if_none(tags[tag_category], []))
+
+ using_tags = 0
+ for name in tags:
+ if not utils.is_string_set(tags[name]):
+ raise TypeError("%s must be a list of strings" % (name))
+ if len(tags[name]) > 0:
+ using_tags += 1
+
+ if using_tags > 1:
+ raise ValueError("Can only specify one of 'include_with_all_tags', 'include_with_any_tags',"
+ " 'exclude_with_all_tags', and 'exclude_with_any_tags'. If you wish to"
+ " unset one of these options, use --includeWithAllTags='' or similar")
+
+ jstests = []
+ for root in roots:
+ jstests.extend(globstar.iglob(root))
+
+ (remaining, included, _) = _filter_by_filename("jstest",
+ jstests,
+ include_files,
+ exclude_files)
+
+ # Skip parsing comments if not using tags
+ if not using_tags:
+ if include_files:
+ return list(included)
+ elif exclude_files:
+ return list(remaining)
+ return jstests
+
+ jstests = set(remaining)
+ excluded = set()
+
+ for filename in jstests:
+ file_tags = set(jscomment.get_tags(filename))
+ if tags["include_with_all_tags"] and not tags["include_with_all_tags"] - file_tags:
+ included.add(filename)
+ elif tags["include_with_any_tags"] and tags["include_with_any_tags"] & file_tags:
+ included.add(filename)
+ elif tags["exclude_with_all_tags"] and not tags["exclude_with_all_tags"] - file_tags:
+ excluded.add(filename)
+ elif tags["exclude_with_any_tags"] and tags["exclude_with_any_tags"] & file_tags:
+ excluded.add(filename)
+
+ if tags["include_with_all_tags"] or tags["include_with_any_tags"]:
+ if exclude_files:
+ return list((included & jstests) - excluded)
+ return list(included)
+ else:
+ if include_files:
+ return list(included | (jstests - excluded))
+ return list(jstests - excluded)
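+
+# For example, a test whose tag comment yields ["sharding", "slow"] is
+# selected by include_with_any_tags={"sharding"} and rejected by
+# exclude_with_all_tags={"sharding", "slow"} (tag names are hypothetical).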
+
+
+def _filter_by_filename(kind, universe, include_files, exclude_files):
+ """
+ Filters out what tests to run solely by filename.
+
+ Returns the triplet (remaining, included, excluded), where
+ 'remaining' is 'universe' after 'included' and 'excluded' were
+ removed from it.
+ """
+
+ if not utils.is_string_list(include_files):
+ raise TypeError("include_files must be a list of strings")
+ elif not utils.is_string_list(exclude_files):
+ raise TypeError("exclude_files must be a list of strings")
+ elif include_files and exclude_files:
+ raise ValueError("Cannot specify both include_files and exclude_files")
+
+ universe = set(universe)
+ if include_files:
+ (verbatim, globbed) = _partition(include_files)
+ # Remove all matching files of 'verbatim' from 'universe'.
+ included_verbatim = _pop_all(kind, universe, verbatim)
+ included_globbed = set()
+
+ for file_pattern in globbed:
+ included_globbed.update(globstar.iglob(file_pattern))
+
+ # Remove all matching files of 'included_globbed' from 'universe' without checking whether
+ # the same file was matched multiple times. This implicitly takes the intersection
+ # of 'included_globbed' and 'universe'.
+ included_globbed = _pop_all(kind, universe, included_globbed, validate=False)
+ return (universe, included_verbatim | included_globbed, set())
+
+ elif exclude_files:
+ (verbatim, globbed) = _partition(exclude_files)
+
+ # Remove all matching files of 'verbatim' from 'universe'.
+ excluded_verbatim = _pop_all(kind, universe, verbatim)
+ excluded_globbed = set()
+
+ for file_pattern in globbed:
+ excluded_globbed.update(globstar.iglob(file_pattern))
+
+ # Remove all matching files of 'excluded_globbed' from 'universe' without checking whether
+ # the same file was matched multiple times. This implicitly takes the intersection
+ # of 'excluded_globbed' and 'universe'.
+ excluded_globbed = _pop_all(kind, universe, excluded_globbed, validate=False)
+ return (universe, set(), excluded_verbatim | excluded_globbed)
+
+ return (universe, set(), set())
+
+
+def _partition(pathnames, normpath=True):
+ """
+ Splits 'pathnames' into two separate lists based on whether they
+ use a glob pattern.
+
+ Returns the pair (non-globbed pathnames, globbed pathnames).
+ """
+
+ verbatim = []
+ globbed = []
+
+ for pathname in pathnames:
+ if globstar.is_glob_pattern(pathname):
+ globbed.append(pathname)
+ continue
+
+ # Normalize 'pathname' so exact string comparison can be used later.
+ if normpath:
+ pathname = os.path.normpath(pathname)
+ verbatim.append(pathname)
+
+ return (verbatim, globbed)
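+
+# For example, _partition(["jstests/a.js", "jstests/*.js"]) returns the pair
+# (["jstests/a.js"], ["jstests/*.js"]), assuming globstar.is_glob_pattern()
+# treats "*" as a glob metacharacter (paths are hypothetical).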
+
+
+def _pop_all(kind, universe, iterable, validate=True):
+ """
+ Removes all elements of 'iterable' from 'universe' and returns them.
+
+ If 'validate' is true, then a ValueError is raised if an element
+ would be removed multiple times, or if an element of 'iterable' does
+ not appear in 'universe' at all.
+ """
+
+ members = set()
+
+ for elem in iterable:
+ if validate and elem in members:
+ raise ValueError("%s '%s' specified multiple times" % (kind, elem))
+
+ if elem in universe:
+ universe.remove(elem)
+ members.add(elem)
+ elif validate:
+ raise ValueError("Unrecognized %s '%s'" % (kind, elem))
+
+ return members
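+
+# Illustrative sketch (values are hypothetical):
+#
+#   universe = set(["a.js", "b.js", "c.js"])
+#   _pop_all("jstest", universe, ["a.js"])  # returns set(["a.js"])
+#   # 'universe' is now set(["b.js", "c.js"]); repeating the call would raise
+#   # ValueError("Unrecognized jstest 'a.js'") because validate is true.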
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/__init__.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/__init__.py
new file mode 100644
index 00000000000..e4acff00521
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/__init__.py
@@ -0,0 +1,9 @@
+"""
+Extension to the unittest package to support buildlogger and parallel
+test execution.
+"""
+
+from __future__ import absolute_import
+
+from . import executor
+from . import suite
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/executor.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/executor.py
new file mode 100644
index 00000000000..5d79abd6ac6
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/executor.py
@@ -0,0 +1,307 @@
+"""
+Driver of the test execution framework.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+from . import fixtures
+from . import hooks as _hooks
+from . import job as _job
+from . import report as _report
+from . import testcases
+from .. import config as _config
+from .. import errors
+from .. import logging
+from .. import utils
+from ..utils import queue as _queue
+
+
+class TestGroupExecutor(object):
+ """
+ Executes a test group.
+
+ Responsible for setting up and tearing down the fixtures that the
+ tests execute against.
+ """
+
+ _TIMEOUT = 24 * 60 * 60 # 1 day (effectively no limit, but keeps join() interruptible)
+
+ def __init__(self,
+ exec_logger,
+ test_group,
+ logging_config,
+ config=None,
+ fixture=None,
+ hooks=None):
+ """
+ Initializes the TestGroupExecutor with the test group to run.
+ """
+
+ # Build a logger for executing this group of tests.
+ logger_name = "%s:%s" % (exec_logger.name, test_group.test_kind)
+ self.logger = logging.loggers.new_logger(logger_name, parent=exec_logger)
+
+ self.logging_config = logging_config
+ self.fixture_config = fixture
+ self.hooks_config = utils.default_if_none(hooks, [])
+ self.test_config = utils.default_if_none(config, {})
+
+ self._test_group = test_group
+
+ self._using_buildlogger = logging.config.using_buildlogger(logging_config)
+ self._build_config = None
+
+ if self._using_buildlogger:
+ self._build_config = logging.buildlogger.get_config()
+
+ # Must be done after getting buildlogger configuration.
+ self._jobs = [self._make_job(job_num) for job_num in xrange(_config.JOBS)]
+
+ def run(self):
+ """
+ Executes the test group.
+
+ Any exceptions that occur during setting up or tearing down a
+ fixture are propagated.
+ """
+
+ self.logger.info("Starting execution of %ss...", self._test_group.test_kind)
+
+ return_code = 0
+ try:
+ if not self._setup_fixtures():
+ return_code = 2
+ return
+
+ num_repeats = _config.REPEAT
+ while num_repeats > 0:
+ test_queue = self._make_test_queue()
+ self._test_group.record_start()
+ (report, interrupted) = self._run_tests(test_queue)
+ self._test_group.record_end(report)
+
+ # If the user triggered a KeyboardInterrupt, then we should stop.
+ if interrupted:
+ raise errors.UserInterrupt("Received interrupt from user")
+
+ sb = [] # String builder.
+ self._test_group.summarize_latest(sb)
+ self.logger.info("Summary: %s", "\n ".join(sb))
+
+ if not report.wasSuccessful():
+ return_code = 1
+ if _config.FAIL_FAST:
+ break
+
+ # Clear the report so it can be reused for the next execution.
+ for job in self._jobs:
+ job.report.reset()
+ num_repeats -= 1
+ finally:
+ if not self._teardown_fixtures():
+ return_code = 2
+ self._test_group.return_code = return_code
+
+ def _setup_fixtures(self):
+ """
+ Sets up a fixture for each job.
+ """
+
+ for job in self._jobs:
+ try:
+ job.fixture.setup()
+ except:
+ self.logger.exception("Encountered an error while setting up %s.", job.fixture)
+ return False
+
+ # Once they have all been started, wait for them to become available.
+ for job in self._jobs:
+ try:
+ job.fixture.await_ready()
+ except:
+ self.logger.exception("Encountered an error while waiting for %s to be ready",
+ job.fixture)
+ return False
+
+ return True
+
+ def _run_tests(self, test_queue):
+ """
+ Starts a thread for each Job instance and blocks until all of
+ the tests are run.
+
+ Returns a (combined report, user interrupted) pair, where the
+ report contains the status and timing information of tests run
+ by all of the threads.
+ """
+
+ threads = []
+ interrupt_flag = threading.Event()
+ user_interrupted = False
+ try:
+ # Run each Job instance in its own thread.
+ for job in self._jobs:
+ t = threading.Thread(target=job, args=(test_queue, interrupt_flag))
+ # Do not wait for tests to finish executing if interrupted by the user.
+ t.daemon = True
+ t.start()
+ threads.append(t)
+
+ joined = False
+ while not joined:
+ # Need to pass a timeout to join() so that KeyboardInterrupt exceptions
+ # are propagated.
+ joined = test_queue.join(TestGroupExecutor._TIMEOUT)
+ except (KeyboardInterrupt, SystemExit):
+ interrupt_flag.set()
+ user_interrupted = True
+ else:
+ # Only wait for all the Job instances if not interrupted by the user.
+ for t in threads:
+ t.join()
+
+ reports = [job.report for job in self._jobs]
+ combined_report = _report.TestReport.combine(*reports)
+
+ # We cannot return 'interrupt_flag.is_set()' because the interrupt flag can be set by a Job
+ # instance if a test fails and it decides to drain the queue. We only want to raise a
+ # StopExecution exception in TestGroupExecutor.run() if the user triggered the interrupt.
+ return (combined_report, user_interrupted)
+
+ def _teardown_fixtures(self):
+ """
+ Tears down all of the fixtures.
+
+ Returns true if all fixtures were torn down successfully, and
+ false otherwise.
+ """
+
+ success = True
+ for job in self._jobs:
+ try:
+ if not job.fixture.teardown():
+ self.logger.warn("Teardown of %s was not successful.", job.fixture)
+ success = False
+ except:
+ self.logger.exception("Encountered an error while tearing down %s.", job.fixture)
+ success = False
+
+ return success
+
+ def _get_build_id(self, job_num):
+ """
+ Returns a unique build id for a job.
+ """
+
+ build_config = self._build_config
+
+ if self._using_buildlogger:
+ # Use a distinct "builder" for each job in order to separate their logs.
+ if build_config is not None and "builder" in build_config:
+ build_config = build_config.copy()
+ build_config["builder"] = "%s_job%d" % (build_config["builder"], job_num)
+
+ build_id = logging.buildlogger.new_build_id(build_config)
+
+ if build_config is None or build_id is None:
+ self.logger.info("Encountered an error configuring buildlogger for job #%d, falling"
+ " back to stderr.", job_num)
+
+ return build_id, build_config
+
+ return None, build_config
+
+ def _make_fixture(self, job_num, build_id, build_config):
+ """
+ Creates a fixture for a job.
+ """
+
+ fixture_config = {}
+ fixture_class = fixtures.NOOP_FIXTURE_CLASS
+
+ if self.fixture_config is not None:
+ fixture_config = self.fixture_config.copy()
+ fixture_class = fixture_config.pop("class")
+
+ logger_name = "%s:job%d" % (fixture_class, job_num)
+ logger = logging.loggers.new_logger(logger_name, parent=logging.loggers.FIXTURE)
+ logging.config.apply_buildlogger_global_handler(logger,
+ self.logging_config,
+ build_id=build_id,
+ build_config=build_config)
+
+ return fixtures.make_fixture(fixture_class, logger, job_num, **fixture_config)
+
+ def _make_hooks(self, job_num, fixture):
+ """
+ Creates the custom behaviors for the job's fixture.
+ """
+
+ behaviors = []
+
+ for behavior_config in self.hooks_config:
+ behavior_config = behavior_config.copy()
+ behavior_class = behavior_config.pop("class")
+
+ logger_name = "%s:job%d" % (behavior_class, job_num)
+ logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+ behavior = _hooks.make_custom_behavior(behavior_class,
+ logger,
+ fixture,
+ **behavior_config)
+ behaviors.append(behavior)
+
+ return behaviors
+
+ def _make_job(self, job_num):
+ """
+ Returns a Job instance with its own fixture, hooks, and test
+ report.
+ """
+
+ build_id, build_config = self._get_build_id(job_num)
+ fixture = self._make_fixture(job_num, build_id, build_config)
+ hooks = self._make_hooks(job_num, fixture)
+
+ logger_name = "%s:job%d" % (self.logger.name, job_num)
+ logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ if build_id is not None:
+ endpoint = logging.buildlogger.APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": build_id}
+ url = "%s/%s/" % (_config.BUILDLOGGER_URL.rstrip("/"), endpoint.strip("/"))
+ logger.info("Writing output of job #%d to %s.", job_num, url)
+
+ report = _report.TestReport(logger,
+ self.logging_config,
+ build_id=build_id,
+ build_config=build_config)
+
+ return _job.Job(logger, fixture, hooks, report)
+
+ def _make_test_queue(self):
+ """
+ Returns a queue of TestCase instances.
+
+ Use a multi-consumer queue instead of a unittest.TestSuite so
+ that the test cases can be dispatched to multiple threads.
+ """
+
+ test_kind_logger = logging.loggers.new_logger(self._test_group.test_kind,
+ parent=logging.loggers.TESTS)
+
+ # Put all the test cases in a queue.
+ queue = _queue.Queue()
+ for test_name in self._test_group.tests:
+ test_case = testcases.make_test_case(self._test_group.test_kind,
+ test_kind_logger,
+ test_name,
+ **self.test_config)
+ queue.put(test_case)
+
+ # Add sentinel value for each job to indicate when there are no more items to process.
+ for _ in xrange(_config.JOBS):
+ queue.put(None)
+
+ return queue
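+
+# Illustrative sketch of the consumer side of this queue (the real loop lives
+# in the job module; this is only an outline):
+#
+#   while True:
+#       test_case = queue.get()
+#       if test_case is None:  # sentinel: no more tests for this job
+#           break
+#       # ... run 'test_case' and report the result ...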
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py
new file mode 100644
index 00000000000..d68a66911d2
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/__init__.py
@@ -0,0 +1,32 @@
+"""
+Fixtures for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+from .interface import Fixture, ReplFixture
+from .standalone import MongoDFixture
+from .replicaset import ReplicaSetFixture
+from .masterslave import MasterSlaveFixture
+from .shardedcluster import ShardedClusterFixture
+
+
+NOOP_FIXTURE_CLASS = "Fixture"
+
+_FIXTURES = {
+ "Fixture": Fixture,
+ "MongoDFixture": MongoDFixture,
+ "ReplicaSetFixture": ReplicaSetFixture,
+ "MasterSlaveFixture": MasterSlaveFixture,
+ "ShardedClusterFixture": ShardedClusterFixture,
+}
+
+
+def make_fixture(class_name, *args, **kwargs):
+ """
+ Factory function for creating Fixture instances.
+ """
+
+ if class_name not in _FIXTURES:
+ raise ValueError("Unknown fixture class '%s'" % (class_name))
+ return _FIXTURES[class_name](*args, **kwargs)
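+
+# Illustrative sketch (the logger and options shown are hypothetical):
+#
+#   fixture = make_fixture("MongoDFixture", logger, 0,
+#                          mongod_options={"nojournal": ""})
+#   fixture.setup()
+#   fixture.await_ready()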
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py
new file mode 100644
index 00000000000..5fbf537c107
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/interface.py
@@ -0,0 +1,128 @@
+"""
+Interface of the different fixtures for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import time
+
+import pymongo
+
+from ... import errors
+from ... import logging
+
+
+class Fixture(object):
+ """
+ Base class for all fixtures.
+ """
+
+ def __init__(self, logger, job_num):
+ """
+ Initializes the fixtures with a logger instance.
+ """
+
+ if not isinstance(logger, logging.Logger):
+ raise TypeError("logger must be a Logger instance")
+
+ if not isinstance(job_num, int):
+ raise TypeError("job_num must be an integer")
+ elif job_num < 0:
+ raise ValueError("job_num must be a nonnegative integer")
+
+ self.logger = logger
+ self.job_num = job_num
+
+ self.port = None # Port that the mongo shell should connect to.
+
+ def setup(self):
+ """
+ Creates the fixture.
+ """
+ pass
+
+ def await_ready(self):
+ """
+ Blocks until the fixture can be used for testing.
+ """
+ pass
+
+ def teardown(self):
+ """
+ Destroys the fixture. Returns true if the teardown was successful, and false otherwise.
+ """
+ return True
+
+ def is_running(self):
+ """
+ Returns true if the fixture is still operating and more tests
+ can be run, and false otherwise.
+ """
+ return True
+
+ def get_connection_string(self):
+ """
+ Returns the connection string for this fixture. This is NOT a
+ driver connection string, but a connection string of the format
+ expected by the mongo::ConnectionString class.
+ """
+ raise NotImplementedError("get_connection_string must be implemented by Fixture subclasses")
+
+ def __str__(self):
+ return "%s (Job #%d)" % (self.__class__.__name__, self.job_num)
+
+ def __repr__(self):
+ return "%r(%r, %r)" % (self.__class__.__name__, self.logger, self.job_num)
+
+
+class ReplFixture(Fixture):
+ """
+ Base class for all fixtures that support replication.
+ """
+
+ AWAIT_REPL_TIMEOUT_MINS = 5
+
+ def get_primary(self):
+ """
+ Returns the primary of a replica set, or the master of a
+ master-slave deployment.
+ """
+ raise NotImplementedError("get_primary must be implemented by ReplFixture subclasses")
+
+ def get_secondaries(self):
+ """
+ Returns a list containing the secondaries of a replica set, or
+ the slave of a master-slave deployment.
+ """
+ raise NotImplementedError("get_secondaries must be implemented by ReplFixture subclasses")
+
+ def await_repl(self):
+ """
+ Blocks until all operations on the primary/master have
+ replicated to all other nodes.
+ """
+ raise NotImplementedError("await_repl must be implemented by ReplFixture subclasses")
+
+ def retry_until_wtimeout(self, insert_fn):
+ """
+ Given a callback function representing an insert operation on
+ the primary, handle any connection failures, and keep retrying
+ the operation for up to 'AWAIT_REPL_TIMEOUT_MINS' minutes.
+
+ The insert operation callback should take an argument for the
+ number of remaining seconds to provide as the timeout for the
+ operation.
+ """
+
+ deadline = time.time() + ReplFixture.AWAIT_REPL_TIMEOUT_MINS * 60
+
+ while True:
+ try:
+ remaining = deadline - time.time()
+ insert_fn(remaining)
+ break
+ except pymongo.errors.ConnectionFailure:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ raise errors.ServerFailure("Failed to connect to the primary on port %d" %
+ self.port)
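+
+# Illustrative sketch of a callback for retry_until_wtimeout(); the concrete
+# versions used by the replica set and master/slave fixtures look like this
+# ('client' and the collection name are stand-ins):
+#
+#   def insert_fn(remaining_secs):
+#       remaining_millis = int(round(remaining_secs * 1000))
+#       write_concern = pymongo.WriteConcern(w=2, wtimeout=remaining_millis)
+#       coll = client.resmoke.get_collection("await", write_concern=write_concern)
+#       coll.insert_one({"awaiting": "repl"})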
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py
new file mode 100644
index 00000000000..f3dbf87eb91
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/masterslave.py
@@ -0,0 +1,209 @@
+"""
+Master/slave fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import os.path
+
+import pymongo
+
+from . import interface
+from . import standalone
+from ... import config
+from ... import logging
+from ... import utils
+
+
+class MasterSlaveFixture(interface.ReplFixture):
+ """
+ Fixture which provides JSTests with a master/slave deployment to
+ run against.
+ """
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongod_executable=None,
+ mongod_options=None,
+ master_options=None,
+ slave_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False):
+
+ interface.ReplFixture.__init__(self, logger, job_num)
+
+ if "dbpath" in mongod_options:
+ raise ValueError("Cannot specify mongod_options.dbpath")
+
+ self.mongod_executable = mongod_executable
+ self.mongod_options = utils.default_if_none(mongod_options, {})
+ self.master_options = utils.default_if_none(master_options, {})
+ self.slave_options = utils.default_if_none(slave_options, {})
+ self.preserve_dbpath = preserve_dbpath
+
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self._dbpath_prefix = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+
+ self.master = None
+ self.slave = None
+
+ def setup(self):
+ if self.master is None:
+ self.master = self._new_mongod_master()
+ self.master.setup()
+ self.port = self.master.port
+
+ if self.slave is None:
+ self.slave = self._new_mongod_slave()
+ self.slave.setup()
+
+ def await_ready(self):
+ self.master.await_ready()
+ self.slave.await_ready()
+
+ # Do a replicated write to ensure that the slave has finished with its initial sync before
+ # starting to run any tests.
+ client = utils.new_mongo_client(self.port)
+
+ # Keep retrying this until it times out waiting for replication.
+ def insert_fn(remaining_secs):
+ remaining_millis = int(round(remaining_secs * 1000))
+ write_concern = pymongo.WriteConcern(w=2, wtimeout=remaining_millis)
+ coll = client.resmoke.get_collection("await_ready", write_concern=write_concern)
+ coll.insert_one({"awaiting": "ready"})
+
+ try:
+ self.retry_until_wtimeout(insert_fn)
+ except pymongo.errors.WTimeoutError:
+ self.logger.info("Replication of write operation timed out.")
+ raise
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success if nothing is running.
+
+ if not running_at_start:
+ self.logger.info("Master-slave deployment was expected to be running in teardown(),"
+ " but wasn't.")
+
+ if self.slave is not None:
+ if running_at_start:
+ self.logger.info("Stopping slave...")
+
+ success = self.slave.teardown()
+
+ if running_at_start:
+ self.logger.info("Successfully stopped slave.")
+
+ if self.master is not None:
+ if running_at_start:
+ self.logger.info("Stopping master...")
+
+ success = self.master.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully stopped master.")
+
+ return success
+
+ def is_running(self):
+ return (self.master is not None and self.master.is_running() and
+ self.slave is not None and self.slave.is_running())
+
+ def get_primary(self):
+ return self.master
+
+ def get_secondaries(self):
+ return [self.slave]
+
+ def await_repl(self):
+ """
+ Inserts a document into each database on the master and waits
+ for all write operations to be acknowledged by the master-slave
+ deployment.
+ """
+
+ client = utils.new_mongo_client(self.port)
+
+ # We verify that each database has replicated to the slave because in the case of an initial
+ # sync, the slave may acknowledge writes to one database before it has finished syncing
+ # others.
+ db_names = client.database_names()
+ self.logger.info("Awaiting replication of inserts to each of the following databases on"
+ " master on port %d: %s",
+ self.port,
+ db_names)
+
+ for db_name in db_names:
+ if db_name == "local":
+ continue # The local database is expected to differ, ignore.
+
+ self.logger.info("Awaiting replication of insert to database %s (w=2, wtimeout=%d min)"
+ " to master on port %d",
+ db_name,
+ interface.ReplFixture.AWAIT_REPL_TIMEOUT_MINS,
+ self.port)
+
+ # Keep retrying this until it times out waiting for replication.
+ def insert_fn(remaining_secs):
+ remaining_millis = int(round(remaining_secs * 1000))
+ write_concern = pymongo.WriteConcern(w=2, wtimeout=remaining_millis)
+ coll = client[db_name].get_collection("await_repl", write_concern=write_concern)
+ coll.insert_one({"awaiting": "repl"})
+
+ try:
+ self.retry_until_wtimeout(insert_fn)
+ except pymongo.errors.WTimeoutError:
+ self.logger.info("Replication of write operation timed out.")
+ raise
+
+ self.logger.info("Replication of write operation completed for database %s.", db_name)
+
+ self.logger.info("Finished awaiting replication.")
+
+ def _new_mongod(self, mongod_logger, mongod_options):
+ """
+ Returns a standalone.MongoDFixture with the specified logger and
+ options.
+ """
+ return standalone.MongoDFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath)
+
+ def _new_mongod_master(self):
+ """
+ Returns a standalone.MongoDFixture configured to be used as the
+ master of a master-slave deployment.
+ """
+
+ logger_name = "%s:master" % (self.logger.name)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = self.mongod_options.copy()
+ mongod_options.update(self.master_options)
+ mongod_options["master"] = ""
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "master")
+ return self._new_mongod(mongod_logger, mongod_options)
+
+ def _new_mongod_slave(self):
+ """
+ Returns a standalone.MongoDFixture configured to be used as the
+ slave of a master-slave deployment.
+ """
+
+ logger_name = "%s:slave" % (self.logger.name)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = self.mongod_options.copy()
+ mongod_options.update(self.slave_options)
+ mongod_options["slave"] = ""
+ mongod_options["source"] = "localhost:%d" % (self.port)
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "slave")
+ return self._new_mongod(mongod_logger, mongod_options)
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py
new file mode 100644
index 00000000000..e9930627641
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -0,0 +1,211 @@
+"""
+Replica set fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import os.path
+import time
+
+import pymongo
+
+from . import interface
+from . import standalone
+from ... import config
+from ... import logging
+from ... import utils
+
+
+class ReplicaSetFixture(interface.ReplFixture):
+ """
+ Fixture which provides JSTests with a replica set to run against.
+ """
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongod_executable=None,
+ mongod_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False,
+ num_nodes=2,
+ auth_options=None,
+ replset_config_options=None):
+
+ interface.ReplFixture.__init__(self, logger, job_num)
+
+ self.mongod_executable = mongod_executable
+ self.mongod_options = utils.default_if_none(mongod_options, {})
+ self.preserve_dbpath = preserve_dbpath
+ self.num_nodes = num_nodes
+ self.auth_options = auth_options
+ self.replset_config_options = utils.default_if_none(replset_config_options, {})
+
+ # The dbpath in mongod_options is used as the dbpath prefix for replica set members and
+ # takes precedence over other settings. The ShardedClusterFixture uses this parameter to
+ # create replica sets and assign their dbpath structure explicitly.
+ if "dbpath" in self.mongod_options:
+ self._dbpath_prefix = self.mongod_options.pop("dbpath")
+ else:
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self._dbpath_prefix = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+
+ self.nodes = []
+ self.replset_name = None
+
+ def setup(self):
+ self.replset_name = self.mongod_options.get("replSet", "rs")
+
+ if not self.nodes:
+ for i in xrange(self.num_nodes):
+ node = self._new_mongod(i, self.replset_name)
+ self.nodes.append(node)
+
+ for node in self.nodes:
+ node.setup()
+
+ self.port = self.get_primary().port
+
+ # Call await_ready() on each of the nodes here because we want to start the election as
+ # soon as possible.
+ for node in self.nodes:
+ node.await_ready()
+
+ # Initiate the replica set.
+ members = []
+ for (i, node) in enumerate(self.nodes):
+ member_info = {"_id": i, "host": node.get_connection_string()}
+ if i > 0:
+ member_info["priority"] = 0
+ if i >= 7:
+ # At most 7 members of a replica set can vote, so any additional members must be non-voting.
+ member_info["votes"] = 0
+ members.append(member_info)
+ initiate_cmd_obj = {"replSetInitiate": {"_id": self.replset_name, "members": members}}
+
+ client = utils.new_mongo_client(port=self.port)
+ if self.auth_options is not None:
+ auth_db = client[self.auth_options["authenticationDatabase"]]
+ auth_db.authenticate(self.auth_options["username"],
+ password=self.auth_options["password"],
+ mechanism=self.auth_options["authenticationMechanism"])
+
+ if self.replset_config_options.get("configsvr", False):
+ initiate_cmd_obj["replSetInitiate"]["configsvr"] = True
+
+ self.logger.info("Issuing replSetInitiate command...")
+ client.admin.command(initiate_cmd_obj)
+
+ def await_ready(self):
+ # Wait for the primary to be elected.
+ client = utils.new_mongo_client(port=self.port)
+ while True:
+ is_master = client.admin.command("isMaster")["ismaster"]
+ if is_master:
+ break
+ self.logger.info("Waiting for primary on port %d to be elected.", self.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ # Wait for the secondaries to become available.
+ for secondary in self.get_secondaries():
+ client = utils.new_mongo_client(port=secondary.port,
+ read_preference=pymongo.ReadPreference.SECONDARY)
+ while True:
+ is_secondary = client.admin.command("isMaster")["secondary"]
+ if is_secondary:
+ break
+ self.logger.info("Waiting for secondary on port %d to become available.",
+ secondary.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start:
+ self.logger.info("Replica set was expected to be running in teardown(), but wasn't.")
+ else:
+ self.logger.info("Stopping all members of the replica set...")
+
+ # Terminate the secondaries first to reduce noise in the logs.
+ for node in reversed(self.nodes):
+ success = node.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully stopped all members of the replica set.")
+
+ return success
+
+ def is_running(self):
+ return all(node.is_running() for node in self.nodes)
+
+ def get_primary(self):
+ # The primary is always the first element of the 'nodes' list because all other members of
+ # the replica set are configured with priority=0.
+ return self.nodes[0]
+
+ def get_secondaries(self):
+ return self.nodes[1:]
+
+ def await_repl(self):
+ self.logger.info("Awaiting replication of insert (w=%d, wtimeout=%d min) to primary on port"
+ " %d", self.num_nodes, interface.ReplFixture.AWAIT_REPL_TIMEOUT_MINS,
+ self.port)
+ client = utils.new_mongo_client(port=self.port)
+
+ # Keep retrying this until it times out waiting for replication.
+ def insert_fn(remaining_secs):
+ remaining_millis = int(round(remaining_secs * 1000))
+ write_concern = pymongo.WriteConcern(w=self.num_nodes, wtimeout=remaining_millis)
+ coll = client.resmoke.get_collection("await_repl", write_concern=write_concern)
+ coll.insert_one({"awaiting": "repl"})
+
+ try:
+ self.retry_until_wtimeout(insert_fn)
+ except pymongo.errors.WTimeoutError:
+ self.logger.info("Replication of write operation timed out.")
+ raise
+
+ self.logger.info("Replication of write operation completed.")
+
+ def _new_mongod(self, index, replset_name):
+ """
+ Returns a standalone.MongoDFixture configured to be used as a
+ replica-set member of 'replset_name'.
+ """
+
+ mongod_logger = self._get_logger_for_mongod(index)
+ mongod_options = self.mongod_options.copy()
+ mongod_options["replSet"] = replset_name
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "node%d" % (index))
+
+ return standalone.MongoDFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath)
+
+ def _get_logger_for_mongod(self, index):
+ """
+ Returns a new logging.Logger instance for the primary or one of
+ the secondaries of the replica set.
+ """
+
+ if index == 0:
+ logger_name = "%s:primary" % (self.logger.name)
+ else:
+ suffix = str(index - 1) if self.num_nodes > 2 else ""
+ logger_name = "%s:secondary%s" % (self.logger.name, suffix)
+
+ return logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ def get_connection_string(self):
+ if self.replset_name is None:
+ raise ValueError("Must call setup() before calling get_connection_string()")
+
+ conn_strs = [node.get_connection_string() for node in self.nodes]
+ return self.replset_name + "/" + ",".join(conn_strs)
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
new file mode 100644
index 00000000000..ab7b26bf372
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/shardedcluster.py
@@ -0,0 +1,347 @@
+"""
+Sharded cluster fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import copy
+import os.path
+import time
+
+import pymongo
+
+from . import interface
+from . import standalone
+from . import replicaset
+from ... import config
+from ... import core
+from ... import errors
+from ... import logging
+from ... import utils
+
+
+class ShardedClusterFixture(interface.Fixture):
+ """
+ Fixture which provides JSTests with a sharded cluster to run
+ against.
+ """
+
+ _CONFIGSVR_REPLSET_NAME = "config-rs"
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongos_executable=None,
+ mongos_options=None,
+ mongod_executable=None,
+ mongod_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False,
+ num_shards=1,
+ separate_configsvr=True,
+ enable_sharding=None,
+ auth_options=None):
+ """
+ Initializes ShardedClusterFixture with the different options to
+ the mongod and mongos processes.
+ """
+
+ interface.Fixture.__init__(self, logger, job_num)
+
+ if "dbpath" in mongod_options:
+ raise ValueError("Cannot specify mongod_options.dbpath")
+
+ self.mongos_executable = mongos_executable
+ self.mongos_options = utils.default_if_none(mongos_options, {})
+ self.mongod_executable = mongod_executable
+ self.mongod_options = utils.default_if_none(mongod_options, {})
+ self.preserve_dbpath = preserve_dbpath
+ self.num_shards = num_shards
+ self.separate_configsvr = separate_configsvr
+ self.enable_sharding = utils.default_if_none(enable_sharding, [])
+ self.auth_options = auth_options
+
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self._dbpath_prefix = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+
+ self.configsvr = None
+ self.mongos = None
+ self.shards = []
+
+ def setup(self):
+ if self.separate_configsvr:
+ if self.configsvr is None:
+ self.configsvr = self._new_configsvr()
+ self.configsvr.setup()
+
+ if not self.shards:
+ for i in xrange(self.num_shards):
+ shard = self._new_shard(i)
+ self.shards.append(shard)
+
+ # Start up each of the shards
+ for shard in self.shards:
+ shard.setup()
+
+ def await_ready(self):
+ # Wait for the config server
+ if self.configsvr is not None:
+ self.configsvr.await_ready()
+
+ # Wait for each of the shards
+ for shard in self.shards:
+ shard.await_ready()
+
+ if self.mongos is None:
+ self.mongos = self._new_mongos()
+
+ # Start up the mongos
+ self.mongos.setup()
+
+ # Wait for the mongos
+ self.mongos.await_ready()
+ self.port = self.mongos.port
+
+ client = utils.new_mongo_client(port=self.port)
+ if self.auth_options is not None:
+ auth_db = client[self.auth_options["authenticationDatabase"]]
+ auth_db.authenticate(self.auth_options["username"],
+ password=self.auth_options["password"],
+ mechanism=self.auth_options["authenticationMechanism"])
+
+ # Inform mongos about each of the shards
+ for shard in self.shards:
+ self._add_shard(client, shard)
+
+ # Enable sharding on each of the specified databases
+ for db_name in self.enable_sharding:
+ self.logger.info("Enabling sharding for '%s' database...", db_name)
+ client.admin.command({"enablesharding": db_name})
+
+ def teardown(self):
+ """
+ Shuts down the sharded cluster.
+ """
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start:
+ self.logger.info("Sharded cluster was expected to be running in teardown(), but"
+ " wasn't.")
+
+ if self.configsvr is not None:
+ if running_at_start:
+ self.logger.info("Stopping config server...")
+
+ success = self.configsvr.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the config server.")
+
+ if self.mongos is not None:
+ if running_at_start:
+ self.logger.info("Stopping mongos...")
+
+ success = self.mongos.teardown() and success
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the mongos.")
+
+ if running_at_start:
+ self.logger.info("Stopping shards...")
+ for shard in self.shards:
+ success = shard.teardown() and success
+ if running_at_start:
+ self.logger.info("Successfully terminated all shards.")
+
+ return success
+
+ def is_running(self):
+ """
+ Returns true if the config server, all shards, and the mongos
+ are all still operating, and false otherwise.
+ """
+ return (self.configsvr is not None and self.configsvr.is_running() and
+ all(shard.is_running() for shard in self.shards) and
+ self.mongos is not None and self.mongos.is_running())
+
+ def _new_configsvr(self):
+ """
+ Returns a replicaset.ReplicaSetFixture configured to be used as
+ the config server of a sharded cluster.
+ """
+
+ logger_name = "%s:configsvr" % (self.logger.name)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = copy.deepcopy(self.mongod_options)
+ mongod_options["configsvr"] = ""
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "config")
+ mongod_options["replSet"] = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
+ mongod_options["storageEngine"] = "wiredTiger"
+
+ return replicaset.ReplicaSetFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath,
+ num_nodes=3,
+ auth_options=self.auth_options,
+ replset_config_options={"configsvr": True})
+
+ def _new_shard(self, index):
+ """
+ Returns a standalone.MongoDFixture configured to be used as a
+ shard in a sharded cluster.
+ """
+
+ logger_name = "%s:shard%d" % (self.logger.name, index)
+ mongod_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongod_options = copy.deepcopy(self.mongod_options)
+ mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard%d" % (index))
+
+ return standalone.MongoDFixture(mongod_logger,
+ self.job_num,
+ mongod_executable=self.mongod_executable,
+ mongod_options=mongod_options,
+ preserve_dbpath=self.preserve_dbpath)
+
+ def _new_mongos(self):
+ """
+ Returns a _MongoSFixture configured to be used as the mongos for
+ a sharded cluster.
+ """
+
+ logger_name = "%s:mongos" % (self.logger.name)
+ mongos_logger = logging.loggers.new_logger(logger_name, parent=self.logger)
+
+ mongos_options = copy.deepcopy(self.mongos_options)
+ if self.separate_configsvr:
+ configdb_replset = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
+ configdb_port = self.configsvr.port
+ mongos_options["configdb"] = "%s/localhost:%d" % (configdb_replset, configdb_port)
+ else:
+ mongos_options["configdb"] = "localhost:%d" % (self.shards[0].port)
+
+ return _MongoSFixture(mongos_logger,
+ self.job_num,
+ mongos_executable=self.mongos_executable,
+ mongos_options=mongos_options)
+
+ def _add_shard(self, client, shard):
+ """
+ Add the specified program as a shard by executing the addShard
+ command.
+
+ See https://docs.mongodb.org/manual/reference/command/addShard
+ for more details.
+ """
+
+ self.logger.info("Adding localhost:%d as a shard...", shard.port)
+ client.admin.command({"addShard": "localhost:%d" % (shard.port)})
+
+
+class _MongoSFixture(interface.Fixture):
+ """
+ Fixture which provides JSTests with a mongos to connect to.
+ """
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongos_executable=None,
+ mongos_options=None):
+
+ interface.Fixture.__init__(self, logger, job_num)
+
+ # Command line options override the YAML configuration.
+ self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE, mongos_executable)
+
+ self.mongos_options = utils.default_if_none(mongos_options, {}).copy()
+
+ self.mongos = None
+
+ def setup(self):
+ if "chunkSize" not in self.mongos_options:
+ self.mongos_options["chunkSize"] = 50
+
+ if "port" not in self.mongos_options:
+ self.mongos_options["port"] = core.network.PortAllocator.next_fixture_port(self.job_num)
+ self.port = self.mongos_options["port"]
+
+ mongos = core.programs.mongos_program(self.logger,
+ executable=self.mongos_executable,
+ **self.mongos_options)
+ try:
+ self.logger.info("Starting mongos on port %d...\n%s", self.port, mongos.as_command())
+ mongos.start()
+ self.logger.info("mongos started on port %d with pid %d.", self.port, mongos.pid)
+ except:
+ self.logger.exception("Failed to start mongos on port %d.", self.port)
+ raise
+
+ self.mongos = mongos
+
+ def await_ready(self):
+ deadline = time.time() + standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS
+
+ # Wait until the mongos is accepting connections. The retry logic is necessary to support
+ # versions of PyMongo <3.0 that immediately raise a ConnectionFailure if a connection cannot
+ # be established.
+ while True:
+ # Check whether the mongos exited for some reason.
+ exit_code = self.mongos.poll()
+ if exit_code is not None:
+ raise errors.ServerFailure("Could not connect to mongos on port %d, process ended"
+ " unexpectedly with code %d." % (self.port, exit_code))
+
+ try:
+ # Use a shorter connection timeout to more closely satisfy the requested deadline.
+ client = utils.new_mongo_client(self.port, timeout_millis=500)
+ client.admin.command("ping")
+ break
+ except pymongo.errors.ConnectionFailure:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ raise errors.ServerFailure(
+ "Failed to connect to mongos on port %d after %d seconds"
+ % (self.port, standalone.MongoDFixture.AWAIT_READY_TIMEOUT_SECS))
+
+ self.logger.info("Waiting to connect to mongos on port %d.", self.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ self.logger.info("Successfully contacted the mongos on port %d.", self.port)
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start and self.port is not None:
+ self.logger.info("mongos on port %d was expected to be running in teardown(), but"
+ " wasn't." % (self.port))
+
+ if self.mongos is not None:
+ if running_at_start:
+ self.logger.info("Stopping mongos on port %d with pid %d...",
+ self.port,
+ self.mongos.pid)
+ self.mongos.stop()
+
+ exit_code = self.mongos.wait()
+ success = exit_code == 0
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the mongos on port %d, exited with code"
+ " %d",
+ self.port,
+ exit_code)
+
+ return success
+
+ def is_running(self):
+ return self.mongos is not None and self.mongos.poll() is None
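Taken together, ShardedClusterFixture and _MongoSFixture follow the setup() / await_ready() / teardown() lifecycle shared by all fixtures in this directory. A minimal sketch of driving it directly, assuming resmokelib's logging and port allocation are already initialized and that logger is an existing resmokelib Logger:

    fixture = ShardedClusterFixture(logger, job_num=0,
                                    mongod_options={}, num_shards=2,
                                    enable_sharding=["test"])
    fixture.setup()        # starts the config server replica set and both shards
    fixture.await_ready()  # starts mongos, runs addShard, enables sharding
    try:
        pass               # run tests against localhost:<fixture.port> here
    finally:
        fixture.teardown() # stops the mongos, the config server, and the shards
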
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py
new file mode 100644
index 00000000000..a8c1dc597c5
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/fixtures/standalone.py
@@ -0,0 +1,151 @@
+"""
+Standalone mongod fixture for executing JSTests against.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import shutil
+import time
+
+import pymongo
+
+from . import interface
+from ... import config
+from ... import core
+from ... import errors
+from ... import utils
+
+
+class MongoDFixture(interface.Fixture):
+ """
+ Fixture which provides JSTests with a standalone mongod to run
+ against.
+ """
+
+ AWAIT_READY_TIMEOUT_SECS = 300
+
+ def __init__(self,
+ logger,
+ job_num,
+ mongod_executable=None,
+ mongod_options=None,
+ dbpath_prefix=None,
+ preserve_dbpath=False):
+
+ interface.Fixture.__init__(self, logger, job_num)
+
+ if "dbpath" in mongod_options and dbpath_prefix is not None:
+ raise ValueError("Cannot specify both mongod_options.dbpath and dbpath_prefix")
+
+ # Command line options override the YAML configuration.
+ self.mongod_executable = utils.default_if_none(config.MONGOD_EXECUTABLE, mongod_executable)
+
+ self.mongod_options = utils.default_if_none(mongod_options, {}).copy()
+ self.preserve_dbpath = preserve_dbpath
+
+ # The dbpath in mongod_options takes precedence over other settings to make it easier for
+ # users to specify a dbpath containing data to test against.
+ if "dbpath" not in self.mongod_options:
+ # Command line options override the YAML configuration.
+ dbpath_prefix = utils.default_if_none(config.DBPATH_PREFIX, dbpath_prefix)
+ dbpath_prefix = utils.default_if_none(dbpath_prefix, config.DEFAULT_DBPATH_PREFIX)
+ self.mongod_options["dbpath"] = os.path.join(dbpath_prefix,
+ "job%d" % (self.job_num),
+ config.FIXTURE_SUBDIR)
+ self._dbpath = self.mongod_options["dbpath"]
+
+ self.mongod = None
+
+ def setup(self):
+ if not self.preserve_dbpath:
+ shutil.rmtree(self._dbpath, ignore_errors=True)
+
+ try:
+ os.makedirs(self._dbpath)
+ except os.error:
+ # Directory already exists.
+ pass
+
+ if "port" not in self.mongod_options:
+ self.mongod_options["port"] = core.network.PortAllocator.next_fixture_port(self.job_num)
+ self.port = self.mongod_options["port"]
+
+ mongod = core.programs.mongod_program(self.logger,
+ executable=self.mongod_executable,
+ **self.mongod_options)
+ try:
+ self.logger.info("Starting mongod on port %d...\n%s", self.port, mongod.as_command())
+ mongod.start()
+ self.logger.info("mongod started on port %d with pid %d.", self.port, mongod.pid)
+ except:
+ self.logger.exception("Failed to start mongod on port %d.", self.port)
+ raise
+
+ self.mongod = mongod
+
+ def await_ready(self):
+ deadline = time.time() + MongoDFixture.AWAIT_READY_TIMEOUT_SECS
+
+ # Wait until the mongod is accepting connections. The retry logic is necessary to support
+ # versions of PyMongo <3.0 that immediately raise a ConnectionFailure if a connection cannot
+ # be established.
+ while True:
+ # Check whether the mongod exited for some reason.
+ exit_code = self.mongod.poll()
+ if exit_code is not None:
+ raise errors.ServerFailure("Could not connect to mongod on port %d, process ended"
+ " unexpectedly with code %d." % (self.port, exit_code))
+
+ try:
+ # Use a shorter connection timeout to more closely satisfy the requested deadline.
+ client = utils.new_mongo_client(self.port, timeout_millis=500)
+ client.admin.command("ping")
+ break
+ except pymongo.errors.ConnectionFailure:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ raise errors.ServerFailure(
+ "Failed to connect to mongod on port %d after %d seconds"
+ % (self.port, MongoDFixture.AWAIT_READY_TIMEOUT_SECS))
+
+ self.logger.info("Waiting to connect to mongod on port %d.", self.port)
+ time.sleep(0.1) # Wait a little bit before trying again.
+
+ self.logger.info("Successfully contacted the mongod on port %d.", self.port)
+
+ def teardown(self):
+ running_at_start = self.is_running()
+ success = True # Still a success even if nothing is running.
+
+ if not running_at_start and self.port is not None:
+ self.logger.info("mongod on port %d was expected to be running in teardown(), but"
+ " wasn't." % (self.port))
+
+ if self.mongod is not None:
+ if running_at_start:
+ self.logger.info("Stopping mongod on port %d with pid %d...",
+ self.port,
+ self.mongod.pid)
+ self.mongod.stop()
+
+ exit_code = self.mongod.wait()
+ success = exit_code == 0
+
+ if running_at_start:
+ self.logger.info("Successfully terminated the mongod on port %d, exited with code"
+ " %d.",
+ self.port,
+ exit_code)
+
+ return success
+
+ def is_running(self):
+ return self.mongod is not None and self.mongod.poll() is None
+
+ def get_connection_string(self):
+ if self.mongod is None:
+ raise ValueError("Must call setup() before calling get_connection_string()")
+
+ return "localhost:%d" % self.port
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/hooks.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/hooks.py
new file mode 100644
index 00000000000..4c580fa8392
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/hooks.py
@@ -0,0 +1,704 @@
+"""
+Customize the behavior of a fixture by allowing special code to be
+executed before or after each test, and before or after each suite.
+"""
+
+from __future__ import absolute_import
+
+import os
+import sys
+
+import bson
+import pymongo
+
+from . import fixtures
+from . import testcases
+from .. import errors
+from .. import logging
+from .. import utils
+
+
+def make_custom_behavior(class_name, *args, **kwargs):
+ """
+ Factory function for creating CustomBehavior instances.
+ """
+
+ if class_name not in _CUSTOM_BEHAVIORS:
+ raise ValueError("Unknown custom behavior class '%s'" % (class_name))
+ return _CUSTOM_BEHAVIORS[class_name](*args, **kwargs)
+
+
+class CustomBehavior(object):
+ """
+ The common interface all CustomBehaviors will inherit from.
+ """
+
+ @staticmethod
+ def start_dynamic_test(test_case, test_report):
+ """
+ If a CustomBehavior wants to add a test case that will show up
+ in the test report, it should use this method to add it to the
+ report, since we will need to count it as a dynamic test to get
+ the stats in the summary information right.
+ """
+ test_report.startTest(test_case, dynamic=True)
+
+ def __init__(self, logger, fixture):
+ """
+ Initializes the CustomBehavior with the specified fixture.
+ """
+
+ if not isinstance(logger, logging.Logger):
+ raise TypeError("logger must be a Logger instance")
+
+ self.logger = logger
+ self.fixture = fixture
+
+ def before_suite(self, test_report):
+ """
+ The test runner calls this exactly once before it starts
+ running the suite.
+ """
+ pass
+
+ def after_suite(self, test_report):
+ """
+ The test runner calls this exactly once after all tests have
+ finished executing. Be sure to reset the behavior back to its
+ original state so that it can be run again.
+ """
+ pass
+
+ def before_test(self, test_report):
+ """
+ Each test will call this before it executes.
+
+ Raises a TestFailure if the test should be marked as a failure,
+ or a ServerFailure if the fixture exits uncleanly or
+ unexpectedly.
+ """
+ pass
+
+ def after_test(self, test_report):
+ """
+ Each test will call this after it executes.
+
+ Raises a TestFailure if the test should be marked as a failure,
+ or a ServerFailure if the fixture exits uncleanly or
+ unexpectedly.
+ """
+ pass
+
+
+class CleanEveryN(CustomBehavior):
+ """
+ Restarts the fixture after it has run 'n' tests.
+ On mongod-related fixtures, this will clear the dbpath.
+ """
+
+ DEFAULT_N = 20
+
+ def __init__(self, logger, fixture, n=DEFAULT_N):
+ CustomBehavior.__init__(self, logger, fixture)
+
+ # Try to isolate what test triggers the leak by restarting the fixture each time.
+ if "detect_leaks=1" in os.getenv("ASAN_OPTIONS", ""):
+ self.logger.info("ASAN_OPTIONS environment variable set to detect leaks, so restarting"
+ " the fixture after each test instead of after every %d.", n)
+ n = 1
+
+ self.n = n
+ self.tests_run = 0
+
+ def after_test(self, test_report):
+ self.tests_run += 1
+ if self.tests_run >= self.n:
+ self.logger.info("%d tests have been run against the fixture, stopping it...",
+ self.tests_run)
+ self.tests_run = 0
+
+ teardown_success = self.fixture.teardown()
+ self.logger.info("Starting the fixture back up again...")
+ self.fixture.setup()
+ self.fixture.await_ready()
+
+ # Raise this after calling setup in case --continueOnFailure was specified.
+ if not teardown_success:
+ raise errors.TestFailure("%s did not exit cleanly" % (self.fixture))
+
+
+class CheckReplDBHash(CustomBehavior):
+ """
+ Waits for replication after each test, then checks that the dbhashes
+ of all databases other than "local" match on the primary and all of
+ the secondaries. If any dbhashes do not match, logs information
+ about what was different (e.g. different numbers of collections,
+ missing documents in a collection, mismatching documents, etc.).
+
+ Compatible only with ReplFixture subclasses.
+ """
+
+ def __init__(self, logger, fixture):
+ if not isinstance(fixture, fixtures.ReplFixture):
+ raise TypeError("%s does not support replication" % (fixture.__class__.__name__))
+
+ CustomBehavior.__init__(self, logger, fixture)
+
+ self.test_case = testcases.TestCase(self.logger, "Hook", "#dbhash#")
+
+ self.started = False
+
+ def after_test(self, test_report):
+ """
+ After each test, check that the dbhash of the test database is
+ the same on all nodes in the replica set or master/slave
+ fixture.
+ """
+
+ try:
+ if not self.started:
+ CustomBehavior.start_dynamic_test(self.test_case, test_report)
+ self.started = True
+
+ # Wait until all operations have replicated.
+ self.fixture.await_repl()
+
+ success = True
+ sb = [] # String builder.
+
+ primary = self.fixture.get_primary()
+ primary_conn = utils.new_mongo_client(port=primary.port)
+
+ for secondary in self.fixture.get_secondaries():
+ read_preference = pymongo.ReadPreference.SECONDARY
+ secondary_conn = utils.new_mongo_client(port=secondary.port,
+ read_preference=read_preference)
+ # Skip arbiters.
+ if secondary_conn.admin.command("isMaster").get("arbiterOnly", False):
+ continue
+
+ all_matched = CheckReplDBHash._check_all_db_hashes(primary_conn,
+ secondary_conn,
+ sb)
+ if not all_matched:
+ sb.insert(0,
+ "One or more databases were different between the primary on port %d"
+ " and the secondary on port %d:"
+ % (primary.port, secondary.port))
+
+ success = all_matched and success
+
+ if not success:
+ # Adding failures to a TestReport requires traceback information, so we raise
+ # a 'self.test_case.failureException' that we will catch ourselves.
+ self.test_case.logger.info("\n ".join(sb))
+ raise self.test_case.failureException("The dbhashes did not match")
+ except self.test_case.failureException as err:
+ self.test_case.logger.exception("The dbhashes did not match.")
+ self.test_case.return_code = 1
+ test_report.addFailure(self.test_case, sys.exc_info())
+ test_report.stopTest(self.test_case)
+ raise errors.ServerFailure(err.args[0])
+ except pymongo.errors.WTimeoutError:
+ self.test_case.logger.exception("Awaiting replication timed out.")
+ self.test_case.return_code = 2
+ test_report.addError(self.test_case, sys.exc_info())
+ test_report.stopTest(self.test_case)
+ raise errors.StopExecution("Awaiting replication timed out")
+
+ def after_suite(self, test_report):
+ """
+ If we get to this point, the #dbhash# test must have been
+ successful, so add it to the test report.
+ """
+
+ if self.started:
+ self.test_case.logger.info("The dbhashes matched for all tests.")
+ self.test_case.return_code = 0
+ test_report.addSuccess(self.test_case)
+ # TestReport.stopTest() has already been called if there was a failure.
+ test_report.stopTest(self.test_case)
+
+ self.started = False
+
+ @staticmethod
+ def _check_all_db_hashes(primary_conn, secondary_conn, sb):
+ """
+ Returns true if for each non-local database, the dbhash command
+ returns the same MD5 hash on the primary as it does on the
+ secondary. Returns false otherwise.
+
+ Logs a message describing the differences if any database's
+ dbhash did not match.
+ """
+
+ # Overview of how we'll check that everything replicated correctly between these two nodes:
+ #
+ # - Check whether they have the same databases.
+ # - If not, log which databases are missing where, and dump the contents of any that are
+ # missing.
+ #
+ # - Check whether each database besides "local" gives the same md5 field as the result of
+ # running the dbhash command.
+ # - If not, check whether they have the same collections.
+ # - If not, log which collections are missing where, and dump the contents of any
+ # that are missing.
+ # - If so, check that the hash of each non-capped collection matches.
+ # - If any do not match, log the diff of the collection between the two nodes.
+
+ success = True
+
+ if not CheckReplDBHash._check_dbs_present(primary_conn, secondary_conn, sb):
+ return False
+
+ for db_name in primary_conn.database_names():
+ if db_name == "local":
+ continue # We don't expect this to match across different nodes.
+
+ matched = CheckReplDBHash._check_db_hash(primary_conn, secondary_conn, db_name, sb)
+ success = matched and success
+
+ return success
+
+ @staticmethod
+ def _check_dbs_present(primary_conn, secondary_conn, sb):
+ """
+ Returns true if the list of databases on the primary is
+ identical to the list of databases on the secondary, and false
+ otherwise.
+ """
+
+ success = True
+ primary_dbs = primary_conn.database_names()
+
+ # Can't run database_names() on secondary, so instead use the listDatabases command.
+ # TODO: Use database_names() once PYTHON-921 is resolved.
+ list_db_output = secondary_conn.admin.command("listDatabases")
+ secondary_dbs = [db["name"] for db in list_db_output["databases"]]
+
+ # A difference in databases is not considered an error when the database only
+ # contains system collections. Such a difference is only logged when other
+ # differences are encountered, i.e., when success = False.
+ missing_on_primary, missing_on_secondary = CheckReplDBHash._check_difference(
+ set(primary_dbs), set(secondary_dbs), "database")
+
+ for missing_db in missing_on_secondary:
+ db = primary_conn[missing_db]
+ coll_names = db.collection_names()
+ non_system_colls = [name for name in coll_names if not name.startswith("system.")]
+
+ # It is only an error if there are any non-system collections in the database,
+ # otherwise it's not well defined whether they should exist or not.
+ if non_system_colls:
+ sb.append("Database %s present on primary but not on secondary." % (missing_db))
+ CheckReplDBHash._dump_all_collections(db, non_system_colls, sb)
+ success = False
+
+ for missing_db in missing_on_primary:
+ db = secondary_conn[missing_db]
+
+ # Can't run collection_names() on secondary, so instead use the listCollections command.
+ # TODO: Always use collection_names() once PYTHON-921 is resolved. Then much of the
+ # logic that is duplicated here can be consolidated.
+ list_coll_output = db.command("listCollections")["cursor"]["firstBatch"]
+ coll_names = [coll["name"] for coll in list_coll_output]
+ non_system_colls = [name for name in coll_names if not name.startswith("system.")]
+
+ # It is only an error if there are any non-system collections in the database,
+ # otherwise it's not well defined whether it should exist or not.
+ if non_system_colls:
+ sb.append("Database %s present on secondary but not on primary." % (missing_db))
+ CheckReplDBHash._dump_all_collections(db, non_system_colls, sb)
+ success = False
+
+ return success
+
+ @staticmethod
+ def _check_db_hash(primary_conn, secondary_conn, db_name, sb):
+ """
+ Returns true if the dbhash for 'db_name' matches on the primary
+ and the secondary, and false otherwise.
+
+ Appends a message to 'sb' describing the differences if the
+ dbhashes do not match.
+ """
+
+ primary_hash = primary_conn[db_name].command("dbhash")
+ secondary_hash = secondary_conn[db_name].command("dbhash")
+
+ if primary_hash["md5"] == secondary_hash["md5"]:
+ return True
+
+ success = CheckReplDBHash._check_dbs_eq(
+ primary_conn, secondary_conn, primary_hash, secondary_hash, db_name, sb)
+
+ if not success:
+ sb.append("Database %s has a different hash on the primary and the secondary"
+ " ([ %s ] != [ %s ]):"
+ % (db_name, primary_hash["md5"], secondary_hash["md5"]))
+
+ return success
+
+ @staticmethod
+ def _check_dbs_eq(primary_conn, secondary_conn, primary_hash, secondary_hash, db_name, sb):
+ """
+ Returns true if all non-capped collections had the same hash in
+ the dbhash response, and false otherwise.
+
+ Appends information to 'sb' about the differences between the
+ 'db_name' database on the primary and the 'db_name' database on
+ the secondary, if any.
+ """
+
+ success = True
+
+ primary_db = primary_conn[db_name]
+ secondary_db = secondary_conn[db_name]
+
+ primary_coll_hashes = primary_hash["collections"]
+ secondary_coll_hashes = secondary_hash["collections"]
+
+ primary_coll_names = set(primary_coll_hashes.keys())
+ secondary_coll_names = set(secondary_coll_hashes.keys())
+
+ missing_on_primary, missing_on_secondary = CheckReplDBHash._check_difference(
+ primary_coll_names, secondary_coll_names, "collection", sb=sb)
+
+ if missing_on_primary or missing_on_secondary:
+
+ # 'sb' already describes which collections are missing where.
+ for coll_name in missing_on_primary:
+ CheckReplDBHash._dump_all_documents(secondary_db, coll_name, sb)
+ for coll_name in missing_on_secondary:
+ CheckReplDBHash._dump_all_documents(primary_db, coll_name, sb)
+ return False
+
+ for coll_name in primary_coll_names & secondary_coll_names:
+ primary_coll_hash = primary_coll_hashes[coll_name]
+ secondary_coll_hash = secondary_coll_hashes[coll_name]
+
+ if primary_coll_hash == secondary_coll_hash:
+ continue
+
+ # Ignore capped collections because they are not expected to match on all nodes.
+ if primary_db.command({"collStats": coll_name})["capped"]:
+ # Still fail if the collection is not capped on the secondary.
+ if not secondary_db.command({"collStats": coll_name})["capped"]:
+ success = False
+ sb.append("%s.%s collection is capped on primary but not on secondary."
+ % (primary_db.name, coll_name))
+ sb.append("%s.%s collection is capped, ignoring." % (primary_db.name, coll_name))
+ continue
+ # Still fail if the collection is capped on the secondary, but not on the primary.
+ elif secondary_db.command({"collStats": coll_name})["capped"]:
+ success = False
+ sb.append("%s.%s collection is capped on secondary but not on primary."
+ % (primary_db.name, coll_name))
+ continue
+
+ success = False
+ sb.append("Collection %s.%s has a different hash on the primary and the secondary"
+ " ([ %s ] != [ %s ]):"
+ % (db_name, coll_name, primary_coll_hash, secondary_coll_hash))
+ CheckReplDBHash._check_colls_eq(primary_db, secondary_db, coll_name, sb)
+
+ if success:
+ sb.append("All collections that were expected to match did.")
+ return success
+
+ @staticmethod
+ def _check_colls_eq(primary_db, secondary_db, coll_name, sb):
+ """
+ Appends information to 'sb' about the differences, if any, between
+ the 'coll_name' collection on the primary and the 'coll_name'
+ collection on the secondary.
+ """
+
+ codec_options = bson.CodecOptions(document_class=TypeSensitiveSON)
+
+ primary_coll = primary_db.get_collection(coll_name, codec_options=codec_options)
+ secondary_coll = secondary_db.get_collection(coll_name, codec_options=codec_options)
+
+ primary_docs = CheckReplDBHash._extract_documents(primary_coll)
+ secondary_docs = CheckReplDBHash._extract_documents(secondary_coll)
+
+ CheckReplDBHash._get_collection_diff(primary_docs, secondary_docs, sb)
+
+ @staticmethod
+ def _extract_documents(collection):
+ """
+ Returns a list of all documents in the collection, sorted by
+ their _id.
+ """
+
+ return [doc for doc in collection.find().sort("_id", pymongo.ASCENDING)]
+
+ @staticmethod
+ def _get_collection_diff(primary_docs, secondary_docs, sb):
+ """
+ Checks whether the documents in 'primary_docs' exactly match
+ the documents in 'secondary_docs'.
+
+ Appends information to 'sb' about what matched or did not match.
+ """
+
+ matched = True
+
+ # These need to be lists instead of sets because documents aren't hashable.
+ missing_on_primary = []
+ missing_on_secondary = []
+
+ p_idx = 0 # Keep track of our position in 'primary_docs'.
+ s_idx = 0 # Keep track of our position in 'secondary_docs'.
+
+ while p_idx < len(primary_docs) and s_idx < len(secondary_docs):
+ primary_doc = primary_docs[p_idx]
+ secondary_doc = secondary_docs[s_idx]
+
+ if primary_doc == secondary_doc:
+ p_idx += 1
+ s_idx += 1
+ continue
+
+ # We have mismatching documents.
+ matched = False
+
+ if primary_doc["_id"] == secondary_doc["_id"]:
+ sb.append("Mismatching document:")
+ sb.append(" primary: %s" % (primary_doc))
+ sb.append(" secondary: %s" % (secondary_doc))
+ p_idx += 1
+ s_idx += 1
+
+ # One node was missing a document. Since the documents are sorted by _id, the doc with
+ # the smaller _id was the one that was skipped.
+ elif primary_doc["_id"] < secondary_doc["_id"]:
+ missing_on_secondary.append(primary_doc)
+
+ # Only move past the doc that we know was skipped.
+ p_idx += 1
+
+ else: # primary_doc["_id"] > secondary_doc["_id"]
+ missing_on_primary.append(secondary_doc)
+
+ # Only move past the doc that we know was skipped.
+ s_idx += 1
+
+ # Check if there are any unmatched documents left.
+ while p_idx < len(primary_docs):
+ matched = False
+ missing_on_secondary.append(primary_docs[p_idx])
+ p_idx += 1
+ while s_idx < len(secondary_docs):
+ matched = False
+ missing_on_primary.append(secondary_docs[s_idx])
+ s_idx += 1
+
+ if not matched:
+ CheckReplDBHash._append_differences(
+ missing_on_primary, missing_on_secondary, "document", sb)
+ else:
+ sb.append("All documents matched.")
+
+ @staticmethod
+ def _check_difference(primary_set, secondary_set, item_type_name, sb=None):
+ """
+ Returns a pair of sets (missing_on_primary, missing_on_secondary)
+ describing how 'primary_set' and 'secondary_set' differ. The sets
+ contain information about the primary and secondary,
+ respectively, e.g. the database names that exist on each node.
+
+ Appends information about anything that differed to 'sb'.
+ """
+
+ missing_on_primary = set()
+ missing_on_secondary = set()
+
+ for item in primary_set - secondary_set:
+ missing_on_secondary.add(item)
+
+ for item in secondary_set - primary_set:
+ missing_on_primary.add(item)
+
+ if sb is not None:
+ CheckReplDBHash._append_differences(
+ missing_on_primary, missing_on_secondary, item_type_name, sb)
+
+ return (missing_on_primary, missing_on_secondary)
+
+ @staticmethod
+ def _append_differences(missing_on_primary, missing_on_secondary, item_type_name, sb):
+ """
+ Given two iterables representing items that were missing on the
+ primary or the secondary respectively, append the information
+ about which items were missing to 'sb', if any.
+ """
+
+ if missing_on_primary:
+ sb.append("The following %ss were present on the secondary, but not on the"
+ " primary:" % (item_type_name))
+ for item in missing_on_primary:
+ sb.append(str(item))
+
+ if missing_on_secondary:
+ sb.append("The following %ss were present on the primary, but not on the"
+ " secondary:" % (item_type_name))
+ for item in missing_on_secondary:
+ sb.append(str(item))
+
+ @staticmethod
+ def _dump_all_collections(database, coll_names, sb):
+ """
+ Appends the contents of each of the collections in 'coll_names'
+ to 'sb'.
+ """
+
+ if coll_names:
+ sb.append("Database %s contains the following collections: %s"
+ % (database.name, coll_names))
+ for coll_name in coll_names:
+ CheckReplDBHash._dump_all_documents(database, coll_name, sb)
+ else:
+ sb.append("No collections in database %s." % (database.name))
+
+ @staticmethod
+ def _dump_all_documents(database, coll_name, sb):
+ """
+ Appends the contents of 'coll_name' to 'sb'.
+ """
+
+ docs = CheckReplDBHash._extract_documents(database[coll_name])
+ if docs:
+ sb.append("Documents in %s.%s:" % (database.name, coll_name))
+ for doc in docs:
+ sb.append(" %s" % (doc))
+ else:
+ sb.append("No documents in %s.%s." % (database.name, coll_name))
+
+class TypeSensitiveSON(bson.SON):
+ """
+ Extends bson.SON to perform additional type-checking of document values
+ to differentiate BSON types.
+ """
+
+ def items_with_types(self):
+ """
+ Returns a list of triples. Each triple consists of a field name, a
+ field value, and a field type for each field in the document.
+ """
+
+ return [(key, self[key], type(self[key])) for key in self]
+
+ def __eq__(self, other):
+ """
+ Comparison to another TypeSensitiveSON is order-sensitive and
+ type-sensitive. Comparing a TypeSensitiveSON against any other
+ type raises a TypeError.
+ """
+
+ if isinstance(other, TypeSensitiveSON):
+ return (len(self) == len(other) and
+ self.items_with_types() == other.items_with_types())
+
+ raise TypeError("TypeSensitiveSON objects cannot be compared to other types")
+
+class ValidateCollections(CustomBehavior):
+ """
+ Runs full validation (db.collection.validate(true)) on all collections
+ in all databases on every standalone or primary mongod. If validation
+ fails (i.e. validate.valid is false), the validate command's return
+ object is logged.
+
+ Compatible with all fixture subclasses.
+ """
+ DEFAULT_FULL = True
+ DEFAULT_SCANDATA = True
+
+ def __init__(self, logger, fixture, full=DEFAULT_FULL, scandata=DEFAULT_SCANDATA):
+ CustomBehavior.__init__(self, logger, fixture)
+
+ if not isinstance(full, bool):
+ raise TypeError("Fixture option full is not specified as type bool")
+
+ if not isinstance(scandata, bool):
+ raise TypeError("Fixture option scandata is not specified as type bool")
+
+ self.test_case = testcases.TestCase(self.logger, "Hook", "#validate#")
+ self.started = False
+ self.full = full
+ self.scandata = scandata
+
+ def after_test(self, test_report):
+ """
+ After each test, run a full validation on all collections.
+ """
+
+ try:
+ if not self.started:
+ CustomBehavior.start_dynamic_test(self.test_case, test_report)
+ self.started = True
+
+ sb = [] # String builder.
+
+ # The self.fixture.port can be used for client connection to a
+ # standalone mongod, a replica-set primary, or mongos.
+ # TODO: Run collection validation on all nodes in a replica-set.
+ port = self.fixture.port
+ conn = utils.new_mongo_client(port=port)
+
+ success = ValidateCollections._check_all_collections(
+ conn, sb, self.full, self.scandata)
+
+ if not success:
+ # Adding failures to a TestReport requires traceback information, so we raise
+ # a 'self.test_case.failureException' that we will catch ourselves.
+ self.test_case.logger.info("\n ".join(sb))
+ raise self.test_case.failureException("Collection validation failed")
+ except self.test_case.failureException as err:
+ self.test_case.logger.exception("Collection validation failed")
+ self.test_case.return_code = 1
+ test_report.addFailure(self.test_case, sys.exc_info())
+ test_report.stopTest(self.test_case)
+ raise errors.ServerFailure(err.args[0])
+
+ def after_suite(self, test_report):
+ """
+ If we get to this point, the #validate# test must have been
+ successful, so add it to the test report.
+ """
+
+ if self.started:
+ self.test_case.logger.info("Collection validation passed for all tests.")
+ self.test_case.return_code = 0
+ test_report.addSuccess(self.test_case)
+ # TestReport.stopTest() has already been called if there was a failure.
+ test_report.stopTest(self.test_case)
+
+ self.started = False
+
+ @staticmethod
+ def _check_all_collections(conn, sb, full, scandata):
+ """
+ Returns true if for all databases and collections validate_collection
+ succeeds. Returns false otherwise.
+
+ Logs a message if any database's collection fails validate_collection.
+ """
+
+ success = True
+
+ for db_name in conn.database_names():
+ for coll_name in conn[db_name].collection_names():
+ try:
+ conn[db_name].validate_collection(coll_name, full=full, scandata=scandata)
+ except pymongo.errors.CollectionInvalid as err:
+ sb.append("Database %s, collection %s failed to validate:\n%s"
+ % (db_name, coll_name, err.args[0]))
+ success = False
+ return success
+
+
+_CUSTOM_BEHAVIORS = {
+ "CleanEveryN": CleanEveryN,
+ "CheckReplDBHash": CheckReplDBHash,
+ "ValidateCollections": ValidateCollections,
+}
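Because _CUSTOM_BEHAVIORS maps class names to classes, a suite's YAML config can name hooks as plain strings and make_custom_behavior() does the lookup. A minimal sketch of registering an additional hook under the same conventions; LogEveryTest is illustrative, not part of this patch:

    class LogEveryTest(CustomBehavior):
        # Trivial hook: just notes each test boundary.
        def before_test(self, test_report):
            self.logger.info("A test is about to run.")

        def after_test(self, test_report):
            self.logger.info("A test just finished.")

    _CUSTOM_BEHAVIORS["LogEveryTest"] = LogEveryTest

    # A suite could then instantiate it by name:
    #   hook = make_custom_behavior("LogEveryTest", logger, fixture)
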
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/job.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/job.py
new file mode 100644
index 00000000000..bc5705ffdfb
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/job.py
@@ -0,0 +1,195 @@
+"""
+Enables support for running tests simultaneously by processing them
+from a multi-consumer queue.
+"""
+
+from __future__ import absolute_import
+
+import sys
+
+from .. import config
+from .. import errors
+from ..utils import queue as _queue
+
+
+class Job(object):
+ """
+ Runs tests from a queue.
+ """
+
+ def __init__(self, logger, fixture, hooks, report):
+ """
+ Initializes the job with the specified fixture and custom
+ behaviors.
+ """
+
+ self.logger = logger
+ self.fixture = fixture
+ self.hooks = hooks
+ self.report = report
+
+ def __call__(self, queue, interrupt_flag):
+ """
+ Continuously executes tests from 'queue' and records their
+ details in 'report'.
+ """
+
+ should_stop = False
+ try:
+ self._run(queue, interrupt_flag)
+ except errors.StopExecution as err:
+ # Stop running tests immediately.
+ self.logger.error("Received a StopExecution exception: %s.", err)
+ should_stop = True
+ except:
+ # Unknown error, stop execution.
+ self.logger.exception("Encountered an error during test execution.")
+ should_stop = True
+
+ if should_stop:
+ # Set the interrupt flag so that other jobs do not start running more tests.
+ interrupt_flag.set()
+ # Drain the queue to unblock the main thread.
+ Job._drain_queue(queue)
+
+ def _run(self, queue, interrupt_flag):
+ """
+ Calls the before/after suite hooks and continuously executes
+ tests from 'queue'.
+ """
+
+ for hook in self.hooks:
+ hook.before_suite(self.report)
+
+ while not interrupt_flag.is_set():
+ test = queue.get_nowait()
+ try:
+ if test is None:
+ # Sentinel value received, so exit.
+ break
+ self._execute_test(test)
+ finally:
+ queue.task_done()
+
+ for hook in self.hooks:
+ hook.after_suite(self.report)
+
+ def _execute_test(self, test):
+ """
+ Calls the before/after test hooks and executes 'test'.
+ """
+
+ test.configure(self.fixture)
+ self._run_hooks_before_tests(test)
+
+ test(self.report)
+ if config.FAIL_FAST and not self.report.wasSuccessful():
+ test.logger.info("%s failed, so stopping..." % (test.shortDescription()))
+ raise errors.StopExecution("%s failed" % (test.shortDescription()))
+
+ if not self.fixture.is_running():
+ self.logger.error("%s marked as a failure because the fixture crashed during the test.",
+ test.shortDescription())
+ self.report.setFailure(test, return_code=2)
+ # Always fail fast if the fixture fails.
+ raise errors.StopExecution("%s not running after %s" %
+ (self.fixture, test.shortDescription()))
+
+ self._run_hooks_after_tests(test)
+
+ def _run_hooks_before_tests(self, test):
+ """
+ Runs the before_test method on each of the hooks.
+
+ Swallows any TestFailure exceptions if set to continue on
+ failure, and reraises any other exceptions.
+ """
+
+ try:
+ for hook in self.hooks:
+ hook.before_test(self.report)
+
+ except errors.StopExecution:
+ raise
+
+ except errors.ServerFailure:
+ self.logger.exception("%s marked as a failure by a hook's before_test.",
+ test.shortDescription())
+ self._fail_test(test, sys.exc_info(), return_code=2)
+ raise errors.StopExecution("A hook's before_test failed")
+
+ except errors.TestFailure:
+ self.logger.exception("%s marked as a failure by a hook's before_test.",
+ test.shortDescription())
+ self._fail_test(test, sys.exc_info(), return_code=1)
+ if config.FAIL_FAST:
+ raise errors.StopExecution("A hook's before_test failed")
+
+ except:
+ # Record the before_test() error in 'self.report'.
+ self.report.startTest(test)
+ self.report.addError(test, sys.exc_info())
+ self.report.stopTest(test)
+ raise
+
+ def _run_hooks_after_tests(self, test):
+ """
+ Runs the after_test method on each of the hooks.
+
+ Swallows any TestFailure exceptions if set to continue on
+ failure, and reraises any other exceptions.
+ """
+ try:
+ for hook in self.hooks:
+ hook.after_test(self.report)
+
+ except errors.StopExecution:
+ raise
+
+ except errors.ServerFailure:
+ self.logger.exception("%s marked as a failure by a hook's after_test.",
+ test.shortDescription())
+ self.report.setFailure(test, return_code=2)
+ raise errors.StopExecution("A hook's after_test failed")
+
+ except errors.TestFailure:
+ self.logger.exception("%s marked as a failure by a hook's after_test.",
+ test.shortDescription())
+ self.report.setFailure(test, return_code=1)
+ if config.FAIL_FAST:
+ raise errors.StopExecution("A hook's after_test failed")
+
+ except:
+ self.report.setError(test)
+ raise
+
+ def _fail_test(self, test, exc_info, return_code=1):
+ """
+ Helper to record a test as a failure with the provided return
+ code.
+
+ This method should not be used if 'test' has already been
+ started; instead use TestReport.setFailure().
+ """
+
+ self.report.startTest(test)
+ test.return_code = return_code
+ self.report.addFailure(test, exc_info)
+ self.report.stopTest(test)
+
+ @staticmethod
+ def _drain_queue(queue):
+ """
+ Removes all elements from 'queue' without actually doing
+ anything to them. Necessary to unblock the main thread that is
+ waiting for 'queue' to be empty.
+ """
+
+ try:
+ while not queue.empty():
+ queue.get_nowait()
+ queue.task_done()
+ except _queue.Empty:
+ # Multiple threads may be draining the queue simultaneously, so just ignore the
+ # exception from the race between queue.empty() being false and failing to get an item.
+ pass
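Job._run() is a standard sentinel-terminated consumer: the queue is pre-filled with every test plus one None per job, so get_nowait() never finds the queue unexpectedly empty. A framework-free sketch of the same pattern:

    import threading
    try:
        import queue as queue_module   # Python 3
    except ImportError:
        import Queue as queue_module   # Python 2, as resmokelib uses

    def consumer(q, interrupt_flag):
        # Assumes the queue holds every work item plus one None sentinel
        # per consumer, so get_nowait() never raises Empty here.
        while not interrupt_flag.is_set():
            item = q.get_nowait()
            try:
                if item is None:       # sentinel: no more work
                    break
                # process 'item' here
            finally:
                q.task_done()

    q = queue_module.Queue()
    for work in ["test1.js", "test2.js", None]:
        q.put(work)
    consumer(q, threading.Event())
    q.join()  # returns immediately: task_done() was called for every item
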
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/report.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/report.py
new file mode 100644
index 00000000000..61468e1dd41
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/report.py
@@ -0,0 +1,330 @@
+"""
+Extension to the unittest.TestResult to support additional test status
+and timing information for the report.json file.
+"""
+
+from __future__ import absolute_import
+
+import copy
+import time
+import unittest
+
+from .. import config
+from .. import logging
+
+
+class TestReport(unittest.TestResult):
+ """
+ Records test status and timing information.
+ """
+
+ def __init__(self, logger, logging_config, build_id=None, build_config=None):
+ """
+ Initializes the TestReport with the buildlogger configuration.
+ """
+
+ unittest.TestResult.__init__(self)
+
+ self.logger = logger
+ self.logging_config = logging_config
+ self.build_id = build_id
+ self.build_config = build_config
+
+ self.reset()
+
+ @classmethod
+ def combine(cls, *reports):
+ """
+ Merges the results from multiple TestReport instances into one.
+
+ If the same test is present in multiple reports, then one that
+ failed or errored is preferred over one that succeeded.
+ This behavior is useful when running multiple jobs that
+ dynamically add a #dbhash# test case.
+ """
+
+ combined_report = cls(logging.loggers.EXECUTOR, {})
+ combining_time = time.time()
+
+ for report in reports:
+ if not isinstance(report, TestReport):
+ raise TypeError("reports must be a list of TestReport instances")
+
+ for test_info in report.test_infos:
+ # If the user triggers a KeyboardInterrupt exception while a test is running, then
+ # it is possible for 'test_info' to be modified by a job thread later on. We make a
+ # shallow copy in order to ensure 'num_failed' is consistent with the actual number
+ # of tests that have status equal to "failed".
+ test_info = copy.copy(test_info)
+
+ # TestReport.addXX() may not have been called.
+ if test_info.status is None or test_info.return_code is None:
+ # Mark the test as having failed if it was interrupted. It might have passed if
+ # the suite ran to completion, but we wouldn't know for sure.
+ test_info.status = "fail"
+ test_info.return_code = -2
+
+ # TestReport.stopTest() may not have been called.
+ if test_info.end_time is None:
+ # Use the current time as the time that the test finished running.
+ test_info.end_time = combining_time
+
+ combined_report.test_infos.append(test_info)
+
+ combined_report.num_dynamic += report.num_dynamic
+
+ # Recompute number of success, failures, and errors.
+ combined_report.num_succeeded = len(combined_report.get_successful())
+ combined_report.num_failed = len(combined_report.get_failed())
+ combined_report.num_errored = len(combined_report.get_errored())
+
+ return combined_report
+
+ def startTest(self, test, dynamic=False):
+ """
+ Called immediately before 'test' is run.
+ """
+
+ unittest.TestResult.startTest(self, test)
+
+ test_info = _TestInfo(test.id(), dynamic)
+ test_info.start_time = time.time()
+ self.test_infos.append(test_info)
+
+ basename = test.basename()
+ if dynamic:
+ command = "(dynamic test case)"
+ self.num_dynamic += 1
+ else:
+ command = test.as_command()
+ self.logger.info("Running %s...\n%s", basename, command)
+
+ test_id = logging.buildlogger.new_test_id(self.build_id,
+ self.build_config,
+ basename,
+ command)
+
+ if self.build_id is not None:
+ endpoint = logging.buildlogger.APPEND_TEST_LOGS_ENDPOINT % {
+ "build_id": self.build_id,
+ "test_id": test_id,
+ }
+
+ test_info.url_endpoint = "%s/%s/" % (config.BUILDLOGGER_URL.rstrip("/"),
+ endpoint.strip("/"))
+
+ self.logger.info("Writing output of %s to %s.",
+ test.shortDescription(),
+ test_info.url_endpoint)
+
+ # Set up the test-specific logger.
+ logger_name = "%s:%s" % (test.logger.name, test.short_name())
+ logger = logging.loggers.new_logger(logger_name, parent=test.logger)
+ logging.config.apply_buildlogger_test_handler(logger,
+ self.logging_config,
+ build_id=self.build_id,
+ build_config=self.build_config,
+ test_id=test_id)
+
+ self.__original_loggers[test_info.test_id] = test.logger
+ test.logger = logger
+
+ def stopTest(self, test):
+ """
+ Called immediately after 'test' has run.
+ """
+
+ unittest.TestResult.stopTest(self, test)
+
+ test_info = self._find_test_info(test)
+ test_info.end_time = time.time()
+
+ time_taken = test_info.end_time - test_info.start_time
+ self.logger.info("%s ran in %0.2f seconds.", test.basename(), time_taken)
+
+ # Asynchronously closes the buildlogger test handler to avoid having too many threads open
+ # on 32-bit systems.
+ logging.flush.close_later(test.logger)
+
+ # Restore the original logger for the test.
+ test.logger = self.__original_loggers.pop(test.id())
+
+ def addError(self, test, err):
+ """
+ Called when a non-failureException was raised during the
+ execution of 'test'.
+ """
+
+ unittest.TestResult.addError(self, test, err)
+ self.num_errored += 1
+
+ test_info = self._find_test_info(test)
+ test_info.status = "error"
+ test_info.return_code = test.return_code
+
+ def setError(self, test):
+ """
+ Used to change the outcome of an existing test to an error.
+ """
+
+ test_info = self._find_test_info(test)
+ if test_info.end_time is None:
+ raise ValueError("stopTest was not called on %s" % (test.basename()))
+
+ test_info.status = "error"
+ test_info.return_code = 2
+
+ # Recompute number of success, failures, and errors.
+ self.num_succeeded = len(self.get_successful())
+ self.num_failed = len(self.get_failed())
+ self.num_errored = len(self.get_errored())
+
+ def addFailure(self, test, err):
+ """
+ Called when a failureException was raised during the execution
+ of 'test'.
+ """
+
+ unittest.TestResult.addFailure(self, test, err)
+ self.num_failed += 1
+
+ test_info = self._find_test_info(test)
+ test_info.status = "fail"
+ test_info.return_code = test.return_code
+
+ def setFailure(self, test, return_code=1):
+ """
+ Used to change the outcome of an existing test to a failure.
+ """
+
+ test_info = self._find_test_info(test)
+ if test_info.end_time is None:
+ raise ValueError("stopTest was not called on %s" % (test.basename()))
+
+ test_info.status = "fail"
+ test_info.return_code = return_code
+
+ # Recompute number of success, failures, and errors.
+ self.num_succeeded = len(self.get_successful())
+ self.num_failed = len(self.get_failed())
+ self.num_errored = len(self.get_errored())
+
+ def addSuccess(self, test):
+ """
+ Called when 'test' executed successfully.
+ """
+
+ unittest.TestResult.addSuccess(self, test)
+ self.num_succeeded += 1
+
+ test_info = self._find_test_info(test)
+ test_info.status = "pass"
+ test_info.return_code = test.return_code
+
+ def wasSuccessful(self):
+ """
+ Returns true if all tests executed successfully.
+ """
+ return self.num_failed == self.num_errored == 0
+
+ def get_successful(self):
+ """
+ Returns the status and timing information of the tests that
+ executed successfully.
+ """
+ return [test_info for test_info in self.test_infos if test_info.status == "pass"]
+
+ def get_failed(self):
+ """
+ Returns the status and timing information of the tests that
+ raised a failureException during their execution.
+ """
+ return [test_info for test_info in self.test_infos if test_info.status == "fail"]
+
+ def get_errored(self):
+ """
+ Returns the status and timing information of the tests that
+ raised a non-failureException during their execution.
+ """
+ return [test_info for test_info in self.test_infos if test_info.status == "error"]
+
+ def as_dict(self):
+ """
+ Return the test result information as a dictionary.
+
+ Used to create the report.json file.
+ """
+
+ results = []
+ for test_info in self.test_infos:
+ # Don't distinguish between failures and errors.
+ status = "pass" if test_info.status == "pass" else "fail"
+
+ result = {
+ "test_file": test_info.test_id,
+ "status": status,
+ "exit_code": test_info.return_code,
+ "start": test_info.start_time,
+ "end": test_info.end_time,
+ "elapsed": test_info.end_time - test_info.start_time,
+ }
+
+ if test_info.url_endpoint is not None:
+ result["url"] = test_info.url_endpoint
+
+ results.append(result)
+
+ return {
+ "results": results,
+ "failures": self.num_failed + self.num_errored,
+ }
+
+ def reset(self):
+ """
+ Resets the test report back to its initial state.
+ """
+
+ self.test_infos = []
+
+ self.num_dynamic = 0
+ self.num_succeeded = 0
+ self.num_failed = 0
+ self.num_errored = 0
+
+ self.__original_loggers = {}
+
+ def _find_test_info(self, test):
+ """
+ Returns the status and timing information associated with
+ 'test'.
+ """
+
+ test_id = test.id()
+
+ # Search the list backwards to efficiently find the status and timing information of a test
+ # that was recently started.
+ for test_info in reversed(self.test_infos):
+ if test_info.test_id == test_id:
+ return test_info
+
+ raise ValueError("Details for %s not found in the report" % (test.basename()))
+
+
+class _TestInfo(object):
+ """
+ Holder for the test status and timing information.
+ """
+
+ def __init__(self, test_id, dynamic):
+ """
+ Initializes the _TestInfo instance.
+ """
+
+ self.test_id = test_id
+ self.dynamic = dynamic
+
+ self.start_time = None
+ self.end_time = None
+ self.status = None
+ self.return_code = None
+ self.url_endpoint = None
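For orientation, as_dict() above is what gets serialized into report.json. A sketch of the resulting shape for one passing test; every value is illustrative, and the "url" key appears only when a buildlogger endpoint was recorded:

    # Illustrative report.json payload, per TestReport.as_dict().
    report_json = {
        "results": [
            {
                "test_file": "jstests/example.js",  # test_info.test_id
                "status": "pass",                   # errors collapse to "fail"
                "exit_code": 0,
                "start": 1400000000.0,              # epoch seconds, time.time()
                "end": 1400000012.5,
                "elapsed": 12.5,                    # end - start
            },
        ],
        "failures": 0,                              # num_failed + num_errored
    }
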
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/suite.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/suite.py
new file mode 100644
index 00000000000..65503b85e8b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/suite.py
@@ -0,0 +1,140 @@
+"""
+Holder for a set of TestGroup instances.
+"""
+
+from __future__ import absolute_import
+
+import time
+
+from . import summary as _summary
+from . import testgroup
+from .. import selector as _selector
+
+
+class Suite(object):
+ """
+ A suite of tests.
+ """
+
+ TESTS_ORDER = ("cpp_unit_test", "cpp_integration_test", "db_test", "js_test", "mongos_test")
+
+ def __init__(self, suite_name, suite_config):
+ """
+ Initializes the suite with the specified name and configuration.
+ """
+
+ self._suite_name = suite_name
+ self._suite_config = suite_config
+
+ self.test_groups = []
+ for test_kind in Suite.TESTS_ORDER:
+ if test_kind not in suite_config["selector"]:
+ continue
+ tests = self._get_tests_for_group(test_kind)
+ test_group = testgroup.TestGroup(test_kind, tests)
+ self.test_groups.append(test_group)
+
+ self.return_code = None
+
+ self._start_time = None
+ self._end_time = None
+
+ def _get_tests_for_group(self, test_kind):
+ """
+ Returns the tests to run based on the 'test_kind'-specific
+ filtering policy.
+ """
+
+ test_info = self.get_selector_config()[test_kind]
+
+ # The mongos_test doesn't have to filter anything; the test_info is just the arguments to
+ # the mongos program to be used as the test case.
+ if test_kind == "mongos_test":
+ mongos_options = test_info # Just for easier reading.
+ if not isinstance(mongos_options, dict):
+ raise TypeError("Expected dictionary of arguments to mongos")
+ return [mongos_options]
+ elif test_kind == "cpp_integration_test":
+ tests = _selector.filter_cpp_integration_tests(**test_info)
+ elif test_kind == "cpp_unit_test":
+ tests = _selector.filter_cpp_unit_tests(**test_info)
+ elif test_kind == "db_test":
+ tests = _selector.filter_dbtests(**test_info)
+ else: # test_kind == "js_test":
+ tests = _selector.filter_jstests(**test_info)
+
+ return sorted(tests, key=str.lower)
+
+ def get_name(self):
+ """
+ Returns the name of the test suite.
+ """
+ return self._suite_name
+
+ def get_selector_config(self):
+ """
+ Returns the "selector" section of the YAML configuration.
+ """
+ return self._suite_config["selector"]
+
+ def get_executor_config(self):
+ """
+ Returns the "executor" section of the YAML configuration.
+ """
+ return self._suite_config["executor"]
+
+ def record_start(self):
+ """
+ Records the start time of the suite.
+ """
+ self._start_time = time.time()
+
+ def record_end(self):
+ """
+ Records the end time of the suite.
+
+ Sets the 'return_code' of the suite based on the return codes of
+ each of the individual test groups.
+ """
+
+ self._end_time = time.time()
+
+ # Only set 'return_code' if it hasn't been set already. It may have been set if there was
+ # an exception that happened during the execution of the suite.
+ if self.return_code is None:
+ # The return code of the suite should be 2 if any test group has a return code of 2.
+ # The return code of the suite should be 1 if any test group has a return code of 1,
+ # and none have a return code of 2. Otherwise, the return code should be 0.
+ self.return_code = max(test_group.return_code for test_group in self.test_groups)
+
+ def summarize(self, sb):
+ """
+ Appends a summary of each individual test group onto the string
+ builder 'sb'.
+ """
+
+ combined_summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
+
+ summarized_groups = []
+ for group in self.test_groups:
+ group_sb = []
+ summary = group.summarize(group_sb)
+ summarized_groups.append(" %ss: %s" % (group.test_kind, "\n ".join(group_sb)))
+
+ combined_summary = _summary.combine(combined_summary, summary)
+
+ if combined_summary.num_run == 0:
+ sb.append("Suite did not run any tests.")
+ return
+
+ # Override the 'time_taken' attribute of the summary if we have more accurate timing
+ # information available.
+ if self._start_time is not None and self._end_time is not None:
+ time_taken = self._end_time - self._start_time
+ combined_summary = combined_summary._replace(time_taken=time_taken)
+
+ sb.append("%d test(s) ran in %0.2f seconds"
+ " (%d succeeded, %d were skipped, %d failed, %d errored)" % combined_summary)
+
+ for summary_text in summarized_groups:
+ sb.append(summary_text)
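record_end() folds the group return codes with max(), which is exactly the 2-over-1-over-0 severity ordering described in the comment. A quick check of that behavior:

    # max() over return codes {0, 1, 2} picks the most severe outcome:
    assert max([0, 0, 0]) == 0  # every group clean
    assert max([0, 1, 0]) == 1  # a failure, but no group errored
    assert max([1, 2, 0]) == 2  # any group with return code 2 dominates
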
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/summary.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/summary.py
new file mode 100644
index 00000000000..1dae9ca81d6
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/summary.py
@@ -0,0 +1,22 @@
+"""
+Holder for summary information about a test group or suite.
+"""
+
+from __future__ import absolute_import
+
+import collections
+
+
+
+Summary = collections.namedtuple("Summary", ["num_run", "time_taken", "num_succeeded",
+ "num_skipped", "num_failed", "num_errored"])
+
+
+def combine(summary1, summary2):
+ """
+ Returns a summary representing the sum of 'summary1' and 'summary2'.
+ """
+ args = []
+ for i in xrange(len(Summary._fields)):
+ args.append(summary1[i] + summary2[i])
+ return Summary._make(args)
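Since Summary is a namedtuple of six counters, combine() is just element-wise addition. A quick sketch of its use:

    s1 = Summary(num_run=3, time_taken=1.5, num_succeeded=2,
                 num_skipped=0, num_failed=1, num_errored=0)
    s2 = Summary(num_run=2, time_taken=0.5, num_succeeded=2,
                 num_skipped=0, num_failed=0, num_errored=0)
    print(combine(s1, s2))
    # -> Summary(num_run=5, time_taken=2.0, num_succeeded=4,
    #            num_skipped=0, num_failed=1, num_errored=0)
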
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/testcases.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/testcases.py
new file mode 100644
index 00000000000..3b068c3b80f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/testcases.py
@@ -0,0 +1,407 @@
+"""
+Subclasses of unittest.TestCase.
+"""
+
+from __future__ import absolute_import
+
+import os
+import os.path
+import shutil
+import unittest
+
+from .. import config
+from .. import core
+from .. import logging
+from .. import utils
+
+
+def make_test_case(test_kind, *args, **kwargs):
+ """
+ Factory function for creating TestCase instances.
+ """
+
+ if test_kind not in _TEST_CASES:
+ raise ValueError("Unknown test kind '%s'" % (test_kind))
+ return _TEST_CASES[test_kind](*args, **kwargs)
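+
+# Illustrative sketch (not part of the original module; the test path below is
+# hypothetical):
+#
+#   test_case = make_test_case("js_test", logger, "jstests/core/example.js")
+#   test_case.configure(fixture)
+#   test_case.run_test()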
+
+
+class TestCase(unittest.TestCase):
+ """
+ A test case to execute.
+ """
+
+ def __init__(self, logger, test_kind, test_name):
+ """
+ Initializes the TestCase with the name of the test.
+ """
+
+ unittest.TestCase.__init__(self, methodName="run_test")
+
+ if not isinstance(logger, logging.Logger):
+ raise TypeError("logger must be a Logger instance")
+
+ if not isinstance(test_kind, basestring):
+ raise TypeError("test_kind must be a string")
+
+ if not isinstance(test_name, basestring):
+ raise TypeError("test_name must be a string")
+
+ self.logger = logger
+ self.test_kind = test_kind
+ self.test_name = test_name
+
+ self.fixture = None
+ self.return_code = None
+
+ def long_name(self):
+ """
+ Returns the path to the test, relative to the current working directory.
+ """
+ return os.path.relpath(self.test_name)
+
+ def basename(self):
+ """
+ Returns the basename of the test.
+ """
+ return os.path.basename(self.test_name)
+
+ def short_name(self):
+ """
+ Returns the basename of the test without the file extension.
+ """
+ return os.path.splitext(self.basename())[0]
+
+ def id(self):
+ return self.test_name
+
+ def shortDescription(self):
+ return "%s %s" % (self.test_kind, self.test_name)
+
+ def configure(self, fixture):
+ """
+ Stores 'fixture' as an attribute for later use during execution.
+ """
+ self.fixture = fixture
+
+ def run_test(self):
+ """
+ Runs the specified test.
+ """
+ raise NotImplementedError("run_test must be implemented by TestCase subclasses")
+
+ def as_command(self):
+ """
+ Returns the command invocation used to run the test.
+ """
+ return self._make_process().as_command()
+
+ def _execute(self, process):
+ """
+ Runs the specified process.
+ """
+
+ self.logger.info("Starting %s...\n%s", self.shortDescription(), process.as_command())
+ process.start()
+ self.logger.info("%s started with pid %s.", self.shortDescription(), process.pid)
+
+ self.return_code = process.wait()
+ if self.return_code != 0:
+ raise self.failureException("%s failed" % (self.shortDescription()))
+
+ self.logger.info("%s finished.", self.shortDescription())
+
+ def _make_process(self):
+ """
+ Returns a new Process instance that could be used to run the
+ test or log the command.
+ """
+ raise NotImplementedError("_make_process must be implemented by TestCase subclasses")
+
+
+class CPPUnitTestCase(TestCase):
+ """
+ A C++ unit test to execute.
+ """
+
+ def __init__(self,
+ logger,
+ program_executable,
+ program_options=None):
+ """
+ Initializes the CPPUnitTestCase with the executable to run.
+ """
+
+ TestCase.__init__(self, logger, "Program", program_executable)
+
+ self.program_executable = program_executable
+ self.program_options = utils.default_if_none(program_options, {}).copy()
+
+ def run_test(self):
+ try:
+ program = self._make_process()
+ self._execute(program)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running C++ unit test %s.", self.basename())
+ raise
+
+ def _make_process(self):
+ return core.process.Process(self.logger,
+ [self.program_executable],
+ **self.program_options)
+
+
+class CPPIntegrationTestCase(TestCase):
+ """
+ A C++ integration test to execute.
+ """
+
+ def __init__(self,
+ logger,
+ program_executable,
+ program_options=None):
+ """
+ Initializes the CPPIntegrationTestCase with the executable to run.
+ """
+
+ TestCase.__init__(self, logger, "Program", program_executable)
+
+ self.program_executable = program_executable
+ self.program_options = utils.default_if_none(program_options, {}).copy()
+
+ def configure(self, fixture):
+ TestCase.configure(self, fixture)
+
+ self.program_options["connectionString"] = self.fixture.get_connection_string()
+
+ def run_test(self):
+ try:
+ program = self._make_process()
+ self._execute(program)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running C++ integration test %s.",
+ self.basename())
+ raise
+
+ def _make_process(self):
+ return core.programs.generic_program(self.logger,
+ [self.program_executable],
+ **self.program_options)
+
+
+class DBTestCase(TestCase):
+ """
+ A dbtest to execute.
+ """
+
+ def __init__(self,
+ logger,
+ dbtest_suite,
+ dbtest_executable=None,
+ dbtest_options=None):
+ """
+ Initializes the DBTestCase with the dbtest suite to run.
+ """
+
+ TestCase.__init__(self, logger, "DBTest", dbtest_suite)
+
+ # Command line options override the YAML configuration.
+ self.dbtest_executable = utils.default_if_none(config.DBTEST_EXECUTABLE, dbtest_executable)
+
+ self.dbtest_suite = dbtest_suite
+ self.dbtest_options = utils.default_if_none(dbtest_options, {}).copy()
+
+ def configure(self, fixture):
+ TestCase.configure(self, fixture)
+
+ # If a dbpath was specified, then use it as a container for all other dbpaths.
+ dbpath_prefix = self.dbtest_options.pop("dbpath", DBTestCase._get_dbpath_prefix())
+ dbpath = os.path.join(dbpath_prefix, "job%d" % (self.fixture.job_num), "unittest")
+ self.dbtest_options["dbpath"] = dbpath
+
+ shutil.rmtree(dbpath, ignore_errors=True)
+
+ try:
+ os.makedirs(dbpath)
+ except os.error:
+ # Directory already exists.
+ pass
+
+ def run_test(self):
+ try:
+ dbtest = self._make_process()
+ self._execute(dbtest)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running dbtest suite %s.", self.basename())
+ raise
+
+ def _make_process(self):
+ return core.programs.dbtest_program(self.logger,
+ executable=self.dbtest_executable,
+ suites=[self.dbtest_suite],
+ **self.dbtest_options)
+
+ @staticmethod
+ def _get_dbpath_prefix():
+ """
+ Returns the prefix of the dbpath to use for the dbtest
+ executable.
+
+ Order of preference:
+ 1. The --dbpathPrefix specified at the command line.
+ 2. Value of the TMPDIR environment variable.
+ 3. Value of the TEMP environment variable.
+ 4. Value of the TMP environment variable.
+ 5. The /tmp directory.
+ """
+
+ if config.DBPATH_PREFIX is not None:
+ return config.DBPATH_PREFIX
+
+ for env_var in ("TMPDIR", "TEMP", "TMP"):
+ if env_var in os.environ:
+ return os.environ[env_var]
+ return os.path.normpath("/tmp")
+
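+    # Illustrative note (not part of the original file): configure() joins the
+    # prefix with "job<N>/unittest", so with TMPDIR=/scratch and job_num 3 the
+    # dbtest would run against /scratch/job3/unittest.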
+
+class JSTestCase(TestCase):
+ """
+ A jstest to execute.
+ """
+
+ def __init__(self,
+ logger,
+ js_filename,
+ shell_executable=None,
+ shell_options=None):
+ "Initializes the JSTestCase with the JS file to run."
+
+ TestCase.__init__(self, logger, "JSTest", js_filename)
+
+ # Command line options override the YAML configuration.
+ self.shell_executable = utils.default_if_none(config.MONGO_EXECUTABLE, shell_executable)
+
+ self.js_filename = js_filename
+ self.shell_options = utils.default_if_none(shell_options, {}).copy()
+
+ def configure(self, fixture):
+ TestCase.configure(self, fixture)
+
+ if self.fixture.port is not None:
+ self.shell_options["port"] = self.fixture.port
+
+ global_vars = self.shell_options.get("global_vars", {}).copy()
+ data_dir = self._get_data_dir(global_vars)
+
+ # Set MongoRunner.dataPath if overridden at command line or not specified in YAML.
+ if config.DBPATH_PREFIX is not None or "MongoRunner.dataPath" not in global_vars:
+ # dataPath property is the dataDir property with a trailing slash.
+ data_path = os.path.join(data_dir, "")
+ else:
+ data_path = global_vars["MongoRunner.dataPath"]
+
+ global_vars["MongoRunner.dataDir"] = data_dir
+ global_vars["MongoRunner.dataPath"] = data_path
+
+ test_data = global_vars.get("TestData", {}).copy()
+ test_data["minPort"] = core.network.PortAllocator.min_test_port(fixture.job_num)
+ test_data["maxPort"] = core.network.PortAllocator.max_test_port(fixture.job_num)
+
+ global_vars["TestData"] = test_data
+ self.shell_options["global_vars"] = global_vars
+
+ shutil.rmtree(data_dir, ignore_errors=True)
+
+ try:
+ os.makedirs(data_dir)
+ except os.error:
+ # Directory already exists.
+ pass
+
+ def _get_data_dir(self, global_vars):
+ """
+ Returns the value that the mongo shell should set for the
+ MongoRunner.dataDir property.
+ """
+
+ # Command line options override the YAML configuration.
+ data_dir_prefix = utils.default_if_none(config.DBPATH_PREFIX,
+ global_vars.get("MongoRunner.dataDir"))
+ data_dir_prefix = utils.default_if_none(data_dir_prefix, config.DEFAULT_DBPATH_PREFIX)
+ return os.path.join(data_dir_prefix,
+ "job%d" % (self.fixture.job_num),
+ config.MONGO_RUNNER_SUBDIR)
+
+ def run_test(self):
+ try:
+ shell = self._make_process()
+ self._execute(shell)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running jstest %s.", self.basename())
+ raise
+
+ def _make_process(self):
+ return core.programs.mongo_shell_program(self.logger,
+ executable=self.shell_executable,
+ filename=self.js_filename,
+ **self.shell_options)
+
+
+class MongosTestCase(TestCase):
+ """
+ A TestCase which runs a mongos binary with the given parameters.
+ """
+
+ def __init__(self,
+ logger,
+ mongos_options):
+ """
+ Initializes the mongos test and saves the options.
+ """
+
+ self.mongos_executable = utils.default_if_none(config.MONGOS_EXECUTABLE,
+ config.DEFAULT_MONGOS_EXECUTABLE)
+ # Use the executable as the test name.
+ TestCase.__init__(self, logger, "mongos", self.mongos_executable)
+ self.options = mongos_options.copy()
+
+ def configure(self, fixture):
+ """
+ Ensures the --test option is present in the mongos options.
+ """
+
+ TestCase.configure(self, fixture)
+ # Always specify test option to ensure the mongos will terminate.
+ if "test" not in self.options:
+ self.options["test"] = ""
+
+ def run_test(self):
+ try:
+ mongos = self._make_process()
+ self._execute(mongos)
+ except self.failureException:
+ raise
+ except:
+ self.logger.exception("Encountered an error running %s.", mongos.as_command())
+ raise
+
+ def _make_process(self):
+ return core.programs.mongos_program(self.logger,
+ executable=self.mongos_executable,
+ **self.options)
+
+
+_TEST_CASES = {
+ "cpp_unit_test": CPPUnitTestCase,
+ "cpp_integration_test": CPPIntegrationTestCase,
+ "db_test": DBTestCase,
+ "js_test": JSTestCase,
+ "mongos_test": MongosTestCase,
+}
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/testgroup.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/testgroup.py
new file mode 100644
index 00000000000..688d56c296d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/testing/testgroup.py
@@ -0,0 +1,132 @@
+"""
+Holder for the (test kind, list of tests) pair with additional metadata
+about when and how they execute.
+"""
+
+from __future__ import absolute_import
+
+import time
+
+from . import summary as _summary
+
+
+class TestGroup(object):
+ """
+ A class to encapsulate the results of running a group of tests
+ of a particular kind (e.g. C++ unit tests, dbtests, jstests).
+ """
+
+ def __init__(self, test_kind, tests):
+ """
+ Initializes the TestGroup with a list of tests.
+ """
+
+ self.test_kind = test_kind
+ self.tests = tests
+
+ self.return_code = None # Set by the executor.
+
+ self._start_times = []
+ self._end_times = []
+ self._reports = []
+
+ def get_reports(self):
+ """
+ Returns the list of reports.
+ """
+ return self._reports
+
+ def record_start(self):
+ """
+ Records the start time of an execution.
+ """
+ self._start_times.append(time.time())
+
+ def record_end(self, report):
+ """
+ Records the end time of an execution.
+ """
+ self._end_times.append(time.time())
+ self._reports.append(report)
+
+ def summarize_latest(self, sb):
+ """
+ Returns a summary of the latest execution of the group and appends a
+ summary of that execution onto the string builder 'sb'.
+ """
+ return self._summarize_execution(-1, sb)
+
+ def summarize(self, sb):
+ """
+ Returns a summary of the execution(s) of the group and appends a
+ summary of the execution(s) onto the string builder 'sb'.
+ """
+
+ if not self._reports:
+ sb.append("No tests ran.")
+ return _summary.Summary(0, 0.0, 0, 0, 0, 0)
+
+ if len(self._reports) == 1:
+ return self._summarize_execution(0, sb)
+
+ return self._summarize_repeated(sb)
+
+ def _summarize_repeated(self, sb):
+ """
+ Returns the summary information of all executions and appends
+ each execution's summary onto the string builder 'sb'. Also
+ appends information of how many repetitions there were.
+ """
+
+ num_iterations = len(self._reports)
+ total_time_taken = self._end_times[-1] - self._start_times[0]
+ sb.append("Executed %d times in %0.2f seconds:" % (num_iterations, total_time_taken))
+
+ combined_summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
+ for iteration in xrange(num_iterations):
+ # Summarize each execution as a bulleted list of results.
+ bulleter_sb = []
+ summary = self._summarize_execution(iteration, bulleter_sb)
+ combined_summary = _summary.combine(combined_summary, summary)
+
+ for (i, line) in enumerate(bulleter_sb):
+ # Only bullet first line, indent others.
+ prefix = "* " if i == 0 else " "
+ sb.append(prefix + line)
+
+ return combined_summary
+
+ def _summarize_execution(self, iteration, sb):
+ """
+ Returns the summary information of the execution given by
+ 'iteration' and appends a summary of that execution onto the
+ string builder 'sb'.
+ """
+
+ report = self._reports[iteration]
+ time_taken = self._end_times[iteration] - self._start_times[iteration]
+
+ num_run = report.num_succeeded + report.num_errored + report.num_failed
+ num_skipped = len(self.tests) + report.num_dynamic - num_run
+
+ if report.num_succeeded == num_run and num_skipped == 0:
+ sb.append("All %d test(s) passed in %0.2f seconds." % (num_run, time_taken))
+ return _summary.Summary(num_run, time_taken, num_run, 0, 0, 0)
+
+ summary = _summary.Summary(num_run, time_taken, report.num_succeeded, num_skipped,
+ report.num_failed, report.num_errored)
+
+ sb.append("%d test(s) ran in %0.2f seconds"
+ " (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)
+
+ if report.num_failed > 0:
+ sb.append("The following tests failed (with exit code):")
+ for test_info in report.get_failed():
+ sb.append(" %s (%d)" % (test_info.test_id, test_info.return_code))
+
+ if report.num_errored > 0:
+ sb.append("The following tests had errors:")
+ for test_info in report.get_errored():
+ sb.append(" %s" % (test_info.test_id))
+
+ return summary
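+
+# Illustrative note (not part of the original file): with 10 tests in the
+# group, 1 dynamically-added test, and a report of 7 succeeded / 1 failed /
+# 1 errored, num_run = 7 + 1 + 1 = 9 and num_skipped = 10 + 1 - 9 = 2.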
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/__init__.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/__init__.py
new file mode 100644
index 00000000000..df387cc3323
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/__init__.py
@@ -0,0 +1,88 @@
+"""
+Helper functions.
+"""
+
+from __future__ import absolute_import
+
+import os.path
+
+import pymongo
+import yaml
+
+
+def default_if_none(value, default):
+ return value if value is not None else default
+
+
+def is_string_list(lst):
+ """
+ Returns true if 'lst' is a list of strings, and false otherwise.
+ """
+ return isinstance(lst, list) and all(isinstance(x, basestring) for x in lst)
+
+
+def is_string_set(value):
+ """
+ Returns true if 'value' is a set of strings, and false otherwise.
+ """
+ return isinstance(value, set) and all(isinstance(x, basestring) for x in value)
+
+
+def is_js_file(filename):
+ """
+ Returns true if 'filename' ends in .js, and false otherwise.
+ """
+ return os.path.splitext(filename)[1] == ".js"
+
+
+def is_yaml_file(filename):
+ """
+ Returns true if 'filename' ends in .yml or .yaml, and false
+ otherwise.
+ """
+ return os.path.splitext(filename)[1] in (".yaml", ".yml")
+
+
+def load_yaml_file(filename):
+ """
+ Attempts to read 'filename' as YAML.
+ """
+ try:
+ with open(filename, "r") as fp:
+ return yaml.safe_load(fp)
+ except yaml.YAMLError as err:
+ raise ValueError("File '%s' contained invalid YAML: %s" % (filename, err))
+
+
+def dump_yaml(value):
+ """
+ Returns 'value' formatted as YAML.
+ """
+ # Use block (indented) style for formatting YAML.
+ return yaml.safe_dump(value, default_flow_style=False).rstrip()
+
+def load_yaml(value):
+ """
+ Attempts to parse 'value' as YAML.
+ """
+ try:
+ return yaml.safe_load(value)
+ except yaml.YAMLError as err:
+ raise ValueError("Attempted to parse invalid YAML value '%s': %s" % (value, err))
+
+
+def new_mongo_client(port, read_preference=pymongo.ReadPreference.PRIMARY, timeout_millis=30000):
+ """
+ Returns a pymongo.MongoClient connected on 'port' with a read
+ preference of 'read_preference'.
+
+ The PyMongo driver will wait up to 'timeout_millis' milliseconds
+ before concluding that the server is unavailable.
+ """
+
+ kwargs = {"connectTimeoutMS": timeout_millis}
+ if pymongo.version_tuple[0] >= 3:
+ kwargs["serverSelectionTimeoutMS"] = timeout_millis
+ kwargs["connect"] = True
+
+ return pymongo.MongoClient(port=port, read_preference=read_preference, **kwargs)
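+
+# Illustrative sketch (not part of the original module; the port is
+# hypothetical):
+#
+#   client = new_mongo_client(port=20000, timeout_millis=5000)
+#   client.admin.command("ping")  # raises if no server responds in time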
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/globstar.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/globstar.py
new file mode 100644
index 00000000000..644ebfe3e38
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/globstar.py
@@ -0,0 +1,202 @@
+"""
+Filename globbing utility.
+"""
+
+from __future__ import absolute_import
+
+import glob as _glob
+import os
+import os.path
+import re
+
+
+_GLOBSTAR = "**"
+_CONTAINS_GLOB_PATTERN = re.compile("[*?[]")
+
+
+def is_glob_pattern(s):
+ """
+ Returns true if 's' represents a glob pattern, and false otherwise.
+ """
+
+ # Copied from glob.has_magic().
+ return _CONTAINS_GLOB_PATTERN.search(s) is not None
+
+
+def glob(globbed_pathname):
+ """
+ Return a list of pathnames matching the 'globbed_pathname' pattern.
+
+ In addition to containing simple shell-style wildcards a la fnmatch,
+ the pattern may also contain globstars ("**"), which is recursively
+ expanded to match zero or more subdirectories.
+ """
+
+ return list(iglob(globbed_pathname))
+
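+# Illustrative note (not part of the original module; the layout below is
+# hypothetical): since "**" matches zero or more directory levels,
+# glob("jstests/**/basic.js") would yield both "jstests/basic.js" and
+# "jstests/repl/v1/basic.js" if those files exist.
+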
+
+def iglob(globbed_pathname):
+ """
+ Emit a list of pathnames matching the 'globbed_pathname' pattern.
+
+ In addition to containing simple shell-style wildcards a la fnmatch,
+ the pattern may also contain globstars ("**"), which is recursively
+ expanded to match zero or more subdirectories.
+ """
+
+ parts = _split_path(globbed_pathname)
+ parts = _canonicalize(parts)
+
+ index = _find_globstar(parts)
+ if index == -1:
+ for pathname in _glob.iglob(globbed_pathname):
+ # Normalize 'pathname' so exact string comparison can be used later.
+ yield os.path.normpath(pathname)
+ return
+
+ # **, **/, or **/a
+ if index == 0:
+ expand = _expand_curdir
+
+ # a/** or a/**/ or a/**/b
+ else:
+ expand = _expand
+
+ prefix_parts = parts[:index]
+ suffix_parts = parts[index + 1:]
+
+ prefix = os.path.join(*prefix_parts) if prefix_parts else os.curdir
+ suffix = os.path.join(*suffix_parts) if suffix_parts else ""
+
+ for (kind, path) in expand(prefix):
+ if not suffix_parts:
+ yield path
+
+ # Avoid following symlinks to avoid an infinite loop
+ elif suffix_parts and kind == "dir" and not os.path.islink(path):
+ path = os.path.join(path, suffix)
+ for pathname in iglob(path):
+ yield pathname
+
+
+def _split_path(pathname):
+ """
+ Return 'pathname' as a list of path components.
+ """
+
+ parts = []
+
+ while True:
+ (dirname, basename) = os.path.split(pathname)
+ parts.append(basename)
+ if pathname == dirname:
+ parts.append(dirname)
+ break
+ if not dirname:
+ break
+ pathname = dirname
+
+ parts.reverse()
+ return parts
+
+
+def _canonicalize(parts):
+ """
+ Return a copy of 'parts' with consecutive "**"s coalesced.
+ Raise a ValueError for unsupported uses of "**".
+ """
+
+ res = []
+
+ prev_was_globstar = False
+ for p in parts:
+ if p == _GLOBSTAR:
+ # Skip consecutive **'s
+ if not prev_was_globstar:
+ prev_was_globstar = True
+ res.append(p)
+ elif _GLOBSTAR in p: # a/b**/c or a/**b/c
+ raise ValueError("Can only specify glob patterns of the form a/**/b")
+ else:
+ prev_was_globstar = False
+ res.append(p)
+
+ return res
+
+
+def _find_globstar(parts):
+ """
+ Return the index of the first occurrence of "**" in 'parts'.
+ Return -1 if "**" is not found in the list.
+ """
+
+ for (i, p) in enumerate(parts):
+ if p == _GLOBSTAR:
+ return i
+ return -1
+
+
+def _list_dir(pathname):
+ """
+ Return a pair of the subdirectory names and filenames immediately
+ contained within the 'pathname' directory.
+
+ If 'pathname' does not exist, then None is returned.
+ """
+
+ try:
+ (_root, dirs, files) = os.walk(pathname).next()
+ return (dirs, files)
+ except StopIteration:
+ return None # 'pathname' directory does not exist
+
+
+def _expand(pathname):
+ """
+ Emit tuples of the form ("dir", dirname) and ("file", filename)
+ of all directories and files contained within the 'pathname' directory.
+ """
+
+ res = _list_dir(pathname)
+ if res is None:
+ return
+
+ (dirs, files) = res
+
+    # Zero expansion: "**" may match zero directories, so emit 'pathname' itself.
+ if os.path.basename(pathname):
+ yield ("dir", os.path.join(pathname, ""))
+
+ for f in files:
+ path = os.path.join(pathname, f)
+ yield ("file", path)
+
+ for d in dirs:
+ path = os.path.join(pathname, d)
+ for x in _expand(path):
+ yield x
+
+
+def _expand_curdir(pathname):
+ """
+ Emit tuples of the form ("dir", dirname) and ("file", filename)
+ of all directories and files contained within the 'pathname' directory.
+
+ The returned pathnames omit a "./" prefix.
+ """
+
+ res = _list_dir(pathname)
+ if res is None:
+ return
+
+ (dirs, files) = res
+
+    # Zero expansion: "**" may match zero directories, so emit the current directory.
+ yield ("dir", "")
+
+ for f in files:
+ yield ("file", f)
+
+ for d in dirs:
+ for x in _expand(d):
+ yield x
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/jscomment.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/jscomment.py
new file mode 100644
index 00000000000..18da7885820
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/jscomment.py
@@ -0,0 +1,78 @@
+"""
+Utility for parsing JS comments.
+"""
+
+from __future__ import absolute_import
+
+import re
+
+import yaml
+
+
+# TODO: use a more robust regular expression for matching tags
+_JSTEST_TAGS_RE = re.compile(r".*@tags\s*:\s*(\[[^\]]*\])", re.DOTALL)
+
+
+def get_tags(pathname):
+ """
+ Returns the list of tags found in the (JS-style) comments of
+ 'pathname'. The definition can span multiple lines, use unquoted,
+ single-quoted, or double-quoted strings, and use the '#' character
+ for inline commenting.
+
+ e.g.
+
+ /**
+ * @tags: [ "tag1", # double quoted
+ * 'tag2' # single quoted
+ * # line with only a comment
+ * , tag3 # no quotes
+ * tag4, # trailing comma
+ * ]
+ */
+ """
+
+ with open(pathname) as fp:
+ match = _JSTEST_TAGS_RE.match(fp.read())
+ if match:
+ try:
+ # TODO: it might be worth supporting the block (indented) style of YAML lists in
+ # addition to the flow (bracketed) style
+ tags = yaml.safe_load(_strip_jscomments(match.group(1)))
+                if not isinstance(tags, list) or not all(isinstance(tag, basestring) for tag in tags):
+ raise TypeError("Expected a list of string tags, but got '%s'" % (tags))
+ return tags
+ except yaml.YAMLError as err:
+ raise ValueError("File '%s' contained invalid tags (expected YAML): %s"
+ % (pathname, err))
+
+ return []
+
+
+def _strip_jscomments(s):
+ """
+ Given a string 's' that represents the contents after the "@tags:"
+ annotation in the JS file, this function returns a string that can
+ be converted to YAML.
+
+ e.g.
+
+ [ "tag1", # double quoted
+ * 'tag2' # single quoted
+ * # line with only a comment
+ * , tag3 # no quotes
+ * tag4, # trailing comma
+ * ]
+
+    If //-style JS comments were used, then the example remains the
+    same except that the '*' character is replaced by '//'.
+ """
+
+ yaml_lines = []
+
+ for line in s.splitlines():
+ # Remove leading whitespace and symbols that commonly appear in JS comments.
+ line = line.lstrip("\t ").lstrip("*/")
+ yaml_lines.append(line)
+
+ return "\n".join(yaml_lines)
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/queue.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/queue.py
new file mode 100644
index 00000000000..80da5e2cc66
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/queue.py
@@ -0,0 +1,52 @@
+"""
+Extension to the Queue.Queue class.
+
+Added support for the join() method to take a timeout. This is necessary
+in order for KeyboardInterrupt exceptions to get propagated.
+
+See https://bugs.python.org/issue1167930 for more details.
+"""
+
+from __future__ import absolute_import
+
+import Queue
+import time
+
+
+# Exception that is raised when get_nowait() is called on an empty Queue.
+Empty = Queue.Empty
+
+
+class Queue(Queue.Queue):
+ """
+ A multi-producer, multi-consumer queue.
+ """
+
+ def join(self, timeout=None):
+ """
+ Wait until all items in the queue have been retrieved and processed,
+ or until 'timeout' seconds have passed.
+
+ The count of unfinished tasks is incremented whenever an item is added
+ to the queue. The count is decremented whenever task_done() is called
+ to indicate that all work on the retrieved item was completed.
+
+ When the number of unfinished tasks reaches zero, True is returned.
+ If the number of unfinished tasks remains nonzero after 'timeout'
+ seconds have passed, then False is returned.
+ """
+ with self.all_tasks_done:
+ if timeout is None:
+ while self.unfinished_tasks:
+ self.all_tasks_done.wait()
+ elif timeout < 0:
+ raise ValueError("timeout must be a nonnegative number")
+ else:
+ # Pass timeout down to lock acquisition
+ deadline = time.time() + timeout
+ while self.unfinished_tasks:
+ remaining = deadline - time.time()
+ if remaining <= 0.0:
+ return False
+ self.all_tasks_done.wait(remaining)
+ return True
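+
+# Illustrative sketch (not part of the original module): polling join() with a
+# timeout keeps the waiting thread responsive to KeyboardInterrupt.
+#
+#   q = Queue()
+#   ... producers q.put() items; consumers call q.task_done() ...
+#   while not q.join(timeout=1.0):
+#       pass  # interrupts can be delivered between wait() calls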
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/timer.py b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/timer.py
new file mode 100644
index 00000000000..80531d5db5c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/resmokelib/utils/timer.py
@@ -0,0 +1,125 @@
+"""
+Alternative to the threading.Timer class.
+
+Enables a timer to be restarted without needing to construct a new thread
+each time. This is necessary to execute periodic actions, e.g. flushing
+log messages to buildlogger, while avoiding errors related to "can't start
+new thread" that would otherwise occur on Windows.
+"""
+
+from __future__ import absolute_import
+
+import threading
+
+
+class AlarmClock(threading.Thread):
+ """
+ Calls a function after a specified number of seconds.
+ """
+
+ def __init__(self, interval, func, args=None, kwargs=None):
+ """
+ Initializes the timer with a function to periodically execute.
+ """
+
+ threading.Thread.__init__(self)
+
+ # A non-dismissed timer should not prevent the program from exiting
+ self.daemon = True
+
+ self.interval = interval
+ self.func = func
+ self.args = args if args is not None else []
+ self.kwargs = kwargs if kwargs is not None else {}
+
+ self.lock = threading.Lock()
+ self.cond = threading.Condition(self.lock)
+
+ self.snoozed = False # canceled for one execution
+ self.dismissed = False # canceled for all time
+ self.restarted = False
+
+ def dismiss(self):
+ """
+ Disables the timer.
+ """
+
+ with self.lock:
+ self.dismissed = True
+ self.cond.notify_all()
+
+ self.join() # Tidy up the started thread.
+
+ cancel = dismiss # Expose API compatible with that of threading.Timer.
+
+ def snooze(self):
+ """
+ Skips the next execution of 'func' if it has not already started.
+ """
+
+ with self.lock:
+ if self.dismissed:
+ raise ValueError("Timer cannot be snoozed if it has been dismissed")
+
+ self.snoozed = True
+ self.restarted = False
+ self.cond.notify_all()
+
+ def reset(self):
+ """
+ Restarts the timer, causing it to wait 'interval' seconds before calling
+ 'func' again.
+ """
+
+ with self.lock:
+ if self.dismissed:
+ raise ValueError("Timer cannot be reset if it has been dismissed")
+
+ if not self.snoozed:
+ raise ValueError("Timer cannot be reset if it has not been snoozed")
+
+ self.restarted = True
+ self.cond.notify_all()
+
+ def run(self):
+ """
+ Repeatedly calls 'func' with a delay of 'interval' seconds between executions.
+
+ If the timer is snoozed before 'func' is called, then it waits to be reset.
+ After it has been reset, the timer will again wait 'interval' seconds and
+ then try to call 'func'.
+
+ If the timer is dismissed, then no subsequent executions of 'func' are made.
+ """
+
+ while True:
+ with self.lock:
+ if self.dismissed:
+ return
+
+ # Wait for the specified amount of time.
+ self.cond.wait(self.interval)
+
+ if self.dismissed:
+ return
+
+ # If the timer was snoozed, then it should wait to be reset.
+ if self.snoozed:
+ while not self.restarted:
+ self.cond.wait()
+
+ if self.dismissed:
+ return
+
+ self.restarted = False
+ self.snoozed = False
+ continue
+
+ # Execute the function after the lock has been released to prevent potential deadlocks
+ # with the invoked function.
+ self.func(*self.args, **self.kwargs)
+
+ # Reacquire the lock.
+ with self.lock:
+ # Ignore snoozes that took place while the function was being executed.
+ self.snoozed = False
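+
+# Illustrative sketch (not part of the original module; 'handler' is
+# hypothetical): flush a log handler every 10 seconds without spawning a new
+# thread per flush.
+#
+#   alarm = AlarmClock(10, handler.flush)
+#   alarm.start()
+#   alarm.snooze()   # skip the next firing
+#   alarm.reset()    # re-arm the 10 second countdown
+#   alarm.dismiss()  # stop the timer thread for good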
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/setup_multiversion_mongodb.py b/src/mongo/gotools/test/qa-tests/buildscripts/setup_multiversion_mongodb.py
new file mode 100755
index 00000000000..df6adc64ac2
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/setup_multiversion_mongodb.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+
+import re
+import sys
+import os
+import tempfile
+import urllib2
+import subprocess
+import tarfile
+import zipfile
+import shutil
+import errno
+# To ensure it exists on the system
+import gzip
+import argparse
+
+#
+# Useful script for installing multiple versions of MongoDB on a machine
+# Only really tested/works on Linux.
+#
+
+def version_tuple(version):
+ """Returns a version tuple that can be used for numeric sorting
+ of version strings such as '2.6.0-rc1' and '2.4.0'"""
+
+ RC_OFFSET = -100
+ version_parts = re.split(r'\.|-', version[0])
+
+ if version_parts[-1].startswith("rc"):
+ rc_part = version_parts.pop()
+ rc_part = rc_part.split('rc')[1]
+
+ # RC versions are weighted down to allow future RCs and general
+ # releases to be sorted in ascending order (e.g., 2.6.0-rc1,
+ # 2.6.0-rc2, 2.6.0).
+ version_parts.append(int(rc_part) + RC_OFFSET)
+ else:
+ # Non-RC releases have an extra 0 appended so version tuples like
+ # (2, 6, 0, -100) and (2, 6, 0, 0) sort in ascending order.
+ version_parts.append(0)
+
+ return tuple(map(int, version_parts))
+
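+# Illustrative note (not part of the original file):
+#   version_tuple(("2.6.0-rc1", url)) -> (2, 6, 0, -99)
+#   version_tuple(("2.6.0", url))     -> (2, 6, 0, 0)
+# so release candidates sort before the corresponding general release.
+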
+class MultiVersionDownloaderBase:
+
+ def download_version(self, version):
+
+ try:
+ os.makedirs(self.install_dir)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST and os.path.isdir(self.install_dir):
+ pass
+ else: raise
+
+ url, full_version = self.gen_url(version)
+
+ # this extracts the filename portion of the URL, without the extension.
+        # for example: http://downloads.mongodb.org/osx/mongodb-osx-x86_64-2.4.12.tgz
+ # extract_dir will become mongodb-osx-x86_64-2.4.12
+ extract_dir = url.split("/")[-1][:-4]
+
+ # only download if we don't already have the directory
+ already_downloaded = os.path.isdir(os.path.join( self.install_dir, extract_dir))
+ if already_downloaded:
+ print "Skipping download for version %s (%s) since the dest already exists '%s'" \
+ % (version, full_version, extract_dir)
+ else:
+ temp_dir = tempfile.mkdtemp()
+ temp_file = tempfile.mktemp(suffix=".tgz")
+
+ data = urllib2.urlopen(url)
+
+ print "Downloading data for version %s (%s) from %s..." % (version, full_version, url)
+
+ with open(temp_file, 'wb') as f:
+ f.write(data.read())
+ print "Uncompressing data for version %s (%s)..." % (version, full_version)
+
+ try:
+ tf = tarfile.open(temp_file, 'r:gz')
+ tf.extractall(path=temp_dir)
+ tf.close()
+ except:
+ # support for windows
+ zfile = zipfile.ZipFile(temp_file)
+ try:
+ if not os.path.exists(temp_dir):
+ os.makedirs(temp_dir)
+ for name in zfile.namelist():
+ _, filename = os.path.split(name)
+ print "Decompressing " + filename + " on " + temp_dir
+ zfile.extract(name, temp_dir)
+ except:
+ zfile.close()
+ raise
+ zfile.close()
+ temp_install_dir = os.path.join(temp_dir, extract_dir)
+ try:
+ os.stat(temp_install_dir)
+ except:
+                entries = os.listdir(temp_dir)
+                # TODO confirm that there is one and only one directory entry
+                os.rename(os.path.join(temp_dir, entries[0]), temp_install_dir)
+ shutil.move(temp_install_dir, self.install_dir)
+ shutil.rmtree(temp_dir)
+ try:
+ os.remove(temp_file)
+ except Exception as e:
+ print e
+ pass
+ self.symlink_version(version, os.path.abspath(os.path.join(self.install_dir, extract_dir)))
+
+ def symlink_version(self, version, installed_dir):
+
+ try:
+ os.makedirs(self.link_dir)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST and os.path.isdir(self.link_dir):
+ pass
+ else: raise
+
+ for executable in os.listdir(os.path.join(installed_dir, "bin")):
+ link_name = "%s-%s" % (executable, version)
+ # support for windows
+ if executable.endswith(".exe") or executable.endswith(".pdb"):
+ link_name = "%s-%s.%s" % (executable[:-4], version, executable[len(executable)-3:])
+
+ try:
+ os.symlink(os.path.join(installed_dir, "bin", executable),\
+ os.path.join(self.link_dir, link_name))
+ except Exception as exc:
+ try:
+ # support for windows
+ shutil.copy2(os.path.join(installed_dir, "bin", executable),\
+ os.path.join(self.link_dir, link_name))
+ except:
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+
+class MultiVersionDownloader(MultiVersionDownloaderBase):
+
+ def __init__(self, install_dir, link_dir, platform):
+ self.install_dir = install_dir
+ self.link_dir = link_dir
+ match = re.compile("(.*)\/(.*)").match(platform)
+ self.platform = match.group(1)
+ self.arch = match.group(2)
+ self._links = None
+
+ @property
+ def links(self):
+ if self._links is None:
+ self._links = self.download_links()
+ return self._links
+
+ def gen_url(self, version):
+ urls = []
+ for link_version, link_url in self.links.iteritems():
+ if link_version.startswith(version):
+ # If we have a "-" in our version, exact match only
+ if version.find("-") >= 0:
+ if link_version != version: continue
+ elif link_version.find("-") >= 0:
+ continue
+
+ urls.append((link_version, link_url))
+
+ if len(urls) == 0:
+ raise Exception("Cannot find a link for version %s, versions %s found." \
+ % (version, self.links))
+
+ urls.sort(key=version_tuple)
+ full_version = urls[-1][0]
+ url = urls[-1][1]
+ return url, full_version
+
+ def download_links(self):
+ href = "http://dl.mongodb.org/dl/%s/%s" \
+ % (self.platform, self.arch)
+
+ html = urllib2.urlopen(href).read()
+ links = {}
+ for line in html.split():
+ match = None
+ for ext in ["tgz", "zip"]:
+ match = re.compile("http:\/\/downloads\.mongodb\.org\/%s/mongodb-%s-%s-([^\"]*)\.%s" \
+ % (self.platform, self.platform, self.arch, ext)).search(line)
+                if match is not None:
+                    break
+
+            if match is None:
+ continue
+ link = match.group(0)
+ version = match.group(1)
+ links[version] = link
+
+ return links
+
+
+class LatestMultiVersionDownloader(MultiVersionDownloaderBase):
+
+ def __init__(self, install_dir, link_dir, platform, use_ssl, os):
+ self.install_dir = install_dir
+ self.link_dir = link_dir
+ match = re.compile("(.*)\/(.*)").match(platform)
+ self.platform = match.group(1)
+ self.arch = match.group(2)
+ self._links = None
+ self.use_ssl = use_ssl
+ self.os = os
+
+ def gen_url(self, version):
+ ext = "tgz"
+ if "win" in self.platform:
+ ext = "zip"
+ if self.use_ssl:
+ if version == "2.4":
+ enterprise_string = "subscription"
+ else:
+ enterprise_string = "enterprise"
+ full_version = self.os + "-v" + version + "-latest"
+ url = "http://downloads.10gen.com/%s/mongodb-%s-%s-%s-%s.%s" % ( self.platform, self.platform, self.arch, enterprise_string, full_version, ext )
+ else:
+ full_version = "v" + version + "-latest"
+ url = "http://downloads.mongodb.org/%s/mongodb-%s-%s-%s.%s" % ( self.platform, self.platform, self.arch, full_version, ext )
+ return url, full_version
+
+CL_HELP_MESSAGE = \
+"""
+Downloads and installs particular mongodb versions (each binary is renamed to include its version)
+into an install directory and symlinks the binaries with versions to another directory.
+
+Usage: setup_multiversion_mongodb.py INSTALL_DIR LINK_DIR PLATFORM_AND_ARCH VERSION1 [VERSION2 VERSION3 ...]
+
+Ex: setup_multiversion_mongodb.py ./install ./link "Linux/x86_64" "2.0.6" "2.0.3-rc0" "2.0" "2.2" "2.3"
+Ex: setup_multiversion_mongodb.py ./install ./link "OSX/x86_64" "2.4" "2.2"
+
+After running the script you will have a directory structure like this:
+./install/[mongodb-osx-x86_64-2.4.9, mongodb-osx-x86_64-2.2.7]
+./link/[mongod-2.4.9, mongod-2.2.7, mongo-2.4.9...]
+
+You should then add ./link/ to your path so multi-version tests will work.
+
+Note: If "rc" is included in the version name, we'll use the exact rc, otherwise we'll pull the highest non-rc
+version compatible with the version specified.
+"""
+
+def parse_cl_args(args):
+
+ parser = argparse.ArgumentParser(description=CL_HELP_MESSAGE)
+
+ def raise_exception(msg):
+ print CL_HELP_MESSAGE
+ raise Exception(msg)
+
+ parser.add_argument('install_dir', action="store" )
+ parser.add_argument('link_dir', action="store" )
+ parser.add_argument('platform_and_arch', action="store" )
+ parser.add_argument('--latest', action="store_true" )
+ parser.add_argument('--use-ssl', action="store_true" )
+ parser.add_argument('--os', action="store" )
+ parser.add_argument('version', action="store", nargs="+" )
+
+ args = parser.parse_args()
+
+ if re.compile(".*\/.*").match(args.platform_and_arch) == None:
+ raise_exception("PLATFORM_AND_ARCH isn't of the correct format")
+
+ if args.latest:
+ if not args.os:
+ raise_exception("using --use-ssl requires an --os parameter")
+ return (LatestMultiVersionDownloader(args.install_dir, args.link_dir, args.platform_and_arch, args.use_ssl, args.os), args.version)
+ else:
+ if args.use_ssl:
+ raise_exception("you can only use --use-ssl when using --latest")
+ return (MultiVersionDownloader(args.install_dir, args.link_dir, args.platform_and_arch), args.version)
+
+def main():
+
+ downloader, versions = parse_cl_args(sys.argv[1:])
+
+ for version in versions:
+ downloader.download_version(version)
+
+
+
+if __name__ == '__main__':
+ main()
+
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/smoke.py b/src/mongo/gotools/test/qa-tests/buildscripts/smoke.py
new file mode 100755
index 00000000000..bbeec4b12b4
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/smoke.py
@@ -0,0 +1,1451 @@
+#!/usr/bin/env python
+
+# smoke.py: run some mongo tests.
+
+# Bugs, TODOs:
+
+# 0 Some tests hard-code pathnames relative to the mongo repository,
+# so the smoke.py process and all its children must be run with the
+# mongo repo as current working directory. That's kinda icky.
+
+# 1 The tests that are implemented as standalone executables ("test"),
+# don't take arguments for the dbpath, but unconditionally use
+# "/tmp/unittest".
+
+# 2 mongod output gets intermingled with mongo output, and it's often
+# hard to find error messages in the slop. Maybe have smoke.py do
+# some fancier wrangling of child process output?
+
+# 3 Some test suites run their own mongods, and so don't need us to
+# run any mongods around their execution. (It's harmless to do so,
+# but adds noise in the output.)
+
+# 4 Running a separate mongo shell for each js file is slower than
+# loading js files into one mongo shell process. Maybe have runTest
+# queue up all filenames ending in ".js" and run them in one mongo
+# shell at the "end" of testing?
+
+# 5 Right now small-oplog implies master/slave replication. Maybe
+# running with replication should be an orthogonal concern. (And
+# maybe test replica set replication, too.)
+
+# 6 We use cleanbb.py to clear out the dbpath, but cleanbb.py kills
+# off all mongods on a box, which means you can't run two smoke.py
+# jobs on the same host at once. So something's gotta change.
+
+from datetime import datetime
+from itertools import izip
+import glob
+from optparse import OptionParser
+import os
+import pprint
+import re
+import shlex
+import signal
+import socket
+import stat
+from subprocess import (PIPE, Popen, STDOUT)
+import sys
+import time
+import threading
+import traceback
+
+from pymongo import MongoClient
+from pymongo.errors import OperationFailure
+from pymongo import ReadPreference
+
+import cleanbb
+import smoke
+import utils
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ from hashlib import md5 # new in 2.5
+except ImportError:
+ from md5 import md5 # deprecated in 2.5
+
+try:
+ import json
+except:
+ try:
+ import simplejson as json
+ except:
+ json = None
+
+
+# TODO clean this up so we don't need globals...
+mongo_repo = os.getcwd() #'./'
+failfile = os.path.join(mongo_repo, 'failfile.smoke')
+test_path = None
+mongod_executable = None
+mongod_port = None
+shell_executable = None
+continue_on_failure = None
+file_of_commands_mode = False
+start_mongod = True
+temp_path = None
+clean_every_n_tests = 1
+clean_whole_dbroot = False
+
+tests = []
+winners = []
+losers = {}
+fails = [] # like losers but in format of tests
+
+# For replication hash checking
+replicated_collections = []
+lost_in_slave = []
+lost_in_master = []
+screwy_in_slave = {}
+
+smoke_db_prefix = ''
+small_oplog = False
+small_oplog_rs = False
+
+test_report = { "results": [] }
+report_file = None
+
+# This class just implements the with statement API
+class NullMongod(object):
+ def start(self):
+ pass
+
+ def stop(self):
+ pass
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.stop()
+ return not isinstance(value, Exception)
+
+
+def dump_stacks(signal, frame):
+ print "======================================"
+ print "DUMPING STACKS due to SIGUSR1 signal"
+ print "======================================"
+    threads = threading.enumerate()
+
+ print "Total Threads: " + str(len(threads))
+
+ for id, stack in sys._current_frames().items():
+ print "Thread %d" % (id)
+ print "".join(traceback.format_stack(stack))
+ print "======================================"
+
+
+def buildlogger(cmd, is_global=False):
+ # if the environment variable MONGO_USE_BUILDLOGGER
+ # is set to 'true', then wrap the command with a call
+    # to buildlogger.py, which sends output to the buildlogger
+ # machine; otherwise, return as usual.
+ if os.environ.get('MONGO_USE_BUILDLOGGER', '').lower().strip() == 'true':
+ if is_global:
+ return [utils.find_python(), 'buildscripts/buildlogger.py', '-g'] + cmd
+ else:
+ return [utils.find_python(), 'buildscripts/buildlogger.py'] + cmd
+ return cmd
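+
+# Illustrative note (not part of the original file; the argv is hypothetical):
+# with MONGO_USE_BUILDLOGGER=true,
+#   buildlogger(["./mongod", "--port", "27999"], is_global=True)
+# returns [<python>, "buildscripts/buildlogger.py", "-g", "./mongod", "--port",
+# "27999"]; otherwise the argv is returned unchanged.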
+
+
+def clean_dbroot(dbroot="", nokill=False):
+ # Clean entire /data/db dir if --with-cleanbb, else clean specific database path.
+ if clean_whole_dbroot and not (small_oplog or small_oplog_rs):
+ dbroot = os.path.normpath(smoke_db_prefix + "/data/db")
+ if os.path.exists(dbroot):
+ print("clean_dbroot: %s" % dbroot)
+ cleanbb.cleanup(dbroot, nokill)
+
+
+class mongod(NullMongod):
+ def __init__(self, **kwargs):
+ self.kwargs = kwargs
+ self.proc = None
+ self.auth = False
+
+ def ensure_test_dirs(self):
+ utils.ensureDir(smoke_db_prefix + "/tmp/unittest/")
+ utils.ensureDir(smoke_db_prefix + "/data/")
+ utils.ensureDir(smoke_db_prefix + "/data/db/")
+
+ def check_mongo_port(self, port=27017):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", int(port)))
+ sock.close()
+
+ def is_mongod_up(self, port=mongod_port):
+ if not start_mongod:
+ return False
+ try:
+ self.check_mongo_port(int(port))
+ return True
+        except Exception, e:
+ print >> sys.stderr, e
+ return False
+
+ def did_mongod_start(self, port=mongod_port, timeout=300):
+ while timeout > 0:
+ time.sleep(1)
+ is_up = self.is_mongod_up(port)
+ if is_up:
+ return True
+ timeout = timeout - 1
+ print >> sys.stderr, "timeout starting mongod"
+ return False
+
+ def start(self):
+ global mongod_port
+ global mongod
+ if self.proc:
+ print >> sys.stderr, "probable bug: self.proc already set in start()"
+ return
+ self.ensure_test_dirs()
+ dir_name = smoke_db_prefix + "/data/db/sconsTests/"
+ self.port = int(mongod_port)
+ self.slave = False
+ if 'slave' in self.kwargs:
+ dir_name = smoke_db_prefix + '/data/db/sconsTestsSlave/'
+ srcport = mongod_port
+ self.port += 1
+ self.slave = True
+
+ clean_dbroot(dbroot=dir_name, nokill=self.slave)
+ utils.ensureDir(dir_name)
+
+ argv = [mongod_executable, "--port", str(self.port), "--dbpath", dir_name]
+ # These parameters are always set for tests
+ # SERVER-9137 Added httpinterface parameter to keep previous behavior
+ argv += ['--setParameter', 'enableTestCommands=1', '--httpinterface']
+ if self.kwargs.get('small_oplog'):
+ if self.slave:
+ argv += ['--slave', '--source', 'localhost:' + str(srcport)]
+ else:
+ argv += ["--master", "--oplogSize", "511"]
+ if self.kwargs.get('storage_engine'):
+ argv += ["--storageEngine", self.kwargs.get('storage_engine')]
+ if self.kwargs.get('wiredtiger_engine_config_string'):
+ argv += ["--wiredTigerEngineConfigString", self.kwargs.get('wiredtiger_engine_config_string')]
+ if self.kwargs.get('wiredtiger_collection_config_string'):
+ argv += ["--wiredTigerCollectionConfigString", self.kwargs.get('wiredtiger_collection_config_string')]
+ if self.kwargs.get('wiredtiger_index_config_string'):
+ argv += ["--wiredTigerIndexConfigString", self.kwargs.get('wiredtiger_index_config_string')]
+ params = self.kwargs.get('set_parameters', None)
+ if params:
+ for p in params.split(','): argv += ['--setParameter', p]
+ if self.kwargs.get('small_oplog_rs'):
+ argv += ["--replSet", "foo", "--oplogSize", "511"]
+ if self.kwargs.get('no_journal'):
+ argv += ['--nojournal']
+ if self.kwargs.get('no_preallocj'):
+ argv += ['--nopreallocj']
+ if self.kwargs.get('auth'):
+ argv += ['--auth', '--setParameter', 'enableLocalhostAuthBypass=false']
+ authMechanism = self.kwargs.get('authMechanism', 'SCRAM-SHA-1')
+ if authMechanism != 'SCRAM-SHA-1':
+ argv += ['--setParameter', 'authenticationMechanisms=' + authMechanism]
+ self.auth = True
+ if self.kwargs.get('keyFile'):
+ argv += ['--keyFile', self.kwargs.get('keyFile')]
+ if self.kwargs.get('use_ssl') or self.kwargs.get('use_x509'):
+ argv += ['--sslMode', "requireSSL",
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation']
+ if self.kwargs.get('use_x509'):
+                argv += ['--clusterAuthMode', 'x509']
+ self.auth = True
+ print "running " + " ".join(argv)
+ self.proc = self._start(buildlogger(argv, is_global=True))
+
+ if not self.did_mongod_start(self.port):
+ raise Exception("Failed to start mongod")
+
+ if self.slave:
+ local = MongoClient(port=self.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).local
+ synced = False
+ while not synced:
+ synced = True
+ for source in local.sources.find({}, ["syncedTo"]):
+ synced = synced and "syncedTo" in source and source["syncedTo"]
+
+ def _start(self, argv):
+ """In most cases, just call subprocess.Popen(). On windows,
+ add the started process to a new Job Object, so that any
+ child processes of this process can be killed with a single
+ call to TerminateJobObject (see self.stop()).
+ """
+
+ if os.sys.platform == "win32":
+ # Create a job object with the "kill on job close"
+ # flag; this is inherited by child processes (ie
+ # the mongod started on our behalf by buildlogger)
+ # and lets us terminate the whole tree of processes
+ # rather than orphaning the mongod.
+ import win32job
+
+ # Magic number needed to allow job reassignment in Windows 7
+ # see: MSDN - Process Creation Flags - ms684863
+ CREATE_BREAKAWAY_FROM_JOB = 0x01000000
+
+ proc = Popen(argv, creationflags=CREATE_BREAKAWAY_FROM_JOB)
+
+ self.job_object = win32job.CreateJobObject(None, '')
+
+ job_info = win32job.QueryInformationJobObject(
+ self.job_object, win32job.JobObjectExtendedLimitInformation)
+ job_info['BasicLimitInformation']['LimitFlags'] |= win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+ win32job.SetInformationJobObject(
+ self.job_object,
+ win32job.JobObjectExtendedLimitInformation,
+ job_info)
+
+ win32job.AssignProcessToJobObject(self.job_object, proc._handle)
+
+ else:
+ proc = Popen(argv)
+
+ return proc
+
+ def stop(self):
+ if not self.proc:
+ print >> sys.stderr, "probable bug: self.proc unset in stop()"
+ return
+ try:
+ if os.sys.platform == "win32":
+ import win32job
+ win32job.TerminateJobObject(self.job_object, -1)
+ # Windows doesn't seem to kill the process immediately, so give it some time to die
+ time.sleep(5)
+ elif hasattr(self.proc, "terminate"):
+ # This method added in Python 2.6
+ self.proc.terminate()
+ else:
+ os.kill(self.proc.pid, 15)
+ except Exception, e:
+ print >> sys.stderr, "error shutting down mongod"
+ print >> sys.stderr, e
+ self.proc.wait()
+ sys.stderr.flush()
+ sys.stdout.flush()
+
+ # Fail hard if mongod terminates with an error. That might indicate that an
+ # instrumented build (e.g. LSAN) has detected an error. For now we aren't doing this on
+ # windows because the exit code seems to be unpredictable. We don't have LSAN there
+ # anyway.
+ retcode = self.proc.returncode
+ if os.sys.platform != "win32" and retcode != 0:
+ raise(Exception('mongod process exited with non-zero code %d' % retcode))
+
+ def wait_for_repl(self):
+ print "Awaiting replicated (w:2, wtimeout:5min) insert (port:" + str(self.port) + ")"
+ MongoClient(port=self.port).testing.smokeWait.insert({}, w=2, wtimeout=5*60*1000)
+ print "Replicated write completed -- done wait_for_repl"
+
+class Bug(Exception):
+ def __str__(self):
+ return 'bug in smoke.py: ' + super(Bug, self).__str__()
+
+class TestFailure(Exception):
+ pass
+
+class TestExitFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status=args[1]
+
+ def __str__(self):
+ return "test %s exited with status %d" % (self.path, self.status)
+
+class TestServerFailure(TestFailure):
+ def __init__(self, *args):
+ self.path = args[0]
+ self.status = -1 # this is meaningless as an exit code, but
+ # that's the point.
+ def __str__(self):
+ return 'mongod not running after executing test %s' % self.path
+
+def check_db_hashes(master, slave):
+ # Need to pause a bit so a slave might catch up...
+ if not slave.slave:
+ raise(Bug("slave instance doesn't have slave attribute set"))
+
+ master.wait_for_repl()
+
+ # FIXME: maybe make this run dbhash on all databases?
+ for mongod in [master, slave]:
+ client = MongoClient(port=mongod.port, read_preference=ReadPreference.SECONDARY_PREFERRED)
+ mongod.dbhash = client.test.command("dbhash")
+ mongod.dict = mongod.dbhash["collections"]
+
+ global lost_in_slave, lost_in_master, screwy_in_slave, replicated_collections
+
+ replicated_collections += master.dict.keys()
+
+ for coll in replicated_collections:
+ if coll not in slave.dict and coll not in lost_in_slave:
+ lost_in_slave.append(coll)
+ mhash = master.dict[coll]
+ shash = slave.dict[coll]
+ if mhash != shash:
+ mTestDB = MongoClient(port=master.port).test
+ sTestDB = MongoClient(port=slave.port,
+ read_preference=ReadPreference.SECONDARY_PREFERRED).test
+ mCount = mTestDB[coll].count()
+ sCount = sTestDB[coll].count()
+ stats = {'hashes': {'master': mhash, 'slave': shash},
+ 'counts':{'master': mCount, 'slave': sCount}}
+ try:
+ mDocs = list(mTestDB[coll].find().sort("_id", 1))
+ sDocs = list(sTestDB[coll].find().sort("_id", 1))
+ mDiffDocs = list()
+ sDiffDocs = list()
+ for left, right in izip(mDocs, sDocs):
+ if left != right:
+ mDiffDocs.append(left)
+ sDiffDocs.append(right)
+
+ stats["docs"] = {'master': mDiffDocs, 'slave': sDiffDocs }
+ except Exception, e:
+ stats["error-docs"] = e;
+
+ screwy_in_slave[coll] = stats
+ if mhash == "no _id _index":
+                mOplog = mTestDB.connection.local["oplog.$main"]
+ oplog_entries = list(mOplog.find({"$or": [{"ns":mTestDB[coll].full_name}, \
+ {"op":"c"}]}).sort("$natural", 1))
+ print "oplog for %s" % mTestDB[coll].full_name
+ for doc in oplog_entries:
+ pprint.pprint(doc, width=200)
+
+
+ for db in slave.dict.keys():
+ if db not in master.dict and db not in lost_in_master:
+ lost_in_master.append(db)
+
+
+def ternary( b , l="true", r="false" ):
+ if b:
+ return l
+ return r
+
+# Blech.
+def skipTest(path):
+ basename = os.path.basename(path)
+ parentPath = os.path.dirname(path)
+ parentDir = os.path.basename(parentPath)
+ if small_oplog or small_oplog_rs: # For tests running in parallel
+ if basename in ["cursor8.js", "indexh.js", "dropdb.js", "dropdb_race.js",
+ "connections_opened.js", "opcounters_write_cmd.js", "dbadmin.js"]:
+ return True
+ if use_ssl:
+ # Skip tests using mongobridge since it does not support SSL
+ # TODO: Remove when SERVER-10910 has been resolved.
+ if basename in ["gridfs.js", "initial_sync3.js", "majority.js", "no_chaining.js",
+ "rollback4.js", "slavedelay3.js", "sync2.js", "tags.js"]:
+ return True
+ # TODO: For now skip tests using MongodRunner, remove when SERVER-10909 has been resolved
+ if basename in ["fastsync.js", "index_retry.js", "ttl_repl_maintenance.js",
+ "unix_socket1.js"]:
+            return True
+ if auth or keyFile or use_x509: # For tests running with auth
+ # Skip any tests that run with auth explicitly
+ if parentDir.lower() == "auth" or "auth" in basename.lower():
+ return True
+ if parentPath == mongo_repo: # Skip client tests
+ return True
+ if parentDir == "tool": # SERVER-6368
+ return True
+ if parentDir == "dur": # SERVER-7317
+ return True
+ if parentDir == "disk": # SERVER-7356
+ return True
+
+ authTestsToSkip = [("jstests", "drop2.js"), # SERVER-8589,
+ ("jstests", "killop.js"), # SERVER-10128
+ ("sharding", "sync3.js"), # SERVER-6388 for this and those below
+ ("sharding", "sync6.js"),
+ ("sharding", "parallel.js"),
+ ("sharding", "copydb_from_mongos.js"), # SERVER-13080
+ ("jstests", "bench_test1.js"),
+ ("jstests", "bench_test2.js"),
+ ("jstests", "bench_test3.js"),
+ ("jstests", "bench_test_insert.js"),
+ ("core", "bench_test1.js"),
+ ("core", "bench_test2.js"),
+ ("core", "bench_test3.js"),
+ ("core", "bench_test_insert.js"),
+ ]
+
+ if os.path.join(parentDir,basename) in [ os.path.join(*test) for test in authTestsToSkip ]:
+ return True
+
+ return False
+
+legacyWriteRE = re.compile(r"jstests[/\\]multiVersion")
+def setShellWriteModeForTest(path, argv):
+ swm = shell_write_mode
+ if legacyWriteRE.search(path):
+ swm = "legacy"
+ argv += ["--writeMode", swm]
+
+def runTest(test, result):
+ # result is a map containing test result details, like result["url"]
+
+ # test is a tuple of ( filename , usedb<bool> )
+ # filename should be a js file to run
+ # usedb is true if the test expects a mongod to be running
+
+ (path, usedb) = test
+ (ignore, ext) = os.path.splitext(path)
+ test_mongod = mongod()
+ mongod_is_up = test_mongod.is_mongod_up(mongod_port)
+ result["mongod_running_at_start"] = mongod_is_up;
+
+ if file_of_commands_mode:
+ # smoke.py was invoked like "--mode files --from-file foo",
+ # so don't try to interpret the test path too much
+ if os.sys.platform == "win32":
+ argv = [path]
+ else:
+ argv = shlex.split(path)
+ path = argv[0]
+ # if the command is a python script, use the script name
+ if os.path.basename(path) in ('python', 'python.exe'):
+ path = argv[1]
+ elif ext == ".js":
+ argv = [shell_executable, "--port", mongod_port]
+
+ setShellWriteModeForTest(path, argv)
+
+ if not usedb:
+ argv += ["--nodb"]
+ if small_oplog or small_oplog_rs:
+ argv += ["--eval", 'testingReplication = true;']
+ if use_ssl:
+ argv += ["--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidCertificates"]
+ argv += [path]
+ elif ext in ["", ".exe"]:
+ # Blech.
+ if os.path.basename(path) in ["dbtest", "dbtest.exe"]:
+ argv = [path]
+ # default data directory for dbtest is /tmp/unittest
+ if smoke_db_prefix:
+ dir_name = smoke_db_prefix + '/unittests'
+ argv.extend(["--dbpath", dir_name] )
+
+ if storage_engine:
+ argv.extend(["--storageEngine", storage_engine])
+ if wiredtiger_engine_config_string:
+ argv.extend(["--wiredTigerEngineConfigString", wiredtiger_engine_config_string])
+ if wiredtiger_collection_config_string:
+ argv.extend(["--wiredTigerCollectionConfigString", wiredtiger_collection_config_string])
+ if wiredtiger_index_config_string:
+ argv.extend(["--wiredTigerIndexConfigString", wiredtiger_index_config_string])
+
+ # more blech
+ elif os.path.basename(path) in ['mongos', 'mongos.exe']:
+ argv = [path, "--test"]
+ else:
+ argv = [test_path and os.path.abspath(os.path.join(test_path, path)) or path,
+ "--port", mongod_port]
+ else:
+ raise Bug("fell off in extension case: %s" % path)
+
+ mongo_test_filename = os.path.basename(path)
+
+ # sys.stdout.write() is more atomic than print, so using it prevents
+ # lines being interrupted by, e.g., child processes
+ sys.stdout.write(" *******************************************\n")
+ sys.stdout.write(" Test : %s ...\n" % mongo_test_filename)
+ sys.stdout.flush()
+
+ # FIXME: we don't handle the case where the subprocess
+ # hangs... that's bad.
+ if ( argv[0].endswith( 'mongo' ) or argv[0].endswith( 'mongo.exe' ) ) and not '--eval' in argv :
+ evalString = 'load("jstests/libs/servers.js");load("jstests/libs/servers_misc.js");' +\
+ 'TestData = new Object();' + \
+ 'TestData.storageEngine = "' + ternary( storage_engine, storage_engine, "" ) + '";' + \
+ 'TestData.wiredTigerEngineConfigString = "' + ternary( wiredtiger_engine_config_string, wiredtiger_engine_config_string, "" ) + '";' + \
+ 'TestData.wiredTigerCollectionConfigString = "' + ternary( wiredtiger_collection_config_string, wiredtiger_collection_config_string, "" ) + '";' + \
+ 'TestData.wiredTigerIndexConfigString = "' + ternary( wiredtiger_index_config_string, wiredtiger_index_config_string, "" ) + '";' + \
+ 'TestData.testPath = "' + path + '";' + \
+ 'TestData.testFile = "' + os.path.basename( path ) + '";' + \
+ 'TestData.testName = "' + re.sub( ".js$", "", os.path.basename( path ) ) + '";' + \
+ 'TestData.setParameters = "' + ternary( set_parameters, set_parameters, "" ) + '";' + \
+ 'TestData.setParametersMongos = "' + ternary( set_parameters_mongos, set_parameters_mongos, "" ) + '";' + \
+ 'TestData.noJournal = ' + ternary( no_journal ) + ";" + \
+ 'TestData.noJournalPrealloc = ' + ternary( no_preallocj ) + ";" + \
+ 'TestData.auth = ' + ternary( auth ) + ";" + \
+ 'TestData.keyFile = ' + ternary( keyFile , '"' + str(keyFile) + '"' , 'null' ) + ";" + \
+ 'TestData.keyFileData = ' + ternary( keyFile , '"' + str(keyFileData) + '"' , 'null' ) + ";" + \
+ 'TestData.authMechanism = ' + ternary( authMechanism,
+ '"' + str(authMechanism) + '"', 'null') + ";" + \
+ 'TestData.useSSL = ' + ternary( use_ssl ) + ";" + \
+ 'TestData.useX509 = ' + ternary( use_x509 ) + ";"
+ # this updates the default data directory for mongod processes started through shell (src/mongo/shell/servers.js)
+ evalString += 'MongoRunner.dataDir = "' + os.path.abspath(smoke_db_prefix + '/data/db') + '";'
+ evalString += 'MongoRunner.dataPath = MongoRunner.dataDir + "/";'
+ if temp_path:
+ evalString += 'TestData.tmpPath = "' + temp_path + '";'
+ if os.sys.platform == "win32":
+ # double quotes in the evalString on windows; this
+ # prevents the backslashes from being removed when
+ # the shell (i.e. bash) evaluates this string. yuck.
+ evalString = evalString.replace('\\', '\\\\')
+
+ if auth and usedb:
+ evalString += 'jsTest.authenticate(db.getMongo());'
+
+ if os.getenv('SMOKE_EVAL') is not None:
+ evalString += os.getenv('SMOKE_EVAL')
+
+ argv = argv + [ '--eval', evalString]
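+        # Illustrative only (values hypothetical): for jstests/core/foo.js
+        # with --auth, the final invocation looks roughly like
+        #   mongo --port 27999 --writeMode commands jstests/core/foo.js \
+        #     --eval 'load("jstests/libs/servers.js");...;TestData.auth = true;...'
+        # so each test reads its configuration from the TestData global.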
+
+
+ if argv[0].endswith( 'dbtest' ) or argv[0].endswith( 'dbtest.exe' ):
+ if no_preallocj :
+ argv = argv + [ '--nopreallocj' ]
+ if temp_path:
+ argv = argv + [ '--tempPath', temp_path ]
+
+
+ sys.stdout.write(" Command : %s\n" % ' '.join(argv))
+ sys.stdout.write(" Date : %s\n" % datetime.now().ctime())
+ sys.stdout.flush()
+
+ os.environ['MONGO_TEST_FILENAME'] = mongo_test_filename
+ t1 = time.time()
+
+ proc = Popen(buildlogger(argv), cwd=test_path, stdout=PIPE, stderr=STDOUT, bufsize=0)
+ first_line = proc.stdout.readline() # Get suppressed output URL
+ m = re.search(r"\s*\(output suppressed; see (?P<url>.*)\)" + os.linesep, first_line)
+ if m:
+ result["url"] = m.group("url")
+ sys.stdout.write(first_line)
+ sys.stdout.flush()
+ while True:
+ # print until subprocess's stdout closed.
+ # Not using "for line in file" since that has unwanted buffering.
+ line = proc.stdout.readline()
+ if not line:
+            break
+
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+ proc.wait() # wait if stdout is closed before subprocess exits.
+ r = proc.returncode
+
+ t2 = time.time()
+ del os.environ['MONGO_TEST_FILENAME']
+
+ timediff = t2 - t1
+ # timediff is seconds by default
+ scale = 1
+ suffix = "seconds"
+ # if timediff is less than 10 seconds use ms
+ if timediff < 10:
+ scale = 1000
+ suffix = "ms"
+ # if timediff is more than 60 seconds use minutes
+ elif timediff > 60:
+ scale = 1.0 / 60.0
+ suffix = "minutes"
+ sys.stdout.write(" %10.4f %s\n" % ((timediff) * scale, suffix))
+ sys.stdout.flush()
+
+ result["exit_code"] = r
+
+
+ is_mongod_still_up = test_mongod.is_mongod_up(mongod_port)
+ if start_mongod and not is_mongod_still_up:
+ print "mongod is not running after test"
+        result["mongod_running_at_end"] = is_mongod_still_up
+        raise TestServerFailure(path)
+
+    result["mongod_running_at_end"] = is_mongod_still_up
+
+ if r != 0:
+ raise TestExitFailure(path, r)
+
+ print ""
+
+def run_tests(tests):
+ # FIXME: some suites of tests start their own mongod, so don't
+ # need this. (So long as there are no conflicts with port,
+ # dbpath, etc., and so long as we shut ours down properly,
+ # starting this mongod shouldn't break anything, though.)
+
+ # The reason we want to use "with" is so that we get __exit__ semantics
+ # but "with" is only supported on Python 2.5+
+
+ master = NullMongod()
+ slave = NullMongod()
+
+ try:
+ if start_mongod:
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config_string=wiredtiger_engine_config_string,
+ wiredtiger_collection_config_string=wiredtiger_collection_config_string,
+ wiredtiger_index_config_string=wiredtiger_index_config_string,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ if small_oplog:
+ slave = mongod(slave=True,
+ small_oplog=True,
+ small_oplog_rs=False,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config_string=wiredtiger_engine_config_string,
+ wiredtiger_collection_config_string=wiredtiger_collection_config_string,
+ wiredtiger_index_config_string=wiredtiger_index_config_string,
+ set_parameters=set_parameters)
+ slave.start()
+ elif small_oplog_rs:
+ slave = mongod(slave=True,
+ small_oplog_rs=True,
+ small_oplog=False,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config_string=wiredtiger_engine_config_string,
+ wiredtiger_collection_config_string=wiredtiger_collection_config_string,
+ wiredtiger_index_config_string=wiredtiger_index_config_string,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ slave.start()
+            primary = MongoClient(port=master.port)
+
+ primary.admin.command({'replSetInitiate' : {'_id' : 'foo', 'members' : [
+ {'_id': 0, 'host':'localhost:%s' % master.port},
+ {'_id': 1, 'host':'localhost:%s' % slave.port,'priority':0}]}})
+
+ # Wait for primary and secondary to finish initial sync and election
+ ismaster = False
+ while not ismaster:
+                result = primary.admin.command("ismaster")
+ ismaster = result["ismaster"]
+ if not ismaster:
+ print "waiting for primary to be available ..."
+ time.sleep(.2)
+
+ secondaryUp = False
+            sConn = MongoClient(port=slave.port,
+                                read_preference=ReadPreference.SECONDARY_PREFERRED)
+ while not secondaryUp:
+                result = sConn.admin.command("ismaster")
+ secondaryUp = result["secondary"]
+ if not secondaryUp:
+ print "waiting for secondary to be available ..."
+ time.sleep(.2)
+
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+
+ for tests_run, test in enumerate(tests):
+ tests_run += 1 # enumerate from 1, python 2.5 compatible
+ test_result = { "start": time.time() }
+
+ (test_path, use_db) = test
+
+ if test_path.startswith(mongo_repo + os.path.sep):
+ test_result["test_file"] = test_path[len(mongo_repo)+1:]
+ else:
+ # user could specify a file not in repo. leave it alone.
+ test_result["test_file"] = test_path
+
+ try:
+ if skipTest(test_path):
+ test_result["status"] = "skip"
+
+ print "skipping " + test_path
+ else:
+ fails.append(test)
+ runTest(test, test_result)
+ fails.pop()
+ winners.append(test)
+
+ test_result["status"] = "pass"
+
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_report["results"].append( test_result )
+ if small_oplog or small_oplog_rs:
+ master.wait_for_repl()
+ # check the db_hashes
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+ check_and_report_replication_dbhashes()
+
+ elif use_db: # reach inside test and see if "usedb" is true
+ if clean_every_n_tests and (tests_run % clean_every_n_tests) == 0:
+ # Restart mongod periodically to clean accumulated test data
+ # clean_dbroot() is invoked by mongod.start()
+ master.stop()
+ master = mongod(small_oplog_rs=small_oplog_rs,
+ small_oplog=small_oplog,
+ no_journal=no_journal,
+ storage_engine=storage_engine,
+ wiredtiger_engine_config_string=wiredtiger_engine_config_string,
+ wiredtiger_collection_config_string=wiredtiger_collection_config_string,
+ wiredtiger_index_config_string=wiredtiger_index_config_string,
+ set_parameters=set_parameters,
+ no_preallocj=no_preallocj,
+ auth=auth,
+ authMechanism=authMechanism,
+ keyFile=keyFile,
+ use_ssl=use_ssl,
+ use_x509=use_x509)
+ master.start()
+
+ except TestFailure, f:
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["error"] = str(f)
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+ try:
+ print f
+ # Record the failing test and re-raise.
+ losers[f.path] = f.status
+ raise f
+ except TestServerFailure, f:
+ return 2
+ except TestFailure, f:
+ if not continue_on_failure:
+ return 1
+ if isinstance(slave, mongod):
+ check_db_hashes(master, slave)
+
+ finally:
+ slave.stop()
+ master.stop()
+ return 0
+
+
+def check_and_report_replication_dbhashes():
+ def missing(lst, src, dst):
+ if lst:
+ print """The following collections were present in the %s but not the %s
+at the end of testing:""" % (src, dst)
+ for db in lst:
+ print db
+
+ missing(lost_in_slave, "master", "slave")
+ missing(lost_in_master, "slave", "master")
+ if screwy_in_slave:
+        print """The following collections have different hashes in master and slave
+at the end of testing:"""
+ for coll in screwy_in_slave.keys():
+ stats = screwy_in_slave[coll]
+ # Counts are "approx" because they are collected after the dbhash runs and may not
+ # reflect the states of the collections that were hashed. If the hashes differ, one
+ # possibility is that a test exited with writes still in-flight.
+ print "collection: %s\t (master/slave) hashes: %s/%s counts (approx): %i/%i" % (coll, stats['hashes']['master'], stats['hashes']['slave'], stats['counts']['master'], stats['counts']['slave'])
+ if "docs" in stats:
+                if (("master" in stats["docs"] and len(stats["docs"]["master"]) != 0) or
+                        ("slave" in stats["docs"] and len(stats["docs"]["slave"]) != 0)):
+                    print "Different Docs"
+                    print "Master docs:"
+                    pprint.pprint(stats["docs"]["master"], indent=2)
+                    print "Slave docs:"
+                    pprint.pprint(stats["docs"]["slave"], indent=2)
+                else:
+                    print "All docs matched!"
+ if "error-docs" in stats:
+ print "Error getting docs to diff:"
+ pprint.pprint(stats["error-docs"])
+ return True
+
+ if (small_oplog or small_oplog_rs) and not (lost_in_master or lost_in_slave or screwy_in_slave):
+ print "replication ok for %d collections" % (len(replicated_collections))
+
+ return False
+
+
+def report():
+ print "%d tests succeeded" % len(winners)
+ num_missed = len(tests) - (len(winners) + len(losers.keys()))
+ if num_missed:
+ print "%d tests didn't get run" % num_missed
+ if losers:
+ print "The following tests failed (with exit code):"
+ for loser in losers:
+ print "%s\t%d" % (loser, losers[loser])
+
+ test_result = { "start": time.time() }
+ if check_and_report_replication_dbhashes():
+ test_result["end"] = time.time()
+ test_result["elapsed"] = test_result["end"] - test_result["start"]
+ test_result["test_file"] = "/#dbhash#"
+ test_result["error"] = "dbhash mismatch"
+ test_result["status"] = "fail"
+ test_report["results"].append( test_result )
+
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report ) )
+ f.close()
+
+ if losers or lost_in_slave or lost_in_master or screwy_in_slave:
+ raise Exception("Test failures")
+
+# Keys are the suite names (passed on the command line to smoke.py)
+# Values are pairs: (filenames, <start mongod before running tests>)
+
+suiteGlobalConfig = { "files": ("files/*.js", False),
+ "restore": ("restore/*.js", False),
+ "stat": ("stat/*.js", False),
+ "top": ("top/*.js", False),
+ "bson": ("bson/*.js", False),
+ "export": ("export/*.js", False),
+ "dump": ("dump/*.js", False),
+ "oplog": ("oplog/*.js", False),
+ "import": ("import/*.js", False),
+ "ssl": ("ssl/*.js", False),
+ "unstable": ("unstable/*.js", False),
+ }
+
+def get_module_suites():
+ """Attempts to discover and return information about module test suites
+
+ Returns a dictionary of module suites in the format:
+
+ {
+ "<suite_name>" : "<full_path_to_suite_directory/[!_]*.js>",
+ ...
+ }
+
+ This means the values of this dictionary can be used as "glob"s to match all jstests in the
+ suite directory that don't start with an underscore
+
+    The module tests should be put in 'src/mongo/db/modules/<module_name>/jstests/<suite_name>/*.js'
+
+ NOTE: This assumes that if we have more than one module the suite names don't conflict
+ """
+ modules_directory = 'src/mongo/db/modules'
+ test_suites = {}
+
+ # Return no suites if we have no modules
+ if not os.path.exists(modules_directory) or not os.path.isdir(modules_directory):
+ return {}
+
+ module_directories = os.listdir(modules_directory)
+ for module_directory in module_directories:
+
+ test_directory = os.path.join(modules_directory, module_directory, "jstests")
+
+ # Skip this module if it has no "jstests" directory
+ if not os.path.exists(test_directory) or not os.path.isdir(test_directory):
+ continue
+
+ # Get all suites for this module
+ for test_suite in os.listdir(test_directory):
+ test_suites[test_suite] = os.path.join(test_directory, test_suite, "[!_]*.js")
+
+ return test_suites
+
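+# Illustrative only: for a hypothetical module checked out at
+# src/mongo/db/modules/enterprise with tests under jstests/audit/,
+# get_module_suites() would return
+#   {"audit": "src/mongo/db/modules/enterprise/jstests/audit/[!_]*.js"}
+# and the value can be handed straight to glob.glob() to enumerate the suite.
+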
+def expand_suites(suites,expandUseDB=True):
+ """Takes a list of suites and expands to a list of tests according to a set of rules.
+
+ Keyword arguments:
+ suites -- list of suites specified by the user
+ expandUseDB -- expand globs (such as [!_]*.js) for tests that are run against a database
+ (default True)
+
+ This function handles expansion of globs (such as [!_]*.js), aliases (such as "client" and
+ "all"), detection of suites in the "modules" directory, and enumerating the test files in a
+ given suite. It returns a list of tests of the form (path_to_test, usedb), where the second
+ part of the tuple specifies whether the test is run against the database (see --nodb in the
+ mongo shell)
+
+ """
+ globstr = None
+ tests = []
+ module_suites = get_module_suites()
+ for suite in suites:
+ if suite == 'all':
+ return expand_suites(['dbtest',
+ 'jsCore',
+ 'jsPerf',
+ 'mmap_v1',
+ 'noPassthroughWithMongod',
+ 'noPassthrough',
+ 'clone',
+ 'parallel',
+ 'concurrency',
+ 'repl',
+ 'auth',
+ 'sharding',
+ 'slow1',
+ 'slow2',
+ 'tool'],
+ expandUseDB=expandUseDB)
+ if suite == 'dbtest' or suite == 'test':
+ if os.sys.platform == "win32":
+ program = 'dbtest.exe'
+ else:
+ program = 'dbtest'
+ (globstr, usedb) = (program, False)
+ elif suite == 'mongosTest':
+ if os.sys.platform == "win32":
+ program = 'mongos.exe'
+ else:
+ program = 'mongos'
+ tests += [(os.path.join(mongo_repo, program), False)]
+ elif os.path.exists( suite ):
+ usedb = True
+ for name in suiteGlobalConfig:
+ if suite in glob.glob( "jstests/" + suiteGlobalConfig[name][0] ):
+ usedb = suiteGlobalConfig[name][1]
+ break
+ tests += [ ( os.path.join( mongo_repo , suite ) , usedb ) ]
+ elif suite in module_suites:
+ # Currently we connect to a database in all module tests since there's no mechanism yet
+ # to configure it independently
+ usedb = True
+ paths = glob.glob(module_suites[suite])
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+ else:
+ try:
+ globstr, usedb = suiteGlobalConfig[suite]
+ except KeyError:
+ raise Exception('unknown test suite %s' % suite)
+
+ if globstr:
+ if usedb and not expandUseDB:
+ tests += [ (suite,False) ]
+ else:
+ if globstr.endswith('.js'):
+ loc = 'jstests/'
+ else:
+ loc = ''
+ globstr = os.path.join(mongo_repo, (os.path.join(loc, globstr)))
+ globstr = os.path.normpath(globstr)
+ paths = glob.glob(globstr)
+ paths.sort()
+ tests += [(path, usedb) for path in paths]
+
+ return tests
+
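+# A quick sketch (file names hypothetical): expanding the "dump" suite from
+# suiteGlobalConfig globs jstests/dump/*.js under mongo_repo, so
+#   expand_suites(["dump"])
+# might yield [("<mongo_repo>/jstests/dump/basic.js", False), ...], the second
+# element being the suite's usedb flag from suiteGlobalConfig.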
+
+def filter_tests_by_tag(tests, tag_query):
+ """Selects tests from a list based on a query over the tags in the tests."""
+
+ test_map = {}
+ roots = []
+ for test in tests:
+ root = os.path.abspath(test[0])
+ roots.append(root)
+ test_map[root] = test
+
+ new_style_tests = smoke.tests.build_tests(roots, extract_metadata=True)
+ new_style_tests = smoke.suites.build_suite(new_style_tests, tag_query)
+
+ print "\nTag query matches %s tests out of %s.\n" % (len(new_style_tests),
+ len(tests))
+
+ tests = []
+ for new_style_test in new_style_tests:
+ tests.append(test_map[os.path.abspath(new_style_test.filename)])
+
+ return tests
+
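+# e.g. (hypothetical tag): filter_tests_by_tag(tests,
+#     smoke.suites.RegexQuery(include_res=[re.compile("slow")]))
+# keeps only the tests whose embedded metadata tags match /slow/.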
+
+def add_exe(e):
+ if os.sys.platform.startswith( "win" ) and not e.endswith( ".exe" ):
+ e += ".exe"
+ return e
+
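+# e.g. add_exe("mongod") -> "mongod.exe" on Windows and "mongod" elsewhere;
+# names already ending in ".exe" pass through unchanged.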
+
+def set_globals(options, tests):
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure
+ global small_oplog, small_oplog_rs
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, storage_engine, wiredtiger_engine_config_string, wiredtiger_collection_config_string, wiredtiger_index_config_string
+ global auth, authMechanism, keyFile, keyFileData, smoke_db_prefix, test_path, start_mongod
+ global use_ssl, use_x509
+ global file_of_commands_mode
+ global report_file, shell_write_mode, use_write_commands
+ global temp_path
+ global clean_every_n_tests
+ global clean_whole_dbroot
+
+ start_mongod = options.start_mongod
+ if hasattr(options, 'use_ssl'):
+ use_ssl = options.use_ssl
+ if hasattr(options, 'use_x509'):
+ use_x509 = options.use_x509
+ use_ssl = use_ssl or use_x509
+    # Careful, this can be called multiple times
+ test_path = options.test_path
+
+ mongod_executable = add_exe(options.mongod_executable)
+ if not os.path.exists(mongod_executable):
+ raise Exception("no mongod found in this directory.")
+
+ mongod_port = options.mongod_port
+
+ shell_executable = add_exe( options.shell_executable )
+ if not os.path.exists(shell_executable):
+ raise Exception("no mongo shell found in this directory.")
+
+ continue_on_failure = options.continue_on_failure
+ smoke_db_prefix = options.smoke_db_prefix
+ small_oplog = options.small_oplog
+ if hasattr(options, "small_oplog_rs"):
+ small_oplog_rs = options.small_oplog_rs
+ no_journal = options.no_journal
+ storage_engine = options.storage_engine
+ wiredtiger_engine_config_string = options.wiredtiger_engine_config_string
+ wiredtiger_collection_config_string = options.wiredtiger_collection_config_string
+ wiredtiger_index_config_string = options.wiredtiger_index_config_string
+ set_parameters = options.set_parameters
+ set_parameters_mongos = options.set_parameters_mongos
+ no_preallocj = options.no_preallocj
+ auth = options.auth
+ authMechanism = options.authMechanism
+ keyFile = options.keyFile
+
+ clean_every_n_tests = options.clean_every_n_tests
+ clean_whole_dbroot = options.with_cleanbb
+
+ if auth and not keyFile:
+ # if only --auth was given to smoke.py, load the
+ # default keyFile from jstests/libs/authTestsKey
+ keyFile = os.path.join(mongo_repo, 'jstests', 'libs', 'authTestsKey')
+
+ if keyFile:
+ f = open(keyFile, 'r')
+ keyFileData = re.sub(r'\s', '', f.read()) # Remove all whitespace
+ f.close()
+ os.chmod(keyFile, stat.S_IRUSR | stat.S_IWUSR)
+ else:
+ keyFileData = None
+
+ # if smoke.py is running a list of commands read from a
+ # file (or stdin) rather than running a suite of js tests
+ file_of_commands_mode = options.File and options.mode == 'files'
+ # generate json report
+ report_file = options.report_file
+ temp_path = options.temp_path
+
+ use_write_commands = options.use_write_commands
+ shell_write_mode = options.shell_write_mode
+
+def file_version():
+ return md5(open(__file__, 'r').read()).hexdigest()
+
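+# file_version() fingerprints this script itself; the failfile records the
+# digest so run_old_fails() can detect a failfile written by an older copy of
+# smoke.py and discard it rather than trust it.
+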
+def clear_failfile():
+ if os.path.exists(failfile):
+ os.remove(failfile)
+
+def run_old_fails():
+ global tests
+
+ try:
+ f = open(failfile, 'r')
+ state = pickle.load(f)
+ f.close()
+ except Exception:
+ try:
+ f.close()
+ except:
+ pass
+ clear_failfile()
+ return # This counts as passing so we will run all tests
+
+ if ('version' not in state or state['version'] != file_version()):
+ print "warning: old version of failfile.smoke detected. skipping recent fails"
+ clear_failfile()
+ return
+
+ testsAndOptions = state['testsAndOptions']
+ tests = [x[0] for x in testsAndOptions]
+ passed = []
+ try:
+ for (i, (test, options)) in enumerate(testsAndOptions):
+ # SERVER-5102: until we can figure out a better way to manage
+ # dependencies of the --only-old-fails build phase, just skip
+ # tests which we can't safely run at this point
+ path, usedb = test
+
+ if not os.path.exists(path):
+ passed.append(i)
+ winners.append(test)
+ continue
+
+ filename = os.path.basename(path)
+ if filename in ('dbtest', 'dbtest.exe') or filename.endswith('.js'):
+ set_globals(options, [filename])
+ oldWinners = len(winners)
+ run_tests([test])
+ if len(winners) != oldWinners: # can't use return value due to continue_on_failure
+ passed.append(i)
+ finally:
+ for offset, i in enumerate(passed):
+ testsAndOptions.pop(i - offset)
+
+ if testsAndOptions:
+ f = open(failfile, 'w')
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ pickle.dump(state, f)
+ else:
+ clear_failfile()
+
+ report() # exits with failure code if there is an error
+
+def add_to_failfile(tests, options):
+ try:
+ f = open(failfile, 'r')
+ testsAndOptions = pickle.load(f)["testsAndOptions"]
+ except Exception:
+ testsAndOptions = []
+
+ for test in tests:
+ if (test, options) not in testsAndOptions:
+ testsAndOptions.append( (test, options) )
+
+ state = {'version':file_version(), 'testsAndOptions':testsAndOptions}
+ f = open(failfile, 'w')
+ pickle.dump(state, f)
+
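+# The pickled failfile state has the shape (values illustrative):
+#   {'version': '<md5 hex digest of smoke.py>',
+#    'testsAndOptions': [(('jstests/core/foo.js', True), <options>), ...]}
+# run_old_fails() replays exactly those (test, options) pairs and prunes the
+# entries that pass.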
+
+
+def main():
+ global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog
+ global no_journal, set_parameters, set_parameters_mongos, no_preallocj, auth, storage_engine, wiredtiger_engine_config_string, wiredtiger_collection_config_string, wiredtiger_index_config_string
+ global keyFile, smoke_db_prefix, test_path, use_write_commands
+
+ try:
+ signal.signal(signal.SIGUSR1, dump_stacks)
+ except AttributeError:
+ print "Cannot catch signals on Windows"
+
+ parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
+ parser.add_option('--mode', dest='mode', default='suite',
+ help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
+ # Some of our tests hard-code pathnames e.g., to execute, so until
+ # that changes we don't have the freedom to run from anyplace.
+ # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
+ parser.add_option('--test-path', dest='test_path', default=None,
+ help="Path to the test executables to run, "
+ "currently only used for 'client' (%default)")
+ parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
+ help='Path to mongod to run (%default)')
+ parser.add_option('--port', dest='mongod_port', default="27999",
+ help='Port the mongod will bind to (%default)')
+ parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
+ help='Path to mongo, for .js test files (%default)')
+ parser.add_option('--continue-on-failure', dest='continue_on_failure',
+ action="store_true", default=False,
+ help='If supplied, continue testing even after a test fails')
+ parser.add_option('--from-file', dest='File',
+ help="Run tests/suites named in FILE, one test per line, '-' means stdin")
+ parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
+ help="Prefix to use for the mongods' dbpaths ('%default')")
+ parser.add_option('--small-oplog', dest='small_oplog', default=False,
+ action="store_true",
+ help='Run tests with master/slave replication & use a small oplog')
+ parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
+ action="store_true",
+ help='Run tests with replica set replication & use a small oplog')
+ parser.add_option('--storageEngine', dest='storage_engine', default=None,
+ help='What storage engine to start mongod with')
+ parser.add_option('--wiredTigerEngineConfig', dest='wiredtiger_engine_config_string', default=None,
+ help='Wired Tiger configuration to pass through to mongod')
+ parser.add_option('--wiredTigerCollectionConfig', dest='wiredtiger_collection_config_string', default=None,
+ help='Wired Tiger collection configuration to pass through to mongod')
+ parser.add_option('--wiredTigerIndexConfig', dest='wiredtiger_index_config_string', default=None,
+ help='Wired Tiger index configuration to pass through to mongod')
+ parser.add_option('--nojournal', dest='no_journal', default=False,
+ action="store_true",
+ help='Do not turn on journaling in tests')
+ parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
+ action="store_true",
+ help='Do not preallocate journal files in tests')
+ parser.add_option('--auth', dest='auth', default=False,
+ action="store_true",
+ help='Run standalone mongods in tests with authentication enabled')
+ parser.add_option('--use-x509', dest='use_x509', default=False,
+ action="store_true",
+ help='Use x509 auth for internal cluster authentication')
+ parser.add_option('--authMechanism', dest='authMechanism', default='SCRAM-SHA-1',
+ help='Use the given authentication mechanism, when --auth is used.')
+ parser.add_option('--keyFile', dest='keyFile', default=None,
+ help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
+ parser.add_option('--ignore', dest='ignore_files', default=None,
+ help='Pattern of files to ignore in tests')
+ parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
+ action="store_true",
+ help='Check the failfile and only run all tests that failed last time')
+ parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
+ action="store_true",
+ help='Clear the failfile. Do this if all tests pass')
+ parser.add_option('--with-cleanbb', dest='with_cleanbb', action="store_true",
+ default=False,
+ help='Clear database files before first test')
+ parser.add_option('--clean-every', dest='clean_every_n_tests', type='int',
+ default=(1 if 'detect_leaks=1' in os.getenv("ASAN_OPTIONS", "") else 20),
+ help='Clear database files every N tests [default %default]')
+ parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
+ action='store_false',
+ help='Do not start mongod before commencing test running')
+ parser.add_option('--use-ssl', dest='use_ssl', default=False,
+ action='store_true',
+ help='Run mongo shell and mongod instances with SSL encryption')
+ parser.add_option('--set-parameters', dest='set_parameters', default="",
+ help='Adds --setParameter to mongod for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--set-parameters-mongos', dest='set_parameters_mongos', default="",
+ help='Adds --setParameter to mongos for each passed in item in the csv list - ex. "param1=1,param2=foo" ')
+ parser.add_option('--temp-path', dest='temp_path', default=None,
+ help='If present, passed as --tempPath to unittests and dbtests or TestData.tmpPath to mongo')
+ # Buildlogger invocation from command line
+ parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
+ action="store", help='Set the "builder name" for buildlogger')
+ parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
+ action="store", help='Set the "build number" for buildlogger')
+ parser.add_option('--buildlogger-url', dest='buildlogger_url', default=None,
+ action="store", help='Set the url root for the buildlogger service')
+ parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
+ action="store", help='Path to Python file containing buildlogger credentials')
+ parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
+ action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')
+ parser.add_option('--report-file', dest='report_file', default=None,
+ action='store',
+ help='Path to generate detailed json report containing all test details')
+ parser.add_option('--use-write-commands', dest='use_write_commands', default=False,
+ action='store_true',
+ help='Deprecated(use --shell-write-mode): Sets the shell to use write commands by default')
+ parser.add_option('--shell-write-mode', dest='shell_write_mode', default="commands",
+                      help='Sets the shell to use a specific write mode: commands/compatibility/legacy (default: %default)')
+
+ parser.add_option('--include-tags', dest='include_tags', default="", action='store',
+ help='Filters jstests run by tag regex(es) - a tag in the test must match the regexes. ' +
+ 'Specify single regex string or JSON array.')
+
+ parser.add_option('--exclude-tags', dest='exclude_tags', default="", action='store',
+ help='Filters jstests run by tag regex(es) - no tags in the test must match the regexes. ' +
+ 'Specify single regex string or JSON array.')
+
+ global tests
+ (options, tests) = parser.parse_args()
+
+ set_globals(options, tests)
+
+ buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
+ if all(buildlogger_opts):
+ os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
+ os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
+ os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
+ os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
+ if options.buildlogger_phase:
+ os.environ['MONGO_PHASE'] = options.buildlogger_phase
+ elif any(buildlogger_opts):
+        # some but not all of the required options were set
+ raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")
+
+ if options.buildlogger_url: #optional; if None, defaults to const in buildlogger.py
+ os.environ['BUILDLOGGER_URL'] = options.buildlogger_url
+
+ if options.File:
+ if options.File == '-':
+ tests = sys.stdin.readlines()
+ else:
+ f = open(options.File)
+ tests = f.readlines()
+ tests = [t.rstrip('\n') for t in tests]
+
+ if options.only_old_fails:
+ run_old_fails()
+ return
+ elif options.reset_old_fails:
+ clear_failfile()
+ return
+
+ # If we're in suite mode, tests is a list of names of sets of tests.
+ if options.mode == 'suite':
+ tests = expand_suites(tests)
+ elif options.mode == 'files':
+ tests = [(os.path.abspath(test), start_mongod) for test in tests]
+
+    if options.ignore_files != None:
+ ignore_patt = re.compile( options.ignore_files )
+ print "Ignoring files with pattern: ", ignore_patt
+
+ def ignore_test( test ):
+ if ignore_patt.search( test[0] ) != None:
+ print "Ignoring test ", test[0]
+ return False
+ else:
+ return True
+
+ tests = filter( ignore_test, tests )
+
+ if options.include_tags or options.exclude_tags:
+
+ def to_regex_array(tags_option):
+ if not tags_option:
+ return []
+
+ tags_list = smoke.json_options.json_coerce(tags_option)
+ if isinstance(tags_list, basestring):
+ tags_list = [tags_list]
+
+ return map(re.compile, tags_list)
+
+ tests = filter_tests_by_tag(tests,
+ smoke.suites.RegexQuery(include_res=to_regex_array(options.include_tags),
+ exclude_res=to_regex_array(options.exclude_tags)))
+
+ if not tests:
+ print "warning: no tests specified"
+ return
+
+ if options.with_cleanbb:
+ clean_dbroot(nokill=True)
+
+ test_report["start"] = time.time()
+ test_report["mongod_running_at_start"] = mongod().is_mongod_up(mongod_port)
+ try:
+ run_tests(tests)
+ finally:
+ add_to_failfile(fails, options)
+
+ test_report["end"] = time.time()
+ test_report["elapsed"] = test_report["end"] - test_report["start"]
+ test_report["failures"] = len(losers.keys())
+ test_report["mongod_running_at_end"] = mongod().is_mongod_up(mongod_port)
+ if report_file:
+ f = open( report_file, "wb" )
+ f.write( json.dumps( test_report, indent=4, separators=(',', ': ')) )
+ f.close()
+
+ report()
+
+if __name__ == "__main__":
+ main()
diff --git a/src/mongo/gotools/test/qa-tests/buildscripts/utils.py b/src/mongo/gotools/test/qa-tests/buildscripts/utils.py
new file mode 100644
index 00000000000..0a46ef440d4
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/buildscripts/utils.py
@@ -0,0 +1,235 @@
+
+import codecs
+import re
+import socket
+import time
+import os
+import os.path
+import itertools
+import subprocess
+import sys
+import hashlib
+
+# various utilities that are handy
+
+def getAllSourceFiles( arr=None , prefix="." ):
+ if arr is None:
+ arr = []
+
+ if not os.path.isdir( prefix ):
+ # assume a file
+ arr.append( prefix )
+ return arr
+
+ for x in os.listdir( prefix ):
+ if x.startswith( "." ) or x.startswith( "pcre-" ) or x.startswith( "32bit" ) or x.startswith( "mongodb-" ) or x.startswith("debian") or x.startswith( "mongo-cxx-driver" ):
+ continue
+ # XXX: Avoid conflict between v8 and v8-3.25 source files in
+ # src/mongo/scripting
+ # Remove after v8-3.25 migration.
+ if x.find("v8-3.25") != -1:
+ continue
+ full = prefix + "/" + x
+ if os.path.isdir( full ) and not os.path.islink( full ):
+ getAllSourceFiles( arr , full )
+ else:
+ if full.endswith( ".cpp" ) or full.endswith( ".h" ) or full.endswith( ".c" ):
+ full = full.replace( "//" , "/" )
+ arr.append( full )
+
+ return arr
+
+
+def getGitBranch():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return None
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version.split( "/" )
+ version = version[len(version)-1]
+ return version
+
+def getGitBranchString( prefix="" , postfix="" ):
+ t = re.compile( '[/\\\]' ).split( os.getcwd() )
+ if len(t) > 2 and t[len(t)-1] == "mongo":
+ par = t[len(t)-2]
+ m = re.compile( ".*_([vV]\d+\.\d+)$" ).match( par )
+ if m is not None:
+ return prefix + m.group(1).lower() + postfix
+ if par.find("Nightly") > 0:
+ return ""
+
+
+ b = getGitBranch()
+ if b == None or b == "master":
+ return ""
+ return prefix + b + postfix
+
+def getGitVersion():
+ if not os.path.exists( ".git" ) or not os.path.isdir(".git"):
+ return "nogitversion"
+
+ version = open( ".git/HEAD" ,'r' ).read().strip()
+ if not version.startswith( "ref: " ):
+ return version
+ version = version[5:]
+ f = ".git/" + version
+ if not os.path.exists( f ):
+ return version
+ return open( f , 'r' ).read().strip()
+
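+# e.g. with .git/HEAD containing "ref: refs/heads/master", getGitVersion()
+# reads .git/refs/heads/master and returns that commit hash; a detached HEAD
+# (a bare hash in .git/HEAD) is returned as-is.
+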
+def execsys( args ):
+    if isinstance( args , str ):
+        r = re.compile( r"\s+" )
+        args = r.split( args )
+    p = subprocess.Popen( args , stdout=subprocess.PIPE , stderr=subprocess.PIPE )
+    r = p.communicate()
+    return r
+
+def getprocesslist():
+ raw = ""
+ try:
+ raw = execsys( "/bin/ps axww" )[0]
+ except Exception,e:
+ print( "can't get processlist: " + str( e ) )
+
+ r = re.compile( "[\r\n]+" )
+ return r.split( raw )
+
+def removeIfInList( lst , thing ):
+ if thing in lst:
+ lst.remove( thing )
+
+def findVersion( root , choices ):
+ for c in choices:
+ if ( os.path.exists( root + c ) ):
+ return root + c
+    raise Exception( "can't find a version of [" + root + "] choices: " + str( choices ) )
+
+def choosePathExist( choices , default=None):
+ for c in choices:
+ if c != None and os.path.exists( c ):
+ return c
+ return default
+
+def filterExists(paths):
+ return filter(os.path.exists, paths)
+
+def ensureDir( name ):
+ d = os.path.dirname( name )
+ if not os.path.exists( d ):
+        print( "Creating dir: " + d )
+        os.makedirs( d )
+        if not os.path.exists( d ):
+            raise Exception( "Failed to create dir: " + d )
+
+
+def distinctAsString( arr ):
+ s = set()
+ for x in arr:
+ s.add( str(x) )
+ return list(s)
+
+def checkMongoPort( port=27017 ):
+ sock = socket.socket()
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ sock.settimeout(1)
+ sock.connect(("localhost", port))
+ sock.close()
+
+
+def didMongodStart( port=27017 , timeout=20 ):
+ while timeout > 0:
+ time.sleep( 1 )
+ try:
+ checkMongoPort( port )
+ return True
+ except Exception,e:
+ print( e )
+ timeout = timeout - 1
+ return False
+
+def which(executable):
+ if sys.platform == 'win32':
+ paths = os.environ.get('Path', '').split(';')
+ else:
+ paths = os.environ.get('PATH', '').split(':')
+
+ for path in paths:
+ path = os.path.expandvars(path)
+ path = os.path.expanduser(path)
+ path = os.path.abspath(path)
+ executable_path = os.path.join(path, executable)
+ if os.path.exists(executable_path):
+ return executable_path
+
+ return executable
+
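+# which() walks the PATH entries (";"-separated on win32, ":" elsewhere),
+# expanding variables and "~", and falls back to the bare name when nothing
+# matches; e.g. which("ls") would typically return "/bin/ls" (illustrative).
+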
+def md5sum( file ):
+ #TODO error handling, etc..
+ return execsys( "md5sum " + file )[0].partition(" ")[0]
+
+def md5string( a_string ):
+ return hashlib.md5(a_string).hexdigest()
+
+def find_python(min_version=(2, 5)):
+ try:
+ if sys.version_info >= min_version:
+ return sys.executable
+ except AttributeError:
+ # In case the version of Python is somehow missing sys.version_info or sys.executable.
+ pass
+
+ version = re.compile(r'[Pp]ython ([\d\.]+)', re.MULTILINE)
+ binaries = ('python27', 'python2.7', 'python26', 'python2.6', 'python25', 'python2.5', 'python')
+ for binary in binaries:
+ try:
+ out, err = subprocess.Popen([binary, '-V'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ for stream in (out, err):
+ match = version.search(stream)
+ if match:
+ versiontuple = tuple(map(int, match.group(1).split('.')))
+ if versiontuple >= min_version:
+ return which(binary)
+ except:
+ pass
+
+ raise Exception('could not find suitable Python (version >= %s)' % '.'.join(str(v) for v in min_version))
+
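+# Hedged sketch of the probe: find_python() normally just returns
+# sys.executable; only on an unusually old interpreter does it shell out to
+# "pythonX.Y -V" and parse the "Python X.Y.Z" banner to choose a binary.
+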
+def smoke_command(*args):
+ # return a list of arguments that comprises a complete
+ # invocation of smoke.py
+ here = os.path.dirname(__file__)
+ smoke_py = os.path.abspath(os.path.join(here, 'smoke.py'))
+ # the --with-cleanbb argument causes smoke.py to run
+ # buildscripts/cleanbb.py before each test phase; this
+ # prevents us from running out of disk space on slaves
+ return [find_python(), smoke_py, '--with-cleanbb'] + list(args)
+
+def run_smoke_command(*args):
+ # to run a command line script from a scons Alias (or any
+ # Action), the command sequence must be enclosed in a list,
+ # otherwise SCons treats it as a list of dependencies.
+ return [smoke_command(*args)]
+
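+# In an SConscript this might be wired up as (alias name hypothetical):
+#   env.Alias("smoke", [], run_smoke_command("jsCore"))
+# The extra list wrapper above is what keeps SCons from treating the argv
+# itself as a list of dependencies.
+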
+# unicode is a pain. some strings cannot be unicode()'d
+# but we want to just preserve the bytes in a human-readable
+# fashion. this codec error handler will substitute the
+# repr() of the offending bytes into the decoded string
+# at the position they occurred
+def replace_with_repr(unicode_error):
+ offender = unicode_error.object[unicode_error.start:unicode_error.end]
+ return (unicode(repr(offender).strip("'").strip('"')), unicode_error.end)
+
+codecs.register_error('repr', replace_with_repr)
+
+def unicode_dammit(string, encoding='utf8'):
+ # convert a string to a unicode, using the Python
+ # representation of non-ascii bytes when necessary
+ #
+    # name inspired by BeautifulSoup's "UnicodeDammit"
+ return string.decode(encoding, 'repr')
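+# e.g. (illustrative):
+#   unicode_dammit('caf\xc3\xa9 \xff')  ->  u'caf\xe9 \\xff'
+# the undecodable 0xff byte survives as its repr() instead of raising.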
+
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/all_types.js b/src/mongo/gotools/test/qa-tests/jstests/bson/all_types.js
new file mode 100644
index 00000000000..70d169685c8
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/all_types.js
@@ -0,0 +1,33 @@
+// This test runs bsondump on a .bson file containing non-deprecated BSON types
+// and makes sure their debug type values exist in the output.
+(function() {
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var x = _runMongoProgram("bsondump", "--type=debug", "jstests/bson/testdata/all_types.bson");
+ assert.eq(x, 0, "bsondump should exit successfully with 0");
+
+ var results;
+ assert.eq.soon(22, function() {
+ results = rawMongoProgramOutput();
+ return (results.match(/--- new object ---/g) || []).length;
+ }, "should see all documents from the test data");
+
+ assert.strContains("type: 1", results, "bson type '1' should be present in the debug output");
+ assert.strContains("type: 2", results, "bson type '2' should be present in the debug output");
+ assert.strContains("type: 3", results, "bson type '3' should be present in the debug output");
+ assert.strContains("type: 4", results, "bson type '4' should be present in the debug output");
+ assert.strContains("type: 5", results, "bson type '5' should be present in the debug output");
+ assert.strContains("type: 6", results, "bson type '6' should be present in the debug output");
+ assert.strContains("type: 7", results, "bson type '7' should be present in the debug output");
+ assert.strContains("type: 8", results, "bson type '8' should be present in the debug output");
+ assert.strContains("type: 9", results, "bson type '9' should be present in the debug output");
+ assert.strContains("type: 10", results, "bson type '10' should be present in the debug output");
+ assert.strContains("type: 11", results, "bson type '11' should be present in the debug output");
+ assert.strContains("type: 12", results, "bson type '12' should be present in the debug output");
+ assert.strContains("type: 13", results, "bson type '13' should be present in the debug output");
+ assert.strContains("type: 17", results, "bson type '17' should be present in the debug output");
+ assert.strContains("type: 18", results, "bson type '18' should be present in the debug output");
+ assert.strContains("type: -1", results, "bson type '-1' should be present in the debug output");
+ assert.strContains("type: 127", results, "bson type '127' should be present in the debug output");
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/all_types_json.js b/src/mongo/gotools/test/qa-tests/jstests/bson/all_types_json.js
new file mode 100644
index 00000000000..f64050cee28
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/all_types_json.js
@@ -0,0 +1,29 @@
+// This test runs bsondump on a .bson file containing non-deprecated BSON types
+// and makes sure their JSON type representations exist in the output.
+(function() {
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var x = _runMongoProgram("bsondump", "--type=json", "jstests/bson/testdata/all_types.bson");
+ assert.eq(x, 0, "bsondump should exit successfully with 0");
+
+ assert.strContains.soon("20 objects found", rawMongoProgramOutput,
+ "should print out all top-level documents from the test data");
+
+ var results = rawMongoProgramOutput();
+ assert.strContains("$binary", results, "bson type 'binary' should be present in the debug output");
+ assert.strContains("$date", results, "bson type 'date' should be present in the debug output");
+ assert.strContains("$timestamp", results, "bson type 'timestamp' should be present in the debug output");
+ assert.strContains("$regex", results, "bson type 'regex' should be present in the debug output");
+ assert.strContains("$oid", results, "bson type 'oid' should be present in the debug output");
+ assert.strContains("$undefined", results, "bson type 'undefined' should be present in the debug output");
+ assert.strContains("$minKey", results, "bson type 'min' should be present in the debug output");
+ assert.strContains("$maxKey", results, "bson type 'max' should be present in the debug output");
+ assert.strContains("$numberLong", results, "bson type 'long' should be present in the debug output");
+ assert.strContains("$ref", results, "bson type 'dbref' should be present in the debug output");
+ assert.strContains("$id", results, "bson type 'dbref' should be present in the debug output");
+ assert.strContains("$code", results, "bson type 'javascript' should be present in the debug output");
+ assert.strContains("null", results, "bson type 'null' should be present in the debug output");
+ assert.strContains("true", results, "bson type 'true' should be present in the debug output");
+ assert.strContains("false", results, "bson type 'false' should be present in the debug output");
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/bad_files.js b/src/mongo/gotools/test/qa-tests/jstests/bson/bad_files.js
new file mode 100644
index 00000000000..1728aa1e999
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/bad_files.js
@@ -0,0 +1,41 @@
+// This test makes sure that certain invalid BSON succeeds or fails
+// with both JSON and debug output types AND --objcheck
+(function() {
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/random_bytes.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given random bytes");
+ x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/bad_cstring.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given a non-terminated cstring");
+ x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/bad_type.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given a bad type value");
+ x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/partial_file.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given only the start of a file");
+ x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/invalid_field_name.bson");
+ assert.neq(x, 0, "bsondump should exit with an error given invalid field names");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/random_bytes.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given random bytes");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/bad_cstring.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given a non-terminated cstring");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/bad_type.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given a bad type value");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/partial_file.bson");
+ assert.neq(x, 0, "bsondump should exit with an error when given only the start of a file");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/invalid_field_name.bson");
+ assert.neq(x, 0, "bsondump should exit with an error given invalid field names");
+
+ // This should pass, but the content of the output might be erroneous
+ x = _runMongoProgram("bsondump", "--objcheck", "jstests/bson/testdata/broken_array.bson");
+ assert.eq(x, 0, "bsondump should exit with success when given a bad array document");
+ x = _runMongoProgram("bsondump", "--objcheck", "--type=debug", "jstests/bson/testdata/broken_array.bson");
+ assert.eq(x, 0, "bsondump should exit with success when given a bad array document");
+
+ // Make sure recoverable cases do not return an error by default
+ clearRawMongoProgramOutput();
+ x = _runMongoProgram("bsondump", "jstests/bson/testdata/bad_cstring.bson");
+ assert.eq(x, 0, "bsondump should not exit with an error when given a non-terminated cstring without --objcheck");
+ assert.strContains.soon("corrupted", rawMongoProgramOutput,
+ "one of the documents should have been labelled as corrupted");
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/bsondump_options.js b/src/mongo/gotools/test/qa-tests/jstests/bson/bsondump_options.js
new file mode 100644
index 00000000000..1b81c2a419e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/bsondump_options.js
@@ -0,0 +1,57 @@
+// This test checks reasonable and unreasonable option configurations for bsondump
+(function() {
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var sampleFilepath = "jstests/bson/testdata/sample.bson";
+ var x = _runMongoProgram("bsondump", "--type=fake", sampleFilepath);
+ assert.neq(x, 0, "bsondump should exit with failure when given a non-existent type");
+
+ x = _runMongoProgram("bsondump", "jstests/bson/testdata/asdfasdfasdf");
+ assert.neq(x, 0, "bsondump should exit with failure when given a non-existent file");
+
+ x = _runMongoProgram("bsondump", "--noobjcheck", sampleFilepath);
+ assert.neq(x, 0, "bsondump should exit with failure when given --noobjcheck");
+
+ x = _runMongoProgram("bsondump", "--collection", sampleFilepath);
+ assert.neq(x, 0, "bsondump should exit with failure when given --collection");
+
+ x = _runMongoProgram("bsondump", sampleFilepath, sampleFilepath);
+ assert.neq(x, 0, "bsondump should exit with failure when given multiple files");
+
+ x = _runMongoProgram("bsondump", '--bsonFile', sampleFilepath, sampleFilepath);
+  assert.neq(x, 0, "bsondump should exit with failure when given both --bsonFile and a positional argument");
+
+ x = _runMongoProgram("bsondump", "-vvvv", sampleFilepath);
+ assert.eq(x, 0, "bsondump should exit with success when given verbosity");
+ x = _runMongoProgram("bsondump", "--verbose", sampleFilepath);
+ assert.eq(x, 0, "bsondump should exit with success when given verbosity");
+
+ clearRawMongoProgramOutput();
+ var pid = _startMongoProgram("bsondump", "--quiet", sampleFilepath);
+ assert.eq(waitProgram(pid), 0, "bsondump should exit with success when given --quiet");
+ assert.strContains.soon("I am a string", rawMongoProgramOutput,
+ "found docs should still be printed when --quiet is used");
+ assert.eq.soon(-1, function() {
+ return rawMongoProgramOutput()
+ .split("\n")
+ .filter(function(line) {
+ return line.indexOf("sh"+pid+"| ") === 0;
+ })
+ .join("\n")
+ .indexOf("found");
+ }, "only the found docs should be printed when --quiet is used");
+
+ clearRawMongoProgramOutput();
+ x = _runMongoProgram("bsondump", "--help");
+ assert.eq(x, 0, "bsondump should exit with success when given --help");
+ assert.strContains.soon("Usage", rawMongoProgramOutput,
+ "help text should be printed when given --help");
+
+ clearRawMongoProgramOutput();
+ x = _runMongoProgram("bsondump", "--version");
+ assert.eq(x, 0, "bsondump should exit with success when given --version");
+ assert.strContains.soon("version", rawMongoProgramOutput,
+ "version info should be printed when given --version");
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/deep_nested.js b/src/mongo/gotools/test/qa-tests/jstests/bson/deep_nested.js
new file mode 100644
index 00000000000..1a226c81fc3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/deep_nested.js
@@ -0,0 +1,8 @@
+// This test checks that bsondump can handle a deeply nested document without breaking
+
+(function() {
+ var x = _runMongoProgram("bsondump", "--type=json", "jstests/bson/testdata/deep_nested.bson");
+ assert.eq(x, 0, "bsondump should exit successfully with 0");
+ x = _runMongoProgram("bsondump", "--type=debug", "jstests/bson/testdata/deep_nested.bson");
+ assert.eq(x, 0, "bsondump should exit successfully with 0");
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/output_file.js b/src/mongo/gotools/test/qa-tests/jstests/bson/output_file.js
new file mode 100644
index 00000000000..1e339d6dcb1
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/output_file.js
@@ -0,0 +1,71 @@
+/**
+ * output_file.js
+ *
+ * This file tests outputting bsondump to a file when the input is from a file.
+ */
+
+(function() {
+ 'use strict';
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('bson_output_file');
+ var commonToolArgs = getCommonToolArguments();
+
+ // The db and collections we'll use.
+ var testDB = toolTest.db.getSiblingDB('test');
+ var destColl = testDB.bsondump;
+
+  // Test using a flag to specify the output file.
+ var exportTarget = 'bson_dump.json';
+ removeFile(exportTarget);
+
+ var ret = _runMongoProgram("bsondump",
+ "--type=json",
+ "--bsonFile", "jstests/bson/testdata/sample.bson",
+ "--outFile", exportTarget);
+ assert.eq(ret, 0, "bsondump should exit successfully with 0");
+
+ // Import the data into the destination collection to check correctness.
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'bsondump',
+ '--type', 'json']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // Make sure everything was dumped.
+ assert.eq(1, destColl.count({a: 1.0}));
+ assert.eq(1, destColl.count({a: 2.5}));
+ assert.eq(1, destColl.count({a: 4.0}));
+ assert.eq(1, destColl.count({a: 4.01}));
+
+
+ // Test using a positional argument to specify the output file.
+ removeFile(exportTarget);
+
+ ret = _runMongoProgram("bsondump",
+ "--type=json",
+ "--outFile", exportTarget,
+ "jstests/bson/testdata/sample.bson");
+ assert.eq(ret, 0, "bsondump should exit successfully with 0");
+
+ // Import the data into the destination collection to check correctness.
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'bsondump',
+ '--type', 'json']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // Make sure everything was dumped.
+ assert.eq(1, destColl.count({a: 1.0}));
+ assert.eq(1, destColl.count({a: 2.5}));
+ assert.eq(1, destColl.count({a: 4.0}));
+ assert.eq(1, destColl.count({a: 4.01}));
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/all_types.bson b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/all_types.bson
new file mode 100644
index 00000000000..1a1f3a923f9
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/all_types.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/bad_cstring.bson b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/bad_cstring.bson
new file mode 100644
index 00000000000..70e2f9c273a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/bad_cstring.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/bad_type.bson b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/bad_type.bson
new file mode 100644
index 00000000000..b21ed025bc2
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/bad_type.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/broken_array.bson b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/broken_array.bson
new file mode 100644
index 00000000000..35d9a4f6b5d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/broken_array.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/deep_nested.bson b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/deep_nested.bson
new file mode 100644
index 00000000000..68477b2aca3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/deep_nested.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/invalid_field_name.bson b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/invalid_field_name.bson
new file mode 100644
index 00000000000..2a3aabd80a6
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/invalid_field_name.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/partial_file.bson b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/partial_file.bson
new file mode 100644
index 00000000000..a571f10d06c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/partial_file.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/random_bytes.bson b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/random_bytes.bson
new file mode 100644
index 00000000000..d2558fd11c2
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/random_bytes.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/sample.bson b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/sample.bson
new file mode 100644
index 00000000000..35b3b843d66
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/bson/testdata/sample.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/common/check_version.js b/src/mongo/gotools/test/qa-tests/jstests/common/check_version.js
new file mode 100644
index 00000000000..cf151e41e75
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/common/check_version.js
@@ -0,0 +1,56 @@
+/**
+ * Given a MongoDB version, parses it into its major/minor/patch components,
+ * discounting '-pre' and '-rcX'. Useful for parsing the output of
+ * `db.version()` into an appropriate form for comparisons.
+ *
+ * Examples:
+ * getVersionComponents('2.7.8'); // { major: 2, minor: 7, patch: 8 }
+ * getVersionComponents('2.8.0-rc0'); // { major: 2, minor: 8, patch: 0 }
+ */
+var getVersionComponents = function(version) {
+ var splitVersion = version.split('.');
+ assert.eq(3, splitVersion.length);
+ var major = parseInt(splitVersion[0], 10);
+ var minor = parseInt(splitVersion[1], 10);
+
+ var patchEnd = splitVersion[2].indexOf('-') !== -1 ?
+ splitVersion[2].indexOf('-') :
+ undefined;
+  var patch = parseInt(splitVersion[2].substr(0, patchEnd), 10);
+ return {
+ major: major,
+ minor: minor,
+ patch: patch,
+ };
+};
+
+/**
+ * Given two versions, returns true if the first version is >= the second.
+ *
+ * Examples:
+ * isAtLeastVersion('2.7.8', '2.7.8'); // true
+ * isAtLeastVersion('2.8.0-rc0', '2.7.8'); // true
+ * isAtLeastVersion('2.6.6', '2.7.8'); // false
+ * isAtLeastVersion('1.8.5', '2.7.8'); // false
+ */
+/* exported isAtLeastVersion */
+var isAtLeastVersion = function(serverVersion, checkVersion) {
+ serverVersion = getVersionComponents(serverVersion);
+ checkVersion = getVersionComponents(checkVersion);
+
+ return (checkVersion.major < serverVersion.major) ||
+ (checkVersion.major === serverVersion.major &&
+ checkVersion.minor < serverVersion.minor) ||
+ (checkVersion.major === serverVersion.major &&
+ checkVersion.minor === serverVersion.minor &&
+ checkVersion.patch <= serverVersion.patch);
+};
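+
+/* Illustrative usage (hypothetical snippet, not defined in this file):
+ * a test can gate itself on the server version, e.g.
+ *
+ *   if (!isAtLeastVersion(db.version(), '2.8.0')) {
+ *     jsTest.log('skipping test; requires server >= 2.8.0');
+ *     return;
+ *   }
+ */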
diff --git a/src/mongo/gotools/test/qa-tests/jstests/common/topology_helper.js b/src/mongo/gotools/test/qa-tests/jstests/common/topology_helper.js
new file mode 100644
index 00000000000..a3bd8773a9b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/common/topology_helper.js
@@ -0,0 +1,178 @@
+// topology_helper.js: utility functions for running tests against different server topologies
+
+// auth related variables
+var authUser = 'user';
+var authPassword = 'password';
+var authArgs = [
+ '--authenticationDatabase', 'admin',
+ '--authenticationMechanism', 'SCRAM-SHA-1',
+ '-u', authUser,
+ '-p', authPassword
+];
+var keyFile = 'jstests/libs/key1';
+
+// topology startup settings
+var auth = {
+ name: 'auth',
+ args: authArgs,
+};
+
+var plain = {
+ name: 'plain',
+ args: [],
+};
+
+/* exported passthroughs */
+// passthroughs while running all tests
+var passthroughs = [plain, auth];
+
+/* helper functions */
+
+// runAuthSetup creates a user with the root role on the admin database
+var runAuthSetup = function(topology) {
+ jsTest.log('Running auth setup');
+
+ var conn = topology.connection();
+ var db = conn.getDB('test');
+
+ db.getSiblingDB('admin').createUser({
+ user: authUser,
+ pwd: authPassword,
+ roles: ['root'],
+ });
+
+ assert.eq(db.getSiblingDB('admin').auth(authUser, authPassword), 1, 'authentication failed');
+};
+
+// buildStartupArgs constructs the startup arguments object used to start
+// mongod
+var buildStartupArgs = function(passthrough) {
+ var startupArgs = {};
+ if (passthrough.name === auth.name) {
+ startupArgs.auth = '';
+ startupArgs.keyFile = keyFile;
+ }
+ return startupArgs;
+};
+
+// requiresAuth returns a boolean indicating whether the passthrough requires authentication
+var requiresAuth = function(passthrough) {
+ return passthrough.name === auth.name;
+};
+
+/* standalone topology */
+/* exported standaloneTopology */
+var standaloneTopology = {
+ init: function(passthrough) {
+ jsTest.log('Using standalone topology');
+
+ passthrough = passthrough || [];
+ var startupArgs = buildStartupArgs(passthrough);
+ startupArgs.port = allocatePorts(1)[0];
+ this.conn = MongoRunner.runMongod(startupArgs);
+
+ // set up the auth user if needed
+ if (requiresAuth(passthrough)) {
+ runAuthSetup(this);
+ }
+ return this;
+ },
+ connection: function() {
+ return this.conn;
+ },
+ stop: function() {
+ MongoRunner.stopMongod(this.conn);
+ },
+};
+
+
+/* replica set topology */
+/* exported replicaSetTopology */
+var replicaSetTopology = {
+ init: function(passthrough) {
+ jsTest.log('Using replica set topology');
+
+ passthrough = passthrough || [];
+ var startupArgs = buildStartupArgs(passthrough);
+ startupArgs.name = testName;
+ startupArgs.nodes = 2;
+ this.replTest = new ReplSetTest(startupArgs);
+
+ // start the replica set
+ this.replTest.startSet();
+ jsTest.log('Started replica set');
+
+ // initiate the replica set with a default config
+ this.replTest.initiate();
+ jsTest.log('Initiated replica set');
+
+ // block till the set is fully operational
+ this.replTest.awaitSecondaryNodes();
+ jsTest.log('Replica set fully operational');
+
+ // set up the auth user if needed
+ if (requiresAuth(passthrough)) {
+ runAuthSetup(this);
+ }
+ return this;
+ },
+ connection: function() {
+ return this.replTest.getPrimary();
+ },
+ stop: function() {
+ this.replTest.stopSet();
+ },
+};
+
+
+/* sharded cluster topology */
+/* exported shardedClusterTopology */
+var shardedClusterTopology = {
+ init: function(passthrough) {
+ jsTest.log('Using sharded cluster topology');
+
+ passthrough = passthrough || [];
+ var other = buildStartupArgs(passthrough);
+ var startupArgs = {};
+ startupArgs.name = testName;
+ startupArgs.mongos = 1;
+ startupArgs.shards = 1;
+
+ // set up the auth user if needed
+ if (requiresAuth(passthrough)) {
+ startupArgs.keyFile = keyFile;
+ startupArgs.other = {
+ shardOptions: other,
+ };
+ this.shardingTest = new ShardingTest(startupArgs);
+ runAuthSetup(this);
+ } else {
+ startupArgs.other = {
+ shardOptions: other,
+ };
+ this.shardingTest = new ShardingTest(startupArgs);
+ }
+ return this;
+ },
+ connection: function() {
+ return this.shardingTest.s;
+ },
+ stop: function() {
+ this.shardingTest.stop();
+ },
+};
+
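+/* Illustrative usage (hypothetical, not part of this file): every topology
+ * exposes the same init/connection/stop interface, so a test can be written
+ * once and run against each passthrough, e.g.
+ *
+ *   passthroughs.forEach(function(passthrough) {
+ *     var topology = standaloneTopology.init(passthrough);
+ *     var ret = runMongoProgram.apply(null, ['mongoexport',
+ *       '--host', topology.connection().host,
+ *       '--db', 'test', '--collection', 'data']
+ *       .concat(passthrough.args));
+ *     assert.eq(0, ret);
+ *     topology.stop();
+ *   });
+ */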
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/archive_targets.js b/src/mongo/gotools/test/qa-tests/jstests/configs/archive_targets.js
new file mode 100644
index 00000000000..9bdf3b4c5b3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/archive_targets.js
@@ -0,0 +1,37 @@
+var getDumpTarget;
+
+(function() {
+ if (getDumpTarget === undefined) {
+ getDumpTarget = function(target) {
+ if (!target) {
+ return ["--archive=dump.archive"];
+ }
+ return ["--archive="+target];
+ };
+ }
+}());
+
+var getRestoreTarget;
+
+/* exported dump_targets */
+var dump_targets = "archive";
+
+(function() {
+ if (getRestoreTarget === undefined) {
+ getRestoreTarget = function(target) {
+ if (!target) {
+ return ["--archive=dump.archive"];
+ }
+      var targetParts = target.split("/");
+ if (targetParts[0] === "dump") {
+ return ["--archive=dump.archive"];
+ }
+ return ["--archive="+targetParts[0]];
+ };
+ }
+}());
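+
+// Illustrative note: with this config every dump/restore target collapses to
+// a single archive file; from the functions above,
+//   getDumpTarget()              returns ["--archive=dump.archive"]
+//   getRestoreTarget("dump/foo") returns ["--archive=dump.archive"]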
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/auth_28.config.js b/src/mongo/gotools/test/qa-tests/jstests/configs/auth_28.config.js
new file mode 100644
index 00000000000..744bb237d79
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/auth_28.config.js
@@ -0,0 +1,38 @@
+/* exported getToolTest */
+var getToolTest;
+var AUTH_USER = 'passwordIsTaco';
+var AUTH_PASSWORD = 'Taco';
+
+(function() {
+ var TOOLS_TEST_CONFIG = {
+ binVersion: '',
+ auth: '',
+ };
+
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, TOOLS_TEST_CONFIG);
+ var db = toolTest.startDB();
+
+ db.getSiblingDB('admin').createUser({
+ user: AUTH_USER,
+ pwd: AUTH_PASSWORD,
+ roles: ['__system'],
+ });
+
+ db.getSiblingDB('admin').auth(AUTH_USER, AUTH_PASSWORD);
+
+ toolTest.authCommand = "db.getSiblingDB('admin').auth('" + AUTH_USER
+ + "', '" + AUTH_PASSWORD + "');";
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [
+ '--username', AUTH_USER,
+ '--password', AUTH_PASSWORD,
+ '--authenticationDatabase', 'admin'
+ ];
+};
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/gzip_targets.js b/src/mongo/gotools/test/qa-tests/jstests/configs/gzip_targets.js
new file mode 100644
index 00000000000..d57b0c55954
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/gzip_targets.js
@@ -0,0 +1,41 @@
+var getDumpTarget;
+
+(function() {
+ if (getDumpTarget === undefined) {
+ getDumpTarget = function(target) {
+ if (target === undefined) {
+ return ["--gzip"];
+ }
+ if (target.indexOf(".bson", target.length - 5) !== -1) {
+ return ["--gzip", "--out="+target+".gz"];
+ }
+ return ["--gzip", "--out="+target];
+ };
+ }
+}());
+
+var dump_targets;
+if (!dump_targets) {
+ dump_targets = "gzip";
+}
+
+var getRestoreTarget;
+
+(function() {
+ if (getRestoreTarget === undefined) {
+ getRestoreTarget = function(target) {
+ if (target === undefined) {
+ return ["--gzip"];
+ }
+ if (target.indexOf(".bson", target.length - 5) !== -1) {
+ return ["--gzip", "--dir="+target+".gz"];
+ }
+ return ["--gzip", "--dir="+target];
+ };
+ }
+}());
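+
+// Illustrative note: a target ending in ".bson" gets a ".gz" suffix so the
+// tools address the compressed file; from the functions above,
+//   getDumpTarget("dump/foo.bson")    returns ["--gzip", "--out=dump/foo.bson.gz"]
+//   getRestoreTarget("dump/foo.bson") returns ["--gzip", "--dir=dump/foo.bson.gz"]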
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos.config.yml b/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos.config.yml
new file mode 100644
index 00000000000..a2f7fdc5202
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos.config.yml
@@ -0,0 +1,7 @@
+security:
+ authorization: enabled
+ sasl:
+ serviceName: mockservice
+ hostName: kdc.10gen.me
+setParameter:
+ authenticationMechanisms: GSSAPI
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos.linux.sh b/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos.linux.sh
new file mode 100644
index 00000000000..d2f54971f1a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos.linux.sh
@@ -0,0 +1,7 @@
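+# Point this host at the mock KDC, give the test server a resolvable name,
+# install the mock krb5 config, and acquire a ticket for the mock user.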
+echo "107.23.89.149 kdc.10gen.me" | sudo tee -a /etc/hosts
+echo "127.0.0.1 testserver.10gen.me" | sudo tee -a /etc/hosts
+sudo hostname "testserver.10gen.me"
+sudo cp jstests/libs/mockkrb5.conf /etc/krb5.conf
+kinit -p mockuser@10GEN.ME -k -t jstests/libs/mockuser.keytab
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos_28.config.js b/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos_28.config.js
new file mode 100644
index 00000000000..c00e8819e9a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos_28.config.js
@@ -0,0 +1,39 @@
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ var AUTH_USER = 'mockuser@10GEN.ME';
+
+ var TOOLS_TEST_CONFIG = {
+ binVersion: '',
+ config: 'jstests/configs/kerberos.config.yml',
+ };
+
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, TOOLS_TEST_CONFIG);
+ var db = toolTest.startDB();
+
+ db.getSiblingDB('$external').createUser({
+ user: AUTH_USER,
+ roles: [{role: '__system', db: 'admin'}],
+ });
+
+ db.getSiblingDB('$external').auth({user: AUTH_USER, mechanism: 'GSSAPI', serviceName: 'mockservice', serviceHostname: 'kdc.10gen.me'});
+
+ toolTest.authCommand = "db.getSiblingDB('$external').auth({ user: '"
+ + AUTH_USER + "', mechanism: 'GSSAPI', serviceName: 'mockservice', serviceHostname: 'kdc.10gen.me' });";
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [
+ '--username', 'mockuser@10GEN.ME',
+ '--authenticationDatabase', '$external',
+ '--authenticationMechanism', 'GSSAPI',
+ '--gssapiServiceName', 'mockservice',
+ '--gssapiHostName', 'kdc.10gen.me'
+ ];
+};
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos_28_windows.config.js b/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos_28_windows.config.js
new file mode 100644
index 00000000000..481862f23f3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/kerberos_28_windows.config.js
@@ -0,0 +1,62 @@
+/** NOTE: this config uses a static Kerberos instance running on an EC2
+ * machine outside our security group. It should NOT be used for
+ * automated tests, because it's a single instance and there's no
+ * automated way to generate more instances just yet. */
+
+/** NOTE: you need to add a registry entry for the MADHACKER.BIZ Kerberos
+ * realm before using this:
+ * cmd /c "REG ADD HKLM\SYSTEM\ControlSet001\Control\Lsa\Kerberos\Domains\MADHACKER.BIZ /v KdcNames /d karpov.madhacker.biz /t REG_MULTI_SZ /f"
+ */
+
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, {});
+ var db;
+
+ db = toolTest.db = new Mongo(AUTH_HOSTNAME + ':27017').getDB('test');
+
+ /** Overwrite so toolTest.runTool doesn't append --host */
+ ToolTest.prototype.runTool = function() {
+ arguments[0] = 'mongo' + arguments[0];
+ return runMongoProgram.apply(null, arguments);
+ };
+
+ db.getSiblingDB('$external').auth({
+ user: AUTH_USER,
+ pwd: AUTH_PASSWORD,
+ mechanism: 'GSSAPI',
+ serviceName: 'mongodb',
+ serviceHostname: AUTH_HOSTNAME,
+ });
+
+ toolTest.authCommand = "db.getSiblingDB('$external').auth({ user: '"
+ + AUTH_USER + "', pwd: '" + AUTH_PASSWORD
+ + "', mechanism: 'GSSAPI', serviceName: 'mongodb', serviceHostname: '"
+ + AUTH_HOSTNAME + "' });";
+
+ toolTest.stop = function() {
+ print('No need to stop on Kerberos windows config. Test succeeded');
+ };
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [
+ '--username', AUTH_USER,
+ '--password', AUTH_PASSWORD,
+ '--host', AUTH_HOSTNAME,
+ '--authenticationDatabase', '$external',
+ '--authenticationMechanism', 'GSSAPI',
+ '--gssapiServiceName', 'mongodb',
+ '--gssapiHostName', AUTH_HOSTNAME
+ ];
+};
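+
+// Note: AUTH_USER, AUTH_PASSWORD, and AUTH_HOSTNAME are not defined in this
+// file; they are expected to be supplied externally before it is loaded.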
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/plain_26.config.js b/src/mongo/gotools/test/qa-tests/jstests/configs/plain_26.config.js
new file mode 100644
index 00000000000..4f6cd93f8e3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/plain_26.config.js
@@ -0,0 +1,19 @@
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ var TOOLS_TEST_CONFIG = {
+ binVersion: '2.6',
+ };
+
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, TOOLS_TEST_CONFIG);
+ toolTest.startDB();
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [];
+};
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/plain_28.config.js b/src/mongo/gotools/test/qa-tests/jstests/configs/plain_28.config.js
new file mode 100644
index 00000000000..bf71e5ca079
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/plain_28.config.js
@@ -0,0 +1,21 @@
+load("jstests/configs/standard_dump_targets.config.js");
+
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ var TOOLS_TEST_CONFIG = {
+ binVersion: '',
+ };
+
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, TOOLS_TEST_CONFIG);
+ toolTest.startDB();
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [];
+};
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/replset_28.config.js b/src/mongo/gotools/test/qa-tests/jstests/configs/replset_28.config.js
new file mode 100644
index 00000000000..82e9a10891d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/replset_28.config.js
@@ -0,0 +1,39 @@
+load("jstests/configs/standard_dump_targets.config.js");
+
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, null);
+
+ var replTest = new ReplSetTest({
+ name: 'tool_replset',
+ nodes: 3,
+ oplogSize: 5,
+ });
+
+ replTest.startSet();
+ replTest.initiate();
+ var master = replTest.getPrimary();
+
+ toolTest.m = master;
+ toolTest.db = master.getDB(name);
+ toolTest.port = replTest.getPort(master);
+
+ var oldStop = toolTest.stop;
+ toolTest.stop = function() {
+ replTest.stopSet();
+ oldStop.apply(toolTest, arguments);
+ };
+
+ toolTest.isReplicaSet = true;
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [];
+};
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/replset_auth_28.config.js b/src/mongo/gotools/test/qa-tests/jstests/configs/replset_auth_28.config.js
new file mode 100644
index 00000000000..ed4f0c9b5e5
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/replset_auth_28.config.js
@@ -0,0 +1,58 @@
+/* exported getToolTest */
+var getToolTest;
+
+var AUTH_USER = 'passwordIsTaco';
+var AUTH_PASSWORD = 'Taco';
+
+(function() {
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, null);
+
+ var replTest = new ReplSetTest({
+ name: 'tool_replset',
+ nodes: 3,
+ oplogSize: 5,
+ auth: '',
+ keyFile: 'jstests/libs/key1',
+ });
+
+    replTest.startSet();
+ replTest.initiate();
+ var master = replTest.getPrimary();
+
+ toolTest.m = master;
+ toolTest.db = master.getDB(name);
+ toolTest.port = replTest.getPort(master);
+
+ var db = toolTest.db;
+ db.getSiblingDB('admin').createUser({
+ user: AUTH_USER,
+ pwd: AUTH_PASSWORD,
+ roles: ['__system'],
+ });
+
+ db.getSiblingDB('admin').auth(AUTH_USER, AUTH_PASSWORD);
+
+ var oldStop = toolTest.stop;
+ toolTest.stop = function() {
+ replTest.stopSet();
+ oldStop.apply(toolTest, arguments);
+ };
+
+ toolTest.authCommand = 'db.getSiblingDB(\'admin\').auth(\'' +
+ AUTH_USER + '\', \'' + AUTH_PASSWORD + '\');';
+
+ toolTest.isReplicaSet = true;
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [
+ '--username', AUTH_USER,
+ '--password', AUTH_PASSWORD,
+ '--authenticationDatabase', 'admin'
+ ];
+};
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/sharding_28.config.js b/src/mongo/gotools/test/qa-tests/jstests/configs/sharding_28.config.js
new file mode 100644
index 00000000000..24577f44938
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/sharding_28.config.js
@@ -0,0 +1,40 @@
+load("jstests/configs/standard_dump_targets.config.js");
+
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, null);
+
+ var shardingTest = new ShardingTest({name: name,
+ shards: 2,
+ verbose: 0,
+ mongos: 3,
+ other: {
+ chunksize: 1,
+ enableBalancer: 0
+ }
+ });
+ shardingTest.adminCommand({enablesharding: name});
+
+ toolTest.m = shardingTest.s0;
+ toolTest.db = shardingTest.getDB(name);
+ toolTest.port = shardingTest.s0.port;
+
+ var oldStop = toolTest.stop;
+ toolTest.stop = function() {
+ shardingTest.stop();
+ oldStop.apply(toolTest, arguments);
+ };
+
+ toolTest.isSharded = true;
+
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [];
+};
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/ssl_28.config.js b/src/mongo/gotools/test/qa-tests/jstests/configs/ssl_28.config.js
new file mode 100644
index 00000000000..17c2fb492a0
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/ssl_28.config.js
@@ -0,0 +1,26 @@
+/* exported getToolTest */
+var getToolTest;
+
+(function() {
+ var TOOLS_TEST_CONFIG = {
+ binVersion: '',
+ sslMode: 'requireSSL',
+ sslPEMKeyFile: 'jstests/libs/server.pem',
+ sslCAFile: 'jstests/libs/ca.pem',
+ };
+
+ getToolTest = function(name) {
+ var toolTest = new ToolTest(name, TOOLS_TEST_CONFIG);
+ toolTest.startDB();
+ toolTest.usesSSL = true;
+ return toolTest;
+ };
+}());
+
+/* exported getCommonToolArguments */
+var getCommonToolArguments = function() {
+ return [
+ '--ssl',
+ '--sslPEMKeyFile', 'jstests/libs/client.pem'
+ ];
+};
diff --git a/src/mongo/gotools/test/qa-tests/jstests/configs/standard_dump_targets.config.js b/src/mongo/gotools/test/qa-tests/jstests/configs/standard_dump_targets.config.js
new file mode 100644
index 00000000000..fe68b171246
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/configs/standard_dump_targets.config.js
@@ -0,0 +1,30 @@
+var getDumpTarget;
+
+(function() {
+ if (getDumpTarget === undefined) {
+ getDumpTarget = function(target) {
+ if (target === undefined) {
+ return [];
+ }
+ return ["--out="+target];
+ };
+ }
+}());
+
+var dump_targets;
+if (!dump_targets) {
+ dump_targets = "standard";
+}
+
+var getRestoreTarget;
+
+(function() {
+ if (getRestoreTarget === undefined) {
+ getRestoreTarget = function(target) {
+ if (target === undefined) {
+ return [];
+ }
+ return ["--dir="+target];
+ };
+ }
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/bad_options.js b/src/mongo/gotools/test/qa-tests/jstests/export/bad_options.js
new file mode 100644
index 00000000000..cbbeea4f346
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/bad_options.js
@@ -0,0 +1,47 @@
+(function() {
+
+ // Tests running mongoexport with bad command line options.
+
+ jsTest.log('Testing running mongoexport with bad command line options');
+
+ var toolTest = new ToolTest('bad_options');
+ toolTest.startDB('foo');
+
+ // run mongoexport with a missing --collection argument
+ var ret = toolTest.runTool('export', '--db', 'test');
+ assert.neq(0, ret);
+
+ // run mongoexport with bad json as the --query
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--query', '{ hello }');
+ assert.neq(0, ret);
+
+ // run mongoexport with a bad argument to --skip
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--sort', '{a: 1}', '--skip', 'jamesearljones');
+ assert.neq(0, ret);
+
+ // run mongoexport with a bad argument to --sort
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--sort', '{ hello }');
+ assert.neq(0, ret);
+
+ // run mongoexport with a bad argument to --limit
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--sort', '{a: 1}', '--limit', 'jamesearljones');
+ assert.neq(0, ret);
+
+ // run mongoexport with --query and --queryFile
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--query', '{a:1}', '--queryFile', 'jstests/export/testdata/query.json');
+ assert.neq(0, ret);
+
+ // run mongoexport with a --queryFile that doesn't exist
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data',
+ '--queryFile', 'jstests/nope');
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/basic_data.js b/src/mongo/gotools/test/qa-tests/jstests/export/basic_data.js
new file mode 100644
index 00000000000..c0be94f8a55
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/basic_data.js
@@ -0,0 +1,58 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with some basic data, and bringing it back
+ // in with import.
+
+ jsTest.log('Testing exporting, then importing, some basic data');
+
+ var toolTest = getToolTest('basic_data');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'basic_data_export.json';
+ removeFile(exportTarget);
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data
+ for (var i = 0; i < 50; i++) {
+ testColl.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(50, testColl.count());
+
+ // export the data
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // import the data back in
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data is correct
+ assert.eq(50, testColl.count());
+ for (i = 0; i < 50; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/data_types.js b/src/mongo/gotools/test/qa-tests/jstests/export/data_types.js
new file mode 100644
index 00000000000..ce6a6ac8ed3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/data_types.js
@@ -0,0 +1,70 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with different data types, and bringing it back
+ // in with import.
+
+ jsTest.log('Testing exporting, then importing, different data types');
+
+ var toolTest = getToolTest('data_types');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'data_types_export.json';
+ removeFile(exportTarget);
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data, of different types
+ testColl.insert({num: 1});
+ testColl.insert({flt: 1.0});
+ testColl.insert({str: '1'});
+ testColl.insert({obj: {a: 1}});
+ testColl.insert({arr: [0, 1]});
+ testColl.insert({bd: new BinData(0, '1234')});
+ testColl.insert({date: ISODate('2009-08-27T12:34:56.789')});
+ testColl.insert({ts: new Timestamp(1234, 5678)});
+ testColl.insert({rx: /foo*"bar"/i});
+ // sanity check the insertion worked
+ assert.eq(9, testColl.count());
+
+ // export the data
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // import the data back in
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data is correct
+ assert.eq(9, testColl.count());
+ assert.eq(1, testColl.count({num: 1}));
+ assert.eq(1, testColl.count({flt: 1.0}));
+ assert.eq(1, testColl.count({str: '1'}));
+ assert.eq(1, testColl.count({obj: {a: 1}}));
+ assert.eq(1, testColl.count({arr: [0, 1]}));
+ assert.eq(1, testColl.count({bd: new BinData(0, '1234')}));
+ assert.eq(1, testColl.count({date: ISODate('2009-08-27T12:34:56.789')}));
+ assert.eq(1, testColl.count({ts: new Timestamp(1234, 5678)}));
+ assert.eq(1, testColl.count({rx: {$exists: true}}));
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/field_file.js b/src/mongo/gotools/test/qa-tests/jstests/export/field_file.js
new file mode 100644
index 00000000000..e7517690f06
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/field_file.js
@@ -0,0 +1,60 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport exporting to csv using the --fieldFile option
+ jsTest.log('Testing exporting to csv using the --fieldFile option');
+
+ var toolTest = getToolTest('field_file');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+ var destColl = testDB.dest;
+
+ // the export target
+ var exportTarget = 'jstests/export/testdata/field_file_export.csv';
+ removeFile(exportTarget);
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 1, b: 1});
+ sourceColl.insert({a: 1, b: 2, c: 3});
+ // sanity check the insertion worked
+ assert.eq(3, sourceColl.count());
+
+ // export the data, using a field file that specifies 'a' and 'b'
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--type=csv',
+ '--fieldFile', 'jstests/export/testdata/simple_field_file']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type=csv',
+ '--fields', 'a,b,c']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+
+ // make sure only the specified fields were exported
+ assert.eq(3, destColl.count({a: 1}));
+ assert.eq(1, destColl.count({b: 1}));
+ assert.eq(1, destColl.count({b: 2}));
+ assert.eq(0, destColl.count({c: 3}));
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/fields_csv.js b/src/mongo/gotools/test/qa-tests/jstests/export/fields_csv.js
new file mode 100644
index 00000000000..531fdb93c9e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/fields_csv.js
@@ -0,0 +1,175 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport exporting to csv using the --fields option.
+
+ jsTest.log('Testing exporting to csv using the --fields option');
+
+ var toolTest = getToolTest('fields_csv');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+ var destColl = testDB.dest;
+
+ // the export target
+ var exportTarget = 'fields_export.csv';
+ removeFile(exportTarget);
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 1, b: 1});
+ sourceColl.insert({a: 1, b: 2, c: 3});
+ // sanity check the insertion worked
+ assert.eq(3, sourceColl.count());
+
+ // export the data, specifying only one field
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--csv',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type', 'csv',
+ '--fields', 'a,b,c']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure only the specified field was exported
+ assert.eq(3, destColl.count({a: 1}));
+ assert.eq(0, destColl.count({b: 1}));
+ assert.eq(0, destColl.count({b: 2}));
+ assert.eq(0, destColl.count({c: 3}));
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ destColl.remove({});
+
+ // export the data, specifying all fields
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--csv',
+ '--fields', 'a,b,c']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type', 'csv',
+ '--fields', 'a,b,c']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure everything was exported
+ assert.eq(3, destColl.count({a: 1}));
+ assert.eq(1, destColl.count({b: 1}));
+ assert.eq(1, destColl.count({b: 2}));
+ assert.eq(1, destColl.count({c: 3}));
+
+ // make sure the _id was NOT exported - the _id for the
+ // corresponding documents in the two collections should
+ // be different
+ var fromSource = sourceColl.findOne({a: 1, b: 1});
+ var fromDest = destColl.findOne({a: 1, b: 1});
+ assert.neq(fromSource._id, fromDest._id);
+
+
+ /* Test passing positional arguments to --fields */
+
+ // outputMatchesExpected takes an output string and returns
+ // a boolean indicating if any line of the output matched
+ // the expected string.
+ var outputMatchesExpected = function(output, expected) {
+ var found = false;
+ output.split('\n').forEach(function(line) {
+ if (line.match(expected)) {
+ found = true;
+ }
+ });
+ return found;
+ };
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ sourceColl.remove({});
+
+ // ensure source collection is empty
+ assert.eq(0, sourceColl.count());
+
+ // insert some data
+ sourceColl.insert({a: [1, 2, 3, 4, 5], b: {c: [-1, -2, -3, -4]}});
+ sourceColl.insert({a: 1, b: 2, c: 3, d: {e: [4, 5, 6]}});
+ sourceColl.insert({a: 1, b: 2, c: 3, d: 5, e: {"0": ["foo", "bar", "baz"]}});
+ sourceColl.insert({a: 1, b: 2, c: 3, d: [4, 5, 6], e: [{"0": 0, "1": 1}, {"2": 2, "3": 3}]});
+
+ // ensure the insertion worked
+ assert.eq(4, sourceColl.count());
+
+ // use the following fields as filters:
+ var cases = [
+ {field: 'd.e.2', expected: /6/}, // specify nested field with array value
+ {field: 'e.0.0', expected: /foo/}, // specify nested field with numeric array value
+ {field: 'b,d.1,e.1.3', expected: /2,5,3/}, // specify varying levels of field nesting
+ ];
+
+ var output;
+
+ for (var i = 0; i < cases.length; i++) {
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--fields', cases[i].field,
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--csv']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ output = cat(exportTarget);
+ jsTest.log("Fields Test " + (i + 1) + ": \n" + output);
+ assert.eq(outputMatchesExpected(output, cases[i].expected), true);
+ }
+
+ // test with $ projection and query
+ cases = [
+ {query: '{ d: 4 }', field: 'd.$', expected: /[4]/},
+ {query: '{ a: { $gt: 1 } }', field: 'a.$', expected: /[2]/},
+ {query: '{ "b.c": -1 }', field: 'b.c.$', expected: /[-1]/},
+ ];
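+  // (the positional $ operator projects the first array element matched by
+  // the query; e.g. 'a.$' with {a: {$gt: 1}} yields 2 from [1, 2, 3, 4, 5])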
+
+ for (i = 0; i < cases.length; i++) {
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--query', cases[i].query,
+ '--fields', cases[i].field,
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--csv']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ output = cat(exportTarget);
+ jsTest.log("Fields + Query Test " + (i + 1) + ": \n" + output);
+ assert.eq(outputMatchesExpected(output, cases[i].expected), true);
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/fields_json.js b/src/mongo/gotools/test/qa-tests/jstests/export/fields_json.js
new file mode 100644
index 00000000000..ff79f79f63a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/fields_json.js
@@ -0,0 +1,92 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport exporting to json with the --fields option
+
+ jsTest.log('Testing exporting to json using the --fields option');
+
+ var toolTest = getToolTest('fields_json');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+ var destColl = testDB.dest;
+
+ // the export target
+ var exportTarget = 'fields_export.json';
+ removeFile(exportTarget);
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 1, b: 1});
+ sourceColl.insert({a: 1, b: 2, c: 3});
+ // sanity check the insertion worked
+ assert.eq(3, sourceColl.count());
+
+ // export the data, specifying only one field
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type', 'json']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure only the specified field was exported
+ assert.eq(3, destColl.count({a: 1}));
+ assert.eq(0, destColl.count({b: 1}));
+ assert.eq(0, destColl.count({b: 2}));
+ assert.eq(0, destColl.count({c: 3}));
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ destColl.remove({});
+
+ // export the data, specifying all fields
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--fields', 'a,b,c']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type', 'json']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure everything was exported
+ assert.eq(3, destColl.count({a: 1}));
+ assert.eq(1, destColl.count({b: 1}));
+ assert.eq(1, destColl.count({b: 2}));
+ assert.eq(1, destColl.count({c: 3}));
+
+ // make sure the _id was exported - the _id for the
+ // corresponding documents in the two collections should
+ // be the same
+ var fromSource = sourceColl.findOne({a: 1, b: 1});
+ var fromDest = destColl.findOne({a: 1, b: 1});
+ assert.eq(fromSource._id, fromDest._id);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/force_table_scan.js b/src/mongo/gotools/test/qa-tests/jstests/export/force_table_scan.js
new file mode 100644
index 00000000000..5076e476f45
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/force_table_scan.js
@@ -0,0 +1,113 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with --forceTableScan specified.
+
+ jsTest.log('Testing exporting with --forceTableScan');
+
+ var toolTest = getToolTest('force_table_scan');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'force_table_scan_export.json';
+ removeFile(exportTarget);
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data
+ for (var i = 0; i < 50; i++) {
+ testColl.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(50, testColl.count());
+
+ // set the profiling level to high, so that
+ // we can inspect all queries
+ assert.eq(1, testDB.setProfilingLevel(2).ok);
+
+ // the profiling collection
+ var profilingColl = testDB.system.profile;
+
+ // run mongoexport without --forceTableScan
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // grab the query from the profiling collection
+ var queries = profilingColl.find({op: 'query', ns: 'test.data'}).toArray();
+
+ // there should only be one query so far, and it should have snapshot set
+ assert.eq(1, queries.length);
+ assert.eq(true, queries[0].query.$snapshot || queries[0].query.snapshot);
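+  // (by default mongoexport queries in snapshot mode so that documents moved
+  // during the export aren't returned twice; --forceTableScan disables this)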
+
+ // remove the export file
+ removeFile(exportTarget);
+
+ // run mongoexport again, with --forceTableScan
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data',
+ '--forceTableScan']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // grab the queries again
+ queries = profilingColl.find({op: 'query', ns: 'test.data'}).sort({ts: 1}).toArray();
+
+ // there should be two queries, and the second one should not
+ // have snapshot set
+ assert.eq(2, queries.length);
+ assert(!queries[1].query['$snapshot']);
+
+ // wipe the collection
+ testColl.remove({});
+
+ // import the data back in
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure that the export with --forceTableScan exported the correct data
+ assert.eq(50, testColl.count());
+ for (i = 0; i < 50; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // remove the export file
+ removeFile(exportTarget);
+
+ // run mongoexport again, without --forceTableScan but with --sort. --forceTableScan
+ // should be implicitly set
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data',
+ '--sort', '{_id:1}']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // grab the queries again
+ queries = profilingColl.find({op: 'query', ns: 'test.data'}).sort({ts: 1}).toArray();
+
+ // there should be 3 queries, and the last one should not have snapshot set
+ assert.eq(3, queries.length);
+ assert(!queries[2].query.$snapshot);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/json_array.js b/src/mongo/gotools/test/qa-tests/jstests/export/json_array.js
new file mode 100644
index 00000000000..0e2a8aa5ca7
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/json_array.js
@@ -0,0 +1,55 @@
+(function() {
+
+ // Tests running mongoexport with the --jsonArray output option.
+
+ jsTest.log('Testing exporting with --jsonArray specified');
+
+ var toolTest = new ToolTest('json_array');
+ toolTest.startDB('foo');
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // the export target
+ var exportTarget = 'json_array_export.json';
+ removeFile(exportTarget);
+
+ // insert some data
+ for (var i = 0; i < 20; i++) {
+ testColl.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(20, testColl.count());
+
+ // export the data
+ var ret = toolTest.runTool('export', '--out', exportTarget,
+ '--db', 'test', '--collection', 'data', '--jsonArray');
+ assert.eq(0, ret);
+
+ // drop the data
+ testDB.dropDatabase();
+
+ // make sure that mongoimport without --jsonArray does not work
+ ret = toolTest.runTool('import', '--file', exportTarget,
+ '--db', 'test', '--collection', 'data');
+ assert.neq(0, ret);
+
+ // make sure nothing was imported
+ assert.eq(0, testColl.count());
+
+ // run mongoimport again, with --jsonArray
+ ret = toolTest.runTool('import', '--file', exportTarget,
+ '--db', 'test', '--collection', 'data', '--jsonArray');
+ assert.eq(0, ret);
+
+ // make sure the data was imported
+ assert.eq(20, testColl.count());
+ for (i = 0; i < 20; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/limit.js b/src/mongo/gotools/test/qa-tests/jstests/export/limit.js
new file mode 100644
index 00000000000..9de98ff1102
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/limit.js
@@ -0,0 +1,59 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with --limit specified.
+
+ jsTest.log('Testing exporting with --limit');
+
+ var toolTest = getToolTest('limit');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'limit_export.json';
+ removeFile(exportTarget);
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data
+ for (var i = 0; i < 50; i++) {
+ testColl.insert({a: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(50, testColl.count());
+
+ // export the data, using --limit
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data',
+ '--sort', '{a:1}',
+ '--limit', '20']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // import the data back in
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the limit was applied to the export
+ assert.eq(20, testColl.count());
+ for (i = 0; i < 20; i++) {
+ assert.eq(1, testColl.count({a: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/nested_fields_csv.js b/src/mongo/gotools/test/qa-tests/jstests/export/nested_fields_csv.js
new file mode 100644
index 00000000000..8a1954ec5dd
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/nested_fields_csv.js
@@ -0,0 +1,65 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests exporting nested fields to csv.
+
+ jsTest.log('Testing exporting nested fields to csv');
+
+ var toolTest = getToolTest('nested_fields_csv');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+ var destColl = testDB.dest;
+
+ // the export target
+ var exportTarget = 'nested_fields_export.csv';
+ removeFile(exportTarget);
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 2, b: {c: 2}});
+ sourceColl.insert({a: 3, b: {c: 3, d: {e: 3}}});
+ sourceColl.insert({a: 4, x: null});
+ // sanity check the insertion worked
+ assert.eq(4, sourceColl.count());
+
+ // export the data, specifying nested fields to export
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--csv',
+ '--fields', 'a,b.d.e,x.y']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest',
+ '--type', 'csv',
+ '--headerline']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure that the non-specified fields were ignored, and the
+ // specified fields were added correctly
+ assert.eq(0, destColl.count({'b.c': 2}));
+ assert.eq(0, destColl.count({'b.c': 3}));
+ assert.eq(1, destColl.count({'b.d.e': 3}));
+ assert.eq(3, destColl.count({'b.d.e': ''}));
+ assert.eq(1, destColl.count({a: 1}));
+ assert.eq(1, destColl.count({a: 2}));
+ assert.eq(1, destColl.count({a: 3}));
+ assert.eq(4, destColl.count({'x.y': ''}));
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/no_data.js b/src/mongo/gotools/test/qa-tests/jstests/export/no_data.js
new file mode 100644
index 00000000000..cfc9248bb5f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/no_data.js
@@ -0,0 +1,21 @@
+(function() {
+
+ // Tests running mongoexport with no data in the target collection.
+
+ jsTest.log('Testing exporting no data');
+
+ var toolTest = new ToolTest('no_data');
+ toolTest.startDB('foo');
+
+ // run mongoexport with no data, make sure it doesn't error out
+ var ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data');
+ assert.eq(0, ret);
+
+  // but it should fail if --assertExists is specified
+ ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data', '--assertExists');
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/pretty.js b/src/mongo/gotools/test/qa-tests/jstests/export/pretty.js
new file mode 100644
index 00000000000..db922794b6f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/pretty.js
@@ -0,0 +1,33 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('fields_json');
+ var commonToolArgs = getCommonToolArguments();
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 1, b: 1});
+ sourceColl.insert({a: 1, b: 2, c: 3});
+
+ // export it with pretty
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', "pretty.json",
+ '--db', 'test',
+ '--collection', 'source',
+ '--pretty',
+ '--jsonArray']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+  var parsed = JSON.parse(cat('pretty.json'));
+ assert.eq(parsed[0].a, 1);
+ assert.eq(parsed[1].b, 1);
+ assert.eq(parsed[2].b, 2);
+ assert.eq(parsed[2].c, 3);
+
+}());
+
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/query.js b/src/mongo/gotools/test/qa-tests/jstests/export/query.js
new file mode 100644
index 00000000000..9c44913a366
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/query.js
@@ -0,0 +1,198 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with --query specified.
+
+ jsTest.log('Testing exporting with --query');
+
+ var toolTest = getToolTest('query');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'query_export.json';
+ removeFile(exportTarget);
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+ var destColl = testDB.dest;
+
+ // insert some data
+ sourceColl.insert({a: 1, x: {b: '1'}});
+ sourceColl.insert({a: 2, x: {b: '1', c: '2'}});
+ sourceColl.insert({a: 1, c: '1'});
+ sourceColl.insert({a: 2, c: '2'});
+ // sanity check the insertion worked
+ assert.eq(4, sourceColl.count());
+
+ // export the data, with a query that will match nothing
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', '{a:3}']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the export was blank
+ assert.eq(0, destColl.count());
+
+ // remove the export
+ removeFile(exportTarget);
+
+ // export the data, with a query matching a single element
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', "{a:1, c:'1'}"]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the query was applied correctly
+ assert.eq(1, destColl.count());
+ assert.eq(1, destColl.count({a: 1, c: '1'}));
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ destColl.remove({});
+
+ // TOOLS-716 export the data, with a queryFile matching a single element
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--queryFile', "jstests/export/testdata/query.json"]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the query was applied correctly
+ assert.eq(1, destColl.count());
+ assert.eq(1, destColl.count({a: 1, c: '1'}));
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ destColl.remove({});
+
+
+ // export the data, with a query on an embedded document
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', "{a:2, 'x.c':'2'}"]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the query was applied correctly
+ assert.eq(1, destColl.count());
+ assert.eq(1, destColl.count({a: 2, "x.c": '2'}));
+
+ // remove the export, clear the destination collection
+ removeFile(exportTarget);
+ destColl.remove({});
+
+ // export the data, with a blank query (should match everything)
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', "{}"]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // import the data into the destination collection
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the query was applied correctly
+ assert.eq(4, destColl.count());
+
+ // TOOLS-469 test queries containing extended JSON field (like dates)
+ sourceColl.drop();
+ destColl.drop();
+ sourceColl.insert({
+ a: 1,
+ x: ISODate("2014-12-11T13:52:39.498Z"),
+ y: ISODate("2014-12-13T13:52:39.498Z")
+ });
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', '{x:{$gt:Date(1418305949498), $lt:Date(1418305979498)}, y:{$gt:{$date:1418478749498}, $lt:{$date:1418478769498}}}']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret); assert.eq(1, destColl.count());
+
+ // TOOLS-530 add support for ISODate and string formatting for query flag
+ sourceColl.drop();
+ destColl.drop();
+ sourceColl.insert({
+ a: 1,
+ x: ISODate("2014-12-11T13:52:39.498Z"),
+ y: ISODate("2014-12-13T13:52:39.498Z")
+ });
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--query', '{x:{$gt:ISODate("2014-12-11T13:52:39.3Z"), $lt:ISODate("2014-12-11T13:52:39.5Z")}}']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'dest']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(1, destColl.count());
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/slave_ok.js b/src/mongo/gotools/test/qa-tests/jstests/export/slave_ok.js
new file mode 100644
index 00000000000..45c5602362d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/slave_ok.js
@@ -0,0 +1,61 @@
+(function() {
+ // Tests running mongoexport with --slaveOk.
+
+ jsTest.log('Testing exporting with --slaveOk');
+
+ // bring up a replica set with 3 nodes
+ var replTest = new ReplSetTest({
+ name: 'slave_ok',
+ nodes: 3,
+ oplogSize: 5,
+ useHostName: true,
+ });
+ var nodes = replTest.startSet();
+ replTest.initiate();
+ replTest.awaitSecondaryNodes();
+
+ // cache the primary
+ var primary = replTest.getPrimary();
+
+ // the export target
+ var exportTarget = 'slave_ok_export.json';
+ removeFile(exportTarget);
+
+ // insert some data
+ var testDB = primary.getDB('test');
+ for (var i = 0; i < 10; i++) {
+ testDB.data.insert({_id: i});
+ }
+ replTest.awaitReplication();
+
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // make sure that exporting from any of the nodes works with --slaveOk
+ nodes.forEach(function(node) {
+ // remove the export, clean the destination collection
+ removeFile(exportTarget);
+ testDB.dest.remove({});
+ printjson(replTest.status());
+
+ var ret = runMongoProgram('mongoexport',
+ '--db', 'test',
+ '--collection', 'data',
+ '--host', node.host,
+ '--slaveOk',
+ '--out', exportTarget);
+ assert.eq(0, ret);
+
+ ret = runMongoProgram('mongoimport',
+ '--db', 'test',
+ '--collection', 'dest',
+ '--host', primary.host,
+ '--file', exportTarget);
+ assert.eq(0, ret);
+ assert.eq(10, testDB.dest.count());
+ });
+
+ // success
+ replTest.stopSet();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/sort_and_skip.js b/src/mongo/gotools/test/qa-tests/jstests/export/sort_and_skip.js
new file mode 100644
index 00000000000..a2fd36d8103
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/sort_and_skip.js
@@ -0,0 +1,67 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongoexport with --sort and --skip specified.
+
+ jsTest.log('Testing exporting with --sort and --skip');
+
+ var toolTest = getToolTest('sort_and_skip');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the export target
+ var exportTarget = 'sort_and_skip_export.json';
+ removeFile(exportTarget);
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data, in a different order than we'll be sorting it
+ for (var i = 30; i > 20; i--) {
+ testColl.insert({a: i});
+ }
+ for (i = 31; i < 50; i++) {
+ testColl.insert({a: i});
+ }
+ for (i = 20; i >= 0; i--) {
+ testColl.insert({a: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(50, testColl.count());
+
+ // export the data, using --skip
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'data',
+ '--sort', '{a:1}',
+ '--skip', '20']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // import the data back in
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', exportTarget,
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the skip was applied to the export, and that
+ // the sort functioned so that the correct documents
+ // were skipped
+ assert.eq(30, testColl.count());
+ for (i = 20; i < 50; i++) {
+ assert.eq(1, testColl.count({a: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/stdout.js b/src/mongo/gotools/test/qa-tests/jstests/export/stdout.js
new file mode 100644
index 00000000000..e33834e97df
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/stdout.js
@@ -0,0 +1,40 @@
+// Tests running mongoexport writing to stdout.
+(function() {
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ jsTest.log('Testing exporting to stdout');
+
+ var toolTest = new ToolTest('stdout');
+ toolTest.startDB('foo');
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // insert some data
+ for (var i = 0; i < 20; i++) {
+ testColl.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(20, testColl.count());
+
+ // export the data, writing to stdout
+ var ret = toolTest.runTool('export', '--db', 'test', '--collection', 'data');
+ assert.eq(0, ret);
+
+ // wait for full output to appear
+ assert.strContains.soon('exported 20 records', rawMongoProgramOutput,
+ 'should show number of exported records');
+
+ // grab the raw output
+ var output = rawMongoProgramOutput();
+
+ // make sure it contains the json output
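+ // (the shell stores numbers as doubles, so each _id appears in the output as e.g. "0.0")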
+ for (i = 0; i < 20; i++) {
+ assert.neq(-1, output.indexOf('{"_id":'+i+'.0}'));
+ }
+
+ // success
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/testdata/query.json b/src/mongo/gotools/test/qa-tests/jstests/export/testdata/query.json
new file mode 100644
index 00000000000..5e9b73d037e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/testdata/query.json
@@ -0,0 +1 @@
+{a:1, c:'1'}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/testdata/simple_field_file b/src/mongo/gotools/test/qa-tests/jstests/export/testdata/simple_field_file
new file mode 100644
index 00000000000..422c2b7ab3b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/testdata/simple_field_file
@@ -0,0 +1,2 @@
+a
+b
diff --git a/src/mongo/gotools/test/qa-tests/jstests/export/type_case.js b/src/mongo/gotools/test/qa-tests/jstests/export/type_case.js
new file mode 100644
index 00000000000..ea2a11bddfe
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/export/type_case.js
@@ -0,0 +1,115 @@
+(function() {
+
+ if (typeof getToolTest === "undefined") {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Testing exporting with various type specifiers
+
+ jsTest.log('Testing exporting with various type specifiers');
+
+ var toolTest = getToolTest('export_types');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collections we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var sourceColl = testDB.source;
+
+ // the export target
+ var exportTarget = 'type_export';
+
+ // insert some data
+ sourceColl.insert({a: 1});
+ sourceColl.insert({a: 1, b: 1});
+ sourceColl.insert({a: 1, b: 2, c: 3});
+ // sanity check the insertion worked
+ assert.eq(3, sourceColl.count());
+
+ // first validate that invalid types are rejected
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget,
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="foobar"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(3, ret);
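+ // (3 is presumably the tools' bad-options exit code; the key point is a non-zero exit)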
+
+ // create a dump file using a lowercase csv type
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".csv",
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="csv"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ var csvmd5 = md5sumFile(exportTarget + ".csv");
+
+ // create a dump file using an uppercase csv type
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".CSV",
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="CSV"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ var CSVmd5 = md5sumFile(exportTarget + ".CSV");
+ // the files for the uppercase and lowercase types should match
+ assert.eq(csvmd5, CSVmd5);
+
+ // create a dump file using a mixedcase csv type
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".cSv",
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="cSv"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ var cSvmd5 = md5sumFile(exportTarget + ".cSv");
+ // the files for the mixed-case and lowercase types should match
+ assert.eq(csvmd5, cSvmd5);
+
+ // then some json type tests
+
+ // create a dump file using a lowercase json type
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".json",
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="json"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ var jsonmd5 = md5sumFile(exportTarget + ".json");
+
+ // create a dump file using an uppercase json type
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".JSON",
+ '--db', 'test',
+ '--collection', 'source',
+ '--type="JSON"',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ var JSONmd5 = md5sumFile(exportTarget + ".JSON");
+
+ // create a dump file using a blank type (which defaults to json)
+ ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', exportTarget + ".blank",
+ '--db', 'test',
+ '--collection', 'source',
+ '--fields', 'a']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ var blankmd5 = md5sumFile(exportTarget + ".blank");
+ assert.eq(JSONmd5, jsonmd5);
+ assert.eq(blankmd5, jsonmd5);
+
+ // sanity check
+ assert.neq(csvmd5, jsonmd5);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_db.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_db.js
new file mode 100644
index 00000000000..fd647d03bff
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_db.js
@@ -0,0 +1,61 @@
+// mongofiles_db.js; ensure that running mongofiles using the db flag works as
+// expected
+var testName = 'mongofiles_db';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --db option');
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Putting file with --db with ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('otherdb');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--db', 'otherdb',
+ '--port', conn.port,
+ '--host', 'localhost',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 1 failed');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--db', 'otherdb',
+ '--port', conn.port,
+ '--host', 'localhost',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 2 failed');
+
+ // ensure the files were inserted into the right db
+ assert.eq(2, db.getCollection('fs.files').count(), 'unexpected fs.files count 1');
+
+ // test short form
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '-d', 'otherdb',
+ '--port', conn.port,
+ '--host', 'localhost',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 3 failed');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '-d', 'otherdb',
+ '--port', conn.port,
+ '--host', 'localhost',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 4 failed');
+
+ // ensure the file was inserted into the right db
+ assert.eq(4, db.getCollection('fs.files').count(), 'unexpected fs.files count 2');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_delete.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_delete.js
new file mode 100644
index 00000000000..89e72b3f366
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_delete.js
@@ -0,0 +1,47 @@
+// mongofiles_delete.js; ensure that delete command works as expected
+var testName = 'mongofiles_delete';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles delete command');
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Putting file with ' + passthrough.name + ' passthrough');
+
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ // ensure tool runs without error
+ for (var i = 0; i < 10; i++) {
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed');
+ }
+
+ // ensure all the files were written
+ assert.eq(10, db.fs.files.count(), 'unexpected fs.files count');
+
+ jsTest.log('Deleting file');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'delete', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'delete failed');
+
+ // ensure all the files were deleted
+ assert.eq(0, db.fs.files.count(), 'unexpected fs.files count');
+ assert.eq(0, db.fs.chunks.count(), 'unexpected fs.chunks count');
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_get.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_get.js
new file mode 100644
index 00000000000..04a8c93832a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_get.js
@@ -0,0 +1,81 @@
+// mongofiles_get.js; ensure that get command works as expected
+var testName = 'mongofiles_get';
+(function() {
+ jsTest.log('Testing mongofiles get command');
+ load('jstests/files/util/mongofiles_common.js');
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
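+ // (Math.random() + 1).toString(36).substring(7) below yields a short random
+ // base-36 suffix, making the local filename unique across runs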
+ var getFile = testName + (Math.random() + 1).toString(36).substring(7);
+
+ jsTest.log('Putting file with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 1 failed');
+
+ // ensure the file was inserted
+ assert.eq(1, db.fs.files.count(), 'unexpected fs.files count 1');
+ var fileId = db.fs.files.findOne()._id;
+
+ jsTest.log('Getting file with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', getFile,
+ 'get', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'get failed');
+
+ // ensure the retrieved file is exactly the same as that inserted
+ var actual = md5sumFile(filesToInsert[0]);
+ var expected = md5sumFile(getFile);
+
+ assert.eq(actual, expected, 'mismatched md5 sum - expected ' + expected + ' got ' + actual);
+
+ // ensure tool runs get_id without error
+ var idAsJSON = fileId.tojson();
+ if (_isWindows()) {
+ idAsJSON = '"' + idAsJSON.replace(/"/g, '\\"') + '"';
+ }
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', getFile,
+ 'get_id', idAsJSON]
+ .concat(passthrough.args)),
+ 0, 'get_id failed');
+ expected = md5sumFile(getFile);
+ assert.eq(actual, expected, 'mismatched md5 sum on _id - expected ' + expected + ' got ' + actual);
+
+ // clear the output buffer
+ clearRawMongoProgramOutput();
+
+ // test getting to stdout
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', '-',
+ 'get', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'get stdout failed');
+ var expectedContent = "this is a text file";
+ assert.strContains.soon(expectedContent, rawMongoProgramOutput,
+ "stdout get didn't match expected file content");
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_host.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_host.js
new file mode 100644
index 00000000000..8fac1af3c81
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_host.js
@@ -0,0 +1,59 @@
+// mongofiles_host.js; ensure that running mongofiles using valid and invalid
+// host names or IP addresses succeeds/fails as expected
+var testName = 'mongofiles_host';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --host option');
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Putting file with valid host name with ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--host', 'localhost',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 1 failed');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--host', '127.0.0.1',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 2 failed');
+
+ // ensure the file was inserted
+ assert.eq(2, db.getCollection('fs.files').count(), 'unexpected fs.files count 1');
+
+ jsTest.log('Putting file with invalid host name with ' + passthrough.name + ' passthrough');
+
+ // ensure tool exits with a non-zero exit code when supplied invalid hosts
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--host', 'does-not-exist',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'expected mongofiles to fail but it succeeded 1');
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--host', '555.555.555.555',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'expected mongofiles to fail but it succeeded 2');
+
+ // ensure the file was not inserted
+ assert.eq(2, db.getCollection('fs.files').count(), 'unexpected fs.files count 2');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_invalid.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_invalid.js
new file mode 100644
index 00000000000..0cd8bca00a9
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_invalid.js
@@ -0,0 +1,37 @@
+// mongofiles_invalid.js; runs mongofiles with an invalid command and
+// option - ensures it fails in all cases
+var testName = 'mongofiles_invalid';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles with invalid commands and options');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ jsTest.log('Running mongofiles with invalid options with ' + passthrough.name + ' passthrough');
+
+ // run with invalid option
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--invalid', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'invalid-option: mongofiles succeeded when it should have failed');
+
+ // run with invalid command
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'invalid', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'invalid-command: mongofiles succeeded when it should have failed');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_list.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_list.js
new file mode 100644
index 00000000000..c027a3cbc60
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_list.js
@@ -0,0 +1,96 @@
+// mongofiles_list.js; tests the mongofiles list option by doing the following:
+//
+// 1. Inserts the test files using mongofiles put
+// 2. Checks that the actual md5 of each file matches what's stored in the database
+// 3. Runs the mongofiles list command to view all files stored.
+// 4. Ensures that all the files inserted are returned.
+// 5. Ensures that the returned list matches the actual names and sizes of the
+//    files inserted.
+var testName = 'mongofiles_list';
+(function() {
+ jsTest.log('Testing mongofiles list command');
+ load('jstests/libs/extended_assert.js');
+ load('jstests/files/util/mongofiles_common.js');
+ var assert = extendedAssert;
+
+ var putFile = function(passthrough, conn, file) {
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', file]
+ .concat(passthrough.args)),
+ 0, 'put for ' + file + 'failed');
+ var db = conn.getDB('test');
+ var fileObj = db.fs.files.findOne({
+ filename: file,
+ });
+ assert(fileObj, 'could not find put file ' + file);
+ assert.eq(md5sumFile(file), fileObj.md5, file + ' md5 did not match - expected ' + md5sumFile(file) + ' got ' + fileObj.md5);
+ return fileObj.length;
+ };
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Putting GridFS files with ' + passthrough.name + ' passthrough');
+
+ var inputFileRegex = /^sh.*files.*/;
+ var whitespaceSplitRegex = /,?\s+/;
+ var fileSizes = [];
+
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ filesToInsert.forEach(function(file) {
+ var fileSize = putFile(passthrough, conn, file);
+ fileSizes.push(fileSize);
+ });
+
+ jsTest.log('Running mongofiles list');
+
+ // clear the output buffer
+ clearRawMongoProgramOutput();
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--quiet', 'list']
+ .concat(passthrough.args)),
+ 0, 'list command failed but was expected to succeed');
+
+ jsTest.log('Verifying list output');
+
+ var files;
+ assert.neq.soon(0, function() {
+ files = rawMongoProgramOutput()
+ .split('\n')
+ .filter(function(line) {
+ return line.match(inputFileRegex);
+ });
+ return files.length;
+ }, 'should find some files');
+
+ // ensure that the returned files and their sizes are as expected
+ files.forEach(function(currentFile, index) {
+ // files should appear in the order they were inserted
+ var fileEntry = currentFile.split(whitespaceSplitRegex);
+
+ // the list command should have 2 entries - the file name and its size
+ // we check for 3 fields because of the sh. prefix added by our js test framework
+ assert.eq(fileEntry.length, 3, 'unexpected list output on ' + currentFile + ' - expected 3 but got ' + fileEntry.length);
+
+ // ensure the expected file name is what is printed
+ assert.eq(fileEntry[1], filesToInsert[index], 'expected file ' + filesToInsert[index] + ' got ' + fileEntry[1]);
+
+ // ensure the expected file size is what is printed
+ assert.eq(fileEntry[2], fileSizes[index], 'expected size ' + fileSizes[index] + ' got ' + fileEntry[2]);
+ });
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_local.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_local.js
new file mode 100644
index 00000000000..bc874499cba
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_local.js
@@ -0,0 +1,102 @@
+// mongofiles_local.js; ensure that when --local is passed:
+// a. for puts, the supplied argument is read and stored using the gridfs filename
+// b. for gets, the supplied argument is used to store the retrieved file
+// c. for puts, if the supplied argument is the empty string, an error should occur
+// d. for gets, if the supplied argument is the empty string, the file name is used
+var testName = 'mongofiles_local';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --local option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ // generate a random GridFS name for the file
+ var putFSName = testName + (Math.random() + 1).toString(36).substring(7);
+ var getFSName = testName + (Math.random() + 1).toString(36).substring(7);
+
+ jsTest.log('Running put on file with --local');
+
+ // ensure tool runs without error with a non-empty --local argument
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '-l', filesToInsert[0],
+ 'put', putFSName]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 1');
+
+ // ensure the file exists
+ assert(db.fs.files.findOne({
+ filename: putFSName
+ }), 'did not find expected GridFS file - ' + putFSName);
+
+ // ensure tool returns an error if the --local argument does not exist
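+ // (appending '?' produces a path that should not exist on disk)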
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', filesToInsert[0] + '?',
+ 'put', putFSName]
+ .concat(passthrough.args)),
+ 0, 'put succeeded when it should have failed 2');
+
+ // if the argument is empty, use the putFSName - which should cause an error since it doesn't exist
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', '',
+ 'put', putFSName]
+ .concat(passthrough.args)),
+ 0, 'put succeeded when it should have failed 3');
+
+ // if the argument is empty, and the GridFS name refers to an existing local
+ // file, it should run without error on linux and fail on windows
+ var comparison = 'eq';
+ if (_isWindows()) {
+ comparison = 'neq';
+ }
+ assert[comparison](runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', '',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 2');
+
+ jsTest.log('Running get on file with --local');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', getFSName,
+ 'get', putFSName]
+ .concat(passthrough.args)),
+ 0, 'get failed when it should have succeeded 1');
+
+ // ensure the right file name was written
+ assert.eq(md5sumFile(filesToInsert[0]), md5sumFile(getFSName), 'files do not match!');
+
+ // ensure tool uses the GridFS name if the --local argument is empty on linux
+ // and fails on windows
+ comparison = 'eq';
+ if (_isWindows()) {
+ comparison = 'neq';
+ }
+ assert[comparison](runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', '',
+ 'get', putFSName]
+ .concat(passthrough.args)),
+ 0, 'get failed unexpectedly');
+
+ if (!_isWindows()) {
+ assert.eq(md5sumFile(filesToInsert[0]), md5sumFile(putFSName), 'md5sums do not match - expected ' + md5sumFile(filesToInsert[0]) + ' got ' + md5sumFile(putFSName));
+ }
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_port.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_port.js
new file mode 100644
index 00000000000..88d6d8f1417
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_port.js
@@ -0,0 +1,52 @@
+// mongofiles_port.js; ensure that supplying valid/invalid port addresses
+// succeeds/fails as expected
+var testName = 'mongofiles_port';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --port option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ jsTest.log('Putting file with valid port with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 1 failed');
+
+ // ensure the file was inserted
+ assert.eq(1, db.fs.files.count(), 'unexpected fs.files count 1');
+
+ jsTest.log('Putting file with invalid port with ' + passthrough.name + ' passthrough');
+
+ // ensure tool exits with a non-zero exit code when supplied invalid ports
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', '12345',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'expected mongofiles to fail but it succeeded 1');
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', 'random',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'expected mongofiles to fail but it succeeded 2');
+
+ // ensure the file was not inserted
+ var count = db.fs.files.count();
+ assert.eq(1, count, 'unexpected fs.files count - expected 1 but got ' + count);
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_prefix.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_prefix.js
new file mode 100644
index 00000000000..3d19bd141ee
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_prefix.js
@@ -0,0 +1,49 @@
+// mongofiles_prefix.js; ensure that passing --prefix works as expected - the
+// provided prefix is used as the collection name prefix
+var testName = 'mongofiles_prefix';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --prefix option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ jsTest.log('Putting file without --prefix with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 1 failed');
+
+ // ensure the default collection name prefix was used
+ assert.eq(1, db.fs.files.count(), 'unexpected fs.files count');
+ assert.eq(0, db[testName + '.files'].count(), 'unexpected ' + testName + '.files count');
+
+ jsTest.log('Putting file with --prefix with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--prefix', testName,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put 2 failed');
+
+ // ensure the supplied collection name prefix was used
+ assert.eq(1, db.fs.files.count(), 'unexpected fs.files count');
+ assert.eq(1, db[testName + '.files'].count(), 'unexpected ' + testName + '.files count');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_put.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_put.js
new file mode 100644
index 00000000000..87678df0bcb
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_put.js
@@ -0,0 +1,108 @@
+// mongofiles_put.js; ensure that put works with very large files.
+// NOTE: this test uses mongodump to create a large file
+var testName = 'mongofiles_put';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles put command');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ // create a large collection and dump it
+ jsTest.log('Creating large collection with ' + passthrough.name + ' passthrough');
+
+ var insertString = new Array(100).join("mongoDB");
+ var inserted = 0;
+ var num = 0;
+ var dbName = 'test';
+ var collection = 'foo';
+ var bulk = db[collection].initializeUnorderedBulkOp();
+
+ while (inserted < (40 * 1024 * 1024)) {
+ bulk.insert({
+ _id: num++,
+ str: insertString
+ });
+ inserted += insertString.length;
+ }
+
+ assert.writeOK(bulk.execute({w: "majority"}));
+
+ // dumping large collection to single large file
+ jsTest.log('Dumping collection to filesystem with ' + passthrough.name + ' passthrough');
+
+ var dumpDir = './dumpDir';
+
+ assert.eq(runMongoProgram.apply(this, ['mongodump',
+ '-d', dbName,
+ '--port', conn.port,
+ '-c', collection,
+ '--out', dumpDir]
+ .concat(passthrough.args)),
+ 0, 'dump failed when it should have succeeded');
+
+ jsTest.log('Putting directory');
+
+ // putting a directory should fail
+ assert.neq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', dumpDir]
+ .concat(passthrough.args)),
+ 0, 'put succeeded when it should have failed');
+
+ jsTest.log('Putting file with ' + passthrough.name + ' passthrough');
+
+ var putFile = dumpDir + '/' + dbName + '/' + collection + '.bson';
+
+ // ensure putting of the large file succeeds
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', putFile,
+ 'put', testName]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded');
+
+ // verify file metadata
+ var fileObj = db.fs.files.findOne({
+ filename: testName
+ });
+ assert(fileObj, testName + ' was not found');
+
+ var numDbChunks = db.fs.chunks.count();
+
+ // the number of chunks should be equal to Math.ceil(fileSize / 255KB), where
+ // fileSize is the length in bytes recorded in the file's GridFS metadata
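+ // e.g. a 40 MB file works out to Math.ceil(40 * 1024 / 255) = 161 chunks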
+ var expectedNumChunks = Math.ceil(fileObj.length / (1024 * 255));
+
+ assert.eq(expectedNumChunks, numDbChunks, 'expected ' + expectedNumChunks + ' chunks; got ' + numDbChunks);
+
+ // now attempt to get the large file
+ jsTest.log('Getting file with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ var getFile = testName + (Math.random() + 1).toString(36).substring(7);
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--local', getFile,
+ 'get', testName]
+ .concat(passthrough.args)),
+ 0, 'get failed');
+
+ // ensure the retrieved file is exactly the same as that inserted
+ var actual = md5sumFile(putFile);
+ var expected = md5sumFile(getFile);
+
+ assert.eq(actual, expected, 'mismatched md5 sum - expected ' + expected + ' got ' + actual);
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_replace.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_replace.js
new file mode 100644
index 00000000000..baef44033e0
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_replace.js
@@ -0,0 +1,79 @@
+// mongofiles_replace.js; ensure that after putting the same file multiple times,
+// using --replace replaces any and all occurrences of the given file in the
+// GridFS collection - all other files are left as-is
+var testName = 'mongofiles_replace';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --replace option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+
+ jsTest.log('Running put on file with --replace with ' + passthrough.name + ' passthrough');
+
+ // insert the same file a couple of times
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 1');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 2');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 3');
+
+ // without --replace, each put inserts a new copy rather than overwriting
+ db.fs.files.findOne({
+ filename: filesToInsert[0]
+ });
+
+ assert.eq(db.fs.files.count(), 3, 'expected 3 files inserted but got ' + db.fs.files.count());
+
+ // now run with --replace
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--replace',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 4');
+
+ assert.eq(db.fs.files.count(), 1, 'expected 1 file inserted but got ' + db.fs.files.count());
+
+ // insert other files but ensure only 1 is replaced
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[1]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 5');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[2]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 6');
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--replace',
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 7');
+
+ assert.eq(db.fs.files.count(), 3, 'expected 3 files inserted but got ' + db.fs.files.count());
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_search.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_search.js
new file mode 100644
index 00000000000..0a39326ff33
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_search.js
@@ -0,0 +1,110 @@
+// mongofiles_search.js; ensures that the search command returns any and all
+// files that match the regex supplied
+var testName = 'mongofiles_search';
+(function() {
+ load('jstests/files/util/mongofiles_common.js');
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var conn;
+
+ // hasMatch takes in raw mongofiles search output and a matchItem; it returns 0
+ // if it finds the match item in any line of the output and 1 otherwise. If the
+ // exactString argument is not empty, hasMatch further checks that the line
+ // matches the argument
+ var hasMatch = function(output, matchItem, exactString) {
+ var lines = output.split('\n');
+ var shellOutputRegex = /^sh.*/;
+ for (var i = 0; i < lines.length; i++) {
+ if (lines[i].match(shellOutputRegex) && lines[i].match(matchItem)) {
+ if (exactString && !lines[i].match(exactString)) {
+ continue;
+ }
+ return 0;
+ }
+ }
+ // matchItem wasn't found
+ return 1;
+ };
+
+ // note - assertHasFiles checks that the output of running mongofiles search with
+ // each of the search strings meets the expected result supplied. If exactString
+ // is not empty, it further checks that the output also matches exactString
+ var assertHasFiles = function(passthrough, searchStrings, expectedResult, exactString) {
+ // perform a couple of search commands against the GridFS collection
+ for (var i = 0; i < searchStrings.length; i++) {
+ clearRawMongoProgramOutput();
+ var queryString = searchStrings[i];
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--quiet',
+ '--port', conn.port,
+ 'search', queryString]
+ .concat(passthrough.args)),
+ 0, 'search command failed on ' + queryString + ' - part of ' + searchStrings);
+
+ // eslint-disable-next-line no-loop-func
+ assert.eq.soon(expectedResult, function() {
+ return hasMatch(rawMongoProgramOutput(), queryString, exactString);
+ }, 'search failed: expected "' + queryString + '" to be ' + (expectedResult ? 'found' : 'missing'));
+ }
+ };
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Testing mongofiles search command');
+ var t = topology.init(passthrough);
+ conn = t.connection();
+
+ jsTest.log('Putting files into GridFS with ' + passthrough.name + ' passthrough');
+
+ for (var i = 0; i < filesToInsert.length; i++) {
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ 'put', filesToInsert[i]]
+ .concat(passthrough.args)),
+ 0, 'put failed on ' + filesToInsert[i] + ' when it should have succeeded');
+ }
+
+ jsTest.log('Searching files in GridFS with ' + passthrough.name + ' passthrough');
+
+ // these search strings should be matched
+ var searchStrings = ['files', '.txt', 'ile', '.'];
+
+ // add the verbatim file names put into GridFS
+ for (i = 0; i < filesToInsert.length; i++) {
+ searchStrings.push(filesToInsert[i]);
+ }
+
+ // all inserted files should be returned
+ assertHasFiles(passthrough, searchStrings, 0);
+
+ // these search strings should NOT be matched
+ searchStrings = ['random', 'always', 'filer'];
+ assertHasFiles(passthrough, searchStrings, 1);
+
+ // test that only the requested file is returned
+ for (i = 0; i < filesToInsert.length; i++) {
+ var currentFile = filesToInsert[i];
+ jsTest.log('Searching for file ' + currentFile + ' with ' + passthrough.name + ' passthrough');
+
+ // ensure the requested file is returned
+ assertHasFiles(passthrough, [currentFile], 0);
+
+ // ensure no other files are returned
+ assertHasFiles(passthrough,
+ // eslint-disable-next-line no-loop-func
+ filesToInsert.filter(function(file) {
+ return file !== currentFile;
+ }), 1, currentFile);
+ }
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_type.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_type.js
new file mode 100644
index 00000000000..c5af7b3e70f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_type.js
@@ -0,0 +1,63 @@
+// mongofiles_type.js; ensure that the given content type is stored when passed
+// as the --type argument. If no argument is passed, it should be omitted in the
+// database.
+var testName = 'mongofiles_type';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --type option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ var db = conn.getDB('test');
+ var contentType = 'txt';
+
+ jsTest.log('Running put on file with --type with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error with a non-empty --type argument
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '-t', contentType,
+ 'put', filesToInsert[0]]
+ .concat(passthrough.args)),
+ 0, 'put failed when it should have succeeded 1');
+
+ var fileObj = db.fs.files.findOne({
+ filename: filesToInsert[0]
+ });
+
+ assert(fileObj, 'did not find expected GridFS file - ' + filesToInsert[0]);
+
+ assert.eq(fileObj.contentType, contentType, 'unexpected content type - found ' + fileObj.contentType + ' but expected ' + contentType);
+
+ // ensure tool runs without error with empty --type argument on linux
+ // and fails on windows
+ var comparison = 'eq';
+ if (_isWindows()) {
+ comparison = 'neq';
+ }
+ assert[comparison](runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--type', '',
+ 'put', filesToInsert[1]]
+ .concat(passthrough.args)),
+ 0, 'put failed unexpectedly');
+
+ if (!_isWindows()) {
+ fileObj = db.fs.files.findOne({
+ filename: filesToInsert[1]
+ });
+ assert.neq(fileObj, null, 'did not find expected GridFS file - ' + filesToInsert[1]);
+ assert.eq(fileObj.contentType, undefined, 'unexpected content type - found ' + fileObj.contentType + ' but expected undefined');
+ }
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_version.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_version.js
new file mode 100644
index 00000000000..3c2c3a6d959
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_version.js
@@ -0,0 +1,29 @@
+// mongofiles_version.js; ensure that getting the version works without error
+var testName = 'mongofiles_version';
+load('jstests/files/util/mongofiles_common.js');
+(function() {
+ jsTest.log('Testing mongofiles --version option');
+
+ var runTests = function(topology, passthrough) {
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ jsTest.log('Testing --version with ' + passthrough.name + ' passthrough');
+
+ // ensure tool runs without error
+ assert.eq(runMongoProgram.apply(this, ['mongofiles',
+ '--port', conn.port,
+ '--version']
+ .concat(passthrough.args)),
+ 0, '--version failed');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_write_concern.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_write_concern.js
new file mode 100644
index 00000000000..4e261536273
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_write_concern.js
@@ -0,0 +1,54 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = new ToolTest('write_concern', null);
+ var commonToolArgs = getCommonToolArguments();
+
+ var rs = new ReplSetTest({
+ name: "rpls",
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ });
+
+ rs.startSet();
+ rs.initiate();
+ rs.awaitReplication();
+ toolTest.port = rs.getPrimary().port;
+ var dbOne = rs.nodes[0].getDB("dbOne");
+
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['files',
+ '-vvvvv',
+ '-d', 'dbOne']
+ .concat(writeConcern)
+ .concat(commonToolArgs)
+ .concat(['put', 'jstests/files/testdata/files1.txt']));
+ assert.eq(exitCode, ret, name);
+ dbOne.dropDatabase();
+ }
+
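+ // startMongoProgramNoConnect launches mongofiles in the background without
+ // waiting for it to exit, presumably so the wc framework can exercise a put
+ // whose w:3 write concern cannot be satisfied once nodes are down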
+ function noConnectTest() {
+ return startMongoProgramNoConnect.apply(null, ['mongofiles',
+ '-d', 'dbOne',
+ '--writeConcern={w:3}',
+ '--host', rs.getPrimary().host]
+ .concat(commonToolArgs)
+ .concat(['put', 'jstests/files/testdata/files1.txt']));
+ }
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
+ runWCTest("mongofiles", rs, toolTest, writeConcernTestFunc, noConnectTest);
+
+ dbOne.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_write_concern_mongos.js b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_write_concern_mongos.js
new file mode 100644
index 00000000000..45386f2cc2f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/mongofiles_write_concern_mongos.js
@@ -0,0 +1,59 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = new ToolTest('write_concern', null);
+ var commonToolArgs = getCommonToolArguments();
+
+ var st = new ShardingTest({
+ shards: {
+ rs0: {
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ },
+ },
+ mongos: 1,
+ config: 1,
+ configReplSetTestOptions: {
+ settings: {chainingAllowed: false},
+ },
+ });
+ var rs = st.rs0;
+ rs.awaitReplication();
+ toolTest.port = st.s.port;
+ var dbOne = st.s.getDB('dbOne');
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['files',
+ '-vvvvv',
+ '-d', 'dbOne']
+ .concat(writeConcern)
+ .concat(commonToolArgs)
+ .concat(['put', 'jstests/files/testdata/files1.txt']));
+ assert.eq(exitCode, ret, name);
+ dbOne.dropDatabase();
+ }
+
+ function noConnectTest() {
+ return startMongoProgramNoConnect.apply(null, ['mongofiles',
+ '-d', 'dbOne',
+ '--writeConcern={w:3}',
+ '--host', st.s.host]
+ .concat(commonToolArgs)
+ .concat(['put', 'jstests/files/testdata/files1.txt']));
+ }
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
+ runWCTest("mongofiles", rs, toolTest, writeConcernTestFunc, noConnectTest);
+
+ dbOne.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/testdata/files1.txt b/src/mongo/gotools/test/qa-tests/jstests/files/testdata/files1.txt
new file mode 100644
index 00000000000..e9ea42a12b9
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/testdata/files1.txt
@@ -0,0 +1 @@
+this is a text file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/testdata/files2.txt b/src/mongo/gotools/test/qa-tests/jstests/files/testdata/files2.txt
new file mode 100644
index 00000000000..6d65e626d46
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/testdata/files2.txt
@@ -0,0 +1 @@
+this is another text file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/testdata/files3.txt b/src/mongo/gotools/test/qa-tests/jstests/files/testdata/files3.txt
new file mode 100644
index 00000000000..181ba5fd828
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/testdata/files3.txt
@@ -0,0 +1 @@
+this is yet another test file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/files/util/mongofiles_common.js b/src/mongo/gotools/test/qa-tests/jstests/files/util/mongofiles_common.js
new file mode 100644
index 00000000000..7ff85d959b1
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/files/util/mongofiles_common.js
@@ -0,0 +1,10 @@
+// mongofiles_common.js; contains variables used by mongofiles tests
+load('jstests/common/topology_helper.js');
+
+/* exported filesToInsert */
+// these must have unique names
+var filesToInsert = [
+ 'jstests/files/testdata/files1.txt',
+ 'jstests/files/testdata/files2.txt',
+ 'jstests/files/testdata/files3.txt'
+];
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/all_primaries_down_error_code.js b/src/mongo/gotools/test/qa-tests/jstests/import/all_primaries_down_error_code.js
new file mode 100644
index 00000000000..c5f24f6e697
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/all_primaries_down_error_code.js
@@ -0,0 +1,65 @@
+/**
+ * all_primaries_down_error_code.js
+ *
+ * This file tests TOOLS-690 where mongoimport returned exit code 0 when it should have returned
+ * exit code 1 on error. The error occurs when mongos cannot find a primary.
+ * This file tests that errors of type 'could not contact primary for replica set' return exit
+ * code 1.
+ */
+(function() {
+ 'use strict';
+ jsTest.log('Testing mongoimport when a sharded cluster has no primaries');
+
+ var sh = new ShardingTest({
+ name: 'all_primaries_down_error_code',
+ shards: 1,
+ verbose: 0,
+ mongos: 1,
+ other: {
+ rs: true,
+ numReplicas: 3,
+ chunksize: 1,
+ enableBalancer: 0,
+ },
+ });
+
+ // Make sure there is no primary in any replica set.
+ for (var rs of sh._rs) {
+ var ranOutOfPrimaries = false;
+ for (var i = 0; i < rs.nodes.length + 1; i++) {
+ var primary;
+ try {
+ // If we can't find a primary in 20 seconds then assume there are no more.
+ primary = rs.test.getPrimary(20000);
+ } catch (e) {
+ print('Error Finding Primary: ' + e);
+ ranOutOfPrimaries = true;
+ break;
+ }
+
+ jsTest.log('Stepping down ' + primary.host);
+
+ try {
+ primary.adminCommand({replSetStepDown: 300, force: true});
+ } catch (e) {
+ // Ignore any errors that occur when stepping down the primary.
+ print('Error Stepping Down Primary: ' + e);
+ }
+ }
+ // Assert that we left due to running out of primaries and not due to the loop ending.
+ assert(ranOutOfPrimaries,
+ 'Had to step down the primary more times than the number of nodes in the replset.');
+ }
+
+ // Check that we catch 'could not contact primary for replica set'
+ jsTest.log('All primaries stepped down, trying to import.');
+
+ var ret = runMongoProgram('mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', 'test',
+ '--collection', 'noPrimaryErrorCode',
+ '--host', sh.s0.host);
+ assert.eq(ret, 1, 'mongoimport should fail with no primary');
+
+ sh.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/boolean_type.js b/src/mongo/gotools/test/qa-tests/jstests/import/boolean_type.js
new file mode 100644
index 00000000000..b7832cc0981
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/boolean_type.js
@@ -0,0 +1,57 @@
+/**
+ * boolean_type.js
+ *
+ * This file tests the Boolean() type in mongoimport. Importing a document with a field like
+ * Boolean(1) should be treated identically to how the shell would insert a similar document.
+ */
+
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ jsTest.log('Testing running import with various options in the Boolean() type');
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+ var testDocs = [
+ {key: 'a', bool: Boolean(1)},
+ {key: 'b', bool: Boolean(0)},
+ {key: 'c', bool: Boolean(140)},
+ {key: 'd', bool: Boolean(-140.5)},
+ {key: 'e', bool: Boolean(Boolean(1))},
+ {key: 'f', bool: Boolean(Boolean(0))},
+ {key: 'g', bool: Boolean('')},
+ {key: 'h', bool: Boolean('f')},
+ {key: 'i', bool: Boolean(null)},
+ {key: 'j', bool: Boolean(undefined)},
+ {key: 'k', bool: Boolean(true)},
+ {key: 'l', bool: Boolean(false)},
+ {key: 'm', bool: Boolean(true, false)},
+ {key: 'n', bool: Boolean(false, true)},
+ {key: 'o', bool: [Boolean(1), Boolean(0), Date(23)]},
+ {key: 'p', bool: Boolean(Date(15))},
+ {key: 'q', bool: Boolean(0x585)},
+ {key: 'r', bool: Boolean(0x0)},
+ {key: 's', bool: Boolean()},
+ ];
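+ // Note: per JS coercion rules, Boolean('f') is true (any non-empty string is
+ // truthy), Boolean(null) and Boolean(undefined) are false, and extra arguments
+ // as in Boolean(true, false) are ignored, so that one evaluates to true.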
+
+ var ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', 'jstests/import/testdata/boolean.json',
+ '--db', 'imported',
+ '--collection', 'testcollbool']
+ .concat(commonToolArgs));
+ assert.eq(ret, 0);
+
+ // Confirm that mongoimport imports the testDocs identically to how the shell interprets them.
+ var coll = db1.getSiblingDB('imported').testcollbool;
+ for (var i = 0; i < testDocs.length; i++) {
+ var postImportDoc = coll.findOne({key: testDocs[i].key});
+ assert.eq(testDocs[i].key, postImportDoc.key,
+ 'imported doc ' + testDocs[i].key + ' does not match original');
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/collections.js b/src/mongo/gotools/test/qa-tests/jstests/import/collections.js
new file mode 100644
index 00000000000..cf72bf581a8
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/collections.js
@@ -0,0 +1,77 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ jsTest.log('Testing running import with bad command line options');
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+
+ // Make a dummy file to import by writing a test collection and exporting it
+ assert.eq(0, db1.c.count(), "setup1");
+ db1.c.save({a: 1, b: 2, c: 3});
+ db1.c.save({a: 4, b: 5, c: 6});
+ assert.eq(2, db1.c.count(), "setup2");
+
+ toolTest.runTool.apply(toolTest, ["export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName()]
+ .concat(commonToolArgs));
+
+ db1.c.drop();
+ assert.eq(0, db1.c.count(), "after drop", "-d", toolTest.baseName, "-c", "foo");
+
+
+ // copy the file to a file that contains the collection name
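+ // (mongoimport should strip the .json extension and treat the remainder,
+ // "foo.blah", as the target collection in the default "test" db)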
+ removeFile("foo.blah.json");
+ copyFile(toolTest.extFile, "foo.blah.json");
+
+ // copy the file to a file that contains the collection name plus an extra extension (.backup)
+ removeFile("foo.blah.json.backup");
+ copyFile(toolTest.extFile, "foo.blah.json.backup");
+
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", "foo.blah.json"]
+ .concat(commonToolArgs));
+ assert.eq(db1.c.getDB().getSiblingDB("test").foo.blah.count(), 2,
+ "importing file named after collection should insert to correct namespace");
+ db1.c.getDB().getSiblingDB("test").foo.blah.drop();
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", "foo.blah.json.backup"]
+ .concat(commonToolArgs));
+ assert.eq(db1.c.getDB().getSiblingDB("test").foo.blah.json.count(), 2,
+ "importing file with extra extension should still assume correct namespace");
+ db1.c.getDB().getSiblingDB("test").foo.blah.json.drop();
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", "foo.blah.json",
+ "--collection", "testcoll1"]
+ .concat(commonToolArgs));
+ assert.eq(db1.c.getDB().getSiblingDB("test").testcoll1.count(), 2,
+ "importing --file with --collection should use correct collection name");
+ db1.c.getDB().getSiblingDB("test").testcoll1.drop();
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "foo.blah.json"]
+ .concat(commonToolArgs));
+ assert.eq(db1.c.getDB().getSiblingDB("test").foo.blah.count(), 2,
+ "should be allowed to specify file as positional arg");
+ db1.c.getDB().getSiblingDB("test").foo.blah.drop();
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "foo.blah.json",
+ "--db", "testdb2"]
+ .concat(commonToolArgs));
+ assert.eq(db1.c.getDB().getSiblingDB("testdb2").foo.blah.count(), 2,
+ "should use database specified by --db");
+ db1.c.getDB().getSiblingDB("testdb2").foo.blah.drop();
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/decimal128.js b/src/mongo/gotools/test/qa-tests/jstests/import/decimal128.js
new file mode 100644
index 00000000000..9024096ca18
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/decimal128.js
@@ -0,0 +1,44 @@
+(function() {
+ // skip this test where NumberDecimal is unsupported (3.2 and earlier)
+ if (typeof NumberDecimal === 'undefined') {
+ return;
+ }
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ jsTest.log('Testing running import with various data types');
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+ var testDoc = {
+ _id: "foo",
+ x: NumberDecimal("124124"),
+ };
+
+ // Make a dummy file to import by writing a test collection and exporting it
+ assert.eq(0, db1.c.count(), "initial collection is not empty");
+ db1.c.save(testDoc);
+ toolTest.runTool.apply(toolTest, ["export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName()]
+ .concat(commonToolArgs));
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", toolTest.extFile,
+ "--db", "imported",
+ "--collection", "dec128"]
+ .concat(commonToolArgs));
+ var importedDocs = db1.c.getDB().getSiblingDB("imported").dec128.find().toArray();
+
+ assert.eq(importedDocs.length, 1, "incorrect # of docs imported");
+
+ var importedDoc = importedDocs[0];
+
+ assert.eq(importedDoc, testDoc, "imported doc and test doc do not match");
+
+ toolTest.stop();
+}());
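The round trip above passes through mongoexport's extended JSON encoding of decimal values, so the intermediate file presumably holds a document along these lines (a sketch of the file contents; the test only compares the re-imported document):

// { "_id": "foo", "x": { "$numberDecimal": "124124" } }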
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/drop.js b/src/mongo/gotools/test/qa-tests/jstests/import/drop.js
new file mode 100644
index 00000000000..a5a115269e0
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/drop.js
@@ -0,0 +1,48 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ jsTest.log('Testing running import with --drop');
+
+ var toolTest = getToolTest('import_writes');
+ var db = toolTest.db.getSiblingDB("droptest");
+ var commonToolArgs = getCommonToolArguments();
+
+ // Verify that --drop works.
+ // put a test doc in the collection, run import with --drop,
+ // make sure that the inserted doc is gone and only the imported
+ // docs are left.
+ db.c.insert({x: 1});
+ assert.eq(db.c.count(), 1, "collection count should be 1 at setup");
+ var ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/csv_header.csv",
+ "--type=csv",
+ "--db", db.getName(),
+ "--collection", db.c.getName(),
+ "--headerline",
+ "--drop"]
+ .concat(commonToolArgs));
+
+ // test csv file contains 3 docs and collection should have been dropped, so the doc we inserted
+ // should be gone and only the docs from the test file should be in the collection.
+ assert.eq(ret, 0);
+ assert.eq(db.c.count(), 3);
+ assert.eq(db.c.count({x: 1}), 0);
+
+ // --drop on a non-existent collection should not cause error
+ db.c.drop();
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/csv_header.csv",
+ "--type=csv",
+ "--db", db.getName(),
+ "--collection", db.c.getName(),
+ "--headerline",
+ "--drop"]
+ .concat(commonToolArgs));
+ assert.eq(ret, 0);
+ assert.eq(db.c.count(), 3);
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/fields.js b/src/mongo/gotools/test/qa-tests/jstests/import/fields.js
new file mode 100644
index 00000000000..073ac01dc20
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/fields.js
@@ -0,0 +1,107 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ jsTest.log('Testing running import with headerline');
+
+ formats = ["csv", "tsv"];
+
+ var checkCollectionContents = function(coll) {
+ var importedDoc = coll.findOne({"a": "foo"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, {a: "foo", b: "bar", c: {xyz: "blah"}, d: {hij: {lkm: "qwz"}}});
+ assert.eq(coll.count(), 3);
+ };
+
+ var reset = function(coll) {
+ coll.drop();
+ assert.eq(coll.count(), 0);
+ };
+
+ var toolTest = getToolTest("import_fields");
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+ for (var i=0; i<formats.length; i++) {
+ var format=formats[i];
+
+ var c = db1.c.getDB().getSiblingDB(format + "testdb")[format+"testcoll"];
+ // check that headerline uses the correct headers
+ var ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/" +format+"_header." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--headerline"]
+ .concat(commonToolArgs));
+
+ checkCollectionContents(c);
+ reset(c);
+
+ // check that the fields can be specified with --fields
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/" +format+"_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--fields", "a,b,c.xyz,d.hij.lkm"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ reset(c);
+
+ // check that the fields can be specified with --fieldFile
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/" +format+"_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--fieldFile", "jstests/import/testdata/fieldfile"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ // check that without --ignoreBlanks, the empty field is imported as a blank string
+ assert.eq(c.findOne({a: "bob"}).b, "");
+ reset(c);
+
+ // check that --ignoreBlanks causes empty fields to be omitted
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/" + format + "_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--fieldFile", "jstests/import/testdata/fieldfile",
+ "--ignoreBlanks"]
+ .concat(commonToolArgs));
+ assert.eq(c.findOne({a: "bob"}).b, undefined);
+ reset(c);
+
+ // when --fieldFile, --fields, and --headerline are all omitted,
+ // import should fail
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/" + format + "_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll"]
+ .concat(commonToolArgs));
+ assert.neq(ret, 0);
+ reset(c);
+
+ }
+
+ var c2 = db1.c.getDB().getSiblingDB("testdb")["extrafields"];
+ // check that extra fields are created as expected
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/extrafields.csv",
+ "--type=csv",
+ "--db", c2.getDB().toString(),
+ "--collection", c2.getName(),
+ "--fieldFile", "jstests/import/testdata/fieldfile"]
+ .concat(commonToolArgs));
+
+ var importedDoc = c2.findOne({"a": "one"});
+ assert.eq(importedDoc.field4, "extra1");
+ assert.eq(importedDoc.field5, "extra2");
+ assert.eq(importedDoc.field6, "extra3");
+ reset(c2);
+
+ toolTest.stop();
+}());
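The dotted field names used above (a,b,c.xyz,d.hij.lkm) expand into nested subdocuments, as checkCollectionContents asserts. A minimal illustration of that expansion, runnable in any JS shell (the splitting logic is illustrative, not the tool's actual parser):

// Expand a dotted field name into a nested document.
function expandDotted(field, value) {
  var doc = {};
  var parts = field.split(".");
  var cur = doc;
  for (var i = 0; i < parts.length - 1; i++) {
    cur[parts[i]] = {};
    cur = cur[parts[i]];
  }
  cur[parts[parts.length - 1]] = value;
  return doc;
}
// expandDotted("d.hij.lkm", "qwz") -> {d: {hij: {lkm: "qwz"}}}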
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/import_document_validation.js b/src/mongo/gotools/test/qa-tests/jstests/import/import_document_validation.js
new file mode 100644
index 00000000000..533fb2f11f4
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/import_document_validation.js
@@ -0,0 +1,108 @@
+/**
+ * import_document_validation.js
+ *
+ * This file tests that mongoimport works with document validation. It checks both that
+ * invalid documents are not imported when validation is turned on, and that all documents
+ * are imported when the user indicates they want to bypass validation.
+ */
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ /**
+ * Part 1: Test that import follows document validation rules.
+ */
+ jsTest.log('Testing that import reacts well to document validation');
+
+ var toolTest = getToolTest('import_document_validation');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create 1000 documents, half of which will pass the validation
+ for (var i = 0; i < 1000; i++) {
+ if (i%2 === 0) {
+ testDB.bar.insert({_id: i, num: i+1, s: '' + i});
+ } else {
+ testDB.bar.insert({_id: i, num: i+1, s: '' + i, baz: i});
+ }
+ }
+ // sanity check the insertion worked
+ assert.eq(1000, testDB.bar.count());
+
+ // export the data
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', toolTest.extFile,
+ '-d', 'test',
+ '-c', 'bar']
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'export should run successfully');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(),
+ 'after dropping the database, no documents should be seen');
+
+ // sanity check that we can import the data without validation
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', toolTest.extFile,
+ '--db', 'test',
+ '-c', 'bar']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ assert.eq(1000, testDB.bar.count(),
+ 'after import, the documents should be seen again');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(),
+ 'after dropping the database, no documents should be seen');
+
+ // turn on validation
+ var r = testDB.createCollection('bar', {validator: {baz: {$exists: true}}});
+ assert.eq(r, {ok: 1}, 'create collection with validation works');
+
+ // test that it's working
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 0, "invalid documents shouldn't be inserted");
+
+ // import the 1000 records of which only 500 are valid
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', toolTest.extFile,
+ '--db', 'test',
+ '-c', 'bar']
+ .concat(commonToolArgs));
+ assert.eq(0, ret,
+ 'import against a collection with validation on still succeeds');
+
+ assert.eq(500, testDB.bar.count(), 'only the valid documents are imported');
+
+ /**
+ * Part 2: Test that import can bypass document validation rules.
+ */
+ jsTest.log('Testing that bypass document validation works');
+
+ testDB.dropDatabase();
+
+ // turn on validation
+ r = testDB.createCollection('bar', {validator: {baz: {$exists: true}}});
+ assert.eq(r, {ok: 1}, 'create collection with validation should work');
+
+ // test that we cannot insert an 'invalid' document
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 0, 'invalid documents should not be inserted');
+
+ // import the 1000 records again with bypassDocumentValidation turned on
+ ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', toolTest.extFile,
+ '--db', 'test',
+ '-c', 'bar',
+ '--bypassDocumentValidation']
+ .concat(commonToolArgs));
+ assert.eq(0, ret,
+ 'importing documents should work with bypass document validation set');
+ assert.eq(1000, testDB.bar.count(),
+ 'all documents should be imported with bypass document validation set');
+}());
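The validator used throughout accepts only documents carrying a baz field; a standalone shell sketch of the same rule (the database name here is hypothetical):

var vdb = db.getSiblingDB('validatorDemo'); // hypothetical demo database
vdb.createCollection('bar', {validator: {baz: {$exists: true}}});
vdb.bar.insert({num: 1});         // rejected: no baz field
vdb.bar.insert({num: 1, baz: 1}); // accepted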
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/import_types.js b/src/mongo/gotools/test/qa-tests/jstests/import/import_types.js
new file mode 100644
index 00000000000..3d0a26d745d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/import_types.js
@@ -0,0 +1,75 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ jsTest.log('Testing importing a json file and checking types');
+
+ var toolTest = getToolTest('import_types');
+
+ // the import file
+ var importFile = 'jstests/import/testdata/types.json';
+
+ // the db and collection we'll use
+ var testDB = toolTest.db.getSiblingDB('imported');
+ var testColl = testDB.types;
+ testColl.drop();
+ var commonToolArgs = getCommonToolArguments();
+
+ var importTypes = {
+ "double_type": 1,
+ "double_exponent_type": 1,
+ "double_negative_type": 1,
+ "NaN": 1,
+ "infinity": 1,
+ "negative_infinity": 1,
+ "string_type": 2,
+ "object_type": 3,
+ "binary_data": 5,
+ "undefined_type": 6,
+ "object_id_type": 7,
+ "true_type": 8,
+ "false_type": 8,
+ "date_type": 9,
+ "iso_date_type": 9,
+ "null_type": 10,
+ "int32_type": 16,
+ "int32_negative_type": 16,
+ "number_int_type": 16,
+ "int32_hex": 16,
+ "int64_type": 18,
+ "int64_negative_type": 18,
+ "number_long_type": 18,
+ "minkey_type": -1,
+ "maxkey_type": 127,
+ "regex_type": 11,
+ };
+
+
+ // import the data in from types.json
+ var ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', importFile,
+ '--db', 'imported',
+ '--collection', 'types']
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ jsTest.log("Imported", importFile);
+
+ var postImportDoc = testColl.findOne();
+ printjson(postImportDoc);
+
+ var docKeys = Object.keys(importTypes);
+
+ for (var i = 0; i < docKeys.length; i++) {
+ jsTest.log("Checking type of " + docKeys[i]);
+ var typeNum = importTypes[docKeys[i]];
+ var field = docKeys[i];
+ var query = {};
+ query[field] = {"$type": typeNum};
+ printjson(query);
+ assert.eq(testColl.find(query).count(), 1);
+ }
+
+ toolTest.stop();
+}());
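The numeric values in importTypes are BSON type codes as consumed by the $type query operator; servers from 3.2 onward also accept string aliases, so the loop's query could equivalently use names (a sketch for one field; "int" is the alias for code 16):

db.getSiblingDB('imported').types.find({int32_type: {$type: 16}}).count();    // numeric code form
db.getSiblingDB('imported').types.find({int32_type: {$type: "int"}}).count(); // string alias, 3.2+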
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/import_write_concern.js b/src/mongo/gotools/test/qa-tests/jstests/import/import_write_concern.js
new file mode 100644
index 00000000000..0c7833b75b9
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/import_write_concern.js
@@ -0,0 +1,71 @@
+(function() {
+
+ load("jstests/configs/replset_28.config.js");
+
+ var name = 'import_write_concern';
+ var toolTest = new ToolTest(name, null);
+ var dbName = "foo";
+ var colName = "bar";
+ var rs = new ReplSetTest({
+ name: name,
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ });
+
+ var commonToolArgs = getCommonToolArguments();
+ var fileTarget = "wc.csv";
+ rs.startSet();
+ rs.initiate();
+ rs.awaitReplication();
+ toolTest.port = rs.getPrimary().port;
+
+ var db = rs.getPrimary().getDB(dbName);
+
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', fileTarget,
+ '-d', dbName,
+ '-c', colName]
+ .concat(writeConcern)
+ .concat(commonToolArgs));
+ assert.eq(exitCode, ret, name);
+ db.dropDatabase();
+ }
+
+ function noConnectTest() {
+ return startMongoProgramNoConnect.apply(null, ['mongoimport',
+ '--writeConcern={w:3}',
+ '--host', rs.getPrimary().host,
+ '--file', fileTarget]
+ .concat(commonToolArgs));
+ }
+
+ // create a test collection
+ var col = db.getCollection(colName);
+ for (var i=0; i<=100; i++) {
+ col.insert({_id: i, x: i*i});
+ }
+ rs.awaitReplication();
+
+ // export the data that we'll use
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', fileTarget,
+ '-d', dbName,
+ '-c', colName]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database so it's empty
+ db.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
+ runWCTest("mongoimport", rs, toolTest, writeConcernTestFunc, noConnectTest);
+
+ db.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+
+}());
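For context, the tools' --writeConcern flag accepts either a bare w value or a JSON document like the {w:3} passed in noConnectTest; both invocations below are illustrative, not part of the suite:

// mongoimport --writeConcern majority --host <primary> -d foo -c bar --file wc.csv
// mongoimport --writeConcern '{w: 3, wtimeout: 5000}' --host <primary> -d foo -c bar --file wc.csv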
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/import_write_concern_mongos.js b/src/mongo/gotools/test/qa-tests/jstests/import/import_write_concern_mongos.js
new file mode 100644
index 00000000000..98a49d4bc11
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/import_write_concern_mongos.js
@@ -0,0 +1,76 @@
+(function() {
+
+ load("jstests/configs/replset_28.config.js");
+
+ var name = 'import_write_concern';
+ var toolTest = new ToolTest(name, null);
+ var dbName = "foo";
+ var colName = "bar";
+ var fileTarget = "wc_mongos.csv";
+ var st = new ShardingTest({
+ shards: {
+ rs0: {
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ },
+ },
+ mongos: 1,
+ config: 1,
+ configReplSetTestOptions: {
+ settings: {chainingAllowed: false},
+ },
+ });
+ var rs = st.rs0;
+ rs.awaitReplication();
+ toolTest.port = st.s.port;
+
+ var commonToolArgs = getCommonToolArguments();
+ var db = st.s.getDB(dbName);
+ var col = db.getCollection(colName);
+
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['import',
+ '--file', fileTarget,
+ '-d', dbName,
+ '-c', colName]
+ .concat(writeConcern)
+ .concat(commonToolArgs));
+ assert.eq(exitCode, ret, name);
+ db.dropDatabase();
+ }
+
+ function startProgramNoConnect() {
+ return startMongoProgramNoConnect.apply(null, ['mongoimport',
+ '--writeConcern={w:3}',
+ '--host', st.s.host,
+ '--file', fileTarget]
+ .concat(commonToolArgs));
+ }
+
+ // create a test collection
+ for (var i=0; i<=100; i++) {
+ col.insert({_id: i, x: i*i});
+ }
+ rs.awaitReplication();
+
+ // setup: export the data that we'll use
+ var ret = toolTest.runTool.apply(toolTest, ['export',
+ '--out', fileTarget,
+ '-d', dbName,
+ '-c', colName]
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database so it's empty
+ db.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
+ runWCTest("mongoimport", rs, toolTest, writeConcernTestFunc, startProgramNoConnect);
+
+ db.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/no_primary_error_code.js b/src/mongo/gotools/test/qa-tests/jstests/import/no_primary_error_code.js
new file mode 100644
index 00000000000..5fc0356e9e0
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/no_primary_error_code.js
@@ -0,0 +1,65 @@
+/**
+ * no_primary_error_code.js
+ *
+ * This file tests TOOLS-690 where mongoimport returned exit code 0 when it should have returned
+ * exit code 1 on error. The error arises when mongos cannot find a primary. This file checks
+ * that errors of type 'not master', 'unable to target', and 'Connection refused' yield error
+ * code 1.
+ */
+(function() {
+ 'use strict';
+ jsTest.log('Testing mongoimport when a sharded cluster has no primaries');
+
+ var sh = new ShardingTest({
+ name: 'no_primary_error_code',
+ shards: 1,
+ verbose: 0,
+ mongos: 1,
+ other: {
+ rs: true,
+ numReplicas: 1,
+ chunksize: 1,
+ enableBalancer: 0,
+ },
+ });
+
+ // If we can't find a primary within 20 seconds, then assume there is none.
+ var primary = sh.rs0.getPrimary(20000);
+
+ jsTest.log('Stepping down ' + primary.host);
+
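+ // replSetStepDown severs the connection it is issued over, so the
+ // command can throw a network error even when the step-down succeeds.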
+ try {
+ primary.adminCommand({replSetStepDown: 300, force: true});
+ } catch (e) {
+ // Ignore any errors that occur when stepping down the primary.
+ print('Error Stepping Down Primary: ' + e);
+ }
+
+ // Check that we catch 'not master'
+ jsTest.log('All primaries stepped down, trying to import.');
+
+
+ var ret = runMongoProgram('mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', 'test',
+ '--collection', 'noPrimaryErrorCode',
+ '--host', sh.s0.host);
+ assert.eq(ret, 1, 'mongoimport should fail with no primary');
+
+ sh.getDB('test').dropDatabase();
+
+ // Kill the replica set.
+ sh.rs0.stopSet(15);
+
+ // Check that we catch 'Connection refused'
+ jsTest.log('All primaries died, trying to import.');
+
+ ret = runMongoProgram('mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', 'test',
+ '--collection', 'noPrimaryErrorCode',
+ '--host', sh.s0.host);
+ assert.eq(ret, 1, 'mongoimport should fail with no primary');
+
+ sh.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/options.js b/src/mongo/gotools/test/qa-tests/jstests/import/options.js
new file mode 100644
index 00000000000..12be9fd3bd4
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/options.js
@@ -0,0 +1,123 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ jsTest.log('Testing running import with bad command line options');
+
+ var toolTest = getToolTest('bad_options');
+ var db1 = toolTest.db;
+
+ // Make a dummy file to import by writing a test collection and exporting it
+ assert.eq(0, db1.c.count(), "setup1");
+ db1.c.save({a: 1, b: 2, c: 3});
+ db1.c.save({a: 4, b: 5, c: 6});
+ assert.eq(2, db1.c.count(), "setup2");
+
+ toolTest.runTool("export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName());
+
+ // also make a CSV version of it
+ toolTest.runTool("export",
+ "--out", toolTest.extFile + ".csv",
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName(),
+ "--fields", "a,b,c",
+ "--csv");
+ db1.c.drop();
+ assert.eq(0, db1.c.count(), "after drop", "-d", toolTest.baseName, "-c", "foo");
+
+ // verify that the normal sane case works
+ var ret = toolTest.runTool("import",
+ "--file", toolTest.extFile,
+ "-d", "test",
+ "-c", "test");
+ assert.eq(ret, 0);
+
+ var testDb = db1.c.getDB().getSiblingDB("test");
+ assert.eq.soon(2, testDb.test.count.bind(testDb.test), "test.test should have 2 records");
+ testDb.test.drop();
+
+ var testScenarios = [
+ {args: [],
+ desc: "importing with no args should fail"},
+
+ {args: [toolTest.extFile, toolTest.extFile],
+ desc: "importing with multiple positional args should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, toolTest.extFile],
+ desc: "specifying both a --file and a positional argument should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", "non-existent-file.json"],
+ desc: "specifying a --file with a nonexistent filename should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", "."],
+ desc: "specifying a --file with a directory name should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--type", "bogus"],
+ desc: "importing with an invalid --type should fail"},
+
+ {args: ["--db", "x.y.z", "-c", "test", "--file", toolTest.extFile],
+ desc: "importing with an invalid database name (. in name) should fail"},
+
+ {args: ["--db", "$x", "-c", "test", "--file", toolTest.extFile],
+ desc: "importing with an invalid database name ($ in name) should fail"},
+
+ {args: ["--db", "test", "-c", "blah$asfsaf", "--file", toolTest.extFile],
+ desc: "importing with an invalid collection name should fail"},
+
+ {args: ["--db", "test", "-c", "blah$asfsaf", "--file", toolTest.extFile],
+ desc: "importing with an invalid collection name should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=csv", "--fields", "a,$xz,b"],
+ desc: "--fields containing a field containing a $ should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--type=json", "--fields", "a,b"],
+ desc: "specifying --fields with --json should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--headerline", "--fields", "a,b", "--type=csv"],
+ desc: "specifying both --fields and --headerline should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=csv", "--fields", "a,b", "--fieldFile", toolTest.extFile + ".csv"],
+ desc: "specifying both --fields and --fieldFile should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=csv", "--headerline", "--fieldFile", toolTest.extFile + ".csv"],
+ desc: "specifying both --headerline and --fieldFile should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=csv", "--fields", "a,b,b"],
+ desc: "--fields with duplicate field names should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=csv", "--fields", "a,b,b.c"],
+ desc: "--fields with field names of overlapping structures should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--type=csv", "--fields", "a,b,b.c"],
+ desc: "--fields with field names of overlapping structures should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--upsertFields", "a,$b"],
+ desc: "invalid characters in upsertFields should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--jsonArray"],
+ desc: "using --jsonArray with a non-array input file should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile + ".csv", "--type=json"],
+ desc: "using --type=json with invalid json should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--type=csv", "--fields=a,b,c"],
+ desc: "using --type=csv with invalid csv should fail"},
+
+ {args: ["--db", "test", "-c", "test", "--file", toolTest.extFile, "--type=json", "--headerline"],
+ desc: "using --type=json with headerline should fail"},
+ ];
+
+ for (var i=0; i<testScenarios.length; i++) {
+ jsTest.log('Testing: ' + testScenarios[i].desc);
+ ret = toolTest.runTool.apply(toolTest, ["import"].concat(testScenarios[i].args));
+ assert.neq(0, ret, i + ": " + testScenarios[i].desc);
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/parse_grace.js b/src/mongo/gotools/test/qa-tests/jstests/import/parse_grace.js
new file mode 100644
index 00000000000..99442265e8c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/parse_grace.js
@@ -0,0 +1,121 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var expectedDocs = [{
+ a: "foo",
+ b: 12,
+ c: {
+ xyz: ISODate("1997-06-02T15:24:00Z"),
+ noop: true,
+ },
+ d: {hij: {lkm: BinData(0, "e8MEnzZoFyMmD7WSHdNrFJyEk8M=")}},
+ }, {
+ a: "bar",
+ b: 24,
+ c: {
+ xyz: "06/08/2016 09:26:00",
+ noop: true,
+ },
+ d: {hij: {lkm: BinData(0, "dGVzdAo=")}},
+ }, {
+ a: "baz",
+ b: 36,
+ c: {
+ xyz: ISODate("2016-06-08T09:26:00Z"),
+ noop: false,
+ },
+ d: {hij: {lkm: BinData(0, "dGVzdAo=")}},
+ }];
+ jsTest.log('Testing parseGrace option');
+
+ var checkCollectionContents = function(coll) {
+ var importedDoc = coll.findOne({a: "foo"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, expectedDocs[0]);
+ importedDoc = coll.findOne({a: "baz"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, expectedDocs[2]);
+ };
+
+ var reset = function(coll) {
+ assert.eq.soon(0, function() {
+ coll.drop();
+ return coll.count();
+ }, "collection should be empty after drop");
+ };
+
+ var toolTest = getToolTest("import_fields");
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+ var c = db1.c.getDB().getSiblingDB("testdb")["testcoll"];
+
+ // parseGrace=stop should cause a failure
+ var ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/parse_grace.csv",
+ "--type", "csv",
+ "--db", "testdb",
+ "--collection", "testcoll",
+ "--columnsHaveTypes",
+ "--parseGrace", "stop",
+ "--headerline"]
+ .concat(commonToolArgs));
+ assert.neq(ret, 0);
+ reset(c);
+
+ // parseGrace=skipRow should not import the row
+ // with an un-coercible field
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/parse_grace.csv",
+ "--type", "csv",
+ "--db", "testdb",
+ "--collection", "testcoll",
+ "--columnsHaveTypes",
+ "--parseGrace", "skipRow",
+ "--headerline"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ assert.eq(c.count(), 2);
+ reset(c);
+
+ // parseGrace=skipField should not import an
+ // un-coercible field, but should still keep the
+ // rest of the row
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/parse_grace.csv",
+ "--type", "csv",
+ "--db", "testdb",
+ "--collection", "testcoll",
+ "--columnsHaveTypes",
+ "--parseGrace", "skipField",
+ "--headerline"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ assert.eq(c.count(), 3);
+ assert.neq(c.findOne({a: "bar"}), null);
+ assert.eq(c.findOne({a: "bar"}).c.xyz, undefined);
+ reset(c);
+
+ // parseGrace=autoCast should import the un-coercible field
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/parse_grace.csv",
+ "--type", "csv",
+ "--db", "testdb",
+ "--collection", "testcoll",
+ "--columnsHaveTypes",
+ "--parseGrace", "autoCast",
+ "--headerline"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ assert.eq(c.count(), 3);
+ var importedDoc = c.findOne({a: "bar"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, expectedDocs[1]);
+ reset(c);
+
+ toolTest.stop();
+}());
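With --columnsHaveTypes, each header cell in parse_grace.csv carries a field name plus a type annotation; the header maps onto the documents in expectedDocs roughly as follows (a summary of this test's data, not a full list of supported types):

// a.string()                                     -> a: string
// b.int32()                                      -> b: 32-bit integer
// "c.xyz.date_oracle(Month dd, yyyy HH24:mi:ss)" -> c.xyz: date, parsed with the given format
// c.noop.boolean()                               -> c.noop: boolean
// d.hij.lkm.binary(hex)                          -> d.hij.lkm: BinData decoded from hex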
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/replset.js b/src/mongo/gotools/test/qa-tests/jstests/import/replset.js
new file mode 100644
index 00000000000..3ff32650d07
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/replset.js
@@ -0,0 +1,48 @@
+(function() {
+ jsTest.log('Testing running import against a replica set');
+
+ var toolTest = new ToolTest('import_repl');
+
+ var replset1 = new ReplSetTest({nodes: 3, name: 'importtest'});
+ replset1.startSet();
+ replset1.initiate();
+
+ var primary = replset1.getPrimary();
+ var secondary = replset1.getSecondary();
+
+ var db = primary.getDB('import_repl_test');
+
+ // trying to write to the secondary should fail
+ assert.neq(runMongoProgram.apply(this, ['mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', db.getName(),
+ '--collection', db.c.getName(),
+ '--host', secondary.host]), 0,
+ "writing to secondary should fail");
+
+ assert.eq(db.c.count(), 0, 'database not empty');
+
+ // now import using the primary
+ assert.eq(runMongoProgram.apply(this, ['mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', db.getName(),
+ '--collection', db.c.getName(),
+ '--host', primary.host]), 0,
+ "writing to primary should succeed");
+
+ assert.neq(db.c.count(), 0, 'database unexpectedly empty on primary');
+
+ db.dropDatabase();
+
+ // import using the secondary but include replset name, should succeed
+ assert.eq(runMongoProgram.apply(this, ['mongoimport',
+ '--file', 'jstests/import/testdata/basic.json',
+ '--db', db.getName(),
+ '--collection', db.c.getName(),
+ '--host', replset1.name + "/" + secondary.host]), 0,
+ "writing to secondary with replset name should succeed");
+
+ assert.neq(db.c.count(), 0, 'database unexpectedly empty on secondary');
+
+ toolTest.stop();
+}());
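The last import succeeds because the setName/host form makes the tool replica-set aware, discovering and writing to the primary, whereas a bare secondary host is a direct connection; schematically:

// --host <secondary-host>:<port>             direct connection: writes to a secondary fail
// --host importtest/<secondary-host>:<port>  replica-set aware: writes routed to the primary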
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/stoponerror.js b/src/mongo/gotools/test/qa-tests/jstests/import/stoponerror.js
new file mode 100644
index 00000000000..0eee2689204
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/stoponerror.js
@@ -0,0 +1,40 @@
+(function() {
+ jsTest.log('Testing running import with --stopOnError');
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('import_dupes');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+ var db = db1.getSiblingDB("dupetest");
+ db.dropDatabase();
+
+ // Verify that a duplicate _id in the input file causes a failure when --stopOnError is set
+ db.c.insert({_id: 1234, b: "000000", c: 222});
+ assert.eq(db.c.count(), 1, "collection count should be 1 at setup");
+ var ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/dupes.json",
+ "--db", db.getName(),
+ "--collection", db.c.getName(),
+ "--stopOnError"]
+ .concat(commonToolArgs));
+
+ assert.neq(ret, 0,
+ "duplicate key with --stopOnError should return nonzero exit code");
+
+ // drop it, try again without stop on error
+ db.c.drop();
+ db.c.insert({_id: 1234, b: "000000", c: 222});
+ ret = toolTest.runTool.apply(toolTest, ["import", "--file",
+ "jstests/import/testdata/dupes.json",
+ "--db", db.getName(),
+ "--collection", db.c.getName()]
+ .concat(commonToolArgs));
+ assert.eq(ret, 0,
+ "duplicate key without --stopOnError should return zero exit code");
+ assert.docEq(db.c.findOne({_id: 1234}), {_id: 1234, b: "000000", c: 222});
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/basic.json b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/basic.json
new file mode 100644
index 00000000000..93b5efcd940
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/basic.json
@@ -0,0 +1,36 @@
+{_id:1, x:"1"}
+{_id:2, x:"2"}
+{_id:3, x:"3"}
+{_id:4, x:"4"}
+{_id:5, x:"5"}
+{_id:6, x:"6"}
+{_id:7, x:"7"}
+{_id:8, x:"8"}
+{_id:9, x:"9"}
+{_id:10, x:"10"}
+{_id:11, x:"11"}
+{_id:12, x:"12"}
+{_id:13, x:"13"}
+{_id:14, x:"14"}
+{_id:15, x:"15"}
+{_id:16, x:"16"}
+{_id:17, x:"17"}
+{_id:18, x:"18"}
+{_id:19, x:"19"}
+{_id:20, x:"20"}
+{_id:21, x:"21"}
+{_id:22, x:"22"}
+{_id:23, x:"23"}
+{_id:24, x:"24"}
+{_id:25, x:"25"}
+{_id:26, x:"26"}
+{_id:27, x:"27"}
+{_id:28, x:"28"}
+{_id:29, x:"29"}
+{_id:30, x:"30"}
+{_id:31, x:"31"}
+{_id:32, x:"32"}
+{_id:33, x:"33"}
+{_id:34, x:"34"}
+{_id:35, x:"35"}
+
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/boolean.json b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/boolean.json
new file mode 100644
index 00000000000..1ee4458d5fa
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/boolean.json
@@ -0,0 +1,19 @@
+{ key: 'a', bool: Boolean(1) }
+{ key: 'b', bool: Boolean(0) }
+{ key: 'c', bool: Boolean(140) }
+{ key: 'd', bool: Boolean(-140.5) }
+{ key: 'e', bool: Boolean(Boolean(1)) }
+{ key: 'f', bool: Boolean(Boolean(0)) }
+{ key: 'g', bool: Boolean('') }
+{ key: 'h', bool: Boolean('f') }
+{ key: 'i', bool: Boolean(null) }
+{ key: 'j', bool: Boolean(undefined) }
+{ key: 'k', bool: Boolean(true) }
+{ key: 'l', bool: Boolean(false) }
+{ key: 'm', bool: Boolean(true, false) }
+{ key: 'n', bool: Boolean(false, true) }
+{ key: 'o', bool: [ Boolean(1), Boolean(0), Date(23) ] }
+{ key: 'p', bool: Boolean(Date(15)) }
+{ key: 'q', bool: Boolean(0x585) }
+{ key: 'r', bool: Boolean(0x0) }
+{ key: 's', bool: Boolean() }
\ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/csv_header.csv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/csv_header.csv
new file mode 100644
index 00000000000..4c308f094b1
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/csv_header.csv
@@ -0,0 +1,4 @@
+a,b,c.xyz,d.hij.lkm
+foo,bar,blah,qwz
+bob,,steve,sue
+one,two,three,four
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/csv_noheader.csv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/csv_noheader.csv
new file mode 100644
index 00000000000..15427ed2b89
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/csv_noheader.csv
@@ -0,0 +1,3 @@
+foo,bar,blah,qwz
+bob,,steve,sue
+one,two,three,four
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/dupes.json b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/dupes.json
new file mode 100644
index 00000000000..5d7ce696dca
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/dupes.json
@@ -0,0 +1,34 @@
+{_id:2, x:"2"}
+{_id:3, x:"3"}
+{_id:4, x:"4"}
+{_id:5, x:"5"}
+{_id:6, x:"6"}
+{_id:7, x:"7"}
+{_id:8, x:"8"}
+{_id:9, x:"9"}
+{_id:10, x:"10"}
+{_id:11, x:"11"}
+{_id:12, x:"12"}
+{_id:13, x:"13"}
+{_id:14, x:"14"}
+{_id:15, x:"15"}
+{_id:16, x:"16"}
+{_id:17, x:"17"}
+{_id:18, x:"18"}
+{_id:19, x:"19"}
+{_id:20, x:"20"}
+{_id:1234, x:"21"}
+{_id:22, x:"22"}
+{_id:23, x:"23"}
+{_id:24, x:"24"}
+{_id:25, x:"25"}
+{_id:26, x:"26"}
+{_id:27, x:"27"}
+{_id:28, x:"28"}
+{_id:29, x:"29"}
+{_id:30, x:"30"}
+{_id:31, x:"31"}
+{_id:32, x:"32"}
+{_id:33, x:"33"}
+{_id:34, x:"34"}
+{_id:35, x:"35"}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/extrafields.csv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/extrafields.csv
new file mode 100644
index 00000000000..945dedb557a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/extrafields.csv
@@ -0,0 +1,3 @@
+foo,bar,blah,qwz
+bob,,steve,sue
+one,two,three,four,extra1,extra2,extra3
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/fieldfile b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/fieldfile
new file mode 100644
index 00000000000..d08b7dd21f5
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/fieldfile
@@ -0,0 +1,4 @@
+a
+b
+c.xyz
+d.hij.lkm
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/parse_grace.csv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/parse_grace.csv
new file mode 100644
index 00000000000..447c1bd647e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/parse_grace.csv
@@ -0,0 +1,4 @@
+a.string(),b.int32(),"c.xyz.date_oracle(Month dd, yyyy HH24:mi:ss)",c.noop.boolean(),d.hij.lkm.binary(hex)
+foo,12,"June 02, 1997 15:24:00",true,7bc3049f36681723260fb5921dd36b149c8493c3
+bar,24,"06/08/2016 09:26:00",true,746573740a
+baz,36,"June 08, 2016 09:26:00",false,746573740a
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/tsv_header.tsv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/tsv_header.tsv
new file mode 100644
index 00000000000..d10280f5e04
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/tsv_header.tsv
@@ -0,0 +1,4 @@
+a b c.xyz d.hij.lkm
+foo bar blah qwz
+bob steve sue
+one two three four
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/tsv_noheader.tsv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/tsv_noheader.tsv
new file mode 100644
index 00000000000..3729293b3ac
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/tsv_noheader.tsv
@@ -0,0 +1,3 @@
+foo bar blah qwz
+bob steve sue
+one two three four
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_extrafields.csv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_extrafields.csv
new file mode 100644
index 00000000000..8d398a3745f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_extrafields.csv
@@ -0,0 +1,3 @@
+foo,12,"June 02, 1997 15:24:00",true,7bc3049f36681723260fb5921dd36b149c8493c3
+bar,24,"June 08, 2016 09:26:00",false,746573740a
+one,2,"May 08, 2016 09:26:00",false,746573740a,extra1,extra2
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_extrafields.tsv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_extrafields.tsv
new file mode 100644
index 00000000000..a4ca42f4589
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_extrafields.tsv
@@ -0,0 +1,3 @@
+foo 12 June 02, 1997 15:24:00 true 7bc3049f36681723260fb5921dd36b149c8493c3
+bar 24 June 08, 2016 09:26:00 false 746573740a
+one 2 May 08, 2016 09:26:00 false 746573740a extra1 extra2
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_header.csv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_header.csv
new file mode 100644
index 00000000000..1140f31b20b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_header.csv
@@ -0,0 +1,3 @@
+a.string(),b.int32(),"c.xyz.date_oracle(Month dd, yyyy HH24:mi:ss)",c.noop.boolean(),d.hij.lkm.binary(hex)
+foo,12,"June 02, 1997 15:24:00",true,7bc3049f36681723260fb5921dd36b149c8493c3
+bar,24,"June 08, 2016 09:26:00",false,746573740a
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_header.tsv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_header.tsv
new file mode 100644
index 00000000000..a80b16848be
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_header.tsv
@@ -0,0 +1,3 @@
+a.string() b.int32() c.xyz.date_oracle(Month dd, yyyy HH24:mi:ss) c.noop.boolean() d.hij.lkm.binary(hex)
+foo 12 June 02, 1997 15:24:00 true 7bc3049f36681723260fb5921dd36b149c8493c3
+bar 24 June 08, 2016 09:26:00 false 746573740a
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_noheader.csv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_noheader.csv
new file mode 100644
index 00000000000..50eeda2d83f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_noheader.csv
@@ -0,0 +1,2 @@
+foo,12,"June 02, 1997 15:24:00",true,7bc3049f36681723260fb5921dd36b149c8493c3
+bar,24,"June 08, 2016 09:26:00",false,746573740a
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_noheader.tsv b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_noheader.tsv
new file mode 100644
index 00000000000..a4eb1896c1c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typed_noheader.tsv
@@ -0,0 +1,2 @@
+foo 12 June 02, 1997 15:24:00 true 7bc3049f36681723260fb5921dd36b149c8493c3
+bar 24 June 08, 2016 09:26:00 false 746573740a
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typedfieldfile b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typedfieldfile
new file mode 100644
index 00000000000..0068166003f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/typedfieldfile
@@ -0,0 +1,5 @@
+a.string()
+b.int32()
+c.xyz.date_oracle(Month dd, yyyy HH24:mi:ss)
+c.noop.boolean()
+d.hij.lkm.binary(hex)
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/types.json b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/types.json
new file mode 100644
index 00000000000..692d7999709
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/types.json
@@ -0,0 +1,27 @@
+{ "double_type" : 5.0,
+ "double_exponent_type" : 5e+32,
+ "double_negative_type" : -5.0,
+ "NaN": NaN,
+ "infinity" : Infinity,
+ "negative_infinity" : -Infinity,
+ "string_type" : "sample string",
+ "object_type" : {"sample" : "object"},
+ "binary_data" : BinData(3, "e8MEnzZoFyMmD7WSHdNrFJyEk8M="),
+ "undefined_type" : undefined,
+ "object_id_type" : ObjectId("54b03ef2a817f4f960f5b809"),
+ "true_type" : true,
+ "false_type" : false,
+ "date_type" : Date(45),
+ "iso_date_type" : ISODate("2015-02-25T16:42:11Z"),
+ "null_type" : null,
+ "int32_type" : 5,
+ "int32_negative_type" : -5,
+ "number_int_type" : NumberInt(5),
+ "int32_hex" : 0x123,
+ "int64_type" : 214748364765,
+ "int64_negative_type" : -214748364765,
+ "number_long_type" : NumberLong(5000),
+ "minkey_type" : { "$minKey" : 1 },
+ "maxkey_type" : { "$maxKey" : 1 },
+ "regex_type" : { "$regex" : "\\.", "$options" : "" }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert1.json b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert1.json
new file mode 100644
index 00000000000..1608356a73b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert1.json
@@ -0,0 +1,5 @@
+{_id:"one", a:1234,b:4567}
+{_id:"two", a:"xxx",b:"yyy"}
+{_id:"one", a:"foo",b:"blah"}
+{_id:"one", a:"test",b:"test"}
+{_id:"one", a:"unicorns",b:"zebras"}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert2.json b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert2.json
new file mode 100644
index 00000000000..cf35f2762ac
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert2.json
@@ -0,0 +1,5 @@
+{a:1234,b:4567, c:222}
+{a:4567,b:"yyy", c:333}
+{a:1234,b:"blah", c:222}
+{a:"xxx",b:"test", c:-1}
+{a:4567,b:"asdf", c:222}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert3.json b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert3.json
new file mode 100644
index 00000000000..c22767ceac0
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/testdata/upsert3.json
@@ -0,0 +1,2000 @@
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":0.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":1.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":2.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":3.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":4.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":5.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":6.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":7.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":8.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":9.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":10.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":11.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":12.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":13.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":14.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":15.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":16.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":17.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":18.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":19.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":20.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":21.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":22.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":23.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":24.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":25.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":26.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":27.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":28.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":29.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":30.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":31.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":32.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":33.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":34.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":35.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":36.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":37.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":38.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":39.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":40.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":41.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":42.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":43.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":44.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":45.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":46.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":47.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":48.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":49.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":50.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":51.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":52.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":53.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":54.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":55.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":56.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":57.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":58.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":59.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":60.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":61.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":62.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":63.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":64.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":65.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":66.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":67.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":68.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":69.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":70.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":71.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":72.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":73.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":74.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":75.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":76.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":77.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":78.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":79.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":80.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":81.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":82.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":83.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":84.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":85.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":86.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":87.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":88.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":89.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":90.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":91.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":92.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":93.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":94.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":95.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":96.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":97.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":98.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":0.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":1.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":2.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":3.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":4.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":5.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":6.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":7.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":8.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":9.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":10.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":11.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":12.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":13.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":14.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":15.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":16.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":17.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":18.0,"d":"foo"}],"e":"bar"},"x":"str2"}
+{"_id":{"a":99.0,"b":[0.0,1.0,2.0,{"c":19.0,"d":"foo"}],"e":"bar"},"x":"str2"}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/type_case.js b/src/mongo/gotools/test/qa-tests/jstests/import/type_case.js
new file mode 100644
index 00000000000..671ae707f31
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/type_case.js
@@ -0,0 +1,98 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ jsTest.log('Testing running import with different --type case spellings');
+
+ var toolTest = getToolTest('bad_options');
+ var db1 = toolTest.db;
+
+ // Make a dummy file to import by writing a test collection and exporting it
+ assert.eq(0, db1.c.count(), "setup1");
+ db1.c.save({a: 1, b: 2, c: 3});
+ db1.c.save({a: 4, b: 5, c: 6});
+ assert.eq(2, db1.c.count(), "setup2");
+
+ toolTest.runTool("export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName());
+
+ // also make a CSV version of it
+ toolTest.runTool("export",
+ "--out", toolTest.extFile + ".csv",
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName(),
+ "--csv",
+ "--fields", "a,b,c");
+ db1.c.drop();
+ assert.eq(0, db1.c.count(), "after drop", "-d", toolTest.baseName, "-c", "foo");
+
+ // verify that the normal sane case works
+ var ret = toolTest.runTool("import",
+ "--file", toolTest.extFile,
+ "-d", "test",
+ "-c", "test");
+ assert.eq(ret, 0);
+
+ // verify that a lowercase json type works
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile,
+ "-d", "test",
+ "-c", "test",
+ "--type=json");
+ assert.eq(ret, 0);
+
+ // verify that an uppercase json type works
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile,
+ "-d", "test",
+ "-c", "test",
+ "--type=JSON");
+ assert.eq(ret, 0);
+
+ // verify that a csv type specifier fails to load a json file
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile,
+ "-d", "test",
+ "-c", "test",
+ "--type=csv",
+ "-f", "a,b,c");
+ assert.eq(ret, 1);
+
+ // verify that a lowercase csv type works
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile+".csv",
+ "-d", "test",
+ "-c", "test",
+ "--type=csv",
+ "-f", "a,b,c");
+ assert.eq(ret, 0);
+
+ // verify that an uppercase csv type works
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile+".csv",
+ "-d", "test",
+ "-c", "test",
+ "--type=CSV",
+ "-f", "a,b,c");
+ assert.eq(ret, 0);
+
+ // verify that a mixed-case csv type works
+ ret = toolTest.runTool("import",
+ "--file", toolTest.extFile+".csv",
+ "-d", "test",
+ "-c", "test",
+ "--type=cSv",
+ "-f", "a,b,c");
+ assert.eq(ret, 0);
+
+ var testDb = db1.c.getDB().getSiblingDB("test");
+ assert.eq.soon(11, testDb.test.count.bind(testDb.test), "test.test should have 11 records");
+ testDb.test.drop();
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/typed_fields.js b/src/mongo/gotools/test/qa-tests/jstests/import/typed_fields.js
new file mode 100644
index 00000000000..fa7e07dd0b1
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/typed_fields.js
@@ -0,0 +1,114 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ var formats = ["csv", "tsv"];
+ var header = "a.string(),b.int32(),c.xyz.date_oracle(Month dd, yyyy HH24:mi:ss),c.noop.boolean(),d.hij.lkm.binary(hex)";
+ var expectedDocs = [{
+ a: "foo",
+ b: 12,
+ c: {
+ xyz: ISODate("1997-06-02T15:24:00Z"),
+ noop: true,
+ },
+ d: {hij: {lkm: BinData(0, "e8MEnzZoFyMmD7WSHdNrFJyEk8M=")}},
+ }, {
+ a: "bar",
+ b: 24,
+ c: {
+ xyz: ISODate("2016-06-08T09:26:00Z"),
+ noop: false,
+ },
+ d: {hij: {lkm: BinData(0, "dGVzdAo=")}},
+ }];
+ jsTest.log('Testing typed fields in CSV/TSV');
+
+ var checkCollectionContents = function(coll) {
+ var importedDoc = coll.findOne({a: "foo"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, expectedDocs[0]);
+ importedDoc = coll.findOne({a: "bar"});
+ delete importedDoc["_id"];
+ assert.docEq(importedDoc, expectedDocs[1]);
+ assert.eq(coll.count(), 2);
+ };
+
+ var reset = function(coll) {
+ coll.drop();
+ assert.eq(coll.count(), 0);
+ };
+
+ var toolTest = getToolTest("import_fields");
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+ for (var i = 0; i < formats.length; i++) {
+ var format = formats[i];
+
+ var c = db1.c.getDB().getSiblingDB(format + "testdb")[format+"testcoll"];
+ // check that headerline uses the correct headers
+ var ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/typed_header." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--columnsHaveTypes",
+ "--headerline"]
+ .concat(commonToolArgs));
+
+ checkCollectionContents(c);
+ reset(c);
+
+ // check that the fields can be specified with --fields
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/typed_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--columnsHaveTypes",
+ "--fields", header]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ reset(c);
+
+ // check that the fields can be specified with --fieldFile
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/typed_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--columnsHaveTypes",
+ "--fieldFile", "jstests/import/testdata/typedfieldfile"]
+ .concat(commonToolArgs));
+ checkCollectionContents(c);
+ reset(c);
+
+ // when --fieldFile, --fields, and --headerline are all omitted,
+ // import should fail
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/typed_noheader." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--columnsHaveTypes"]
+ .concat(commonToolArgs));
+ assert.neq(ret, 0);
+ reset(c);
+
+ // check that extra fields are created as expected
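+ // (Extra columns beyond those named in the field file are assumed to be
+ // imported under auto-generated keys field<N>; the assertions below rely on it.)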
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/typed_extrafields." + format,
+ "--type=" + format,
+ "--db", format + "testdb",
+ "--collection", format + "testcoll",
+ "--columnsHaveTypes",
+ "--fieldFile", "jstests/import/testdata/typedfieldfile"]
+ .concat(commonToolArgs));
+
+ var importedDoc = c.findOne({"a": "one"});
+ assert.eq(importedDoc.field5, "extra1");
+ assert.eq(importedDoc.field6, "extra2");
+ reset(c);
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/types.js b/src/mongo/gotools/test/qa-tests/jstests/import/types.js
new file mode 100644
index 00000000000..c128c9c5dc9
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/types.js
@@ -0,0 +1,117 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ jsTest.log('Testing running import with various data types');
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+ var testDoc = {
+ _id: ObjectId(),
+ a: BinData(0, "e8MEnzZoFyMmD7WSHdNrFJyEk8M="),
+ b: Boolean(1),
+ d: "this is a string",
+ e: ["this is an ", 2, 23.5, "array with various types in it"],
+ f: {"this is": "an embedded doc"},
+ g: function () {
+ print("hey sup");
+ },
+ h: null,
+ i: true,
+ j: false,
+ k: NumberLong(10000),
+ l: MinKey(),
+ m: MaxKey(),
+ n: ISODate("2015-02-25T16:42:11Z"),
+ o: DBRef('namespace', 'identifier', 'database'),
+ p: NumberInt(5),
+ q: 5.0,
+ };
+
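+ // mongoexport writes extended JSON, so every type above should survive an
+ // export/import round trip; the per-field loop further down verifies this.
+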
+ // Make a dummy file to import by writing a test collection and exporting it
+ assert.eq(0, db1.c.count(), "setup1");
+ db1.c.save(testDoc);
+ toolTest.runTool.apply(toolTest, ["export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName()]
+ .concat(commonToolArgs));
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", toolTest.extFile,
+ "--db", "imported",
+ "--collection", "testcoll2"]
+ .concat(commonToolArgs));
+ var postImportDoc = db1.c.getDB().getSiblingDB("imported").testcoll2.findOne();
+
+ printjson(postImportDoc);
+
+ for (var docKey in testDoc) {
+ if (!testDoc.hasOwnProperty(docKey)) {
+ continue;
+ }
+ jsTest.log("checking field " + docKey);
+ if (typeof testDoc[docKey] === 'function') {
+ // SERVER-23472: As of 3.3.5, JS functions are serialized when inserted,
+ // so accept either the original function or its serialization
+ try {
+ assert.eq(testDoc[docKey], postImportDoc[docKey],
+ "function does not directly match");
+ } catch (e) {
+ assert.eq({code: String(testDoc[docKey])}, postImportDoc[docKey],
+ "serialized function does not match");
+ }
+ continue;
+ }
+ assert.eq(testDoc[docKey], postImportDoc[docKey],
+ "imported field " + docKey + " does not match original");
+ }
+
+ // DBPointer should turn into a DBRef with a $ref field and the $id field
+ // being an ObjectId. It will not convert back to a DBPointer.
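+ // For example (hypothetical values): DBPointer('namespace', oid) exports and
+ // re-imports as DBRef('namespace', oid), i.e. {$ref: 'namespace', $id: oid};
+ // the assertions below check exactly that.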
+
+ var oid = ObjectId();
+ var irregularObjects = {
+ _id: ObjectId(),
+ a: DBPointer('namespace', oid),
+ b: NumberInt("5"),
+ c: NumberLong("5000"),
+ d: 5,
+ e: 9223372036854775,
+ };
+
+ db1.c.drop();
+ db1.c.getDB().getSiblingDB("imported").testcoll3.drop();
+ assert.eq(0, db1.c.count(), "setup1");
+ db1.c.save(irregularObjects);
+ toolTest.runTool.apply(toolTest, ["export",
+ "--out", toolTest.extFile,
+ "-d", toolTest.baseName,
+ "-c", db1.c.getName()]
+ .concat(commonToolArgs));
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", toolTest.extFile,
+ "--db", "imported",
+ "--collection", "testcoll3"]
+ .concat(commonToolArgs));
+ postImportDoc = db1.c.getDB().getSiblingDB("imported").testcoll3.findOne();
+
+ printjson(postImportDoc);
+
+ var dbRef = DBRef("namespace", oid);
+ assert.eq(postImportDoc["a"], dbRef);
+
+ assert.eq(postImportDoc["b"], 5);
+ assert.eq(postImportDoc["d"], 5);
+
+ var numLong = NumberLong(5000);
+ assert.eq(postImportDoc["c"], numLong);
+
+ numLong = NumberLong(9223372036854775);
+ assert.eq(postImportDoc["e"], numLong);
+
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/upsert.js b/src/mongo/gotools/test/qa-tests/jstests/import/upsert.js
new file mode 100644
index 00000000000..567f47e8716
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/upsert.js
@@ -0,0 +1,62 @@
+(function() {
+ jsTest.log('Testing running import with upserts');
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+ var db = db1.getSiblingDB("upserttest");
+ db.dropDatabase();
+
+ // Verify that --upsert with --upsertFields works by applying updates with a query on those fields
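+ // (Conceptually, each imported document D is applied as an upsert whose query
+ // is {a: D.a, c: D.c}; an assumed equivalence, not taken from the tool docs.)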
+ db.c.insert({a: 1234, b: "000000", c: 222});
+ db.c.insert({a: 4567, b: "111111", c: 333});
+ assert.eq(db.c.count(), 2, "collection count should be 2 at setup");
+ var ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/upsert2.json",
+ "--upsert",
+ "--upsertFields", "a,c",
+ "--db", db.getName(),
+ "--collection", db.c.getName()]
+ .concat(commonToolArgs));
+
+ var doc1 = db.c.findOne({a: 1234});
+ delete doc1["_id"];
+ assert.docEq(doc1, {a: 1234, b: "blah", c: 222});
+
+ var doc2_1 = db.c.findOne({a: 4567, c: 333});
+ var doc2_2 = db.c.findOne({a: 4567, c: 222});
+ delete doc2_1["_id"];
+ delete doc2_2["_id"];
+
+ assert.docEq(doc2_1, {a: 4567, b: "yyy", c: 333});
+ assert.docEq(doc2_2, {a: 4567, b: "asdf", c: 222});
+
+
+ // Verify that --upsert without --upsertFields works by applying the update using _id
+ db.c.drop();
+ db.c.insert({_id: "one", a: "original value"});
+ db.c.insert({_id: "two", a: "original value 2"});
+ assert.eq(db.c.count(), 2, "collection count should be 2 at setup");
+
+ toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/upsert1.json",
+ "--upsert",
+ "--db", db.getName(),
+ "--collection", db.c.getName()]
+ .concat(commonToolArgs));
+
+ // check that the upsert got applied
+ assert.eq(ret, 0);
+ assert.eq(db.c.count(), 2);
+
+ assert.docEq(db.c.findOne({_id: "one"}), {_id: "one", a: "unicorns", b: "zebras"});
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/import/upsert_id_subdoc.js b/src/mongo/gotools/test/qa-tests/jstests/import/upsert_id_subdoc.js
new file mode 100644
index 00000000000..68ea7963d78
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/import/upsert_id_subdoc.js
@@ -0,0 +1,82 @@
+(function() {
+ // This test creates a collection with a subdocument _id field. We export the collection,
+ // replace the existing documents with a pre-made dataset using --upsert, then overwrite
+ // that with the original data, again with --upsert. This verifies that import and export
+ // do not change the order of _id fields.
+ jsTest.log('Testing running import with --upsert and _id subdocuments');
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('import');
+ var db1 = toolTest.db;
+ var commonToolArgs = getCommonToolArguments();
+
+ var db = db1.getSiblingDB("upserttest");
+ db.dropDatabase();
+
+ // create a set of documents with a subdocument _id
+ var i, j;
+ for (i = 0; i < 100; i++) {
+ for (j = 0; j < 20; j++) {
+ db.c.insert({
+ _id: {
+ a: i,
+ b: [0, 1, 2, {c: j, d: "foo"}],
+ e: "bar",
+ },
+ x: "string",
+ });
+ }
+ }
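+ // (100 values of i) x (20 values of j) = 2000 documents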
+ assert.eq(db.c.count(), 2000);
+
+ jsTest.log('Exporting documents with subdocument _ids.');
+ var ret = toolTest.runTool.apply(toolTest, ["export",
+ "-o", toolTest.extFile,
+ "--db", db.getName(),
+ "--collection", db.c.getName()]
+ .concat(commonToolArgs));
+ assert.eq(ret, 0, "export should succeed");
+
+ jsTest.log('Upserting pre-made documents with subdocument _ids.');
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", "jstests/import/testdata/upsert3.json",
+ "--upsert",
+ "--db", db.getName(),
+ "--collection", db.c.getName()]
+ .concat(commonToolArgs));
+ assert.eq(ret, 0, "import should succeed");
+ assert.eq(db.c.count(), 2000,
+ "count should be the same before and after import");
+
+ // check each document
+ for (i = 0; i < 100; i++) {
+ for (j = 0; j < 20; j++) {
+ assert.eq(db.c.findOne({_id: {a: i, b: [0, 1, 2, {c: j, d: "foo"}], e: "bar"}}).x, "str2",
+ "all documents should be updated");
+ }
+ }
+
+ jsTest.log('Upserting original exported documents with subdocument _ids.');
+ ret = toolTest.runTool.apply(toolTest, ["import",
+ "--file", toolTest.extFile,
+ "--upsert",
+ "--db", db.getName(),
+ "--collection", db.c.getName()]
+ .concat(commonToolArgs));
+ assert.eq(ret, 0, "import should succeed");
+ assert.eq(db.c.count(), 2000,
+ "count should be the same before and after import");
+
+ // check each document to see that it is back at its original value
+ for (i = 0; i < 100; i++) {
+ for (j = 0; j < 20; j++) {
+ assert.eq(db.c.findOne({_id: {a: i, b: [0, 1, 2, {c: j, d: "foo"}], e: "bar"}}).x, "string",
+ "all documents should be updated");
+ }
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/.eslintrc.yml b/src/mongo/gotools/test/qa-tests/jstests/libs/.eslintrc.yml
new file mode 100644
index 00000000000..1750fda88a5
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/.eslintrc.yml
@@ -0,0 +1,3 @@
+rules:
+ no-unused-vars: 0
+ no-empty-function: 0
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/analyze_plan.js b/src/mongo/gotools/test/qa-tests/jstests/libs/analyze_plan.js
new file mode 100644
index 00000000000..b930470fdb5
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/analyze_plan.js
@@ -0,0 +1,76 @@
+// Contains helpers for checking, based on the explain output, properties of a
+// plan. For instance, there are helpers for checking whether a plan is a collection
+// scan or whether the plan is covered (index only).
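+//
+// Example usage (hypothetical collection and query; in MongoDB 3.0+ explain
+// output, the winning plan lives under queryPlanner.winningPlan):
+//   var plan = db.coll.find({a: 1}).explain().queryPlanner.winningPlan;
+//   if (isCollscan(plan)) { print("query is not using an index"); }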
+
+/**
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan has a stage called 'stage'.
+ */
+function planHasStage(root, stage) {
+ if (root.stage === stage) {
+ return true;
+ } else if ("inputStage" in root) {
+ return planHasStage(root.inputStage, stage);
+ } else if ("inputStages" in root) {
+ for (var i = 0; i < root.inputStages.length; i++) {
+ if (planHasStage(root.inputStages[i], stage)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/**
+ * A query is covered iff it does *not* have a FETCH stage or a COLLSCAN.
+ *
+ * Given the root stage of explain's BSON representation of a query plan ('root'),
+ * returns true if the plan is index only. Otherwise returns false.
+ */
+function isIndexOnly(root) {
+ return !planHasStage(root, "FETCH") && !planHasStage(root, "COLLSCAN");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * an index scan, and false otherwise.
+ */
+function isIxscan(root) {
+ return planHasStage(root, "IXSCAN");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * the idhack fast path, and false otherwise.
+ */
+function isIdhack(root) {
+ return planHasStage(root, "IDHACK");
+}
+
+/**
+ * Returns true if the BSON representation of a plan rooted at 'root' is using
+ * a collection scan, and false otherwise.
+ */
+function isCollscan(root) {
+ return planHasStage(root, "COLLSCAN");
+}
+
+/**
+ * Get the number of chunk skips for the BSON exec stats tree rooted at 'root'.
+ */
+function getChunkSkips(root) {
+ if (root.stage === "SHARDING_FILTER") {
+ return root.chunkSkips;
+ } else if ("inputStage" in root) {
+ return getChunkSkips(root.inputStage);
+ } else if ("inputStages" in root) {
+ var skips = 0;
+ for (var i = 0; i < root.inputStages.length; i++) {
+ skips += getChunkSkips(root.inputStages[i]);
+ }
+ return skips;
+ }
+
+ return 0;
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/authTestsKey b/src/mongo/gotools/test/qa-tests/jstests/libs/authTestsKey
new file mode 100644
index 00000000000..573898a4f05
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/authTestsKey
@@ -0,0 +1 @@
+This key is only for running the suite with authentication dont use it in any tests directly
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/badSAN.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/badSAN.pem
new file mode 100644
index 00000000000..d8e362731e0
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/badSAN.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgIDAYKXMA0GCSqGSIb3DQEBBQUAMHQxFzAVBgNVBAMTDktl
+cm5lbCBUZXN0IENBMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIx
+FjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYD
+VQQGEwJVUzAeFw0xNDA5MjMxNTE3MjNaFw0zNDA5MjMxNTE3MjNaMG8xEjAQBgNV
+BAMTCTEyNy4wLjAuMTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RC
+MRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkG
+A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCDB/lxuzeU
+OHR5nnOTJM0fHz0WeicnuUfGG5wP89Mbkd3Y+BNS0ozbnkW+NAGhD+ehNBjogISZ
+jLCd+uaYu7TLWpkgki+1+gM99Ro0vv7dIc8vD7ToILKMbM8xQmLbSxDT2tCUoXlc
+m7ccgDZl9oW1scQYQ8gWHjmk3yK8sCoGa/uwr49u74aVM7673tLsK41m8oYPzt/q
+VGT+mXpBJQcGXkTNQtIPxBtD25jr+aPietS3u70zrVPY6ZDsGE7DofEeRl97kVoF
+NcpaQmVEwEo8KCWaT6OaPaUUUjAMwzqiZaHNZ6mL1pCr65bLXP6T9tiMtWLw5+SG
+3E09fhQuWod5AgMBAAGjFTATMBEGA1UdEQQKMAiCBmJhZFNBTjANBgkqhkiG9w0B
+AQUFAAOCAQEAQzlibJvlUpJG3vc5JppdrudpXoVAP3wtpzvnkrY0GTWIUE52mCIf
+MJ5sARvjzs/uMhV5GLnjqTcT+DFkihqKyFo1tKBD7LSuSjfDvjmggG9lq0/xDvVU
+uczAuNtI1T7N+6P7LyTG4HqniYouPMDWyCKBOmzzNsk+r1OJb6cxU7QQwmSWw1n1
+ztNcF6JzCQVcd9Isau9AEXZ9q0M0sjD9mL67Qo3Dh3Mvf4UkJKqm3KOQOupUHZLU
+vJwfsS2u+gfHY1Plywzq3AuT7ygbksR3Pqfs8LFPnuRAH+41sFTGUM52hiU7mNPj
+ebl8s1tjK7WQ+a8GTABJV0hDNeWd3Sr+Og==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAgwf5cbs3lDh0eZ5zkyTNHx89FnonJ7lHxhucD/PTG5Hd2PgT
+UtKM255FvjQBoQ/noTQY6ICEmYywnfrmmLu0y1qZIJIvtfoDPfUaNL7+3SHPLw+0
+6CCyjGzPMUJi20sQ09rQlKF5XJu3HIA2ZfaFtbHEGEPIFh45pN8ivLAqBmv7sK+P
+bu+GlTO+u97S7CuNZvKGD87f6lRk/pl6QSUHBl5EzULSD8QbQ9uY6/mj4nrUt7u9
+M61T2OmQ7BhOw6HxHkZfe5FaBTXKWkJlRMBKPCglmk+jmj2lFFIwDMM6omWhzWep
+i9aQq+uWy1z+k/bYjLVi8OfkhtxNPX4ULlqHeQIDAQABAoIBAC4Bx8jyJmKpq+Pk
+CcqZelg6HLXesA7XlGbv3M0RHIeqoM2E1SwYd5LJMM3G7ueBcR/97dz8+xH6/yyJ
+Ixxvk9xu9CMmkRABN9AyVkA867nzHA73Idr7WBXMQreWCqXa5o6sXt5BEB6/If0k
+23TTqUERqLuoWQHDHRRRsJ218RuNmbvBe8TGXcfunC0eeDVKDeqAXol6bD5lztdu
+B6jkdLt5UZSQ7X8OmClbeDlac90B8usNi+pUE9q1p7X462vAw8LohkxLY2nyIcmU
+feNdTNHP+lklv+E+p9w/Az7Hf6zxm525tw90QVI048fr9SL3ftLHOt4FhucSCn0Z
+CjylP4ECgYEA+nQrNVdVwmxcWCVn69LR1grNXUSz+fLHCo+QKma4IyC1kuuZ+BBo
+Iwdf9t/S1tgtTYru3uxzCpQg7J1iDeEFEsMHl0rc6U1MmIE+6OvACVG3yotqoOqE
+852pi1OWIe94yTk2ZmNXJ8gpUE/gtMprbcSWOb7IzzrXy2lDcaEMuGkCgYEAhe7L
+ZvYI4LEvu6GSPp97qBzDH9m5UrHaTZIJk/Nu7ie919Sdg62LTfphsaK+pSyA55XQ
+8L9P7wNUPC44NnE+7CIJZsIuKdYqR5QI6No9RdTyij0Hgljfc7KuH2b8lf8EjvuH
+qZAf5zL3pIOQs8E8/MYHlGIqmTkYK41eCAcS9JECgYEADnra6KmU9rmnGR2IhZTZ
+tuNG/kZzlVbY9R5ZumnX6YgBl23xp+ri6muJu88y9GLpM5t9tfu7pvfrc2KiAaVp
+0qzd6nxUi1SBwituxK6kmqVT1+z5jDYi26bY34pEms+qjw+0unSx3EXxRYhouGsf
+jOgZu1rxZzHCuirq0E38W0kCgYBzOK16RX37t9OFywlioJekWCIxu4BouSNCirl8
+s/eiIUR8cqiUCPAIRLhZNtZmiTPYiBW5mAyvZiDIqUao56InSVznL3TBf0LeU2ea
+023VLs79yGU2aTjLc1PDJjl03XDRhWj/okMgBsPvn1QUoNDT8ZXBvPZC3VCC31qe
+818GUQKBgQDBUP2BC/Th/0dErOQ5lWkY3YbmzrTp2pDsHGZJRD+OdQ5B8FUvCP8m
+JESk/0ATn7niUqawnOy/2KlKIkeBBV2XL1rjIGEhCkBUuhCiInNDqz1AGdXzIKaT
+myoZ4PhIsH1D643e6iLhyAZuUAA4yB31E2a3l7EMyhV3vKbdWWygGQ==
+-----END RSA PRIVATE KEY-----
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/ca.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/ca.pem
new file mode 100644
index 00000000000..d1a5689cf0f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/ca.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgIBATANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB0MRcwFQYDVQQD
+Ew5LZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25n
+b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL
+MAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCBxSXj
+qA5y2EMQkcmvLDNikE88Og3+spJ3ex60HWVPk8EeXN68jyfbKLYsoCcBE2rBAE/N
+shVBJa8irh0o/UTh1XNW4iGCsfMvYamXiHnaOjmGVKjfBoj6pzQH0uK0X5olm3Sa
+zZPkLLCR81yxsK6woJZMFTvrlEjxj/SmDZ9tVXW692bC4i6nGvOCSpgv9kms85xO
+Ed2xbuCLXFDXKafXZd5AK+iegkDs3ah7VXMEE8sbqGnlqC1nsy5bpCnb7aC+3af7
+SV2XEFlSQT5kwTmk9CvTDzM9O78SO8nNhEOFBLQEdGDGd3BShE8dCdh2JTy3zKsb
+WeE+mxy0mEwxNfGfAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF
+BQADggEBAANwbvhM5K/Jcl6yzUSqAawvyAypT5aWBob7rt9KFq/aemjMN0gY2nsS
+8WTGd9jiXlxGc/TzrK6MOsJ904UAFE1L9uR//G1gIBa9dNbYoiii2Fc8b1xDVJEP
+b23rl/+GAT6UTSY+YgEjeA4Jk6H9zotO07lSw06rbCQam5SdA5UiMvuLHWCo3BHY
+8WzqLiW/uHlb4K5prF9yuTUBEIgkRvvvyOKXlRvm1Ed5UopT2hmwA86mffAfgJc2
+vSbm9/8Q00fYwO7mluB6mbEcnbquaqRLoB83k+WbwUAZ2yjWHXuXVMPwyaysazcp
+nOjaLwQJQgKejY62PiNcw7xC/nIxBeI=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAgcUl46gOcthDEJHJrywzYpBPPDoN/rKSd3setB1lT5PBHlze
+vI8n2yi2LKAnARNqwQBPzbIVQSWvIq4dKP1E4dVzVuIhgrHzL2Gpl4h52jo5hlSo
+3waI+qc0B9LitF+aJZt0ms2T5CywkfNcsbCusKCWTBU765RI8Y/0pg2fbVV1uvdm
+wuIupxrzgkqYL/ZJrPOcThHdsW7gi1xQ1ymn12XeQCvonoJA7N2oe1VzBBPLG6hp
+5agtZ7MuW6Qp2+2gvt2n+0ldlxBZUkE+ZME5pPQr0w8zPTu/EjvJzYRDhQS0BHRg
+xndwUoRPHQnYdiU8t8yrG1nhPpsctJhMMTXxnwIDAQABAoIBAD5iGOnM800wO2Uu
+wGbOd9FNEFoiinHDRHfdnw/1BavwmqjO+mBo7T8E3jarsrRosiwfyz1V+7O6uuuQ
+CgKXZlKuOuksgfGDPCWt7EolWHaZAOhbsGaujJD6ah/MuCD/yGmFxtNYOl05QpSX
+Cht9lSzhtf7TQl/og/xkOLbO27JB540ck/OCSOczXg9Z/O8AmIUyDn7AKb6G1Zhk
+2IN//HQoAvDUMZLWrzy+L7YGbA8pBR3yiPsYBH0rX2Oc9INpiGA+B9Nf1HDDsxeZ
+/o+5xLbRDDfIDtlYO0cekJ053W0zUQLrMEIn9991EpG2O/fPgs10NlKJtaFH8CmT
+ExgVA9ECgYEA+6AjtUdxZ0BL3Wk773nmhesNH5/5unWFaGgWpMEaEM7Ou7i6QApL
+KAbzOYItV3NNCbkcrejq7jsDGEmiwUOdXeQx6XN7/Gb2Byc/wezy5ALi0kcUwaur
+6s9+Ah+T4vcU2AjfuCWXIpe46KLEbwORmCRQGwkCBCwRhHGt5sGGxTkCgYEAhAaw
+voHI6Cb+4z3PNAKRnf2rExBYRyCz1KF16ksuwJyQSLzFleXRyRWFUEwLuVRL0+EZ
+JXhMbtrILrc23dJGEsB8kOCFehSH/IuL5eB0QfKpDFA+e6pimsbVeggx/rZhcERB
+WkcV3jN4O82gSL3EnIgvAT1/nwhmbmjvDhFJhZcCgYBaW4E3IbaZaz9S/O0m69Fa
+GbQWvS3CRV1oxqgK9cTUcE9Qnd9UC949O3GwHw0FMERjz3N7B/8FGW/dEuQ9Hniu
+NLmvqWbGlnqWywNcMihutJKbDCdp/Km5olUPkiNbB3sWsOkViXoiU/V0pK6BZvir
+d67EZpGwydpogyH9kVVCEQKBgGHXc3Q7SmCBRbOyQrQQk0m6i+V8328W1S5m2bPg
+M62aWXMOMn976ZRT1pBDSwz1Y5yJ3NDf7gTZLjEwpgCNrFCJRcc4HLL0NDL8V5js
+VjvpUU5GyYdsJdb+M4ZUPHi/QEaqzqPQumwJSLlJEdfWirZWVj9dDA8XcpGwQjjy
+psHRAoGBAJUTgeJYhjK7k5sgfh+PRqiRJP0msIH8FK7SenBGRUkelWrW6td2Riey
+EcOCMFkRWBeDgnZN5xDyWLBgrzpw9iHQQIUyyBaFknQcRUYKHkCx+k+fr0KHHCUb
+X2Kvf0rbeMucb4y/h7950HkBBq83AYKMAoI8Ql3cx7pKmyOLXRov
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/client.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/client.pem
new file mode 100644
index 00000000000..50a64e41728
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/client.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDXTCCAkWgAwIBAgIBAzANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBwMQ8wDQYDVQQD
+EwZjbGllbnQxEzARBgNVBAsTCktlcm5lbFVzZXIxEDAOBgNVBAoTB01vbmdvREIx
+FjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYD
+VQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJIFboAk9Fdi
+DY5Xld2iw36vB3IpHEfgWIimd+l1HX4jyp35i6xoqkZZHJUL/NMbUFJ6+44EfFJ5
+biB1y1Twr6GqpYp/3R30jKQU4PowO7DSal38MR34yiRFYPG4ZPPXXfwPSuwKrSNo
+bjqa0/DRJRVQlnGwzJkPsWxIgCjc8KNO/dSHv/CGymc9TjiFAI0VVOhMok1CBNvc
+ifwWjGBg5V1s3ItMw9x5qk+b9ff5hiOAGxPiCrr8R0C7RoeXg7ZG8K/TqXbsOZEG
+AOQPRGcrmqG3t4RNBJpZugarPWW6lr11zMpiPLFTrbq3ZNYB9akdsps4R43TKI4J
+AOtGMJmK430CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAA+nPgVT4addi13yB6mjW
++UhdUkFwtb1Wcg0sLtnNucopHZLlCj5FfDdp1RQxe3CyMonxyHTKkrWtQmVtUyvf
+C/fjpIKt9A9kAmveMHBiu9FTNTc0sbiXcrEBeHF5cD7N+Uwfoc/4rJm0WjEGNkAd
+pYLCCLVZXPVr3bnc3ZLY1dFZPsJrdH3nJGMjLgUmoNsKnaGozcjiKiXqm6doFzkg
+0Le5yD4C/QTaie2ycFa1X5bJfrgoMP7NqKko05h4l0B0+DnjpoTJN+zRreNTMKvE
+ETGvpUu0IYGxe8ZVAFnlEO/lUeMrPFvH+nDmJYsxO1Sjpds2hi1M1JoeyrTQPwXj
+2Q==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAkgVugCT0V2INjleV3aLDfq8HcikcR+BYiKZ36XUdfiPKnfmL
+rGiqRlkclQv80xtQUnr7jgR8UnluIHXLVPCvoaqlin/dHfSMpBTg+jA7sNJqXfwx
+HfjKJEVg8bhk89dd/A9K7AqtI2huOprT8NElFVCWcbDMmQ+xbEiAKNzwo0791Ie/
+8IbKZz1OOIUAjRVU6EyiTUIE29yJ/BaMYGDlXWzci0zD3HmqT5v19/mGI4AbE+IK
+uvxHQLtGh5eDtkbwr9Opduw5kQYA5A9EZyuaobe3hE0Emlm6Bqs9ZbqWvXXMymI8
+sVOturdk1gH1qR2ymzhHjdMojgkA60YwmYrjfQIDAQABAoIBAB249VEoNIRE9TVw
+JpVCuEBlKELYk2UeCWdnWykuKZ6vcmLNlNy3QVGoeeTs172w5ZykY+f4icXP6da5
+o3XauCVUMvYKKNwcFzSe+1xxzPSlH/mZh/Xt2left6f8PLBVuk/AXSPG2I9Ihodv
+VIzERaQdD0J9FmhhhV/hMhUfQ+w5rTCaDpq1KVGU61ks+JAtlQ46g+cvPF9c80cI
+TEC875n2LqWKmLRN43JUnctV3uGTmolIqCRMHPAs/egl+lG2RXJjqXSQ2uFLOvC/
+PXtBb597yadSs2BWPnTu/r7LbLGBAExzlQK1uFsTvuKsBPb3qrvUux0L68qwPuiv
+W24N8BECgYEAydtAvVB7OymQEX3mck2j7ixDN01wc1ZaCLBDvYPYS/Pvzq4MBiAD
+lHRtbIa6HPGA5jskbccPqQn8WGnJWCaYvCQryvgaA+BBgo1UTLfQJUo/7N5517vv
+KvbUa6NF0nj3VwfDV1vvy+amoWi9NOVn6qOh0K84PF4gwagb1EVy9MsCgYEAuTAt
+KCWdZ/aNcKgJc4NCUqBpLPF7EQypX14teixrbF/IRNS1YC9S20hpkG25HMBXjpBe
+tVg/MJe8R8CKzYjCt3z5Ff1bUQ2bzivbAtgjcaO0Groo8WWjnamQlrIQcvWM7vBf
+dnIflQ0slxbHfCi3XEe8tj2T69R7wJZ8L7PxR9cCgYEACgwNtt6Qo6s37obzt3DB
+3hL57YC/Ph5oMNKFLKOpWm5z2zeyhYOGahc5cxNppBMpNUxwTb6AuwsyMjxhty+E
+nqi2PU4IDXVWDWd3cLIdfB2r/OA99Ez4ZI0QmaLw0L8QoJZUVL7QurdqR9JsyHs6
+puUqIrb195s/yiPR7sjeJe0CgYEAuJviKEd3JxCN52RcJ58OGrh2oKsJ9/EbV0rX
+Ixfs7th9GMDDHuOOQbNqKOR4yMSlhCU/hKA4PgTFWPIEbOiM08XtuZIb2i0qyNjH
+N4qnqr166bny3tJnzOAgl1ljNHa8y+UsBTO3cCr17Jh0vL0KLSAGa9XvBAWKaG6b
+1iIXwXkCgYAVz+DA1yy0qfXdS1pgPiCJGlGZXpbBcFnqvbpGSclKWyUG4obYCbrb
+p5VKVfoK7uU0ly60w9+PNIRsX/VN/6SVcoOzKx40qQBMuYfJ72DQrsPjPYvNg/Nb
+4SK94Qhp9TlAyXbqKJ02DjtuDim44sGZ8g7b+k3FfoK4OtzNsqdVdQ==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/client_revoked.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/client_revoked.pem
new file mode 100644
index 00000000000..03db67deb50
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/client_revoked.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDZTCCAk2gAwIBAgIBAjANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB4MRcwFQYDVQQD
+Ew5jbGllbnRfcmV2b2tlZDETMBEGA1UECxMKS2VybmVsVXNlcjEQMA4GA1UEChMH
+TW9uZ29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlv
+cmsxCzAJBgNVBAYTAlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
+lJog+grPpvDFKFn9mxxToLgkx1uM+LmuRf1XG707TIccGfSKyZMGCcqlYVQmqT/J
+Fnq2rvtXGG9yxPhHoBwKw4x9rfQEy8Z93BAMNRoIlbt505HaWv7b+M3qksRHDLpw
+/Ix0Yay+fjT9DGmcrahC9h8c8SVtyHoODvxdmR7P+p7e6F6/6vcUkg/aU/50MRUN
+qGUSMkm/kpcjFNmDqRSLQoDPE8G2UOIolG/m95uaCOkGCT4lQjrCpR36fkr5a+vI
+UbDJP8M26Kp2fFnvObKzoSFSEVOMGeBM9p4qa88I4hwfthNFvGDOCNBKZK5ZxLZ3
+gGFcR6kL6u11y4zoLrZ6xwIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQB8WQMn/cjh
+9qFtr7JL4VPIz/+96QaBmkHxMqiYL/iMg5Vko3GllLc1mgfWQfaWOvyRJClKj395
+595L2u8wBKon3DXUPAkinc6+VOwDWsxFLNtWl+jhigat5UDzGm8ZKFhl0WwNhqzZ
+dlNPrh2LJZzPFfimfGyVkhPHYYdELvn+bnEMT8ae1jw2yQEeVFzHe7ZdlV5nMOE7
+Gx6ZZhYlS+jgpIxez5aiKqit/0azq5GGkpCv2H8/EXxkR4gLZGYnIqGuZP3r34NY
+Lkh5J3Qnpyhdopa/34yOCa8mY1wW7vEro0fb/Dh21bpyEOz6tBk3C1QRaGD+XQOM
+cedxtUjYmWqn
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAlJog+grPpvDFKFn9mxxToLgkx1uM+LmuRf1XG707TIccGfSK
+yZMGCcqlYVQmqT/JFnq2rvtXGG9yxPhHoBwKw4x9rfQEy8Z93BAMNRoIlbt505Ha
+Wv7b+M3qksRHDLpw/Ix0Yay+fjT9DGmcrahC9h8c8SVtyHoODvxdmR7P+p7e6F6/
+6vcUkg/aU/50MRUNqGUSMkm/kpcjFNmDqRSLQoDPE8G2UOIolG/m95uaCOkGCT4l
+QjrCpR36fkr5a+vIUbDJP8M26Kp2fFnvObKzoSFSEVOMGeBM9p4qa88I4hwfthNF
+vGDOCNBKZK5ZxLZ3gGFcR6kL6u11y4zoLrZ6xwIDAQABAoIBAFlu0T3q18Iu0VlR
+n5GEYMgvSuM4NAVVKo8wwwYMwu1xuvXb+NMLyuyFqzaCQKpHmywOOnfhCC/KkxX8
+Ho87kTbTDKhuXZyOHx0cA1zKCDSlGdK8yt9M1vJMa0pdGi2M34b+uOQ35IVsOocH
+4KWayIH7g52V2xZ2bpOSSnpm0uCPZSBTgClCgTUYepOT2wbLn/8V0NtVpZhDsBqg
+fORuEHkiurrbLa8yjQsvbR+hsR/XbGhre8sTQapj4EITXvkEuOL/vwbRebhOFHgh
+8sipsXZ9CMaJkBpVoLZTxTKQID/9006cczJK2MGKFhn6mvP6AeFuJAM3xqLGZTc4
+xxpfJyECgYEA0+iKxy5r1WUpBHR8jTh7WjLc6r5MFJQlGgLPjdQW6gCIe/PZc+b9
+x5vDp27EQ1cAEePEu0glQ/yk19yfxbxrqHsRjRrgwoiYTXjGI5zZSjXKArHyEgBj
+XOyo5leO5XMFnk2AShPlh+/RhAW3NhxcWkBEAsCD6QyC3BPvP6aaAXkCgYEAs4WH
+dTuweTdnyquHQm59ijatvBeP8h4tBozSupflQjB9WxJeW5uEa8lNQ3lSz1F4TV3M
+xvGdDSqwftLRS2mWGho/1jaCeAzjsiUQ2WUHChxprt0+QU7XkJbaBY9eF+6THZFw
+sDG688TiolxqoD8OYi8EtxmIvbQhXHmXnrk3jj8CgYBSi74rkrisuqg8tQejl0Ht
+w+xsgM5wIblGJZwmOlzmsGh6KGYnkO6Ap/uSKELJnIVJcrk63wKtNigccjPGufwR
++EbA+ZxeCwmQ/B/q1XmLP+K+JAUQ4BfUpdexSqA+XwzsOnJj6NY7mr65t+RDbs7G
+1Uvo6oc37Ai5pAZJfCN3uQKBgQAJr5qvaJkM8UBYXwjdPLjpTCnzjBHoLlifkdmM
+18U23QbmcwdESg/LAQF6MoGVTf//rJ/v2/ltTHBZZ2aDex7uKZxoImjHsWpXokhW
+cmz+zqmlFarWOzrGQl1hD2s0P1sQrVg3KXe8z1KrD/Fw0/Yitga7GlWWZrGmG6li
+lvu4YQKBgQANODQYEaz739IoPNnMfTpTqAoQIOR4PNdMfCXSQrCB8i0Hh4z48E4F
+DEAd1xIYyxI8pu7r52dQlBk7yrILOTG0gmgLJd5xKdtCTrasYAICI3hsRLtP8dVA
+8WeykXY4Wf1bYQ+VzKVImkwL/SBm2ik5woyxCzT8JSjyoAwRrQp9Vw==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/cluster_cert.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/cluster_cert.pem
new file mode 100644
index 00000000000..a8623ab67ef
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/cluster_cert.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDXjCCAkagAwIBAgIBBDANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBxMRQwEgYDVQQD
+EwtjbHVzdGVydGVzdDEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RC
+MRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkG
+A1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCX42ZTwADG
+sEkS7ijfADlDQaJpbdgrnQKa5ssMQK3oRGSqXfTp0ThsJiVBbYZ8ZZRpPMgJdowa
+pFCGHQJh6VOdKelR0f/uNVpBGVz1yD4E4AtkA6UYcIJq6ywcj+W7Pli1Ed8VUN3Q
+tBU+HvHiEdMj74kLJb4ID1cP3gehvRv/0szkN8/ODFKCgYb1619BdFb9gRn8eily
+Wcg1m1gXz2xSfqRZkFEcEYet3BeOEGZBhaufJFzinvQjocH+kWFKlZf0+2DEFFbH
+NRqmabMmqMBUke629EUn8a7PBWBYNLld9afoNHwNY68wpONf5IqR2mNar5bVz8/d
+4g7BuVNvEFdJAgMBAAEwDQYJKoZIhvcNAQEFBQADggEBAA3U2O+cE/ZS8SDBw/sr
+BVFf0uaoME7+XX2jdTi4RUpWPfQ6uTkhKnXKzTzGrQtKwA96slGp4c3mxGBaAbC5
+IuTS97mLCju9NFvJVtazIajO4eNlG6dJSk0pQzjc0RAeLYksX/9NRNKZ+lQ5QVS2
+NVLce70QZBIvujjVJZ5hqDdjPV0JGOOUzNGyyUhzgY7s9MQagNnBSu5HO4CK1onc
+goOkizulq/5WF+JtqW8VKKx+/CH6SnTkS4b3qbjgKRmHZcOshH/d4KqhoLya7sfH
+pedmm7WgO9p8umXXqNj+04ehuPKTnD8tLMhj+GbJ9eIChPCBf1XnIzOXYep+fq9j
+n/g=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAl+NmU8AAxrBJEu4o3wA5Q0GiaW3YK50CmubLDECt6ERkql30
+6dE4bCYlQW2GfGWUaTzICXaMGqRQhh0CYelTnSnpUdH/7jVaQRlc9cg+BOALZAOl
+GHCCaussHI/luz5YtRHfFVDd0LQVPh7x4hHTI++JCyW+CA9XD94Hob0b/9LM5DfP
+zgxSgoGG9etfQXRW/YEZ/HopclnINZtYF89sUn6kWZBRHBGHrdwXjhBmQYWrnyRc
+4p70I6HB/pFhSpWX9PtgxBRWxzUapmmzJqjAVJHutvRFJ/GuzwVgWDS5XfWn6DR8
+DWOvMKTjX+SKkdpjWq+W1c/P3eIOwblTbxBXSQIDAQABAoIBAHhjNFMDZ1oUlgbc
+ICcI/VoyprFb8DA5ZbwzXBMdHPpxYzyp9hpxy3/lCDiAwEzPEsAK/h6KCOiA/pYe
+XioPD0gN1TIV+f3r6dqZaNYi3g1tK3odbXkejDCEsFT/NT6hXxw9yw0RKI9ofUHc
+synVqP3duUjNpH6s8fvQp0nqI0wzoNm1kklpTWVjZmbtSZF9m/xfv7NGwQEYUL2V
+f5YvX6aHPVDtUXAqyPBgv6SGuogSSjwRTsNTef3aY6Se5MlP3YIfRqdad8+ORkKu
+WSrO+GjQccV4sztD8Sn3LR7qe6Lmid4yopHSS4EFq0Sc8LznTeflWcRAsBLezRp5
+xZB/blECgYEA8yrEzFA247AOXbhL1CdqMyPs523oy5+dmByyovjYjEhjUCRlAa9D
+ApvID4TfAkA4n0rUdICCtwbZlFrBZbn6rXNvJ362ufZjvaFIucQm90YkG1J6Ldek
+8ohJfLyyLLWzVHJIS7WxFqqsGmDhYUTErFbJZjI8tNSglrc81jUWT7UCgYEAn+dw
+ICyc09f6+xm3nFZIOq2Gtpw8lrOJlwZugn1AqY2D5Ko2gq1Fx2oZWpVaBivjH3gU
+ONlnPuealE0RJHvCm/+axy7Rcj65IwTrN5V+j6rg1tuEdi70PvNKmN6XQqRvEjOX
+HOh3gQYP6EFAoVINZZqUkwJzqpv4tnOSpEHXncUCgYB3+Z8Vq3IZjtDXvslzCGtm
+hhAp81mLtdocpfQhYqP9Ou39KafIV/+49sGTnpwlUShet53xSUK1KSULBGgtV8Bt
++ela1DM1t3Joqn3mYfhTwoCoFl5/5cjVfRa8+6DxXEj5nlU7PY79PwIhFbG9ux9K
+ZJuD17+J/Oqq0gerLJAwjQKBgAS4AbkRV/dwcjmiwqZcbXk90bHl3mvcFH1edTho
+ldXrFS9UTpOApYSC/wiLS8LO3L76/i3HTKKwlwE1XQIknNOZsWmbWhby/uenp4FW
+agu3UTdF9xy9uft5loP4XaJb0+NHnnf97DjkgueptUyNbVPIQgYsllk8jRRlSLiM
+MN65AoGAUPLlh8ok/iNirO5YKqc5/3FKA1o1V1KSTHYVUK+Y+vuVJxQZeO3LMybe
+7AJ1cLHEWc8V4B27e6g33rfGGAW+/+RJ7/uHxuYCuKhstbq/x+rf9i4nl93emlMV
+PC3yuZsCmpk9Uypzi2+PT10yVgXkXRYtLpuUpoABWRzVXGnEsXo=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/command_line/test_parsed_options.js b/src/mongo/gotools/test/qa-tests/jstests/libs/command_line/test_parsed_options.js
new file mode 100644
index 00000000000..55f93a01d1c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/command_line/test_parsed_options.js
@@ -0,0 +1,213 @@
+// Merge two options objects. Used as a helper when we want to compare options even though the
+// test framework adds extra fields of its own. Anything set in the second options object
+// overrides the first. The two objects must have the same structure; see the example below.
+function mergeOptions(obj1, obj2) {
+ var obj3 = {};
+ var attrname;
+ for (attrname in obj1) {
+ if (typeof obj1[attrname] === "object" &&
+ typeof obj2[attrname] !== "undefined") {
+ if (typeof obj2[attrname] !== "object") {
+ throw Error("Objects being merged must have the same structure");
+ }
+ obj3[attrname] = mergeOptions(obj1[attrname], obj2[attrname]);
+ } else {
+ obj3[attrname] = obj1[attrname];
+ }
+ }
+ for (attrname in obj2) {
+ if (typeof obj2[attrname] === "object" &&
+ typeof obj1[attrname] !== "undefined") {
+ if (typeof obj1[attrname] !== "object") {
+ throw Error("Objects being merged must have the same structure");
+ }
+ // Already handled above
+ } else {
+ obj3[attrname] = obj2[attrname];
+ }
+ }
+ return obj3;
+}
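+// Illustrative example (values are hypothetical, not taken from a real test run):
+//   mergeOptions({net: {port: 1}, storage: {dbPath: "/a"}}, {net: {port: 2}})
+// returns {net: {port: 2}, storage: {dbPath: "/a"}} -- where both objects define the
+// same leaf value, obj2 wins.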
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongod. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongod({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongod;
+function testGetCmdLineOptsMongod(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongod using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongod = baseMongod.adminCommand("getCmdLineOpts");
+
+ // Stop the mongod we used to get the options
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongod;
+ }
+
+ if (typeof getCmdLineOptsBaseMongod === "undefined") {
+ getCmdLineOptsBaseMongod = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongod;
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsExpected.parsed.storage.dbPath;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with options
+ var mongod = MongoRunner.runMongod(mongoRunnerConfig);
+
+  // Create and authenticate a high-privilege user in case mongod is running with authorization.
+  // The try/catch is necessary in case this is run on an uninitiated replica set, e.g. by a
+  // test such as repl_options.js.
+  try {
+    mongod.getDB("admin").createUser({user: "root", pwd: "pass", roles: ["root"]});
+    mongod.getDB("admin").auth("root", "pass");
+  } catch (err) {
+    // Ignore the error; these calls are best-effort and may legitimately fail here.
+  }
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongod.adminCommand("getCmdLineOpts");
+
+ // Delete port and dbPath if we are not explicitly setting them, since they will change on
+ // multiple runs of the test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.storage === "undefined" ||
+ typeof expectedResult.parsed.storage.dbPath === "undefined") {
+ delete getCmdLineOptsResult.parsed.storage.dbPath;
+ }
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ mongod.getDB("admin").logout();
+ MongoRunner.stopMongod(mongod.port);
+}
+
+// Test that the parsed result of setting certain command line options has the correct format in
+// mongos. See SERVER-13379.
+//
+// Arguments:
+// mongoRunnerConfig - Configuration object to pass to the mongo runner
+// expectedResult - Object formatted the same way as the result of running the "getCmdLineOpts"
+// command, but with only the fields that should be set by the options implied by the first
+// argument set.
+//
+// Example:
+//
+// testGetCmdLineOptsMongos({ port : 10000 }, { "parsed" : { "net" : { "port" : 10000 } } });
+//
+var getCmdLineOptsBaseMongos;
+function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
+
+ // Get the options object returned by "getCmdLineOpts" when we spawn a mongos using our test
+ // framework without passing any additional options. We need this because the framework adds
+ // options of its own, and we only want to compare against the options we care about.
+ function getBaseOptsObject() {
+
+ // Start mongod with no options
+ var baseMongod = MongoRunner.runMongod();
+
+ // Start mongos with only the configdb option
+ var baseMongos = MongoRunner.runMongos({configdb: baseMongod.host});
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsBaseMongos = baseMongos.adminCommand("getCmdLineOpts");
+
+ // Remove the configdb option
+ delete getCmdLineOptsBaseMongos.parsed.sharding.configDB;
+
+ // Stop the mongod and mongos we used to get the options
+ MongoRunner.stopMongos(baseMongos.port);
+ MongoRunner.stopMongod(baseMongod.port);
+
+ return getCmdLineOptsBaseMongos;
+ }
+
+ if (typeof getCmdLineOptsBaseMongos === "undefined") {
+ getCmdLineOptsBaseMongos = getBaseOptsObject();
+ }
+
+ // Get base command line opts. Needed because the framework adds its own options
+ var getCmdLineOptsExpected = getCmdLineOptsBaseMongos;
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsExpected.parsed.net.port;
+ }
+
+ // Merge with the result that we expect
+ expectedResult = mergeOptions(getCmdLineOptsExpected, expectedResult);
+
+ // Start mongod with no options
+ var mongod = MongoRunner.runMongod();
+
+ // Add configdb option
+ mongoRunnerConfig['configdb'] = mongod.host;
+
+ // Start mongos connected to mongod
+ var mongos = MongoRunner.runMongos(mongoRunnerConfig);
+
+ // Get the parsed options
+ var getCmdLineOptsResult = mongos.adminCommand("getCmdLineOpts");
+
+ // Delete port if we are not explicitly setting it, since it will change on multiple runs of the
+ // test framework and cause false failures.
+ if (typeof expectedResult.parsed === "undefined" ||
+ typeof expectedResult.parsed.net === "undefined" ||
+ typeof expectedResult.parsed.net.port === "undefined") {
+ delete getCmdLineOptsResult.parsed.net.port;
+ }
+
+ // Remove the configdb option
+ delete getCmdLineOptsResult.parsed.sharding.configDB;
+
+ // Make sure the options are equal to what we expect
+ assert.docEq(getCmdLineOptsResult.parsed, expectedResult.parsed);
+
+ // Cleanup
+ MongoRunner.stopMongos(mongos.port);
+ MongoRunner.stopMongod(mongod.port);
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_auth.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_auth.ini
new file mode 100644
index 00000000000..c1193be1b03
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_auth.ini
@@ -0,0 +1 @@
+auth=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_dur.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_dur.ini
new file mode 100644
index 00000000000..8f83f3ae5a7
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_dur.ini
@@ -0,0 +1 @@
+dur=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_httpinterface.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_httpinterface.ini
new file mode 100644
index 00000000000..fc839a98a76
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_httpinterface.ini
@@ -0,0 +1 @@
+httpinterface=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_ipv6.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_ipv6.ini
new file mode 100644
index 00000000000..a091421022d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_ipv6.ini
@@ -0,0 +1 @@
+ipv6=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_journal.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_journal.ini
new file mode 100644
index 00000000000..d0010a86906
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_journal.ini
@@ -0,0 +1 @@
+journal=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_jsonp.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_jsonp.ini
new file mode 100644
index 00000000000..82847f50b2b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_jsonp.ini
@@ -0,0 +1 @@
+jsonp=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_jsonp.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_jsonp.json
new file mode 100644
index 00000000000..4d5477a8547
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_jsonp.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "http" : {
+ "JSONPEnabled" : false
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_moveparanoia.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_moveparanoia.ini
new file mode 100644
index 00000000000..f21b50f9513
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_moveparanoia.ini
@@ -0,0 +1 @@
+moveParanoia=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noauth.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noauth.ini
new file mode 100644
index 00000000000..a65f909baf3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noauth.ini
@@ -0,0 +1 @@
+noauth=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noautosplit.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noautosplit.ini
new file mode 100644
index 00000000000..b490f9038dd
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noautosplit.ini
@@ -0,0 +1 @@
+noAutoSplit=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nodur.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nodur.ini
new file mode 100644
index 00000000000..b0c73a48b30
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nodur.ini
@@ -0,0 +1 @@
+nodur=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nohttpinterface.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nohttpinterface.ini
new file mode 100644
index 00000000000..52c4958da6e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nohttpinterface.ini
@@ -0,0 +1 @@
+nohttpinterface=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noindexbuildretry.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noindexbuildretry.ini
new file mode 100644
index 00000000000..79e428c492f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noindexbuildretry.ini
@@ -0,0 +1 @@
+noIndexBuildRetry=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nojournal.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nojournal.ini
new file mode 100644
index 00000000000..17172363d25
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nojournal.ini
@@ -0,0 +1 @@
+nojournal=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nomoveparanoia.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nomoveparanoia.ini
new file mode 100644
index 00000000000..4696304134f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nomoveparanoia.ini
@@ -0,0 +1 @@
+noMoveParanoia=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noobjcheck.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noobjcheck.ini
new file mode 100644
index 00000000000..471e83c3172
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noobjcheck.ini
@@ -0,0 +1 @@
+noobjcheck=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noprealloc.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noprealloc.ini
new file mode 100644
index 00000000000..08c78be3507
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noprealloc.ini
@@ -0,0 +1 @@
+noprealloc=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noscripting.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noscripting.ini
new file mode 100644
index 00000000000..4cfaf3395f6
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_noscripting.ini
@@ -0,0 +1 @@
+noscripting=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nounixsocket.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nounixsocket.ini
new file mode 100644
index 00000000000..66da9f08391
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_nounixsocket.ini
@@ -0,0 +1 @@
+nounixsocket=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_objcheck.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_objcheck.ini
new file mode 100644
index 00000000000..bd19d026bbf
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_objcheck.ini
@@ -0,0 +1 @@
+objcheck=false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_rest_interface.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_rest_interface.json
new file mode 100644
index 00000000000..f9ad93a4f5d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/disable_rest_interface.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "http" : {
+ "RESTInterfaceEnabled" : false
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_auth.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_auth.json
new file mode 100644
index 00000000000..9f9cc84d107
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_auth.json
@@ -0,0 +1,5 @@
+{
+ "security" : {
+ "authorization" : "enabled"
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_autosplit.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_autosplit.json
new file mode 100644
index 00000000000..a0d4f8af1be
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_autosplit.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "autoSplit" : true
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_httpinterface.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_httpinterface.json
new file mode 100644
index 00000000000..c87dabe125d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_httpinterface.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "http" : {
+ "enabled" : true
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_indexbuildretry.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_indexbuildretry.json
new file mode 100644
index 00000000000..362db08edd3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_indexbuildretry.json
@@ -0,0 +1,5 @@
+{
+ "storage" : {
+ "indexBuildRetry" : true
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_journal.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_journal.json
new file mode 100644
index 00000000000..d75b94ccbc7
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_journal.json
@@ -0,0 +1,7 @@
+{
+ "storage" : {
+ "journal" : {
+ "enabled" : false
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_objcheck.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_objcheck.json
new file mode 100644
index 00000000000..b52be7382ed
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_objcheck.json
@@ -0,0 +1,5 @@
+{
+ "net" : {
+ "wireObjectCheck" : true
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_paranoia.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_paranoia.json
new file mode 100644
index 00000000000..218646b1662
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_paranoia.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "archiveMovedChunks" : true
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_prealloc.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_prealloc.json
new file mode 100644
index 00000000000..15ecefbb546
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_prealloc.json
@@ -0,0 +1,5 @@
+{
+ "storage" : {
+ "preallocDataFiles" : true
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_scripting.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_scripting.json
new file mode 100644
index 00000000000..e8f32f2c23c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_scripting.json
@@ -0,0 +1,5 @@
+{
+ "security" : {
+ "javascriptEnabled" : true
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_unixsocket.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_unixsocket.json
new file mode 100644
index 00000000000..660d21eb17f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/enable_unixsocket.json
@@ -0,0 +1,7 @@
+{
+ "net" : {
+ "unixDomainSocket" : {
+ "enabled" : true
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_dur.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_dur.ini
new file mode 100644
index 00000000000..43495fbd0bd
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_dur.ini
@@ -0,0 +1 @@
+dur=
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_journal.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_journal.ini
new file mode 100644
index 00000000000..f750ac2e185
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_journal.ini
@@ -0,0 +1 @@
+journal=
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nodur.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nodur.ini
new file mode 100644
index 00000000000..f1046df16a9
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nodur.ini
@@ -0,0 +1 @@
+nodur=
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nojournal.ini b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nojournal.ini
new file mode 100644
index 00000000000..737e5c28029
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/implicitly_enable_nojournal.ini
@@ -0,0 +1 @@
+nojournal=
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_component_verbosity.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_component_verbosity.json
new file mode 100644
index 00000000000..69c200834a1
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_component_verbosity.json
@@ -0,0 +1,16 @@
+{
+ "systemLog" : {
+ "verbosity" : 2,
+ "component" : {
+ "accessControl" : {
+ "verbosity" : 0
+ },
+ "storage" : {
+ "verbosity" : 3,
+ "journaling" : {
+ "verbosity" : 5
+ }
+ }
+ }
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_profiling.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_profiling.json
new file mode 100644
index 00000000000..944f0de1575
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_profiling.json
@@ -0,0 +1,5 @@
+{
+ "operationProfiling" : {
+ "mode" : "all"
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_replsetname.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_replsetname.json
new file mode 100644
index 00000000000..522ca2b766f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_replsetname.json
@@ -0,0 +1,5 @@
+{
+ "replication" : {
+ "replSetName" : "myconfigname"
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_shardingrole.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_shardingrole.json
new file mode 100644
index 00000000000..71f92f122db
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_shardingrole.json
@@ -0,0 +1,5 @@
+{
+ "sharding" : {
+ "clusterRole" : "configsvr"
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_verbosity.json b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_verbosity.json
new file mode 100644
index 00000000000..47a1cce1b03
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/config_files/set_verbosity.json
@@ -0,0 +1,5 @@
+{
+ "systemLog" : {
+ "verbosity" : 5
+ }
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/crl.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/crl.pem
new file mode 100644
index 00000000000..275c9e2d91c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/crl.pem
@@ -0,0 +1,38 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Aug 21 13:56:28 2014 GMT
+ Next Update: Aug 18 13:56:28 2024 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+No Revoked Certificates.
+ Signature Algorithm: sha256WithRSAEncryption
+ 48:1b:0b:b1:89:f5:6f:af:3c:dd:2a:a0:e5:55:04:80:16:b4:
+ 23:98:39:bb:9f:16:c9:25:73:72:c6:a6:73:21:1d:1a:b6:99:
+ fc:47:5e:bc:af:64:29:02:9c:a5:db:15:8a:65:48:3c:4f:a6:
+ cd:35:47:aa:c6:c0:39:f5:a6:88:8f:1b:6c:26:61:4e:10:d7:
+ e2:b0:20:3a:64:92:c1:d3:2a:11:3e:03:e2:50:fd:4e:3c:de:
+ e2:e5:78:dc:8e:07:a5:69:55:13:2b:8f:ae:21:00:42:85:ff:
+ b6:b1:2b:69:08:40:5a:25:8c:fe:57:7f:b1:06:b0:72:ff:61:
+ de:21:59:05:a8:1b:9e:c7:8a:08:ab:f5:bc:51:b3:36:68:0f:
+ 54:65:3c:8d:b7:80:d0:27:01:3e:43:97:89:19:89:0e:c5:01:
+ 2c:55:9f:b6:e4:c8:0b:35:f8:52:45:d3:b4:09:ce:df:73:98:
+ f5:4c:e4:5a:06:ac:63:4c:f8:4d:9c:af:88:fc:19:f7:77:ea:
+ ee:56:18:49:16:ce:62:66:d1:1b:8d:66:33:b5:dc:b1:25:b3:
+ 6c:81:e9:d0:8a:1d:83:61:49:0e:d9:94:6a:46:80:41:d6:b6:
+ 59:a9:30:55:3d:5b:d3:5b:f1:37:ec:2b:76:d0:3a:ac:b2:c8:
+ 7c:77:04:78
+-----BEGIN X509 CRL-----
+MIIBzjCBtwIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDgyMTEzNTYyOFoXDTI0MDgxODEzNTYyOFqgDzANMAsGA1UdFAQEAgIQADAN
+BgkqhkiG9w0BAQsFAAOCAQEASBsLsYn1b6883Sqg5VUEgBa0I5g5u58WySVzcsam
+cyEdGraZ/EdevK9kKQKcpdsVimVIPE+mzTVHqsbAOfWmiI8bbCZhThDX4rAgOmSS
+wdMqET4D4lD9Tjze4uV43I4HpWlVEyuPriEAQoX/trEraQhAWiWM/ld/sQawcv9h
+3iFZBagbnseKCKv1vFGzNmgPVGU8jbeA0CcBPkOXiRmJDsUBLFWftuTICzX4UkXT
+tAnO33OY9UzkWgasY0z4TZyviPwZ93fq7lYYSRbOYmbRG41mM7XcsSWzbIHp0Iod
+g2FJDtmUakaAQda2WakwVT1b01vxN+wrdtA6rLLIfHcEeA==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/crl_client_revoked.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/crl_client_revoked.pem
new file mode 100644
index 00000000000..0b99d56936e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/crl_client_revoked.pem
@@ -0,0 +1,41 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Aug 21 13:43:27 2014 GMT
+ Next Update: Aug 18 13:43:27 2024 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+Revoked Certificates:
+ Serial Number: 02
+ Revocation Date: Aug 21 13:43:27 2014 GMT
+ Signature Algorithm: sha256WithRSAEncryption
+ 24:86:73:8d:7f:55:15:d0:d6:8a:47:53:cf:97:f7:e5:3d:0b:
+ 4a:ea:fb:02:6a:2e:79:c6:b1:38:b2:ac:f0:c0:64:47:b0:3e:
+ ad:4e:2e:94:e6:64:ed:79:34:bd:74:c0:d4:3d:b9:a1:bb:38:
+ 89:5c:02:6a:ad:6b:dc:3b:64:34:6a:2d:4c:90:36:82:95:0c:
+ 19:88:e2:a3:bf:8e:1b:56:98:37:32:87:ed:f0:bd:dd:e2:0d:
+ f9:80:dc:f2:a5:b4:ee:d9:bb:83:fe:b8:3a:13:e0:da:fc:04:
+ 77:fb:ce:f9:c5:2a:54:a7:f0:34:09:2a:b2:3d:46:1b:48:e6:
+ e8:16:c7:a1:3c:88:8c:72:cd:cc:53:dc:f8:54:63:1f:b9:8b:
+ ea:2c:e5:26:c5:b4:a4:9f:8b:e1:6c:85:9b:c6:63:6f:2f:ae:
+ 18:c5:6a:23:f0:58:27:85:5c:0f:01:04:da:d2:8b:de:9e:ab:
+ 46:00:22:07:28:e1:ef:46:91:90:06:58:95:05:68:67:58:6e:
+ 67:a8:0b:06:1a:73:d9:04:18:c9:a3:e4:e3:d6:94:a3:e1:5c:
+ e5:08:1b:b3:9d:ab:3e:ea:20:b1:04:e5:90:e1:42:54:b2:58:
+ bb:51:1a:48:87:60:b0:95:4a:2e:ce:a0:4f:8c:17:6d:6b:4c:
+ 37:aa:4d:d7
+-----BEGIN X509 CRL-----
+MIIB5DCBzQIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDgyMTEzNDMyN1oXDTI0MDgxODEzNDMyN1owFDASAgECFw0xNDA4MjExMzQz
+MjdaoA8wDTALBgNVHRQEBAICEAAwDQYJKoZIhvcNAQELBQADggEBACSGc41/VRXQ
+1opHU8+X9+U9C0rq+wJqLnnGsTiyrPDAZEewPq1OLpTmZO15NL10wNQ9uaG7OIlc
+Amqta9w7ZDRqLUyQNoKVDBmI4qO/jhtWmDcyh+3wvd3iDfmA3PKltO7Zu4P+uDoT
+4Nr8BHf7zvnFKlSn8DQJKrI9RhtI5ugWx6E8iIxyzcxT3PhUYx+5i+os5SbFtKSf
+i+FshZvGY28vrhjFaiPwWCeFXA8BBNrSi96eq0YAIgco4e9GkZAGWJUFaGdYbmeo
+CwYac9kEGMmj5OPWlKPhXOUIG7Odqz7qILEE5ZDhQlSyWLtRGkiHYLCVSi7OoE+M
+F21rTDeqTdc=
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/crl_expired.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/crl_expired.pem
new file mode 100644
index 00000000000..c9b3abb05a7
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/crl_expired.pem
@@ -0,0 +1,38 @@
+Certificate Revocation List (CRL):
+ Version 2 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: /CN=Kernel Test CA/OU=Kernel/O=MongoDB/L=New York City/ST=New York/C=US
+ Last Update: Jul 21 19:45:56 2014 GMT
+ Next Update: Jul 21 20:45:56 2014 GMT
+ CRL extensions:
+ X509v3 CRL Number:
+ 4096
+No Revoked Certificates.
+ Signature Algorithm: sha256WithRSAEncryption
+ 14:e8:6d:51:fc:0e:66:08:22:b2:4d:fb:da:7a:5f:4d:d1:a0:
+ 80:f0:18:f3:c5:ca:c7:05:6c:70:59:fa:d5:96:68:fa:c7:1d:
+ 7e:fb:53:3b:4a:8f:ed:bb:51:04:e8:fb:db:d7:b8:96:d9:e2:
+ 8d:bb:54:cc:11:60:c8:20:ea:81:28:5f:e1:eb:d6:8c:94:bf:
+ 42:e0:7f:a3:13:0c:76:05:f2:f0:34:98:a3:e8:64:74:4c:cb:
+ bf:39:bb:fa:d5:2d:72:02:d1:fa:56:15:59:12:b7:ff:a3:cc:
+ c9:d6:14:ca:4a:1e:0b:b4:47:cf:58:b0:e5:24:d2:21:71:0d:
+ 2d:09:77:5c:2f:ef:40:f8:74:90:03:cc:37:2e:ea:6a:25:59:
+ c0:bf:48:90:00:55:9c:db:bf:1f:f0:7b:b6:5a:90:94:b6:8d:
+ 7c:7d:bb:2d:11:5f:0c:f5:4a:9b:c5:ed:ab:e3:fd:35:c8:76:
+ 3b:2e:41:cb:df:76:b5:f4:e9:05:72:f6:56:7a:fc:34:07:d6:
+ a2:55:eb:7c:58:33:5b:9d:3e:b2:03:89:01:c6:d1:54:75:1a:
+ 5c:73:3f:5e:2e:fd:3b:38:ed:d4:e1:fa:ec:ff:84:f0:55:ee:
+ 83:e0:f0:13:97:e7:f0:55:8c:00:a3:1a:31:e4:31:9e:68:d0:
+ 6d:3e:81:b0
+-----BEGIN X509 CRL-----
+MIIBzjCBtwIBATANBgkqhkiG9w0BAQsFADB0MRcwFQYDVQQDEw5LZXJuZWwgVGVz
+dCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQH
+Ew1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMX
+DTE0MDcyMTE5NDU1NloXDTE0MDcyMTIwNDU1NlqgDzANMAsGA1UdFAQEAgIQADAN
+BgkqhkiG9w0BAQsFAAOCAQEAFOhtUfwOZggisk372npfTdGggPAY88XKxwVscFn6
+1ZZo+scdfvtTO0qP7btRBOj729e4ltnijbtUzBFgyCDqgShf4evWjJS/QuB/oxMM
+dgXy8DSYo+hkdEzLvzm7+tUtcgLR+lYVWRK3/6PMydYUykoeC7RHz1iw5STSIXEN
+LQl3XC/vQPh0kAPMNy7qaiVZwL9IkABVnNu/H/B7tlqQlLaNfH27LRFfDPVKm8Xt
+q+P9Nch2Oy5By992tfTpBXL2Vnr8NAfWolXrfFgzW50+sgOJAcbRVHUaXHM/Xi79
+Ozjt1OH67P+E8FXug+DwE5fn8FWMAKMaMeQxnmjQbT6BsA==
+-----END X509 CRL-----
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_bad_first.journal b/src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_bad_first.journal
new file mode 100644
index 00000000000..687317844a7
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_bad_first.journal
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_bad_last.journal b/src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_bad_last.journal
new file mode 100644
index 00000000000..7dd98e2c97b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_bad_last.journal
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_good.journal b/src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_good.journal
new file mode 100644
index 00000000000..d76790d2451
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/dur_checksum_good.journal
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/expired.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/expired.pem
new file mode 100644
index 00000000000..e1d2ceb8de8
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/expired.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDfzCCAmegAwIBAgIBEDANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzEwMTYwMDAwWhcNMTQwNzE2MTYwMDAwWjBtMRAwDgYDVQQD
+EwdleHBpcmVkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdvREIxFjAU
+BgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQG
+EwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPFSQZlHvJpi3dmA
+1X5U1qaUN/O/EQy5IZ5Rw+cfFHWOZ84EsLZxehWyqDZRH49Rg06xSYdO2WZOopP8
+OnUVCLGL819K83ikZ5sCbvB/gKCSCenwveEN992gJfs70HaZfiJNC7/cFigSb5Jg
+5G77E1/Uml4hIThfYG2NbCsTuP/P4JLwuzCkfgEUWRbCioMPEpIpxQw2LCx5DCy6
+Llhct0Hp14N9dZ4nA1h1621wOckgGJHw9DXdt9rGzulY1UgOOPczyqT08CdpaVxK
+VzrJCcUxfUjhO4ukHz+LBFQY+ZEm+tVboDbinbiHxY24urP46/u+BwRvBvjOovJi
+NVUh5GsCAwEAAaMjMCEwHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcuMC4wLjEw
+DQYJKoZIhvcNAQEFBQADggEBAG3rRSFCSG3hilGK9SMtTpFnrquJNlL+yG0TP8VG
+1qVt1JGaDJ8YUc5HXXtKBeLnRYii7EUx1wZIKn78RHRdITo5OJvlmcwwh0bt+/eK
+u9XFgR3z35w5UPr/YktgoX39SOzAZUoorgNw500pfxfneqCZtcRufVvjtk8TUdlN
+lcd2HfIxtUHWJeTcVM18g0JdHMYdMBXDKuXOW9VWLIBC2G6nAL/8SZJtUaDllPb4
+NisuIGjfjGgNxMpEXn+sQjFTupAoJru21OtAgERWFJhKQ0hbO0kucEPKEfxHDBVG
+dKSRIl6b0XSDLfxEXPv5ZhdrK4KEw1dYYXySvIVXtn0Ys38=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEA8VJBmUe8mmLd2YDVflTWppQ3878RDLkhnlHD5x8UdY5nzgSw
+tnF6FbKoNlEfj1GDTrFJh07ZZk6ik/w6dRUIsYvzX0rzeKRnmwJu8H+AoJIJ6fC9
+4Q333aAl+zvQdpl+Ik0Lv9wWKBJvkmDkbvsTX9SaXiEhOF9gbY1sKxO4/8/gkvC7
+MKR+ARRZFsKKgw8SkinFDDYsLHkMLLouWFy3QenXg311nicDWHXrbXA5ySAYkfD0
+Nd232sbO6VjVSA449zPKpPTwJ2lpXEpXOskJxTF9SOE7i6QfP4sEVBj5kSb61Vug
+NuKduIfFjbi6s/jr+74HBG8G+M6i8mI1VSHkawIDAQABAoIBAGAO1QvVkU6HAjX8
+4X6a+KJwJ2F/8aJ14trpQyixp2wv1kQce9bzjpwqdGjCm+RplvHxAgq5KTJfJLnx
+UbefOsmpoqOQ6x9fmdoK+uwCZMoFt6qGaJ63960hfVzm71D2Qk4XCxFA4xTqWb0T
+knpWuNyRfSzw1Q9ib7jL7X2sKRyx9ZP+1a41ia/Ko6iYPUUnRb1Ewo10alYVWVIE
+upeIlWqv+1DGfda9f34pGVh3ldIDh1LHqaAZhdn6sKtcgIUGcWatZRmQiA5kSflP
+VBpOI2c2tkQv0j5cPGwD7GGaJ2aKayHG0EwnoNmxCeR0Ay3MO0vBAsxn7Wy6yqrS
+EfkYhFkCgYEA/OA2AHFIH7mE0nrMwegXrEy7BZUgLRCRFWTjxwnCKFQj2Uo2dtYD
+2QQKuQWeiP+LD2nHj4n1KXuSJiB1GtmEF3JkYV4Wd7mPWEVNDHa0G8ZndquPK40s
+YSjh9u0KesUegncBFfIiwzxsk9724iaXq3aXOexc0btQB2xltRzj6/0CgYEA9E2A
+QU6pnCOzGDyOV7+TFr0ha7TXaMOb5aIVz6tJ7r5Nb7oZP9T9UCdUnw2Tls5Ce5tI
+J23O7JqwT4CudnWnk5ZtVtGBYA23mUryrgf/Utfg08hU2uRyq9LOxVaVqfV/AipN
+62GmfuxkK4PatOcAOhKqmS/zGfZqIg7V6rtX2ocCgYEAlY1ogpR8ij6mvfBgPmGr
+9nues+uBDwXYOCXlzCYKTN2OIgkQ8vEZb3RDfy9CllVDgccWfd6iPnlVcvUJLOrt
+gwxlL2x8ryvwCc1ahv+A/1g0gmtuDdy9HW0XTnjcFMWViKUm4DrGsl5+/GkF67PV
+SVOmllwifOthpjJGaHmAlmUCgYB6EFMZzlzud+PfIzqX20952Avfzd6nKL03EjJF
+rbbmA82bGmfNPfVHXC9qvRTWD76mFeMKWFJAY9XeE1SYOZb+JfYBn/I9dP0cKZdx
+nutSkCx0hK7pI6Wr9kt7zBRBdDj+cva1ufe/iQtPtrTLGHRDj9oPaibT/Qvwcmst
+umdd9wKBgQDM7j6Rh7v8AeLy2bw73Qtk0ORaHqRBHSQw87srOLwtfQzE92zSGMj+
+FVt/BdPgzyaddegKvJ9AFCPAxbA8Glnmc89FO7pcXn9Wcy+ZoZIF6YwgUPhPCp/4
+r9bKuXuQiutFbKyes/5PTXqbJ/7xKRZIpQCvxg2syrW3hxx8LIx/kQ==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/extended_assert.js b/src/mongo/gotools/test/qa-tests/jstests/libs/extended_assert.js
new file mode 100644
index 00000000000..8680fb08da0
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/extended_assert.js
@@ -0,0 +1,61 @@
+// Exports 'extendedAssert', which includes all built-in assertions plus:
+// - a new extendedAssert.strContains(needle, haystack, msg)
+// - a .soon variant of eq, neq, contains, gt, lt, gte, lte, and strContains,
+//   e.g. .eq.soon(expected, getActualFunc, msg[, timeout, interval])
+// These produce more descriptive assertion error messages than the built-in
+// assert.soon provides.
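+//
+// Illustrative usage (the collection and counts are hypothetical):
+//   extendedAssert.eq.soon(3, function() { return coll.count(); },
+//     'collection never reached 3 documents', 30000, 100);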
+
+var extendedAssert;
+(function() {
+ if (typeof extendedAssert !== 'undefined') {
+ return;
+ }
+
+ // Make a copy of the assert object
+ extendedAssert = assert.bind(this);
+ for (var key in assert) {
+ if (assert.hasOwnProperty(key)) {
+ extendedAssert[key] = assert[key];
+ }
+ }
+
+ extendedAssert.strContains = function(needle, haystack, msg) {
+ if (haystack.indexOf(needle) === -1) {
+ doassert('"' + haystack + '" does not contain "' + needle + '" : ' + msg);
+ }
+ };
+
+ var EX_ASSERT_DONT_PRINT = '**extended_assert.js--do not print this error message**';
+ var builtin_doassert = doassert;
+ var muteable_doassert = function(msg, obj) {
+ if (msg.indexOf(EX_ASSERT_DONT_PRINT) !== -1) {
+ throw Error(msg);
+ }
+ builtin_doassert(msg, obj);
+ };
+
+ ['eq', 'neq', 'contains', 'gt', 'lt', 'gte', 'lte', 'strContains']
+ .forEach(function (name) {
+ var assertFunc = extendedAssert[name];
+ var newAssertFunc = assertFunc.bind(this);
+ newAssertFunc.soon = function(expected, actualFunc, msg, timeout, interval) {
+ try {
+ doassert = muteable_doassert;
+ extendedAssert.soon(function() {
+ try {
+ assertFunc(expected, actualFunc(), EX_ASSERT_DONT_PRINT);
+ return true;
+ } catch (e) {
+ return false;
+ }
+ }, EX_ASSERT_DONT_PRINT, timeout, interval);
+ doassert = builtin_doassert;
+ } catch (e) {
+ doassert = builtin_doassert;
+ // Make it fail
+ assertFunc(expected, actualFunc(), msg);
+ }
+ };
+ extendedAssert[name] = newAssertFunc;
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/fts.js b/src/mongo/gotools/test/qa-tests/jstests/libs/fts.js
new file mode 100644
index 00000000000..0da80d5d3ae
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/fts.js
@@ -0,0 +1,22 @@
+function getIDS(commandResult) {
+ if (!(commandResult && commandResult.results)) {
+ return [];
+ }
+
+ return commandResult.results.map(function(z) {
+ return z.obj._id;
+ });
+}
+
+function queryIDS(coll, search, filter, extra) {
+ var cmd = {search: search};
+ if (filter) {
+ cmd.filter = filter;
+ }
+ if (extra) {
+ Object.extend(cmd, extra);
+ }
+  var lastCommandResult = coll.runCommand("text", cmd);
+
+  return getIDS(lastCommandResult);
+}
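+
+// Illustrative usage (collection and query are hypothetical):
+//   var ids = queryIDS(db.articles, "coffee", {views: {$gt: 10}});
+// runs the "text" command with a filter and returns the _id of each matching result.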
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/geo_near_random.js b/src/mongo/gotools/test/qa-tests/jstests/libs/geo_near_random.js
new file mode 100644
index 00000000000..7809aa77adc
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/geo_near_random.js
@@ -0,0 +1,100 @@
+GeoNearRandomTest = function(name) {
+ this.name = name;
+ this.t = db[name];
+ this.nPts = 0;
+
+ // reset state
+ this.t.drop();
+ Random.srand(1234);
+
+ print("starting test: " + name);
+};
+
+
+GeoNearRandomTest.prototype.mkPt = function mkPt(scale, indexBounds) {
+ if (!indexBounds) {
+ scale = scale || 1; // scale is good for staying away from edges
+ return [((Random.rand() * 359.8) - 179.9) * scale, ((Random.rand() * 180) - 90) * scale];
+ }
+ var range = indexBounds.max - indexBounds.min;
+ var eps = Math.pow(2, -40);
+ // Go very close to the borders but not quite there.
+  return [(Random.rand() * (range - eps) + eps) + indexBounds.min,
+    (Random.rand() * (range - eps) + eps) + indexBounds.min];
+};
+
+GeoNearRandomTest.prototype.insertPts = function(nPts, indexBounds, scale) {
+  assert.eq(this.nPts, 0, "insertPts already called");
+ this.nPts = nPts;
+
+ var bulk = this.t.initializeUnorderedBulkOp();
+ for (var i=0; i<nPts; i++) {
+ bulk.insert({_id: i, loc: this.mkPt(scale, indexBounds)});
+ }
+ assert.writeOK(bulk.execute());
+
+ if (!indexBounds) {
+ this.t.ensureIndex({loc: '2d'});
+ } else {
+ this.t.ensureIndex({loc: '2d'}, indexBounds);
+ }
+};
+
+GeoNearRandomTest.prototype.assertIsPrefix = function(short, long) {
+ for (var i=0; i < short.length; i++) {
+
+ var xS = short[i].obj ? short[i].obj.loc[0] : short[i].loc[0];
+ var yS = short[i].obj ? short[i].obj.loc[1] : short[i].loc[1];
+ var dS = short[i].obj ? short[i].dis : 1;
+
+ var xL = long[i].obj ? long[i].obj.loc[0] : long[i].loc[0];
+ var yL = long[i].obj ? long[i].obj.loc[1] : long[i].loc[1];
+ var dL = long[i].obj ? long[i].dis : 1;
+
+ assert.eq([xS, yS, dS], [xL, yL, dL]);
+ }
+};
+
+GeoNearRandomTest.prototype.testPt = function(pt, opts) {
+  assert.neq(this.nPts, 0, "insertPts not yet called");
+
+ opts = opts || {};
+ opts['sphere'] = opts['sphere'] || 0;
+  opts['nToTest'] = opts['nToTest'] || this.nPts; // be careful, test is O(N^2)
+
+ print("testing point: " + tojson(pt) + " opts: " + tojson(opts));
+
+
+ var cmd = {geoNear: this.t.getName(), near: pt, num: 1, spherical: opts.sphere};
+
+ var last = db.runCommand(cmd).results;
+ for (var i=2; i <= opts.nToTest; i++) {
+ // print(i); // uncomment to watch status
+ cmd.num = i;
+ var ret = db.runCommand(cmd).results;
+
+ try {
+ this.assertIsPrefix(last, ret);
+ } catch (e) {
+ print("*** failed while compairing " + (i-1) + " and " + i);
+ printjson(cmd);
+ throw e; // rethrow
+ }
+
+ last = ret;
+ }
+
+
+ if (!opts.sharded) {
+ last = last.map(function(x) {
+ return x.obj;
+ });
+
+ var query = {loc: {}};
+ query.loc[opts.sphere ? '$nearSphere' : '$near'] = pt;
+ var near = this.t.find(query).limit(opts.nToTest).toArray();
+
+ this.assertIsPrefix(last, near);
+ assert.eq(last, near);
+ }
+};
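+
+// Illustrative usage (names are hypothetical, not from the original suite):
+//   var test = new GeoNearRandomTest("geo_near_random_example");
+//   test.insertPts(50);
+//   test.testPt(test.mkPt());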
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/host_ipaddr.js b/src/mongo/gotools/test/qa-tests/jstests/libs/host_ipaddr.js
new file mode 100644
index 00000000000..f5412da1563
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/host_ipaddr.js
@@ -0,0 +1,38 @@
+// Returns a non-localhost IP address of the host running the mongo shell process
+function get_ipaddr() {
+ // set temp path, if it exists
+ var path = "";
+ try {
+ path = TestData.tmpPath;
+ if (typeof path === "undefined") {
+ path = "";
+ } else if (path.slice(-1) !== "/") {
+ // Terminate path with / if defined
+ path += "/";
+ }
+ } catch (err) {
+ // no testdata
+ }
+
+ var ipFile = path+"ipaddr.log";
+ var windowsCmd = "ipconfig > "+ipFile;
+ var unixCmd = "/sbin/ifconfig | grep inet | grep -v '127.0.0.1' > "+ipFile;
+ var ipAddr = null;
+ var hostType = null;
+
+ try {
+ hostType = getBuildInfo().sysInfo.split(' ')[0];
+
+ // os-specific methods
+ if (hostType === "windows") {
+ runProgram('cmd.exe', '/c', windowsCmd);
+ ipAddr = cat(ipFile).match(/IPv4.*: (.*)/)[1];
+ } else {
+ runProgram('bash', '-c', unixCmd);
+ ipAddr = cat(ipFile).replace(/addr:/g, "").match(/inet (.[^ ]*) /)[1];
+ }
+ } finally {
+ removeFile(ipFile);
+ }
+ return ipAddr;
+}
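+
+// Illustrative usage (the address shown is hypothetical):
+//   var ip = get_ipaddr(); // e.g. "10.1.2.3"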
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/key1 b/src/mongo/gotools/test/qa-tests/jstests/libs/key1
new file mode 100644
index 00000000000..b5c19e4092f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/key1
@@ -0,0 +1 @@
+foop de doop
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/key2 b/src/mongo/gotools/test/qa-tests/jstests/libs/key2
new file mode 100644
index 00000000000..cbde8212841
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/key2
@@ -0,0 +1 @@
+other key
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/localhostnameCN.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/localhostnameCN.pem
new file mode 100644
index 00000000000..e6aca6a217d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/localhostnameCN.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDgTCCAmmgAwIBAgIBBTANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBvMRIwEAYDVQQD
+EwkxMjcuMC4wLjExDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29EQjEW
+MBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJBgNV
+BAYTAlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAiqQNGgQggL8S
+LlxRgpM3qoktt3J9Pd3WXkknz7OjQr4dyj9wd0VNuD6+Ycg1mBbopO+M/K/ZWv8c
+7WDMM13DDZtpWjm+Q6uFc+vlI9Q9bLHgsZF9kVGRb2bNAfV2bC5/UnO1ulQdHoIB
+p3e/Jtko2WmruzVQFGVMBGCY7RlMRyxe3R8rwNNFvooMRg8yq8tq0qBkVhh85kct
+HHPggXD4/uM2Yc/Q94t5MhDFabewdzsFOLqvI/Y5eIeZLhdIzAv37kolr8AuyqIR
+qcJKztXIrFbLgEZBjoCNkOYZOQE+l8iwwiZxoZN9Jv7x5LyiA+ijtQ+5aI/kMPqG
+nox+/bNFCQIDAQABoyMwITAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAu
+MTANBgkqhkiG9w0BAQUFAAOCAQEAVJJNuUVzMRaft17NH6AzMSTiJxMFWoafmYgx
+jZnzA42XDPoPROuN7Bst6WVYDNpPb1AhPDco9qDylSZl0d341nHAuZNc84fD0omN
+Mbqieu8WseRQ300cbnS8p11c9aYpO/fNQ5iaYhGsRT7pnLs9MIgR468KVjY2xt49
+V0rshG6RxZj83KKuJd0T4X+5UeYz4B677y+SR0aoK2I2Sh+cffrMX2LotHc2I+JI
+Y9SDLvQT7chD9GzaWz634kmy3EEY0LreMm6AxhMOsr0lbZx5O8wLTScSjKARJ6OH
+nPxM1gYT07mkNmfyEnl1ChAN0MPgcLHQqEfe7x7ZQSbAv2gWfA==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAiqQNGgQggL8SLlxRgpM3qoktt3J9Pd3WXkknz7OjQr4dyj9w
+d0VNuD6+Ycg1mBbopO+M/K/ZWv8c7WDMM13DDZtpWjm+Q6uFc+vlI9Q9bLHgsZF9
+kVGRb2bNAfV2bC5/UnO1ulQdHoIBp3e/Jtko2WmruzVQFGVMBGCY7RlMRyxe3R8r
+wNNFvooMRg8yq8tq0qBkVhh85kctHHPggXD4/uM2Yc/Q94t5MhDFabewdzsFOLqv
+I/Y5eIeZLhdIzAv37kolr8AuyqIRqcJKztXIrFbLgEZBjoCNkOYZOQE+l8iwwiZx
+oZN9Jv7x5LyiA+ijtQ+5aI/kMPqGnox+/bNFCQIDAQABAoIBAQAMiUT+Az2FJsHY
+G1Trf7Ba5UiS+/FDNNn7cJX++/lZQaOj9BSRVFzwuguw/8+Izxl+QIL5HlWDGupc
+tJICWwoWIuVl2S7RI6NPlhcEJF7hgzwUElnOWBfUgPEsqitpINM2e2wFSzHO3maT
+5AoO0zgUYK+8n9d74KT9CFcLqWvyS3iksK/FXfCZt0T1EoJ4LsDjeCTfVKqrku2U
++fCnZZYNkrgUI7Hku94EJfOh462V4KQAUGsvllwb1lfmR5NR86G6VX6oyMGctL5e
+1M6XQv+JQGEmAe6uULtCUGh32fzwJ9Un3j2GXOHT0LWrVc5iLuXwwzQvCGaMYtKm
+FAIDpPxhAoGBAMtwzpRyhf2op/REzZn+0aV5FWKjeq69Yxd62RaOf2EetcPwvUOs
+yQXcP0KZv15VWU/XhZUmTkPf52f0YHV/b1Sm6wUOiMNQ4XpnRj2THf0N7RS4idMm
+VwtMf1pxqttxQVKPpOvPEiTyIh2Nx/juyfD4CWkOVNTvOCd1w+av6ukNAoGBAK51
+gIXDuwJ2e5h3IJyewN/HOZqlgPKyMjnACaeXQ5wPJSrz4+UkJkuXT2dYKhv6u7K/
+GtucTdvBIJeq61+LjjkYk7OVDzoqP/uWU7p1y7gU9LZq+7tgq7r8cgeaC3IBQe7X
+jdFPEy1+zAEBh6MfFjnLZ2Kop9qbH3cNih/g9pTtAoGBAJ8dmdUtRXNByCsa7Rv2
+243qiDlf14J4CdrBcK1dwm75j/yye7VEnO2Cd8/lZHGpm3MBBC/FiA06QElkL1V2
+2GKDMun/liP9TH1p7NwYBqp3i+ha9SE6qXXi3PCmWpXLnOWwB7OPf4d6AgjPbYpb
+aYKY3PNYDC2G9IqYZyI0kSy5AoGBAJ5Fe5PfPom9c+OeL7fnTpO16kyiWZnUkDxU
+PG4OjQfHtbCCEv6PDS8G1sKq+Yjor+A5/+O8qeX0D92I8oB720txQI5rbKUYL3PP
+raY7t9YJLPlRlY8o5KN+4vSCjF+hRG+qnr6FPqDHp8xB1wvl6AQGxIR8/csVcDZR
+0j2ZmhsBAoGAO1Cpk/hWXOLAhSj8P8Q/+3439HEctTZheVBd8q/TtdwXocaZMLi8
+MXURuVTw0GtS9TmdqOFXzloFeaMhJx6TQzZ2aPcxu95b7RjEDtVHus3ed2cSJ2El
+AuRvFT2RCVvTu1mM0Ti7id+d8QBcpbIpPjNjK2Wxir/19gtEawlqlkA=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/localhostnameSAN.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/localhostnameSAN.pem
new file mode 100644
index 00000000000..480300f29e1
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/localhostnameSAN.pem
@@ -0,0 +1,49 @@
+-----BEGIN CERTIFICATE-----
+MIIDpDCCAoygAwIBAgIBBjANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB5MRwwGgYDVQQD
+ExNzYW50ZXN0aG9zdG5hbWUuY29tMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoT
+B01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZ
+b3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
+AJKOLTNEPv08IVmhfkv6Xq1dT6pki76ggpJ7UpwdUSsTsWDKO2o1c7wnzEjfhYQ+
+CtlEvbYyL3O7f8AaO15WJdi53SMuWS+QfCKs6b0symYbinSXlZGb4oZYFSrodSxH
++G8u+TUxyeaXgTHowMWArmTRi2LgtIwXwwHJawfhFDxji3cSmLAr5YQMAaXUynq3
+g0DEAGMaeOlyn1PkJ2ZfJsX2di+sceKb+KK1xT+2vUSsvnIumBCYqMhU6y3WjBWK
+6WrmOcsldWo4IcgyzwVRlZiuuYoe6ZsxZ4nMyTdYebALPqgkt8QVXqkgcjWK8F18
+nuqWIAn1ISTjj73H4cnzYv0CAwEAAaM8MDowOAYDVR0RBDEwL4INKi5leGFtcGxl
+LmNvbYIJMTI3LjAuMC4xgghtb3JlZnVuIYIJbG9jYWxob3N0MA0GCSqGSIb3DQEB
+BQUAA4IBAQA5M3U4wvQYI3jz/+Eh4POrJAs9eSRGkUhz1lP7D6Fcyp+BbbXB1fa9
+5qpD4bp1ZoDP2R2zca2uwwfd3DTWPbmwFMNqs2D7d0hgX71Vg9DCAwExFjoeRo44
+cCE9kakZtE3kT/tiH6SpYpnBa3dizxTmiY48z212Pw813SSXSPMN1myx5sMJof5I
+whJNQhSQOw6WHw5swZJZT4FkzxjQMrTWdF6r0d5EU9K2WWk5DTwq4QaysplB5l0H
+8qm+fnC6xI+2qgqMO9xqc6qMtHHICXtdUOup6wj/bdeo7bAQdVDyKlFKiYivDXvO
+RJNp2cwsBgxU+qdrtOLp7/j/0R3tUqWb
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAko4tM0Q+/TwhWaF+S/perV1PqmSLvqCCkntSnB1RKxOxYMo7
+ajVzvCfMSN+FhD4K2US9tjIvc7t/wBo7XlYl2LndIy5ZL5B8IqzpvSzKZhuKdJeV
+kZvihlgVKuh1LEf4by75NTHJ5peBMejAxYCuZNGLYuC0jBfDAclrB+EUPGOLdxKY
+sCvlhAwBpdTKereDQMQAYxp46XKfU+QnZl8mxfZ2L6xx4pv4orXFP7a9RKy+ci6Y
+EJioyFTrLdaMFYrpauY5yyV1ajghyDLPBVGVmK65ih7pmzFniczJN1h5sAs+qCS3
+xBVeqSByNYrwXXye6pYgCfUhJOOPvcfhyfNi/QIDAQABAoIBADqGMkClwS2pJHxB
+hEjc+4/pklWt/ywpttq+CpgzEOXN4GiRebaJD+WUUvzK3euYRwbKb6PhWJezyWky
+UID0j/qDBm71JEJdRWUnfdPAnja2Ss0Sd3UFNimF5TYUTC5ZszjbHkOC1WiTGdGP
+a+Oy5nF2SF4883x6RLJi963W0Rjn3jIW9LoLeTgm9bjWXg3iqonCo3AjREdkR/SG
+BZaCvulGEWl/A3a7NmW5EGGNUMvzZOxrqQz4EX+VnYdb7SPrH3pmQJyJpAqUlvD5
+y7pO01fI0wg9kOWiIR0vd3Gbm9NaFmlH9Gr2oyan3CWt1h1gPzkH/V17rZzVYb5L
+RnjLdyECgYEA6X16A5Gpb5rOVR/SK/JZGd+3z52+hRR8je4WhXkZqRZmbn2deKha
+LKZi1eVl11t8zitLg/OSN1uZ/873iESKtp/R6vcGcriUCd87cDh7KTyW/7ZW5jdj
+o6Y3Liai3Xrf6dL+V2xYw964Map9oK9qatYw/L+Ke6b9wbGi+hduf1kCgYEAoK8n
+pzctajS3Ntmk147n4ZVtcv78nWItBNH2B8UaofdkBlSRyUURsEY9nA34zLNWI0f3
+k59+cR13iofkQ0rKqJw1HbTTncrSsFqptyEDt23iWSmmaU3/9Us8lcNGqRm7a35V
+Km0XBFLnE0mGFGFoTpNt8oiR4WGASJPi482xkEUCgYEAwPmQn2SDCheDEr2zAdlR
+pN3O2EwCi5DMBK3TdUsKV0KJNCajwHY72Q1HQItQ6XXWp7sGta7YmOIfXFodIUWs
+85URdMXnUWeWCrayNGSp/gHytrNoDOuYcUfN8VnDX5PPfjyBM5X7ox7vUzUakXSJ
+WnVelXZlKR9yOOTs0xAMpjkCgYAbF61N6mXD5IOHwgajObsrM/CyVP/u4WDJ0UT0
+Zm1pJbc9wgCauQSUfiNhLpHmoc5CQJ4jy96b3+YJ+4OnPPMSntPt4FFV557CkWbQ
+M8bWpLZnZjhixP4FM9xRPA2r8WTCaRifAKnC1t+TRvBOe2YE6aK+I/zEzZW9pwG4
+ezQXKQKBgQAIBSJLa6xWbfbzqyPsvmRNgiEjIamF7wcb1sRjgqWM6sCzYwYv8f5v
+9C4YhNXEn+c5V2KevgYeg6iPSQuzEAfJx64QV7JD8kEBf5GNETnuW45Yg7KwKPD6
+ZCealfpy/o9iiNqbWqDNND91pj2/g5oZnac3misJg5tGCJbJsBFXag==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/mockkrb5.conf b/src/mongo/gotools/test/qa-tests/jstests/libs/mockkrb5.conf
new file mode 100644
index 00000000000..0f004f2de8a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/mockkrb5.conf
@@ -0,0 +1,13 @@
+[libdefaults]
+ default_realm = 10GEN.ME
+
+[realms]
+ 10GEN.ME = {
+ kdc = kdc.10gen.me
+ admin_server = kdc.10gen.me
+ default_domain = 10gen.me
+ }
+
+[domain_realm]
+ .10gen.me = 10GEN.ME
+ 10gen.me = 10GEN.ME
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/mockservice.keytab b/src/mongo/gotools/test/qa-tests/jstests/libs/mockservice.keytab
new file mode 100644
index 00000000000..3529d5fcbc6
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/mockservice.keytab
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/mockuser.keytab b/src/mongo/gotools/test/qa-tests/jstests/libs/mockuser.keytab
new file mode 100644
index 00000000000..35fd2ff06e7
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/mockuser.keytab
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/mongostat.js b/src/mongo/gotools/test/qa-tests/jstests/libs/mongostat.js
new file mode 100644
index 00000000000..d93ff4b8bee
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/mongostat.js
@@ -0,0 +1,114 @@
+var exitCodeSuccess = 0;
+var exitCodeErr = 1;
+// Go reserves exit code 2 for its own use.
+var exitCodeBadOptions = 3;
+var exitCodeStopped = 4;
+
+// NOTE: On Windows, stopMongoProgramByPid doesn't terminate a process in a
+// way that lets it control its exit code.
+if (_isWindows()) {
+ exitCodeStopped = exitCodeErr;
+}
+
+var rowRegex = /^sh\d+\|\s/;
+// portRegex finds the port on a line which has enough whitespace-delimited
+// values to be considered a stat line and not an error message
+var portRegex = /^sh\d+\|\s+\S+:(\d+)(\s+\S+){16}/;
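+// For illustration, a hypothetical stat line portRegex would match (sixteen
+// whitespace-delimited fields follow the host:port token; values made up):
+//   sh1| localhost:27017 *0 *0 *0 *0 0 1|0 0 1.07G 296M 0 0|0 0|0 79b 87b 1 10:12:30
+// matches[1] is then "27017".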
+
+function statRows() {
+ return rawMongoProgramOutput()
+ .split("\n")
+ .filter(function(r) {
+ return r.match(rowRegex);
+ })
+ .map(function(r) {
+ return r.replace(/^sh\d+\| /, "");
+ });
+}
+
+function statFields(row) {
+ return row.split(/\s/).filter(function(s) {
+ return s !== "";
+ });
+}
+
+function getLatestChunk() {
+ var output = rawMongoProgramOutput();
+ // mongostat outputs a blank line between each set of stats when there are
+ // multiple hosts; we want just one chunk of stat lines
+ var lineChunks = output.split("| \n");
+ if (lineChunks.length === 1) {
+ return lineChunks[0];
+ }
+ return lineChunks[lineChunks.length - 2];
+}
+
+function latestPortCounts() {
+ var portCounts = {};
+ getLatestChunk().split("\n").forEach(function(r) {
+ var matches = r.match(portRegex);
+ if (matches === null) {
+ return;
+ }
+ var port = matches[1];
+ if (!portCounts[port]) {
+ portCounts[port] = 0;
+ }
+ portCounts[port]++;
+ });
+ return portCounts;
+}
+
+function hasPort(port) {
+ port = String(port);
+ return function() {
+ return latestPortCounts()[port] >= 1;
+ };
+}
+
+function lacksPort(port) {
+ port = String(port);
+ return function() {
+ return latestPortCounts()[port] === undefined;
+ };
+}
+
+function hasOnlyPorts(expectedPorts) {
+ expectedPorts = expectedPorts.map(String);
+ return function() {
+ var portCounts = latestPortCounts();
+ for (var port in portCounts) {
+ if (expectedPorts.indexOf(port) === -1) {
+ return false;
+ }
+ }
+ for (var i in expectedPorts) {
+ if (portCounts[expectedPorts[i]] !== 1) {
+ return false;
+ }
+ }
+ return true;
+ };
+}
+
+function statCheck(args, checker) {
+ clearRawMongoProgramOutput();
+  var pid = startMongoProgramNoConnect.apply(null, args);
+  try {
+    assert.soon(checker, "statCheck wait timed out");
+ return true;
+ } catch (e) {
+ return false;
+ } finally {
+ stopMongoProgramByPid(pid);
+ }
+}
+
+function discoverTest(ports, connectHost) {
+ return statCheck(["mongostat",
+ "--host", connectHost,
+ "--noheaders",
+ "--discover"],
+ hasOnlyPorts(ports));
+}
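+
+// A minimal usage sketch (ports and connect host below are hypothetical):
+//   var ok = discoverTest([20000, 20001], "localhost:20000");
+//   assert(ok, "mongostat --discover should report stats for both ports");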
+
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/not_yet_valid.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/not_yet_valid.pem
new file mode 100644
index 00000000000..7c021c0becd
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/not_yet_valid.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDhTCCAm2gAwIBAgIBETANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMjAwNzE3MTYwMDAwWhcNMjUwNzE3MTYwMDAwWjBzMRYwFAYDVQQD
+Ew1ub3RfeWV0X3ZhbGlkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNVBAoTB01vbmdv
+REIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQsw
+CQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM2gF+Fo
+CeBKVlPyDAaEA7cjK75CxnzQy+oqw1j/vcfe/CfKL9MvDDXauR/9v1RRlww5zlxQ
+XJJtcMJtxN1EpP21cHrHCpJ/fRsCdMfJdD9MO6gcnclEI0Odwy5YI/57rAgxEuDC
+7z4d+M6z7PLq8DIwvRuhAZVTszeyTsCCkwfTJ/pisD2Ace75pS37t/ttQp+kQ+Vl
+QrfccHYxrScQ9i0JqBfrTULDl6ST76aINOaFKWqrLLkRUvE6pEkL/iP6xXUSKOsm
+uyc0yb0PK5Y/IVdrzwWUkabWEM27RAMH+CAx2iobk6REj0fsGySBzT2CaETZPjck
+vn/LYKqr+CvYjc8CAwEAAaMjMCEwHwYDVR0RBBgwFoIJbG9jYWxob3N0ggkxMjcu
+MC4wLjEwDQYJKoZIhvcNAQEFBQADggEBADw37jpmhj/fgCZdF1NrDKLmWxb4hovQ
+Y9PRe6GsBOc1wH8Gbe4UkYAE41WUuT3xW9YpfCHLXxC7da6dhaBISWryX7n72abM
+xbfAghV3je5JAmC0E/OzQz8tTgENxJN/c4oqCQ9nVOOLjwWiim5kF0/NY8HCc/Sg
+OG9IdseRX72CavDaPxcqR9/5KKY/pxARMeyy3/D0FIB1Fwu5h9vjHEi5fGOqcizf
+S1KHfzAmTxVtjw6HWRGKmkPX0W0/lURWVkKRxvC8KkJIeKx3fl9U1PqCw0AVi5d/
+whYn4qHNFFp4OiVzXq3b5YoBy0dlHUePCIPT2GkGlV4NQKosZMJUkKo=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAzaAX4WgJ4EpWU/IMBoQDtyMrvkLGfNDL6irDWP+9x978J8ov
+0y8MNdq5H/2/VFGXDDnOXFBckm1wwm3E3USk/bVwescKkn99GwJ0x8l0P0w7qByd
+yUQjQ53DLlgj/nusCDES4MLvPh34zrPs8urwMjC9G6EBlVOzN7JOwIKTB9Mn+mKw
+PYBx7vmlLfu3+21Cn6RD5WVCt9xwdjGtJxD2LQmoF+tNQsOXpJPvpog05oUpaqss
+uRFS8TqkSQv+I/rFdRIo6ya7JzTJvQ8rlj8hV2vPBZSRptYQzbtEAwf4IDHaKhuT
+pESPR+wbJIHNPYJoRNk+NyS+f8tgqqv4K9iNzwIDAQABAoIBAFWTmjyyOuIArhrz
+snOHv7AZUBw32DmcADGtqG1Cyi4DrHe22t6ORwumwsMArP8fkbiB2lNrEovSRkp0
+uqjH5867E1vVuJ2tt1hlVkrLmbi6Nl3JwxU/aVm7r7566kgAGmGyYsPt/PmiKamF
+Ekkq49pPlHSKNol6My0r5UCTVzO6uwW7dAa4GOQRI7bM7PVlxRVVeNzPH3yOsTzk
+smrkRgf8HbjtY7m/EHG281gu14ZQRCqzLshO2BtWbkx9dMXnNU5dRRaZ8Pe8XN0Z
+umsStcX6So6VFAqlwknZTi1/sqyIuQLfE+S9DocVQkvKFUgKpFddK8Nmqc8xPCKt
+UwR9hEECgYEA9kZ5KmUbzxQrF8Kn9G18AbZ/Cf6rE9fhs/J8OGcuuJ9QTjPO7pxV
+T7lGrIOX3dVu3+iHrYXZUZv+UTOePWx+ghqJ8ML7RdVsxAWMqh+1J0eBJKIdc9mt
+0hGkLEyyBbAlfNmvw8JugTUeZH2gA+VK9HoMTAjD+LvH164rrktauKECgYEA1b6z
+lZypAbAqnuCndcetcgatdd/bYNH5WWTgdZHqInt3k94EsUEHFNMQUbO+FNkOJ4qJ
+Jp7xrqkOUX+MPrzV5XYVapamlht9gvUtyxGq7DYndlq4mIsN5kReH++lqONBnWoG
+ZlbxvadkvPo+bK003hsl+E4F8X7xUssGGLvygG8CgYEAm/yLJkUgVgsqOER86R6n
+mtYipQv/A/SK6tU9xOPl/d46mS3LderjRjnN/9rhyAo1zfCUb14GBeDONlSBd9pO
+Ts3MbQiy6sqBt67kJ6UpspVhwPhFu2k25YVy/PQfFec591hSMaXnJEOm2nOPdKg4
+z5y2STqMFfGqZHvXAvCLp8ECgYA8oVGTmNKf9fbBBny5/iAG/jnp+8vg1O7kGqdI
+8lD14wvyV8IA/a8iixRP+Kpsg31uXe+1ktR/dNjo6UNA8JPD+RDuITmzzqx1n1KU
+DbjsNBhRjD5cluUkcjQ43uOg2oXcPxz9nqAH6hm7OUjHzwH2FsFYg9lPvXB6ybg6
+/+Uz5QKBgBxvTtLsZ3Cvvb3qezn4DdpLjlsrT6HWaTGqwEx8NYVBTFX/lT8P04tv
+NqFuQsDJ4gw0AZF7HqF49qdpnHEJ8tdHgBc/xDLFUMuKjON4IZtr0/j407K6V530
+m4q3ziHOu/lORDcZTz/YUjEzT8r7Qiv7QusWncvIWEiLSCC2dvvb
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/parallelTester.js b/src/mongo/gotools/test/qa-tests/jstests/libs/parallelTester.js
new file mode 100644
index 00000000000..a384d4eb59c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/parallelTester.js
@@ -0,0 +1,268 @@
+/**
+ * The ParallelTester class is used to run more than one test concurrently
+ */
+
+if (typeof _threadInject !== "undefined") {
+ // print( "fork() available!" );
+
+ Thread = function() {
+ this.init.apply(this, arguments);
+ };
+ _threadInject(Thread.prototype);
+
+ ScopedThread = function() {
+ this.init.apply(this, arguments);
+ };
+ ScopedThread.prototype = new Thread(function() {});
+ _scopedThreadInject(ScopedThread.prototype);
+
+ fork = function() {
+ var t = new Thread(function() {});
+ Thread.apply(t, arguments);
+ return t;
+ };
+
+ // Helper class to generate a list of events which may be executed by a ParallelTester
+ EventGenerator = function(me, collectionName, mean, host) {
+ this.mean = mean;
+ if (host === undefined) {
+ host = db.getMongo().host;
+ }
+ this.events = [me, collectionName, host];
+ };
+
+ EventGenerator.prototype._add = function(action) {
+ this.events.push([Random.genExp(this.mean), action]);
+ };
+
+ EventGenerator.prototype.addInsert = function(obj) {
+ this._add("t.insert( " + tojson(obj) + " )");
+ };
+
+ EventGenerator.prototype.addRemove = function(obj) {
+ this._add("t.remove( " + tojson(obj) + " )");
+ };
+
+ EventGenerator.prototype.addUpdate = function(objOld, objNew) {
+ this._add("t.update( " + tojson(objOld) + ", " + tojson(objNew) + " )");
+ };
+
+ EventGenerator.prototype.addCheckCount = function(count, query, shouldPrint, checkQuery) {
+ query = query || {};
+ shouldPrint = shouldPrint || false;
+ checkQuery = checkQuery || false;
+ var action = "assert.eq( " + count + ", t.count( " + tojson(query) + " ) );";
+ if (checkQuery) {
+ action += " assert.eq( " + count + ", t.find( " + tojson(query) + " ).toArray().length );";
+ }
+ if (shouldPrint) {
+ action += " print( me + ' ' + " + count + " );";
+ }
+ this._add(action);
+ };
+
+ EventGenerator.prototype.getEvents = function() {
+ return this.events;
+ };
+
+ EventGenerator.dispatch = function() {
+ var args = argumentsToArray(arguments);
+ var me = args.shift();
+ var collectionName = args.shift();
+ var host = args.shift();
+ var m = new Mongo(host);
+ var t = m.getDB("test")[collectionName];
+ args.forEach(function(v) {
+ sleep(v[0]);
+ eval(v[1]); // eslint-disable-line no-eval
+ });
+ };
+
+ // Helper class for running tests in parallel. It assembles a set of tests
+  // and then calls assert.parallelTests to run them.
+ ParallelTester = function() {
+ assert.neq(db.getMongo().writeMode(), "legacy", "wrong shell write mode");
+ this.params = [];
+ };
+
+ ParallelTester.prototype.add = function(fun, args) {
+ args = args || [];
+ args.unshift(fun);
+ this.params.push(args);
+ };
+
+ ParallelTester.prototype.run = function(msg, newScopes) {
+ newScopes = newScopes || false;
+ assert.parallelTests(this.params, msg, newScopes);
+ };
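+
+  // Usage sketch (collection name, mean, and event counts are illustrative):
+  //   var eg = new EventGenerator("me1", "coll1", 20);
+  //   eg.addInsert({_id: 1});
+  //   eg.addCheckCount(1, {_id: 1});
+  //   var pt = new ParallelTester();
+  //   pt.add(EventGenerator.dispatch, eg.getEvents());
+  //   pt.run("one of the event generators failed");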
+
+ // creates lists of tests from jstests dir in a format suitable for use by
+ // ParallelTester.fileTester. The lists will be in random order.
+ // n: number of lists to split these tests into
+ ParallelTester.createJstestsLists = function(n) {
+ var params = [];
+ for (var i = 0; i < n; ++i) {
+ params.push([]);
+ }
+
+ var makeKeys = function(a) {
+ var ret = {};
+ a.forEach(function(v) {
+ ret[v] = 1;
+ });
+ return ret;
+ };
+
+ // some tests can't run in parallel with most others
+ var skipTests = makeKeys([
+ "dbadmin.js",
+ "repair.js",
+ "cursor8.js",
+ "recstore.js",
+ "extent.js",
+ "indexb.js",
+
+ // tests turn on profiling
+ "profile1.js",
+ "profile3.js",
+ "profile4.js",
+ "profile5.js",
+
+ "mr_drop.js",
+ "mr3.js",
+ "indexh.js",
+ "apitest_db.js",
+ "evalb.js",
+ "evald.js",
+ "evalf.js",
+ "killop.js",
+ "run_program1.js",
+ "notablescan.js",
+ "drop2.js",
+ "dropdb_race.js",
+ "fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed.
+ "bench_test1.js",
+ "padding.js",
+ "queryoptimizera.js",
+ "loglong.js", // log might overflow before
+ // this has a chance to see the message
+ "connections_opened.js", // counts connections, globally
+ "opcounters_write_cmd.js",
+ "currentop.js", // SERVER-8673, plus rwlock yielding issues
+ "set_param1.js", // changes global state
+ "geo_update_btree2.js", // SERVER-11132 test disables table scans
+ "update_setOnInsert.js", // SERVER-9982
+ ]);
+
+ var parallelFilesDir = "jstests/core";
+
+ // some tests can't be run in parallel with each other
+ var serialTestsArr = [
+ parallelFilesDir + "/fsync.js",
+ parallelFilesDir + "/auth1.js",
+
+ // These tests expect the profiler to be on or off at specific points
+ // during the test run.
+ parallelFilesDir + "/cursor6.js",
+ parallelFilesDir + "/profile2.js",
+ parallelFilesDir + "/updatee.js"
+ ];
+ var serialTests = makeKeys(serialTestsArr);
+
+ // prefix the first thread with the serialTests
+ // (which we will exclude from the rest of the threads below)
+ params[0] = serialTestsArr;
+ var files = listFiles(parallelFilesDir);
+ files = Array.shuffle(files);
+
+ i = 0;
+ files.forEach(function(x) {
+ if ((/[\/\\]_/.test(x.name)) ||
+ (!/\.js$/.test(x.name)) ||
+ (x.name.match(parallelFilesDir + "/(.*\\.js)")[1] in skipTests) ||
+ (x.name in serialTests)) {
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+ // add the test to run in one of the threads.
+ params[i % n].push(x.name);
+ ++i;
+ });
+
+ // randomize ordering of the serialTests
+ params[0] = Array.shuffle(params[0]);
+
+    params.forEach(function(param, j) {
+      param.unshift(j);
+    });
+
+ return params;
+ };
+
+ // runs a set of test files
+ // first argument is an identifier for this tester, remaining arguments are file names
+ ParallelTester.fileTester = function() {
+ var args = argumentsToArray(arguments);
+ var suite = args.shift();
+ args.forEach(function(x) {
+ print(" S" + suite + " Test : " + x + " ...");
+ var time = Date.timeFunc(function() {
+ load(x);
+ }, 1);
+ print(" S" + suite + " Test : " + x + " " + time + "ms");
+ });
+ };
+
+ // params: array of arrays, each element of which consists of a function followed
+ // by zero or more arguments to that function. Each function and its arguments will
+ // be called in a separate thread.
+ // msg: failure message
+ // newScopes: if true, each thread starts in a fresh scope
+ assert.parallelTests = function(params, msg, newScopes) {
+ newScopes = newScopes || false;
+ var wrapper = function(fun, argv) {
+ // TODO: this doesn't need to use eval
+ eval( // eslint-disable-line no-eval
+ "var z = function() {" +
+ "var __parallelTests__fun = " + fun.toString() + ";" +
+ "var __parallelTests__argv = " + tojson(argv) + ";" +
+ "var __parallelTests__passed = false;" +
+ "try {" +
+ "__parallelTests__fun.apply( 0, __parallelTests__argv );" +
+ "__parallelTests__passed = true;" +
+ "} catch ( e ) {" +
+ "print('');" +
+ "print( '********** Parallel Test FAILED: ' + tojson(e) );" +
+ "print('');" +
+ "}" +
+ "return __parallelTests__passed;" +
+ "}"
+ );
+ return z;
+ };
+ var runners = [];
+ params.forEach(function(param) {
+ var test = param.shift();
+ var t;
+ if (newScopes) {
+ t = new ScopedThread(wrapper(test, param));
+ } else {
+ t = new Thread(wrapper(test, param));
+ }
+ runners.push(t);
+ });
+
+ runners.forEach(function(x) {
+ x.start();
+ });
+ var nFailed = 0;
+ // v8 doesn't like it if we exit before all threads are joined (SERVER-529)
+ runners.forEach(function(x) {
+ if (!x.returnData()) {
+ ++nFailed;
+ }
+ });
+ assert.eq(0, nFailed, msg);
+ };
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/password_protected.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/password_protected.pem
new file mode 100644
index 00000000000..25e47bc2402
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/password_protected.pem
@@ -0,0 +1,51 @@
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBCTANBgkqhkiG9w0BAQUFADB4MRswGQYDVQQDExJwYXNz
+d29yZF9wcm90ZWN0ZWQxDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29E
+QjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJ
+BgNVBAYTAlVTMB4XDTE0MDcxNzE2MDAwMFoXDTIwMDcxNzE2MDAwMFoweDEbMBkG
+A1UEAxMScGFzc3dvcmRfcHJvdGVjdGVkMQ8wDQYDVQQLEwZLZXJuZWwxEDAOBgNV
+BAoTB01vbmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5l
+dyBZb3JrMQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBALT4r3Hcou2auIOHeihBSjk4bKQTVqI6r/stnkul359SRfKuzVA9gMQaRRDi
+MJoxczHJzS2FX+wElzBt2EUhfu3qpUJ4gJw7H4WjLx+mNnj/+6b4HUO4eRzH5hTE
+A+qgDH40qYjFDEjiARvybWo3IlDLeI/uFwlyUj5PZBUBc1LBBzNtCBfJ2MmHLhIx
+jzTFhkJZll673LL6BPHtJclXCazqKUZDLqObW4Ei6X4hdBOdC8v8Q6GMgC4BxLe0
+wsOpKYYeM3il4BtfiqDQB5ZPG0lgo1Y7OOyFHFXBA7oNkK8lykhdyH4iLt5L9mWo
+VKyZ79VqSODFuCqWo8n8kUTgA/0CAwEAAaMQMA4wDAYDVR0TBAUwAwEB/zANBgkq
+hkiG9w0BAQUFAAOCAQEAntxk8a0HcuPG8Fdjckp6WL+HKQQnUKdvSk06rPF0SHpN
+Ma4eZcaumROdtAYnPKvtbpq3DRCQlj59dlWPksEcYiXqf56TgcyAz5K5g5z9RbFi
+ArvAXJNRcDz1080NWGBUTPYyiKWR3PhtlYhJZ4r7fQIWLv4mifXHViw2roXXhsAY
+ubk9HOtrqE7x6NJXgR24aybxqI6TfAKfM+LJNtMwMFrPC+GHnhqMOs/jHJS38NIB
+TrKA63TdpYUroVu23/tGLQaJz352qgF4Di91RkUfnI528goj57pX78H8KRsSNVvs
+KHVNrxtZIez+pxxjBPnyfCH81swkiAPG9fdX+Hcu5A==
+-----END CERTIFICATE-----
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFHzBJBgkqhkiG9w0BBQ0wPDAbBgkqhkiG9w0BBQwwDgQI6y3b7IxcANECAggA
+MB0GCWCGSAFlAwQBAgQQrTslOKC0GZZwq48v7niXYQSCBNBNKsTN7fyw60/EEDH0
+JUgxL83Wfb7pNP97/lV5qiclY1mwcKz44kXQaesFTzhiwzAMOpbI/ijEtsNU25wV
+wtTgjAC3Em/+5/ygrmAu7hgacIRssspovmsgw029E9iOkyBd1VIrDVMi7HLHf0iU
+2Zq18QF20az2pXNMDipmVJkpc9NvjSdqka5+375pJuisspEWCDBd11K10jzCWqB5
+q3Rm1IIeq+mql6KT1rJcUoeE0facDc9GDYBiF/MfIKQ3FrZy/psqheCfL1UDUMyc
+mnm9GJO5+bCuHkg8ni0Zo5XXsf2VEFt0yt6lSucoOP43flucQaHnFKcn+5DHjDXv
+S6Eb5wEG9qWtzwWy/9DfRbkj6FxUgT3SFgizo/uLmdqFCJCnYkHUD1OuYCDmoIXP
+VTinwgK4lO/vrGfoPQrgJmdlnwHRWYjlB8edMCbmItaj2Esh3FBS12y976+UT0Sk
+8n5HsZAEYScDyNArVhrLUZRgF+r+bgZ28TDFO0MISPCAbZjhvq6lygS3dEmdTUW3
+cFDe1deNknWxZcv4UpJW4Nq6ckxwXBfTB1VFzjp7/vXrK/Sd9t8zi6vKTO8OTqc4
+KrlLXBgz0ouP/cxhYDykUrKXE2Eb0TjeAN1txZWo3fIFzXUvDZCphQEZNUqsFUxH
+86V2lwqVzKrFq6UpTgKrfTw/2ePQn9dQgd7iFWDTWjRkbzA5aAgTSVP8xQRoIOeQ
+epXtP9202kEz3h28SZYK7QBOTTX9xNmV/dzDTsi9nXZ6KtsP/aGFE5hh95jvESx/
+wlOBAPW4HR33rSYalvQPE7RjjLZHOKuYIllUBGlTOfgdA+WUXR3KxiLNPdslPBPV
++O6aDyerhWoQwE7TFwhP/FpxL/46hOu4iq4fgqfjddBTq8z5jG3c3zzogDjoDzBF
+LEQDcbenUCGbEQ7zxXsXtr3QinJ+aAejDO38hp1h9ROb5LF53/9H2j/16nby/jPX
+7kp2weRSKGJ0B6AVuS9pTsQz4+E3icsIgBWSU6qtcUz2GO2QxnFuvT9LEVnyMNN2
+IKMIEKi2FsUMddHGXLULTANlzUMocdHrd5j81eqcFPhMOFOiHpgwiwxqZyBYOLRl
+Fe7x5dLVWoLgjJagZj8uYnJbExDsfFLjEx8p4Z+rejJIC5CqZLbz9sDgCtIL+92k
++x4mlT1Rfmz9pU+RQqik83nFFRBGWxeW9iWWEgocWtmezvnK6E241v78zkqxNkvF
+JJo7BsBw7DiEHEfLhBZYuqV2q6+kwqgYrzyGIwAJkBGrkYfalVzgR+3/uN04h005
+M3jQRpSkDVGYr3JKEAlh3Sc+JD9VPbu6/RXNwy5mY67UCgWGaFwRqJE3DC9aKfNC
+OET8m8+8oQgFzhw3pNpENsgwR+Sx3K4q0GI3YwxT02pieBFNQaw53O3B3TtoCjkk
+UsuyIWqcLonwo4I3z0kjU3gEFN+0m4E4/A1DNt0J3rsKN+toCk1FqbxQg9xTZzXu
+hYmA3HMMwugzXmCanqBhmMsniPg+dRxCIfiHZhLuEpjKxZWcMWcW4M6l/wbM+LbE
+oDcTuI9ezfPTZ3xA8hNIHBT3MhuI7EJQnvKKvJDJeyX5sAtmSsSFqhEr8QZD8RgV
+5H9eOyUdfcWxLlstcq982V0oGg==
+-----END ENCRYPTED PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/server.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/server.pem
new file mode 100644
index 00000000000..df2b49163d6
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/server.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDfjCCAmagAwIBAgIBBzANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu
+ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw
+FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE
+BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBsMQ8wDQYDVQQD
+EwZzZXJ2ZXIxDzANBgNVBAsTBktlcm5lbDEQMA4GA1UEChMHTW9uZ29EQjEWMBQG
+A1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsxCzAJBgNVBAYT
+AlVTMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp76KJeDczBqjSPJj
+5f8DHdtrWpQDK9AWNDlslWpi6+pL8hMqwbX0D7hC2r3kAgccMyFoNIudPqIXfXVd
+1LOh6vyY+jveRvqjKW/UZVzZeiL4Gy4bhke6R8JRC3O5aMKIAbaiQUAI1Nd8LxIt
+LGvH+ia/DFza1whgB8ym/uzVQB6igOifJ1qHWJbTtIhDKaW8gvjOhv5R3jzjfLEb
+R9r5Q0ZyE0lrO27kTkqgBnHKPmu54GSzU/r0HM3B+Sc/6UN+xNhNbuR+LZ+EvJHm
+r4de8jhW8wivmjTIvte33jlLibQ5nYIHrlpDLEwlzvDGaIio+OfWcgs2WuPk98MU
+tht0IQIDAQABoyMwITAfBgNVHREEGDAWgglsb2NhbGhvc3SCCTEyNy4wLjAuMTAN
+BgkqhkiG9w0BAQUFAAOCAQEANoYxvVFsIol09BQA0fwryAye/Z4dYItvKhmwB9VS
+t99DsmJcyx0P5meB3Ed8SnwkD0NGCm5TkUY/YLacPP9uJ4SkbPkNZ1fRISyShCCn
+SGgQUJWHbCbcIEj+vssFb91c5RFJbvnenDkQokRvD2VJWspwioeLzuwtARUoMH3Y
+qg0k0Mn7Bx1bW1Y6xQJHeVlnZtzxfeueoFO55ZRkZ0ceAD/q7q1ohTXi0vMydYgu
+1CB6VkDuibGlv56NdjbttPJm2iQoPaez8tZGpBo76N/Z1ydan0ow2pVjDXVOR84Y
+2HSZgbHOGBiycNw2W3vfw7uK0OmiPRTFpJCmewDjYwZ/6w==
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAp76KJeDczBqjSPJj5f8DHdtrWpQDK9AWNDlslWpi6+pL8hMq
+wbX0D7hC2r3kAgccMyFoNIudPqIXfXVd1LOh6vyY+jveRvqjKW/UZVzZeiL4Gy4b
+hke6R8JRC3O5aMKIAbaiQUAI1Nd8LxItLGvH+ia/DFza1whgB8ym/uzVQB6igOif
+J1qHWJbTtIhDKaW8gvjOhv5R3jzjfLEbR9r5Q0ZyE0lrO27kTkqgBnHKPmu54GSz
+U/r0HM3B+Sc/6UN+xNhNbuR+LZ+EvJHmr4de8jhW8wivmjTIvte33jlLibQ5nYIH
+rlpDLEwlzvDGaIio+OfWcgs2WuPk98MUtht0IQIDAQABAoIBACgi1ilECXCouwMc
+RDzm7Jb7Rk+Q9MVJ79YlG08Q+oRaNjvAzE03PSN5wj1WjDTUALJXPvi7oy82V4qE
+R6Q6Kvbv46aUJpYzKFEk2dw7ACpSLa1LNfjGNtMusnecA/QF/8bxLReRu8s5mBQn
+NDnZvCqllLbfjNlAvsF+/UIn5sqFZpAZPMtPwkTAeh5ge8H9JvrG8y8aXsiFGAhV
+Z7tMZyn8wPCUrRi14NLvVB4hxM66G/tuTp8r9AmeTU+PV+qbCnKXd+v0IS52hvX9
+z75OPfAc66nm4bbPCapb6Yx7WaewPXXU0HDxeaT0BeQ/YfoNa5OT+ZOX1KndSfHa
+VhtmEsECgYEA3m86yYMsNOo+dkhqctNVRw2N+8gTO28GmWxNV9AC+fy1epW9+FNR
+yTQXpBkRrR7qrd5mF7WBc7vAIiSfVs021RMofzn5B1x7jzkH34VZtlviNdE3TZhx
+lPinqo0Yy3UEksgsCBJFIofuCmeTLk4ZtqoiZnXr35RYibaZoQdUT4kCgYEAwQ6Y
+xsKFYFks1+HYl29kR0qUkXFlVbKOhQIlj/dPm0JjZ0xYkUxmzoXD68HrOWgz7hc2
+hZaQTgWf+8cRaZNfh7oL+Iglczc2UXuwuUYguYssD/G6/ZPY15PhItgCghaU5Ewy
+hMwIJ81NENY2EQTgk/Z1KZitXdVJfHl/IPMQgdkCgYASdqkqkPjaa5dDuj8byO8L
+NtTSUYlHJbAmjBbfcyTMG230/vkF4+SmDuznci1FcYuJYyyWSzqzoKISM3gGfIJQ
+rYZvCSDiu4qGGPXOWANaX8YnMXalukGzW/CO96dXPB9lD7iX8uxKMX5Q3sgYz+LS
+hszUNHWf2XB//ehCtZkKAQKBgQCxL2luepeZHx82H9T+38BkYgHLHw0HQzLkxlyd
+LjlE4QCEjSB4cmukvkZbuYXfEVEgAvQKVW6p/SWhGkpT4Gt8EXftKV9dyF21GVXQ
+JZnhUOcm1xBsrWYGLXYi2agrpvgONBTlprERfq5tdnz2z8giZL+RZswu45Nnh8bz
+AcKzuQKBgQCGOQvKvNL5XKKmws/KRkfJbXgsyRT2ubO6pVL9jGQG5wntkeIRaEpT
+oxFtWMdPx3b3cxtgSP2ojllEiISk87SFIN1zEhHZy/JpTF0GlU1qg3VIaA78M1p2
+ZdpUsuqJzYmc3dDbQMepIaqdW4xMoTtZFyenUJyoezz6eWy/NlZ/XQ==
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/servers.js b/src/mongo/gotools/test/qa-tests/jstests/libs/servers.js
new file mode 100644
index 00000000000..e9a7f68fa9f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/servers.js
@@ -0,0 +1,1079 @@
+// Wrap whole file in a function to avoid polluting the global namespace
+(function() {
+ jsTestOptions = function () {
+ if (TestData) {
+ return Object.merge(_jsTestOptions, {
+ setParameters: TestData.setParameters,
+ setParametersMongos: TestData.setParametersMongos,
+ storageEngine: TestData.storageEngine,
+ wiredTigerEngineConfigString: TestData.wiredTigerEngineConfigString,
+ wiredTigerCollectionConfigString: TestData.wiredTigerCollectionConfigString,
+ wiredTigerIndexConfigString: TestData.wiredTigerIndexConfigString,
+ noJournal: TestData.noJournal,
+ noJournalPrealloc: TestData.noJournalPrealloc,
+ auth: TestData.auth,
+ keyFile: TestData.keyFile,
+ authUser: "__system",
+ authPassword: TestData.keyFileData,
+ authMechanism: TestData.authMechanism,
+ adminUser: TestData.adminUser || "admin",
+ adminPassword: TestData.adminPassword || "password",
+ useLegacyConfigServers: TestData.useLegacyConfigServers || false,
+ useLegacyReplicationProtocol: TestData.useLegacyReplicationProtocol || false,
+ enableEncryption: TestData.enableEncryption,
+ encryptionKeyFile: TestData.encryptionKeyFile,
+ auditDestination: TestData.auditDestination,
+ useSSL: TestData.useSSL,
+ minPort: TestData.minPort,
+ maxPort: TestData.maxPort,
+ });
+ }
+ return _jsTestOptions;
+ };
+
+ // Shim to allow compatibility with newer shells.
+ if (typeof stopMongod === 'undefined') {
+ stopMongod = _stopMongoProgram;
+ }
+ if (typeof startMongod === 'undefined') {
+ startMongod = function() {
+      var argArray = Array.from(arguments);
+ if (jsTestOptions().useSSL) {
+ if (argArray.indexOf('--sslMode') < 0) {
+ argArray.push(
+ '--sslMode', 'requireSSL',
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation');
+ }
+ }
+ _startMongod.apply(null, argArray);
+ };
+ }
+
+ _parsePath = function() {
+ var dbpath = "";
+ for (var i = 0; i < arguments.length; ++i) {
+ if (arguments[i] === "--dbpath") {
+ dbpath = arguments[i + 1];
+ }
+ }
+
+ if (dbpath === "") {
+ throw Error("No dbpath specified");
+ }
+
+ return dbpath;
+ };
+
+ _parsePort = function() {
+ var port = "";
+ for (var i = 0; i < arguments.length; ++i) {
+ if (arguments[i] === "--port") {
+ port = arguments[i + 1];
+ }
+ }
+
+ if (port === "") {
+ throw Error("No port specified");
+ }
+ return port;
+ };
+
+ connectionURLTheSame = function(a, b) {
+
+ if (a === b) {
+ return true;
+ }
+
+ if (!a || !b) {
+ return false;
+ }
+
+ if (a.host) {
+ return connectionURLTheSame(a.host, b);
+ }
+ if (b.host) {
+ return connectionURLTheSame(a, b.host);
+ }
+
+ if (a.name) {
+ return connectionURLTheSame(a.name, b);
+ }
+ if (b.name) {
+ return connectionURLTheSame(a, b.name);
+ }
+
+ if (a.indexOf("/") < 0 && b.indexOf("/") < 0) {
+ a = a.split(":");
+ b = b.split(":");
+
+ if (a.length !== b.length) {
+ return false;
+ }
+
+ if (a.length === 2 && a[1] !== b[1]) {
+ return false;
+ }
+
+ if (a[0] === "localhost" || a[0] === "127.0.0.1") {
+ a[0] = getHostName();
+ }
+ if (b[0] === "localhost" || b[0] === "127.0.0.1") {
+ b[0] = getHostName();
+ }
+
+ return a[0] === b[0];
+ }
+ var a0 = a.split("/")[0];
+ var b0 = b.split("/")[0];
+ return a0 === b0;
+ };
+
+ assert(connectionURLTheSame("foo", "foo"));
+ assert(!connectionURLTheSame("foo", "bar"));
+
+ assert(connectionURLTheSame("foo/a,b", "foo/b,a"));
+ assert(!connectionURLTheSame("foo/a,b", "bar/a,b"));
+
+ createMongoArgs = function(binaryName, args) {
+ var fullArgs = [binaryName];
+
+ if (args.length === 1 && isObject(args[0])) {
+ var o = args[0];
+ for (var k in o) {
+ if (o.hasOwnProperty(k)) {
+ if (k === "v" && isNumber(o[k])) {
+ var n = o[k];
+ if (n > 0) {
+ if (n > 10) {
+ n = 10;
+ }
+ var temp = "-";
+ while (n-- > 0) {
+ temp += "v";
+ }
+ fullArgs.push(temp);
+ }
+ } else {
+ fullArgs.push("--" + k);
+ if (o[k] !== "") {
+ fullArgs.push(String(o[k]));
+ }
+ }
+ }
+ }
+ } else {
+ for (var i=0; i<args.length; i++) {
+ fullArgs.push(args[i]);
+ }
+ }
+
+ return fullArgs;
+ };
+
+
+ MongoRunner = function() {};
+
+ MongoRunner.dataDir = "/data/db";
+ MongoRunner.dataPath = "/data/db/";
+ MongoRunner.usedPortMap = {};
+
+ MongoRunner.VersionSub = function(regex, version) {
+ this.regex = regex;
+ this.version = version;
+ };
+
+ // These patterns allow substituting the binary versions used for each
+ // version string to support the dev/stable MongoDB release cycle.
+ MongoRunner.binVersionSubs = [
+ new MongoRunner.VersionSub(/^latest$/, ""),
+ new MongoRunner.VersionSub(/^oldest-supported$/, "1.8"),
+ // To-be-updated when 3.0 becomes available
+ new MongoRunner.VersionSub(/^last-stable$/, "2.6"),
+ // Latest unstable and next stable are effectively the same release
+ new MongoRunner.VersionSub(/^2\.7(\..*){0,1}/, ""),
+ new MongoRunner.VersionSub(/^2\.8(\..*){0,1}/, ""),
+ new MongoRunner.VersionSub(/^3\.0(\..*){0,1}/, ""),
+ new MongoRunner.VersionSub(/^3\.1(\..*){0,1}/, ""),
+ ];
+
+ MongoRunner.getBinVersionFor = function(version) {
+
+ // If this is a version iterator, iterate the version via toString()
+ if (version instanceof MongoRunner.versionIterator.iterator) {
+ version = version.toString();
+ }
+
+ // No version set means we use no suffix, this is *different* from "latest"
+ // since latest may be mapped to a different version.
+ version = version || "";
+ version = version.trim();
+ if (version === "") {
+ return "";
+ }
+
+ // See if this version is affected by version substitutions
+ for (var i = 0; i < MongoRunner.binVersionSubs.length; i++) {
+ var sub = MongoRunner.binVersionSubs[i];
+ if (sub.regex.test(version)) {
+ version = sub.version;
+ }
+ }
+
+ return version;
+ };
+
+ MongoRunner.areBinVersionsTheSame = function(versionA, versionB) {
+ versionA = MongoRunner.getBinVersionFor(versionA);
+ versionB = MongoRunner.getBinVersionFor(versionB);
+
+ if (versionA === "" || versionB === "") {
+ return versionA === versionB;
+ }
+
+ return versionA.startsWith(versionB) ||
+ versionB.startsWith(versionA);
+ };
+
+ MongoRunner.logicalOptions = {
+ runId: true,
+ pathOpts: true,
+ remember: true,
+ noRemember: true,
+ appendOptions: true,
+ restart: true,
+ noCleanData: true,
+ cleanData: true,
+ startClean: true,
+ forceLock: true,
+ useLogFiles: true,
+ logFile: true,
+ useHostName: true,
+ useHostname: true,
+ noReplSet: true,
+ forgetPort: true,
+ arbiter: true,
+ noJournalPrealloc: true,
+ noJournal: true,
+ binVersion: true,
+ waitForConnect: true,
+ };
+
+ MongoRunner.toRealPath = function(path, pathOpts) {
+ // Replace all $pathOptions with actual values
+ pathOpts = pathOpts || {};
+ path = path.replace(/\$dataPath/g, MongoRunner.dataPath);
+ path = path.replace(/\$dataDir/g, MongoRunner.dataDir);
+    for (var key in pathOpts) { // eslint-disable-line guard-for-in
+ path = path.replace(RegExp("\\$" + RegExp.escape(key), "g"), pathOpts[key]);
+ }
+
+ // Relative path
+ // Detect Unix and Windows absolute paths
+ // as well as Windows drive letters
+ // Also captures Windows UNC paths
+ if (!path.match(/^(\/|\\|[A-Za-z]:)/)) {
+ if (path !== "" && !path.endsWith("/")) {
+ path += "/";
+ }
+
+ path = MongoRunner.dataPath + path;
+ }
+
+ return path;
+ };
+
+ MongoRunner.toRealDir = function(path, pathOpts) {
+ path = MongoRunner.toRealPath(path, pathOpts);
+ if (path.endsWith("/")) {
+ path = path.substring(0, path.length - 1);
+ }
+ return path;
+ };
+
+ MongoRunner.toRealFile = MongoRunner.toRealDir;
+
+ MongoRunner.nextOpenPort = function() {
+ if (typeof allocatePort === "function") {
+ return allocatePort();
+ }
+
+ var i = 0;
+ while (MongoRunner.usedPortMap[String(27000 + i)]) {
+ i++;
+ }
+ MongoRunner.usedPortMap[String(27000 + i)] = true;
+
+ return 27000 + i;
+ };
+
+ /**
+   * Returns an iterator object which yields successive versions on toString(),
+   * starting from a random initial position (when isRandom is true), from an
+   * array of versions.
+ *
+ * If passed a single version string or an already-existing version iterator, just returns the
+ * object itself, since it will yield correctly on toString()
+ *
+ * @param {Array.<String>}|{String}|{versionIterator}
+ */
+ MongoRunner.versionIterator = function(arr, isRandom) {
+
+ // If this isn't an array of versions, or is already an iterator, just use it
+ if (typeof arr === "string") {
+ return arr;
+ }
+ if (arr.isVersionIterator) {
+ return arr;
+ }
+
+ isRandom = isRandom || false;
+
+ // Starting pos
+ var i = isRandom ? parseInt(Random.rand() * arr.length) : 0;
+
+ return new MongoRunner.versionIterator.iterator(i, arr);
+ };
+
+ MongoRunner.versionIterator.iterator = function(i, arr) {
+
+ this.toString = function() {
+ i = (i + 1) % arr.length;
+ print("Returning next version : " + i +
+ " (" + arr[i] + ") from " + tojson(arr) + "...");
+ return arr[i];
+ };
+
+ this.isVersionIterator = true;
+
+ };
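+
+  // Usage sketch: toString() advances the iterator, so successive version
+  // lookups cycle through the array (mapped through binVersionSubs above):
+  //   var vi = MongoRunner.versionIterator(["last-stable", "latest"]);
+  //   MongoRunner.getBinVersionFor(vi); // yields "" (latest), then "2.6"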
+
+ /**
+ * Converts the args object by pairing all keys with their value and appending
+   * dash-dash (--) to the keys. The only exceptions to this rule are keys that
+   * are defined in MongoRunner.logicalOptions, which are ignored.
+ *
+ * @param {string} binaryName
+ * @param {Object} args
+ *
+ * @return {Array.<String>} an array of parameter strings that can be passed
+ * to the binary.
+ */
+ MongoRunner.arrOptions = function(binaryName, args) {
+ var fullArgs = [""];
+
+ // isObject returns true even if "args" is an array, so the else branch of this statement is
+ // dead code. See SERVER-14220.
+ if (isObject(args) || (args.length === 1 && isObject(args[0]))) {
+ var o = isObject(args) ? args : args[0];
+
+ // If we've specified a particular binary version, use that
+ if (o.binVersion && o.binVersion !== "") {
+ binaryName += "-" + o.binVersion;
+ }
+
+ // Manage legacy options
+ var isValidOptionForBinary = function(option, value) {
+ if (!o.binVersion) {
+ return true;
+ }
+
+ // Version 1.x options
+ if (o.binVersion.startsWith("1.")) {
+ return ["nopreallocj"].indexOf(option) < 0;
+ }
+
+ return true;
+ };
+
+ for (var k in o) {
+ // Make sure our logical option should be added to the array of options
+ if (!o.hasOwnProperty(k) ||
+ k in MongoRunner.logicalOptions ||
+ !isValidOptionForBinary(k, o[k])) {
+ continue;
+ }
+
+ if ((k === "v" || k === "verbose") && isNumber(o[k])) {
+ var n = o[k];
+ if (n > 0) {
+ if (n > 10) {
+ n = 10;
+ }
+ var temp = "-";
+ while (n-- > 0) {
+ temp += "v";
+ }
+ fullArgs.push(temp);
+ }
+ } else {
+ if (o[k] === undefined || o[k] === null) {
+ continue;
+ }
+ fullArgs.push("--" + k);
+ if (o[k] !== "") {
+ fullArgs.push(String(o[k]));
+ }
+ }
+ }
+ } else {
+ for (var i=0; i<args.length; i++) {
+ fullArgs.push(args[i]);
+ }
+ }
+
+ fullArgs[0] = binaryName;
+ return fullArgs;
+ };
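+
+  // For example (hypothetical options object): logical options like runId are
+  // dropped and the remaining keys become dash-dash flags:
+  //   MongoRunner.arrOptions("mongod", {port: 20000, nojournal: "", runId: rid})
+  //   // => ["mongod", "--port", "20000", "--nojournal"]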
+
+ MongoRunner.arrToOpts = function(arr) {
+ var opts = {};
+ for (var i = 1; i < arr.length; i++) {
+ if (arr[i].startsWith("-")) {
+ var opt = arr[i].replace(/^-/, "").replace(/^-/, "");
+ if (arr.length > i + 1 && !arr[i + 1].startsWith("-")) {
+ opts[opt] = arr[i + 1];
+ i++;
+ } else {
+ opts[opt] = "";
+ }
+
+ if (opt.replace(/v/g, "") === "") {
+ opts["verbose"] = opt.length;
+ }
+ }
+ }
+
+ return opts;
+ };
+
+ MongoRunner.savedOptions = {};
+
+ MongoRunner.mongoOptions = function(opts) {
+ // Don't remember waitForConnect
+ var waitForConnect = opts.waitForConnect;
+ delete opts.waitForConnect;
+
+ // If we're a mongo object
+ if (opts.getDB) {
+ opts = {restart: opts.runId};
+ }
+
+ // Initialize and create a copy of the opts
+ opts = Object.merge(opts || {}, {});
+
+ opts.restart = opts.restart || false;
+
+ // RunId can come from a number of places
+ // If restart is passed as an old connection
+ if (opts.restart && opts.restart.getDB) {
+ opts.runId = opts.restart.runId;
+ opts.restart = true;
+ } else if (isObject(opts.restart)) {
+ // If it's the runId itself
+ opts.runId = opts.restart;
+ opts.restart = true;
+ }
+
+ if (isObject(opts.remember)) {
+ opts.runId = opts.remember;
+ opts.remember = true;
+ } else if (opts.remember === undefined) {
+ // Remember by default if we're restarting
+ opts.remember = opts.restart;
+ }
+
+ // If we passed in restart : <conn> or runId : <conn>
+ if (isObject(opts.runId) && opts.runId.runId) {
+ opts.runId = opts.runId.runId;
+ }
+
+ if (opts.restart && opts.remember) {
+ opts = Object.merge(MongoRunner.savedOptions[opts.runId], opts);
+ }
+
+ // Create a new runId
+ opts.runId = opts.runId || ObjectId();
+
+ // Save the port if required
+ if (!opts.forgetPort) {
+ opts.port = opts.port || MongoRunner.nextOpenPort();
+ }
+
+ var shouldRemember = (!opts.restart && !opts.noRemember) || (opts.restart && opts.appendOptions);
+
+ // Normalize and get the binary version to use
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+
+ if (shouldRemember) {
+ MongoRunner.savedOptions[opts.runId] = Object.merge(opts, {});
+ }
+
+ // Default for waitForConnect is true
+ opts.waitForConnect = (waitForConnect === undefined || waitForConnect === null) ?
+ true : waitForConnect;
+
+ if (jsTestOptions().useSSL) {
+ opts.sslMode = opts.sslMode || "requireSSL";
+ opts.sslPEMKeyFile = opts.sslPEMKeyFile || "jstests/libs/server.pem";
+ opts.sslCAFile = opts.sslCAFile || "jstests/libs/ca.pem";
+
+ // Needed for jstest/ssl/upgrade_to_ssl.js
+ opts.sslWeakCertificateValidation = "";
+
+ // Needed for jstest/ssl/ssl_hostname_validation.js
+ opts.sslAllowInvalidHostnames = "";
+ }
+
+ if (jsTestOptions().useX509 && !opts.clusterAuthMode) {
+ opts.clusterAuthMode = "x509";
+ }
+
+ opts.port = opts.port || MongoRunner.nextOpenPort();
+ MongoRunner.usedPortMap[String(parseInt(opts.port))] = true;
+
+ opts.pathOpts = Object.merge(opts.pathOpts || {}, {
+ port: String(opts.port),
+ runId: String(opts.runId),
+ });
+
+ return opts;
+ };
+
+ /**
+ * @option {object} opts
+ *
+ * {
+ * dbpath {string}
+ * useLogFiles {boolean}: use with logFile option.
+ * logFile {string}: path to the log file. If not specified and useLogFiles
+ * is true, automatically creates a log file inside dbpath.
+ * noJournalPrealloc {boolean}
+ * noJournal {boolean}
+ * keyFile
+ * replSet
+ * oplogSize
+ * }
+ */
+ MongoRunner.mongodOptions = function(opts) {
+ opts = MongoRunner.mongoOptions(opts);
+ opts.dbpath = MongoRunner.toRealDir(opts.dbpath || "$dataDir/mongod-$port",
+ opts.pathOpts);
+ opts.pathOpts = Object.merge(opts.pathOpts, {dbpath: opts.dbpath});
+
+ if (!opts.logFile && opts.useLogFiles) {
+ opts.logFile = opts.dbpath + "/mongod.log";
+ } else if (opts.logFile) {
+ opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
+ }
+
+ if (opts.logFile !== undefined) {
+ opts.logpath = opts.logFile;
+ }
+
+ if (jsTestOptions().noJournalPrealloc || opts.noJournalPrealloc) {
+ opts.nopreallocj = "";
+ }
+
+ if (jsTestOptions().noJournal || opts.noJournal) {
+ opts.nojournal = "";
+ }
+
+ if (jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile;
+ }
+
+ if (jsTestOptions().useSSL) {
+ opts.sslMode = opts.sslMode || "requireSSL";
+ opts.sslPEMKeyFile = opts.sslPEMKeyFile || "jstests/libs/server.pem";
+ opts.sslCAFile = opts.sslCAFile || "jstests/libs/ca.pem";
+
+ // Needed for jstest/ssl/upgrade_to_ssl.js
+ opts.sslWeakCertificateValidation = "";
+
+ // Needed for jstest/ssl/ssl_hostname_validation.js
+ opts.sslAllowInvalidHostnames = "";
+ }
+
+ if (jsTestOptions().useX509 && !opts.clusterAuthMode) {
+ opts.clusterAuthMode = "x509";
+ }
+
+ if (opts.noReplSet) {
+ opts.replSet = null;
+ }
+ if (opts.arbiter) {
+ opts.oplogSize = 1;
+ }
+
+ return opts;
+ };
+
+ MongoRunner.mongosOptions = function(opts) {
+ opts = MongoRunner.mongoOptions(opts);
+
+ // Normalize configdb option to be host string if currently a host
+ if (opts.configdb && opts.configdb.getDB) {
+ opts.configdb = opts.configdb.host;
+ }
+
+ opts.pathOpts = Object.merge(opts.pathOpts, {
+ configdb: opts.configdb.replace(/:|,/g, "-")
+ });
+
+ if (!opts.logFile && opts.useLogFiles) {
+ opts.logFile = MongoRunner.toRealFile("$dataDir/mongos-$configdb-$port.log",
+ opts.pathOpts);
+ } else if (opts.logFile) {
+ opts.logFile = MongoRunner.toRealFile(opts.logFile, opts.pathOpts);
+ }
+
+ if (opts.logFile !== undefined) {
+ opts.logpath = opts.logFile;
+ }
+
+ if (jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile;
+ }
+
+ return opts;
+ };
+
+ /**
+ * Starts a mongod instance.
+ *
+ * @param {Object} opts
+ *
+ * {
+ * useHostName {boolean}: Uses hostname of machine if true
+ * forceLock {boolean}: Deletes the lock file if set to true
+ * dbpath {string}: location of db files
+ * cleanData {boolean}: Removes all files in dbpath if true
+ * startClean {boolean}: same as cleanData
+ * noCleanData {boolean}: Do not clean files (cleanData takes priority)
+ *
+ * @see MongoRunner.mongodOptions for other options
+ * }
+ *
+ * @return {Mongo} connection object to the started mongod instance.
+ *
+ * @see MongoRunner.arrOptions
+ */
+ MongoRunner.runMongod = function(opts) {
+ opts = opts || {};
+ var useHostName = false;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if (isObject(opts)) {
+ opts = MongoRunner.mongodOptions(opts);
+ fullOptions = opts;
+
+ useHostName = opts.useHostName || opts.useHostname;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
+
+ if (opts.forceLock) {
+ removeFile(opts.dbpath + "/mongod.lock");
+ }
+ if ((opts.cleanData || opts.startClean) || (!opts.restart && !opts.noCleanData)) {
+ print("Resetting db path '" + opts.dbpath + "'");
+ resetDbpath(opts.dbpath);
+ }
+
+ opts = MongoRunner.arrOptions("mongod", opts);
+ }
+
+ var mongod = MongoRunner.startWithArgs(opts, waitForConnect);
+ if (!waitForConnect) {
+      mongod = {};
+ }
+ if (!mongod) {
+ return null;
+ }
+
+ mongod.commandLine = MongoRunner.arrToOpts(opts);
+ mongod.name = (useHostName ? getHostName() : "localhost") + ":" + mongod.commandLine.port;
+ mongod.host = mongod.name;
+ mongod.port = parseInt(mongod.commandLine.port);
+ mongod.runId = runId || ObjectId();
+ mongod.dbpath = fullOptions.dbpath;
+ mongod.savedOptions = MongoRunner.savedOptions[mongod.runId];
+ mongod.fullOptions = fullOptions;
+
+ return mongod;
+ };
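+
+  // Usage sketch (options shown are illustrative; port and dbpath default
+  // automatically when omitted):
+  //   var conn = MongoRunner.runMongod({nojournal: ""});
+  //   conn.getDB("test").foo.insert({x: 1});
+  //   MongoRunner.stopMongod(conn.port);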
+
+ MongoRunner.runMongos = function(opts) {
+ opts = opts || {};
+ var useHostName = false;
+ var runId = null;
+ var waitForConnect = true;
+ var fullOptions = opts;
+
+ if (isObject(opts)) {
+ opts = MongoRunner.mongosOptions(opts);
+ fullOptions = opts;
+
+ useHostName = opts.useHostName || opts.useHostname;
+ runId = opts.runId;
+ waitForConnect = opts.waitForConnect;
+
+ opts = MongoRunner.arrOptions("mongos", opts);
+ }
+
+ var mongos = MongoRunner.startWithArgs(opts, waitForConnect);
+ if (!waitForConnect) {
+ mongos = {};
+ }
+ if (!mongos) {
+ return null;
+ }
+
+ mongos.commandLine = MongoRunner.arrToOpts(opts);
+ mongos.name = (useHostName ? getHostName() : "localhost") + ":" + mongos.commandLine.port;
+ mongos.host = mongos.name;
+ mongos.port = parseInt(mongos.commandLine.port);
+ mongos.runId = runId || ObjectId();
+ mongos.savedOptions = MongoRunner.savedOptions[mongos.runId];
+ mongos.fullOptions = fullOptions;
+
+ return mongos;
+ };
+
+ /**
+ * Kills a mongod process.
+ *
+ * @param {number} port the port of the process to kill
+ * @param {number} signal The signal number to use for killing
+ * @param {Object} opts Additional options. Format:
+ * {
+ * auth: {
+ * user {string}: admin user name
+ * pwd {string}: admin password
+ * }
+ * }
+ *
+   * Note: The auth option is required for an authenticated mongod running on
+   * Windows, since it uses the shutdown command, which requires admin credentials.
+ */
+ MongoRunner.stopMongod = function(port, signal, opts) {
+ if (!port) {
+ print("Cannot stop mongo process " + port);
+ return;
+ }
+
+ signal = signal || 15;
+
+ if (port.port) {
+ port = parseInt(port.port);
+ }
+
+ if (port instanceof ObjectId) {
+      opts = MongoRunner.savedOptions[port];
+ if (opts) {
+ port = parseInt(opts.port);
+ }
+ }
+
+ var exitCode = stopMongod(parseInt(port), parseInt(signal), opts);
+
+ delete MongoRunner.usedPortMap[String(parseInt(port))];
+
+ return exitCode;
+ };
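+
+  // Example with the auth option, as needed for an authenticated mongod on
+  // Windows (credentials below are illustrative):
+  //   MongoRunner.stopMongod(conn.port, 15, {auth: {user: "admin", pwd: "password"}});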
+
+ MongoRunner.stopMongos = MongoRunner.stopMongod;
+
+ MongoRunner.isStopped = function(port) {
+ if (!port) {
+ print("Cannot detect if process " + port + " is stopped.");
+ return;
+ }
+
+ if (port.port) {
+ port = parseInt(port.port);
+ }
+
+ if (port instanceof ObjectId) {
+      var opts = MongoRunner.savedOptions[port];
+ if (opts) {
+ port = parseInt(opts.port);
+ }
+ }
+
+ return !MongoRunner.usedPortMap[String(parseInt(port))];
+ };
+
+ /**
+ * Starts an instance of the specified mongo tool
+ *
+ * @param {String} binaryName The name of the tool to run
+ * @param {Object} opts options to pass to the tool
+ * {
+ * binVersion {string}: version of tool to run
+ * }
+ *
+ * @see MongoRunner.arrOptions
+ */
+ MongoRunner.runMongoTool = function(binaryName, opts) {
+ opts = opts || {};
+ // Normalize and get the binary version to use
+ opts.binVersion = MongoRunner.getBinVersionFor(opts.binVersion);
+
+ var argsArray = MongoRunner.arrOptions(binaryName, opts);
+
+ return runMongoProgram.apply(null, argsArray);
+ };
+
+ // Given a test name figures out a directory for that test to use for dump files and makes sure
+ // that directory exists and is empty.
+ MongoRunner.getAndPrepareDumpDirectory = function(testName) {
+ var dir = MongoRunner.dataPath + testName + "_external/";
+ resetDbpath(dir);
+ return dir;
+ };
+
+ // Start a mongod instance and return a 'Mongo' object connected to it.
+ // This function's arguments are passed as command line arguments to mongod.
+ // The specified 'dbpath' is cleared if it exists, created if not.
+ // var conn = startMongodEmpty("--port", 30000, "--dbpath", "asdf");
+ startMongodEmpty = function () {
+ var args = createMongoArgs("mongod", arguments);
+
+ var dbpath = _parsePath.apply(null, args);
+ resetDbpath(dbpath);
+
+ return startMongoProgram.apply(null, args);
+ };
+ startMongod = function () {
+ print("startMongod WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING ONLY");
+ return startMongodEmpty.apply(null, arguments);
+ };
+ startMongodNoReset = function() {
+ var args = createMongoArgs("mongod", arguments);
+ return startMongoProgram.apply(null, args);
+ };
+
+ startMongos = function(args) {
+ return MongoRunner.runMongos(args);
+ };
+
+ /**
+ * Returns a new argArray with any test-specific arguments added.
+ */
+ function appendSetParameterArgs(argArray) {
+ var programName = argArray[0];
+ if (programName.endsWith('mongod') || programName.endsWith('mongos')) {
+ if (jsTest.options().enableTestCommands) {
+ argArray.push('--setParameter', "enableTestCommands=1");
+ }
+ if (jsTest.options().authMechanism && jsTest.options().authMechanism !== "SCRAM-SHA-1") {
+ var hasAuthMechs = false;
+        for (var i in argArray) {
+ if (typeof argArray[i] === 'string' &&
+ argArray[i].indexOf('authenticationMechanisms') !== -1) {
+ hasAuthMechs = true;
+ break;
+ }
+ }
+ if (!hasAuthMechs) {
+ argArray.push('--setParameter', "authenticationMechanisms=" + jsTest.options().authMechanism);
+ }
+ }
+ if (jsTest.options().auth) {
+ argArray.push('--setParameter', "enableLocalhostAuthBypass=false");
+ }
+ if (jsTestOptions().useSSL) {
+ if (argArray.indexOf('--sslMode') < 0) {
+ argArray.push(
+ '--sslMode', 'requireSSL',
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslWeakCertificateValidation');
+ }
+ }
+
+ if (programName.endsWith('mongos')) {
+ // mongos only options
+ // apply setParameters for mongos
+ if (jsTest.options().setParametersMongos) {
+ var params = jsTest.options().setParametersMongos.split(",");
+ if (params && params.length > 0) {
+ params.forEach(function(p) {
+ if (p) {
+ argArray.push('--setParameter', p);
+ }
+ });
+ }
+ }
+ } else if (programName.endsWith('mongod')) {
+ // mongod only options
+ // set storageEngine for mongod
+ if (jsTest.options().storageEngine) {
+ if (argArray.indexOf("--storageEngine") < 0) {
+ argArray.push('--storageEngine', jsTest.options().storageEngine);
+ }
+ }
+ if (jsTest.options().wiredTigerEngineConfigString) {
+ argArray.push('--wiredTigerEngineConfigString', jsTest.options().wiredTigerEngineConfigString);
+ }
+ if (jsTest.options().wiredTigerCollectionConfigString) {
+ argArray.push('--wiredTigerCollectionConfigString', jsTest.options().wiredTigerCollectionConfigString);
+ }
+ if (jsTest.options().wiredTigerIndexConfigString) {
+ argArray.push('--wiredTigerIndexConfigString', jsTest.options().wiredTigerIndexConfigString);
+ }
+ // apply setParameters for mongod
+ if (jsTest.options().setParameters) {
+ params = jsTest.options().setParameters.split(",");
+ if (params && params.length > 0) {
+ params.forEach(function(p) {
+ if (p) {
+ argArray.push('--setParameter', p);
+ }
+ });
+ }
+ }
+ }
+ }
+ return argArray;
+ }
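+
+  // For example (illustrative): with TestData.setParameters = "a=1,b=2", a
+  // mongod argArray gains '--setParameter a=1 --setParameter b=2' in addition
+  // to any storage-engine flags requested by the test options.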
+
+ /**
+ * Start a mongo process with a particular argument array. If we aren't waiting for connect,
+ * return null.
+ */
+ MongoRunner.startWithArgs = function(argArray, waitForConnect) {
+ // TODO: Make there only be one codepath for starting mongo processes
+ argArray = appendSetParameterArgs(argArray);
+ var port = _parsePort.apply(null, argArray);
+ var pid = _startMongoProgram.apply(null, argArray);
+
+ var conn = null;
+ if (waitForConnect) {
+ assert.soon(function() {
+ try {
+ conn = new Mongo("127.0.0.1:" + port);
+ return true;
+ } catch (e) {
+ if (!checkProgram(pid)) {
+ print("Could not start mongo program at " + port + ", process ended");
+ return true;
+ }
+ }
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000);
+ }
+
+ return conn;
+ };
+
+ /**
+ * DEPRECATED
+ *
+   * Start mongod or mongos and return a Mongo() object connected to it.
+   * This function's first argument is the "mongod" or "mongos" program name,
+   * and subsequent arguments to this function are passed as
+   * command line arguments to the program.
+ */
+ startMongoProgram = function() {
+ var port = _parsePort.apply(null, arguments);
+
+ // Enable test commands.
+ // TODO: Make this work better with multi-version testing so that we can support
+ // enabling this on 2.4 when testing 2.6
+ var args = Array.from(arguments);
+ args = appendSetParameterArgs(args);
+ var pid = _startMongoProgram.apply(null, args);
+
+ var m;
+ assert.soon(function() {
+ try {
+ m = new Mongo("127.0.0.1:" + port);
+ return true;
+ } catch (e) {
+ if (!checkProgram(pid)) {
+
+ print("Could not start mongo program at " + port + ", process ended");
+
+ // Break out
+ m = null;
+ return true;
+ }
+ }
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000);
+
+ return m;
+ };
+
+ runMongoProgram = function() {
+ var args = Array.from(arguments);
+ var progName = args[0];
+
+ if (jsTestOptions().auth) {
+ args = args.slice(1);
+ args.unshift(progName,
+ '-u', jsTestOptions().authUser,
+ '-p', jsTestOptions().authPassword,
+ '--authenticationDatabase=admin'
+ );
+ }
+
+ if (jsTestOptions().useSSL) {
+ args.push("--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidHostnames");
+ }
+
+ if (progName === 'mongo' && !_useWriteCommandsDefault()) {
+ progName = args[0];
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
+
+ return _runMongoProgram.apply(null, args);
+ };
+
+ // Start a mongo program instance. This function's first argument is the
+ // program name, and subsequent arguments to this function are passed as
+ // command line arguments to the program. Returns pid of the spawned program.
+ startMongoProgramNoConnect = function() {
+ var args = Array.from(arguments);
+ var progName = args[0];
+
+ if (jsTestOptions().auth) {
+ args = args.slice(1);
+ args.unshift(progName,
+ '-u', jsTestOptions().authUser,
+ '-p', jsTestOptions().authPassword,
+ '--authenticationDatabase=admin');
+ }
+
+ if (jsTestOptions().useSSL) {
+ args.push("--ssl",
+ "--sslPEMKeyFile", "jstests/libs/client.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidHostnames");
+ }
+
+ if (progName === 'mongo' && !_useWriteCommandsDefault()) {
+ args = args.slice(1);
+ args.unshift(progName, '--useLegacyWriteOps');
+ }
+
+ return _startMongoProgram.apply(null, args);
+ };
+
+ myPort = function() {
+ var m = db.getMongo();
+ if (m.host.match(/:/)) {
+ return m.host.match(/:(.*)/)[1];
+ }
+ return 27017;
+ };
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/servers_misc.js b/src/mongo/gotools/test/qa-tests/jstests/libs/servers_misc.js
new file mode 100644
index 00000000000..f0f5cfe7ceb
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/servers_misc.js
@@ -0,0 +1,379 @@
+
+/**
+ * Run a mongod process.
+ *
+ * After initializing a MongodRunner, you must call start() on it.
+ * @param {int} port port to run db on; use allocatePorts(num) to requisition
+ * @param {string} dbpath path to use
+ * @param {boolean} peer pass in false (DEPRECATED, was used for replica pair host)
+ * @param {boolean} arbiter pass in false (DEPRECATED, was used for replica pair host)
+ * @param {array} extraArgs other arguments for the command line
+ * @param {object} options other options include no_bind to not bind_ip to 127.0.0.1
+ * (necessary for replica set testing)
+ */
+MongodRunner = function(port, dbpath, peer, arbiter, extraArgs, options) {
+ this.port_ = port;
+ this.dbpath_ = dbpath;
+ this.peer_ = peer;
+ this.arbiter_ = arbiter;
+ this.extraArgs_ = extraArgs;
+ this.options_ = options ? options : {};
+};
+
+/**
+ * Start this mongod process.
+ *
+ * @param {boolean} reuseData If the data directory should be left intact (default is to wipe it)
+ */
+MongodRunner.prototype.start = function(reuseData) {
+ var args = [];
+ if (reuseData) {
+ args.push("mongod");
+ }
+ args.push(
+ "--port", this.port_,
+ "--dbpath", this.dbpath_,
+ "--nohttpinterface",
+ "--noprealloc",
+ "--smallfiles");
+ if (!this.options_.no_bind) {
+ args.push("--bind_ip", "127.0.0.1");
+ }
+ if (this.extraArgs_) {
+ args = args.concat(this.extraArgs_);
+ }
+ removeFile(this.dbpath_ + "/mongod.lock");
+ if (reuseData) {
+ return startMongoProgram.apply(null, args);
+ }
+ return startMongod.apply(null, args);
+};
+
+MongodRunner.prototype.port = function() {
+ return this.port_;
+};
+
+MongodRunner.prototype.toString = function() {
+ return [this.port_, this.dbpath_, this.peer_, this.arbiter_].toString();
+};
+
+ToolTest = function(name, extraOptions) {
+ this.useSSL = jsTestOptions().useSSL;
+ this.name = name;
+ this.options = extraOptions;
+ this.port = allocatePorts(1)[0];
+ this.baseName = "jstests_tool_" + name;
+ this.root = MongoRunner.dataPath + this.baseName;
+ this.dbpath = this.root + "/";
+ this.ext = this.root + "_external/";
+ this.extFile = this.root + "_external/a";
+ resetDbpath(this.dbpath);
+ resetDbpath(this.ext);
+};
+
+ToolTest.prototype.startDB = function(coll) {
+ assert(!this.m, "db already running");
+
+ var options = {
+ port: this.port,
+ dbpath: this.dbpath,
+ nohttpinterface: "",
+ noprealloc: "",
+ smallfiles: "",
+ bind_ip: "127.0.0.1",
+ };
+
+ Object.extend(options, this.options);
+
+ if (this.useSSL) {
+ Object.extend(options, {
+ sslMode: "requireSSL",
+ sslPEMKeyFile: "jstests/libs/server.pem",
+ sslCAFile: "jstests/libs/ca.pem",
+ sslWeakCertificateValidation: "",
+ });
+ }
+
+ this.m = startMongoProgram.apply(null, MongoRunner.arrOptions("mongod", options));
+ this.db = this.m.getDB(this.baseName);
+ if (coll) {
+ return this.db.getCollection(coll);
+ }
+ return this.db;
+};
+
+ToolTest.prototype.stop = function() {
+ if (!this.m) {
+ return;
+ }
+ MongoRunner.stopMongod(this.port);
+ this.m = null;
+ this.db = null;
+
+ print('*** ' + this.name + " completed successfully ***");
+};
+
+ToolTest.prototype.runTool = function() {
+ var a = ["mongo" + arguments[0]];
+
+ var hasdbpath = false;
+
+ for (var i=1; i<arguments.length; i++) {
+ a.push(arguments[i]);
+ if (arguments[i] === "--dbpath") {
+ hasdbpath = true;
+ }
+ }
+
+ if (this.useSSL) {
+ a = a.concat(["--ssl",
+ "--sslPEMKeyFile", "jstests/libs/server.pem",
+ "--sslCAFile", "jstests/libs/ca.pem",
+ "--sslAllowInvalidHostnames"]);
+ }
+
+ if (!hasdbpath) {
+ a.push("--host");
+ a.push("127.0.0.1:" + this.port);
+ }
+
+ return runMongoProgram.apply(null, a);
+};
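+
+// Usage sketch (tool suffix and collection are illustrative; "dump" resolves
+// to the "mongodump" binary):
+//   var t = new ToolTest("dump_example");
+//   var c = t.startDB("foo");
+//   c.insert({x: 1});
+//   t.runTool("dump", "--out", t.ext);
+//   t.stop();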
+
+
+ReplTest = function(name, ports) {
+ this.name = name;
+ this.ports = ports || allocatePorts(2);
+};
+
+ReplTest.prototype.getPort = function(master) {
+ if (master) {
+ return this.ports[0];
+ }
+ return this.ports[1];
+};
+
+ReplTest.prototype.getPath = function(master) {
+ var p = MongoRunner.dataPath + this.name + "-";
+ if (master) {
+ p += "master";
+ } else {
+ p += "slave";
+ }
+ return p;
+};
+
+ReplTest.prototype.getOptions = function(master, extra, putBinaryFirst, norepl) {
+ if (!extra) {
+ extra = {};
+ }
+
+ if (!extra.oplogSize) {
+ extra.oplogSize = "40";
+ }
+
+ var a = [];
+ if (putBinaryFirst) {
+ a.push("mongod");
+ }
+ a.push("--nohttpinterface",
+ "--noprealloc",
+ "--bind_ip", "127.0.0.1",
+ "--smallfiles",
+ "--port", this.getPort(master),
+ "--dbpath", this.getPath(master));
+
+ if (jsTestOptions().noJournal) {
+ a.push("--nojournal");
+ }
+ if (jsTestOptions().noJournalPrealloc) {
+ a.push("--nopreallocj");
+ }
+ if (jsTestOptions().keyFile) {
+ a.push("--keyFile", jsTestOptions().keyFile);
+ }
+
+ if (jsTestOptions().useSSL) {
+ if (!Array.contains(a, "--sslMode")) {
+ a.push("--sslMode", "requireSSL");
+ }
+ if (!Array.contains(a, "--sslPEMKeyFile")) {
+ a.push("--sslPEMKeyFile", "jstests/libs/server.pem");
+ }
+ if (!Array.contains(a, "--sslCAFile")) {
+ a.push("--sslCAFile", "jstests/libs/ca.pem");
+ }
+ a.push("--sslWeakCertificateValidation");
+ }
+ if (jsTestOptions().useX509 && !Array.contains(a, "--clusterAuthMode")) {
+ a.push("--clusterAuthMode", "x509");
+ }
+
+ if (!norepl) {
+ if (master) {
+ a.push("--master");
+ } else {
+ a.push("--slave", "--source", "127.0.0.1:" + this.ports[0]);
+ }
+ }
+
+ for (var k in extra) {
+ if (!extra.hasOwnProperty(k)) {
+ continue;
+ }
+ var v = extra[k];
+ if (k in MongoRunner.logicalOptions) {
+ continue;
+ }
+ a.push("--" + k);
+ if (v !== undefined && v !== null && v !== "") {
+ a.push(v);
+ }
+ }
+
+ return a;
+};
+
+ReplTest.prototype.start = function(master, options, restart, norepl) {
+ var lockFile = this.getPath(master) + "/mongod.lock";
+ removeFile(lockFile);
+ var o = this.getOptions(master, options, restart, norepl);
+
+ if (restart) {
+ var conn = startMongoProgram.apply(null, o);
+ if (!master) {
+ conn.setSlaveOk();
+ }
+ return conn;
+ }
+ conn = startMongod.apply(null, o);
+ if (jsTestOptions().keyFile || jsTestOptions().auth || jsTestOptions().useX509) {
+ jsTest.authenticate(conn);
+ }
+ if (!master) {
+ conn.setSlaveOk();
+ }
+ return conn;
+};
+
+ReplTest.prototype.stop = function(master, signal) {
+ if (arguments.length === 0) {
+ this.stop(true);
+ this.stop(false);
+ return;
+ }
+
+ print('*** ' + this.name + " completed successfully ***");
+ return MongoRunner.stopMongod(this.getPort(master), signal || 15);
+};
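+
+// Example usage of ReplTest (a minimal sketch):
+//
+//   var rt = new ReplTest('myReplTest');
+//   var master = rt.start(true);   // start the master
+//   var slave = rt.start(false);   // start the slave, syncing from the master
+//   // ... write to master, read from slave ...
+//   rt.stop();                     // with no arguments, stops both nodes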
+
+if (typeof allocatePort === 'function') {
+ allocatePorts = function (numPorts) {
+ var ports = [];
+ for (var i = 0; i < numPorts; i++) {
+ ports.push(allocatePort());
+ }
+ return ports;
+ };
+} else {
+ allocatePorts = function(n, startPort) {
+ var ret = [];
+ var start = startPort || 31000;
+ for (var i = start; i < start + n; ++i) {
+ ret.push(i);
+ }
+ return ret;
+ };
+}
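+
+// Example: reserve ports for a test cluster (illustrative). With a modern
+// shell this delegates to allocatePort(); the fallback hands out a fixed
+// range, by default starting at 31000.
+//
+//   var ports = allocatePorts(3);          // e.g. [31000, 31001, 31002]
+//   var more = allocatePorts(2, 42000);    // fallback only: [42000, 42001]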
+
+
+SyncCCTest = function(testName, extraMongodOptions) {
+ this._testName = testName;
+ this._connections = [];
+
+ for (var i=0; i<3; i++) {
+ this._connections.push(startMongodTest(30000 + i, testName + i, false, extraMongodOptions));
+ }
+
+ this.url = this._connections.map(function(z) {
+ return z.name;
+ }).join(",");
+ this.conn = new Mongo(this.url);
+};
+
+SyncCCTest.prototype.stop = function() {
+ for (var i=0; i<this._connections.length; i++) {
+ MongoRunner.stopMongod(30000 + i);
+ }
+
+ print('*** ' + this._testName + " completed successfully ***");
+};
+
+SyncCCTest.prototype.checkHashes = function(dbname, msg) {
+ var hashes = this._connections.map(function(z) {
+ return z.getDB(dbname).runCommand("dbhash");
+ });
+
+ for (var i=1; i<hashes.length; i++) {
+ assert.eq(hashes[0].md5, hashes[i].md5, "checkHash on " + dbname + " " + msg + "\n" + tojson(hashes));
+ }
+};
+
+SyncCCTest.prototype.tempKill = function(num) {
+ num = num || 0;
+ MongoRunner.stopMongod(30000 + num);
+};
+
+SyncCCTest.prototype.tempStart = function(num) {
+ num = num || 0;
+ this._connections[num] = startMongodTest(30000 + num, this._testName + num, true);
+};
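+
+// Example usage of SyncCCTest (an illustrative sketch):
+//
+//   var test = new SyncCCTest('mySyncTest');    // three mongods on ports 30000-30002
+//   test.tempKill(1);                           // temporarily stop the second node
+//   test.tempStart(1);                          // bring it back
+//   test.checkHashes('test', 'after restart');  // all nodes should report the same dbhash
+//   test.stop();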
+
+
+function startParallelShell(jsCode, port, noConnect) {
+ var x;
+
+ var args = ["mongo"];
+
+ // Convert function into call-string
+ if (typeof (jsCode) === "function") {
+ var id = Math.floor(Math.random() * 100000);
+ jsCode = "var f" + id + " = " + jsCode.toString() + ";f" + id + "();";
+ } else if (typeof (jsCode) === "string") {
+ // do nothing
+ } else {
+ throw Error("bad first argument to startParallelShell");
+ }
+
+ if (noConnect) {
+ args.push("--nodb");
+ } else if (typeof (db) === "object") {
+ jsCode = "db = db.getSiblingDB('" + db.getName() + "');" + jsCode;
+ }
+
+ if (TestData) {
+ jsCode = "TestData = " + tojson(TestData) + ";" + jsCode;
+ }
+
+ args.push("--eval", jsCode);
+
+ if (typeof db === "object") {
+ var hostAndPort = db.getMongo().host.split(':');
+ var host = hostAndPort[0];
+ args.push("--host", host);
+ if (!port && hostAndPort.length >= 2) {
+ port = hostAndPort[1];
+ }
+ }
+ if (port) {
+ args.push("--port", port);
+ }
+
+
+ x = startMongoProgramNoConnect.apply(null, args);
+ return function() {
+ return waitProgram(x);
+ };
+}
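+
+// Example usage of startParallelShell (a minimal sketch). A function argument
+// is serialized into the --eval string, and the current db and TestData are
+// forwarded to the child shell; the returned function blocks until it exits.
+//
+//   var join = startParallelShell(function() {
+//     db.parallel.insert({fromParallelShell: true});
+//   });
+//   join();  // wait for the parallel shell to finish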
+
+var testingReplication = false;
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/slow_weekly_util.js b/src/mongo/gotools/test/qa-tests/jstests/libs/slow_weekly_util.js
new file mode 100644
index 00000000000..e6b31eede9d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/slow_weekly_util.js
@@ -0,0 +1,25 @@
+
+SlowWeeklyMongod = function(name) {
+ this.name = name;
+ this.port = 30201;
+
+ this.start = new Date();
+
+ this.conn = startMongodEmpty(
+ "--port", this.port,
+ "--dbpath", MongoRunner.dataPath + this.name,
+ "--smallfiles",
+ "--nojournal");
+};
+
+SlowWeeklyMongod.prototype.getDB = function(name) {
+ return this.conn.getDB(name);
+};
+
+SlowWeeklyMongod.prototype.stop = function() {
+ stopMongod(this.port);
+ var end = new Date();
+ print("slowWeekly test: " + this.name + " completed successfully in "
+ + ((end.getTime() - this.start.getTime()) / 1000) + " seconds");
+};
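+
+// Example usage of SlowWeeklyMongod (an illustrative sketch):
+//
+//   var server = new SlowWeeklyMongod('mySlowTest');  // mongod on port 30201
+//   var testDB = server.getDB('test');
+//   // ... run the slow test against testDB ...
+//   server.stop();  // stops the mongod and prints the elapsed time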
+
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/smoke.pem b/src/mongo/gotools/test/qa-tests/jstests/libs/smoke.pem
new file mode 100644
index 00000000000..7dddf222386
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/smoke.pem
@@ -0,0 +1,48 @@
+-----BEGIN CERTIFICATE-----
+MIIDYTCCAkmgAwIBAgIBCDANBgkqhkiG9w0BAQUFADBrMQ4wDAYDVQQDEwVzbW9r
+ZTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1O
+ZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwHhcN
+MTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjBrMQ4wDAYDVQQDEwVzbW9rZTEP
+MA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1OZXcg
+WW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UEBhMCVVMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDb4fOWDomCPyYesh42pQ/bEHdK7r73
+06x1hdku9i+nytCSxhhuITGC1FA4ZIbYdQC/jgfzC0D+SDFKCCyNZA/2Pxam9y3F
+QHrueNtD9bw/OB98D6hC2fCow5OxUqWDkee2hQRTwLKDzec+H72AkwURh8oTfJsl
+LL/1YITZs9kfs59r8HG2YAT7QBbg3xBmK0wZvL4V/FY/OeeR92pIgjUU/6xm/1LU
+bhNHl5JTrXQxPpmvDb1ysiI0mMLeUz7UI+Pe/9mn91dHwgkprWyFi6VnV3/aW7DC
+nW/DklOPD8vMWu2A6iYU0fZbcj4vGM607vst5QLDMoD5Y2ilrKLiTRa5AgMBAAGj
+EDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAJc64d76+eyNGX6C
+5r4IdFF3zJjkLs/NcSMReUTEv4zAdJCn7c1FNRkQBS3Ky2CeSGmiyYOhWZ7usv7x
+EvprmHouWsrQXV+o5EIW366e5wzg0c5KWO3oBIRjx4hDkRSQSjJjy5NFrc8fAW9x
+eeaHFWdqk3CHvqBhd32QYEs4+7v8hBYM3PBkj8qghXta4ZZS89cTMSjhu5s4Opje
+qUzGzoHat2VBdYzIpVOorYMFXObwCeQkCAXO5epuGZ0QhML66hc7FuOsW75kI9aW
+QXVoM/z2Gb1wbBYnwHOXtClK783S3RdV0uJun/pVj+VeHb6fyIQRmC5d0eJ0C8mY
+X+acnvA=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA2+Hzlg6Jgj8mHrIeNqUP2xB3Su6+99OsdYXZLvYvp8rQksYY
+biExgtRQOGSG2HUAv44H8wtA/kgxSggsjWQP9j8WpvctxUB67njbQ/W8PzgffA+o
+QtnwqMOTsVKlg5HntoUEU8Cyg83nPh+9gJMFEYfKE3ybJSy/9WCE2bPZH7Ofa/Bx
+tmAE+0AW4N8QZitMGby+FfxWPznnkfdqSII1FP+sZv9S1G4TR5eSU610MT6Zrw29
+crIiNJjC3lM+1CPj3v/Zp/dXR8IJKa1shYulZ1d/2luwwp1vw5JTjw/LzFrtgOom
+FNH2W3I+LxjOtO77LeUCwzKA+WNopayi4k0WuQIDAQABAoIBAQDRFgAaDcLGfqQS
+Bk/iqHz2U6cMMxCW+sqAioGmPWW9iYdiOkra1meNP7T0mur7A+9tN3LpsybfZeiw
+vCsZXDAteXph1KPKcPE0uOnPqumRuB2ATCc1Qqas5CUaNju7a8/J6Jzfw1o9KVud
+4HLDw4nLTLNkalXhOLdkbp6FoZZypAgc8OnSdw7z9Kri6VndkddX3fWv4t203XwT
+AvBxvy4Qfblz6VKYRnjj2CPvo/kD+ncFEg+S6u8/LkghTX7CYeMHdTC0P9jOcEK2
+PMm3kS3sX7VkypsAirYK5QtBWxur+mINxfOBDtRlA2RaJQnikRiGb14bMkLx8Liy
+JNjEHSLdAoGBAP9+KpjniozZIbrcS79wdRrW+ARyDp1Plzyd4nQxfWmQ//nsnK5T
+EYCFXWTR/ldkAoHpD+bGGU02p1+1u4vmWqw/x+Qy56Gh/eylhe0RvYEjkVLyreuc
+bXu0BFlKVgRlBq1ZyXnr2lz3bAIZxvZs13lZn6qVPMt7w2/JTCal9jw7AoGBANxR
+sGik9mq/678nzLiNlf/LcwIz7siuyISoWDOaVEVva0uorqctVqL95w0f+3FXqBO/
+5BiJRFo5D8SfzRjkNkJ7V+rm+7/CjtsjEw2Ue+ZJYPlm+Wr545GYmhU9QH9NLZIN
+JBwTVWjLgdsyQyi0Gc+xMraBwEwoyS8cO17uHO2bAoGBANRmO91/6BPt0ve4epR9
+Vi1o9yki9PlcmHtBOmikWAFyFQvd4+eckVlKBflyBkL6locPjTOqDpC9VengeDj2
+2PyHzZLtqtkZhbK9bJhIfkWknwTZUTMliXMkldTxUo82uZVVpoRgSdmtq7IXYeut
+UnjExFMY3EDB9BizvUYIBKvPAoGAViQ6bS/SiPpxGlRdXus88r6BQSM9AYoVLIkF
+s2dr+5oMwZA6eXLopOHRLPiMP0yekto8PLuu1ffpil9QuaLA9E11moqlc9yGLngQ
+QwcDSo72M41nh8Qcjhi0ZgmE5kEuyCQLMk783fRz2VhVmdyRGvuVcHZa0WxA/QJ0
+1DEVbnECgYEA3i2PGHUvU2TIFNvubw3qdH5y7FXafF+O0ulQ8e6r/CbVAG14Z6xP
+RHLc7/JIYK9CG1PWCbkjiHZ4MsKFuRWFrUMrwSj8M3euCaEIxa/Co60qQ/CnZiZ6
+geleTtUcTZ2T0pqGLnrHwlzhLpCkPJPyjcfQjjEZRwd0bVFX6b3C/rw=
+-----END RSA PRIVATE KEY----- \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/test_background_ops.js b/src/mongo/gotools/test/qa-tests/jstests/libs/test_background_ops.js
new file mode 100644
index 00000000000..08c12cb90aa
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/test_background_ops.js
@@ -0,0 +1,334 @@
+//
+// Utilities related to background operations while other operations are working
+//
+
+/**
+ * Allows synchronization between background ops and the test operations
+ */
+var waitForLock = function(mongo, name) {
+ var ts = new ObjectId();
+ var lockColl = mongo.getCollection("config.testLocks");
+
+ lockColl.update({_id: name, state: 0}, {$set: {state: 0}}, true);
+
+ //
+ // Wait until we can set the state to 1 with our id
+ //
+
+ var startTime = new Date().getTime();
+
+ assert.soon(function() {
+ lockColl.update({_id: name, state: 0}, {$set: {ts: ts, state: 1}});
+ var gleObj = lockColl.getDB().getLastErrorObj();
+
+ if (new Date().getTime() - startTime > 20 * 1000) {
+ print("Waiting for...");
+ printjson(gleObj);
+ printjson(lockColl.findOne());
+ printjson(ts);
+ }
+
+ return gleObj.n === 1 || gleObj.updatedExisting;
+ }, "could not acquire lock", 30 * 1000, 100);
+
+ print("Acquired lock " + tojson({_id: name, ts: ts}) + " curr : " +
+ tojson(lockColl.findOne({_id: name})));
+
+ // Set the state back to 0
+ var unlock = function() {
+ print("Releasing lock " + tojson({_id: name, ts: ts}) + " curr : " +
+ tojson(lockColl.findOne({_id: name})));
+ lockColl.update({_id: name, ts: ts}, {$set: {state: 0}});
+ };
+
+ // Return an object we can invoke unlock on
+ return {unlock: unlock};
+};
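+
+// Example usage of waitForLock (a minimal sketch): acquire a named lock in
+// config.testLocks, do the critical work, then release it via the handle.
+//
+//   var lock = waitForLock(db.getMongo(), 'myCriticalSection');
+//   // ... operations that must not interleave with background ops ...
+//   lock.unlock();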
+
+/**
+ * Allows a test or background op to say it's finished
+ */
+var setFinished = function(mongo, name, finished) {
+ if (finished || finished === undefined || finished === null) {
+ mongo.getCollection("config.testFinished").update({_id: name}, {_id: name}, true);
+ } else {
+ mongo.getCollection("config.testFinished").remove({_id: name});
+ }
+};
+
+/**
+ * Checks whether a test or background op is finished
+ */
+var isFinished = function(mongo, name) {
+ return mongo.getCollection("config.testFinished").findOne({_id: name}) !== null;
+};
+
+/**
+ * Sets the result of a background op
+ */
+var setResult = function(mongo, name, result, err) {
+ mongo.getCollection("config.testResult").update({_id: name}, {_id: name, result: result, err: err}, true);
+};
+
+/**
+ * Gets the result for a background op
+ */
+var getResult = function(mongo, name) {
+ return mongo.getCollection("config.testResult").findOne({_id: name});
+};
+
+/**
+ * Overrides the parallel shell code in mongo
+ */
+function startParallelShell(jsCode, port) {
+ var x;
+ if (port) {
+ x = startMongoProgramNoConnect("mongo", "--port", port, "--eval", jsCode);
+ } else {
+ x = startMongoProgramNoConnect("mongo", "--eval", jsCode, db ? db.getMongo().host : null);
+ }
+
+ return function() {
+ jsTestLog("Waiting for shell " + x + "...");
+ waitProgram(x);
+ jsTestLog("Shell " + x + " finished.");
+ };
+}
+
+var RandomFunctionContext = function(context) {
+ Random.srand(context.seed);
+ Random.randBool = function() {
+ return Random.rand() > 0.5;
+ };
+
+ Random.randInt = function(min, max) {
+ if (max === undefined) {
+ max = min;
+ min = 0;
+ }
+ return min + Math.floor(Random.rand() * max);
+ };
+
+ Random.randShardKey = function() {
+ var numFields = 2; // Random.randInt(1, 3)
+ var key = {};
+ for (var i = 0; i < numFields; i++) {
+ var field = String.fromCharCode("a".charCodeAt() + i);
+ key[field] = 1;
+ }
+ return key;
+ };
+
+ Random.randShardKeyValue = function(shardKey) {
+ var keyValue = {};
+ for (var field in shardKey) {
+ if (!shardKey.hasOwnProperty(field)) {
+ continue;
+ }
+ keyValue[field] = Random.randInt(1, 100);
+ }
+ return keyValue;
+ };
+
+ Random.randCluster = function() {
+ var numShards = 2; // Random.randInt( 1, 10 )
+ var rs = false; // Random.randBool()
+ var st = new ShardingTest({
+ shards: numShards,
+ mongos: 4,
+ other: {separateConfig: true, rs: rs}
+ });
+ return st;
+ };
+};
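+
+// Example usage of RandomFunctionContext (an illustrative sketch): it seeds
+// the global Random and installs the rand* helpers used by background ops.
+//
+//   RandomFunctionContext({seed: 42});
+//   var n = Random.randInt(1, 100);   // integer from 1 to 100
+//   var value = Random.randShardKeyValue(Random.randShardKey());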
+
+
+startParallelOps = function(mongo, proc, args, context) {
+ var procName = proc.name + "-" + new ObjectId();
+ var seed = new ObjectId(new ObjectId().valueOf().split("").reverse().join(""))
+ .getTimestamp().getTime();
+
+ // Make sure we aren't finished before we start
+ setFinished(mongo, procName, false);
+ setResult(mongo, procName, undefined, undefined);
+
+ // TODO: Make this a context of its own
+ var procContext = {
+ procName: procName,
+ seed: seed,
+ waitForLock: waitForLock,
+ setFinished: setFinished,
+ isFinished: isFinished,
+ setResult: setResult,
+ setup: function(context, stored) {
+ waitForLock = function() {
+ return context.waitForLock(db.getMongo(), context.procName);
+ };
+ setFinished = function(finished) {
+ return context.setFinished(db.getMongo(), context.procName, finished);
+ };
+ isFinished = function() {
+ return context.isFinished(db.getMongo(), context.procName);
+ };
+ setResult = function(result, err) {
+ return context.setResult(db.getMongo(), context.procName, result, err);
+ };
+ },
+ };
+
+ var bootstrapper = function(stored) {
+ var procContext = stored.procContext;
+ procContext.setup(procContext, stored);
+
+ var contexts = stored.contexts;
+ eval("contexts = " + contexts); // eslint-disable-line no-eval
+
+ for (var i = 0; i < contexts.length; i++) {
+ if (typeof (contexts[i]) !== "undefined") {
+ // Evaluate all contexts
+ contexts[i](procContext);
+ }
+ }
+
+ var operation = stored.operation;
+ eval("operation = " + operation); // eslint-disable-line no-eval
+
+ var args = stored.args;
+ eval("args = " + args); // eslint-disable-line no-eval
+
+ var result;
+ var err;
+
+ try {
+ result = operation.apply(null, args);
+ } catch (e) {
+ err = e;
+ }
+
+ setResult(result, err);
+ };
+
+ var contexts = [RandomFunctionContext, context];
+
+ var testDataColl = mongo.getCollection("config.parallelTest");
+
+ testDataColl.insert({
+ _id: procName,
+ bootstrapper: tojson(bootstrapper),
+ operation: tojson(proc),
+ args: tojson(args),
+ procContext: procContext,
+ contexts: tojson(contexts),
+ });
+
+ assert.eq(null, testDataColl.getDB().getLastError());
+
+ var bootstrapStartup =
+ "{ var procName = '" + procName + "'; " +
+ "var stored = db.getMongo().getCollection( '" + testDataColl + "' )" +
+ ".findOne({ _id : procName }); " +
+ "var bootstrapper = stored.bootstrapper; " +
+ "eval( 'bootstrapper = ' + bootstrapper ); " +
+ "bootstrapper( stored ); " +
+ "}";
+
+
+ var oldDB = db;
+ db = mongo.getDB("test"); // eslint-disable-line no-native-reassign
+
+ jsTest.log("Starting " + proc.name + " operations...");
+
+ var rawJoin = startParallelShell(bootstrapStartup);
+
+ db = oldDB; // eslint-disable-line no-native-reassign
+
+
+ var join = function() {
+ setFinished(mongo, procName, true);
+
+ rawJoin();
+ var result = getResult(mongo, procName);
+
+ assert.neq(result, null);
+
+ if (result.err) {
+ throw Error("Error in parallel ops " + procName + " : "
+ + tojson(result.err));
+ }
+ return result.result;
+ };
+
+ join.isFinished = function() {
+ return isFinished(mongo, procName);
+ };
+
+ join.setFinished = function(finished) {
+ return setFinished(mongo, procName, finished);
+ };
+
+ join.waitForLock = function(name) {
+ return waitForLock(mongo, name);
+ };
+
+ return join;
+};
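+
+// Example usage of startParallelOps (a minimal sketch), using the moveOps
+// utility defined below; assumes test.coll is a sharded collection:
+//
+//   var st = new ShardingTest({shards: 2});
+//   var join = startParallelOps(st.s, moveOps, ['test.coll']);
+//   // ... run the operations under test while chunks migrate ...
+//   var result = join();  // signals the background op to finish and waits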
+
+
+//
+// Some utility operations
+//
+
+function moveOps(collName, options) {
+ options = options || {};
+
+ var admin = db.getMongo().getDB("admin");
+ var config = db.getMongo().getDB("config");
+ var shards = config.shards.find().toArray();
+ var shardKey = config.collections.findOne({_id: collName}).key;
+
+ while (!isFinished()) {
+ var findKey = Random.randShardKeyValue(shardKey);
+ var toShard = shards[Random.randInt(shards.length)]._id;
+
+ try {
+ printjson(admin.runCommand({
+ moveChunk: collName,
+ find: findKey,
+ to: toShard,
+ }));
+ } catch (e) {
+ printjson(e);
+ }
+
+ sleep(1000);
+ }
+
+ jsTest.log("Stopping moveOps...");
+}
+
+function splitOps(collName, options) {
+ options = options || {};
+
+ var admin = db.getMongo().getDB("admin");
+ var config = db.getMongo().getDB("config");
+ var shards = config.shards.find().toArray();
+ var shardKey = config.collections.findOne({_id: collName}).key;
+
+ while (!isFinished()) {
+ var middleKey = Random.randShardKeyValue(shardKey);
+
+ try {
+ printjson(admin.runCommand({
+ split: collName,
+ middle: middleKey,
+ }));
+ } catch (e) {
+ printjson(e);
+ }
+
+ sleep(1000);
+ }
+
+ jsTest.log("Stopping splitOps...");
+}
+
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/testconfig b/src/mongo/gotools/test/qa-tests/jstests/libs/testconfig
new file mode 100644
index 00000000000..4b09f37ad13
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/testconfig
@@ -0,0 +1,6 @@
+fastsync = true
+#comment line
+#commentedflagwithan = false
+version = false
+help = false
+sysinfo = false
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/testconfig.json b/src/mongo/gotools/test/qa-tests/jstests/libs/testconfig.json
new file mode 100644
index 00000000000..5af32aad7d3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/testconfig.json
@@ -0,0 +1,4 @@
+{
+ "fastsync" : true,
+ "version" : false
+}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/trace_missing_docs.js b/src/mongo/gotools/test/qa-tests/jstests/libs/trace_missing_docs.js
new file mode 100644
index 00000000000..ebee080dcc7
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/trace_missing_docs.js
@@ -0,0 +1,99 @@
+
+//
+// On error inserting documents, traces back and shows where the document was dropped
+//
+
+function traceMissingDoc(coll, doc, mongos) {
+ if (mongos) {
+ coll = mongos.getCollection(String(coll));
+ } else {
+ mongos = coll.getMongo();
+ }
+
+ var config = mongos.getDB("config");
+ var shards = config.shards.find().toArray();
+ for (var i = 0; i < shards.length; i++) {
+ shards[i].conn = new Mongo(shards[i].host);
+ }
+
+ var shardKeyPatt = config.collections.findOne({_id: String(coll)}).key;
+
+ // Project out the shard key
+ var shardKey = {};
+ for (var k in shardKeyPatt) {
+ if (doc[k] === undefined || doc[k] === null) {
+ jsTest.log("Shard key " + tojson(shardKey)
+ + " not found in doc " + tojson(doc)
+ + ", falling back to _id search...");
+ shardKeyPatt = {_id: 1};
+ shardKey = {_id: doc['_id']};
+ break;
+ }
+ shardKey[k] = doc[k];
+ }
+
+ if (doc['_id'] === undefined) {
+ jsTest.log("Id not found in doc " + tojson(doc) + " cannot trace oplog entries.");
+ return;
+ }
+
+ jsTest.log("Using shard key : " + tojson(shardKey));
+
+ var allOps = [];
+ for (i = 0; i < shards.length; i++) {
+ var oplog = shards[i].conn.getCollection("local.oplog.rs");
+ if (!oplog.findOne()) {
+ oplog = shards[i].conn.getCollection("local.oplog.$main");
+ }
+
+ if (!oplog.findOne()) {
+ jsTest.log("No oplog was found on shard " + shards[i]._id);
+ continue;
+ }
+
+ var addKeyQuery = function(query, prefix) {
+ for (var k in shardKey) { // eslint-disable-line guard-for-in
+ query[prefix + '.' + k] = shardKey[k];
+ }
+ return query;
+ };
+
+ var addToOps = function(cursor) { // eslint-disable-line no-loop-func
+ cursor.forEach(function(doc) {
+ doc.shard = shards[i]._id;
+ doc.realTime = new Date(doc.ts.getTime() * 1000);
+ allOps.push(doc);
+ });
+ };
+
+ // Find ops
+ addToOps(oplog.find(addKeyQuery({op: 'i'}, 'o')));
+ var updateQuery = {
+ $or: [
+ addKeyQuery({op: 'u'}, 'o2'),
+ {op: 'u', 'o2._id': doc['_id']},
+ ],
+ };
+ addToOps(oplog.find(updateQuery));
+ addToOps(oplog.find({op: 'd', 'o._id': doc['_id']}));
+ }
+
+ var compareOps = function(opA, opB) {
+ if (opA.ts < opB.ts) {
+ return -1;
+ }
+ if (opB.ts < opA.ts) {
+ return 1;
+ }
+ return 0;
+ };
+
+ allOps.sort(compareOps);
+
+ print("Ops found for doc " + tojson(doc) + " on each shard:\n");
+ for (i = 0; i < allOps.length; i++) {
+ printjson(allOps[i]);
+ }
+
+ return allOps;
+}
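+
+// Example usage of traceMissingDoc (an illustrative sketch): when a document
+// is unexpectedly missing from a sharded collection, replay each shard's
+// oplog entries for it. The doc should contain the shard key fields; the
+// search falls back to _id otherwise.
+//
+//   var ops = traceMissingDoc(st.s.getCollection('test.coll'), {_id: 5, a: 1});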
diff --git a/src/mongo/gotools/test/qa-tests/jstests/libs/wc_framework.js b/src/mongo/gotools/test/qa-tests/jstests/libs/wc_framework.js
new file mode 100644
index 00000000000..51670f520eb
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/libs/wc_framework.js
@@ -0,0 +1,72 @@
+// runWCTest executes a tool against a number of configurations. A given replica set will have nodes prevented
+// from replicating and the tool should either pass or fail based on the supplied write concern. As a final test,
+// the tool is run with w:3 and waits for all three nodes to come back online, simulating a slowly-replicated write.
+var runWCTest = function runWCTest(progName, rs, toolTest, testWriteConcern, testProgramNoConnect) {
+ jsTest.log("testing that "+progName+" deals with write concern");
+
+ function windowsEscape(json) {
+ if (_isWindows()) {
+ json = '"' + json.replace(/"/g, '\\"') + '"';
+ }
+ return json;
+ }
+
+ // grab the two secondary nodes
+ var masterPort = rs.getPrimary().port;
+ var members = [];
+ var ports = [];
+ for (var i = 0; i < rs.nodes.length; i++) {
+ if (rs.nodes[i].port !== masterPort) {
+ members.push(rs.nodes[i].getDB("admin"));
+ ports.push(rs.nodes[i].port);
+ }
+ }
+ var member1 = members[0];
+ var member2 = members[1];
+
+ testWriteConcern(0, [], progName+" without write concern to a fully functioning repl-set should succeed");
+
+ testWriteConcern(0, ['--writeConcern=majority'], progName+" with majority to a fully functioning repl-set should succeed");
+
+ testWriteConcern(0, ['--writeConcern={w:1,wtimeout:10000}'], progName+" with w:1,timeout:10000 to a fully functioning repl-set should succeed");
+
+ testWriteConcern(0, ['--writeConcern={w:2,wtimeout:10000}'], progName+" with w:2,timeout:10000 to a fully functioning repl-set should succeed");
+
+ jsTest.log("stopping node on port " + ports[0] + " from doing any further syncing");
+ member1.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+ sleep(2000);
+
+ testWriteConcern(0, ['--writeConcern={w:1,wtimeout:10000}'], progName+" with w:1,timeout:10000 repl-set with 2 working nodes should succeed");
+
+ testWriteConcern(0, ['--writeConcern={w:2,wtimeout:10000}'], progName+" with w:2,timeout:10000 repl-set with 2 working nodes should succeed");
+
+ testWriteConcern(0, ['--writeConcern=majority'], progName+" with majority with two working nodes should succeed");
+
+ testWriteConcern(1, ['--writeConcern={w:3,wtimeout:2000}'], progName+" with w:3,timeout:2000 repl-set with two working nodes should fail");
+
+ jsTest.log("stopping second node on port " + ports[1] + " from doing any further syncing");
+ member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+ sleep(2000);
+
+ testWriteConcern(1, [windowsEscape('--writeConcern={w:"majority",wtimeout:2000}')], progName+" with majority with one working node should fail");
+
+ testWriteConcern(1, ['--writeConcern={w:2,wtimeout:10000}'], progName+" with w:2,timeout:10000 with one working node should fail");
+
+ testWriteConcern(0, ['--writeConcern={w:1,wtimeout:10000}'], progName+" with w:1,timeout:10000 repl-set with one working nodes should succeed");
+
+ jsTest.log(progName+" with w:3 concern and no working member and no timeout waits until member are available");
+ var pid = testProgramNoConnect();
+
+ sleep(2000);
+
+ assert(checkProgram(pid), progName+" with w:3 and no working members should not have finished");
+
+ jsTest.log("starting stopped members");
+
+ member1.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+ member2.runCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'off'});
+
+ jsTest.log("waiting for "+progName+" to finish");
+ var ret = waitProgram(pid);
+ assert.eq(0, ret, progName+" with w:3 should succeed once enough members start working");
+};
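+
+// Example invocation of runWCTest (a minimal sketch): the caller supplies the
+// replica set, the ToolTest, a testWriteConcern(expectedStatus, wcArgs, msg)
+// callback that runs the tool with the extra flags and asserts on its exit
+// status, and a testProgramNoConnect() callback that starts the tool without
+// waiting and returns its pid.
+//
+//   runWCTest('mongorestore', rs, toolTest, testWriteConcern, testProgramNoConnect);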
diff --git a/src/mongo/gotools/test/qa-tests/jstests/oplog/all_ops_tests.js b/src/mongo/gotools/test/qa-tests/jstests/oplog/all_ops_tests.js
new file mode 100644
index 00000000000..dd3b97c2923
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/oplog/all_ops_tests.js
@@ -0,0 +1,164 @@
+/*
+ * This test creates a fake oplog and uses it to test correct behavior of
+ * all possible op codes.
+ */
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var OPLOG_INSERT_CODE = 'i';
+ var OPLOG_COMMAND_CODE = 'c';
+ var OPLOG_UPDATE_CODE = 'u';
+ var OPLOG_REMOVE_CODE = 'd';
+ var OPLOG_NOOP_CODE = 'n';
+ var CURRENT_OPLOG_VERSION = 2;
+
+ var toolTest = getToolTest('applyAllOpsTest');
+ var commonToolArgs = getCommonToolArguments();
+
+ // Get the db that we'll insert the fake oplog into
+ var db = toolTest.db.getSiblingDB('foo');
+ db.dropDatabase();
+ db.getSiblingDB('rs').dropDatabase();
+
+ // Create capped collection
+ db.getSiblingDB('rs').createCollection('rs_test', {capped: true, size: 4});
+
+ // Add a bunch of operations to the fake oplog
+
+ // Create a collection to drop
+ db.getSiblingDB('rs').rs_test.insert({
+ ts: new Timestamp(),
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_COMMAND_CODE,
+ ns: "foo.$cmd",
+ o: {create: "baz"}
+ });
+
+ // Insert a doc
+ db.getSiblingDB('rs').rs_test.insert({
+ ts: new Timestamp(),
+ h: 0,
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_INSERT_CODE,
+ o: {
+ _id: 0
+ },
+ ns: 'foo.baz'
+ });
+
+ // Drop the doc's database
+ db.getSiblingDB('rs').rs_test.insert({
+ ts: new Timestamp(),
+ h: 1,
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_COMMAND_CODE,
+ o: {
+ dropDatabase: 1
+ },
+ ns: 'foo.$cmd'
+ });
+
+ // Create the collection
+ db.getSiblingDB('rs').rs_test.insert({
+ ts: new Timestamp(),
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_COMMAND_CODE,
+ ns: "foo.$cmd",
+ o: {create: "bar"}
+ });
+
+ // Insert 2 docs
+ db.getSiblingDB('rs').rs_test.insert({
+ ts: new Timestamp(),
+ h: 2,
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_INSERT_CODE,
+ o: {
+ _id: 1
+ },
+ ns: 'foo.bar'
+ });
+
+ db.getSiblingDB('rs').rs_test.insert({
+ ts: new Timestamp(),
+ h: 3,
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_INSERT_CODE,
+ o: {
+ _id: 2
+ },
+ ns: 'foo.bar'
+ });
+
+ // Remove first doc
+ db.getSiblingDB('rs').rs_test.insert({
+ ts: new Timestamp(),
+ h: 4,
+ b: true,
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_REMOVE_CODE,
+ o: {
+ _id: 1
+ },
+ ns: 'foo.bar'
+ });
+
+ // Update the second doc
+ db.getSiblingDB('rs').rs_test.insert({
+ ts: new Timestamp(),
+ h: 5,
+ b: true,
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_UPDATE_CODE,
+ o2: {
+ _id: 2
+ },
+ o: {
+ _id: 2,
+ x: 1
+ },
+ ns: 'foo.bar'
+ });
+
+ // Noop
+ db.getSiblingDB('rs').rs_test.insert({
+ ts: new Timestamp(),
+ h: 6,
+ op: OPLOG_NOOP_CODE,
+ ns: 'foo.bar',
+ o: {x: 'noop'}
+ });
+
+ var args = ['oplog', '--oplogns', 'rs.rs_test',
+ '--from', '127.0.0.1:' + toolTest.port].concat(commonToolArgs);
+
+ if (toolTest.isSharded) {
+ // When applying ops to a sharded cluster, mongooplog should fail
+ assert(toolTest.runTool.apply(toolTest, args) !== 0,
+ 'mongooplog should fail when running applyOps on a sharded cluster');
+
+ var expectedError =
+ 'error applying ops: applyOps not allowed through mongos';
+ assert.strContains.soon(expectedError, rawMongoProgramOutput,
+ 'mongooplog crash should output the correct error message');
+
+ assert.eq(0, db.bar.count({}),
+ 'mongooplog should not have applied any operations');
+ } else {
+ // Running with default --seconds should apply all operations
+ assert.eq(toolTest.runTool.apply(toolTest, args), 0,
+ 'mongooplog should succeed');
+
+ assert.eq(1, db.bar.count({}),
+ 'mongooplog should apply all operations');
+ assert.eq(0, db.baz.count({}), 'mongooplog should have dropped db');
+ assert.eq(1, db.bar.count({_id: 2}),
+ 'mongooplog should have applied correct ops');
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/oplog/apply_ops_tests.js b/src/mongo/gotools/test/qa-tests/jstests/oplog/apply_ops_tests.js
new file mode 100644
index 00000000000..d79aebbc680
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/oplog/apply_ops_tests.js
@@ -0,0 +1,137 @@
+/*
+ * This test creates a fake oplog and uses it to test correct behavior of
+ * --oplogns and --seconds
+ */
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var OPLOG_INSERT_CODE = 'i';
+ var OPLOG_UPDATE_CODE = 'u';
+ // unused: OPLOG_COMMAND_CODE = 'c';
+ var CURRENT_OPLOG_VERSION = 2;
+
+ // Oplog TS is in seconds since unix epoch
+ var TEST_START = Math.floor(new Date().getTime() / 1000);
+ var toolTest = getToolTest('oplogSuccessTest');
+ var commonToolArgs = getCommonToolArguments();
+
+ // Get the db that we'll insert the fake oplog into
+ var db = toolTest.db.getSiblingDB('gnr');
+ db.dropDatabase();
+
+ // Create capped collection
+ db.createCollection('rs_test', {capped: true, max: 4});
+ // Create test collection
+ db.createCollection('greatest_hits');
+
+ // Add a bunch of operations to the fakeoplog
+ var tracks = ['Welcome to the Jungle', 'Sweet Child O\' Mine', 'Patience',
+ 'Paradise City', 'Knockin\' on Heaven\'s Door', 'Civil War'];
+
+ tracks.forEach(function(track, index) {
+ db.rs_test.insert({
+ ts: new Timestamp(TEST_START - index * 10000 - 1, 1),
+ h: index,
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_INSERT_CODE,
+ o: {
+ _id: track
+ },
+ ns: 'gnr.greatest_hits'
+ });
+ });
+
+ tracks.forEach(function(track, index) {
+ db.rs_test.insert({
+ ts: new Timestamp(TEST_START - index * 10000 - 1, 2),
+ h: index,
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_UPDATE_CODE,
+ o2: {
+ _id: track
+ },
+ o: {
+ _id: track,
+ index: index
+ },
+ ns: 'gnr.greatest_hits'
+ });
+ });
+
+ var args = ['oplog', '--oplogns', 'gnr.rs_test',
+ '--from', '127.0.0.1:' + toolTest.port].concat(commonToolArgs);
+
+ assert.eq(0, db.getSiblingDB('gnr').greatest_hits.count({}),
+ 'target collection should be empty before mongooplog runs');
+
+ if (toolTest.isSharded) {
+ // When applying ops to a sharded cluster, mongooplog should fail
+ assert(toolTest.runTool.apply(toolTest, args) !== 0,
+ 'mongooplog should fail when running applyOps on a sharded cluster');
+
+ var expectedError =
+ 'error applying ops: applyOps not allowed through mongos';
+ assert.strContains.soon(expectedError, rawMongoProgramOutput,
+ 'mongooplog crash should output the correct error message');
+
+ assert.eq(0, db.greatest_hits.count({}),
+ 'mongooplog should not have applied any operations');
+ } else {
+ // Running with default --seconds should apply all operations
+ assert.eq(toolTest.runTool.apply(toolTest, args), 0,
+ 'mongooplog should succeed');
+
+ assert.eq(6, db.greatest_hits.count({}),
+ 'mongooplog should apply all operations');
+ tracks.forEach(function(track, index) {
+ assert.eq(1, db.greatest_hits.count({_id: track, index: index}),
+ 'mongooplog should have inserted a doc with _id="' + track + '" and ' +
+ 'updated it to have index=' + index);
+ });
+
+ // Running a second time should have no effect
+ assert.eq(toolTest.runTool.apply(toolTest, args), 0,
+ 'mongooplog should succeed');
+ assert.eq(6, db.greatest_hits.count({}),
+ 'mongooplog should apply all operations');
+ tracks.forEach(function(track, index) {
+ assert.eq(1, db.greatest_hits.count({_id: track, index: index}),
+ 'mongooplog should have inserted a doc with _id="' + track + '" and ' +
+ 'updated it to have index=' + index);
+ });
+
+ db.greatest_hits.drop();
+ db.createCollection('greatest_hits');
+
+ // Running with `--seconds 25000` should apply only the ops for the last
+ // three tracks, whose timestamps are roughly T - 1, T - 10001, and T - 20001
+ var last3Seconds = args.concat(['--seconds', 25000]);
+ assert.eq(toolTest.runTool.apply(toolTest, last3Seconds), 0,
+ 'mongooplog should succeed');
+
+ assert.eq(3, db.greatest_hits.count({}),
+ '`mongooplog --seconds 25000` should apply 3 operations');
+ tracks.slice(0, 3).forEach(function(track, index) {
+ assert.eq(1, db.greatest_hits.count({_id: track, index: index}),
+ 'mongooplog should have inserted a doc with _id="' + track + '" and ' +
+ 'updated it to have index=' + index);
+ });
+
+ db.greatest_hits.drop();
+ db.createCollection('greatest_hits');
+
+ // Running with `--seconds 0` should apply no operations
+ var noOpsArgs = args.concat(['--seconds', 0]);
+ assert.eq(toolTest.runTool.apply(toolTest, noOpsArgs), 0,
+ 'mongooplog should succeed');
+
+ assert.eq(0, db.greatest_hits.count({}),
+ '`mongooplog --seconds 0` should apply 0 operations');
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/oplog/asymmetric_ssl_test.js b/src/mongo/gotools/test/qa-tests/jstests/oplog/asymmetric_ssl_test.js
new file mode 100644
index 00000000000..720b6c53bf4
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/oplog/asymmetric_ssl_test.js
@@ -0,0 +1,48 @@
+/*
+ * If SSL is enabled in the config, this test starts mongod with SSL off and
+ * tests that we get a sensible failure. Otherwise, it runs with --ssl and
+ * asserts that we get a sensible failure.
+ *
+ * Note: this requires an SSL-enabled tool suite
+ */
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('oplogAsymmetricSSLTest');
+ var commonToolArgs = getCommonToolArguments();
+ var sslOpts = [
+ '--ssl',
+ '--sslPEMKeyFile', 'jstests/libs/client.pem'
+ ];
+
+ if (toolTest.useSSL) {
+ var port = 26999;
+
+ // this mongod is actually started with SSL flags because of `useSSL`
+ startMongod('--auth', '--port', port,
+ '--dbpath', MongoRunner.dataPath + 'oplogAsymmetricSSLTest2');
+
+ var args = ['mongooplog'].concat(commonToolArgs).concat(
+ '--from', '127.0.0.1:' + toolTest.port, '--host', '127.0.0.1', '--port', port);
+
+ // mongooplog run without SSL against a destination server started with SSL should fail
+ jsTest.log("Running mongooplog without SSL against mongod with SSL");
+ assert.neq(runProgram.apply(this, args), 0,
+ 'mongooplog should fail when run without SSL flags against destination host (--host) ' +
+ 'started with SSL');
+ } else {
+ // toolTest.runTool will add the underlying --host argument for the mongod started without SSL
+ args = ['oplog'].concat(commonToolArgs).concat(sslOpts).concat(
+ '--from', '127.0.0.1:' + toolTest.port);
+
+ // mongooplog run with SSL against a destination server not started with SSL should fail
+ jsTest.log("Running mongooplog with SSL against mongod without SSL");
+ assert.neq(toolTest.runTool.apply(toolTest, args), 0,
+ 'mongooplog should fail when run with SSL flags against destination host (--host) ' +
+ 'not started with SSL');
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/oplog/deprecated_flags_tests.js b/src/mongo/gotools/test/qa-tests/jstests/oplog/deprecated_flags_tests.js
new file mode 100644
index 00000000000..53dac128274
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/oplog/deprecated_flags_tests.js
@@ -0,0 +1,31 @@
+/*
+ * Tests that we provide helpful output when user tries to use flags that were
+ * deprecated in 2.7.x
+ */
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var toolTest = getToolTest('oplogDeprecatedFlagTest');
+ var commonToolArgs = getCommonToolArguments();
+ var expectedError = 'error parsing command line options: --dbpath and related ' +
+ 'flags are not supported in 3.0 tools.';
+
+ var verifyFlagFails = function(flag) {
+ var args = ['oplog'].concat(commonToolArgs).concat(flag);
+ assert(toolTest.runTool.apply(toolTest, args) !== 0,
+ 'mongooplog should fail when ' + flag + ' is specified');
+
+ assert.strContains.soon(expectedError, rawMongoProgramOutput,
+ 'mongooplog should output the correct error message');
+ };
+
+ verifyFlagFails('--dbpath');
+ verifyFlagFails('--directoryperdb');
+ verifyFlagFails('--journal');
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/oplog/drop_database_test.js b/src/mongo/gotools/test/qa-tests/jstests/oplog/drop_database_test.js
new file mode 100644
index 00000000000..e4c51ed8323
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/oplog/drop_database_test.js
@@ -0,0 +1,97 @@
+/*
+ * Tests behavior when oplog contains an operation to drop itself
+ */
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var OPLOG_INSERT_CODE = 'i';
+ var OPLOG_COMMAND_CODE = 'c';
+ var CURRENT_OPLOG_VERSION = 2;
+
+ var toolTest = getToolTest('oplogDropDbTest');
+ var commonToolArgs = getCommonToolArguments();
+
+ // Get the db that we'll insert the fake oplog into
+ var db = toolTest.db.getSiblingDB('foo');
+ db.dropDatabase();
+
+ // Create capped collection on foo
+ db.createCollection('rs_test', {capped: true, size: 4});
+
+ // Create test collection
+ db.createCollection("baz");
+
+ // Insert a doc
+ db.rs_test.insert({
+ ts: new Timestamp(),
+ h: 0,
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_INSERT_CODE,
+ o: {
+ _id: 0
+ },
+ ns: 'foo.baz'
+ });
+
+ // Drop foo, which also includes the rs_test collection that the oplog is in
+ db.rs_test.insert({
+ ts: new Timestamp(),
+ h: 1,
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_COMMAND_CODE,
+ o: {
+ dropDatabase: 1
+ },
+ ns: 'foo.$cmd'
+ });
+
+ // Recreate collection
+ db.rs_test.insert({
+ ts: new Timestamp(),
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_COMMAND_CODE,
+ ns: "foo.$cmd",
+ o: {create: "baz"},
+ });
+
+ // Insert another doc
+ db.rs_test.insert({
+ ts: new Timestamp(),
+ h: 2,
+ v: CURRENT_OPLOG_VERSION,
+ op: OPLOG_INSERT_CODE,
+ o: {
+ _id: 1
+ },
+ ns: 'foo.baz'
+ });
+
+ var args = ['oplog', '--oplogns', 'foo.rs_test',
+ '--from', '127.0.0.1:' + toolTest.port].concat(commonToolArgs);
+
+ if (toolTest.isSharded) {
+ // When applying ops to a sharded cluster, mongooplog should fail
+ assert(toolTest.runTool.apply(toolTest, args) !== 0,
+ 'mongooplog should fail when running applyOps on a sharded cluster');
+
+ var expectedError =
+ 'error applying ops: applyOps not allowed through mongos';
+ assert.strContains.soon(expectedError, rawMongoProgramOutput,
+ 'mongooplog crash should output the correct error message');
+
+ assert.eq(0, db.baz.count({}),
+ 'mongooplog should not have applied any operations');
+ } else {
+ // Running with default --seconds should apply all operations
+ assert.eq(toolTest.runTool.apply(toolTest, args), 0,
+ 'mongooplog should succeed');
+
+ assert.eq(1, db.baz.count({_id: 1}), 'should have restored the document');
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/oplog/informational_flags_test.js b/src/mongo/gotools/test/qa-tests/jstests/oplog/informational_flags_test.js
new file mode 100644
index 00000000000..a6536ecba4a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/oplog/informational_flags_test.js
@@ -0,0 +1,28 @@
+/*
+ * Tests that informational flags such as --help give reasonable output.
+ */
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var toolTest = getToolTest('oplogInformationalFlagTest');
+ var commonToolArgs = getCommonToolArguments();
+
+ var verifyFlagOutput = function(flag, expected) {
+ var args = ['oplog'].concat(commonToolArgs).concat(flag);
+ assert.eq(toolTest.runTool.apply(toolTest, args), 0,
+ 'mongooplog should succeed with ' + flag);
+
+ assert.strContains.soon(expected, rawMongoProgramOutput,
+ 'mongooplog ' + flag + " should produce output that contains '" +
+ expected + "'");
+ };
+
+ verifyFlagOutput('--help', 'Usage:');
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/oplog/oplog_server_ko_test.js b/src/mongo/gotools/test/qa-tests/jstests/oplog/oplog_server_ko_test.js
new file mode 100644
index 00000000000..9bd5f0996bb
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/oplog/oplog_server_ko_test.js
@@ -0,0 +1,52 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var toolTest = getToolTest('OplogServerKOTest');
+ var commonToolArgs = getCommonToolArguments();
+
+ // Overwrite global db object for startParallelShell()
+ db = toolTest.db.getSiblingDB('foo'); // eslint-disable-line no-native-reassign
+ db.dropDatabase();
+
+ var port = 26999;
+ startMongod('--auth', '--port', port,
+ '--dbpath', MongoRunner.dataPath + 'oplogServerKOTest2');
+
+ var start = Date.now();
+
+ // Insert into a fake oplog as fast as possible for 20 seconds
+ while (Date.now() - start < 20000) {
+ db.test.insert({breakfast: 'bacon'}, {w: 0});
+ }
+
+ // Run parallel shell that waits for mongooplog to start and kills the
+ // server
+ if (!toolTest.isReplicaSet || !toolTest.authCommand) {
+ // shutdownServer() is flaky on replica sets because of the localhost
+ // exception, so do a stepdown instead
+ print('Nothing to do: can only run server KO test with replica set + auth');
+ return;
+ }
+ // Start a parallel shell to kill the server
+ startParallelShell(
+ 'sleep(1000); ' +
+ (toolTest.authCommand || '') +
+ 'print(\'Killing server!\');' +
+ 'db.getSiblingDB(\'admin\').shutdownServer({ force: true });');
+
+ var args = ['oplog',
+ '--from', '127.0.0.1:' + toolTest.port].concat(commonToolArgs);
+
+ assert(toolTest.runTool.apply(toolTest, args) !== 0,
+ 'mongooplog should crash gracefully when remote server dies');
+
+ var expected = 'error communicating with server';
+ assert.strContains.soon(expected, rawMongoProgramOutput,
+ 'Should output sensible error message when host server dies');
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/oplog/real_oplog_tests.js b/src/mongo/gotools/test/qa-tests/jstests/oplog/real_oplog_tests.js
new file mode 100644
index 00000000000..bf16b6da742
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/oplog/real_oplog_tests.js
@@ -0,0 +1,75 @@
+/*
+ * Tests correct behavior when operating against a live oplog
+ */
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var toolTest = getToolTest('oplogRealOplogTest');
+ var commonToolArgs = getCommonToolArguments();
+
+ // Get the db that we'll insert operations into
+ var db = toolTest.db.getSiblingDB('gnr');
+ db.dropDatabase();
+
+ // Sleep for a long time so we can safely use --seconds to get the
+ // right operations to verify that the `dropDatabase` and subsequent
+ // inserts and updates get applied
+ db.test.insert({x: 1});
+
+ var LONG_SLEEP_TIME = 5000;
+ sleep(LONG_SLEEP_TIME);
+
+ db.dropDatabase();
+
+ // Do 6 inserts and 6 updates
+ var tracks = ['Welcome to the Jungle', 'Sweet Child O\' Mine', 'Patience',
+ 'Paradise City', 'Knockin\' on Heaven\'s Door', 'Civil War'];
+
+ tracks.forEach(function(track) {
+ db.greatest_hits.insert({
+ _id: track
+ });
+ });
+
+ tracks.forEach(function(track, index) {
+ db.greatest_hits.update({_id: track}, {$set: {index: index}});
+ });
+
+ var args = ['oplog', '--seconds', '1',
+ '--from', '127.0.0.1:' + toolTest.port].concat(commonToolArgs);
+
+ if (toolTest.isSharded) {
+ // When applying ops to a sharded cluster, mongooplog should fail
+ assert(toolTest.runTool.apply(toolTest, args) !== 0,
+ 'mongooplog should fail when running applyOps on a sharded cluster');
+
+ var expectedError =
+ 'error applying ops: applyOps not allowed through mongos';
+ assert.strContains.soon(expectedError, rawMongoProgramOutput,
+ 'mongooplog crash should output the correct error message');
+
+ assert.eq(0, db.greatest_hits.count({}),
+ 'mongooplog should not have applied any operations');
+ } else {
+ // Running should apply the drop followed by 6 inserts and 6 updates,
+ // but not the { x: 1 } insert.
+ assert.eq(toolTest.runTool.apply(toolTest, args), 0,
+ 'mongooplog should succeed');
+
+ assert.eq(6, db.greatest_hits.count({}),
+ 'mongooplog should apply all operations');
+ assert.eq(0, db.test.count(), 'mongooplog should not have restored an ' +
+ 'insert that happened before the --seconds cutoff');
+ tracks.forEach(function(track, index) {
+ assert.eq(1, db.greatest_hits.count({_id: track, index: index}),
+ 'mongooplog should have inserted a doc with _id="' + track + '" and ' +
+ 'updated it to have index=' + index);
+ });
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/oplog/unreachable_host_tests.js b/src/mongo/gotools/test/qa-tests/jstests/oplog/unreachable_host_tests.js
new file mode 100644
index 00000000000..47a0a48c489
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/oplog/unreachable_host_tests.js
@@ -0,0 +1,46 @@
+/*
+ * Tests behavior when the host provided in --host or in --from is unreachable
+ */
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ // unused: var CURRENT_MONGOD_RELEASE = '3.0';
+
+ var toolTest = getToolTest('oplogUnreachableHostsTest');
+ var commonToolArgs = getCommonToolArguments();
+
+ var fromUnreachableError = 'error connecting to source db';
+ var args = ['oplog'].concat(commonToolArgs).concat('--from',
+ 'doesnte.xist:27999');
+ assert(toolTest.runTool.apply(toolTest, args) !== 0,
+ 'mongooplog should fail when --from is not reachable');
+
+ assert.strContains.soon(fromUnreachableError, rawMongoProgramOutput,
+ 'mongooplog should output correct error when "from" is not reachable');
+
+ // Clear output
+ clearRawMongoProgramOutput();
+
+ /** Overwrite so toolTest.runTool doesn't append --host */
+ toolTest.runTool = function() {
+ arguments[0] = 'mongo' + arguments[0];
+ return runMongoProgram.apply(null, arguments);
+ };
+
+ args = ['oplog'].concat(commonToolArgs).concat('--host', 'doesnte.xist',
+ '--from', '127.0.0.1:' + toolTest.port);
+ assert(toolTest.runTool.apply(toolTest, args) !== 0,
+ 'mongooplog should fail when --host is not reachable');
+
+ var output = rawMongoProgramOutput();
+ var hostUnreachableError = 'error connecting to destination db';
+
+ assert(output.indexOf(hostUnreachableError) !== -1,
+ 'mongooplog should output correct error when "host" is not reachable');
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/15k_collections.js b/src/mongo/gotools/test/qa-tests/jstests/restore/15k_collections.js
new file mode 100644
index 00000000000..7bdbaceab60
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/15k_collections.js
@@ -0,0 +1,38 @@
+// this tests that we can restore a large number of collections, resolving
+// an issue raised by TOOLS-1088
+// @tags: [requires_many_files, requires_large_ram]
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('15k_collections');
+ var commonToolArgs = getCommonToolArguments();
+
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+
+ for (var i=0; i<=15000; i++) {
+ collName = "Coll" + i;
+ dbOne.createCollection(collName);
+ }
+
+ // dump it
+ var dumpTarget = '15k_collections_dump';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // restore it
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore to empty DB should have returned successfully");
+
+ // success
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/24_to_28.js b/src/mongo/gotools/test/qa-tests/jstests/restore/24_to_28.js
new file mode 100644
index 00000000000..02dea923cfa
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/24_to_28.js
@@ -0,0 +1,70 @@
+// This test requires mongo 2.4.x, and mongo 3.0.0 releases
+// @tags: [requires_mongo_24, requires_mongo_30]
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // skip tests requiring wiredTiger storage engine on pre 3.0 mongod
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ return;
+ }
+
+ // Skip this test if running with SSL turned on, because the common tool args are not
+ // compatible with 2.4 servers.
+ if (TestData && TestData.useSSL) {
+ return;
+ }
+ // Tests using mongorestore to restore a dump from a 2.4 mongod to a 3.0 mongod.
+
+ jsTest.log('Testing running mongorestore restoring data from a 2.4 mongod to'+
+ ' a 3.0 mongod');
+
+ var toolTest = new ToolTest('24_to_28', {binVersion: '2.4'});
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = '24_to_28_dump';
+ resetDbpath(dumpTarget);
+
+ // the db and collection we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ // insert some documents
+ for (var i = 0; i < 50; i++) {
+ testColl.insert({_id: i});
+ }
+ // sanity check the insert worked
+ assert.eq(50, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // restart the mongod as a 3.0
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ delete toolTest.options.binVersion;
+ toolTest.startDB('foo');
+
+ // refresh the db and coll reference
+ testDB = toolTest.db.getSiblingDB('test');
+ testColl = testDB.coll;
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore'].concat(getRestoreTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(50, testColl.count());
+ for (i = 0; i < 50; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/26_to_28.js b/src/mongo/gotools/test/qa-tests/jstests/restore/26_to_28.js
new file mode 100644
index 00000000000..3e03aa6bf18
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/26_to_28.js
@@ -0,0 +1,65 @@
+// This test requires mongo 2.6.x, and mongo 3.0.0 releases
+// @tags: [requires_mongo_26, requires_mongo_30]
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // skip tests requiring wiredTiger storage engine on pre 3.0 mongod
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ return;
+ }
+
+ // Tests using mongorestore to restore a dump from a 2.6 mongod to a 3.0 mongod.
+ jsTest.log('Testing running mongorestore restoring data from a 2.6 mongod to'+
+ ' a 3.0 mongod');
+
+ var toolTest = new ToolTest('26_to_28', {binVersion: '2.6'});
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = '26_to_28_dump';
+ resetDbpath(dumpTarget);
+
+ // the db and collection we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ // insert some documents
+ for (var i = 0; i < 50; i++) {
+ testColl.insert({_id: i});
+ }
+ // sanity check the insert worked
+ assert.eq(50, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // restart the mongod as a 3.0
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ delete toolTest.options.binVersion;
+ toolTest.startDB('foo');
+
+ // refresh the db and coll reference
+ testDB = toolTest.db.getSiblingDB('test');
+ testColl = testDB.coll;
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore'].concat(getRestoreTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(50, testColl.count());
+ for (i = 0; i < 50; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/28_to_26.js b/src/mongo/gotools/test/qa-tests/jstests/restore/28_to_26.js
new file mode 100644
index 00000000000..01b2a50a24a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/28_to_26.js
@@ -0,0 +1,66 @@
+// This test requires mongo 2.6.x, and mongo 3.0.0 releases
+// @tags: [requires_mongo_26, requires_mongo_30]
+(function() {
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // skip tests requiring wiredTiger storage engine on pre 2.8 mongod
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ return;
+ }
+
+ // Tests using mongorestore to restore a dump from a 2.8 mongod to a 2.6 mongod.
+
+ jsTest.log('Testing running mongorestore restoring data from a 2.8 mongod to'+
+ ' a 2.6 mongod');
+
+ var toolTest = new ToolTest('28_to_26');
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = '28_to_26_dump';
+ resetDbpath(dumpTarget);
+
+ // the db and collection we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ // insert some documents
+ for (var i = 0; i < 50; i++) {
+ testColl.insert({_id: i});
+ }
+ // sanity check the insert worked
+ assert.eq(50, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the database
+ testDB.dropDatabase();
+
+ // restart the mongod as a 2.6
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ toolTest.options = toolTest.options || {};
+ toolTest.options.binVersion = '2.6';
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // refresh the db and coll reference
+ testDB = toolTest.db.getSiblingDB('test');
+ testColl = testDB.coll;
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore'].concat(getRestoreTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(50, testColl.count());
+ for (i = 0; i < 50; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/archive_stdout.js b/src/mongo/gotools/test/qa-tests/jstests/restore/archive_stdout.js
new file mode 100644
index 00000000000..c8566d7f606
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/archive_stdout.js
@@ -0,0 +1,50 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ var toolTest = getToolTest('archive_stdout');
+ var baseArgs = getCommonToolArguments();
+ baseArgs = baseArgs.concat('--port', toolTest.port);
+
+ if (toolTest.useSSL) {
+ baseArgs = baseArgs.concat([
+ '--ssl',
+ '--sslPEMKeyFile', 'jstests/libs/server.pem',
+ '--sslCAFile', 'jstests/libs/ca.pem',
+ '--sslAllowInvalidHostnames']);
+ }
+ if (dump_targets === 'gzip') {
+ baseArgs = baseArgs.concat('--gzip');
+ }
+ var dumpArgs = ['mongodump', '--archive'].concat(baseArgs);
+ var restoreArgs = ['mongorestore', '--archive', '--drop'].concat(baseArgs);
+
+ dumpArgs[0] = 'PATH=.:$PATH ' + dumpArgs[0];
+ restoreArgs[0] = 'PATH=.:$PATH ' + restoreArgs[0];
+ if (_isWindows()) {
+ dumpArgs[0] += '.exe';
+ restoreArgs[0] += '.exe';
+ }
+
+ var testDb = toolTest.db;
+ testDb.dropDatabase();
+ for (var i = 0; i < 500; i++) {
+ testDb.foo.insert({i: i});
+ testDb.bar.insert({i: i*5});
+ }
+ assert.eq(500, testDb.foo.count(), 'foo should have our test documents');
+ assert.eq(500, testDb.bar.count(), 'bar should have our test documents');
+
+ var ret = runProgram('bash', '-c', dumpArgs.concat('|', restoreArgs).join(' '));
+ assert.eq(0, ret, "bash execution should succeed");
+
+ for (i = 0; i < 500; i++) {
+ assert.eq(1, testDb.foo.find({i: i}).count(), 'document #'+i+' not in foo');
+ assert.eq(1, testDb.bar.find({i: i*5}).count(), 'document #'+i+' not in bar');
+ }
+ assert.eq(500, testDb.foo.count(), 'foo should have our test documents');
+ assert.eq(500, testDb.bar.count(), 'bar should have our test documents');
+
+ testDb.dropDatabase();
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/bad_options.js b/src/mongo/gotools/test/qa-tests/jstests/restore/bad_options.js
new file mode 100644
index 00000000000..1639dfa645f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/bad_options.js
@@ -0,0 +1,54 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // Tests running mongorestore with bad command line options.
+
+ jsTest.log('Testing running mongorestore with bad'+
+ ' command line options');
+
+ var toolTest = new ToolTest('incompatible_flags');
+ toolTest.startDB('foo');
+
+ // run restore with both --objcheck and --noobjcheck specified
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--objcheck', '--noobjcheck']
+ .concat(getRestoreTarget('restore/testdata/dump_empty')));
+ assert.neq(0, ret);
+
+ // run restore with --oplogLimit with a bad timestamp
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay', '--oplogLimit',
+ 'xxx']
+ .concat(getRestoreTarget('restore/testdata/dump_with_oplog')));
+ assert.neq(0, ret);
+
+ // run restore with a negative --w value
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--w', '-1']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_empty')));
+ assert.neq(0, ret);
+
+ // run restore with an invalid db name
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'billy.crystal']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb')));
+ assert.neq(0, ret);
+
+ // run restore with an invalid collection name
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test',
+ '--collection', '$money']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankcoll/blank.bson')));
+ assert.neq(0, ret);
+
+ // run restore with an invalid verbosity value
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '-v', 'torvalds']
+ .concat(getRestoreTarget('restore/testdata/dump_empty')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/blank_collection_bson.js b/src/mongo/gotools/test/qa-tests/jstests/restore/blank_collection_bson.js
new file mode 100644
index 00000000000..e3d2f62f037
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/blank_collection_bson.js
@@ -0,0 +1,43 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ // Tests using mongorestore to restore data from a blank collection
+ // file, with both a missing and blank metadata file.
+
+ jsTest.log('Testing restoration from a blank collection file');
+
+ var toolTest = getToolTest('blank_collection_bson');
+ var commonToolArgs = getCommonToolArguments();
+
+ // run the restore with the blank collection file and no
+ // metadata file. it should succeed, but insert nothing.
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test',
+ '--collection', 'blank']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankcoll/blank.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(0, toolTest.db.getSiblingDB('test').blank.count());
+
+ // run the restore with the blank collection file and a blank
+ // metadata file. it should succeed, but insert nothing.
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test',
+ '--collection', 'blank']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankcoll/blank_metadata.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(0, toolTest.db.getSiblingDB('test').blank.count());
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/blank_db.js b/src/mongo/gotools/test/qa-tests/jstests/restore/blank_db.js
new file mode 100644
index 00000000000..1d3c85e3e0b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/blank_db.js
@@ -0,0 +1,29 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets === "archive") {
+    print('skipping test incompatible with archiving');
+ return assert(true);
+ }
+
+ // Tests using mongorestore to restore data from a blank db directory.
+
+ jsTest.log('Testing restoration from a blank db directory');
+
+ var toolTest = getToolTest('blank_db');
+ var commonToolArgs = getCommonToolArguments();
+
+ // run the restore with the blank db directory. it should succeed, but
+ // insert nothing.
+ var ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/conflicting_auth_schema_version.js b/src/mongo/gotools/test/qa-tests/jstests/restore/conflicting_auth_schema_version.js
new file mode 100644
index 00000000000..638a170c5d8
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/conflicting_auth_schema_version.js
@@ -0,0 +1,136 @@
+// This test requires mongo 2.6.x releases
+// @tags: [requires_mongo_26]
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // Tests using mongorestore to restore a dump containing users. If there is
+ // conflicting authSchemaVersion in the admin.system.version document, it
+ // should be ignored, and the restore should complete successfully.
+
+ jsTest.log('Testing restoring a dump with a potentially conflicting'+
+ ' authSchemaVersion in the database');
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ var runTest = function(sourceDBVersion, dumpVersion, restoreVersion, destDBVersion, shouldSucceed) {
+
+ jsTest.log('Running with sourceDBVersion=' + (sourceDBVersion || 'latest') +
+ ', dumpVersion=' + (dumpVersion || 'latest') + ', restoreVersion=' +
+ (restoreVersion || 'latest') + ', and destDBVersion=' +
+ (destDBVersion || 'latest') + ', expected to pass=' + shouldSucceed);
+
+ var toolTest = new ToolTest('conflicting_auth_schema_version',
+ {binVersion: sourceDBVersion, auth: ''});
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'conflicting_auth_schema_version_dump';
+ resetDbpath(dumpTarget);
+
+ // the admin db, and the non-admin db we'll be using
+ var adminDB = toolTest.db.getSiblingDB('admin');
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create a user admin
+ adminDB.createUser({
+ user: 'admin',
+ pwd: 'password',
+ roles: [
+ {role: 'userAdminAnyDatabase', db: 'admin'},
+ {role: 'readWriteAnyDatabase', db: 'admin'},
+ {role: 'backup', db: 'admin'},
+ ],
+ });
+ var authInfo = {user: 'admin', pwd: 'password'};
+ if (sourceDBVersion === "2.6") {
+ authInfo.mechanism = "MONGODB-CR";
+ }
+ assert.eq(1, adminDB.auth(authInfo));
+
+ // add some data
+ for (var i = 0; i < 10; i++) {
+ testDB.data.insert({_id: i});
+ }
+
+ // sanity check the data was inserted
+ assert.eq(10, testDB.data.count());
+
+ // dump all the data
+    var args = ['mongodump' + (dumpVersion ? ('-'+dumpVersion) : ''),
+ '--username', 'admin',
+ '--password', 'password', '--port', toolTest.port]
+ .concat(getDumpTarget(dumpTarget));
+ if (sourceDBVersion === "2.6") {
+ args.push("--authenticationMechanism=MONGODB-CR");
+ }
+ var ret = runMongoProgram.apply(this, args);
+ assert.eq(0, ret);
+
+ // restart the mongod, with a clean db path
+ stopMongod(toolTest.port);
+ resetDbpath(toolTest.dbpath);
+ toolTest.m = null;
+ toolTest.db = null;
+ toolTest.options.binVersion = destDBVersion;
+ toolTest.startDB('foo');
+
+ // refresh the db references
+ adminDB = toolTest.db.getSiblingDB('admin');
+ testDB = toolTest.db.getSiblingDB('test');
+
+ // create a new user admin
+ adminDB.createUser({
+ user: 'admin28',
+ pwd: 'password',
+ roles: [
+ {role: 'userAdminAnyDatabase', db: 'admin'},
+ {role: 'readWriteAnyDatabase', db: 'admin'},
+ {role: 'restore', db: 'admin'},
+ ],
+ });
+
+ var authInfoDest = {user: 'admin28', pwd: 'password'};
+ if (destDBVersion === "2.6") {
+ authInfoDest.mechanism = "MONGODB-CR";
+ }
+ assert.eq(1, adminDB.auth(authInfoDest));
+
+ // do a full restore
+ args = ['mongorestore' + (restoreVersion ? ('-'+restoreVersion) : ''),
+ '--username', 'admin28',
+ '--password', 'password',
+ '--port', toolTest.port,
+ '--stopOnError']
+ .concat(getRestoreTarget(dumpTarget));
+
+ ret = runMongoProgram.apply(this, args);
+
+ if (shouldSucceed) {
+ assert.eq(0, ret);
+ // make sure the data and users are all there
+ assert.eq(10, testDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, testDB.data.count({_id: i}));
+ }
+ var users = adminDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'admin' || users[1].user === 'admin');
+ assert(users[0].user === 'admin28' || users[1].user === 'admin28');
+ } else {
+ assert.neq(0, ret);
+ }
+ // success
+ toolTest.stop();
+ };
+
+ // 'undefined' triggers latest
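+  // Only the latest-dump-into-2.6 combination below is expected to fail,
+  // presumably because the auth schema recorded by the newer server cannot
+  // be applied to a 2.6 destination.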
+ runTest('2.6', '2.6', undefined, '2.6', true);
+ runTest('2.6', '2.6', undefined, undefined, true);
+ runTest('2.6', undefined, undefined, undefined, true);
+ runTest(undefined, undefined, undefined, '2.6', false);
+ runTest(undefined, undefined, undefined, undefined, true);
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/different_collection.js b/src/mongo/gotools/test/qa-tests/jstests/restore/different_collection.js
new file mode 100644
index 00000000000..444b8d8115a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/different_collection.js
@@ -0,0 +1,89 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests using mongorestore to restore data to a different collection
+  // than it was dumped from.
+
+ jsTest.log('Testing restoration to a different collection');
+
+ if (dump_targets === 'archive') {
+    jsTest.log('Skipping test; unsupported against archive targets');
+ return assert(true);
+ }
+
+ var toolTest = getToolTest('different_collection');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'different_collection_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will dump from
+ var sourceDB = toolTest.db.getSiblingDB('source');
+ // the collection we will dump from
+ var sourceCollName = 'sourceColl';
+
+ // insert a bunch of data
+ for (var i = 0; i < 500; i++) {
+ sourceDB[sourceCollName].insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(500, sourceDB[sourceCollName].count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // restore just the collection into a different collection
+ // in the same database
+ var destCollName = 'destColl';
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'source',
+ '--collection', destCollName]
+ .concat(getRestoreTarget(dumpTarget+'/source/sourceColl.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(500, sourceDB[destCollName].count());
+ for (i = 0; i < 500; i++) {
+ assert.eq(1, sourceDB[destCollName].count({_id: i}));
+ }
+
+ // restore just the collection into a similarly-named collection
+ // in a different database
+ var destDB = toolTest.db.getSiblingDB('dest');
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dest',
+ '--collection', sourceCollName]
+ .concat(getRestoreTarget(dumpTarget+'/source/sourceColl.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(500, destDB[sourceCollName].count());
+ for (i = 0; i < 500; i++) {
+ assert.eq(1, destDB[sourceCollName].count({_id: i}));
+ }
+
+ // restore just the collection into a different collection
+ // in a different database
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dest',
+ '--collection', destCollName]
+ .concat(getRestoreTarget(dumpTarget+'/source/sourceColl.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(500, destDB[destCollName].count());
+ for (i = 0; i < 500; i++) {
+ assert.eq(1, destDB[destCollName].count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/different_db.js b/src/mongo/gotools/test/qa-tests/jstests/restore/different_db.js
new file mode 100644
index 00000000000..da55fed41c7
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/different_db.js
@@ -0,0 +1,84 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests using mongorestore to restore data to a different db than
+ // it was dumped from.
+
+ jsTest.log('Testing restoration to a different db');
+
+ if (dump_targets === 'archive') {
+    jsTest.log('Skipping test; unsupported against archive targets');
+ return assert(true);
+ }
+
+ var toolTest = getToolTest('different_db');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'different_db_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will dump from
+ var sourceDB = toolTest.db.getSiblingDB('source');
+ // the db we will restore to
+ var destDB = toolTest.db.getSiblingDB('dest');
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // we'll use two collections
+ var collNames = ['coll1', 'coll2'];
+
+ // insert a bunch of data
+ collNames.forEach(function(collName) {
+ for (var i = 0; i < 500; i++) {
+ sourceDB[collName].insert({_id: i+'_'+collName});
+ }
+ // sanity check the insertion worked
+ assert.eq(500, sourceDB[collName].count());
+ });
+
+ // dump the data
+ ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // restore the data to a different db
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'dest']
+ .concat(getRestoreTarget(dumpTarget+'/source'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ collNames.forEach(function(collName) {
+ assert.eq(500, destDB[collName].count());
+ for (var i = 0; i < 500; i++) {
+ assert.eq(1, destDB[collName].count({_id: i+'_'+collName}));
+ }
+ });
+
+ // restore the data to another different db
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--nsFrom', '$db$.$collection$',
+ '--nsTo', 'otherdest.$collection$_$db$']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
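+  // To illustrate the rename above (a sketch, not asserted directly):
+  // '$db$.$collection$' -> 'otherdest.$collection$_$db$' maps source.coll1
+  // to otherdest.coll1_source and source.coll2 to otherdest.coll2_source,
+  // which is what the checks below verify.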
+ destDB = toolTest.db.getSiblingDB('otherdest');
+ collNames.forEach(function(collName) {
+ assert.eq(500, destDB[collName+'_source'].count());
+ for (var i = 0; i < 500; i++) {
+ assert.eq(1, destDB[collName+'_source'].count({_id: i+'_'+collName}));
+ }
+ });
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/drop_authenticated_user.js b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_authenticated_user.js
new file mode 100644
index 00000000000..12923e868aa
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_authenticated_user.js
@@ -0,0 +1,107 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // Tests running mongorestore with --drop and --restoreDbUsersAndRoles,
+ // in addition to --auth, and makes sure the authenticated user does not
+ // get dropped before it can complete the restore job.
+
+ jsTest.log('Testing dropping the authenticated user with mongorestore');
+
+ var toolTest = new ToolTest('drop_authenticated_user', {auth: ''});
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'drop_authenticated_user_dump';
+ resetDbpath(dumpTarget);
+
+ // we'll use the admin db so that the user we are restoring as
+ // is part of the db we are restoring
+ var adminDB = toolTest.db.getSiblingDB('admin');
+
+ // create the users we'll need for the dump
+ adminDB.createUser({
+ user: 'admin',
+ pwd: 'password',
+ roles: [
+ {role: 'userAdmin', db: 'admin'},
+ {role: 'readWrite', db: 'admin'},
+ ],
+ });
+ adminDB.auth('admin', 'password');
+
+ adminDB.createUser({
+ user: 'backup',
+ pwd: 'password',
+ roles: [{role: 'backup', db: 'admin'}],
+ });
+
+ // create a role
+ adminDB.createRole({
+ role: 'extraRole',
+ privileges: [{
+ resource: {db: 'admin', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+
+ // insert some data
+ for (var i = 0; i < 10; i++) {
+ adminDB.data.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(10, adminDB.data.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump',
+ '--username', 'backup',
+ '--password', 'password']
+ .concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop all the data, but not the users or roles
+ adminDB.data.remove({});
+ // sanity check the removal worked
+ assert.eq(0, adminDB.data.count());
+
+ // now create the restore user, so that we can use it for the restore but it is
+ // not part of the dump
+ adminDB.createUser({
+ user: 'restore',
+ pwd: 'password',
+ roles: [{role: 'restore', db: 'admin'}],
+ });
+
+ // insert some data to be removed when --drop is run
+ for (i = 10; i < 20; i++) {
+ adminDB.data.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(10, adminDB.data.count());
+
+ // restore the data, specifying --drop
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--drop',
+ '--username', 'restore',
+ '--password', 'password']
+ .concat(getRestoreTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // make sure the existing data was removed, and replaced with the dumped data
+ assert.eq(10, adminDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, adminDB.data.count({_id: i}));
+ }
+
+ // make sure the correct roles and users exist - that the restore user was dropped
+ var users = adminDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'backup' || users[1].user === 'backup');
+ assert(users[0].user === 'admin' || users[1].user === 'admin');
+ assert.eq(1, adminDB.getRoles().length);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/drop_nonexistent_db.js b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_nonexistent_db.js
new file mode 100644
index 00000000000..fded2c8706e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_nonexistent_db.js
@@ -0,0 +1,56 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that running mongorestore with --drop on a database with
+ // nothing to drop does not error out, and completes the
+ // restore successfully.
+
+ jsTest.log('Testing restoration with --drop on a nonexistent db');
+
+ var toolTest = getToolTest('drop_nonexistent_db');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'drop_nonexistent_db_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // insert a bunch of data
+ for (var i = 0; i < 500; i++) {
+ testDB.coll.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(500, testDB.coll.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database we are using
+ testDB.dropDatabase();
+ // sanity check the drop worked
+ assert.eq(0, testDB.coll.count());
+
+ // restore the data with --drop
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--drop']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(500, testDB.coll.count());
+ for (i = 0; i < 500; i++) {
+ assert.eq(1, testDB.coll.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/drop_one_collection.js b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_one_collection.js
new file mode 100644
index 00000000000..4f1c16fee3c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_one_collection.js
@@ -0,0 +1,86 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that running mongorestore with --drop and --collection leaves data
+ // in other collections untouched (that --drop only applies to the
+ // specified collection).
+
+ jsTest.log('Testing restoration with --drop and --collection, with data in'+
+ ' other collections');
+
+ var toolTest = getToolTest('drop_one_collection');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'drop_one_collection_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will take the dump from
+ var sourceDB = toolTest.db.getSiblingDB('source');
+
+ // dump from two different collections, even though we'll
+ // only be restoring one.
+ var collNames = ['coll1', 'coll2'];
+ collNames.forEach(function(collName) {
+ for (var i = 0; i < 500; i++) {
+ sourceDB[collName].insert({_id: i+'_'+collName});
+ }
+ // sanity check the insertion worked
+ assert.eq(500, sourceDB[collName].count());
+ });
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop and replace the data
+ collNames.forEach(function(collName) {
+ sourceDB[collName].drop();
+ // sanity check the drop worked
+ assert.eq(0, sourceDB[collName].count());
+
+ // insert a disjoint set of data from the dump
+ for (var i = 500; i < 600; i++) {
+ sourceDB[collName].insert({_id: i+'_'+collName});
+ }
+ // sanity check the insertion worked
+ assert.eq(100, sourceDB[collName].count());
+ });
+
+ // insert data into the same collections in a different db
+ var otherDB = toolTest.db.getSiblingDB('other');
+ collNames.forEach(function(collName) {
+ for (var i = 500; i < 600; i++) {
+ otherDB[collName].insert({_id: i+'_'+collName});
+ }
+ // sanity check the insertion worked
+ assert.eq(100, otherDB[collName].count());
+ });
+
+ // restore with --drop and --collection
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--drop',
+ '--db', 'source',
+ '--collection', 'coll1']
+ .concat(getRestoreTarget(dumpTarget+'/source/coll1.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure that the dumped data replaced the old data in only
+ // the specified collection, and all other data was left untouched
+ assert.eq(500, sourceDB.coll1.count());
+ for (var i = 0; i < 500; i++) {
+ assert.eq(1, sourceDB.coll1.count({_id: i+'_coll1'}));
+ }
+ assert.eq(100, sourceDB.coll2.count());
+ assert.eq(100, otherDB.coll1.count());
+ assert.eq(100, otherDB.coll2.count());
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/drop_with_data.js b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_with_data.js
new file mode 100644
index 00000000000..9c43d105d88
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/drop_with_data.js
@@ -0,0 +1,73 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that running mongorestore with --drop drops existing data
+ // before restoring.
+
+ jsTest.log('Testing restoration with --drop on existing data');
+
+ var toolTest = getToolTest('drop_with_data');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'drop_with_data_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // we'll use two collections, to make sure they both
+ // get dropped appropriately
+ var collNames = ['coll1', 'coll2'];
+
+ // insert a bunch of data to be dumped
+ collNames.forEach(function(collName) {
+ for (var i = 0; i < 500; i++) {
+ testDB[collName].insert({_id: i+'_'+collName});
+ }
+ // sanity check the insertion worked
+ assert.eq(500, testDB[collName].count());
+ });
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop all the data, and replace it with different data
+ collNames.forEach(function(collName) {
+ testDB[collName].drop();
+ // sanity check the drop worked
+ assert.eq(0, testDB[collName].count());
+
+ for (var i = 500; i < 600; i++) {
+ testDB[collName].insert({_id: i+'_'+collName});
+ }
+ // sanity check the insertion worked
+ assert.eq(100, testDB[collName].count());
+ });
+
+ // restore with --drop. the current data in all collections should
+ // be removed and replaced with the dumped data
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--drop']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the dumped data was restored, and the old data
+ // was dropped
+ collNames.forEach(function(collName) {
+ assert.eq(500, testDB[collName].count());
+ for (var i = 0; i < 500; i++) {
+ assert.eq(1, testDB[collName].count({_id: i+'_'+collName}));
+ }
+ });
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/duplicate_keys.js b/src/mongo/gotools/test/qa-tests/jstests/restore/duplicate_keys.js
new file mode 100644
index 00000000000..3ae4fb0f9ac
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/duplicate_keys.js
@@ -0,0 +1,73 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests using mongorestore to restore a mix of existing and
+ // non-existing documents to a collection, so we can make sure
+ // all new documents are actually added.
+
+ jsTest.log('Testing restoration of a dump on top of existing documents');
+
+ var toolTest = getToolTest('dupe_restore');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'dupe_restore_dump';
+ resetDbpath(dumpTarget);
+
+  // we'll insert test data into a single collection with known _ids
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ var testColl = dbOne.duplicates;
+
+ // insert a bunch of data
+ for (var i = 0; i < 50; i++) {
+ testColl.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(50, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+  // remove a fixed, scattered set of documents
+ var removeDocs = function() {
+ testColl.remove({_id: 0});
+ testColl.remove({_id: 5});
+ testColl.remove({_id: 6});
+ testColl.remove({_id: 9});
+ testColl.remove({_id: 12});
+ testColl.remove({_id: 27});
+ testColl.remove({_id: 40});
+ testColl.remove({_id: 46});
+ testColl.remove({_id: 47});
+ testColl.remove({_id: 49});
+ assert.eq(40, testColl.count());
+ };
+ removeDocs();
+
+ // restore the db with default settings
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the restore worked, and all of the removed keys were restored
+ assert.eq(50, testColl.count(), "some documents were not restored with default settings");
+
+ // now check an array of batch sizes
+ for (i = 1; i < 100; i++) {
+ removeDocs();
+ ret = toolTest.runTool.apply(toolTest, ['restore', "--batchSize", String(i)]
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(50, testColl.count(), "some documents were not restored for batchSize="+i);
+ }
+
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/empty_users_and_roles.js b/src/mongo/gotools/test/qa-tests/jstests/restore/empty_users_and_roles.js
new file mode 100644
index 00000000000..24a3032aab5
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/empty_users_and_roles.js
@@ -0,0 +1,33 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets === "archive") {
+    print('skipping test incompatible with archiving');
+ return assert(true);
+ }
+
+ // Tests running mongorestore with --restoreDbUsersAndRoles, with
+ // no users or roles in the dump.
+
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles with'+
+ ' no users or roles in the dump');
+
+ var toolTest = getToolTest('empty_users_and_roles');
+ var commonToolArgs = getCommonToolArguments();
+
+ // run the restore with no users or roles. it should succeed, but create no
+ // users or roles
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test',
+ '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/extended_json_metadata.js b/src/mongo/gotools/test/qa-tests/jstests/restore/extended_json_metadata.js
new file mode 100644
index 00000000000..59d9997262e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/extended_json_metadata.js
@@ -0,0 +1,42 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ // Tests that using mongorestore on a collection with extended json types
+ // in the metadata (both indexes and options) is handled gracefully.
+
+ jsTest.log('Testing that restoration of extended JSON collection options works.');
+
+ var toolTest = getToolTest('extended_json_metadata_restore');
+ var commonToolArgs = getCommonToolArguments();
+ var testDB = toolTest.db.getSiblingDB('test');
+ assert.eq(testDB.changelog.exists(), null, "collection already exists in db");
+
+ // run a restore against the mongos
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_extended_json_options'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "the restore does not crash");
+
+ var collectionOptionsFromDB = testDB.changelog.exists();
+ printjson(collectionOptionsFromDB);
+ assert.eq(collectionOptionsFromDB.options.capped, true, "capped option should be restored");
+  // MongoDB might fudge the collection max values for different storage engines,
+ // so we need some wiggle room.
+ var delta = 1000;
+ var size = 10 * 1000 * 1000;
+ assert.lte(collectionOptionsFromDB.options.size, size+delta, "size should be ~10000000");
+ assert.gte(collectionOptionsFromDB.options.size, size-delta, "size should be ~10000000");
+
+ var indexes = testDB.changelog.getIndexes();
+ printjson(indexes);
+ assert.eq(indexes[0].key._id, 1, "index is read properly");
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/indexes.js b/src/mongo/gotools/test/qa-tests/jstests/restore/indexes.js
new file mode 100644
index 00000000000..bb50f70b848
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/indexes.js
@@ -0,0 +1,96 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that mongorestore handles restoring different types of
+ // indexes correctly.
+
+ jsTest.log('Testing restoration of different types of indexes');
+
+ var toolTest = getToolTest('indexes');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'indexes_dump';
+ resetDbpath(dumpTarget);
+
+ // the db and collection we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ // create a bunch of indexes of different types
+ testColl.ensureIndex({a: 1});
+ testColl.ensureIndex({b: 1}, {sparse: true, unique: true});
+ testColl.ensureIndex({a: 1, b: -1});
+ testColl.ensureIndex({b: NumberLong("1"), a: NumberLong("1")});
+ testColl.ensureIndex({listField: 1});
+ testColl.ensureIndex({textField: 'text'}, {language: 'spanish'});
+ testColl.ensureIndex({geoField: '2dsphere'});
+
+ // store the getIndexes() output, to compare with the output
+ // after dumping and restoring
+ var indexesPre = testColl.getIndexes();
+
+ // insert some data
+ for (var i = 0; i < 5; i++) {
+ testColl.insert({a: i, b: i+1, listField: [i, i+1]});
+ testColl.insert({textField: 'hola '+i});
+ testColl.insert({geoField: {type: 'Point', coordinates: [i, i+1]}});
+ }
+ // sanity check the data was inserted
+ assert.eq(15, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the collection
+ testColl.drop();
+ // sanity check that the drop worked
+ assert.eq(0, testColl.count());
+ assert.eq(0, testColl.getIndexes().length);
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(15, testColl.count());
+
+ // make sure the indexes were restored correctly
+ var indexesPost = testColl.getIndexes();
+ assert.eq(indexesPre.length, indexesPost.length);
+
+ if (dump_targets === "archive") {
+ jsTest.log('skipping bson file restore test while running with archiving');
+ } else {
+ // drop the collection again
+ testColl.drop();
+ // sanity check that the drop worked
+ assert.eq(0, testColl.count());
+
+ assert.eq(0, testColl.getIndexes().length);
+
+ // restore the data, but this time mentioning the bson file specifically
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget+"/test/coll.bson"))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(15, testColl.count());
+
+ // make sure the indexes were restored correctly
+ indexesPost = testColl.getIndexes();
+ assert.eq(indexesPre.length, indexesPost.length);
+ }
+
+ // success
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_dump_target.js b/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_dump_target.js
new file mode 100644
index 00000000000..89ecaca7ddc
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_dump_target.js
@@ -0,0 +1,32 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+  // Tests running mongorestore with invalid dump targets (directories when
+  // files are expected, and vice versa).
+
+  jsTest.log('Testing running mongorestore with invalid dump targets');
+
+ var toolTest = new ToolTest('invalid_dump_target');
+ toolTest.startDB('foo');
+
+ // run restore with a file, not a directory, specified as the dump location
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb/README')));
+ assert.neq(0, ret);
+
+ // run restore with --db specified and a file, not a directory, as the db dump
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb/README')));
+ assert.neq(0, ret);
+
+ // run restore with --collection specified and a directory, not a file,
+ // as the dump file
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--collection', 'blank']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_metadata.js b/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_metadata.js
new file mode 100644
index 00000000000..5630d8648e6
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/invalid_metadata.js
@@ -0,0 +1,22 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // Tests using mongorestore to restore data from a collection whose .metadata.json
+ // file contains invalid indexes.
+
+ jsTest.log('Testing restoration from a metadata file with invalid indexes');
+
+ var toolTest = new ToolTest('invalid_metadata');
+ toolTest.startDB('foo');
+
+ // run restore, targeting a collection whose metadata file contains an invalid index
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dbOne',
+ '--collection', 'invalid_metadata']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.bson')));
+ assert.neq(0, ret);
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/keep_index_version.js b/src/mongo/gotools/test/qa-tests/jstests/restore/keep_index_version.js
new file mode 100644
index 00000000000..8de84517b7c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/keep_index_version.js
@@ -0,0 +1,88 @@
+(function() {
+
+ load('jstests/common/check_version.js');
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that running mongorestore with --keepIndexVersion does not
+ // update the index version, and that running it without
+ // --keepIndexVersion does.
+
+ jsTest.log('Testing mongorestore with --keepIndexVersion');
+
+ var toolTest = getToolTest('keep_index_version');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'keep_index_version_dump';
+ resetDbpath(dumpTarget);
+
+ // the db and collection we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ if (isAtLeastVersion(testDB.version(), '3.1.0')) {
+ jsTest.log("skipping test on "+testDB.version());
+ return;
+ }
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ jsTest.log("skipping test on "+testDB.version()+" when storage engine is wiredTiger");
+ return;
+ }
+
+ // create a version 0 index on the collection
+ testColl.ensureIndex({num: 1}, {v: 0});
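+  // v:0 is the legacy index format (assumed here to predate the v:1 format
+  // introduced around MongoDB 2.0); the first restore below expects it to
+  // be rebuilt as v:1 by default.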
+
+ // insert some data
+ for (var i = 0; i < 10; i++) {
+ testColl.insert({num: i});
+ }
+ // sanity check the insert worked
+ assert.eq(10, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the db
+ testDB.dropDatabase();
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(10, testColl.count());
+
+ // make sure the index version was updated
+ var indexes = testColl.getIndexes();
+ assert.eq(2, indexes.length);
+ assert.eq(1, indexes[1].v);
+
+ // drop the db
+ testDB.dropDatabase();
+
+ // restore the data with --keepIndexVersion specified
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--keepIndexVersion']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ assert.eq(10, testColl.count());
+
+ // make sure the index version was not updated
+ indexes = testColl.getIndexes();
+ assert.eq(2, indexes.length);
+ assert.eq(0, indexes[1].v);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/large_bulk.js b/src/mongo/gotools/test/qa-tests/jstests/restore/large_bulk.js
new file mode 100644
index 00000000000..2eec217b455
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/large_bulk.js
@@ -0,0 +1,52 @@
+(function() {
+
+  // This test verifies that the bulk API doesn't create BSON documents greater
+  // than the 16MB limit, as was discovered in TOOLS-939.
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('large_bulk');
+ var commonToolArgs = getCommonToolArguments();
+
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+  // build ~1MB strings and insert them to create a test collection
+
+ var oneK="";
+ var oneM="";
+ var i;
+ for (i=0; i<=1024; i++) {
+ oneK+="X";
+ }
+ for (i=0; i<=1024; i++) {
+ oneM+=oneK;
+ }
+
+ for (i=0; i<=32; i++) {
+ dbOne.test.insert({data: oneM});
+ }
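+  // Rough arithmetic for why this exercises the limit: each document holds
+  // about 1MB of string data, and 33 documents total roughly 33MB, so one
+  // bulk batch of all of them would exceed the 16MB message limit unless
+  // batches are also split by physical size rather than just document count.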
+
+ // dump it
+ var dumpTarget = 'large_bulk_dump';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // restore it
+  // 33 records are well under the 1k batch size,
+  // so this should test whether the physical size limit is respected
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore to empty DB should have returned successfully");
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_bson.js b/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_bson.js
new file mode 100644
index 00000000000..41844f58407
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_bson.js
@@ -0,0 +1,20 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // Tests using mongorestore to restore data from a malformed bson file.
+
+ jsTest.log('Testing restoration from a malformed bson file');
+
+ var toolTest = new ToolTest('malformed_bson');
+ toolTest.startDB('foo');
+
+ // run restore, targeting a malformed bson file
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dbOne',
+ '--collection', 'malformed_coll']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_malformed/dbOne/malformed_coll.bson')));
+ assert.neq(0, ret);
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_metadata.js b/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_metadata.js
new file mode 100644
index 00000000000..f724a15c620
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/malformed_metadata.js
@@ -0,0 +1,22 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // Tests using mongorestore to restore data from a collection with
+ // a malformed metadata file.
+
+ jsTest.log('Testing restoration from a malformed metadata file');
+
+ var toolTest = new ToolTest('malformed_metadata');
+ toolTest.startDB('foo');
+
+ // run restore, targeting a collection with a malformed
+ // metadata.json file.
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dbOne',
+ '--collection', 'malformed_metadata']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.bson')));
+ assert.neq(0, ret);
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/missing_dump.js b/src/mongo/gotools/test/qa-tests/jstests/restore/missing_dump.js
new file mode 100644
index 00000000000..0d8ff685105
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/missing_dump.js
@@ -0,0 +1,32 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+  // Tests running mongorestore with missing dump files and directories.
+
+ jsTest.log('Testing running mongorestore with missing dump files and directories');
+
+ var toolTest = new ToolTest('missing_dump');
+ toolTest.startDB('foo');
+
+ // run restore with a missing dump directory
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget('xxxxxxxx')));
+ assert.neq(0, ret);
+
+ // run restore with --db and a missing dump directory
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test']
+ .concat(getRestoreTarget('xxxxxxxx')));
+ assert.neq(0, ret);
+
+ // specify --collection with a missing file
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test',
+ '--collection', 'data']
+ .concat(getRestoreTarget('jstests/restore/testdata/blankdb/xxxxxxxx.bson')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/multiple_dbs.js b/src/mongo/gotools/test/qa-tests/jstests/restore/multiple_dbs.js
new file mode 100644
index 00000000000..3fbaa7c4670
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/multiple_dbs.js
@@ -0,0 +1,72 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests using mongorestore to restore data to multiple dbs.
+
+ jsTest.log('Testing restoration to multiple dbs');
+
+ var toolTest = getToolTest('multiple_dbs');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'multiple_dbs_dump';
+ resetDbpath(dumpTarget);
+
+ // the dbs we will be using
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ var dbTwo = toolTest.db.getSiblingDB('dbTwo');
+
+ // we'll use two collections in each db, with one of
+ // the collection names common across the dbs
+ var oneOnlyCollName = 'dbOneColl';
+ var twoOnlyCollName = 'dbTwoColl';
+ var sharedCollName = 'bothColl';
+
+ // insert a bunch of data
+ for (var i = 0; i < 50; i++) {
+ dbOne[oneOnlyCollName].insert({_id: i+'_'+oneOnlyCollName});
+ dbTwo[twoOnlyCollName].insert({_id: i+'_'+twoOnlyCollName});
+ dbOne[sharedCollName].insert({_id: i+'_dbOne_'+sharedCollName});
+ dbTwo[sharedCollName].insert({_id: i+'_dbTwo_'+sharedCollName});
+ }
+ // sanity check the insertion worked
+ assert.eq(50, dbOne[oneOnlyCollName].count());
+ assert.eq(50, dbTwo[twoOnlyCollName].count());
+ assert.eq(50, dbOne[sharedCollName].count());
+ assert.eq(50, dbTwo[sharedCollName].count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the databases
+ dbOne.dropDatabase();
+ dbTwo.dropDatabase();
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored properly
+ assert.eq(50, dbOne[oneOnlyCollName].count());
+ assert.eq(50, dbTwo[twoOnlyCollName].count());
+ assert.eq(50, dbOne[sharedCollName].count());
+ assert.eq(50, dbTwo[sharedCollName].count());
+ for (i = 0; i < 50; i++) {
+ assert.eq(1, dbOne[oneOnlyCollName].count({_id: i+'_'+oneOnlyCollName}));
+ assert.eq(1, dbTwo[twoOnlyCollName].count({_id: i+'_'+twoOnlyCollName}));
+ assert.eq(1, dbOne[sharedCollName].count({_id: i+'_dbOne_'+sharedCollName}));
+ assert.eq(1, dbTwo[sharedCollName].count({_id: i+'_dbTwo_'+sharedCollName}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/namespaces.js b/src/mongo/gotools/test/qa-tests/jstests/restore/namespaces.js
new file mode 100644
index 00000000000..cdad2a667d2
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/namespaces.js
@@ -0,0 +1,152 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+  jsTest.log('Testing namespace excludes, includes, and mappings during restore');
+
+ var toolTest = getToolTest('namespaces');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'namespaces_dump';
+
+ // the db we will dump from
+ var source1DB = toolTest.db.getSiblingDB('source1');
+ var source2DB = toolTest.db.getSiblingDB('source2');
+ var source3DB = toolTest.db.getSiblingDB('source3');
+ // the db we will restore to
+ var destDB = toolTest.db.getSiblingDB('dest');
+
+ function performRestoreWithArgs(...args) {
+ return toolTest.runTool.apply(toolTest, ['restore']
+ .concat(args)
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ }
+
+ function addTestDataTo(db, colls) {
+ colls.forEach(function(coll) {
+ var data = [];
+ for (var i = 0; i < 500; i++) {
+ data.push({_id: i+'_'+db.getName()+'.'+coll});
+ }
+ db[coll].insertMany(data);
+ // sanity check the insertion worked
+ assert.eq(500, db[coll].count());
+ // Add an index
+ var index = {};
+ index[db.getName()+'.'+coll] = 1;
+ db[coll].createIndex(index);
+ });
+ }
+
+ function verifyDataIn(collection, sourceNS) {
+ if (sourceNS === null) {
+ assert.eq(0, collection.count());
+ return;
+ }
+ assert.eq(500, collection.count());
+ for (var i = 0; i < 500; i++) {
+ assert.eq(1, collection.count({_id: i+'_'+sourceNS}));
+ }
+ assert.eq(1, collection.getIndexes()[1].key[sourceNS]);
+ }
+
+ addTestDataTo(source1DB, ['coll1', 'coll2', 'coll3']);
+ verifyDataIn(source1DB.coll1, 'source1.coll1');
+ verifyDataIn(source1DB.coll2, 'source1.coll2');
+ verifyDataIn(source1DB.coll3, 'source1.coll3');
+
+ addTestDataTo(source2DB, ['coll1', 'coll2', 'coll3']);
+ verifyDataIn(source2DB.coll1, 'source2.coll1');
+ verifyDataIn(source2DB.coll2, 'source2.coll2');
+ verifyDataIn(source2DB.coll3, 'source2.coll3');
+
+ addTestDataTo(source3DB, ['coll3', 'coll4']);
+ verifyDataIn(source3DB.coll3, 'source3.coll3');
+ verifyDataIn(source3DB.coll4, 'source3.coll4');
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // Get rid of the source databases
+ source1DB.dropDatabase();
+ source2DB.dropDatabase();
+ source3DB.dropDatabase();
+
+ // Exclude *.coll1
+ ret = performRestoreWithArgs('--nsExclude', '*.coll1', '--nsFrom', 'source$db-num$.coll$coll-num$', '--nsTo', 'dest.coll_$db-num$_$coll-num$');
+ assert.eq(0, ret);
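+  // A sketch of the mapping above: '--nsFrom source$db-num$.coll$coll-num$'
+  // captures each numeric suffix, so source1.coll2 is restored as
+  // dest.coll_1_2 and source3.coll4 as dest.coll_3_4, while --nsExclude
+  // drops every *.coll1 before the rename applies.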
+
+ verifyDataIn(destDB.coll_1_1, null);
+ verifyDataIn(destDB.coll_1_2, 'source1.coll2');
+ verifyDataIn(destDB.coll_1_3, 'source1.coll3');
+ verifyDataIn(destDB.coll_2_1, null);
+ verifyDataIn(destDB.coll_2_2, 'source2.coll2');
+ verifyDataIn(destDB.coll_2_3, 'source2.coll3');
+ verifyDataIn(destDB.coll_3_1, null);
+ verifyDataIn(destDB.coll_3_2, null);
+ verifyDataIn(destDB.coll_3_3, 'source3.coll3');
+ verifyDataIn(destDB.coll_3_4, 'source3.coll4');
+
+ destDB.dropDatabase();
+
+  // Include only *.coll1
+ ret = performRestoreWithArgs('--nsInclude', '*.coll1', '--nsFrom', 'source$db-num$.coll$coll-num$', '--nsTo', 'dest.coll_$db-num$_$coll-num$');
+ assert.eq(0, ret);
+
+ verifyDataIn(destDB.coll_1_1, 'source1.coll1');
+ verifyDataIn(destDB.coll_1_2, null);
+ verifyDataIn(destDB.coll_1_3, null);
+ verifyDataIn(destDB.coll_2_1, 'source2.coll1');
+ verifyDataIn(destDB.coll_2_2, null);
+ verifyDataIn(destDB.coll_2_3, null);
+ verifyDataIn(destDB.coll_3_1, null);
+ verifyDataIn(destDB.coll_3_2, null);
+ verifyDataIn(destDB.coll_3_3, null);
+ verifyDataIn(destDB.coll_3_4, null);
+
+ destDB.dropDatabase();
+
+ // Exclude collections beginning with 'coll' (which is all of them)
+ ret = performRestoreWithArgs('--excludeCollectionsWithPrefix', 'coll', '--nsFrom', 'source$db-num$.coll$coll-num$', '--nsTo', 'dest.coll_$db-num$_$coll-num$');
+ assert.eq(0, ret);
+
+ verifyDataIn(destDB.coll_1_1, null);
+ verifyDataIn(destDB.coll_1_2, null);
+ verifyDataIn(destDB.coll_1_3, null);
+ verifyDataIn(destDB.coll_2_1, null);
+ verifyDataIn(destDB.coll_2_2, null);
+ verifyDataIn(destDB.coll_2_3, null);
+ verifyDataIn(destDB.coll_3_1, null);
+ verifyDataIn(destDB.coll_3_2, null);
+ verifyDataIn(destDB.coll_3_3, null);
+ verifyDataIn(destDB.coll_3_4, null);
+
+ destDB.dropDatabase();
+
+ // Swap source1 and source2 databases
+ ret = performRestoreWithArgs('--nsFrom', 'source1.*', '--nsTo', 'source2.*', '--nsFrom', 'source2.*', '--nsTo', 'source1.*');
+ assert.eq(0, ret);
+
+ verifyDataIn(source1DB.coll1, 'source2.coll1');
+ verifyDataIn(source1DB.coll2, 'source2.coll2');
+ verifyDataIn(source1DB.coll3, 'source2.coll3');
+ verifyDataIn(source2DB.coll1, 'source1.coll1');
+ verifyDataIn(source2DB.coll2, 'source1.coll2');
+ verifyDataIn(source2DB.coll3, 'source1.coll3');
+ verifyDataIn(source3DB.coll3, 'source3.coll3');
+ verifyDataIn(source3DB.coll4, 'source3.coll4');
+
+ source1DB.dropDatabase();
+ source2DB.dropDatabase();
+ source3DB.dropDatabase();
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/no_index_restore.js b/src/mongo/gotools/test/qa-tests/jstests/restore/no_index_restore.js
new file mode 100644
index 00000000000..8e2c5a26155
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/no_index_restore.js
@@ -0,0 +1,75 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests that running mongorestore with --noIndexRestore does not
+ // restore indexes.
+
+ jsTest.log('Testing restoration with --noIndexRestore');
+
+ var toolTest = getToolTest('no_index_restore');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'no_index_restore_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // we'll use two collections, one with no indexes, the other
+ // with indexes
+ var collNames = ['coll1', 'coll2'];
+
+ // insert some data to be dumped
+ collNames.forEach(function(collName) {
+ for (var i = 0; i < 10; i++) {
+ testDB[collName].insert({_id: i, num: i+1, s: ''+i});
+ }
+ // sanity check the insertion worked
+ assert.eq(10, testDB[collName].count());
+ });
+
+ // create some indexes for the second collection
+ testDB.coll2.ensureIndex({num: 1});
+ testDB.coll2.ensureIndex({num: 1, s: -1});
+ // sanity check the indexes were created
+ assert.eq(3, testDB.coll2.getIndexes().length);
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the collections
+ collNames.forEach(function(collName) {
+ testDB[collName].drop();
+ // sanity check the drop worked
+ assert.eq(0, testDB[collName].count());
+ assert.eq(0, testDB[collName].getIndexes().length);
+ });
+
+ // restore the data, with --noIndexRestore
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--noIndexRestore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored fully, and only the _id
+ // indexes were restored
+ collNames.forEach(function(collName) {
+ assert.eq(10, testDB[collName].count());
+ for (var i = 0; i < 10; i++) {
+ assert.eq(1, testDB[collName].count({_id: i}));
+ }
+
+ assert.eq(1, testDB[collName].getIndexes().length);
+ });
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/no_options_restore.js b/src/mongo/gotools/test/qa-tests/jstests/restore/no_options_restore.js
new file mode 100644
index 00000000000..c6549839976
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/no_options_restore.js
@@ -0,0 +1,125 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+  // As of 2.8, running listCollections is the way to get full
+  // collection options, so we use this helper to pull a named
+  // collection's options out of a listCollections cursor.
+ var extractCollectionOptions = function(db, name) {
+ var res = db.runCommand("listCollections");
+ for (var i = 0; i < res.cursor.firstBatch.length; i++) {
+ if (res.cursor.firstBatch[i].name === name) {
+ return res.cursor.firstBatch[i].options;
+ }
+ }
+ return {};
+ };
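+  // For example (shapes taken from this test's own setup): the capped
+  // collection created below should come back as something like
+  // {capped: true, size: 4096, autoIndexId: true}, while a collection
+  // created with no options yields {}.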
+
+ // Tests that running mongorestore with --noOptionsRestore does
+ // not restore collection options, and that running it without
+ // --noOptionsRestore does restore collection options.
+ jsTest.log('Testing restoration with --noOptionsRestore');
+
+ var toolTest = getToolTest('no_options_restore');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'no_options_restore_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we'll use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // we'll use three different collections - the first will have
+ // options set, the second won't, the third will be capped.
+ // TODO: why aren't these being used?
+ // var collWithOptions = testDB.withOptions;
+ // var collWithoutOptions = testDB.withoutOptions;
+ // var collCapped = testDB.capped;
+
+ // create the noPadding collection
+ var noPaddingOptions = {noPadding: true};
+ testDB.createCollection('withOptions', noPaddingOptions);
+
+ // create the capped collection
+ var cappedOptions = {capped: true, size: 4096, autoIndexId: true};
+ testDB.createCollection('capped', cappedOptions);
+
+ // insert some data into all three collections
+ ['withOptions', 'withoutOptions', 'capped'].forEach(function(collName) {
+ for (var i = 0; i < 50; i++) {
+ testDB[collName].insert({_id: i});
+ }
+ // sanity check the insertions worked
+ assert.eq(50, testDB[collName].count());
+ });
+
+ // add options to the appropriate collection
+  var cmdRet = testDB.runCommand({'collMod': 'withOptions', usePowerOf2Sizes: true});
+ assert.eq(1, cmdRet.ok);
+
+ // store the default options, because they change based on storage engine
+ var baseCappedOptionsFromDB = extractCollectionOptions(testDB, 'capped');
+ var baseWithOptionsFromDB = extractCollectionOptions(testDB, 'withOptions');
+ var baseWithoutOptionsFromDB = extractCollectionOptions(testDB, 'withoutOptions');
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the data
+ testDB.dropDatabase();
+
+ // restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ ['withOptions', 'withoutOptions', 'capped'].forEach(function(collName) {
+ assert.eq(50, testDB[collName].count());
+ });
+
+ // make sure the options were restored correctly
+ var cappedOptionsFromDB = extractCollectionOptions(testDB, 'capped');
+ assert.eq(baseCappedOptionsFromDB, cappedOptionsFromDB);
+ var withOptionsFromDB = extractCollectionOptions(testDB, 'withOptions');
+ assert.eq(baseWithOptionsFromDB, withOptionsFromDB);
+ var withoutOptionsFromDB = extractCollectionOptions(testDB, 'withoutOptions');
+ assert.eq(baseWithoutOptionsFromDB, withoutOptionsFromDB);
+
+ // drop the data
+ testDB.dropDatabase();
+
+ // restore the data, without the options
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--noOptionsRestore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored correctly
+ ['withOptions', 'withoutOptions', 'capped'].forEach(function(collName) {
+ assert.eq(50, testDB[collName].count());
+ });
+
+ // make sure the options were not restored
+ cappedOptionsFromDB = extractCollectionOptions(testDB, 'capped');
+ assert.eq(baseWithoutOptionsFromDB, cappedOptionsFromDB);
+ withOptionsFromDB = extractCollectionOptions(testDB, 'withOptions');
+ assert.eq(baseWithoutOptionsFromDB, withOptionsFromDB);
+ withoutOptionsFromDB = extractCollectionOptions(testDB, 'withoutOptions');
+ assert.eq(baseWithoutOptionsFromDB, withoutOptionsFromDB);
+
+ // additional check that the capped collection is no longer capped
+ var cappedStats = testDB.capped.stats();
+ assert(!cappedStats.capped);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/nonempty_temp_users.js b/src/mongo/gotools/test/qa-tests/jstests/restore/nonempty_temp_users.js
new file mode 100644
index 00000000000..1ab3617ff14
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/nonempty_temp_users.js
@@ -0,0 +1,46 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // Tests running mongorestore and restoring users with a nonempty temp
+ // users collection.
+
+  jsTest.log('Testing restoring users with a nonempty temp users collection.'+
+    ' The restore should still succeed');
+
+ var toolTest = new ToolTest('nonempty_temp_users');
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'nonempty_temp_users_dump';
+ resetDbpath(dumpTarget);
+
+ // the admin db
+ var adminDB = toolTest.db.getSiblingDB('admin');
+
+ // create a user on the admin database
+ adminDB.createUser({
+ user: 'adminUser',
+ pwd: 'password',
+ roles: [{role: 'read', db: 'admin'}],
+ });
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget)));
+ assert.neq(1, ret);
+
+ // clear out the user
+ adminDB.dropAllUsers();
+
+ // insert into the tempusers collection
+ adminDB.tempusers.insert({_id: 'corruption'});
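+  // (mongorestore stages restored users in admin.tempusers before merging them
+  // into admin.system.users, so a leftover document here exercises that path)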
+
+ // restore the data. It should succeed
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget)));
+ assert.neq(1, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/norestore_profile.js b/src/mongo/gotools/test/qa-tests/jstests/restore/norestore_profile.js
new file mode 100644
index 00000000000..8242ff15899
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/norestore_profile.js
@@ -0,0 +1,56 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('norestore_profile');
+ var commonToolArgs = getCommonToolArguments();
+
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ // turn on the profiler
+ dbOne.setProfilingLevel(2);
+
+ // create some test data
+ for (var i=0; i<=100; i++) {
+ dbOne.test.insert({_id: i, x: i*i});
+ }
+ // run some queries to end up in the profile collection
+ dbOne.test.find({_id: 3});
+ dbOne.test.find({_id: 30});
+ dbOne.test.find({_id: 50});
+
+ assert.gt(dbOne.system.profile.count(), 0, "profiler still empty after running test setup");
+
+ // dump it
+ var dumpTarget = 'norestore_profile';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // turn off profiling and remove the profiler collection
+ dbOne.setProfilingLevel(0);
+ dbOne.system.profile.drop();
+ assert.eq(dbOne.system.profile.count(), 0);
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // restore it, this should restore everything *except* the profile collection
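+  // (mongodump skips the system.profile collection, so it is never in the dump)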
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore to empty DB should have returned successfully");
+
+ // check that the data actually got restored
+ assert.gt(dbOne.test.count(), 100);
+
+ // but the profile collection should still be empty
+ assert.eq(dbOne.system.profile.count(), 0);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/objcheck_valid_bson.js b/src/mongo/gotools/test/qa-tests/jstests/restore/objcheck_valid_bson.js
new file mode 100644
index 00000000000..60ed695b1cf
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/objcheck_valid_bson.js
@@ -0,0 +1,44 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // Tests that running mongorestore with --objcheck on valid bson
+ // files restores the data successfully.
+
+ jsTest.log('Testing restoration with --objcheck');
+
+ var toolTest = new ToolTest('objcheck_valid_bson');
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'objcheck_valid_bson_dump';
+ resetDbpath(dumpTarget);
+
+ // the db and collection we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.coll;
+
+ // insert some data
+ for (var i = 0; i < 50; i++) {
+ testColl.insert({_id: i});
+ }
+ // sanity check the insert worked
+ assert.eq(50, testColl.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump'].concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the data
+ testDB.dropDatabase();
+
+  // restore the data, with --objcheck
+  ret = toolTest.runTool.apply(toolTest, ['restore', '--objcheck'].concat(getRestoreTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+  // make sure the restore completed successfully
+ assert.eq(50, testColl.count());
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_and_limit.js b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_and_limit.js
new file mode 100644
index 00000000000..378e018f155
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_and_limit.js
@@ -0,0 +1,78 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ // Tests using mongorestore with the --oplogReplay and --oplogLimit flags.
+
+ jsTest.log('Testing restoration with the --oplogReplay and --oplogLimit options');
+
+ var toolTest = getToolTest('oplog_replay_and_limit');
+ var commonToolArgs = getCommonToolArguments();
+
+ // this test uses the testdata/dump_with_oplog directory. this directory contains:
+ // - a test/ subdirectory, which will restore objects { _id: i } for i from
+ // 0-9 to the test.data collection
+ // - an oplog.bson file, which contains oplog entries for inserts of
+ // objects { _id: i } for i from 10-14 to the test.data collection.
+ //
+ // within the oplog.bson file, the entries for i from 10-13 have timestamps
+  // 1416342265:2 through 1416342265:5. the entry for { _id: 14 } has
+ // timestamp 1500000000:1.
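+  //
+  // for reference, each insert entry in oplog.bson is shaped roughly like:
+  //   {ts: Timestamp(1416342265, 2), op: 'i', ns: 'test.data', o: {_id: 10}}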
+
+ // the db and collection we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // restore the data, without --oplogReplay. _ids 0-9, which appear in the
+ // collection's bson file, should be restored.
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_oplog'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(10, testColl.count());
+ for (var i = 0; i < 10; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // drop the db
+ testDB.dropDatabase();
+
+ // restore the data, with --oplogReplay. _ids 10-14, appearing
+ // in the oplog.bson file, should be inserted as well.
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_oplog'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(15, testColl.count());
+ for (i = 0; i < 15; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // drop the db
+ testDB.dropDatabase();
+
+ // restore the data, with --oplogReplay and --oplogLimit with a
+ // value that will filter out { _id: 14 } from getting inserted.
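+  // --oplogLimit takes a <seconds>[:ordinal] timestamp, and only entries strictly
+  // before it are replayed: 1416342266:0 admits the 1416342265:* entries for
+  // _ids 10-13 but excludes the 1500000000:1 entry for _id 14.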
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay',
+ '--oplogLimit', '1416342266:0']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_oplog'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(14, testColl.count());
+ for (i = 0; i < 14; i++) {
+ assert.eq(1, testColl.count({_id: i}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_conflict.js b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_conflict.js
new file mode 100644
index 00000000000..4d021a656e2
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_conflict.js
@@ -0,0 +1,33 @@
+/**
+ * oplog_replay_conflict.js
+ *
+ * This file tests mongorestore with --oplogReplay where the user provides two top-priority
+ * oplogs, in which case mongorestore should exit with an error.
+ */
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var commonToolArgs = getCommonToolArguments();
+ var restoreTarget = 'jstests/restore/testdata/dump_oplog_conflict';
+
+ var toolTest = getToolTest('oplog_replay_conflict');
+
+ // The test db and collections we'll be using.
+ var testDB = toolTest.db.getSiblingDB('test');
+ testDB.createCollection('data');
+ var testColl = testDB.data;
+
+  // Attempt to replay the oplog from the provided oplog file; this should fail
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay',
+ '--oplogFile', 'jstests/restore/testdata/extra_oplog.bson',
+ restoreTarget].concat(commonToolArgs));
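+  // (the dump directory already contains its own top-level oplog.bson, so the
+  // --oplogFile flag gives mongorestore two candidate oplogs to choose between)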
+
+ assert.eq(0, testColl.count(),
+ "no original entries should be restored");
+ assert.eq(1, ret, "restore operation succeeded when it shouldn't have");
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_local_main.js b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_local_main.js
new file mode 100644
index 00000000000..ad149c2a910
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_local_main.js
@@ -0,0 +1,60 @@
+/**
+ * oplog_replay_local_main.js
+ *
+ * This file tests mongorestore with --oplogReplay where the oplog file is in the 'oplog.$main'
+ * collection of the 'local' database. This occurs when using master-slave replication.
+ */
+(function() {
+ 'use strict';
+
+ var dumpTarget = 'oplog_replay_local_main';
+ var rt = new ReplTest('oplog_replay_local_main');
+ var m = rt.start(true);
+  // Set the test db to 'local' and collection to 'oplog.$main' to fake a master-slave oplog
+ var testDB = m.getDB('local');
+ var testColl = testDB.oplog.$main;
+ var testRestoreDB = m.getDB('test');
+ var testRestoreColl = testRestoreDB.op;
+ resetDbpath(dumpTarget);
+
+ var lastop = function() {
+ return testColl.find().sort({$natural: -1}).next();
+ };
+
+ var lastTS = lastop().ts.t;
+ var oplogSize = 100;
+
+ // Create a fake oplog consisting of 100 inserts.
+ for (var i = 0; i < oplogSize; i++) {
+ var op = {
+ ts: new Timestamp(lastTS, i),
+ op: 'i',
+ o: {_id: i, x: 'a' + i},
+ ns: 'test.op'
+ };
+ assert.commandWorked(testDB.runCommand({godinsert: 'oplog.$main', obj: op}));
+ }
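+  // (godinsert is a test-only mongod command, available when test commands are
+  // enabled, that writes the document directly, letting the test forge oplog entries)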
+
+ // Dump the fake oplog.
+ var ret = runMongoProgram('mongodump',
+ '--port', rt.ports[0],
+ '--db', 'local',
+ '-c', 'oplog.$main',
+ '--out', dumpTarget);
+ assert.eq(0, ret, "dump operation failed");
+
+ // Create the test.op collection.
+ testRestoreColl.drop();
+ testRestoreDB.createCollection("op");
+ assert.eq(0, testRestoreColl.count());
+
+  // Replay the oplog from the dump
+ ret = runMongoProgram('mongorestore',
+ '--port', rt.ports[0],
+ '--oplogReplay',
+ dumpTarget);
+ assert.eq(0, ret, "restore operation failed");
+
+ assert.eq(oplogSize, testRestoreColl.count(), "all oplog entries should be inserted");
+ rt.stop(true);
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_local_rs.js b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_local_rs.js
new file mode 100644
index 00000000000..03a16c4a745
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_local_rs.js
@@ -0,0 +1,67 @@
+/**
+ * oplog_replay_local_rs.js
+ *
+ * This file tests mongorestore with --oplogReplay where the oplog file is in the 'oplog.rs'
+ * collection of the 'local' database. This occurs when using a replica-set for replication.
+ */
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var commonToolArgs = getCommonToolArguments();
+ var dumpTarget = 'oplog_replay_local_rs';
+
+ var toolTest = getToolTest('oplog_replay_local_rs');
+
+ // Set the test db to 'local' and collection to 'oplog.rs' to fake a replica set oplog
+ var testDB = toolTest.db.getSiblingDB('local');
+ var testColl = testDB['oplog.rs'];
+ var testRestoreDB = toolTest.db.getSiblingDB('test');
+ var testRestoreColl = testRestoreDB.op;
+ resetDbpath(dumpTarget);
+
+ var oplogSize = 100;
+ testDB.createCollection('oplog.rs', {capped: true, size: 100000});
+
+ // Create a fake oplog consisting of 100 inserts.
+ for (var i = 0; i < oplogSize; i++) {
+ var r = testColl.insert({
+ ts: new Timestamp(0, i),
+ op: "i",
+ o: {_id: i, x: 'a' + i},
+ ns: "test.op",
+ });
+ assert.eq(1, r.nInserted, "insert failed");
+ }
+
+ // Dump the fake oplog.
+ var ret = toolTest.runTool.apply(toolTest, ['dump',
+ '--db', 'local',
+ '-c', 'oplog.rs',
+ '--out', dumpTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "dump operation failed");
+
+  // Drop the original data.
+ testColl.drop();
+ assert.eq(0, testColl.count(), "all original entries should be dropped");
+
+ // Create the test.op collection.
+ testRestoreColl.drop();
+ testRestoreDB.createCollection("op");
+ assert.eq(0, testRestoreColl.count());
+
+  // Replay the oplog from the dump
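+  // (with --oplogReplay, mongorestore recognizes a dumped local/oplog.rs.bson as
+  // the oplog to replay rather than as an ordinary collection)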
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay',
+ dumpTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore operation failed");
+
+ assert.eq(oplogSize, testRestoreColl.count(),
+ "all oplog entries should be inserted");
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_no_oplog.js b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_no_oplog.js
new file mode 100644
index 00000000000..eae7db1519e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_no_oplog.js
@@ -0,0 +1,19 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // Tests using mongorestore with --oplogReplay when no oplog.bson file is present.
+
+ jsTest.log('Testing restoration with --oplogReplay and no oplog.bson file');
+
+ var toolTest = new ToolTest('oplog_replay_no_oplog');
+ toolTest.startDB('foo');
+
+ // run the restore, with a dump directory that has no oplog.bson file
+ var ret = toolTest.runTool.apply(toolTest, ['restore', '--oplogReplay']
+    .concat(getRestoreTarget('jstests/restore/testdata/dump_empty')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_noop.js b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_noop.js
new file mode 100644
index 00000000000..6a1f20d5cf6
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_noop.js
@@ -0,0 +1,37 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ // Tests using mongorestore with --oplogReplay and noops in the oplog.bson,
+ // making sure the noops are ignored.
+
+ jsTest.log('Testing restoration with --oplogReplay and noops');
+
+ var toolTest = getToolTest('oplog_replay_noop');
+ var commonToolArgs = getCommonToolArguments();
+
+ // the db and collection we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+ var testColl = testDB.data;
+
+ // restore the data, with --oplogReplay
+ var ret = toolTest.runTool.apply(toolTest, ['restore', '--oplogReplay']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_noop_in_oplog'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the document appearing in the oplog, which shows up
+ // after the noops, was added successfully
+ assert.eq(1, testColl.count());
+ assert.eq(1, testColl.count({a: 1}));
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_priority_oplog.js b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_priority_oplog.js
new file mode 100644
index 00000000000..6d2d873285e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_priority_oplog.js
@@ -0,0 +1,40 @@
+/**
+ * oplog_replay_priority_oplog.js
+ *
+ * This file tests mongorestore with --oplogReplay where the user provides two oplogs and
+ * mongorestore only restores the higher priority one.
+ */
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var commonToolArgs = getCommonToolArguments();
+ var restoreTarget = 'jstests/restore/testdata/dump_local_oplog';
+
+ var toolTest = getToolTest('oplog_replay_priority_oplog');
+
+ // The test db and collections we'll be using.
+ var testDB = toolTest.db.getSiblingDB('test');
+ testDB.createCollection('data');
+ var testColl = testDB.data;
+ testDB.createCollection('op');
+ var restoreColl = testDB.op;
+
+ // Replay the oplog from the provided oplog
+ var ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay',
+ '--oplogFile', 'jstests/restore/testdata/extra_oplog.bson',
+ restoreTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore operation failed");
+
+ // Extra oplog has 5 entries as explained in oplog_replay_and_limit.js
+ assert.eq(5, testColl.count(),
+ "all original entries from high priority oplog should be restored");
+ assert.eq(0, restoreColl.count(),
+ "no original entries from low priority oplog should be restored");
+ toolTest.stop();
+}());
+
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_size_safety.js b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_size_safety.js
new file mode 100644
index 00000000000..0b3000ad60e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_size_safety.js
@@ -0,0 +1,65 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var commonToolArgs = getCommonToolArguments();
+ var dumpTarget = 'oplog_replay_sizes';
+
+ // Helper for using mongorestore with --oplogReplay and a large oplog.bson
+ function tryOplogReplay(oplogSize, documentSize) {
+ var toolTest = getToolTest('oplog_replay_sizes');
+ // the test db and collections we'll be using
+ var testDB = toolTest.db.getSiblingDB('test_oplog');
+ var testColl = testDB.oplog;
+ var testRestoreDB = toolTest.db.getSiblingDB('test');
+ var testRestoreColl = testRestoreDB.op;
+ resetDbpath(dumpTarget);
+
+ var debugString = 'with ' + oplogSize + ' ops of size ' + documentSize;
+ jsTest.log('Testing --oplogReplay ' + debugString);
+
+ // create a fake oplog consisting of a large number of inserts
+ var xStr = new Array(documentSize).join("x"); // ~documentSize bytes string
+ for (var i = 0; i < oplogSize; i++) {
+ testColl.insert({
+ ts: new Timestamp(0, i),
+ op: "i",
+ o: {_id: i, x: xStr},
+ ns: "test.op"
+ });
+ }
+
+ // dump the fake oplog
+ var ret = toolTest.runTool.apply(toolTest, ['dump',
+ '--db', 'test_oplog',
+ '-c', 'oplog',
+ '--out', dumpTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "dump operation failed " + debugString);
+
+ // create the test.op collection
+ testRestoreColl.drop();
+ testRestoreDB.createCollection("op");
+ assert.eq(0, testRestoreColl.count());
+
+ // trick restore into replaying the "oplog" we forged above
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay', dumpTarget+'/test_oplog']
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore operation failed " + debugString);
+ assert.eq(oplogSize, testRestoreColl.count(),
+ "all oplog entries should be inserted " + debugString);
+ toolTest.stop();
+ }
+
+ // run the test on various oplog and op sizes
+ tryOplogReplay(1024, 1024); // sanity check
+ tryOplogReplay(1024*1024, 1); // millions of micro ops
+ tryOplogReplay(8, 16*1024*1023); // 8 ~16MB ops
+ tryOplogReplay(32, 1024*1024); // 32 ~1MB ops
+ tryOplogReplay(32*1024, 1024); // many ~1KB ops
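+  // (the 8-op case keeps each document just under the 16MB BSON size limit,
+  // exercising maximum-size oplog entries)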
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_specify_file.js b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_specify_file.js
new file mode 100644
index 00000000000..52cf5a953cd
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/oplog_replay_specify_file.js
@@ -0,0 +1,68 @@
+/**
+ * oplog_replay_specify_file.js
+ *
+ * This file tests mongorestore with --oplogReplay where the user specifies a file with the
+ * --oplogFile flag.
+ */
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var commonToolArgs = getCommonToolArguments();
+ var dumpTarget = 'oplog_replay_specify_file';
+
+ var toolTest = getToolTest('oplog_replay_specify_file');
+
+ // The test db and collections we'll be using.
+ var testDB = toolTest.db.getSiblingDB('test_oplog');
+ var testColl = testDB.foo;
+ var testRestoreDB = toolTest.db.getSiblingDB('test');
+ var testRestoreColl = testRestoreDB.op;
+ resetDbpath(dumpTarget);
+
+ var oplogSize = 100;
+
+ // Create a fake oplog consisting of 100 inserts.
+ for (var i = 0; i < oplogSize; i++) {
+ testColl.insert({
+ ts: new Timestamp(0, i),
+ op: "i",
+ o: {_id: i, x: 'a' + i},
+ ns: "test.op"
+ });
+ }
+
+ // Dump the fake oplog.
+ var ret = toolTest.runTool.apply(toolTest, ['dump',
+ '--db', 'test_oplog',
+ '-c', 'foo',
+ '--out', dumpTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "dump operation failed");
+
+  // Drop the original data.
+ testColl.drop();
+ assert.eq(0, testColl.count(),
+ "all original entries should be dropped");
+
+ // Create the test.op collection.
+ testRestoreColl.drop();
+ testRestoreDB.createCollection("op");
+ assert.eq(0, testRestoreColl.count());
+
+  // Replay the oplog from the file passed via --oplogFile
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--oplogReplay',
+ '--oplogFile', dumpTarget + '/test_oplog/foo.bson',
+ dumpTarget]
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore operation failed");
+
+ assert.eq(oplogSize, testRestoreColl.count(),
+ "all oplog entries should be inserted");
+ assert.eq(oplogSize, testColl.count(),
+ "all original entries should be restored");
+ toolTest.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/partial_restore.js b/src/mongo/gotools/test/qa-tests/jstests/restore/partial_restore.js
new file mode 100644
index 00000000000..0335f94f53b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/partial_restore.js
@@ -0,0 +1,77 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+  // Tests using mongorestore to restore only a subset (either a
+  // single db or a single collection) of a larger dump.
+
+ jsTest.log('Testing restoration of a subset of a dump');
+
+ var toolTest = getToolTest('partial_restore');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'partial_restore_dump';
+ resetDbpath(dumpTarget);
+
+ // we'll insert data into three collections spread across two dbs
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ var dbTwo = toolTest.db.getSiblingDB('dbTwo');
+ var collOne = dbOne.collOne;
+ var collTwo = dbOne.collTwo;
+ var collThree = dbTwo.collThree;
+
+ // insert a bunch of data
+ for (var i = 0; i < 50; i++) {
+ collOne.insert({_id: i+'_collOne'});
+ collTwo.insert({_id: i+'_collTwo'});
+ collThree.insert({_id: i+'_collThree'});
+ }
+ // sanity check the insertion worked
+ assert.eq(50, collOne.count());
+ assert.eq(50, collTwo.count());
+ assert.eq(50, collThree.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the databases
+ dbOne.dropDatabase();
+ dbTwo.dropDatabase();
+
+ // restore a single db
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'dbOne']
+ .concat(getRestoreTarget(dumpTarget+'/dbOne'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the restore worked, and nothing else but that db was restored
+ assert.eq(50, collOne.count());
+ assert.eq(50, collTwo.count());
+ assert.eq(0, collThree.count());
+
+ // drop the data
+ dbOne.dropDatabase();
+
+ // restore a single collection
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'dbOne',
+ '--collection', 'collTwo']
+ .concat(getRestoreTarget(dumpTarget+'/dbOne/collTwo.bson'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the restore worked, and nothing else but that collection was restored
+ assert.eq(0, collOne.count());
+ assert.eq(50, collTwo.count());
+ assert.eq(0, collThree.count());
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/preserve_oplog_structure_order.js b/src/mongo/gotools/test/qa-tests/jstests/restore/preserve_oplog_structure_order.js
new file mode 100644
index 00000000000..785a5ad31d6
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/preserve_oplog_structure_order.js
@@ -0,0 +1,25 @@
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ jsTest.log('Testing that the order of fields is preserved in the oplog');
+
+ var toolTest = new ToolTest('ordered_oplog');
+ toolTest.startDB('foo');
+
+  // run restore, with an "update" oplog whose _id field is a subdocument with several fields
+ // { "h":{"$numberLong":"7987029173745013482"},"ns":"test.foobar",
+ // "o":{"_id":{"a":1,"b":2,"c":3,"d":5,"e":6,"f":7,"g":8},"foo":"bar"},
+ // "o2":{"_id":{"a":1,"b":2,"c":3,"d":5,"e":6,"f":7,"g":8}},"op":"u","ts":{"$timestamp":{"t":1439225650,"i":1}},"v":NumberInt(2)
+ // }
+ // if the _id from the "o" and the _id from the "o2" don't match then mongod complains
+  // run it several times, because with just one execution there is a chance that restore
+  // randomly selects the correct order; with several executions the chance of a false
+  // positive on every run diminishes.
+ for (var i=0; i<10; i++) {
+ var ret = toolTest.runTool('restore', '--oplogReplay', 'jstests/restore/testdata/dump_with_complex_id_oplog');
+ assert.eq(0, ret);
+ }
+
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/restore_document_validation.js b/src/mongo/gotools/test/qa-tests/jstests/restore/restore_document_validation.js
new file mode 100644
index 00000000000..8750c29c6a7
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/restore_document_validation.js
@@ -0,0 +1,178 @@
+/**
+ * restore_document_validation.js
+ *
+ * This file tests that mongorestore works with document validation. It checks both that
+ * invalid documents are not restored when validation is turned on, and that all documents
+ * are restored when the user indicates they want to bypass validation.
+ */
+
+(function() {
+ 'use strict';
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ /**
+ * Part 1: Test that restore follows document validation rules.
+ */
+ jsTest.log('Testing that restore reacts well to document validation');
+
+ var toolTest = getToolTest('document_validation');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'doc_validation';
+ resetDbpath(dumpTarget);
+
+ // the db we will use
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create 1000 documents, half of which will pass the validation
+ for (var i = 0; i < 1000; i++) {
+ if (i%2 === 0) {
+ testDB.bar.insert({_id: i, num: i+1, s: String(i)});
+ } else {
+ testDB.bar.insert({_id: i, num: i+1, s: String(i), baz: i});
+ }
+ }
+ // sanity check the insertion worked
+ assert.eq(1000, testDB.bar.count(), 'all documents should be inserted');
+
+ var ret = toolTest.runTool.apply(toolTest, ['dump', '-v']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'dumping should run successfully');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // sanity check that we can restore the data without validation
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ assert.eq(1000, testDB.bar.count(), 'after the restore, all documents should be seen');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // turn on validation
+ var r = testDB.createCollection('bar', {validator: {baz: {$exists: true}}});
+ assert.eq(r, {ok: 1}, 'create collection with validation should work');
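+  // the validator {baz: {$exists: true}} accepts only the odd-_id half of the
+  // documents created above, i.e. 500 of the 1000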
+
+ // test that it's working
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 0, "invalid documents shouldn't be inserted");
+
+ // restore the 1000 records of which only 500 are valid
+ ret = toolTest.runTool.apply(toolTest, ['restore', '-v']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'restoring against a collection with validation on should still succeed');
+
+ assert.eq(500, testDB.bar.count(), 'only the valid documents should be restored');
+
+ /**
+ * Part 2: Test that restore can bypass document validation rules.
+ */
+ jsTest.log('Testing that bypass document validation works');
+
+ testDB.dropDatabase();
+
+ // turn on validation
+ r = testDB.createCollection('bar', {validator: {baz: {$exists: true}}});
+ assert.eq(r, {ok: 1}, 'create collection with validation should work');
+
+ // test that we cannot insert an 'invalid' document
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 0, 'invalid documents should not be inserted');
+
+ // restore the 1000 records again with bypassDocumentValidation turned on
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--bypassDocumentValidation']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'restoring documents should work with bypass document validation set');
+ assert.eq(1000, testDB.bar.count(),
+ 'all documents should be restored with bypass document validation set');
+
+ /**
+ * Part 3: Test that restore can restore the document validation rules,
+ * if they're dumped with the collection.
+ */
+ jsTest.log('Testing that dump and restore restores the validation rules themselves');
+
+ // clear out the database, including the validation rules
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // test that we can insert an 'invalid' document
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 1,
+ 'invalid documents should be inserted after validation rules are dropped');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // restore the 1000 records again
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+ assert.eq(1000, testDB.bar.count());
+
+  // turn on validation on an existing collection
+ testDB.runCommand({'collMod': 'bar', 'validator': {baz: {$exists: true}}});
+
+ // re-dump everything, this time dumping the validation rules themselves
+ ret = toolTest.runTool.apply(toolTest, ['dump', '-v']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'the dump should run successfully');
+
+ // clear out the database, including the validation rules
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // test that we can insert an 'invalid' document
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 1,
+ 'invalid documents should be inserted after we drop validation rules');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // restore the 1000 records again
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'restoring rules and some invalid documents should run successfully');
+ assert.eq(500, testDB.bar.count(),
+ 'restoring the validation rules and documents should only restore valid documents');
+
+ /**
+ * Part 4: Test that restore can bypass the document validation rules,
+ * even if they're dumped with the collection and restored with the collection.
+ */
+ jsTest.log('Testing that bypass document validation works when restoring the rules as well');
+
+ // clear out the database, including the validation rules
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // test that we can insert an 'invalid' document
+ r = testDB.bar.insert({num: 10000});
+ assert.eq(r.nInserted, 1,
+ 'invalid documents should be inserted after validation rules are dropped');
+
+ testDB.dropDatabase();
+ assert.eq(0, testDB.bar.count(), 'after the drop, no documents should be seen');
+
+ // restore the 1000 records again with bypassDocumentValidation turned on
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--bypassDocumentValidation']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, 'restoring documents should work with bypass document validation set');
+ assert.eq(1000, testDB.bar.count(),
+ 'all documents should be restored with bypass document validation set');
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/sharded_fullrestore.js b/src/mongo/gotools/test/qa-tests/jstests/restore/sharded_fullrestore.js
new file mode 100644
index 00000000000..0cff1cc2845
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/sharded_fullrestore.js
@@ -0,0 +1,43 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/sharding_28.config.js');
+ }
+
+ if (dump_targets === "archive") {
+    print('skipping test incompatible with archiving');
+ return assert(true);
+ }
+
+ var targetPath = 'restore_full_restore';
+ var toolTest = getToolTest('fullrestore');
+ var commonToolArgs = getCommonToolArguments();
+
+ var sourceDB = toolTest.db.getSiblingDB('blahblah');
+
+ // put in some sample data
+ for (var i=0; i<100; i++) {
+ sourceDB.test.insert({x: 1});
+ }
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(targetPath))
+ .concat(commonToolArgs));
+ assert.eq(ret, 0, "dump of full sharded system should have succeeded");
+
+ // a full restore should fail
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(targetPath))
+ .concat(commonToolArgs));
+ assert.neq(ret, 0, "restore of full sharded system should have failed");
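+  // (mongorestore refuses a full restore when the dump includes the config
+  // database, so the config dump must be removed first, as done below)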
+
+ // delete the config dir
+ resetDbpath(targetPath + "/config");
+
+ // *now* the restore should succeed
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(targetPath))
+ .concat(commonToolArgs));
+ assert.eq(ret, 0, "restore of sharded system without config db should have succeeded");
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/stop_on_error.js b/src/mongo/gotools/test/qa-tests/jstests/restore/stop_on_error.js
new file mode 100644
index 00000000000..b91b391a617
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/stop_on_error.js
@@ -0,0 +1,48 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ var toolTest = getToolTest('stop_on_error');
+ var commonToolArgs = getCommonToolArguments();
+
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ // create a test collection
+ for (var i=0; i<=100; i++) {
+ dbOne.test.insert({_id: i, x: i*i});
+ }
+
+ // dump it
+ var dumpTarget = 'stop_on_error_dump';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // restore it - database was just dropped, so this should work successfully
+ ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret, "restore to empty DB should have returned successfully");
+
+ // restore it again with --stopOnError - this one should fail since there are dup keys
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--stopOnError', '-vvvv']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.neq(0, ret);
+
+ // restore it one more time without --stopOnError - there are dup keys but they will be ignored
+ ret = toolTest.runTool.apply(toolTest, ['restore', '-vvvv']
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/symlinks.js b/src/mongo/gotools/test/qa-tests/jstests/restore/symlinks.js
new file mode 100644
index 00000000000..a27ef8b94c3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/symlinks.js
@@ -0,0 +1,46 @@
+(function() {
+
+ // Tests using mongorestore on a dump directory containing symlinks
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ jsTest.log('Testing restoration from a dump containing symlinks');
+
+ var toolTest = getToolTest('symlinks');
+
+  // this test uses testdata/dump_with_soft_links. within that directory,
+  // the dbTwo directory is a soft link to testdata/soft_linked_db and the
+  // dbOne/data.bson file is a soft link to testdata/soft_linked_collection.bson.
+  // the file not_a_dir is a soft link to a bson file, and is there to make
+  // sure that soft-linked regular files are not treated as directories.
+
+ // the two dbs we'll be using
+ var dbOne = toolTest.db.getSiblingDB('dbOne');
+ var dbTwo = toolTest.db.getSiblingDB('dbTwo');
+ var notADir = toolTest.db.getSiblingDB('not_a_dir');
+
+ // restore the data
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(getRestoreTarget('jstests/restore/testdata/dump_with_soft_links')));
+ assert.eq(0, ret);
+
+ // make sure the data was restored properly
+ assert.eq(10, dbOne.data.count());
+ assert.eq(10, dbTwo.data.count());
+ assert.eq(0, notADir.data.count());
+ for (var i = 0; i < 10; i++) {
+ assert.eq(1, dbOne.data.count({_id: i+'_dbOne'}));
+ assert.eq(1, dbTwo.data.count({_id: i+'_dbTwo'}));
+ }
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank.bson
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank.bson
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.bson
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.bson
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.metadata.json
new file mode 100644
index 00000000000..0967ef424bc
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankcoll/blank_metadata.metadata.json
@@ -0,0 +1 @@
+{}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankdb/README b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankdb/README
new file mode 100644
index 00000000000..8a13ce0a00c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/blankdb/README
@@ -0,0 +1 @@
+This exists so that this directory can remain blank of .bson files but still be checked into version control.
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_empty/db_empty/coll_empty.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_empty/db_empty/coll_empty.bson
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_empty/db_empty/coll_empty.bson
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.bson
new file mode 100644
index 00000000000..3799a6f04b6
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.metadata.json
new file mode 100644
index 00000000000..64d0433a836
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_extended_json_options/test/changelog.metadata.json
@@ -0,0 +1 @@
+{ "options" : { "create" : "changelog", "size" : { "$numberLong" : "10000000" }, "capped" : true }, "indexes" : [ { "v" : 1, "key" : { "_id" : { "$numberLong" : "1"}}, "ns" : "config.changelog", "name" : "_id_" }, {"v":1,"key":{"pos":"2d"},"name":"position_2d","ns":"config.changelog","min":{"$numberLong":"0"},"max":{"$numberLong":"1000"},"bits":{"$numberLong":"32"}} ] }
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.bson
new file mode 100644
index 00000000000..6051944948e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.metadata.json
new file mode 100644
index 00000000000..9e28c8db056
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_local_oplog/local/oplog.rs.metadata.json
@@ -0,0 +1 @@
+{"options":{"capped":true,"size":100096},"indexes":[]} \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_oplog_conflict/oplog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_oplog_conflict/oplog.bson
new file mode 100644
index 00000000000..a9ada58715f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_oplog_conflict/oplog.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_complex_id_oplog/oplog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_complex_id_oplog/oplog.bson
new file mode 100644
index 00000000000..9a47fca217f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_complex_id_oplog/oplog.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.bson
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.bson
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.metadata.json
new file mode 100644
index 00000000000..e0ea3257e88
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_invalid/dbOne/invalid_metadata.metadata.json
@@ -0,0 +1 @@
+{"indexes":[{"v":1,"name":"_id_","ns":"dbOne.invalid_metadata"},{"v":1,"name":"a_1","ns":"dbOne.invalid_metadata"}]}
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_coll.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_coll.bson
new file mode 100644
index 00000000000..dd6d86a43dc
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_coll.bson
@@ -0,0 +1 @@
+XXX
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.bson
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.bson
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.metadata.json
new file mode 100644
index 00000000000..dd6d86a43dc
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/malformed_metadata.metadata.json
@@ -0,0 +1 @@
+XXX
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/missing_metadata.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/missing_metadata.bson
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_malformed/dbOne/missing_metadata.bson
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_noop_in_oplog/oplog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_noop_in_oplog/oplog.bson
new file mode 100644
index 00000000000..29172294c0f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_noop_in_oplog/oplog.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/oplog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/oplog.bson
new file mode 100644
index 00000000000..a9ada58715f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/oplog.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.bson
new file mode 100644
index 00000000000..c570d917b76
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.metadata.json
new file mode 100644
index 00000000000..65e5d967f00
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/data.metadata.json
@@ -0,0 +1 @@
+{"options":{"flags":1},"indexes":[{"v":1,"key":{"_id":1},"name":"_id_","ns":"test.data"}]} \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/system.indexes.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/system.indexes.bson
new file mode 100644
index 00000000000..324f8e270df
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_oplog/test/system.indexes.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.bson
new file mode 100644
index 00000000000..ff0d2e6bc31
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.metadata.json
new file mode 100644
index 00000000000..98eb8799771
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/data.metadata.json
@@ -0,0 +1 @@
+{"options":{"flags":1},"indexes":[{"v":1,"key":{"_id":1},"name":"_id_","ns":"dbOne.data"}]} \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/system.indexes.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/system.indexes.bson
new file mode 100644
index 00000000000..f1247e928c3
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbOne/system.indexes.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.bson
new file mode 100644
index 00000000000..0d5439cec2e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.metadata.json
new file mode 100644
index 00000000000..8fa8534bde5
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/data.metadata.json
@@ -0,0 +1 @@
+{"options":{"flags":1},"indexes":[{"v":1,"key":{"_id":1},"name":"_id_","ns":"dbTwo.data"}]} \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/system.indexes.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/system.indexes.bson
new file mode 100644
index 00000000000..cea78cca0a5
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/dbTwo/system.indexes.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/not_a_dir b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/not_a_dir
new file mode 100644
index 00000000000..a9ada58715f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/dump_with_soft_links/not_a_dir
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/extra_oplog.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/extra_oplog.bson
new file mode 100644
index 00000000000..a9ada58715f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/extra_oplog.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_collection.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_collection.bson
new file mode 100644
index 00000000000..ff0d2e6bc31
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_collection.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.bson
new file mode 100644
index 00000000000..0d5439cec2e
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.metadata.json b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.metadata.json
new file mode 100644
index 00000000000..8fa8534bde5
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/data.metadata.json
@@ -0,0 +1 @@
+{"options":{"flags":1},"indexes":[{"v":1,"key":{"_id":1},"name":"_id_","ns":"dbTwo.data"}]} \ No newline at end of file
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/system.indexes.bson b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/system.indexes.bson
new file mode 100644
index 00000000000..cea78cca0a5
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/testdata/soft_linked_db/system.indexes.bson
Binary files differ
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles.js b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles.js
new file mode 100644
index 00000000000..2cdd595d090
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles.js
@@ -0,0 +1,85 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongorestore with --restoreDbUsersAndRoles
+
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles');
+
+ var toolTest = getToolTest('users_and_roles');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'users_and_roles_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create some users and roles on the database
+ testDB.createUser({
+ user: 'userOne',
+ pwd: 'pwdOne',
+ roles: [{role: 'read', db: 'test'}],
+ });
+ testDB.createRole({
+ role: 'roleOne',
+ privileges: [{
+ resource: {db: 'test', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+ testDB.createUser({
+ user: 'userTwo',
+ pwd: 'pwdTwo',
+ roles: [{role: 'roleOne', db: 'test'}],
+ });
+
+ // insert some data
+ for (var i = 0; i < 10; i++) {
+ testDB.data.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
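+  // (--dumpDbUsersAndRoles includes the users and roles defined on the dumped db
+  // in the dump, so --restoreDbUsersAndRoles can recreate them on restore)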
+
+ // drop the database, users, and roles
+ testDB.dropDatabase();
+ testDB.dropAllUsers();
+ testDB.dropAllRoles();
+
+ // restore the data, specifying --restoreDBUsersAndRoles
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget(dumpTarget+'/test'))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(10, testDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, testDB.data.count({_id: i}));
+ }
+
+ // make sure the users were restored
+ var users = testDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'userOne' || users[1].user === 'userOne');
+ assert(users[0].user === 'userTwo' || users[1].user === 'userTwo');
+
+ // make sure the role was restored
+ var roles = testDB.getRoles();
+ assert.eq(1, roles.length);
+ assert.eq('roleOne', roles[0].role);
+
+ // success
+ toolTest.stop();
+
+}());
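The test above establishes the dump/restore round trip reused throughout this suite. A minimal sketch of that round trip, assuming the same jstests helpers (getToolTest, getDumpTarget, getRestoreTarget) are loaded; the test name and dump directory here are illustrative only:

    // Sketch (not part of the diff): dump and restore one db's users and roles.
    var tt = getToolTest('users_and_roles_sketch');   // hypothetical test name
    var dump = 'sketch_dump';                         // hypothetical dump dir
    resetDbpath(dump);
    assert.eq(0, tt.runTool.apply(tt, ['dump', '--db', 'test', '--dumpDbUsersAndRoles']
      .concat(getDumpTarget(dump))));
    assert.eq(0, tt.runTool.apply(tt, ['restore', '--db', 'test', '--restoreDbUsersAndRoles']
      .concat(getRestoreTarget(dump + '/test'))));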
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_26_to_28_to_26.js b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_26_to_28_to_26.js
new file mode 100644
index 00000000000..e0f9cf3dd1c
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_26_to_28_to_26.js
@@ -0,0 +1,143 @@
+// This test requires the mongo 2.6.x and mongo 3.0.0 releases
+// @tags: [requires_mongo_26, requires_mongo_30]
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+ // skip tests requiring wiredTiger storage engine on pre 2.8 mongod
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ return;
+ }
+
+ // Tests using mongorestore with --restoreDbUsersAndRoles, using a dump from
+ // a 2.6 mongod and restoring to a 2.8 mongod, then dumping again and
+ // restoring to a 2.6 mongod.
+
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles,'+
+ ' restoring a 2.6 dump to a 2.8 mongod, then back to a 2.6 mongod');
+
+ var toolTest = new ToolTest('users_and_roles_26_to_28_to_26', {binVersion: '2.6'});
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'users_and_roles_26_to_28_to_26_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create some users and roles on the database
+ testDB.createUser({
+ user: 'userOne',
+ pwd: 'pwdOne',
+ roles: [{role: 'read', db: 'test'}],
+ });
+ testDB.createRole({
+ role: 'roleOne',
+ privileges: [{
+ resource: {db: 'test', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+ testDB.createUser({
+ user: 'userTwo',
+ pwd: 'pwdTwo',
+ roles: [{role: 'roleOne', db: 'test'}],
+ });
+
+ // insert some data
+ for (var i = 0; i < 10; i++) {
+ testDB.data.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles']
+ .concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the database, users, and roles
+ testDB.dropDatabase();
+ testDB.dropAllUsers();
+ testDB.dropAllRoles();
+
+ // restart the mongod as a 2.8
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ delete toolTest.options.binVersion;
+ toolTest.startDB('foo');
+
+ // refresh the db reference
+ testDB = toolTest.db.getSiblingDB('test');
+
+  // restore the data, specifying --restoreDbUsersAndRoles
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget(dumpTarget+'/test')));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(10, testDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, testDB.data.count({_id: i}));
+ }
+
+ // make sure the users were restored
+ var users = testDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'userOne' || users[1].user === 'userOne');
+ assert(users[0].user === 'userTwo' || users[1].user === 'userTwo');
+
+ // make sure the role was restored
+ var roles = testDB.getRoles();
+ assert.eq(1, roles.length);
+ assert.eq('roleOne', roles[0].role);
+
+ // dump the data again, to a slightly different target
+ ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles']
+ .concat(getDumpTarget(dumpTarget+'_second')));
+ assert.eq(0, ret);
+
+ // drop the database, users, and roles
+ testDB.dropDatabase();
+ testDB.dropAllUsers();
+ testDB.dropAllRoles();
+
+ // restart the mongod as a 2.6
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ toolTest.options = toolTest.options || {};
+ toolTest.options.binVersion = '2.6';
+ toolTest.startDB('foo');
+
+ // refresh the db reference
+ testDB = toolTest.db.getSiblingDB('test');
+
+  // restore the data, specifying --restoreDbUsersAndRoles
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget(dumpTarget+'_second/test')));
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(10, testDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, testDB.data.count({_id: i}));
+ }
+
+ // make sure the users were restored
+ users = testDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'userOne' || users[1].user === 'userOne');
+ assert(users[0].user === 'userTwo' || users[1].user === 'userTwo');
+
+ // make sure the role was restored
+ roles = testDB.getRoles();
+ assert.eq(1, roles.length);
+ assert.eq('roleOne', roles[0].role);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_28_to_26.js b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_28_to_26.js
new file mode 100644
index 00000000000..abc37867c6f
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_28_to_26.js
@@ -0,0 +1,157 @@
+// This test requires the mongo 2.6.x and mongo 3.0.0 releases
+// @tags: [requires_mongo_26, requires_mongo_30]
+(function() {
+
+ load("jstests/configs/standard_dump_targets.config.js");
+
+ // skip tests requiring wiredTiger storage engine on pre 2.8 mongod
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ return;
+ }
+
+ // Tests using mongorestore with --restoreDbUsersAndRoles, using a dump from
+ // a 2.8 mongod and restoring to a 2.6 mongod, which should fail.
+
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles,'+
+ ' restoring a 2.8 dump to a 2.6 mongod');
+
+ var toolTest = new ToolTest('users_and_roles_28_to_26');
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'users_and_roles_28_to_26_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create some users and roles on the database
+ testDB.createUser({
+ user: 'userOne',
+ pwd: 'pwdOne',
+ roles: [{role: 'read', db: 'test'}],
+ });
+ testDB.createRole({
+ role: 'roleOne',
+ privileges: [{
+ resource: {db: 'test', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+ testDB.createUser({
+ user: 'userTwo',
+ pwd: 'pwdTwo',
+ roles: [{role: 'roleOne', db: 'test'}],
+ });
+
+ // insert some data
+ for (var i = 0; i < 10; i++) {
+ testDB.data.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles']
+ .concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the database, users, and roles
+ testDB.dropDatabase();
+ testDB.dropAllUsers();
+ testDB.dropAllRoles();
+
+ // restart the mongod as a 2.6
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ toolTest.options = toolTest.options || {};
+ toolTest.options.binVersion = '2.6';
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // refresh the db reference
+ testDB = toolTest.db.getSiblingDB('test');
+
+  // restore the data, specifying --restoreDbUsersAndRoles. It should fail
+ // since the auth version is too new
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget(dumpTarget+'/test')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+  jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles,'+
+    ' restoring a 2.8 dump to a 2.6 mongod (second pass, reusing the dump target)');
+
+ toolTest = new ToolTest('users_and_roles_28_to_26');
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ dumpTarget = 'users_and_roles_28_to_26_dump';
+
+ // the db we'll be using
+ testDB = toolTest.db.getSiblingDB('test');
+
+ // create some users and roles on the database
+ testDB.createUser({
+ user: 'userOne',
+ pwd: 'pwdOne',
+ roles: [{role: 'read', db: 'test'}],
+ });
+ testDB.createRole({
+ role: 'roleOne',
+ privileges: [{
+ resource: {db: 'test', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+ testDB.createUser({
+ user: 'userTwo',
+ pwd: 'pwdTwo',
+ roles: [{role: 'roleOne', db: 'test'}],
+ });
+
+ // insert some data
+ for (i = 0; i < 10; i++) {
+ testDB.data.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // dump the data
+ ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles']
+ .concat(getDumpTarget(dumpTarget)));
+ assert.eq(0, ret);
+
+ // drop the database, users, and roles
+ testDB.dropDatabase();
+ testDB.dropAllUsers();
+ testDB.dropAllRoles();
+
+ // restart the mongod as a 2.6
+ stopMongod(toolTest.port);
+ toolTest.m = null;
+ toolTest.db = null;
+ toolTest.options = toolTest.options || {};
+ toolTest.options.binVersion = '2.6';
+ resetDbpath(toolTest.dbpath);
+ toolTest.startDB('foo');
+
+ // refresh the db reference
+ testDB = toolTest.db.getSiblingDB('test');
+
+  // restore the data, specifying --restoreDbUsersAndRoles. It should fail
+ // since the auth version is too new
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget(dumpTarget+'/test')));
+ assert.neq(0, ret);
+
+ // success
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_full_dump.js b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_full_dump.js
new file mode 100644
index 00000000000..97b95377a45
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_full_dump.js
@@ -0,0 +1,142 @@
+// This test requires mongo 2.6.x releases
+// @tags: [requires_mongo_26]
+(function() {
+
+ // Tests running mongorestore with --restoreDbUsersAndRoles against
+ // a full dump.
+
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles against'+
+ ' a full dump');
+
+ if (typeof getDumpTarget === 'undefined') {
+ load('jstests/configs/standard_dump_targets.config.js');
+ }
+
+ if (dump_targets !== "standard") {
+    print('skipping test incompatible with archiving or compression');
+ return assert(true);
+ }
+
+ var runTest = function(sourceDBVersion, dumpVersion, restoreVersion, destDBVersion) {
+
+ jsTest.log('Running with sourceDBVersion=' + (sourceDBVersion || 'latest') +
+ ', dumpVersion=' + (dumpVersion || 'latest') + ', restoreVersion=' +
+ (restoreVersion || 'latest') + ', and destDBVersion=' +
+ (destDBVersion || 'latest'));
+
+ var toolTest = new ToolTest('users_and_roles_full_dump',
+ {binVersion: sourceDBVersion});
+ toolTest.startDB('foo');
+
+ // where we'll put the dump
+ var dumpTarget = 'users_and_roles_full_dump_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we'll be using, and the admin db
+ var adminDB = toolTest.db.getSiblingDB('admin');
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create a user and role on the admin database
+ adminDB.createUser({
+ user: 'adminUser',
+ pwd: 'password',
+ roles: [{role: 'read', db: 'admin'}],
+ });
+ adminDB.createRole({
+ role: 'adminRole',
+ privileges: [{
+ resource: {db: 'admin', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+
+ // create some users and roles on the database
+ testDB.createUser({
+ user: 'userOne',
+ pwd: 'pwdOne',
+ roles: [{role: 'read', db: 'test'}],
+ });
+ testDB.createRole({
+ role: 'roleOne',
+ privileges: [{
+ resource: {db: 'test', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+ testDB.createUser({
+ user: 'userTwo',
+ pwd: 'pwdTwo',
+ roles: [{role: 'roleOne', db: 'test'}],
+ });
+
+ // insert some data
+ for (var i = 0; i < 10; i++) {
+ testDB.data.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // dump the data
+ var args = ['mongodump' + (dumpVersion ? ('-'+dumpVersion) : ''),
+ '--port', toolTest.port]
+ .concat(getDumpTarget(dumpTarget));
+ var ret = runMongoProgram.apply(this, args);
+ assert.eq(0, ret);
+
+ // restart the mongod, with a clean db path
+ stopMongod(toolTest.port);
+ resetDbpath(toolTest.dbpath);
+ toolTest.m = null;
+ toolTest.db = null;
+ toolTest.options.binVersion = destDBVersion;
+ toolTest.startDB('foo');
+
+ // refresh the db references
+ adminDB = toolTest.db.getSiblingDB('admin');
+ testDB = toolTest.db.getSiblingDB('test');
+
+ // do a full restore
+ args = ['mongorestore' + (restoreVersion ? ('-'+restoreVersion) : ''),
+ '--port', toolTest.port]
+ .concat(getRestoreTarget(dumpTarget));
+ ret = runMongoProgram.apply(this, args);
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(10, testDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, testDB.data.count({_id: i}));
+ }
+
+ // make sure the users were restored
+ var users = testDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'userOne' || users[1].user === 'userOne');
+ assert(users[0].user === 'userTwo' || users[1].user === 'userTwo');
+ var adminUsers = adminDB.getUsers();
+ assert.eq(1, adminUsers.length);
+ assert.eq('adminUser', adminUsers[0].user);
+
+ // make sure the roles were restored
+ var roles = testDB.getRoles();
+ assert.eq(1, roles.length);
+ assert.eq('roleOne', roles[0].role);
+ var adminRoles = adminDB.getRoles();
+ assert.eq(1, adminRoles.length);
+ assert.eq('adminRole', adminRoles[0].role);
+
+ // success
+ toolTest.stop();
+
+ };
+
+ // 'undefined' triggers latest
+ runTest('2.6', '2.6', undefined, '2.6');
+ runTest('2.6', '2.6', undefined, undefined);
+ runTest('2.6', undefined, undefined, undefined);
+ runTest('2.6', undefined, undefined, '2.6');
+ runTest(undefined, undefined, undefined, undefined);
+}());
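The version matrix above relies on string concatenation to pick a binary: an undefined version leaves the name unsuffixed, which resolves to the latest tool on the PATH. A small illustration of that selection logic (binName is a hypothetical helper, not part of the diff):

    // Sketch: an undefined version selects the unsuffixed (latest) binary name.
    function binName(base, version) {
      return base + (version ? ('-' + version) : '');
    }
    assert.eq('mongodump-2.6', binName('mongodump', '2.6'));
    assert.eq('mongodump', binName('mongodump', undefined));  // latest on PATH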
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_temp_collections.js b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_temp_collections.js
new file mode 100644
index 00000000000..fdbf236e8f8
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/users_and_roles_temp_collections.js
@@ -0,0 +1,104 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+
+ // Tests running mongorestore with --restoreDbUsersAndRoles
+
+ jsTest.log('Testing running mongorestore with --restoreDbUsersAndRoles');
+
+ var toolTest = getToolTest('users_and_roles_temp_collections');
+ var commonToolArgs = getCommonToolArguments();
+
+ // where we'll put the dump
+ var dumpTarget = 'users_and_roles_temp_collections_dump';
+ resetDbpath(dumpTarget);
+
+ // the db we'll be using
+ var testDB = toolTest.db.getSiblingDB('test');
+
+ // create some users and roles on the database
+ testDB.createUser({
+ user: 'userOne',
+ pwd: 'pwdOne',
+ roles: [{role: 'read', db: 'test'}],
+ });
+ testDB.createRole({
+ role: 'roleOne',
+ privileges: [{
+ resource: {db: 'test', collection: ''},
+ actions: ['find'],
+ }],
+ roles: [],
+ });
+ testDB.createUser({
+ user: 'userTwo',
+ pwd: 'pwdTwo',
+ roles: [{role: 'roleOne', db: 'test'}],
+ });
+
+ // insert some data
+ for (var i = 0; i < 10; i++) {
+ testDB.data.insert({_id: i});
+ }
+ // sanity check the insertion worked
+ assert.eq(10, testDB.data.count());
+
+ // dump the data
+ var ret = toolTest.runTool.apply(toolTest, ['dump', '--db', 'test', '--dumpDbUsersAndRoles']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ // drop the database, users, and roles
+ testDB.dropDatabase();
+ testDB.dropAllUsers();
+ testDB.dropAllRoles();
+
+ // insert to the default temp collections so we hit them later
+ var adminDB = toolTest.db.getSiblingDB('admin');
+ adminDB.tempusers.insert({_id: 1});
+ adminDB.temproles.insert({_id: 1});
+
+ // try to restore the data
+ ret = toolTest.runTool.apply(toolTest, ['restore', '--db', 'test', '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget(dumpTarget+'/test'))
+ .concat(commonToolArgs));
+
+ // we should succeed with default temp collections
+ assert.eq(0, ret);
+
+ // try to restore the data with new temp collections
+ ret = toolTest.runTool.apply(toolTest, ['restore',
+ '--db', 'test',
+ '--tempUsersColl', 'tempU',
+ '--tempRolesColl', 'tempR',
+ '--restoreDbUsersAndRoles']
+ .concat(getRestoreTarget(dumpTarget+'/test'))
+ .concat(commonToolArgs));
+
+ // we should succeed with new temp collections
+ assert.eq(0, ret);
+
+ // make sure the data was restored
+ assert.eq(10, testDB.data.count());
+ for (i = 0; i < 10; i++) {
+ assert.eq(1, testDB.data.count({_id: i}));
+ }
+
+ // make sure the users were restored
+ var users = testDB.getUsers();
+ assert.eq(2, users.length);
+ assert(users[0].user === 'userOne' || users[1].user === 'userOne');
+ assert(users[0].user === 'userTwo' || users[1].user === 'userTwo');
+
+ // make sure the role was restored
+ var roles = testDB.getRoles();
+ assert.eq(1, roles.length);
+ assert.eq('roleOne', roles[0].role);
+
+ // success
+ toolTest.stop();
+
+}());
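The two restore invocations above differ only in the flags that redirect mongorestore's temporary users/roles collections away from the defaults (admin.tempusers and admin.temproles, which the test deliberately pre-populates). A condensed sketch using the same helpers, with the collection names illustrative:

    // Sketch: restore with redirected temp collections.
    var args = ['restore', '--db', 'test',
      '--tempUsersColl', 'tempU',    // instead of the default admin.tempusers
      '--tempRolesColl', 'tempR',    // instead of the default admin.temproles
      '--restoreDbUsersAndRoles'];
    assert.eq(0, toolTest.runTool.apply(toolTest,
      args.concat(getRestoreTarget(dumpTarget + '/test')).concat(commonToolArgs)));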
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern.js b/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern.js
new file mode 100644
index 00000000000..ecdcddbcb18
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern.js
@@ -0,0 +1,64 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ var toolTest = new ToolTest('write_concern', null);
+ var commonToolArgs = getCommonToolArguments();
+
+ var rs = new ReplSetTest({
+ name: "rpls",
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ });
+
+ rs.startSet();
+ rs.initiate();
+ rs.awaitReplication();
+ toolTest.port = rs.getPrimary().port;
+ var dbOne = rs.getPrimary().getDB("dbOne");
+
+ // create a test collection
+ for (var i=0; i<=100; i++) {
+ dbOne.test.insert({_id: i, x: i*i});
+ }
+ rs.awaitReplication();
+
+  // dump the data that we'll restore later
+ var dumpTarget = 'write_concern_dump';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(writeConcern)
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(exitCode, ret, name);
+ dbOne.dropDatabase();
+ }
+
+ function noConnectTest() {
+ return startMongoProgramNoConnect.apply(null, ['mongorestore',
+ '--writeConcern={w:3}', '--host', rs.getPrimary().host]
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ }
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
+ runWCTest("mongorestore", rs, toolTest, writeConcernTestFunc, noConnectTest);
+
+ dbOne.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+
+}());
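runWCTest (from jstests/libs/wc_framework.js) drives the writeConcernTestFunc callback with varying --writeConcern arguments and expected exit codes. A hedged example of one case invoked directly, using only names defined above; the specific concern and expected outcome are illustrative:

    // Sketch: one write-concern case, outside the framework.
    writeConcernTestFunc(0, ['--writeConcern={w:2}'],
      'restore with w:2 against a healthy 3-node set');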
diff --git a/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern_mongos.js b/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern_mongos.js
new file mode 100644
index 00000000000..593004daaae
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/restore/write_concern_mongos.js
@@ -0,0 +1,69 @@
+(function() {
+
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ var toolTest = new ToolTest('write_concern', null);
+ var st = new ShardingTest({
+ shards: {
+ rs0: {
+ nodes: 3,
+ useHostName: true,
+ settings: {chainingAllowed: false},
+ },
+ },
+ mongos: 1,
+ config: 1,
+ configReplSetTestOptions: {
+ settings: {chainingAllowed: false},
+ },
+ });
+ var rs = st.rs0;
+ rs.awaitReplication();
+ toolTest.port = st.s.port;
+ var commonToolArgs = getCommonToolArguments();
+ var dbOne = st.s.getDB("dbOne");
+
+ // create a test collection
+ for (var i=0; i<=100; i++) {
+ dbOne.test.insert({_id: i, x: i*i});
+ }
+ rs.awaitReplication();
+
+  // dump the data that we'll restore later
+ var dumpTarget = 'write_concern_mongos_dump';
+ resetDbpath(dumpTarget);
+ var ret = toolTest.runTool.apply(toolTest, ['dump', '-d', 'dbOne']
+ .concat(getDumpTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(0, ret);
+
+ function writeConcernTestFunc(exitCode, writeConcern, name) {
+ jsTest.log(name);
+ var ret = toolTest.runTool.apply(toolTest, ['restore']
+ .concat(writeConcern)
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ assert.eq(exitCode, ret, name);
+ dbOne.dropDatabase();
+ }
+
+ function noConnectTest() {
+ return startMongoProgramNoConnect.apply(null, ['mongorestore',
+ '--writeConcern={w:3}', '--host', st.s.host]
+ .concat(getRestoreTarget(dumpTarget))
+ .concat(commonToolArgs));
+ }
+
+ // drop the database so it's empty
+ dbOne.dropDatabase();
+
+ // load and run the write concern suite
+ load('jstests/libs/wc_framework.js');
+ runWCTest("mongorestore", rs, toolTest, writeConcernTestFunc, noConnectTest);
+
+ dbOne.dropDatabase();
+ rs.stopSet();
+ toolTest.stop();
+
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/stat/stat_auth.js b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_auth.js
new file mode 100644
index 00000000000..fd62745c674
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_auth.js
@@ -0,0 +1,31 @@
+(function() {
+ load("jstests/libs/mongostat.js");
+ var port = allocatePort();
+ var m = startMongod(
+ "--auth",
+ "--port", port,
+ "--dbpath", MongoRunner.dataPath+"stat_auth"+port,
+ "--nohttpinterface",
+ "--bind_ip", "127.0.0.1");
+
+ var db = m.getDB("admin");
+ db.createUser({
+ user: "foobar",
+ pwd: "foobar",
+ roles: jsTest.adminUserRoles
+ });
+
+ assert(db.auth("foobar", "foobar"), "auth failed");
+
+ var args = ["mongostat",
+ "--host", "127.0.0.1:" + port,
+ "--rowcount", "1",
+ "--authenticationDatabase", "admin",
+ "--username", "foobar"];
+
+ var x = runMongoProgram.apply(null, args.concat("--password", "foobar"));
+ assert.eq(x, exitCodeSuccess, "mongostat should exit successfully with foobar:foobar");
+
+ x = runMongoProgram.apply(null, args.concat("--password", "wrong"));
+ assert.eq(x, exitCodeErr, "mongostat should exit with an error exit code with foobar:wrong");
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/stat/stat_custom_headers.js b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_custom_headers.js
new file mode 100644
index 00000000000..89cb539bc34
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_custom_headers.js
@@ -0,0 +1,107 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load("jstests/libs/mongostat.js");
+ load("jstests/libs/extended_assert.js");
+ var assert = extendedAssert;
+
+ var toolTest = getToolTest("stat_custom_headers");
+ var port = toolTest.port;
+
+ var x, rows;
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host,conn,time", "-O", "metrics.record.moves");
+ assert.eq(x, exitCodeBadOptions, "mongostat should fail with both -o and -O options");
+ clearRawMongoProgramOutput();
+
+ // basic -o --humanReadable=false
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host,conn,time", "-n", 4, "--humanReadable=false");
+ assert.eq(x, 0, "mongostat should succeed with -o and -n options");
+ assert.eq.soon(5, function() {
+ rows = statRows();
+ return rows.length;
+ }, "expected 5 rows in mongostat output");
+ assert.eq(statFields(rows[0]).join(), "host,conn,time",
+ "first row doesn't match 'host conn time'");
+ assert.eq(statFields(rows[1]).length, 3,
+ "there should be exactly three entries for a row of this stat output");
+ clearRawMongoProgramOutput();
+
+ // basic -o
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host,conn,time", "-n", 4);
+ assert.eq(x, 0, "mongostat should succeed with -o and -n options");
+ assert.eq.soon(5, function() {
+ rows = statRows();
+ return rows.length;
+ }, "expected 5 rows in mongostat output");
+ assert.eq(statFields(rows[0]).join(), "host,conn,time",
+ "first row doesn't match 'host conn time'");
+ assert.eq(statFields(rows[1]).length, 5,
+ "there should be exactly five entries for a row of this stat output (time counts as three)");
+ clearRawMongoProgramOutput();
+
+ // basic -O
+ x = runMongoProgram("mongostat", "--port", port,
+ "-O", "host", "-n", 4);
+  assert.eq(x, 0, "mongostat should succeed with -O and -n options");
+ rows = statRows();
+ var fields = statFields(rows[0]);
+ assert.eq(fields[fields.length-1], "host",
+ "first row should end with added 'host' field");
+ clearRawMongoProgramOutput();
+
+ // named
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host=H,conn=C,time=MYTiME", "-n", 4);
+ assert.eq(x, 0, "mongostat should succeed with -o and -n options");
+ assert.eq.soon(5, function() {
+ rows = statRows();
+ return rows.length;
+ }, "expected 5 rows in mongostat output");
+ assert.eq(statFields(rows[0]).join(), "H,C,MYTiME",
+ "first row doesn't match 'H C MYTiME'");
+ assert.eq(statFields(rows[1]).length, 5,
+ "there should be exactly five entries for a row of this stat output (time counts as three)");
+ clearRawMongoProgramOutput();
+
+ // serverStatus custom field
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host,conn,mem.bits", "-n", 4);
+ assert.eq(x, 0, "mongostat should succeed with -o and -n options");
+ assert.eq.soon(5, function() {
+ rows = statRows();
+ return rows.length;
+ }, "expected 5 rows in mongostat output");
+ assert.eq(statFields(rows[0]).join(), "host,conn,mem.bits",
+    "first row doesn't match 'host conn mem.bits'");
+ fields = statFields(rows[1]);
+ assert.eq(fields.length, 3,
+ "there should be exactly three entries for a row of this stat output");
+ assert(fields[2] === "32" || fields[2] === "64",
+ "mem.bits didn't yield valid output (should be one of 32 or 64, was '"
+ +fields[2]+"')");
+ clearRawMongoProgramOutput();
+
+ // serverStatus named field
+ x = runMongoProgram("mongostat", "--port", port,
+ "-o", "host,conn=MYCoNN,mem.bits=BiTs", "-n", 4);
+ assert.eq(x, 0, "mongostat should succeed with -o and -n options");
+ assert.eq.soon(5, function() {
+ rows = statRows();
+ return rows.length;
+ }, "expected 5 rows in mongostat output");
+ assert.eq(statFields(rows[0]).join(), "host,MYCoNN,BiTs",
+    "first row doesn't match 'host MYCoNN BiTs'");
+ fields = statFields(rows[1]);
+ assert.eq(fields.length, 3,
+ "there should be exactly three entries for a row of this stat output");
+ assert(fields[2] === "32" || fields[2] === "64",
+ "mem.bits didn't yield valid output (should be one of 32 or 64, was '"
+ +fields[2]+"')");
+ clearRawMongoProgramOutput();
+
+ toolTest.stop();
+}());
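The -o/-O syntax exercised above has three forms: a fixed column list, appended serverStatus paths, and field=Header renames. A compact recap of the invocations, hedged as illustration only (port as in the test above):

    // Sketch: the three field-selection forms stat_custom_headers.js covers.
    runMongoProgram("mongostat", "--port", port, "-n", 1,
      "-o", "host,conn,time");            // replace the default column set
    runMongoProgram("mongostat", "--port", port, "-n", 1,
      "-O", "mem.bits");                  // append to the default column set
    runMongoProgram("mongostat", "--port", port, "-n", 1,
      "-o", "host=H,mem.bits=BiTs");      // rename column headers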
diff --git a/src/mongo/gotools/test/qa-tests/jstests/stat/stat_discover.js b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_discover.js
new file mode 100644
index 00000000000..5b4aa3a0f6a
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_discover.js
@@ -0,0 +1,60 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load("jstests/libs/mongostat.js");
+
+ var toolTest = getToolTest("stat_discover");
+ var rs = new ReplSetTest({
+ name: "rpls",
+ nodes: 4,
+ useHostName: true,
+ });
+
+ rs.startSet();
+ rs.initiate();
+ rs.awaitReplication();
+
+  var worked = statCheck(["mongostat",
+ "--port", rs.liveNodes.master.port,
+ "--discover"],
+ hasOnlyPorts(rs.ports));
+ assert(worked, "when only port is used, each host still only appears once");
+
+ assert(discoverTest(rs.ports, rs.liveNodes.master.host), "--discover against a replset master sees all members");
+
+ assert(discoverTest(rs.ports, rs.liveNodes.slaves[0].host), "--discover against a replset slave sees all members");
+
+  var hosts = [rs.liveNodes.master.host, rs.liveNodes.slaves[0].host, rs.liveNodes.slaves[1].host];
+  var ports = [rs.liveNodes.master.port, rs.liveNodes.slaves[0].port, rs.liveNodes.slaves[1].port];
+ worked = statCheck(['mongostat',
+ '--host', hosts.join(',')],
+ hasOnlyPorts(ports));
+ assert(worked, "replica set specifiers are correctly used");
+
+  assert(discoverTest([toolTest.port], toolTest.m.host), "--discover against a standalone sees just the standalone");
+
+ // Test discovery with nodes cutting in and out
+ clearRawMongoProgramOutput();
+  var pid = startMongoProgramNoConnect("mongostat", "--host", rs.liveNodes.slaves[1].host, "--discover");
+
+ assert.soon(hasPort(rs.liveNodes.slaves[0].port), "discovered host is seen");
+ assert.soon(hasPort(rs.liveNodes.slaves[1].port), "specified host is seen");
+
+ rs.stop(rs.liveNodes.slaves[0]);
+ assert.soon(lacksPort(rs.liveNodes.slaves[0].port), "after discovered host is stopped, it is not seen");
+ assert.soon(hasPort(rs.liveNodes.slaves[1].port), "after discovered host is stopped, specified host is still seen");
+
+ rs.start(rs.liveNodes.slaves[0]);
+ assert.soon(hasPort(rs.liveNodes.slaves[0].port), "after discovered is restarted, discovered host is seen again");
+ assert.soon(hasPort(rs.liveNodes.slaves[1].port), "after discovered is restarted, specified host is still seen");
+
+ rs.stop(rs.liveNodes.slaves[1]);
+ assert.soon(lacksPort(rs.liveNodes.slaves[1].port), "after specified host is stopped, specified host is not seen");
+ assert.soon(hasPort(rs.liveNodes.slaves[0].port), "after specified host is stopped, the discovered host is still seen");
+
+ stopMongoProgramByPid(pid);
+
+ rs.stopSet();
+ toolTest.stop();
+}());
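stat_discover.js leans on helpers from jstests/libs/mongostat.js (statCheck, discoverTest, hasOnlyPorts, hasPort, lacksPort). Based on how they are called here, a minimal discovery assertion looks like the sketch below; somePort and expectedPorts are placeholders:

    // Sketch: --discover against one member should surface every member's port.
    var ok = statCheck(["mongostat", "--port", somePort, "--discover"],
      hasOnlyPorts(expectedPorts));
    assert(ok, "each discovered host appears exactly once");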
diff --git a/src/mongo/gotools/test/qa-tests/jstests/stat/stat_discover_shard.js b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_discover_shard.js
new file mode 100644
index 00000000000..621cdcdc388
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_discover_shard.js
@@ -0,0 +1,14 @@
+(function() {
+ load("jstests/libs/mongostat.js");
+
+ var st = new ShardingTest({name: "shard1", shards: 2});
+  var shardPorts = [st._mongos[0].port, st._connections[0].port, st._connections[1].port];
+
+ clearRawMongoProgramOutput();
+  var pid = startMongoProgramNoConnect("mongostat", "--host", st._mongos[0].host, "--discover");
+ assert.soon(hasOnlyPorts(shardPorts), "--discover against a mongos sees all shards");
+
+ st.stop();
+ assert.soon(hasOnlyPorts([]), "stops showing data when hosts come down");
+ assert.eq(exitCodeStopped, stopMongoProgramByPid(pid), "mongostat --discover against a sharded cluster shouldn't error when the cluster goes down");
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/stat/stat_header.js b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_header.js
new file mode 100644
index 00000000000..e9fe39957d5
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_header.js
@@ -0,0 +1,27 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load('jstests/libs/mongostat.js');
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var toolTest = getToolTest('stat_header');
+
+ function outputIncludesHeader() {
+ return rawMongoProgramOutput()
+ .split("\n").some(function(line) {
+ return line.match(/^sh\d+\| insert/);
+ });
+ }
+
+ clearRawMongoProgramOutput();
+  runMongoProgram("mongostat", "--port", toolTest.port, "--rowcount", 1);
+ assert.soon(outputIncludesHeader, "normally a header appears");
+
+ clearRawMongoProgramOutput();
+  runMongoProgram("mongostat", "--port", toolTest.port, "--rowcount", 1, "--noheaders");
+ assert.eq.soon(false, outputIncludesHeader, "--noheaders suppresses the header");
+
+ toolTest.stop();
+}());
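The header check above depends on how the shell captures child output: every line is prefixed with 'sh<pid>| ', so a mongostat header row shows up as e.g. 'sh12345| insert query update ...'. A sketch of the same match:

    // Sketch: detect a mongostat header row in captured shell output.
    var sawHeader = rawMongoProgramOutput().split("\n").some(function(line) {
      return (/^sh\d+\| insert/).test(line);
    });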
diff --git a/src/mongo/gotools/test/qa-tests/jstests/stat/stat_mixed_storage_engine.js b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_mixed_storage_engine.js
new file mode 100644
index 00000000000..56c5a10d30d
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_mixed_storage_engine.js
@@ -0,0 +1,45 @@
+// @tags: [requires_mmap_available]
+(function() {
+ if (TestData && TestData.storageEngine === 'wiredTiger') {
+ return;
+ }
+ load("jstests/libs/mongostat.js");
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+
+ var mmap_options = {storageEngine: "mmapv1"};
+ var wt_options = {storageEngine: "wiredTiger"};
+ var replTest = new ReplSetTest({
+ nodes: {
+ node0: mmap_options,
+ node1: mmap_options,
+ node2: wt_options,
+ },
+ });
+
+ replTest.startSet();
+ replTest.initiate();
+ replTest.awaitReplication();
+
+ clearRawMongoProgramOutput();
+  assert(discoverTest(replTest.ports, replTest.nodes[0].host), "mongostat against a heterogeneous storage engine replica set sees all hosts");
+
+ clearRawMongoProgramOutput();
+ runMongoProgram("mongostat", "--host", replTest.nodes[0].host, "--rowcount", 7, "--discover");
+ assert.strContains.soon("used flushes mapped", rawMongoProgramOutput, "against replset has fields for both engines");
+
+ replTest.stopSet();
+
+  var st = new ShardingTest({shards: [wt_options, mmap_options], options: {nopreallocj: true}});
+  var stdb = st.getDB("test");
+  var shardPorts = [st._mongos[0].port, st._connections[0].port, st._connections[1].port];
+
+ clearRawMongoProgramOutput();
+  assert(discoverTest(shardPorts, st._mongos[0].host), "mongostat reports on a heterogeneous storage engine sharded cluster");
+
+ clearRawMongoProgramOutput();
+ runMongoProgram("mongostat", "--host", st._mongos[0].host, "--rowcount", 7, "--discover");
+ assert.strContains.soon("used flushes mapped", rawMongoProgramOutput, "against sharded cluster has fields for both engines");
+
+ st.stop();
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/stat/stat_rowcount.js b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_rowcount.js
new file mode 100644
index 00000000000..3af09f723ea
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/stat/stat_rowcount.js
@@ -0,0 +1,56 @@
+(function() {
+ if (typeof getToolTest === 'undefined') {
+ load('jstests/configs/plain_28.config.js');
+ }
+ load("jstests/libs/mongostat.js");
+ load('jstests/libs/extended_assert.js');
+ var assert = extendedAssert;
+ var commonToolArgs = getCommonToolArguments();
+  print("common tool args");
+ printjson(commonToolArgs);
+
+ var toolTest = getToolTest('stat_rowcount');
+ var x, pid;
+ clearRawMongoProgramOutput();
+
+ x = runMongoProgram("mongostat", "--host", toolTest.m.host, "--rowcount", 7, "--noheaders");
+ assert.eq.soon(7, function() {
+ return rawMongoProgramOutput().split("\n").filter(function(r) {
+ return r.match(rowRegex);
+ }).length;
+ }, "--rowcount value is respected correctly");
+
+  var startTime = new Date();
+  x = runMongoProgram("mongostat", "--host", toolTest.m.host, "--rowcount", 3, "--noheaders", 3);
+  var endTime = new Date();
+  var duration = Math.floor((endTime - startTime) / 1000);
+  assert.gte(duration, 9, "sleep time affects the total time to produce a number of results");
+
+ clearRawMongoProgramOutput();
+
+ pid = startMongoProgramNoConnect.apply(null, ["mongostat", "--port", toolTest.port].concat(commonToolArgs));
+ assert.strContains.soon('sh'+pid+'| ', rawMongoProgramOutput, "should produce some output");
+ assert.eq(exitCodeStopped, stopMongoProgramByPid(pid), "stopping should cause mongostat exit with a 'stopped' code");
+
+ x = runMongoProgram.apply(null, ["mongostat", "--port", toolTest.port - 1, "--rowcount", 1].concat(commonToolArgs));
+ assert.neq(exitCodeSuccess, x, "can't connect causes an error exit code");
+
+ x = runMongoProgram.apply(null, ["mongostat", "--rowcount", "-1"].concat(commonToolArgs));
+ assert.eq(exitCodeBadOptions, x, "mongostat --rowcount specified with bad input: negative value");
+
+ x = runMongoProgram.apply(null, ["mongostat", "--rowcount", "foobar"].concat(commonToolArgs));
+ assert.eq(exitCodeBadOptions, x, "mongostat --rowcount specified with bad input: non-numeric value");
+
+ x = runMongoProgram.apply(null, ["mongostat", "--host", "badreplset/127.0.0.1:" + toolTest.port, "--rowcount", 1].concat(commonToolArgs));
+ assert.eq(exitCodeErr, x, "--host used with a replica set string for nodes not in a replica set");
+
+ pid = startMongoProgramNoConnect.apply(null, ["mongostat", "--host", "127.0.0.1:" + toolTest.port].concat(commonToolArgs));
+ assert.strContains.soon('sh'+pid+'| ', rawMongoProgramOutput, "should produce some output");
+
+ MongoRunner.stopMongod(toolTest.port);
+ assert.gte.soon(10, function() {
+ var rows = statRows();
+ return statFields(rows[rows.length - 1]).length;
+ }, "should stop showing new stat lines");
+ assert.eq(exitCodeStopped, stopMongoProgramByPid(pid), "mongostat shouldn't error out when the server goes down");
+}());
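stat_rowcount.js exercises all four exit-code constants defined in jstests/libs/mongostat.js. As used above, exitCodeBadOptions covers option-parsing failures, exitCodeErr runtime/connection errors, exitCodeSuccess a clean exit, and exitCodeStopped a tool killed via stopMongoProgramByPid. The sketch repeats one of the checks:

    // Sketch: a negative --rowcount is rejected during option parsing.
    var code = runMongoProgram("mongostat", "--rowcount", "-1");
    assert.eq(exitCodeBadOptions, code);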
diff --git a/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_json.js b/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_json.js
new file mode 100644
index 00000000000..cdf92788f94
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_json.js
@@ -0,0 +1,44 @@
+// mongotop_json.js; ensure that running mongotop using the --json flag works as
+// expected
+var testName = 'mongotop_json';
+(function() {
+ jsTest.log('Testing mongotop --json option');
+ load('jstests/top/util/mongotop_common.js');
+ var assert = extendedAssert;
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Using ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ // clear the output buffer
+ clearRawMongoProgramOutput();
+
+ // ensure tool runs without error with --rowcount = 1
+ var ret = executeProgram(['mongotop', '--port', conn.port, '--json', '--rowcount', 1].concat(passthrough.args));
+ assert.eq(ret.exitCode, 0, 'failed 1');
+ assert.eq.soon('object', function() {
+ return typeof JSON.parse(extractJSON(ret.getOutput()));
+ }, 'invalid JSON 1');
+
+ // ensure tool runs without error with --rowcount > 1
+ var rowcount = 5;
+ clearRawMongoProgramOutput();
+ ret = executeProgram(['mongotop', '--port', conn.port, '--json', '--rowcount', rowcount].concat(passthrough.args));
+ assert.eq(ret.exitCode, 0, 'failed 2');
+ assert.eq.soon(rowcount, function() {
+ return ret.getOutput().split('\n').length;
+ }, "expected " + rowcount + " top results");
+ ret.getOutput().split('\n').forEach(function(line) {
+ assert(typeof JSON.parse(extractJSON(line)) === 'object', 'invalid JSON 2');
+ });
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ });
+}());
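mongotop --json emits one JSON document per sample, and extractJSON (defined in jstests/top/util/mongotop_common.js, included later in this diff) slices from the first '{' to the last '}' to strip the shell's pid prefix. A sketch with a hypothetical sample line:

    // Sketch: parse one prefixed mongotop --json output line.
    var line = 'sh12345| {"totals":{"foo.bar":{"total":{"time":0,"count":0}}}}';
    var doc = JSON.parse(extractJSON(line));
    assert.eq('object', typeof doc.totals);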
diff --git a/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_reports.js b/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_reports.js
new file mode 100644
index 00000000000..24f2f13be84
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_reports.js
@@ -0,0 +1,148 @@
+// mongotop_reports.js; ensure that running mongotop reports accurately on operations
+// going on in namespaces
+var testName = 'mongotop_reports';
+load('jstests/top/util/mongotop_common.js');
+
+(function() {
+ jsTest.log('Testing mongotop\'s reporting fidelity');
+ var assert = extendedAssert;
+ var read = 'read';
+ var write = 'write';
+
+ var runReportTest = function(topology, passthrough, test) {
+ jsTest.log('Using ' + passthrough.name + ' passthrough on ' + test.name + ' shell');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ db = conn.getDB('foo'); // eslint-disable-line no-native-reassign
+ db.dropDatabase();
+ assert.eq(db.bar.count(), 0, 'drop failed');
+
+ // start the parallel shell command
+ if (passthrough.name === auth.name) {
+ var authCommand = '\n db.getSiblingDB(\'admin\').auth(\'' + authUser + '\',\'' + authPassword + '\'); \n';
+ test.shellCommand = authCommand + test.shellCommand;
+ }
+ startParallelShell(test.shellCommand);
+
+ // allow for command to actually start
+ sleep(5000);
+
+ // ensure tool runs without error
+ clearRawMongoProgramOutput();
+ var ret = executeProgram(['mongotop', '--port', conn.port, '--json', '--rowcount', 1].concat(passthrough.args));
+ assert.eq(ret.exitCode, 0, 'failed 1');
+ var parsedOutput;
+ assert.eq.soon('object', function() {
+ parsedOutput = JSON.parse(extractJSON(ret.getOutput()));
+ return typeof parsedOutput;
+ }, 'invalid JSON 1');
+
+ // ensure only the active namespaces reports a non-zero value
+ for (var namespace in parsedOutput.totals) {
+ if (!parsedOutput.totals.hasOwnProperty(namespace)) {
+ continue;
+ }
+ var isAuthActivity = namespace.indexOf('.system.') !== -1;
+ var isReplActivity = namespace.indexOf('local.') !== -1;
+
+ // authentication and replication activity should be ignored
+ if (isAuthActivity || isReplActivity) {
+ continue;
+ }
+
+ var nsDetails = parsedOutput.totals[namespace];
+ assert.neq(nsDetails, undefined, 'no details reported for namespace ' + namespace);
+
+ var comparator = 'eq';
+ var shouldHaveActivity = test.namespaces.filter(function(testSpace) { // eslint-disable-line no-loop-func
+ return testSpace === namespace;
+ });
+
+ // return the opposite comparator if this namespace should have activity
+ if (shouldHaveActivity.length !== 0) {
+ comparator = 'neq';
+ }
+
+ test.indicators.forEach(function(indicator) { // eslint-disable-line no-loop-func
+ ['count', 'time'].forEach(function(metric) {
+ assert[comparator](nsDetails[indicator][metric], 0, 'unexpected ' + indicator + ' activity on ' + namespace + '; ' + metric + ': ' + nsDetails[indicator][metric]);
+ if (test.indicators.length === 1) {
+ // read or write shell
+ var opposite = read;
+ if (test.name === read) {
+ opposite = write;
+ }
+ // ensure there's no activity on the inactive metric
+ // sometimes the readings are a bit out of sync - making some
+ // allowance to prevent test flakiness
+ assert.between(0, nsDetails[opposite][metric], 1, 'unexpected ' + opposite + ' (opposite) activity on ' + namespace + '; ' + metric + ': ' + nsDetails[opposite][metric]);
+ } else {
+ // read/write shell should have read and write activity
+ assert[comparator](nsDetails[read][metric], 0, 'unexpected ' + read + ' activity (read/write) on ' + namespace + '; ' + metric + ': ' + nsDetails[read][metric]);
+ assert[comparator](nsDetails[write][metric], 0, 'unexpected ' + write + ' activity (read/write) on ' + namespace + '; ' + metric + ': ' + nsDetails[write][metric]);
+ }
+ var calculatedSum = nsDetails[read][metric] + nsDetails[write][metric];
+ var expectedSum = nsDetails['total'][metric];
+
+ // sometimes the total isn't exact - making some allowance to prevent
+ // test flakiness
+ assert.between(0, expectedSum - calculatedSum, 1, 'unexpected sum for metric ' + metric + ': expected ' + expectedSum + ' but got ' + calculatedSum);
+ });
+ });
+ }
+ t.stop();
+ };
+
+ var runTests = function(topology, passthrough) {
+ var readShell = '\nprint(\'starting read\'); \n' +
+ 'for (var i = 0; i < 1000000; ++i) \n{ ' +
+ ' db.getSiblingDB(\'foo\').bar.find({ x: i }).forEach(function(){}); \n' +
+ ' sleep(1); \n' +
+ '}\n';
+
+ var writeShell = '\nprint(\'starting write\'); \n' +
+ 'for (var i = 0; i < 1000000; ++i) { \n' +
+ ' db.getSiblingDB(\'foo\').bar.insert({ x: i }); \n' +
+ ' sleep(1); \n' +
+ '}\n';
+
+ var readWriteShell = '\nprint(\'starting read/write\'); \n' +
+ 'for (var i = 0; i < 1000000; ++i) \n{ ' +
+ ' db.getSiblingDB(\'foo\').bar.insert({ x: i }); \n' +
+ ' db.getSiblingDB(\'foo\').bar.find({ x: i }).forEach(function(){}); \n' +
+ ' sleep(1); \n' +
+ '}\n';
+
+ var testSpaces = [
+ ['foo.bar'],
+ ['foo.bar', 'bar.foo'],
+ ];
+
+ var tests = [{
+ name: read,
+ indicators: [read],
+ shellCommand: readShell,
+ }, {
+ name: write,
+ indicators: [write],
+ shellCommand: writeShell,
+ }, {
+ name: read + '/' + write,
+ indicators: [read, write],
+ shellCommand: readWriteShell,
+ }];
+
+ tests.forEach(function(test) {
+ testSpaces.forEach(function(testSpace) {
+ test.namespaces = testSpace;
+ runReportTest(topology, passthrough, test);
+ });
+ });
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ });
+}());
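The tolerance checks above allow a drift of at most 1 between read + write and the reported total for each metric. A sketch of that per-namespace invariant, using the totals shape the test parses (parsedOutput and the 'foo.bar' namespace as above):

    // Sketch: per-namespace invariant checked above (allowing drift of 1).
    var ns = parsedOutput.totals['foo.bar'];
    ['count', 'time'].forEach(function(metric) {
      var drift = ns.total[metric] - (ns.read[metric] + ns.write[metric]);
      assert.between(0, drift, 1);
    });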
diff --git a/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_sharded.js b/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_sharded.js
new file mode 100644
index 00000000000..6bce18a5e9b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_sharded.js
@@ -0,0 +1,47 @@
+// mongotop_sharded.js; ensure that running mongotop against a sharded cluster
+// fails with a useful error message
+var testName = 'mongotop_sharded';
+(function() {
+ jsTest.log('Testing mongotop against sharded cluster');
+ load('jstests/top/util/mongotop_common.js');
+ var assert = extendedAssert;
+
+ var expectedError = 'cannot run mongotop against a mongos';
+ var verifyOutput = function(getOutput) {
+ assert.strContains.soon(expectedError, getOutput, 'error message must appear at least once');
+ var shellOutput = getOutput();
+ jsTest.log('shell output: ' + shellOutput);
+ shellOutput.split('\n').forEach(function(line) {
+ // check the displayed error message
+      assert.neq(line.match(expectedError), null, 'unexpected error message');
+ });
+ };
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Using ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ // getting the version should work without error
+ assert.eq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--version'].concat(passthrough.args)), 0, 'failed 1');
+
+ // getting the help text should work without error
+ assert.eq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--help'].concat(passthrough.args)), 0, 'failed 2');
+
+ // anything that runs against the mongos server should fail
+ var result = executeProgram(['mongotop', '--port', conn.port].concat(passthrough.args));
+ assert.neq(result.exitCode, 0, 'expected failure against a mongos');
+ verifyOutput(result.getOutput);
+
+ result = executeProgram(['mongotop', '--port', conn.port, '2'].concat(passthrough.args));
+ assert.neq(result.exitCode, 0, 'expected failure against a mongos');
+ verifyOutput(result.getOutput);
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(shardedClusterTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_stress.js b/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_stress.js
new file mode 100644
index 00000000000..0283fa00b3b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_stress.js
@@ -0,0 +1,49 @@
+// mongotop_stress.js; ensure that running mongotop, even when the server is
+// under heavy load, works as expected
+var testName = 'mongotop_stress';
+load('jstests/top/util/mongotop_common.js');
+
+(function() {
+ jsTest.log('Testing mongotop\'s performance under load');
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Using ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+ db = conn.getDB('foo'); // eslint-disable-line no-native-reassign
+
+ // concurrently insert documents into thousands of collections
+ var stressShell = '\nprint(\'starting read/write stress test\'); \n' +
+ ' if (\'' + passthrough.name + '\' === \'auth\')' +
+ ' db.getSiblingDB(\'admin\').auth(\'' + authUser + '\',\'' + authPassword + '\'); ' +
+ ' var dbName = (Math.random() + 1).toString(36).substring(7); ' +
+ ' var clName = (Math.random() + 1).toString(36).substring(7); ' +
+ ' for (var i = 0; i < 10000; ++i) { ' +
+      ' db.getSiblingDB(dbName).getCollection(clName).find({ x: i }).forEach(function(){}); \n' +
+ ' sleep(1); \n' +
+ ' db.getSiblingDB(dbName).getCollection(clName).insert({ x: i }); \n' +
+ ' sleep(1);\n' +
+ ' }\n';
+
+ for (var i = 0; i < 10; ++i) {
+ startParallelShell(stressShell);
+ }
+
+ // wait a bit for the stress to kick in
+ sleep(5000);
+ jsTest.log('Current operation(s)');
+ printjson(db.currentOp());
+
+ // ensure tool runs without error
+ clearRawMongoProgramOutput();
+ assert.eq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--json', '--rowcount', 1].concat(passthrough.args)), 0, 'failed 1');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_validation.js b/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_validation.js
new file mode 100644
index 00000000000..0abe5a4c74b
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/top/mongotop_validation.js
@@ -0,0 +1,46 @@
+// mongotop_validation.js; ensure that running mongotop using invalid arguments
+// fail as expected
+var testName = 'mongotop_validation';
+load('jstests/top/util/mongotop_common.js');
+
+(function() {
+ jsTest.log('Testing mongotop with invalid arguments');
+
+ var runTests = function(topology, passthrough) {
+ jsTest.log('Using ' + passthrough.name + ' passthrough');
+ var t = topology.init(passthrough);
+ var conn = t.connection();
+
+ // checking the version should not return an error
+ assert.eq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--version'].concat(passthrough.args)), 0, '--version assertion failure 1');
+
+
+ // ensure tool returns an error...
+
+ // when used with an invalid port
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', 55555].concat(passthrough.args)), 0, '--port assertion failure 1');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', 'hello'].concat(passthrough.args)), 0, '--port assertion failure 2');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', ''].concat(passthrough.args)), 0, '--port assertion failure 3');
+
+ // when supplied invalid row counts
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--rowcount', '-2'].concat(passthrough.args)), 0, '--rowcount assertion failure 1');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--rowcount', 'hello'].concat(passthrough.args)), 0, '--rowcount assertion failure 2');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--rowcount', ''].concat(passthrough.args)), 0, '--rowcount assertion failure 3');
+
+ // when supplied invalid sleep times
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '-4'].concat(passthrough.args)), 0, 'sleep time assertion failure 1');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, 'forever'].concat(passthrough.args)), 0, 'sleep time assertion failure 2');
+
+ // when supplied invalid options
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--elder'].concat(passthrough.args)), 0, 'invalid options failure 1');
+ assert.neq(runMongoProgram.apply(this, ['mongotop', '--port', conn.port, '--price'].concat(passthrough.args)), 0, 'invalid options failure 2');
+
+ t.stop();
+ };
+
+ // run with plain and auth passthroughs
+ passthroughs.forEach(function(passthrough) {
+ runTests(standaloneTopology, passthrough);
+ runTests(replicaSetTopology, passthrough);
+ });
+}());
diff --git a/src/mongo/gotools/test/qa-tests/jstests/top/util/mongotop_common.js b/src/mongo/gotools/test/qa-tests/jstests/top/util/mongotop_common.js
new file mode 100644
index 00000000000..5d4e002fb92
--- /dev/null
+++ b/src/mongo/gotools/test/qa-tests/jstests/top/util/mongotop_common.js
@@ -0,0 +1,25 @@
+// mongotop_common.js; contains variables used by mongotop tests
+/* exported executeProgram */
+/* exported extractJSON */
+load('jstests/common/topology_helper.js');
+load('jstests/libs/extended_assert.js');
+
+var executeProgram = function(args) {
+ clearRawMongoProgramOutput();
+ var pid = startMongoProgramNoConnect.apply(this, args);
+ var exitCode = waitProgram(pid);
+ var prefix = 'sh'+pid+'| ';
+ var getOutput = function() {
+ return rawMongoProgramOutput().split('\n').filter(function(line) {
+ return line.indexOf(prefix) === 0;
+ }).join('\n');
+ };
+ return {
+ exitCode: exitCode,
+ getOutput: getOutput,
+ };
+};
+
+var extractJSON = function(shellOutput) {
+ return shellOutput.substring(shellOutput.indexOf('{'), shellOutput.lastIndexOf('}') + 1);
+};
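A usage sketch for the two helpers above, mirroring how the mongotop tests call them; somePort is a placeholder:

    // Sketch: run a tool, gate on its exit code, then parse its captured output.
    var ret = executeProgram(['mongotop', '--port', somePort, '--json', '--rowcount', 1]);
    assert.eq(0, ret.exitCode);
    var doc = JSON.parse(extractJSON(ret.getOutput()));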
diff --git a/src/mongo/gotools/vendor.bat b/src/mongo/gotools/vendor.bat
new file mode 100644
index 00000000000..5648d108b15
--- /dev/null
+++ b/src/mongo/gotools/vendor.bat
@@ -0,0 +1,31 @@
+@echo off
+
+setlocal EnableDelayedExpansion
+
+set GOPATH=%cd%\vendor
+
+for /F "eol=; tokens=1,2,3" %%i in (Godeps) do (
+ set package=%%i
+ set version=%%j
+ set dest=%%k
+ echo Getting package !package!
+
+ if not "!dest!"=="" (
+ set dest=!package!
+ set package=%%k
+ )
+
+ go get -u -d "!package!" >nul 2>&1
+ echo Setting package to version !version!
+ cd "%GOPATH%\src\!package!"
+ git checkout !version! >nul 2>&1
+
+ if not "!dest!"=="" (
+ cd "%GOPATH%"
+ if exist "%GOPATH%\src\!dest!" rd /s /q "%GOPATH%\src\!dest!"
+ xcopy "%GOPATH%\src\!package!" "%GOPATH%\src\!dest!" /Y /S /I >nul 2>&1
+ rd /s /q "%GOPATH%\src\!package!"
+ )
+)
+
+endlocal
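vendor.bat and the vendor.sh that follows parse the same Godeps layout: two whitespace-separated fields (import path, VCS revision), with an optional third field naming the real source package when the first field is only the destination path under vendor/src. Hypothetical example lines of each form:

    github.com/example/dep 0123abcd
    github.com/wanted/path 0123abcd github.com/actual/source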
diff --git a/src/mongo/gotools/vendor.sh b/src/mongo/gotools/vendor.sh
new file mode 100755
index 00000000000..de748eee91f
--- /dev/null
+++ b/src/mongo/gotools/vendor.sh
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+
+set -eu
+# Make sure we're in the directory where the script lives
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+cd "$SCRIPT_DIR"
+echo "Installing dependencies..."
+
+# Set the $GOPATH appropriately so that the dependencies are
+# installed into the vendor directory
+export GOPATH=`pwd`/vendor
+
+## Functions/
+usage() {
+cat << EOF
+USAGE
+ $ vendor.sh # Same as 'install'.
+ $ vendor.sh install # Parses the Godeps file, installs dependencies and sets
+ # them to the appropriate version.
+ $ vendor.sh version # Outputs the version of gpm used
+ $ vendor.sh help # Prints this message
+EOF
+}
+
+# Iterates over Godep file dependencies and sets
+# the specified version on each of them.
+set_dependencies() {
+ local pids=()
+ while read line; do
+ local line=`echo $line | sed 's/#.*//;/^\s*$/d' || echo ""`
+ [ ! "$line" ] && continue
+ (
+ line=($line)
+ local package=${line[0]}
+ local version=${line[1]}
+ local dest=""
+ if [[ -n ${line[2]:-} ]]; then
+ dest=$package
+ package=${line[2]}
+ fi
+
+ if [[ "$OSTYPE" == "cygwin" || "$OSTYPE" == "msys" ]]
+ then
+ local install_path="${GOPATH%%;*}/src/${package%%/...}"
+ else
+ local install_path="${GOPATH%%:*}/src/${package%%/...}"
+ fi
+
+ [[ -e "$install_path/.git/index.lock" ||
+ -e "$install_path/.hg/store/lock" ||
+ -e "$install_path/.bzr/checkout/lock" ]] && wait
+
+      echo ">> Getting package $package"
+ go get -u -d "$package"
+
+ cd $install_path
+ hg update "$version" > /dev/null 2>&1 || \
+ git checkout "$version" > /dev/null 2>&1 || \
+ bzr revert -r "$version" > /dev/null 2>&1 || \
+ #svn has exit status of 0 when there is no .svn
+ { [ -d .svn ] && svn update -r "$version" > /dev/null 2>&1; } || \
+ { echo ">> Failed to set $package to version $version"; exit 1; }
+
+ echo ">> Set $package to version $version"
+ if [[ -n "$dest" ]] ; then
+ if [[ "$OSTYPE" == "cygwin" || "$OSTYPE" == "msys" ]]
+ then
+ local dest_path="${GOPATH%%;*}/src/${dest%%/...}"
+ else
+ local dest_path="${GOPATH%%:*}/src/${dest%%/...}"
+ fi
+ mkdir -p "$(dirname "$dest_path")"
+ cd "$(dirname "$dest_path")"
+ rm -rf $dest_path
+ mv $install_path $dest_path
+ echo ">> moved $install_path to $dest_path"
+ fi
+ ) &
+ pids=(${pids[@]-} $!)
+ done < $1
+
+ for pid in "${pids[@]-}"; do
+ wait $pid
+ local status=$?
+ [ $status -ne 0 ] && exit $status
+ done
+
+ echo ">> All Done"
+}
+## /Functions
+
+## Command Line Parsing
+case "${1:-"install"}" in
+ "version")
+ echo ">> gpm v1.2.1"
+ ;;
+ "install")
+ deps_file="${2:-"Godeps"}"
+ [[ -f "$deps_file" ]] || (echo ">> $deps_file file does not exist." && exit 1)
+ (go version > /dev/null) ||
+ ( echo ">> Go is currently not installed or in your PATH" && exit 1)
+ set_dependencies $deps_file
+ ;;
+ "help")
+ usage
+ ;;
+ *)
+ ## Support for Plugins: if command is unknown search for a gpm-command executable.
+ if command -v "gpm-$1" > /dev/null
+ then
+ plugin=$1 &&
+ shift &&
+ gpm-"$plugin" "$@" &&
+ exit
+ else
+ usage && exit 1
+ fi
+ ;;
+esac
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/LICENSE b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/LICENSE
new file mode 100644
index 00000000000..65d761bc9f2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/README b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/README
new file mode 100644
index 00000000000..c763bdd2e49
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/README
@@ -0,0 +1,59 @@
+Golint is a linter for Go source code.
+
+To install, run
+ go get github.com/golang/lint/golint
+
+Invoke golint with one or more filenames or directories.
+The output of this tool is a list of suggestions in Vim quickfix format,
+which is accepted by lots of different editors.
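+For example, a run might print a line such as (illustrative):
+  foo.go:12:6: exported type T should have comment or be unexported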
+
+Golint differs from gofmt. Gofmt reformats Go source code, whereas
+golint prints out style mistakes.
+
+Golint differs from govet. Govet is concerned with correctness, whereas
+golint is concerned with coding style. Golint is in use at Google, and it
+seeks to match the accepted style of the open source Go project.
+
+The suggestions made by golint are exactly that: suggestions.
+Golint is not perfect, and has both false positives and false negatives.
+Do not treat its output as a gold standard. We will not be adding pragmas
+or other knobs to suppress specific warnings, so do not expect or require
+code to be completely "lint-free".
+In short, this tool is not, and will never be, trustworthy enough for its
+suggestions to be enforced automatically, for example as part of a build process.
+
+If you find an established style that is frequently violated, and which
+you think golint could statically check, file an issue at
+ https://github.com/golang/lint/issues
+
+
+Contributions
+-------------
+Contributions to this project are welcome, though please send mail before
+starting work on anything major. Contributors retain their copyright, so we
+need you to fill out a short form before we can accept your contribution:
+ https://developers.google.com/open-source/cla/individual
+
+
+Vim
+---
+Add this to your ~/.vimrc:
+ set rtp+=$GOPATH/src/github.com/golang/lint/misc/vim
+If you have multiple entries in your GOPATH, replace $GOPATH with the right value.
+
+Running :Lint will run golint on the current file and populate the quickfix list.
+
+Optionally, add this to your ~/.vimrc to automatically run golint on :w
+ autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow
+
+
+Emacs
+-----
+Add this to your .emacs file:
+ (add-to-list 'load-path (concat (getenv "GOPATH") "/src/github.com/golang/lint/misc/emacs"))
+ (require 'golint)
+If you have multiple entries in your GOPATH, replace (getenv "GOPATH") with the right value.
+
+Running M-x golint will run golint on the current file.
+For more usage, see Compilation-Mode:
+ http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/golint/golint.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/golint/golint.go
new file mode 100644
index 00000000000..d1b07baee59
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/golint/golint.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// golint lints the Go source files named on its command line.
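+//
+// A typical invocation (file and directory names are illustrative):
+//
+//   golint -min_confidence=0.8 foo.go somedir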
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/3rf/mongo-lint"
+)
+
+var minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it")
+
+func main() {
+ flag.Parse()
+
+ for _, filename := range flag.Args() {
+ if isDir(filename) {
+ lintDir(filename)
+ } else {
+ lintFile(filename)
+ }
+ }
+}
+
+func isDir(filename string) bool {
+ fi, err := os.Stat(filename)
+ return err == nil && fi.IsDir()
+}
+
+func lintFile(filename string) {
+ src, err := ioutil.ReadFile(filename)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ return
+ }
+
+ l := new(lint.Linter)
+ ps, err := l.Lint(filename, src)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "%v:%v\n", filename, err)
+ return
+ }
+ for _, p := range ps {
+ if p.Confidence >= *minConfidence {
+ fmt.Printf("%s:%v: %s\n", filename, p.Position, p.Text)
+ }
+ }
+}
+
+func lintDir(dirname string) {
+ filepath.Walk(dirname, func(path string, info os.FileInfo, err error) error {
+ if err == nil && !info.IsDir() && strings.HasSuffix(path, ".go") {
+ lintFile(path)
+ }
+ return err
+ })
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/lint.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/lint.go
new file mode 100644
index 00000000000..e8a7b36bc3b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/lint.go
@@ -0,0 +1,1057 @@
+// Copyright (c) 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package lint contains a linter for Go source code.
+package lint
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+const styleGuideBase = "http://golang.org/s/comments"
+
+// A Linter lints Go source code.
+type Linter struct {
+}
+
+// Problem represents a problem in some source code.
+type Problem struct {
+ Position token.Position // position in source file
+ Text string // the prose that describes the problem
+ Link string // (optional) the link to the style guide for the problem
+ Confidence float64 // a value in (0,1] estimating the confidence in this problem's correctness
+ LineText string // the source line
+}
+
+func (p *Problem) String() string {
+ if p.Link != "" {
+ return p.Text + "\n\n" + p.Link
+ }
+ return p.Text
+}
+
+// Lint lints src.
+func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, "", src, parser.ParseComments)
+ if err != nil {
+ return nil, err
+ }
+ return (&file{fset: fset, f: f, src: src, filename: filename}).lint(), nil
+}
+
+// file represents a file being linted.
+type file struct {
+ fset *token.FileSet
+ f *ast.File
+ src []byte
+ filename string
+
+ // sortable is the set of types in the file that implement sort.Interface.
+ sortable map[string]bool
+ // main is whether this file is in a "main" package.
+ main bool
+
+ problems []Problem
+}
+
+func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") }
+
+func (f *file) lint() []Problem {
+ f.scanSortable()
+ f.main = f.isMain()
+
+ f.lintPackageComment()
+ f.lintImports()
+ f.lintBlankImports()
+ //f.lintExported()
+ //f.lintNames()
+ f.lintVarDecls()
+ f.lintElses()
+ f.lintRanges()
+ f.lintErrorf()
+ f.lintErrors()
+ f.lintErrorStrings()
+ //f.lintReceiverNames()
+ f.lintIncDec()
+ f.lintMake()
+
+ return f.problems
+}
+
+func (f *file) errorf(n ast.Node, confidence float64, link, format string, a ...interface{}) {
+ p := f.fset.Position(n.Pos())
+ f.problems = append(f.problems, Problem{
+ Position: p,
+ Text: fmt.Sprintf(format, a...),
+ Link: link,
+ Confidence: confidence,
+ LineText: srcLine(f.src, p),
+ })
+}
+
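+// scanSortable records in f.sortable the receiver types that define all of
+// Len, Less and Swap, i.e. those that look like sort.Interface implementations.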
+func (f *file) scanSortable() {
+ f.sortable = make(map[string]bool)
+
+ // bitfield for which methods exist on each type.
+ const (
+ Len = 1 << iota
+ Less
+ Swap
+ )
+ nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap}
+ has := make(map[string]int)
+ f.walk(func(n ast.Node) bool {
+ fn, ok := n.(*ast.FuncDecl)
+ if !ok || fn.Recv == nil {
+ return true
+ }
+ // TODO(dsymonds): We could check the signature to be more precise.
+ recv := receiverType(fn)
+ if i, ok := nmap[fn.Name.Name]; ok {
+ has[recv] |= i
+ }
+ return false
+ })
+ for typ, ms := range has {
+ if ms == Len|Less|Swap {
+ f.sortable[typ] = true
+ }
+ }
+}
+
+func (f *file) isMain() bool {
+ return f.f.Name.Name == "main"
+}
+
+// lintPackageComment checks package comments. It complains if
+// there is no package comment, or if it is not of the right form.
+// This has a notable false positive in that a package comment
+// could rightfully appear in a different file of the same package,
+// but that's not easy to fix since this linter is file-oriented.
+func (f *file) lintPackageComment() {
+ if f.isTest() {
+ return
+ }
+
+ const link = styleGuideBase + "#Package_Comments"
+ if f.f.Doc == nil {
+ f.errorf(f.f, 0.2, link, "should have a package comment, unless it's in another file for this package")
+ return
+ }
+ s := f.f.Doc.Text()
+ prefix := "Package " + f.f.Name.Name + " "
+ if ts := strings.TrimLeft(s, " \t"); ts != s {
+ f.errorf(f.f.Doc, 1, link, "package comment should not have leading space")
+ s = ts
+ }
+ // Only non-main packages need to keep to this form.
+ if f.f.Name.Name != "main" && !strings.HasPrefix(s, prefix) {
+ f.errorf(f.f.Doc, 1, link, `package comment should be of the form "%s..."`, prefix)
+ }
+}
+
+// lintBlankImports complains if a non-main package has blank imports that are
+// not documented.
+func (f *file) lintBlankImports() {
+ // In package main and in tests, we don't complain about blank imports.
+ if f.main || f.isTest() {
+ return
+ }
+
+ // The first element of each contiguous group of blank imports should have
+ // an explanatory comment of some kind.
+ for i, imp := range f.f.Imports {
+ pos := f.fset.Position(imp.Pos())
+
+ if !isBlank(imp.Name) {
+ continue // Ignore non-blank imports.
+ }
+ if i > 0 {
+ prev := f.f.Imports[i-1]
+ prevPos := f.fset.Position(prev.Pos())
+ if isBlank(prev.Name) && prevPos.Line+1 == pos.Line {
+ continue // A subsequent blank in a group.
+ }
+ }
+
+ // This is the first blank import of a group.
+ if imp.Doc == nil && imp.Comment == nil {
+ link := ""
+ f.errorf(imp, 1, link, "a blank import should be only in a main or test package, or have a comment justifying it")
+ }
+ }
+}
+
+// lintImports examines import blocks.
+func (f *file) lintImports() {
+ for _, is := range f.f.Imports {
+ if is.Name != nil && is.Name.Name == "." && !f.isTest() {
+ f.errorf(is, 1, styleGuideBase+"#Import_Dot", "should not use dot imports")
+ }
+ }
+}
+
+const docCommentsLink = styleGuideBase + "#Doc_Comments"
+
+// lintExported examines the doc comments of exported names.
+// It complains if any required doc comments are missing,
+// or if they are not of the right form. The exact rules are in
+// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function
+// also tracks the GenDecl structure being traversed to permit
+// doc comments for constants to be on top of the const block.
+func (f *file) lintExported() {
+ if f.isTest() {
+ return
+ }
+
+ var lastGen *ast.GenDecl // last GenDecl entered.
+
+ // Set of GenDecls that have already had missing comments flagged.
+ genDeclMissingComments := make(map[*ast.GenDecl]bool)
+
+ f.walk(func(node ast.Node) bool {
+ switch v := node.(type) {
+ case *ast.GenDecl:
+ if v.Tok == token.IMPORT {
+ return false
+ }
+ // token.CONST, token.TYPE or token.VAR
+ lastGen = v
+ return true
+ case *ast.FuncDecl:
+ f.lintFuncDoc(v)
+ // Don't proceed inside funcs.
+ return false
+ case *ast.TypeSpec:
+ // inside a GenDecl, which usually has the doc
+ doc := v.Doc
+ if doc == nil {
+ doc = lastGen.Doc
+ }
+ f.lintTypeDoc(v, doc)
+ // Don't proceed inside types.
+ return false
+ case *ast.ValueSpec:
+ f.lintValueSpecDoc(v, lastGen, genDeclMissingComments)
+ return false
+ }
+ return true
+ })
+}
+
+var allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`)
+
+// lintNames examines all names in the file.
+// It complains if any use underscores or incorrect known initialisms.
+func (f *file) lintNames() {
+ // Package names need slightly different handling than other names.
+ if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") {
+ f.errorf(f.f, 1, "http://golang.org/doc/effective_go.html#package-names", "don't use an underscore in package name")
+ }
+
+ check := func(id *ast.Ident, thing string) {
+ if id.Name == "_" {
+ return
+ }
+
+ // Handle two common styles from other languages that don't belong in Go.
+ if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") {
+ f.errorf(id, 0.8, styleGuideBase+"#Mixed_Caps", "don't use ALL_CAPS in Go names; use CamelCase")
+ return
+ }
+ if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' {
+ should := string(id.Name[1]+'a'-'A') + id.Name[2:]
+ f.errorf(id, 0.8, "", "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should)
+ }
+
+ should := lintName(id.Name)
+ if id.Name == should {
+ return
+ }
+ if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") {
+ f.errorf(id, 0.9, "http://golang.org/doc/effective_go.html#mixed-caps", "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should)
+ return
+ }
+ f.errorf(id, 0.8, styleGuideBase+"#Initialisms", "%s %s should be %s", thing, id.Name, should)
+ }
+ checkList := func(fl *ast.FieldList, thing string) {
+ if fl == nil {
+ return
+ }
+ for _, f := range fl.List {
+ for _, id := range f.Names {
+ check(id, thing)
+ }
+ }
+ }
+ f.walk(func(node ast.Node) bool {
+ switch v := node.(type) {
+ case *ast.AssignStmt:
+ if v.Tok == token.ASSIGN {
+ return true
+ }
+ for _, exp := range v.Lhs {
+ if id, ok := exp.(*ast.Ident); ok {
+ check(id, "var")
+ }
+ }
+ case *ast.FuncDecl:
+ if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test")) {
+ return true
+ }
+ check(v.Name, "func")
+
+ thing := "func"
+ if v.Recv != nil {
+ thing = "method"
+ }
+ checkList(v.Type.Params, thing+" parameter")
+ checkList(v.Type.Results, thing+" result")
+ case *ast.GenDecl:
+ if v.Tok == token.IMPORT {
+ return true
+ }
+ var thing string
+ switch v.Tok {
+ case token.CONST:
+ thing = "const"
+ case token.TYPE:
+ thing = "type"
+ case token.VAR:
+ thing = "var"
+ }
+ for _, spec := range v.Specs {
+ switch s := spec.(type) {
+ case *ast.TypeSpec:
+ check(s.Name, thing)
+ case *ast.ValueSpec:
+ for _, id := range s.Names {
+ check(id, thing)
+ }
+ }
+ }
+ case *ast.InterfaceType:
+ // Do not check interface method names.
+ // They are often constrained by the method names of concrete types.
+ for _, x := range v.Methods.List {
+ ft, ok := x.Type.(*ast.FuncType)
+ if !ok { // might be an embedded interface name
+ continue
+ }
+ checkList(ft.Params, "interface method parameter")
+ checkList(ft.Results, "interface method result")
+ }
+ case *ast.RangeStmt:
+ if v.Tok == token.ASSIGN {
+ return true
+ }
+ if id, ok := v.Key.(*ast.Ident); ok {
+ check(id, "range var")
+ }
+ if id, ok := v.Value.(*ast.Ident); ok {
+ check(id, "range var")
+ }
+ case *ast.StructType:
+ for _, f := range v.Fields.List {
+ for _, id := range f.Names {
+ check(id, "struct field")
+ }
+ }
+ }
+ return true
+ })
+}
+
+// lintName returns a different name if it should be different.
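+// For example, lintName("foo_id") returns "fooID" and lintName("ApiProxy")
+// returns "APIProxy"; see lint_test.go for more cases.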
+func lintName(name string) (should string) {
+ // Fast path for simple cases: "_" and all lowercase.
+ if name == "_" {
+ return name
+ }
+ allLower := true
+ for _, r := range name {
+ if !unicode.IsLower(r) {
+ allLower = false
+ break
+ }
+ }
+ if allLower {
+ return name
+ }
+
+ // Split camelCase at any lower->upper transition, and split on underscores.
+ // Check each word for common initialisms.
+ runes := []rune(name)
+ w, i := 0, 0 // index of start of word, scan
+ for i+1 <= len(runes) {
+ eow := false // whether we hit the end of a word
+ if i+1 == len(runes) {
+ eow = true
+ } else if runes[i+1] == '_' {
+ // underscore; shift the remainder forward over any run of underscores
+ eow = true
+ n := 1
+ for i+n+1 < len(runes) && runes[i+n+1] == '_' {
+ n++
+ }
+ copy(runes[i+1:], runes[i+n+1:])
+ runes = runes[:len(runes)-n]
+ } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) {
+ // lower->non-lower
+ eow = true
+ }
+ i++
+ if !eow {
+ continue
+ }
+
+ // [w,i) is a word.
+ word := string(runes[w:i])
+ if u := strings.ToUpper(word); commonInitialisms[u] {
+ // Keep consistent case, which is lowercase only at the start.
+ if w == 0 && unicode.IsLower(runes[w]) {
+ u = strings.ToLower(u)
+ }
+ // All the common initialisms are ASCII, so the upper-cased
+ // word has the same number of runes and can be copied in place.
+ copy(runes[w:], []rune(u))
+ } else if w > 0 && strings.ToLower(word) == word {
+ // already all lowercase, and not the first word, so uppercase the first character.
+ runes[w] = unicode.ToUpper(runes[w])
+ }
+ w = i
+ }
+ return string(runes)
+}
+
+// commonInitialisms is a set of common initialisms.
+// Only add entries that are highly unlikely to be non-initialisms.
+// For instance, "ID" is fine (Freudian code is rare), but "AND" is not.
+var commonInitialisms = map[string]bool{
+ "API": true,
+ "ASCII": true,
+ "CPU": true,
+ "CSS": true,
+ "DNS": true,
+ "EOF": true,
+ "HTML": true,
+ "HTTP": true,
+ "HTTPS": true,
+ "ID": true,
+ "IP": true,
+ "JSON": true,
+ "LHS": true,
+ "QPS": true,
+ "RAM": true,
+ "RHS": true,
+ "RPC": true,
+ "SLA": true,
+ "SSH": true,
+ "TLS": true,
+ "TTL": true,
+ "UI": true,
+ "UID": true,
+ "URL": true,
+ "UTF8": true,
+ "VM": true,
+ "XML": true,
+}
+
+// lintTypeDoc examines the doc comment on a type.
+// It complains if the comment is missing from an exported type,
+// or if it is not of the standard form.
+func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) {
+ if !ast.IsExported(t.Name.Name) {
+ return
+ }
+ if doc == nil {
+ f.errorf(t, 1, docCommentsLink, "exported type %v should have comment or be unexported", t.Name)
+ return
+ }
+
+ s := doc.Text()
+ articles := [...]string{"A", "An", "The"}
+ for _, a := range articles {
+ if strings.HasPrefix(s, a+" ") {
+ s = s[len(a)+1:]
+ break
+ }
+ }
+ if !strings.HasPrefix(s, t.Name.Name+" ") {
+ f.errorf(doc, 1, docCommentsLink, `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name)
+ }
+}
+
+var commonMethods = map[string]bool{
+ "Error": true,
+ "Read": true,
+ "ServeHTTP": true,
+ "String": true,
+ "Write": true,
+}
+
+// lintFuncDoc examines doc comments on functions and methods.
+// It complains if they are missing, or not of the right form.
+// It has specific exclusions for well-known methods (see commonMethods above).
+func (f *file) lintFuncDoc(fn *ast.FuncDecl) {
+ if !ast.IsExported(fn.Name.Name) {
+ // func is unexported
+ return
+ }
+ kind := "function"
+ name := fn.Name.Name
+ if fn.Recv != nil {
+ // method
+ kind = "method"
+ recv := receiverType(fn)
+ if !ast.IsExported(recv) {
+ // receiver is unexported
+ return
+ }
+ if commonMethods[name] {
+ return
+ }
+ switch name {
+ case "Len", "Less", "Swap":
+ if f.sortable[recv] {
+ return
+ }
+ }
+ name = recv + "." + name
+ }
+ if fn.Doc == nil {
+ f.errorf(fn, 1, docCommentsLink, "exported %s %s should have comment or be unexported", kind, name)
+ return
+ }
+ s := fn.Doc.Text()
+ prefix := fn.Name.Name + " "
+ if !strings.HasPrefix(s, prefix) {
+ f.errorf(fn.Doc, 1, docCommentsLink, `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
+ }
+}
+
+// lintValueSpecDoc examines package-global variables and constants.
+// It complains if they are not individually declared,
+// or if they are not suitably documented in the right form (unless they are in a block that is commented).
+func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) {
+ kind := "var"
+ if gd.Tok == token.CONST {
+ kind = "const"
+ }
+
+ if len(vs.Names) > 1 {
+ // Check that none are exported except for the first.
+ for _, n := range vs.Names[1:] {
+ if ast.IsExported(n.Name) {
+ f.errorf(vs, 1, "", "exported %s %s should have its own declaration", kind, n.Name)
+ return
+ }
+ }
+ }
+
+ // Only one name.
+ name := vs.Names[0].Name
+ if !ast.IsExported(name) {
+ return
+ }
+
+ if vs.Doc == nil {
+ if gd.Doc == nil && !genDeclMissingComments[gd] {
+ block := ""
+ if kind == "const" && gd.Lparen.IsValid() {
+ block = " (or a comment on this block)"
+ }
+ f.errorf(vs, 1, docCommentsLink, "exported %s %s should have comment%s or be unexported", kind, name, block)
+ genDeclMissingComments[gd] = true
+ }
+ return
+ }
+ prefix := name + " "
+ if !strings.HasPrefix(vs.Doc.Text(), prefix) {
+ f.errorf(vs.Doc, 1, docCommentsLink, `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix)
+ }
+}
+
+// zeroLiteral is a set of ast.BasicLit values that are zero values.
+// It is not exhaustive.
+var zeroLiteral = map[string]bool{
+ "false": true, // bool
+ // runes
+ `'\x00'`: true,
+ `'\000'`: true,
+ // strings
+ `""`: true,
+ "``": true,
+ // numerics
+ "0": true,
+ "0.": true,
+ "0.0": true,
+ "0i": true,
+}
+
+// lintVarDecls examines variable declarations. It complains about declarations with
+// redundant LHS types that can be inferred from the RHS.
+func (f *file) lintVarDecls() {
+ var lastGen *ast.GenDecl // last GenDecl entered.
+
+ f.walk(func(node ast.Node) bool {
+ switch v := node.(type) {
+ case *ast.GenDecl:
+ if v.Tok != token.CONST && v.Tok != token.VAR {
+ return false
+ }
+ lastGen = v
+ return true
+ case *ast.ValueSpec:
+ if lastGen.Tok == token.CONST {
+ return false
+ }
+ if len(v.Names) > 1 || v.Type == nil || len(v.Values) == 0 {
+ return false
+ }
+ rhs := v.Values[0]
+ // An underscore var appears in a common idiom for compile-time interface satisfaction,
+ // as in "var _ Interface = (*Concrete)(nil)".
+ if isIdent(v.Names[0], "_") {
+ return false
+ }
+ // If the RHS is a zero value, suggest dropping it.
+ zero := false
+ if lit, ok := rhs.(*ast.BasicLit); ok {
+ zero = zeroLiteral[lit.Value]
+ } else if isIdent(rhs, "nil") {
+ zero = true
+ }
+ if zero {
+ f.errorf(rhs, 0.9, "", "should drop = %s from declaration of var %s; it is the zero value", f.render(rhs), v.Names[0])
+ return false
+ }
+ // If the LHS type is an interface, don't warn, since it is probably a
+ // concrete type on the RHS. Note that our feeble lexical check here
+ // will only pick up interface{} and other literal interface types;
+ // that covers most of the cases we care to exclude right now.
+ // TODO(dsymonds): Use typechecker to make this heuristic more accurate.
+ if _, ok := v.Type.(*ast.InterfaceType); ok {
+ return false
+ }
+ // If the RHS is an untyped const, only warn if the LHS type is its default type.
+ if defType, ok := isUntypedConst(rhs); ok && !isIdent(v.Type, defType) {
+ return false
+ }
+ f.errorf(v.Type, 0.8, "", "should omit type %s from declaration of var %s; it will be inferred from the right-hand side", f.render(v.Type), v.Names[0])
+ return false
+ }
+ return true
+ })
+}
+
+// lintElses examines else blocks. It complains about any else block whose if block ends in a return.
+func (f *file) lintElses() {
+ // We don't want to flag if { } else if { } else { } constructions.
+ // They will appear as an IfStmt whose Else field is also an IfStmt.
+ // Record such a node so we ignore it when we visit it.
+ ignore := make(map[*ast.IfStmt]bool)
+
+ f.walk(func(node ast.Node) bool {
+ ifStmt, ok := node.(*ast.IfStmt)
+ if !ok || ifStmt.Else == nil {
+ return true
+ }
+ if ignore[ifStmt] {
+ return true
+ }
+ if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok {
+ ignore[elseif] = true
+ return true
+ }
+ if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok {
+ // only care about elses without conditions
+ return true
+ }
+ if len(ifStmt.Body.List) == 0 {
+ return true
+ }
+ shortDecl := false // does the if statement have a ":=" initialization statement?
+ if ifStmt.Init != nil {
+ if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE {
+ shortDecl = true
+ }
+ }
+ lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1]
+ if _, ok := lastStmt.(*ast.ReturnStmt); ok {
+ extra := ""
+ if shortDecl {
+ extra = " (move short variable declaration to its own line if necessary)"
+ }
+ f.errorf(ifStmt.Else, 1, styleGuideBase+"#Indent_Error_Flow", "if block ends with a return statement, so drop this else and outdent its block"+extra)
+ }
+ return true
+ })
+}
+
+// lintRanges examines range clauses. It complains about redundant constructions.
+func (f *file) lintRanges() {
+ f.walk(func(node ast.Node) bool {
+ rs, ok := node.(*ast.RangeStmt)
+ if !ok {
+ return true
+ }
+ if rs.Value == nil {
+ // for x = range m { ... }
+ return true // single var form
+ }
+ if !isIdent(rs.Value, "_") {
+ // for ?, y = range m { ... }
+ return true
+ }
+
+ f.errorf(rs.Value, 1, "", "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok)
+ return true
+ })
+}
+
+// lintErrorf examines errors.New calls. It complains if its only argument is an fmt.Sprintf invocation.
+func (f *file) lintErrorf() {
+ f.walk(func(node ast.Node) bool {
+ ce, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if !isPkgDot(ce.Fun, "errors", "New") || len(ce.Args) != 1 {
+ return true
+ }
+ arg := ce.Args[0]
+ ce, ok = arg.(*ast.CallExpr)
+ if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") {
+ return true
+ }
+ f.errorf(node, 1, "", "should replace errors.New(fmt.Sprintf(...)) with fmt.Errorf(...)")
+ return true
+ })
+}
+
+// lintErrors examines global error vars. It complains if they aren't named in the standard way.
+func (f *file) lintErrors() {
+ for _, decl := range f.f.Decls {
+ gd, ok := decl.(*ast.GenDecl)
+ if !ok || gd.Tok != token.VAR {
+ continue
+ }
+ for _, spec := range gd.Specs {
+ spec := spec.(*ast.ValueSpec)
+ if len(spec.Names) != 1 || len(spec.Values) != 1 {
+ continue
+ }
+ ce, ok := spec.Values[0].(*ast.CallExpr)
+ if !ok {
+ continue
+ }
+ if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") {
+ continue
+ }
+
+ id := spec.Names[0]
+ prefix := "err"
+ if id.IsExported() {
+ prefix = "Err"
+ }
+ if !strings.HasPrefix(id.Name, prefix) {
+ f.errorf(id, 0.9, "", "error var %s should have name of the form %sFoo", id.Name, prefix)
+ }
+ }
+ }
+}
+
+func lintCapAndPunct(s string) (isCap, isPunct bool) {
+ first, firstN := utf8.DecodeRuneInString(s)
+ last, _ := utf8.DecodeLastRuneInString(s)
+ isPunct = last == '.' || last == ':' || last == '!'
+ isCap = unicode.IsUpper(first)
+ if isCap && len(s) > firstN {
+ // Don't flag strings starting with something that looks like an initialism.
+ if second, _ := utf8.DecodeRuneInString(s[firstN:]); unicode.IsUpper(second) {
+ isCap = false
+ }
+ }
+ return
+}
+
+// lintErrorStrings examines error strings. It complains if they are capitalized or end in punctuation.
+func (f *file) lintErrorStrings() {
+ f.walk(func(node ast.Node) bool {
+ ce, ok := node.(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") {
+ return true
+ }
+ if len(ce.Args) < 1 {
+ return true
+ }
+ str, ok := ce.Args[0].(*ast.BasicLit)
+ if !ok || str.Kind != token.STRING {
+ return true
+ }
+ s, _ := strconv.Unquote(str.Value) // can assume well-formed Go
+ if s == "" {
+ return true
+ }
+ isCap, isPunct := lintCapAndPunct(s)
+ var msg string
+ switch {
+ case isCap && isPunct:
+ msg = "error strings should not be capitalized and should not end with punctuation"
+ case isCap:
+ msg = "error strings should not be capitalized"
+ case isPunct:
+ msg = "error strings should not end with punctuation"
+ default:
+ return true
+ }
+ // People use proper nouns and exported Go identifiers in error strings,
+ // so decrease the confidence of warnings for capitalization.
+ conf := 0.8
+ if isCap {
+ conf = 0.6
+ }
+ f.errorf(str, conf, styleGuideBase+"#Error_Strings", msg)
+ return true
+ })
+}
+
+var badReceiverNames = map[string]bool{
+ "me": true,
+ "this": true,
+ "self": true,
+}
+
+// lintReceiverNames examines receiver names. It complains about inconsistent
+// names used for the same type and names such as "this".
+func (f *file) lintReceiverNames() {
+ typeReceiver := map[string]string{}
+ f.walk(func(n ast.Node) bool {
+ fn, ok := n.(*ast.FuncDecl)
+ if !ok || fn.Recv == nil {
+ return true
+ }
+ names := fn.Recv.List[0].Names
+ if len(names) < 1 {
+ return true
+ }
+ name := names[0].Name
+ const link = styleGuideBase + "#Receiver_Names"
+ if badReceiverNames[name] {
+ f.errorf(n, 1, link, `receiver name should be a reflection of its identity; don't use generic names such as "me", "this", or "self"`)
+ return true
+ }
+ recv := receiverType(fn)
+ if prev, ok := typeReceiver[recv]; ok && prev != name {
+ f.errorf(n, 1, link, "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv)
+ return true
+ }
+ typeReceiver[recv] = name
+ return true
+ })
+}
+
+// lintIncDec examines statements that increment or decrement a variable.
+// It complains if they don't use x++ or x--.
+func (f *file) lintIncDec() {
+ f.walk(func(n ast.Node) bool {
+ as, ok := n.(*ast.AssignStmt)
+ if !ok {
+ return true
+ }
+ if len(as.Lhs) != 1 {
+ return true
+ }
+ if !isOne(as.Rhs[0]) {
+ return true
+ }
+ var suffix string
+ switch as.Tok {
+ case token.ADD_ASSIGN:
+ suffix = "++"
+ case token.SUB_ASSIGN:
+ suffix = "--"
+ default:
+ return true
+ }
+ f.errorf(as, 0.8, "", "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix)
+ return true
+ })
+}
+
+// lintMake examines statements that declare and initialize a variable with make.
+// It complains if they are constructing a zero element slice.
+func (f *file) lintMake() {
+ f.walk(func(n ast.Node) bool {
+ as, ok := n.(*ast.AssignStmt)
+ if !ok {
+ return true
+ }
+ // Only want single var := assignment statements.
+ if len(as.Lhs) != 1 || as.Tok != token.DEFINE {
+ return true
+ }
+ ce, ok := as.Rhs[0].(*ast.CallExpr)
+ if !ok {
+ return true
+ }
+ // Check if ce is make([]T, 0).
+ if !isIdent(ce.Fun, "make") || len(ce.Args) != 2 || !isZero(ce.Args[1]) {
+ return true
+ }
+ at, ok := ce.Args[0].(*ast.ArrayType)
+ if !ok || at.Len != nil {
+ return true
+ }
+ f.errorf(as, 0.8, "", `can probably use "var %s %s" instead`, f.render(as.Lhs[0]), f.render(at))
+ return true
+ })
+}
+
+func receiverType(fn *ast.FuncDecl) string {
+ switch e := fn.Recv.List[0].Type.(type) {
+ case *ast.Ident:
+ return e.Name
+ case *ast.StarExpr:
+ return e.X.(*ast.Ident).Name
+ }
+ panic(fmt.Sprintf("unknown method receiver AST node type %T", fn.Recv.List[0].Type))
+}
+
+func (f *file) walk(fn func(ast.Node) bool) {
+ ast.Walk(walker(fn), f.f)
+}
+
+func (f *file) render(x interface{}) string {
+ var buf bytes.Buffer
+ if err := printer.Fprint(&buf, f.fset, x); err != nil {
+ panic(err)
+ }
+ return buf.String()
+}
+
+func (f *file) debugRender(x interface{}) string {
+ var buf bytes.Buffer
+ if err := ast.Fprint(&buf, f.fset, x, nil); err != nil {
+ panic(err)
+ }
+ return buf.String()
+}
+
+// walker adapts a function to satisfy the ast.Visitor interface.
+// The function returns whether the walk should proceed into the node's children.
+type walker func(ast.Node) bool
+
+func (w walker) Visit(node ast.Node) ast.Visitor {
+ if w(node) {
+ return w
+ }
+ return nil
+}
+
+func isIdent(expr ast.Expr, ident string) bool {
+ id, ok := expr.(*ast.Ident)
+ return ok && id.Name == ident
+}
+
+// isBlank returns whether id is the blank identifier "_".
+// If id == nil, the answer is false.
+func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" }
+
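+// isPkgDot reports whether expr is a selector expression of the form pkg.name.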
+func isPkgDot(expr ast.Expr, pkg, name string) bool {
+ sel, ok := expr.(*ast.SelectorExpr)
+ return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name)
+}
+
+func isZero(expr ast.Expr) bool {
+ lit, ok := expr.(*ast.BasicLit)
+ return ok && lit.Kind == token.INT && lit.Value == "0"
+}
+
+func isOne(expr ast.Expr) bool {
+ lit, ok := expr.(*ast.BasicLit)
+ return ok && lit.Kind == token.INT && lit.Value == "1"
+}
+
+var basicLitKindTypes = map[token.Token]string{
+ token.FLOAT: "float64",
+ token.IMAG: "complex128",
+ token.CHAR: "rune",
+ token.STRING: "string",
+}
+
+// isUntypedConst reports whether expr is an untyped constant,
+// and indicates what its default type is.
+func isUntypedConst(expr ast.Expr) (defType string, ok bool) {
+ if isIntLiteral(expr) {
+ return "int", true
+ }
+ if bl, ok := expr.(*ast.BasicLit); ok {
+ if dt, ok := basicLitKindTypes[bl.Kind]; ok {
+ return dt, true
+ }
+ }
+ return "", false
+}
+
+func isIntLiteral(expr ast.Expr) bool {
+ // Either a BasicLit with Kind token.INT,
+ // or some combination of a UnaryExpr with Op token.SUB (for "-<lit>")
+ // or a ParenExpr (for "(<lit>)").
+Loop:
+ for {
+ switch v := expr.(type) {
+ case *ast.UnaryExpr:
+ if v.Op == token.SUB {
+ expr = v.X
+ continue Loop
+ }
+ case *ast.ParenExpr:
+ expr = v.X
+ continue Loop
+ case *ast.BasicLit:
+ if v.Kind == token.INT {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// srcLine returns the complete line at p, including the terminating newline.
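+// For example, with src "first\nsecond\nthird\n" and a position whose offset
+// falls inside the second line, it returns "second\n".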
+func srcLine(src []byte, p token.Position) string {
+ // Scan backwards to the preceding newline and forwards to the next one.
+ lo, hi := p.Offset, p.Offset+1
+ for lo > 0 && src[lo-1] != '\n' {
+ lo--
+ }
+ for hi < len(src) && src[hi-1] != '\n' {
+ hi++
+ }
+ return string(src[lo:hi])
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/lint_test.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/lint_test.go
new file mode 100644
index 00000000000..12e3afb8138
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/lint_test.go
@@ -0,0 +1,197 @@
+// Copyright (c) 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+package lint
+
+import (
+ "bytes"
+ "flag"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io/ioutil"
+ "path"
+ "regexp"
+ "strings"
+ "testing"
+)
+
+var lintMatch = flag.String("lint.match", "", "restrict testdata matches to this pattern")
+
+func TestAll(t *testing.T) {
+ l := new(Linter)
+ rx, err := regexp.Compile(*lintMatch)
+ if err != nil {
+ t.Fatalf("Bad -lint.match value %q: %v", *lintMatch, err)
+ }
+
+ baseDir := "testdata"
+ fis, err := ioutil.ReadDir(baseDir)
+ if err != nil {
+ t.Fatalf("ioutil.ReadDir: %v", err)
+ }
+ if len(fis) == 0 {
+ t.Fatalf("no files in %v", baseDir)
+ }
+ for _, fi := range fis {
+ if !rx.MatchString(fi.Name()) {
+ continue
+ }
+ //t.Logf("Testing %s", fi.Name())
+ src, err := ioutil.ReadFile(path.Join(baseDir, fi.Name()))
+ if err != nil {
+ t.Fatalf("Failed reading %s: %v", fi.Name(), err)
+ }
+
+ ins := parseInstructions(t, fi.Name(), src)
+ if ins == nil {
+ t.Errorf("Test file %v does not have instructions", fi.Name())
+ continue
+ }
+
+ ps, err := l.Lint(fi.Name(), src)
+ if err != nil {
+ t.Errorf("Linting %s: %v", fi.Name(), err)
+ continue
+ }
+
+ for _, in := range ins {
+ ok := false
+ for i, p := range ps {
+ if p.Position.Line != in.Line {
+ continue
+ }
+ if in.Match.MatchString(p.Text) {
+ // remove this problem from ps
+ copy(ps[i:], ps[i+1:])
+ ps = ps[:len(ps)-1]
+
+ //t.Logf("/%v/ matched at %s:%d", in.Match, fi.Name(), in.Line)
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ t.Errorf("Lint failed at %s:%d; /%v/ did not match", fi.Name(), in.Line, in.Match)
+ }
+ }
+ for _, p := range ps {
+ t.Errorf("Unexpected problem at %s:%d: %v", fi.Name(), p.Position.Line, p.Text)
+ }
+ }
+}
+
+type instruction struct {
+ Line int // the line number this applies to
+ Match *regexp.Regexp // what pattern to match
+}
+
+// parseInstructions parses instructions from the comments in a Go source file.
+// It returns nil if none were parsed.
+func parseInstructions(t *testing.T, filename string, src []byte) []instruction {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
+ if err != nil {
+ t.Fatalf("Test file %v does not parse: %v", filename, err)
+ }
+ var ins []instruction
+ for _, cg := range f.Comments {
+ ln := fset.Position(cg.Pos()).Line
+ raw := cg.Text()
+ for _, line := range strings.Split(raw, "\n") {
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+ if line == "OK" && ins == nil {
+ // so our return value will be non-nil
+ ins = make([]instruction, 0)
+ continue
+ }
+ if strings.Contains(line, "MATCH") {
+ a, b := strings.Index(line, "/"), strings.LastIndex(line, "/")
+ if a == -1 || a == b {
+ t.Fatalf("Malformed match instruction %q at %v:%d", line, filename, ln)
+ }
+ pat := line[a+1 : b]
+ rx, err := regexp.Compile(pat)
+ if err != nil {
+ t.Fatalf("Bad match pattern %q at %v:%d: %v", pat, filename, ln, err)
+ }
+ ins = append(ins, instruction{
+ Line: ln,
+ Match: rx,
+ })
+ }
+ }
+ }
+ return ins
+}
+
+func render(fset *token.FileSet, x interface{}) string {
+ var buf bytes.Buffer
+ if err := printer.Fprint(&buf, fset, x); err != nil {
+ panic(err)
+ }
+ return buf.String()
+}
+
+func TestLine(t *testing.T) {
+ tests := []struct {
+ src string
+ offset int
+ want string
+ }{
+ {"single line file", 5, "single line file"},
+ {"single line file with newline\n", 5, "single line file with newline\n"},
+ {"first\nsecond\nthird\n", 2, "first\n"},
+ {"first\nsecond\nthird\n", 9, "second\n"},
+ {"first\nsecond\nthird\n", 14, "third\n"},
+ {"first\nsecond\nthird with no newline", 16, "third with no newline"},
+ {"first byte\n", 0, "first byte\n"},
+ }
+ for _, test := range tests {
+ got := srcLine([]byte(test.src), token.Position{Offset: test.offset})
+ if got != test.want {
+ t.Errorf("srcLine(%q, offset=%d) = %q, want %q", test.src, test.offset, got, test.want)
+ }
+ }
+}
+
+func TestLintName(t *testing.T) {
+ tests := []struct {
+ name, want string
+ }{
+ {"foo_bar", "fooBar"},
+ {"foo_bar_baz", "fooBarBaz"},
+ {"Foo_bar", "FooBar"},
+ {"foo_WiFi", "fooWiFi"},
+ {"id", "id"},
+ {"Id", "ID"},
+ {"foo_id", "fooID"},
+ {"fooId", "fooID"},
+ {"fooUid", "fooUID"},
+ {"idFoo", "idFoo"},
+ {"uidFoo", "uidFoo"},
+ {"midIdDle", "midIDDle"},
+ {"APIProxy", "APIProxy"},
+ {"ApiProxy", "APIProxy"},
+ {"apiProxy", "apiProxy"},
+ {"_Leading", "_Leading"},
+ {"___Leading", "_Leading"},
+ {"trailing_", "trailing"},
+ {"trailing___", "trailing"},
+ {"a_b", "aB"},
+ {"a__b", "aB"},
+ {"a___b", "aB"},
+ {"Rpc1150", "RPC1150"},
+ }
+ for _, test := range tests {
+ got := lintName(test.name)
+ if got != test.want {
+ t.Errorf("lintName(%q) = %q, want %q", test.name, got, test.want)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/misc/emacs/golint.el b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/misc/emacs/golint.el
new file mode 100644
index 00000000000..de729df68dd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/misc/emacs/golint.el
@@ -0,0 +1,51 @@
+;;; golint.el --- lint for the Go source code
+
+;; Copyright 2013 The Go Authors. All rights reserved.
+;; Use of this source code is governed by a BSD-style
+;; license that can be found in the LICENSE file.
+
+;; URL: https://github.com/golang/lint
+
+;;; Commentary:
+
+;; To install golint, add the following lines to your .emacs file:
+;; (add-to-list 'load-path "PATH CONTAINING golint.el" t)
+;; (require 'golint)
+;;
+;; After this, type M-x golint on Go source code.
+;;
+;; Usage:
+;; C-x `
+;; Jump directly to the line in your code which caused the first message.
+;;
+;; For more usage, see Compilation-Mode:
+;; http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html
+
+;;; Code:
+(require 'compile)
+
+(defun go-lint-buffer-name (mode)
+ "*Golint*")
+
+(defun golint-process-setup ()
+ "Setup compilation variables and buffer for `golint'."
+ (run-hooks 'golint-setup-hook))
+
+(define-compilation-mode golint-mode "golint"
+ "Golint is a linter for Go source code."
+ (set (make-local-variable 'compilation-scroll-output) nil)
+ (set (make-local-variable 'compilation-disable-input) t)
+ (set (make-local-variable 'compilation-process-setup-function)
+ 'golint-process-setup)
+)
+
+;;;###autoload
+(defun golint ()
+ "Run golint on the current file and populate the fix list. Pressing C-x ` will jump directly to the line in your code which caused the first message."
+ (interactive)
+ (compilation-start (concat "golint " buffer-file-name)
+ 'golint-mode))
+
+(provide 'golint)
+
+;;; golint.el ends here
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/misc/vim/ftplugin/go/lint.vim b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/misc/vim/ftplugin/go/lint.vim
new file mode 100644
index 00000000000..7dffd181767
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/misc/vim/ftplugin/go/lint.vim
@@ -0,0 +1,31 @@
+" Copyright 2013 The Go Authors. All rights reserved.
+" Use of this source code is governed by a BSD-style
+" license that can be found in the LICENSE file.
+"
+" lint.vim: Vim command to lint Go files with golint.
+"
+" https://github.com/golang/lint
+"
+" This filetype plugin add a new commands for go buffers:
+"
+" :Lint
+"
+" Run golint for the current Go file.
+"
+if exists("b:did_ftplugin_go_lint")
+ finish
+endif
+
+if !executable("golint")
+ finish
+endif
+
+command! -buffer Lint call s:GoLint()
+
+function! s:GoLint() abort
+ cexpr system('golint ' . shellescape(expand('%')))
+endfunction
+
+let b:did_ftplugin_go_lint = 1
+
+" vim:ts=4:sw=4:et
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/4.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/4.go
new file mode 100644
index 00000000000..2303a9a3170
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/4.go
@@ -0,0 +1,34 @@
+// Test that exported names have correct comments.
+
+// Package pkg does something.
+package pkg
+
+import "time"
+
+type T int // MATCH /exported type T.*should.*comment.*or.*unexport/
+
+func (T) F() {} // MATCH /exported method T\.F.*should.*comment.*or.*unexport/
+
+// this is a nice type.
+// MATCH /comment.*exported type U.*should.*form.*"U ..."/
+type U string
+
+// this is a neat function.
+// MATCH /comment.*exported method U\.G.*should.*form.*"G ..."/
+func (U) G() {}
+
+// A V is a string.
+type V string
+
+// V.H has a pointer receiver
+
+func (*V) H() {} // MATCH /exported method V\.H.*should.*comment.*or.*unexport/
+
+var W = "foo" // MATCH /exported var W.*should.*comment.*or.*unexport/
+
+const X = "bar" // MATCH /exported const X.*should.*comment.*or.*unexport/
+
+var Y, Z int // MATCH /exported var Z.*own declaration/
+
+// Location should be okay, since the other var name is an underscore.
+var Location, _ = time.LoadLocation("Europe/Istanbul") // not Constantinople
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/5_test.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/5_test.go
new file mode 100644
index 00000000000..af174587c0a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/5_test.go
@@ -0,0 +1,17 @@
+// This file ends in _test.go, so we should not warn about doc comments.
+// OK
+
+package pkg
+
+import "testing"
+
+type H int
+
+func TestSomething(t *testing.T) {
+}
+
+func TestSomething_suffix(t *testing.T) {
+}
+
+func ExampleBuffer_reader() {
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-lib.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-lib.go
new file mode 100644
index 00000000000..edac0d75c8b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-lib.go
@@ -0,0 +1,33 @@
+// Test that blank imports in library packages are flagged.
+
+// Package foo ...
+package foo
+
+// The instructions need to go before the imports below so they will not be
+// mistaken for documentation.
+
+/* MATCH /blank import/ */ import _ "encoding/json"
+
+import (
+ "fmt"
+ /* MATCH /blank import/ */ _ "os"
+
+ /* MATCH /blank import/ */ _ "net/http"
+ _ "path"
+)
+
+import _ "encoding/base64" // Don't gripe about this
+
+import (
+ // Don't gripe about these next two lines.
+ _ "compress/zlib"
+ _ "syscall"
+
+ /* MATCH /blank import/ */ _ "path/filepath"
+)
+
+import (
+ "go/ast"
+ _ "go/scanner" // Don't gripe about this or the following line.
+ _ "go/token"
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-lib_test.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-lib_test.go
new file mode 100644
index 00000000000..0307985f86c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-lib_test.go
@@ -0,0 +1,20 @@
+// Test that blank imports in test packages are not flagged.
+// OK
+
+// Package foo ...
+package foo
+
+// These are essentially the same imports as in the "library" package, but
+// these should not trigger the warning because this is a test.
+
+import _ "encoding/json"
+
+import (
+ "fmt"
+ "testing"
+
+ _ "os"
+
+ _ "net/http"
+ _ "path"
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-main.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-main.go
new file mode 100644
index 00000000000..9b72b1cb02b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/blank-import-main.go
@@ -0,0 +1,12 @@
+// Test that blank imports in package main are not flagged.
+// OK
+
+// Binary foo ...
+package main
+
+import _ "fmt"
+
+import (
+ "os"
+ _ "path"
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/common-methods.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/common-methods.go
new file mode 100644
index 00000000000..c0bb1363a57
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/common-methods.go
@@ -0,0 +1,16 @@
+// Test that we don't nag for comments on common methods.
+// OK
+
+// Package pkg ...
+package pkg
+
+import "net/http"
+
+// T is ...
+type T int
+
+func (T) Error() string { return "" }
+func (T) String() string { return "" }
+func (T) ServeHTTP(w http.ResponseWriter, r *http.Request) {}
+func (T) Read(p []byte) (n int, err error) { return 0, nil }
+func (T) Write(p []byte) (n int, err error) { return 0, nil }
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/const-block.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/const-block.go
new file mode 100644
index 00000000000..4b89d6f60e7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/const-block.go
@@ -0,0 +1,36 @@
+// Test for docs in const blocks
+
+// Package foo ...
+package foo
+
+const (
+ // Prefix for something.
+ // MATCH /InlineWhatever.*form/
+ InlineWhatever = "blah"
+
+ Whatsit = "missing_comment" // MATCH /Whatsit.*should have comment.*block/
+
+ // We should only warn once per block for missing comments,
+ // but always complain about malformed comments.
+
+ WhosYourDaddy = "another_missing_one"
+
+ // Something
+ // MATCH /WhatDoesHeDo.*form/
+ WhatDoesHeDo = "it's not a tumor!"
+)
+
+// These shouldn't need doc comments.
+const (
+ Alpha = "a"
+ Beta = "b"
+ Gamma = "g"
+)
+
+// The comment on the previous const block shouldn't flow through to here.
+
+const UndocAgain = 6 // MATCH /UndocAgain.*should have comment/
+
+const (
+ SomeUndocumented = 7 // MATCH /SomeUndocumented.*should have comment.*block/
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/else-multi.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/else-multi.go
new file mode 100644
index 00000000000..98f39a3eb5b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/else-multi.go
@@ -0,0 +1,18 @@
+// Test of return+else warning; should not trigger on multi-branch if/else.
+// OK
+
+// Package pkg ...
+package pkg
+
+import "log"
+
+func f(x int) bool {
+ if x == 0 {
+ log.Print("x is zero")
+ } else if x > 0 {
+ return true
+ } else {
+ log.Printf("non-positive x: %d", x)
+ }
+ return false
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/else.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/else.go
new file mode 100644
index 00000000000..515c043d306
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/else.go
@@ -0,0 +1,23 @@
+// Test of return+else warning.
+
+// Package pkg ...
+package pkg
+
+import "log"
+
+func f(x int) bool {
+ if x > 0 {
+ return true
+ } else { // MATCH /if.*return.*else.*outdent/
+ log.Printf("non-positive x: %d", x)
+ }
+ return false
+}
+
+func g(f func() bool) string {
+ if ok := f(); ok {
+ return "it's okay"
+ } else { // MATCH /if.*return.*else.*outdent.*short.*var.*declaration/
+ return "it's NOT okay!"
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/errorf.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/errorf.go
new file mode 100644
index 00000000000..768fb8ce60f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/errorf.go
@@ -0,0 +1,22 @@
+// Test for not using fmt.Errorf.
+
+// Package foo ...
+package foo
+
+import (
+ "errors"
+ "fmt"
+)
+
+func f(x int) error {
+ if x > 10 {
+ return errors.New(fmt.Sprintf("something %d", x)) // MATCH /should replace.*errors\.New\(fmt\.Sprintf\(\.\.\.\)\).*fmt\.Errorf\(\.\.\.\)/
+ }
+ if x > 5 {
+ return errors.New(g("blah")) // ok
+ }
+ if x > 4 {
+ return errors.New("something else") // ok
+ }
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/errors.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/errors.go
new file mode 100644
index 00000000000..2882738e01a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/errors.go
@@ -0,0 +1,35 @@
+// Test for naming errors.
+
+// Package foo ...
+package foo
+
+import (
+ "errors"
+ "fmt"
+)
+
+var unexp = errors.New("some unexported error") // MATCH /error var.*unexp.*errFoo/
+
+// Exp ...
+var Exp = errors.New("some exported error") // MATCH /error var.*Exp.*ErrFoo/
+
+var (
+ e1 = fmt.Errorf("blah %d", 4) // MATCH /error var.*e1.*errFoo/
+ // E2 ...
+ E2 = fmt.Errorf("blah %d", 5) // MATCH /error var.*E2.*ErrFoo/
+)
+
+func f() {
+ var whatever = errors.New("ok") // ok
+}
+
+// Check for the error strings themselves.
+
+func g(x int) error {
+ if x < 1 {
+ return fmt.Errorf("This %d is too low", x) // MATCH /error strings.*not be capitalized/
+ } else if x == 0 {
+ return fmt.Errorf("XML time") // ok
+ }
+ return errors.New(`too much stuff.`) // MATCH /error strings.*not end with punctuation/
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/import-dot.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/import-dot.go
new file mode 100644
index 00000000000..bb4c2675927
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/import-dot.go
@@ -0,0 +1,6 @@
+// Test that dot imports are flagged.
+
+// Package pkg ...
+package pkg
+
+import . "fmt" // MATCH /dot import/
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/inc.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/inc.go
new file mode 100644
index 00000000000..3868beea116
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/inc.go
@@ -0,0 +1,14 @@
+// Test for use of x++ and x--.
+
+// Package pkg ...
+package pkg
+
+func addOne(x int) int {
+ x += 1 // MATCH /x\+\+/
+ return x
+}
+
+func subOneInLoop(y int) {
+ for ; y > 0; y -= 1 { // MATCH /y--/
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/make.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/make.go
new file mode 100644
index 00000000000..5211375fe69
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/make.go
@@ -0,0 +1,10 @@
+// Test for pointless make() calls.
+
+// Package pkg ...
+package pkg
+
+func f() {
+ x := make([]T, 0) // MATCH /var x \[\]T/
+ y := make([]somepkg.Foo_Bar, 0) // MATCH /var y \[\]somepkg.Foo_Bar/
+ z = make([]T, 0) // ok, because we don't know where z is declared
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/names.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/names.go
new file mode 100644
index 00000000000..ca7ffde6e10
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/names.go
@@ -0,0 +1,54 @@
+// Test for name linting.
+
+// Package pkg_with_underscores ...
+package pkg_with_underscores // MATCH /underscore.*package name/
+
+var var_name int // MATCH /underscore.*var.*var_name/
+
+type t_wow struct { // MATCH /underscore.*type.*t_wow/
+ x_damn int // MATCH /underscore.*field.*x_damn/
+ Url *url.URL // MATCH /struct field.*Url.*URL/
+}
+
+const fooId = "blah" // MATCH /fooId.*fooID/
+
+func f_it() { // MATCH /underscore.*func.*f_it/
+ more_underscore := 4 // MATCH /underscore.*var.*more_underscore/
+ if isEof := (err == io.EOF); isEof { // MATCH /var.*isEof.*isEOF/
+ more_underscore = 7 // should be okay
+ }
+
+ x := foo_proto.Blah{} // should be okay
+
+ for _, theIp := range ips { // MATCH /range var.*theIp.*theIP/
+ }
+
+ switch myJson := g(); { // MATCH /var.*myJson.*myJSON/
+ }
+ switch tApi := x.(type) { // MATCH /var.*tApi.*tAPI/
+ }
+
+ select {
+ case qId := <-c: // MATCH /var.*qId.*qID/
+ }
+}
+
+// Common styles in other languages that don't belong in Go.
+const (
+ CPP_CONST = 1 // MATCH /ALL_CAPS.*CamelCase/
+ kLeadingKay = 2 // MATCH /k.*leadingKay/
+
+ HTML = 3 // okay; no underscore
+ X509B = 4 // ditto
+)
+
+func f(bad_name int) {} // MATCH /underscore.*func parameter.*bad_name/
+func g() (no_way int) {} // MATCH /underscore.*func result.*no_way/
+func (t *t_wow) f(more_under string) {} // MATCH /underscore.*method parameter.*more_under/
+func (t *t_wow) g() (still_more string) {} // MATCH /underscore.*method result.*still_more/
+
+type i interface {
+ CheckHtml() string // okay; interface method names are often constrained by the concrete types' method names
+
+ F(foo_bar int) // MATCH /foo_bar.*fooBar/
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc1.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc1.go
new file mode 100644
index 00000000000..8197a8ee0f4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc1.go
@@ -0,0 +1,3 @@
+// Test of missing package comment.
+
+package foo // MATCH /should.*package comment.*unless/
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc2.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc2.go
new file mode 100644
index 00000000000..c61febd0e9b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc2.go
@@ -0,0 +1,5 @@
+// Test of package comment in an incorrect form.
+
+// Some random package doc that isn't in the right form.
+// MATCH /package comment should.*form.*"Package testdata .*"/
+package testdata
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc3.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc3.go
new file mode 100644
index 00000000000..95e814e0a46
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc3.go
@@ -0,0 +1,7 @@
+// Test of block package comment.
+// OK
+
+/*
+Package foo is pretty sweet.
+*/
+package foo
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc4.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc4.go
new file mode 100644
index 00000000000..23448dec31e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-doc4.go
@@ -0,0 +1,7 @@
+// Test of block package comment with leading space.
+
+/*
+ Package foo is pretty sweet.
+MATCH /package comment.*leading space/
+*/
+package foo
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-main.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-main.go
new file mode 100644
index 00000000000..c261945d69a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/pkg-main.go
@@ -0,0 +1,5 @@
+// Test of package comment for package main.
+// OK
+
+// This binary does something awesome.
+package main
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/range.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/range.go
new file mode 100644
index 00000000000..e8629edc342
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/range.go
@@ -0,0 +1,27 @@
+// Test for range construction.
+
+// Package foo ...
+package foo
+
+func f() {
+ // with :=
+ for x, _ := range m { // MATCH /should omit 2nd value.*range.*equivalent.*for x := range/
+ }
+ // with =
+ for y, _ = range m { // MATCH /should omit 2nd value.*range.*equivalent.*for y = range/
+ }
+
+ // all OK:
+ for x := range m {
+ }
+ for x, y := range m {
+ }
+ for _, y := range m {
+ }
+ for x = range m {
+ }
+ for x, y = range m {
+ }
+ for _, y = range m {
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/receiver-names.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/receiver-names.go
new file mode 100644
index 00000000000..58f567dae43
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/receiver-names.go
@@ -0,0 +1,38 @@
+// Test for bad receiver names.
+
+// Package foo ...
+package foo
+
+type foo struct{}
+
+func (this foo) f1() { // MATCH /should be a reflection of its identity/
+}
+
+func (self foo) f2() { // MATCH /should be a reflection of its identity/
+}
+
+func (f foo) f3() {
+}
+
+func (foo) f4() {
+}
+
+type bar struct{}
+
+func (b bar) f1() {
+}
+
+func (b bar) f2() {
+}
+
+func (a bar) f3() { // MATCH /receiver name a should be consistent with previous receiver name b for bar/
+}
+
+func (a *bar) f4() { // MATCH /receiver name a should be consistent with previous receiver name b for bar/
+}
+
+func (b *bar) f5() {
+}
+
+func (bar) f6() {
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/sort.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/sort.go
new file mode 100644
index 00000000000..c0990494285
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/sort.go
@@ -0,0 +1,20 @@
+// Test that we don't ask for comments on sort.Interface methods.
+
+// Package pkg ...
+package pkg
+
+// T is ...
+type T []int
+
+// Len by itself should get documented.
+
+func (t T) Len() int { return len(t) } // MATCH /exported method T\.Len.*should.*comment/
+
+// U is ...
+type U []int
+
+func (u U) Len() int { return len(u) }
+func (u U) Less(i, j int) bool { return u[i] < u[j] }
+func (u U) Swap(i, j int) { u[i], u[j] = u[j], u[i] }
+
+func (u U) Other() {} // MATCH /exported method U\.Other.*should.*comment/
diff --git a/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/var-decl.go b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/var-decl.go
new file mode 100644
index 00000000000..bbc687c333b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/3rf/mongo-lint/testdata/var-decl.go
@@ -0,0 +1,48 @@
+// Test for redundant type declaration.
+
+// Package foo ...
+package foo
+
+import "fmt"
+import "net/http"
+
+var mux *http.ServeMux = http.NewServeMux() // MATCH /should.*\*http\.ServeMux.*inferred/
+var myInt int = 7 // MATCH /should.*int.*myInt.*inferred/
+
+var myZeroInt int = 0 // MATCH /should.*= 0.*myZeroInt.*zero value/
+var myZeroFlt float32 = 0. // MATCH /should.*= 0\..*myZeroFlt.*zero value/
+var myZeroF64 float64 = 0.0 // MATCH /should.*= 0\..*myZeroF64.*zero value/
+var myZeroImg complex = 0i // MATCH /should.*= 0i.*myZeroImg.*zero value/
+var myZeroStr string = "" // MATCH /should.*= "".*myZeroStr.*zero value/
+var myZeroRaw string = `` // MATCH /should.*= ``.*myZeroRaw.*zero value/
+var myZeroPtr *Q = nil // MATCH /should.*= nil.*myZeroPtr.*zero value/
+var myZeroRune rune = '\x00' // MATCH /should.*= '\\x00'.*myZeroRune.*zero value/
+var myZeroRune2 rune = '\000' // MATCH /should.*= '\\000'.*myZeroRune2.*zero value/
+
+// No warning because there's no type on the LHS
+var x = 0
+
+// This shouldn't get a warning because there's no initial values.
+var str fmt.Stringer
+
+// No warning because this is a const.
+const x uint64 = 7
+
+// No warnings because the RHS is an ideal int, and the LHS is a different int type.
+var userID int64 = 1235
+var negID int64 = -1
+var parenID int64 = (17)
+var crazyID int64 = -(-(-(-9)))
+
+// Same, but for strings and floats.
+type stringT string
+type floatT float64
+
+var stringV stringT = "abc"
+var floatV floatT = 123.45
+
+// No warning because the LHS names an interface type.
+var data interface{} = googleIPs
+
+// No warning because it's a common idiom for interface satisfaction.
+var _ Server = (*serverImpl)(nil)
diff --git a/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/LICENSE.txt b/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/LICENSE.txt
new file mode 100644
index 00000000000..65e7260a6ae
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/LICENSE.txt
@@ -0,0 +1,13 @@
+Copyright (c) 2012 Chris Howey
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/README.md b/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/README.md
new file mode 100644
index 00000000000..c7a00435112
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/README.md
@@ -0,0 +1,44 @@
+# getpasswd in Go [![GoDoc](https://godoc.org/github.com/howeyc/gopass?status.svg)](https://godoc.org/github.com/howeyc/gopass)
+
+Retrieve a password from user terminal input without echoing it.
+
+Verified on BSD, Linux, and Windows.
+
+Example:
+```go
+package main
+
+import "fmt"
+import "github.com/howeyc/gopass"
+
+func main() {
+	fmt.Printf("Password: ")
+	pass, err := gopass.GetPasswd() // Silent; for *'s use gopass.GetPasswdMasked()
+	if err != nil {
+		return
+	}
+	fmt.Printf("Read %d bytes\n", len(pass)) // Do something with pass
+}
+```
+
+Caution: Multi-byte characters not supported!
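+
+If you want masking and Ctrl-C handling, a variant along these lines should
+work (a sketch; `GetPasswdMasked` and `ErrInterrupted` are defined in this
+package):
+```go
+package main
+
+import "fmt"
+import "github.com/howeyc/gopass"
+
+func main() {
+	fmt.Printf("Password: ")
+	pass, err := gopass.GetPasswdMasked() // Echoes *'s instead of the input
+	if err != nil {
+		fmt.Println(err) // e.g. gopass.ErrInterrupted after Ctrl-C
+		return
+	}
+	fmt.Printf("Read %d bytes\n", len(pass))
+}
+```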
diff --git a/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/nix.go b/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/nix.go
new file mode 100644
index 00000000000..76c9da42093
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/nix.go
@@ -0,0 +1,29 @@
+// +build linux darwin freebsd netbsd openbsd
+
+package gopass
+
+import (
+ "io"
+ "syscall"
+
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+const lineEnding = "\n"
+
+func getch() (byte, error) {
+ if oldState, err := terminal.MakeRaw(0); err != nil {
+ return 0, err
+ } else {
+		defer terminal.Restore(0, oldState) // runs when getch returns, not at the end of the else block
+ }
+
+ var buf [1]byte
+ if n, err := syscall.Read(0, buf[:]); n == 0 || err != nil {
+ if err != nil {
+ return 0, err
+ }
+ return 0, io.EOF
+ }
+ return buf[0], nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/pass.go b/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/pass.go
new file mode 100644
index 00000000000..d6f1c7ef8e3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/pass.go
@@ -0,0 +1,56 @@
+package gopass
+
+import (
+ "errors"
+ "os"
+)
+
+var (
+ ErrInterrupted = errors.New("Interrupted")
+)
+
+// getPasswd returns the input read from terminal.
+// If masked is true, each typed character is masked with an asterisk on the screen.
+// Otherwise, typing will echo nothing.
+func getPasswd(masked bool) ([]byte, error) {
+ var err error
+ var pass, bs, mask []byte
+ if masked {
+ bs = []byte("\b \b")
+ mask = []byte("*")
+ }
+
+ for {
+		if v, e := getch(); v == 127 || v == 8 { // DEL or backspace: erase one character
+			if l := len(pass); l > 0 {
+				pass = pass[:l-1]
+				os.Stdout.Write(bs)
+			}
+		} else if v == 13 || v == 10 { // carriage return or newline: done
+			break
+		} else if v == 3 { // Ctrl-C
+ err = ErrInterrupted
+ break
+ } else if v != 0 {
+ pass = append(pass, v)
+ os.Stdout.Write(mask)
+ } else if e != nil {
+ err = e
+ break
+ }
+ }
+ os.Stdout.WriteString(lineEnding)
+ return pass, err
+}
+
+// GetPasswd returns the password read from the terminal without echoing input.
+// The returned byte array does not include end-of-line characters.
+func GetPasswd() ([]byte, error) {
+ return getPasswd(false)
+}
+
+// GetPasswdMasked returns the password read from the terminal, echoing asterisks.
+// The returned byte array does not include end-of-line characters.
+func GetPasswdMasked() ([]byte, error) {
+ return getPasswd(true)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/win.go b/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/win.go
new file mode 100644
index 00000000000..499dcbdb0ca
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/howeyc/gopass/win.go
@@ -0,0 +1,44 @@
+// +build windows
+
+package gopass
+
+import "errors"
+import "syscall"
+import "unsafe"
+import "unicode/utf16"
+
+const lineEnding = "\r\n"
+
+func getch() (byte, error) {
+ modkernel32 := syscall.NewLazyDLL("kernel32.dll")
+ procReadConsole := modkernel32.NewProc("ReadConsoleW")
+ procGetConsoleMode := modkernel32.NewProc("GetConsoleMode")
+ procSetConsoleMode := modkernel32.NewProc("SetConsoleMode")
+
+ var mode uint32
+ pMode := &mode
+ procGetConsoleMode.Call(uintptr(syscall.Stdin), uintptr(unsafe.Pointer(pMode)))
+
+	var echoMode, lineMode uint32
+	echoMode = 4 // ENABLE_ECHO_INPUT
+	lineMode = 2 // ENABLE_LINE_INPUT
+	var newMode uint32
+	newMode = mode &^ (echoMode | lineMode) // disable echo and line buffering
+
+ procSetConsoleMode.Call(uintptr(syscall.Stdin), uintptr(newMode))
+ defer procSetConsoleMode.Call(uintptr(syscall.Stdin), uintptr(mode))
+
+ line := make([]uint16, 1)
+ pLine := &line[0]
+ var n uint16
+ procReadConsole.Call(uintptr(syscall.Stdin), uintptr(unsafe.Pointer(pLine)), uintptr(len(line)), uintptr(unsafe.Pointer(&n)))
+
+ b := []byte(string(utf16.Decode(line)))
+
+ // Not sure how this could happen, but it did for someone
+ if len(b) > 0 {
+ return b[0], nil
+ } else {
+ return 13, errors.New("Read error")
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/.gitignore b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/.gitignore
new file mode 100644
index 00000000000..dd8fc7468f4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/.gitignore
@@ -0,0 +1,5 @@
+*.6
+6.out
+_obj/
+_test/
+_testmain.go
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/LICENSE b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/README.md b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/README.md
new file mode 100644
index 00000000000..8f36612218f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/README.md
@@ -0,0 +1,96 @@
+[![GoDoc](https://godoc.org/github.com/jacobsa/oglematchers?status.svg)](https://godoc.org/github.com/jacobsa/oglematchers)
+
+`oglematchers` is a package for the Go programming language containing a set of
+matchers, useful in a testing or mocking framework, inspired by and mostly
+compatible with [Google Test][googletest] for C++ and
+[Google JS Test][google-js-test]. The package is used by the
+[ogletest][ogletest] testing framework and [oglemock][oglemock] mocking
+framework, which may be more directly useful to you, though the matchers can
+also be used on their own.
+
+A "matcher" is simply an object with a `Matches` method defining a set of golang
+values matched by the matcher, and a `Description` method describing that set.
+For example, here are some matchers:
+
+```go
+// Numbers
+Equals(17.13)
+LessThan(19)
+
+// Strings
+Equals("taco")
+HasSubstr("burrito")
+MatchesRegex("t.*o")
+
+// Combining matchers
+AnyOf(LessThan(17), GreaterThan(19))
+```
+
+There are lots more; see [here][reference] for a reference. You can also add
+your own simply by implementing the `oglematchers.Matcher` interface.
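+
+For instance, a matcher for even integers might be sketched as follows (a
+minimal sketch: `isEven` is a hypothetical name, while `NewFatalError`,
+`AnyOf`, and the empty-string mismatch error follow this package's own
+conventions):
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/jacobsa/oglematchers"
+)
+
+// isEven matches any int that is divisible by two.
+type isEven struct{}
+
+func (m isEven) Description() string { return "is even" }
+
+func (m isEven) Matches(c interface{}) error {
+	n, ok := c.(int)
+	if !ok {
+		return oglematchers.NewFatalError("which is not an int")
+	}
+	if n%2 != 0 {
+		return errors.New("")
+	}
+	return nil
+}
+
+func main() {
+	m := oglematchers.AnyOf(isEven{}, 17)
+	fmt.Println(m.Matches(4))  // <nil>
+	fmt.Println(m.Matches(17)) // <nil>
+}
+```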
+
+
+Installation
+------------
+
+First, make sure you have installed Go 1.0.2 or newer. See
+[here][golang-install] for instructions.
+
+Use the following command to install `oglematchers` and keep it up to date:
+
+ go get -u github.com/jacobsa/oglematchers
+
+
+Documentation
+-------------
+
+See [here][reference] for documentation. Alternatively, you can install the
+package and then use `godoc`:
+
+ godoc github.com/jacobsa/oglematchers
+
+
+[reference]: http://godoc.org/github.com/jacobsa/oglematchers
+[golang-install]: http://golang.org/doc/install.html
+[googletest]: http://code.google.com/p/googletest/
+[google-js-test]: http://code.google.com/p/google-js-test/
+[ogletest]: http://github.com/jacobsa/ogletest
+[oglemock]: http://github.com/jacobsa/oglemock
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/all_of.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/all_of.go
new file mode 100644
index 00000000000..d93a9740443
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/all_of.go
@@ -0,0 +1,73 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "strings"
+)
+
+// AllOf accepts a set of matchers S and returns a matcher that follows the
+// algorithm below when considering a candidate c:
+//
+// 1. Return true if for every Matcher m in S, m matches c.
+//
+// 2. Otherwise, if there is a matcher m in S such that m returns a fatal
+// error for c, return that matcher's error message.
+//
+// 3. Otherwise, return false with the error from some wrapped matcher.
+//
+// This is akin to a logical AND operation for matchers.
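+//
+// For example, AllOf(GreaterThan(17), LessThan(19)) matches 18 but rejects
+// both 17 and 19.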
+func AllOf(matchers ...Matcher) Matcher {
+ return &allOfMatcher{matchers}
+}
+
+type allOfMatcher struct {
+ wrappedMatchers []Matcher
+}
+
+func (m *allOfMatcher) Description() string {
+ // Special case: the empty set.
+ if len(m.wrappedMatchers) == 0 {
+ return "is anything"
+ }
+
+ // Join the descriptions for the wrapped matchers.
+ wrappedDescs := make([]string, len(m.wrappedMatchers))
+ for i, wrappedMatcher := range m.wrappedMatchers {
+ wrappedDescs[i] = wrappedMatcher.Description()
+ }
+
+ return strings.Join(wrappedDescs, ", and ")
+}
+
+func (m *allOfMatcher) Matches(c interface{}) (err error) {
+ for _, wrappedMatcher := range m.wrappedMatchers {
+ if wrappedErr := wrappedMatcher.Matches(c); wrappedErr != nil {
+ err = wrappedErr
+
+ // If the error is fatal, return immediately with this error.
+ _, ok := wrappedErr.(*FatalError)
+ if ok {
+ return
+ }
+ }
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/all_of_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/all_of_test.go
new file mode 100644
index 00000000000..f07306f10ad
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/all_of_test.go
@@ -0,0 +1,110 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+ "errors"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type allOfFakeMatcher struct {
+ desc string
+ err error
+}
+
+func (m *allOfFakeMatcher) Matches(c interface{}) error {
+ return m.err
+}
+
+func (m *allOfFakeMatcher) Description() string {
+ return m.desc
+}
+
+type AllOfTest struct {
+}
+
+func init() { RegisterTestSuite(&AllOfTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *AllOfTest) DescriptionWithEmptySet() {
+ m := AllOf()
+ ExpectEq("is anything", m.Description())
+}
+
+func (t *AllOfTest) DescriptionWithOneMatcher() {
+ m := AllOf(&allOfFakeMatcher{"taco", errors.New("")})
+ ExpectEq("taco", m.Description())
+}
+
+func (t *AllOfTest) DescriptionWithMultipleMatchers() {
+ m := AllOf(
+ &allOfFakeMatcher{"taco", errors.New("")},
+ &allOfFakeMatcher{"burrito", errors.New("")},
+ &allOfFakeMatcher{"enchilada", errors.New("")})
+
+ ExpectEq("taco, and burrito, and enchilada", m.Description())
+}
+
+func (t *AllOfTest) EmptySet() {
+ m := AllOf()
+ err := m.Matches(17)
+
+ ExpectEq(nil, err)
+}
+
+func (t *AllOfTest) OneMatcherReturnsFatalErrorAndSomeOthersFail() {
+ m := AllOf(
+ &allOfFakeMatcher{"", errors.New("")},
+ &allOfFakeMatcher{"", NewFatalError("taco")},
+ &allOfFakeMatcher{"", errors.New("")},
+ &allOfFakeMatcher{"", nil})
+
+ err := m.Matches(17)
+
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *AllOfTest) OneMatcherReturnsNonFatalAndOthersSayTrue() {
+ m := AllOf(
+ &allOfFakeMatcher{"", nil},
+ &allOfFakeMatcher{"", errors.New("taco")},
+ &allOfFakeMatcher{"", nil})
+
+ err := m.Matches(17)
+
+ ExpectFalse(isFatal(err))
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *AllOfTest) AllMatchersSayTrue() {
+ m := AllOf(
+ &allOfFakeMatcher{"", nil},
+ &allOfFakeMatcher{"", nil},
+ &allOfFakeMatcher{"", nil})
+
+ err := m.Matches(17)
+
+ ExpectEq(nil, err)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any.go
new file mode 100644
index 00000000000..f6991ec1020
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any.go
@@ -0,0 +1,32 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+// Any returns a matcher that matches any value.
+func Any() Matcher {
+ return &anyMatcher{}
+}
+
+type anyMatcher struct {
+}
+
+func (m *anyMatcher) Description() string {
+ return "is anything"
+}
+
+func (m *anyMatcher) Matches(c interface{}) error {
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_of.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_of.go
new file mode 100644
index 00000000000..2918b51f21a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_of.go
@@ -0,0 +1,97 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// AnyOf accepts a set of values S and returns a matcher that follows the
+// algorithm below when considering a candidate c:
+//
+// 1. If there exists a value m in S such that m implements the Matcher
+// interface and m matches c, return true.
+//
+// 2. Otherwise, if there exists a value v in S such that v does not implement
+// the Matcher interface and the matcher Equals(v) matches c, return true.
+//
+// 3. Otherwise, if there is a value m in S such that m implements the Matcher
+// interface and m returns a fatal error for c, return that fatal error.
+//
+// 4. Otherwise, return false.
+//
+// This is akin to a logical OR operation for matchers, with non-matchers x
+// being treated as Equals(x).
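+//
+// For example, AnyOf(LessThan(17), GreaterThan(19), 18) matches 16, 18, and
+// 20, but not 17.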
+func AnyOf(vals ...interface{}) Matcher {
+ // Get ahold of a type variable for the Matcher interface.
+ var dummy *Matcher
+ matcherType := reflect.TypeOf(dummy).Elem()
+
+ // Create a matcher for each value, or use the value itself if it's already a
+ // matcher.
+ wrapped := make([]Matcher, len(vals))
+ for i, v := range vals {
+ t := reflect.TypeOf(v)
+ if t != nil && t.Implements(matcherType) {
+ wrapped[i] = v.(Matcher)
+ } else {
+ wrapped[i] = Equals(v)
+ }
+ }
+
+ return &anyOfMatcher{wrapped}
+}
+
+type anyOfMatcher struct {
+ wrapped []Matcher
+}
+
+func (m *anyOfMatcher) Description() string {
+ wrappedDescs := make([]string, len(m.wrapped))
+ for i, matcher := range m.wrapped {
+ wrappedDescs[i] = matcher.Description()
+ }
+
+ return fmt.Sprintf("or(%s)", strings.Join(wrappedDescs, ", "))
+}
+
+func (m *anyOfMatcher) Matches(c interface{}) (err error) {
+ err = errors.New("")
+
+ // Try each matcher in turn.
+ for _, matcher := range m.wrapped {
+ wrappedErr := matcher.Matches(c)
+
+ // Return immediately if there's a match.
+ if wrappedErr == nil {
+ err = nil
+ return
+ }
+
+ // Note the fatal error, if any.
+ if _, isFatal := wrappedErr.(*FatalError); isFatal {
+ err = wrappedErr
+ }
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_of_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_of_test.go
new file mode 100644
index 00000000000..b0f85efc4a3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_of_test.go
@@ -0,0 +1,139 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "errors"
+
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type fakeAnyOfMatcher struct {
+ desc string
+ err error
+}
+
+func (m *fakeAnyOfMatcher) Matches(c interface{}) error {
+ return m.err
+}
+
+func (m *fakeAnyOfMatcher) Description() string {
+ return m.desc
+}
+
+type AnyOfTest struct {
+}
+
+func init() { RegisterTestSuite(&AnyOfTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *AnyOfTest) EmptySet() {
+ matcher := AnyOf()
+
+ err := matcher.Matches(0)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *AnyOfTest) OneTrue() {
+ matcher := AnyOf(
+ &fakeAnyOfMatcher{"", NewFatalError("foo")},
+ 17,
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ &fakeAnyOfMatcher{"", nil},
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ )
+
+ err := matcher.Matches(0)
+ ExpectEq(nil, err)
+}
+
+func (t *AnyOfTest) OneEqual() {
+ matcher := AnyOf(
+ &fakeAnyOfMatcher{"", NewFatalError("foo")},
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ 13,
+ "taco",
+ 19,
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ )
+
+ err := matcher.Matches("taco")
+ ExpectEq(nil, err)
+}
+
+func (t *AnyOfTest) OneFatal() {
+ matcher := AnyOf(
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ 17,
+ &fakeAnyOfMatcher{"", NewFatalError("taco")},
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ )
+
+ err := matcher.Matches(0)
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *AnyOfTest) OneNil() {
+ var err error
+ matcher := AnyOf(
+ 13,
+ nil,
+ 19,
+ )
+
+ // No match
+ err = matcher.Matches(14)
+ ExpectNe(nil, err)
+
+ // Match
+ err = matcher.Matches(nil)
+ ExpectEq(nil, err)
+}
+
+func (t *AnyOfTest) AllFalseAndNotEqual() {
+ matcher := AnyOf(
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ 17,
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ 19,
+ )
+
+ err := matcher.Matches(0)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *AnyOfTest) DescriptionForEmptySet() {
+ matcher := AnyOf()
+ ExpectEq("or()", matcher.Description())
+}
+
+func (t *AnyOfTest) DescriptionForNonEmptySet() {
+ matcher := AnyOf(
+ &fakeAnyOfMatcher{"taco", nil},
+ "burrito",
+ &fakeAnyOfMatcher{"enchilada", nil},
+ )
+
+ ExpectEq("or(taco, burrito, enchilada)", matcher.Description())
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_test.go
new file mode 100644
index 00000000000..7b6f6075868
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/any_test.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type AnyTest struct {
+}
+
+func init() { RegisterTestSuite(&AnyTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *AnyTest) Description() {
+ m := Any()
+ ExpectEq("is anything", m.Description())
+}
+
+func (t *AnyTest) Matches() {
+ var err error
+ m := Any()
+
+ err = m.Matches(nil)
+ ExpectEq(nil, err)
+
+ err = m.Matches(17)
+ ExpectEq(nil, err)
+
+ err = m.Matches("taco")
+ ExpectEq(nil, err)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/contains.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/contains.go
new file mode 100644
index 00000000000..2f326dbc5d6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/contains.go
@@ -0,0 +1,64 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Contains returns a matcher that matches arrays or slices with at least one
+// element that matches the supplied argument. If the argument x is not itself
+// a Matcher, this is equivalent to Contains(Equals(x)).
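+//
+// For example, Contains("taco") matches []string{"burrito", "taco"} but not
+// []string{"burrito", "enchilada"}.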
+func Contains(x interface{}) Matcher {
+ var result containsMatcher
+ var ok bool
+
+ if result.elementMatcher, ok = x.(Matcher); !ok {
+ result.elementMatcher = Equals(x)
+ }
+
+ return &result
+}
+
+type containsMatcher struct {
+ elementMatcher Matcher
+}
+
+func (m *containsMatcher) Description() string {
+ return fmt.Sprintf("contains: %s", m.elementMatcher.Description())
+}
+
+func (m *containsMatcher) Matches(candidate interface{}) error {
+ // The candidate must be a slice or an array.
+ v := reflect.ValueOf(candidate)
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+ return NewFatalError("which is not a slice or array")
+ }
+
+ // Check each element.
+ for i := 0; i < v.Len(); i++ {
+ elem := v.Index(i)
+ if matchErr := m.elementMatcher.Matches(elem.Interface()); matchErr == nil {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/contains_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/contains_test.go
new file mode 100644
index 00000000000..34625fcaa5f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/contains_test.go
@@ -0,0 +1,233 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type ContainsTest struct {}
+func init() { RegisterTestSuite(&ContainsTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *ContainsTest) WrongTypeCandidates() {
+ m := Contains("")
+ ExpectEq("contains: ", m.Description())
+
+ var err error
+
+ // Nil candidate
+ err = m.Matches(nil)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+
+ // String candidate
+ err = m.Matches("")
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+
+ // Map candidate
+ err = m.Matches(make(map[string]string))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+}
+
+func (t *ContainsTest) NilArgument() {
+ m := Contains(nil)
+ ExpectEq("contains: is nil", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Empty array of pointers
+ c = [...]*int{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Empty slice of pointers
+ c = []*int{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-empty array of integers
+ c = [...]int{17, 0, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-empty slice of integers
+ c = []int{17, 0, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching array of pointers
+ c = [...]*int{new(int), new(int)}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching slice of pointers
+ c = []*int{new(int), new(int)}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Matching array of pointers
+ c = [...]*int{new(int), nil, new(int)}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching slice of pointers
+ c = []*int{new(int), nil, new(int)}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching slice of pointers from matching array
+ someArray := [...]*int{new(int), nil, new(int)}
+ c = someArray[0:1]
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *ContainsTest) StringArgument() {
+ m := Contains("taco")
+ ExpectEq("contains: taco", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Non-matching array of strings
+ c = [...]string{"burrito", "enchilada"}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching slice of strings
+ c = []string{"burrito", "enchilada"}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Matching array of strings
+ c = [...]string{"burrito", "taco", "enchilada"}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching slice of strings
+ c = []string{"burrito", "taco", "enchilada"}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching slice of strings from matching array
+ someArray := [...]string{"burrito", "taco", "enchilada"}
+ c = someArray[0:1]
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *ContainsTest) IntegerArgument() {
+ m := Contains(int(17))
+ ExpectEq("contains: 17", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Non-matching array of integers
+ c = [...]int{13, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching slice of integers
+ c = []int{13, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Matching array of integers
+ c = [...]int{13, 17, 19}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching slice of integers
+ c = []int{13, 17, 19}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching slice of integers from matching array
+ someArray := [...]int{13, 17, 19}
+ c = someArray[0:1]
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching array of floats
+ c = [...]float32{13, 17.5, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching slice of floats
+ c = []float32{13, 17.5, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Matching array of floats
+ c = [...]float32{13, 17, 19}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching slice of floats
+ c = []float32{13, 17, 19}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+}
+
+func (t *ContainsTest) MatcherArgument() {
+ m := Contains(HasSubstr("ac"))
+ ExpectEq("contains: has substring \"ac\"", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Non-matching array of strings
+ c = [...]string{"burrito", "enchilada"}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching slice of strings
+ c = []string{"burrito", "enchilada"}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Matching array of strings
+ c = [...]string{"burrito", "taco", "enchilada"}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching slice of strings
+ c = []string{"burrito", "taco", "enchilada"}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching slice of strings from matching array
+ someArray := [...]string{"burrito", "taco", "enchilada"}
+ c = someArray[0:1]
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/deep_equals.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/deep_equals.go
new file mode 100644
index 00000000000..1d91baef32e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/deep_equals.go
@@ -0,0 +1,91 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+var byteSliceType reflect.Type = reflect.TypeOf([]byte{})
+
+// DeepEquals returns a matcher that matches based on 'deep equality', as
+// defined by the reflect package. This matcher requires that values have
+// identical types to x.
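+//
+// For example, DeepEquals([]byte{17, 19}) matches []byte{17, 19}, but gives a
+// fatal error for []uint16{17, 19} because the types differ.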
+func DeepEquals(x interface{}) Matcher {
+ return &deepEqualsMatcher{x}
+}
+
+type deepEqualsMatcher struct {
+ x interface{}
+}
+
+func (m *deepEqualsMatcher) Description() string {
+ xDesc := fmt.Sprintf("%v", m.x)
+ xValue := reflect.ValueOf(m.x)
+
+ // Special case: fmt.Sprintf presents nil slices as "[]", but
+ // reflect.DeepEqual makes a distinction between nil and empty slices. Make
+ // this less confusing.
+ if xValue.Kind() == reflect.Slice && xValue.IsNil() {
+ xDesc = "<nil slice>"
+ }
+
+ return fmt.Sprintf("deep equals: %s", xDesc)
+}
+
+func (m *deepEqualsMatcher) Matches(c interface{}) error {
+ // Make sure the types match.
+ ct := reflect.TypeOf(c)
+ xt := reflect.TypeOf(m.x)
+
+ if ct != xt {
+ return NewFatalError(fmt.Sprintf("which is of type %v", ct))
+ }
+
+ // Special case: handle byte slices more efficiently.
+ cValue := reflect.ValueOf(c)
+ xValue := reflect.ValueOf(m.x)
+
+ if ct == byteSliceType && !cValue.IsNil() && !xValue.IsNil() {
+ xBytes := m.x.([]byte)
+ cBytes := c.([]byte)
+
+ if bytes.Equal(cBytes, xBytes) {
+ return nil
+ }
+
+ return errors.New("")
+ }
+
+ // Defer to the reflect package.
+ if reflect.DeepEqual(m.x, c) {
+ return nil
+ }
+
+	// Special case: if the comparison failed because c is the nil slice, give
+	// an indication of this (since its value is printed as "[]").
+ if cValue.Kind() == reflect.Slice && cValue.IsNil() {
+ return errors.New("which is nil")
+ }
+
+ return errors.New("")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/deep_equals_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/deep_equals_test.go
new file mode 100644
index 00000000000..9fedfd77017
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/deep_equals_test.go
@@ -0,0 +1,343 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+ "bytes"
+ "testing"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type DeepEqualsTest struct {}
+func init() { RegisterTestSuite(&DeepEqualsTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *DeepEqualsTest) WrongTypeCandidateWithScalarValue() {
+ var x int = 17
+ m := DeepEquals(x)
+
+ var err error
+
+ // Nil candidate.
+ err = m.Matches(nil)
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("<nil>")))
+
+ // Int alias candidate.
+ type intAlias int
+ err = m.Matches(intAlias(x))
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("intAlias")))
+
+ // String candidate.
+ err = m.Matches("taco")
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("string")))
+
+ // Byte slice candidate.
+ err = m.Matches([]byte{})
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint8")))
+
+ // Other slice candidate.
+ err = m.Matches([]uint16{})
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint16")))
+
+ // Unsigned int candidate.
+ err = m.Matches(uint(17))
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("uint")))
+}
+
+func (t *DeepEqualsTest) WrongTypeCandidateWithByteSliceValue() {
+ x := []byte{}
+ m := DeepEquals(x)
+
+ var err error
+
+ // Nil candidate.
+ err = m.Matches(nil)
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("<nil>")))
+
+ // String candidate.
+ err = m.Matches("taco")
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("string")))
+
+ // Slice candidate with wrong value type.
+ err = m.Matches([]uint16{})
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint16")))
+}
+
+func (t *DeepEqualsTest) WrongTypeCandidateWithOtherSliceValue() {
+ x := []uint16{}
+ m := DeepEquals(x)
+
+ var err error
+
+ // Nil candidate.
+ err = m.Matches(nil)
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("<nil>")))
+
+ // String candidate.
+ err = m.Matches("taco")
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("string")))
+
+ // Byte slice candidate with wrong value type.
+ err = m.Matches([]byte{})
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint8")))
+
+ // Other slice candidate with wrong value type.
+ err = m.Matches([]uint32{})
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint32")))
+}
+
+func (t *DeepEqualsTest) WrongTypeCandidateWithNilLiteralValue() {
+ m := DeepEquals(nil)
+
+ var err error
+
+ // String candidate.
+ err = m.Matches("taco")
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("string")))
+
+ // Nil byte slice candidate.
+ err = m.Matches([]byte(nil))
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint8")))
+
+ // Nil other slice candidate.
+ err = m.Matches([]uint16(nil))
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint16")))
+}
+
+func (t *DeepEqualsTest) NilLiteralValue() {
+ m := DeepEquals(nil)
+ ExpectEq("deep equals: <nil>", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Nil literal candidate.
+ c = nil
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+}
+
+func (t *DeepEqualsTest) IntValue() {
+ m := DeepEquals(int(17))
+ ExpectEq("deep equals: 17", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Matching int.
+ c = int(17)
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching int.
+ c = int(18)
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *DeepEqualsTest) ByteSliceValue() {
+ x := []byte{17, 19}
+ m := DeepEquals(x)
+ ExpectEq("deep equals: [17 19]", m.Description())
+
+ var c []byte
+ var err error
+
+ // Matching.
+ c = make([]byte, len(x))
+ AssertEq(len(x), copy(c, x))
+
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Nil slice.
+ c = []byte(nil)
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("which is nil")))
+
+ // Prefix.
+ AssertGt(len(x), 1)
+ c = make([]byte, len(x)-1)
+ AssertEq(len(x)-1, copy(c, x))
+
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Suffix.
+ c = make([]byte, len(x)+1)
+ AssertEq(len(x), copy(c, x))
+
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *DeepEqualsTest) OtherSliceValue() {
+ x := []uint16{17, 19}
+ m := DeepEquals(x)
+ ExpectEq("deep equals: [17 19]", m.Description())
+
+ var c []uint16
+ var err error
+
+ // Matching.
+ c = make([]uint16, len(x))
+ AssertEq(len(x), copy(c, x))
+
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Nil slice.
+ c = []uint16(nil)
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("which is nil")))
+
+ // Prefix.
+ AssertGt(len(x), 1)
+ c = make([]uint16, len(x)-1)
+ AssertEq(len(x)-1, copy(c, x))
+
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Suffix.
+ c = make([]uint16, len(x)+1)
+ AssertEq(len(x), copy(c, x))
+
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *DeepEqualsTest) NilByteSliceValue() {
+ x := []byte(nil)
+ m := DeepEquals(x)
+ ExpectEq("deep equals: <nil slice>", m.Description())
+
+ var c []byte
+ var err error
+
+ // Nil slice.
+ c = []byte(nil)
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-nil slice.
+ c = []byte{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *DeepEqualsTest) NilOtherSliceValue() {
+ x := []uint16(nil)
+ m := DeepEquals(x)
+ ExpectEq("deep equals: <nil slice>", m.Description())
+
+ var c []uint16
+ var err error
+
+ // Nil slice.
+ c = []uint16(nil)
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-nil slice.
+ c = []uint16{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+////////////////////////////////////////////////////////////////////////
+// Benchmarks
+////////////////////////////////////////////////////////////////////////
+
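+// benchmarkWithSize times DeepEquals against a matching []byte of the given
+// size, with the timer stopped while the buffers are prepared so that only
+// Matches is measured. SetBytes lets go test -bench report throughput.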
+func benchmarkWithSize(b *testing.B, size int) {
+ b.StopTimer()
+ buf := bytes.Repeat([]byte{0x01}, size)
+ bufCopy := make([]byte, size)
+ copy(bufCopy, buf)
+
+ matcher := DeepEquals(buf)
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ matcher.Matches(bufCopy)
+ }
+
+ b.SetBytes(int64(size))
+}
+
+func BenchmarkShortByteSlice(b *testing.B) {
+ benchmarkWithSize(b, 256)
+}
+
+func BenchmarkLongByteSlice(b *testing.B) {
+ benchmarkWithSize(b, 1<<24)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/elements_are.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/elements_are.go
new file mode 100644
index 00000000000..2941847c705
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/elements_are.go
@@ -0,0 +1,91 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// Given a list of arguments M, ElementsAre returns a matcher that matches
+// arrays and slices A where all of the following hold:
+//
+// * A is the same length as M.
+//
+// * For each i < len(A) where M[i] is a matcher, A[i] matches M[i].
+//
+// * For each i < len(A) where M[i] is not a matcher, A[i] matches
+// Equals(M[i]).
+//
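+// A short usage sketch (illustrative only, assuming ogletest's ExpectThat is
+// in scope):
+//
+//     ExpectThat([]interface{}{"taco", 16}, ElementsAre("taco", LessThan(17)))
+//
+// Here element 0 must equal "taco" and element 1 must be less than 17; a
+// candidate of any other length is rejected outright.
+//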
+func ElementsAre(M ...interface{}) Matcher {
+ // Copy over matchers, or convert to Equals(x) for non-matcher x.
+ subMatchers := make([]Matcher, len(M))
+ for i, x := range M {
+ if matcher, ok := x.(Matcher); ok {
+ subMatchers[i] = matcher
+ continue
+ }
+
+ subMatchers[i] = Equals(x)
+ }
+
+ return &elementsAreMatcher{subMatchers}
+}
+
+type elementsAreMatcher struct {
+ subMatchers []Matcher
+}
+
+func (m *elementsAreMatcher) Description() string {
+ subDescs := make([]string, len(m.subMatchers))
+ for i, sm := range m.subMatchers {
+ subDescs[i] = sm.Description()
+ }
+
+ return fmt.Sprintf("elements are: [%s]", strings.Join(subDescs, ", "))
+}
+
+func (m *elementsAreMatcher) Matches(candidate interface{}) error {
+ // The candidate must be a slice or an array.
+ v := reflect.ValueOf(candidate)
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+ return NewFatalError("which is not a slice or array")
+ }
+
+ // The length must be correct.
+ if v.Len() != len(m.subMatchers) {
+ return fmt.Errorf("which is of length %d", v.Len())
+ }
+
+ // Check each element.
+ for i, subMatcher := range m.subMatchers {
+ c := v.Index(i)
+ if matchErr := subMatcher.Matches(c.Interface()); matchErr != nil {
+ // Return an error indicating which element doesn't match. If the
+ // matcher error was fatal, make this one fatal too.
+ err := fmt.Errorf("whose element %d doesn't match", i)
+ if _, isFatal := matchErr.(*FatalError); isFatal {
+ err = NewFatalError(err.Error())
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/elements_are_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/elements_are_test.go
new file mode 100644
index 00000000000..56e0f375d85
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/elements_are_test.go
@@ -0,0 +1,208 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type ElementsAreTest struct {
+}
+
+func init() { RegisterTestSuite(&ElementsAreTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *ElementsAreTest) EmptySet() {
+ m := ElementsAre()
+ ExpectEq("elements are: []", m.Description())
+
+ var c []interface{}
+ var err error
+
+ // No candidates.
+ c = []interface{}{}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // One candidate.
+ c = []interface{}{17}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 1")))
+}
+
+func (t *ElementsAreTest) OneMatcher() {
+ m := ElementsAre(LessThan(17))
+ ExpectEq("elements are: [less than 17]", m.Description())
+
+ var c []interface{}
+ var err error
+
+ // No candidates.
+ c = []interface{}{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 0")))
+
+ // Matching candidate.
+ c = []interface{}{16}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching candidate.
+ c = []interface{}{19}
+ err = m.Matches(c)
+ ExpectNe(nil, err)
+
+ // Two candidates.
+ c = []interface{}{17, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 2")))
+}
+
+func (t *ElementsAreTest) OneValue() {
+ m := ElementsAre(17)
+ ExpectEq("elements are: [17]", m.Description())
+
+ var c []interface{}
+ var err error
+
+ // No candidates.
+ c = []interface{}{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 0")))
+
+ // Matching int.
+ c = []interface{}{int(17)}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching float.
+ c = []interface{}{float32(17)}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching candidate.
+ c = []interface{}{19}
+ err = m.Matches(c)
+ ExpectNe(nil, err)
+
+ // Two candidates.
+ c = []interface{}{17, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 2")))
+}
+
+func (t *ElementsAreTest) MultipleElements() {
+ m := ElementsAre("taco", LessThan(17))
+ ExpectEq("elements are: [taco, less than 17]", m.Description())
+
+ var c []interface{}
+ var err error
+
+ // One candidate.
+ c = []interface{}{17}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 1")))
+
+ // Both matching.
+ c = []interface{}{"taco", 16}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // First non-matching.
+ c = []interface{}{"burrito", 16}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("whose element 0 doesn't match")))
+
+ // Second non-matching.
+ c = []interface{}{"taco", 17}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("whose element 1 doesn't match")))
+
+ // Three candidates.
+ c = []interface{}{"taco", 17, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 3")))
+}
+
+func (t *ElementsAreTest) ArrayCandidates() {
+ m := ElementsAre("taco", LessThan(17))
+
+ var err error
+
+ // One candidate.
+ err = m.Matches([1]interface{}{"taco"})
+ ExpectThat(err, Error(HasSubstr("length 1")))
+
+ // Both matching.
+ err = m.Matches([2]interface{}{"taco", 16})
+ ExpectEq(nil, err)
+
+ // First non-matching.
+ err = m.Matches([2]interface{}{"burrito", 16})
+ ExpectThat(err, Error(Equals("whose element 0 doesn't match")))
+}
+
+func (t *ElementsAreTest) WrongTypeCandidate() {
+ m := ElementsAre("taco")
+
+ var err error
+
+ // String candidate.
+ err = m.Matches("taco")
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+
+ // Map candidate.
+ err = m.Matches(map[string]string{})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+
+ // Nil candidate.
+ err = m.Matches(nil)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+}
+
+func (t *ElementsAreTest) PropagatesFatality() {
+ m := ElementsAre(LessThan(17))
+ ExpectEq("elements are: [less than 17]", m.Description())
+
+ var c []interface{}
+ var err error
+
+ // Non-fatal error.
+ c = []interface{}{19}
+ err = m.Matches(c)
+ AssertNe(nil, err)
+ ExpectFalse(isFatal(err))
+
+ // Fatal error.
+ c = []interface{}{"taco"}
+ err = m.Matches(c)
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/equals.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/equals.go
new file mode 100644
index 00000000000..26280c5a6eb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/equals.go
@@ -0,0 +1,557 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+)
+
+// Equals(x) returns a matcher that matches values v such that v and x are
+// equivalent. This includes the case when the comparison v == x using Go's
+// built-in comparison operator is legal (except for structs, which this
+// matcher does not support), but for convenience the following rules also
+// apply:
+//
+// * Type checking is done based on underlying types rather than actual
+// types, so that e.g. two aliases for string can be compared:
+//
+// type stringAlias1 string
+// type stringAlias2 string
+//
+// a := "taco"
+// b := stringAlias1("taco")
+// c := stringAlias2("taco")
+//
+// ExpectTrue(a == b) // Legal, passes
+// ExpectTrue(b == c) // Illegal, doesn't compile
+//
+// ExpectThat(a, Equals(b)) // Passes
+// ExpectThat(b, Equals(c)) // Passes
+//
+// * Values of numeric type are treated as if they were abstract numbers, and
+// compared accordingly. Therefore Equals(17) will match int(17),
+// int16(17), uint(17), float32(17), complex64(17), and so on.
+//
+// If you want a stricter matcher that contains no such cleverness, see
+// IdenticalTo instead.
+//
+// Arrays are supported by this matcher, but do not participate in the
+// exceptions above. Two arrays compared with this matcher must have identical
+// types, and their element type must itself be comparable according to Go's ==
+// operator.
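+//
+// A short usage sketch (illustrative only, assuming ogletest's ExpectThat is
+// in scope):
+//
+//     ExpectThat(uint8(17), Equals(17))  // Passes: numbers are abstract.
+//     ExpectThat(17.0, Equals(17))       // Passes: 17.0 is the number 17.
+//     ExpectThat("taco", Equals(17))     // Fails fatally: not numeric.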
+func Equals(x interface{}) Matcher {
+ v := reflect.ValueOf(x)
+
+ // This matcher doesn't support structs.
+ if v.Kind() == reflect.Struct {
+ panic(fmt.Sprintf("oglematchers.Equals: unsupported kind %v", v.Kind()))
+ }
+
+ // The == operator is only defined for slices when comparing against the
+ // nil literal, so reject non-nil slices here.
+ if v.Kind() == reflect.Slice && v.Pointer() != uintptr(0) {
+ panic("oglematchers.Equals: non-nil slice")
+ }
+
+ return &equalsMatcher{v}
+}
+
+type equalsMatcher struct {
+ expectedValue reflect.Value
+}
+
+////////////////////////////////////////////////////////////////////////
+// Numeric types
+////////////////////////////////////////////////////////////////////////
+
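+// The is* helpers below classify a candidate's reflect.Kind. The range
+// checks assume the Kind constants are declared contiguously (Int..Int64 and
+// Uint..Uint64), as they are in package reflect's iota block.
+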
+func isSignedInteger(v reflect.Value) bool {
+ k := v.Kind()
+ return k >= reflect.Int && k <= reflect.Int64
+}
+
+func isUnsignedInteger(v reflect.Value) bool {
+ k := v.Kind()
+ return k >= reflect.Uint && k <= reflect.Uint64
+}
+
+func isInteger(v reflect.Value) bool {
+ return isSignedInteger(v) || isUnsignedInteger(v)
+}
+
+func isFloat(v reflect.Value) bool {
+ k := v.Kind()
+ return k == reflect.Float32 || k == reflect.Float64
+}
+
+func isComplex(v reflect.Value) bool {
+ k := v.Kind()
+ return k == reflect.Complex64 || k == reflect.Complex128
+}
+
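+// Each checkAgainst* helper below shares a convention: err starts out as a
+// non-nil error with empty text (a plain mismatch), is cleared on a match,
+// and is replaced by a *FatalError when the candidate's type rules the
+// comparison out entirely.
+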
+func checkAgainstInt64(e int64, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(c):
+ if c.Int() == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ u := c.Uint()
+ if u <= math.MaxInt64 && int64(u) == e {
+ err = nil
+ }
+
+ // Turn around the various floating point types so that the checkAgainst*
+ // functions for them can deal with precision issues.
+ case isFloat(c), isComplex(c):
+ return Equals(c.Interface()).Matches(e)
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstUint64(e uint64, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(c):
+ i := c.Int()
+ if i >= 0 && uint64(i) == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ if c.Uint() == e {
+ err = nil
+ }
+
+ // Turn around the various floating point types so that the checkAgainst*
+ // functions for them can deal with precision issues.
+ case isFloat(c), isComplex(c):
+ return Equals(c.Interface()).Matches(e)
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstFloat32(e float32, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(c):
+ if float32(c.Int()) == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ if float32(c.Uint()) == e {
+ err = nil
+ }
+
+ case isFloat(c):
+ // Compare using float32 to avoid a false sense of precision; otherwise
+ // e.g. Equals(float32(0.1)) won't match float32(0.1).
+ if float32(c.Float()) == e {
+ err = nil
+ }
+
+ case isComplex(c):
+ comp := c.Complex()
+ rl := real(comp)
+ im := imag(comp)
+
+ // Compare using float32 to avoid a false sense of precision; otherwise
+ // e.g. Equals(float32(0.1)) won't match (0.1 + 0i).
+ if im == 0 && float32(rl) == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstFloat64(e float64, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ ck := c.Kind()
+
+ switch {
+ case isSignedInteger(c):
+ if float64(c.Int()) == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ if float64(c.Uint()) == e {
+ err = nil
+ }
+
+ // If the actual value is lower precision, turn the comparison around so we
+ // apply the low-precision rules. Otherwise, e.g. Equals(0.1) may not match
+ // float32(0.1).
+ case ck == reflect.Float32 || ck == reflect.Complex64:
+ return Equals(c.Interface()).Matches(e)
+
+ // Otherwise, compare with double precision.
+ case isFloat(c):
+ if c.Float() == e {
+ err = nil
+ }
+
+ case isComplex(c):
+ comp := c.Complex()
+ rl := real(comp)
+ im := imag(comp)
+
+ if im == 0 && rl == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstComplex64(e complex64, c reflect.Value) (err error) {
+ err = errors.New("")
+ realPart := real(e)
+ imaginaryPart := imag(e)
+
+ switch {
+ case isInteger(c) || isFloat(c):
+ // If we have no imaginary part, then we should just compare against the
+ // real part. Otherwise, we can't be equal.
+ if imaginaryPart != 0 {
+ return
+ }
+
+ return checkAgainstFloat32(realPart, c)
+
+ case isComplex(c):
+ // Compare using complex64 to avoid a false sense of precision; otherwise
+ // e.g. Equals(0.1 + 0i) won't match float32(0.1).
+ if complex64(c.Complex()) == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstComplex128(e complex128, c reflect.Value) (err error) {
+ err = errors.New("")
+ realPart := real(e)
+ imaginaryPart := imag(e)
+
+ switch {
+ case isInteger(c) || isFloat(c):
+ // If we have no imaginary part, then we should just compare against the
+ // real part. Otherwise, we can't be equal.
+ if imaginaryPart != 0 {
+ return
+ }
+
+ return checkAgainstFloat64(realPart, c)
+
+ case isComplex(c):
+ if c.Complex() == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+////////////////////////////////////////////////////////////////////////
+// Other types
+////////////////////////////////////////////////////////////////////////
+
+func checkAgainstBool(e bool, c reflect.Value) (err error) {
+ if c.Kind() != reflect.Bool {
+ err = NewFatalError("which is not a bool")
+ return
+ }
+
+ err = errors.New("")
+ if c.Bool() == e {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstUintptr(e uintptr, c reflect.Value) (err error) {
+ if c.Kind() != reflect.Uintptr {
+ err = NewFatalError("which is not a uintptr")
+ return
+ }
+
+ err = errors.New("")
+ if uintptr(c.Uint()) == e {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstChan(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "chan int".
+ typeStr := fmt.Sprintf("%s %s", e.Type().ChanDir(), e.Type().Elem())
+
+ // Make sure c is a chan of the correct type.
+ if c.Kind() != reflect.Chan ||
+ c.Type().ChanDir() != e.Type().ChanDir() ||
+ c.Type().Elem() != e.Type().Elem() {
+ err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr))
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstFunc(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is a function.
+ if c.Kind() != reflect.Func {
+ err = NewFatalError("which is not a function")
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstMap(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is a map.
+ if c.Kind() != reflect.Map {
+ err = NewFatalError("which is not a map")
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstPtr(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "*int".
+ typeStr := fmt.Sprintf("*%v", e.Type().Elem())
+
+ // Make sure c is a pointer of the correct type.
+ if c.Kind() != reflect.Ptr ||
+ c.Type().Elem() != e.Type().Elem() {
+ err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr))
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstSlice(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "[]int".
+ typeStr := fmt.Sprintf("[]%v", e.Type().Elem())
+
+ // Make sure c is a slice of the correct type.
+ if c.Kind() != reflect.Slice ||
+ c.Type().Elem() != e.Type().Elem() {
+ err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr))
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstString(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is a string.
+ if c.Kind() != reflect.String {
+ err = NewFatalError("which is not a string")
+ return
+ }
+
+ err = errors.New("")
+ if c.String() == e.String() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstArray(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "[2]int".
+ typeStr := fmt.Sprintf("%v", e.Type())
+
+ // Make sure c is the correct type.
+ if c.Type() != e.Type() {
+ err = NewFatalError(fmt.Sprintf("which is not %s", typeStr))
+ return
+ }
+
+ // Check for equality.
+ if e.Interface() != c.Interface() {
+ err = errors.New("")
+ return
+ }
+
+ return
+}
+
+func checkAgainstUnsafePointer(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is an unsafe.Pointer.
+ if c.Kind() != reflect.UnsafePointer {
+ err = NewFatalError("which is not a unsafe.Pointer")
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
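+// checkForNil matches candidates that compare equal to the nil literal:
+// either the nil literal itself (kind Invalid) or a value of a nil-able kind
+// that is itself nil.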
+func checkForNil(c reflect.Value) (err error) {
+ err = errors.New("")
+
+ // Make sure it is legal to call IsNil.
+ switch c.Kind() {
+ case reflect.Invalid:
+ case reflect.Chan:
+ case reflect.Func:
+ case reflect.Interface:
+ case reflect.Map:
+ case reflect.Ptr:
+ case reflect.Slice:
+
+ default:
+ err = NewFatalError("which cannot be compared to nil")
+ return
+ }
+
+ // Ask whether the value is nil. Handle a nil literal (kind Invalid)
+ // specially, since it's not legal to call IsNil there.
+ if c.Kind() == reflect.Invalid || c.IsNil() {
+ err = nil
+ }
+ return
+}
+
+////////////////////////////////////////////////////////////////////////
+// Public implementation
+////////////////////////////////////////////////////////////////////////
+
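+// Matches dispatches on the expected value's kind, delegating to the
+// checkAgainst* helper for that kind; a nil expected value (kind Invalid) is
+// routed to checkForNil.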
+func (m *equalsMatcher) Matches(candidate interface{}) error {
+ e := m.expectedValue
+ c := reflect.ValueOf(candidate)
+ ek := e.Kind()
+
+ switch {
+ case ek == reflect.Bool:
+ return checkAgainstBool(e.Bool(), c)
+
+ case isSignedInteger(e):
+ return checkAgainstInt64(e.Int(), c)
+
+ case isUnsignedInteger(e):
+ return checkAgainstUint64(e.Uint(), c)
+
+ case ek == reflect.Uintptr:
+ return checkAgainstUintptr(uintptr(e.Uint()), c)
+
+ case ek == reflect.Float32:
+ return checkAgainstFloat32(float32(e.Float()), c)
+
+ case ek == reflect.Float64:
+ return checkAgainstFloat64(e.Float(), c)
+
+ case ek == reflect.Complex64:
+ return checkAgainstComplex64(complex64(e.Complex()), c)
+
+ case ek == reflect.Complex128:
+ return checkAgainstComplex128(e.Complex(), c)
+
+ case ek == reflect.Chan:
+ return checkAgainstChan(e, c)
+
+ case ek == reflect.Func:
+ return checkAgainstFunc(e, c)
+
+ case ek == reflect.Map:
+ return checkAgainstMap(e, c)
+
+ case ek == reflect.Ptr:
+ return checkAgainstPtr(e, c)
+
+ case ek == reflect.Slice:
+ return checkAgainstSlice(e, c)
+
+ case ek == reflect.String:
+ return checkAgainstString(e, c)
+
+ case ek == reflect.Array:
+ return checkAgainstArray(e, c)
+
+ case ek == reflect.UnsafePointer:
+ return checkAgainstUnsafePointer(e, c)
+
+ case ek == reflect.Invalid:
+ return checkForNil(c)
+ }
+
+ panic(fmt.Sprintf("equalsMatcher.Matches: unexpected kind: %v", ek))
+}
+
+func (m *equalsMatcher) Description() string {
+ // Special case: handle nil.
+ if !m.expectedValue.IsValid() {
+ return "is nil"
+ }
+
+ return fmt.Sprintf("%v", m.expectedValue.Interface())
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/equals_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/equals_test.go
new file mode 100644
index 00000000000..6fdbd9b6cb7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/equals_test.go
@@ -0,0 +1,3843 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "fmt"
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+ "math"
+ "unsafe"
+)
+
+var someInt int = -17
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type EqualsTest struct {
+}
+
+func init() { RegisterTestSuite(&EqualsTest{}) }
+
+type equalsTestCase struct {
+ candidate interface{}
+ expectedResult bool
+ shouldBeFatal bool
+ expectedError string
+}
+
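+// checkTestCases runs the matcher against each case, verifying the match
+// result, whether any returned error is fatal, and the exact error text.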
+func (t *EqualsTest) checkTestCases(matcher Matcher, cases []equalsTestCase) {
+ for i, c := range cases {
+ err := matcher.Matches(c.candidate)
+ ExpectEq(
+ c.expectedResult,
+ (err == nil),
+ "Result for case %d: %v (Error: %v)", i, c, err)
+
+ if err == nil {
+ continue
+ }
+
+ _, isFatal := err.(*FatalError)
+ ExpectEq(c.shouldBeFatal, isFatal, "Fatality for case %d: %v", i, c)
+
+ ExpectThat(err, Error(Equals(c.expectedError)), "Case %d: %v", i, c)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+// nil
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) EqualsNil() {
+ matcher := Equals(nil)
+ ExpectEq("is nil", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Legal types
+ equalsTestCase{nil, true, false, ""},
+ equalsTestCase{chan int(nil), true, false, ""},
+ equalsTestCase{(func())(nil), true, false, ""},
+ equalsTestCase{interface{}(nil), true, false, ""},
+ equalsTestCase{map[int]int(nil), true, false, ""},
+ equalsTestCase{(*int)(nil), true, false, ""},
+ equalsTestCase{[]int(nil), true, false, ""},
+
+ equalsTestCase{make(chan int), false, false, ""},
+ equalsTestCase{func() {}, false, false, ""},
+ equalsTestCase{map[int]int{}, false, false, ""},
+ equalsTestCase{&someInt, false, false, ""},
+ equalsTestCase{[]int{}, false, false, ""},
+
+ // Illegal types
+ equalsTestCase{17, false, true, "which cannot be compared to nil"},
+ equalsTestCase{int8(17), false, true, "which cannot be compared to nil"},
+ equalsTestCase{uintptr(17), false, true, "which cannot be compared to nil"},
+ equalsTestCase{[...]int{}, false, true, "which cannot be compared to nil"},
+ equalsTestCase{"taco", false, true, "which cannot be compared to nil"},
+ equalsTestCase{equalsTestCase{}, false, true, "which cannot be compared to nil"},
+ equalsTestCase{unsafe.Pointer(&someInt), false, true, "which cannot be compared to nil"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Integer literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegerLiteral() {
+ // -2^30
+ matcher := Equals(-1073741824)
+ ExpectEq("-1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -1073741824.
+ equalsTestCase{-1073741824, true, false, ""},
+ equalsTestCase{-1073741824.0, true, false, ""},
+ equalsTestCase{-1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(-1073741824), true, false, ""},
+ equalsTestCase{int32(-1073741824), true, false, ""},
+ equalsTestCase{int64(-1073741824), true, false, ""},
+ equalsTestCase{float32(-1073741824), true, false, ""},
+ equalsTestCase{float64(-1073741824), true, false, ""},
+ equalsTestCase{complex64(-1073741824), true, false, ""},
+ equalsTestCase{complex128(-1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(-1073741824)), true, false, ""},
+
+ // Values that would be -1073741824 in two's complement.
+ equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-1073741823), false, false, ""},
+ equalsTestCase{int32(-1073741823), false, false, ""},
+ equalsTestCase{int64(-1073741823), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float64(-1073741824.1), false, false, ""},
+ equalsTestCase{float64(-1073741823.9), false, false, ""},
+ equalsTestCase{complex128(-1073741823), false, false, ""},
+ equalsTestCase{complex128(-1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegerLiteral() {
+ // 2^30
+ matcher := Equals(1073741824)
+ ExpectEq("1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1073741824.
+ equalsTestCase{1073741824, true, false, ""},
+ equalsTestCase{1073741824.0, true, false, ""},
+ equalsTestCase{1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(1073741824), true, false, ""},
+ equalsTestCase{uint(1073741824), true, false, ""},
+ equalsTestCase{int32(1073741824), true, false, ""},
+ equalsTestCase{int64(1073741824), true, false, ""},
+ equalsTestCase{uint32(1073741824), true, false, ""},
+ equalsTestCase{uint64(1073741824), true, false, ""},
+ equalsTestCase{float32(1073741824), true, false, ""},
+ equalsTestCase{float64(1073741824), true, false, ""},
+ equalsTestCase{complex64(1073741824), true, false, ""},
+ equalsTestCase{complex128(1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(1073741824)), true, false, ""},
+ equalsTestCase{interface{}(uint(1073741824)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1073741823), false, false, ""},
+ equalsTestCase{int32(1073741823), false, false, ""},
+ equalsTestCase{int64(1073741823), false, false, ""},
+ equalsTestCase{float64(1073741824.1), false, false, ""},
+ equalsTestCase{float64(1073741823.9), false, false, ""},
+ equalsTestCase{complex128(1073741823), false, false, ""},
+ equalsTestCase{complex128(1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Floating point literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegralFloatingPointLiteral() {
+ // -2^30
+ matcher := Equals(-1073741824.0)
+ ExpectEq("-1.073741824e+09", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -1073741824.
+ equalsTestCase{-1073741824, true, false, ""},
+ equalsTestCase{-1073741824.0, true, false, ""},
+ equalsTestCase{-1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(-1073741824), true, false, ""},
+ equalsTestCase{int32(-1073741824), true, false, ""},
+ equalsTestCase{int64(-1073741824), true, false, ""},
+ equalsTestCase{float32(-1073741824), true, false, ""},
+ equalsTestCase{float64(-1073741824), true, false, ""},
+ equalsTestCase{complex64(-1073741824), true, false, ""},
+ equalsTestCase{complex128(-1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(-1073741824)), true, false, ""},
+ equalsTestCase{interface{}(float64(-1073741824)), true, false, ""},
+
+ // Values that would be -1073741824 in two's complement.
+ equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-1073741823), false, false, ""},
+ equalsTestCase{int32(-1073741823), false, false, ""},
+ equalsTestCase{int64(-1073741823), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float64(-1073741824.1), false, false, ""},
+ equalsTestCase{float64(-1073741823.9), false, false, ""},
+ equalsTestCase{complex128(-1073741823), false, false, ""},
+ equalsTestCase{complex128(-1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralFloatingPointLiteral() {
+ // 2^30
+ matcher := Equals(1073741824.0)
+ ExpectEq("1.073741824e+09", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1073741824.
+ equalsTestCase{1073741824, true, false, ""},
+ equalsTestCase{1073741824.0, true, false, ""},
+ equalsTestCase{1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(1073741824), true, false, ""},
+ equalsTestCase{int32(1073741824), true, false, ""},
+ equalsTestCase{int64(1073741824), true, false, ""},
+ equalsTestCase{uint(1073741824), true, false, ""},
+ equalsTestCase{uint32(1073741824), true, false, ""},
+ equalsTestCase{uint64(1073741824), true, false, ""},
+ equalsTestCase{float32(1073741824), true, false, ""},
+ equalsTestCase{float64(1073741824), true, false, ""},
+ equalsTestCase{complex64(1073741824), true, false, ""},
+ equalsTestCase{complex128(1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(1073741824)), true, false, ""},
+ equalsTestCase{interface{}(float64(1073741824)), true, false, ""},
+
+ // Values that would be 1073741824 in two's complement.
+ equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1073741823), false, false, ""},
+ equalsTestCase{int32(1073741823), false, false, ""},
+ equalsTestCase{int64(1073741823), false, false, ""},
+ equalsTestCase{uint(1073741823), false, false, ""},
+ equalsTestCase{uint32(1073741823), false, false, ""},
+ equalsTestCase{uint64(1073741823), false, false, ""},
+ equalsTestCase{float64(1073741824.1), false, false, ""},
+ equalsTestCase{float64(1073741823.9), false, false, ""},
+ equalsTestCase{complex128(1073741823), false, false, ""},
+ equalsTestCase{complex128(1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonIntegralFloatingPointLiteral() {
+ matcher := Equals(17.1)
+ ExpectEq("17.1", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 17.1.
+ equalsTestCase{17.1, true, false, ""},
+ equalsTestCase{17.1, true, false, ""},
+ equalsTestCase{17.1 + 0i, true, false, ""},
+ equalsTestCase{float32(17.1), true, false, ""},
+ equalsTestCase{float64(17.1), true, false, ""},
+ equalsTestCase{complex64(17.1), true, false, ""},
+ equalsTestCase{complex128(17.1), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{17, false, false, ""},
+ equalsTestCase{17.2, false, false, ""},
+ equalsTestCase{18, false, false, ""},
+ equalsTestCase{int(17), false, false, ""},
+ equalsTestCase{int(18), false, false, ""},
+ equalsTestCase{int32(17), false, false, ""},
+ equalsTestCase{int64(17), false, false, ""},
+ equalsTestCase{uint(17), false, false, ""},
+ equalsTestCase{uint32(17), false, false, ""},
+ equalsTestCase{uint64(17), false, false, ""},
+ equalsTestCase{complex128(17.1 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// bool
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) False() {
+ matcher := Equals(false)
+ ExpectEq("false", matcher.Description())
+
+ cases := []equalsTestCase{
+ // bools
+ equalsTestCase{false, true, false, ""},
+ equalsTestCase{bool(false), true, false, ""},
+
+ equalsTestCase{true, false, false, ""},
+ equalsTestCase{bool(true), false, false, ""},
+
+ // Other types.
+ equalsTestCase{int(0), false, true, "which is not a bool"},
+ equalsTestCase{int8(0), false, true, "which is not a bool"},
+ equalsTestCase{int16(0), false, true, "which is not a bool"},
+ equalsTestCase{int32(0), false, true, "which is not a bool"},
+ equalsTestCase{int64(0), false, true, "which is not a bool"},
+ equalsTestCase{uint(0), false, true, "which is not a bool"},
+ equalsTestCase{uint8(0), false, true, "which is not a bool"},
+ equalsTestCase{uint16(0), false, true, "which is not a bool"},
+ equalsTestCase{uint32(0), false, true, "which is not a bool"},
+ equalsTestCase{uint64(0), false, true, "which is not a bool"},
+ equalsTestCase{uintptr(0), false, true, "which is not a bool"},
+ equalsTestCase{[...]int{}, false, true, "which is not a bool"},
+ equalsTestCase{make(chan int), false, true, "which is not a bool"},
+ equalsTestCase{func() {}, false, true, "which is not a bool"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a bool"},
+ equalsTestCase{&someInt, false, true, "which is not a bool"},
+ equalsTestCase{[]int{}, false, true, "which is not a bool"},
+ equalsTestCase{"taco", false, true, "which is not a bool"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a bool"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) True() {
+ matcher := Equals(true)
+ ExpectEq("true", matcher.Description())
+
+ cases := []equalsTestCase{
+ // bools
+ equalsTestCase{true, true, false, ""},
+ equalsTestCase{bool(true), true, false, ""},
+
+ equalsTestCase{false, false, false, ""},
+ equalsTestCase{bool(false), false, false, ""},
+
+ // Other types.
+ equalsTestCase{int(1), false, true, "which is not a bool"},
+ equalsTestCase{int8(1), false, true, "which is not a bool"},
+ equalsTestCase{int16(1), false, true, "which is not a bool"},
+ equalsTestCase{int32(1), false, true, "which is not a bool"},
+ equalsTestCase{int64(1), false, true, "which is not a bool"},
+ equalsTestCase{uint(1), false, true, "which is not a bool"},
+ equalsTestCase{uint8(1), false, true, "which is not a bool"},
+ equalsTestCase{uint16(1), false, true, "which is not a bool"},
+ equalsTestCase{uint32(1), false, true, "which is not a bool"},
+ equalsTestCase{uint64(1), false, true, "which is not a bool"},
+ equalsTestCase{uintptr(1), false, true, "which is not a bool"},
+ equalsTestCase{[...]int{}, false, true, "which is not a bool"},
+ equalsTestCase{make(chan int), false, true, "which is not a bool"},
+ equalsTestCase{func() {}, false, true, "which is not a bool"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a bool"},
+ equalsTestCase{&someInt, false, true, "which is not a bool"},
+ equalsTestCase{[]int{}, false, true, "which is not a bool"},
+ equalsTestCase{"taco", false, true, "which is not a bool"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a bool"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// int
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeInt() {
+ // -2^30
+ matcher := Equals(int(-1073741824))
+ ExpectEq("-1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -1073741824.
+ equalsTestCase{-1073741824, true, false, ""},
+ equalsTestCase{-1073741824.0, true, false, ""},
+ equalsTestCase{-1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(-1073741824), true, false, ""},
+ equalsTestCase{int32(-1073741824), true, false, ""},
+ equalsTestCase{int64(-1073741824), true, false, ""},
+ equalsTestCase{float32(-1073741824), true, false, ""},
+ equalsTestCase{float64(-1073741824), true, false, ""},
+ equalsTestCase{complex64(-1073741824), true, false, ""},
+ equalsTestCase{complex128(-1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(-1073741824)), true, false, ""},
+
+ // Values that would be -1073741824 in two's complement.
+ equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-1073741823), false, false, ""},
+ equalsTestCase{int32(-1073741823), false, false, ""},
+ equalsTestCase{int64(-1073741823), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float64(-1073741824.1), false, false, ""},
+ equalsTestCase{float64(-1073741823.9), false, false, ""},
+ equalsTestCase{complex128(-1073741823), false, false, ""},
+ equalsTestCase{complex128(-1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveInt() {
+ // 2^30
+ matcher := Equals(int(1073741824))
+ ExpectEq("1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1073741824.
+ equalsTestCase{1073741824, true, false, ""},
+ equalsTestCase{1073741824.0, true, false, ""},
+ equalsTestCase{1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(1073741824), true, false, ""},
+ equalsTestCase{uint(1073741824), true, false, ""},
+ equalsTestCase{int32(1073741824), true, false, ""},
+ equalsTestCase{int64(1073741824), true, false, ""},
+ equalsTestCase{uint32(1073741824), true, false, ""},
+ equalsTestCase{uint64(1073741824), true, false, ""},
+ equalsTestCase{float32(1073741824), true, false, ""},
+ equalsTestCase{float64(1073741824), true, false, ""},
+ equalsTestCase{complex64(1073741824), true, false, ""},
+ equalsTestCase{complex128(1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(1073741824)), true, false, ""},
+ equalsTestCase{interface{}(uint(1073741824)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1073741823), false, false, ""},
+ equalsTestCase{int32(1073741823), false, false, ""},
+ equalsTestCase{int64(1073741823), false, false, ""},
+ equalsTestCase{float64(1073741824.1), false, false, ""},
+ equalsTestCase{float64(1073741823.9), false, false, ""},
+ equalsTestCase{complex128(1073741823), false, false, ""},
+ equalsTestCase{complex128(1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// int8
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeInt8() {
+ matcher := Equals(int8(-17))
+ ExpectEq("-17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -17.
+ equalsTestCase{-17, true, false, ""},
+ equalsTestCase{-17.0, true, false, ""},
+ equalsTestCase{-17 + 0i, true, false, ""},
+ equalsTestCase{int(-17), true, false, ""},
+ equalsTestCase{int8(-17), true, false, ""},
+ equalsTestCase{int16(-17), true, false, ""},
+ equalsTestCase{int32(-17), true, false, ""},
+ equalsTestCase{int64(-17), true, false, ""},
+ equalsTestCase{float32(-17), true, false, ""},
+ equalsTestCase{float64(-17), true, false, ""},
+ equalsTestCase{complex64(-17), true, false, ""},
+ equalsTestCase{complex128(-17), true, false, ""},
+ equalsTestCase{interface{}(int(-17)), true, false, ""},
+
+ // Values that would be -17 in two's complement.
+ equalsTestCase{uint((1 << 32) - 17), false, false, ""},
+ equalsTestCase{uint8((1 << 8) - 17), false, false, ""},
+ equalsTestCase{uint16((1 << 16) - 17), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 17), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 17), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-16), false, false, ""},
+ equalsTestCase{int8(-16), false, false, ""},
+ equalsTestCase{int16(-16), false, false, ""},
+ equalsTestCase{int32(-16), false, false, ""},
+ equalsTestCase{int64(-16), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float32(-17.1), false, false, ""},
+ equalsTestCase{float32(-16.9), false, false, ""},
+ equalsTestCase{complex64(-16), false, false, ""},
+ equalsTestCase{complex64(-17 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr((1 << 32) - 17), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{-17}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{-17}, false, true, "which is not numeric"},
+ equalsTestCase{"-17", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroInt8() {
+ matcher := Equals(int8(0))
+ ExpectEq("0", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 0.
+ equalsTestCase{0, true, false, ""},
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(int(0)), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1), false, false, ""},
+ equalsTestCase{int8(1), false, false, ""},
+ equalsTestCase{int16(1), false, false, ""},
+ equalsTestCase{int32(1), false, false, ""},
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{float32(-0.1), false, false, ""},
+ equalsTestCase{float32(0.1), false, false, ""},
+ equalsTestCase{complex64(1), false, false, ""},
+ equalsTestCase{complex64(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{0}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{0}, false, true, "which is not numeric"},
+ equalsTestCase{"0", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveInt8() {
+ matcher := Equals(int8(17))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 17.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(17), true, false, ""},
+ equalsTestCase{int8(17), true, false, ""},
+ equalsTestCase{int16(17), true, false, ""},
+ equalsTestCase{int32(17), true, false, ""},
+ equalsTestCase{int64(17), true, false, ""},
+ equalsTestCase{float32(17), true, false, ""},
+ equalsTestCase{float64(17), true, false, ""},
+ equalsTestCase{complex64(17), true, false, ""},
+ equalsTestCase{complex128(17), true, false, ""},
+ equalsTestCase{interface{}(int(17)), true, false, ""},
+ equalsTestCase{uint(17), true, false, ""},
+ equalsTestCase{uint8(17), true, false, ""},
+ equalsTestCase{uint16(17), true, false, ""},
+ equalsTestCase{uint32(17), true, false, ""},
+ equalsTestCase{uint64(17), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(16), false, false, ""},
+ equalsTestCase{int8(16), false, false, ""},
+ equalsTestCase{int16(16), false, false, ""},
+ equalsTestCase{int32(16), false, false, ""},
+ equalsTestCase{int64(16), false, false, ""},
+ equalsTestCase{float32(16.9), false, false, ""},
+ equalsTestCase{float32(17.1), false, false, ""},
+ equalsTestCase{complex64(16), false, false, ""},
+ equalsTestCase{complex64(17 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(17), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{17}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{17}, false, true, "which is not numeric"},
+ equalsTestCase{"17", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// int16
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeInt16() {
+ matcher := Equals(int16(-32766))
+ ExpectEq("-32766", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -32766.
+ equalsTestCase{-32766, true, false, ""},
+ equalsTestCase{-32766.0, true, false, ""},
+ equalsTestCase{-32766 + 0i, true, false, ""},
+ equalsTestCase{int(-32766), true, false, ""},
+ equalsTestCase{int16(-32766), true, false, ""},
+ equalsTestCase{int32(-32766), true, false, ""},
+ equalsTestCase{int64(-32766), true, false, ""},
+ equalsTestCase{float32(-32766), true, false, ""},
+ equalsTestCase{float64(-32766), true, false, ""},
+ equalsTestCase{complex64(-32766), true, false, ""},
+ equalsTestCase{complex128(-32766), true, false, ""},
+ equalsTestCase{interface{}(int(-32766)), true, false, ""},
+
+ // Values that would be -32766 in two's complement.
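+		// For instance, (1 << 16) - 32766 == 32770, which is the 16-bit
+		// pattern of int16(-32766) reinterpreted as an unsigned value. The
+		// matcher is expected to compare numeric values, not bit patterns,
+		// so none of these should match.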
+ equalsTestCase{uint((1 << 32) - 32766), false, false, ""},
+ equalsTestCase{uint16((1 << 16) - 32766), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 32766), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 32766), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-16), false, false, ""},
+ equalsTestCase{int8(-16), false, false, ""},
+ equalsTestCase{int16(-16), false, false, ""},
+ equalsTestCase{int32(-16), false, false, ""},
+ equalsTestCase{int64(-16), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float32(-32766.1), false, false, ""},
+ equalsTestCase{float32(-32765.9), false, false, ""},
+ equalsTestCase{complex64(-32766.1), false, false, ""},
+ equalsTestCase{complex64(-32766 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr((1 << 32) - 32766), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{-32766}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{-32766}, false, true, "which is not numeric"},
+ equalsTestCase{"-32766", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroInt16() {
+ matcher := Equals(int16(0))
+ ExpectEq("0", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 0.
+ equalsTestCase{0, true, false, ""},
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(int(0)), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1), false, false, ""},
+ equalsTestCase{int8(1), false, false, ""},
+ equalsTestCase{int16(1), false, false, ""},
+ equalsTestCase{int32(1), false, false, ""},
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{float32(-0.1), false, false, ""},
+ equalsTestCase{float32(0.1), false, false, ""},
+ equalsTestCase{complex64(1), false, false, ""},
+ equalsTestCase{complex64(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{0}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{0}, false, true, "which is not numeric"},
+ equalsTestCase{"0", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveInt16() {
+ matcher := Equals(int16(32765))
+ ExpectEq("32765", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 32765.
+ equalsTestCase{32765, true, false, ""},
+ equalsTestCase{32765.0, true, false, ""},
+ equalsTestCase{32765 + 0i, true, false, ""},
+ equalsTestCase{int(32765), true, false, ""},
+ equalsTestCase{int16(32765), true, false, ""},
+ equalsTestCase{int32(32765), true, false, ""},
+ equalsTestCase{int64(32765), true, false, ""},
+ equalsTestCase{float32(32765), true, false, ""},
+ equalsTestCase{float64(32765), true, false, ""},
+ equalsTestCase{complex64(32765), true, false, ""},
+ equalsTestCase{complex128(32765), true, false, ""},
+ equalsTestCase{interface{}(int(32765)), true, false, ""},
+ equalsTestCase{uint(32765), true, false, ""},
+ equalsTestCase{uint16(32765), true, false, ""},
+ equalsTestCase{uint32(32765), true, false, ""},
+ equalsTestCase{uint64(32765), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(32764), false, false, ""},
+ equalsTestCase{int16(32764), false, false, ""},
+ equalsTestCase{int32(32764), false, false, ""},
+ equalsTestCase{int64(32764), false, false, ""},
+ equalsTestCase{float32(32764.9), false, false, ""},
+ equalsTestCase{float32(32765.1), false, false, ""},
+ equalsTestCase{complex64(32765.9), false, false, ""},
+ equalsTestCase{complex64(32765 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(32765), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{32765}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{32765}, false, true, "which is not numeric"},
+ equalsTestCase{"32765", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// int32
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeInt32() {
+ // -2^30
+ matcher := Equals(int32(-1073741824))
+ ExpectEq("-1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -1073741824.
+ equalsTestCase{-1073741824, true, false, ""},
+ equalsTestCase{-1073741824.0, true, false, ""},
+ equalsTestCase{-1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(-1073741824), true, false, ""},
+ equalsTestCase{int32(-1073741824), true, false, ""},
+ equalsTestCase{int64(-1073741824), true, false, ""},
+ equalsTestCase{float32(-1073741824), true, false, ""},
+ equalsTestCase{float64(-1073741824), true, false, ""},
+ equalsTestCase{complex64(-1073741824), true, false, ""},
+ equalsTestCase{complex128(-1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(-1073741824)), true, false, ""},
+
+ // Values that would be -1073741824 in two's complement.
+ equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-1073741823), false, false, ""},
+ equalsTestCase{int32(-1073741823), false, false, ""},
+ equalsTestCase{int64(-1073741823), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float64(-1073741824.1), false, false, ""},
+ equalsTestCase{float64(-1073741823.9), false, false, ""},
+ equalsTestCase{complex128(-1073741823), false, false, ""},
+ equalsTestCase{complex128(-1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveInt32() {
+ // 2^30
+ matcher := Equals(int32(1073741824))
+ ExpectEq("1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1073741824.
+ equalsTestCase{1073741824, true, false, ""},
+ equalsTestCase{1073741824.0, true, false, ""},
+ equalsTestCase{1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(1073741824), true, false, ""},
+ equalsTestCase{uint(1073741824), true, false, ""},
+ equalsTestCase{int32(1073741824), true, false, ""},
+ equalsTestCase{int64(1073741824), true, false, ""},
+ equalsTestCase{uint32(1073741824), true, false, ""},
+ equalsTestCase{uint64(1073741824), true, false, ""},
+ equalsTestCase{float32(1073741824), true, false, ""},
+ equalsTestCase{float64(1073741824), true, false, ""},
+ equalsTestCase{complex64(1073741824), true, false, ""},
+ equalsTestCase{complex128(1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(1073741824)), true, false, ""},
+ equalsTestCase{interface{}(uint(1073741824)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1073741823), false, false, ""},
+ equalsTestCase{int32(1073741823), false, false, ""},
+ equalsTestCase{int64(1073741823), false, false, ""},
+ equalsTestCase{float64(1073741824.1), false, false, ""},
+ equalsTestCase{float64(1073741823.9), false, false, ""},
+ equalsTestCase{complex128(1073741823), false, false, ""},
+ equalsTestCase{complex128(1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// int64
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeInt64() {
+ // -2^40
+ matcher := Equals(int64(-1099511627776))
+ ExpectEq("-1099511627776", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -1099511627776.
+ equalsTestCase{-1099511627776.0, true, false, ""},
+ equalsTestCase{-1099511627776 + 0i, true, false, ""},
+ equalsTestCase{int64(-1099511627776), true, false, ""},
+ equalsTestCase{float32(-1099511627776), true, false, ""},
+ equalsTestCase{float64(-1099511627776), true, false, ""},
+ equalsTestCase{complex64(-1099511627776), true, false, ""},
+ equalsTestCase{complex128(-1099511627776), true, false, ""},
+ equalsTestCase{interface{}(int64(-1099511627776)), true, false, ""},
+
+ // Values that would be -1099511627776 in two's complement.
+ equalsTestCase{uint64((1 << 64) - 1099511627776), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int64(-1099511627775), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float64(-1099511627776.1), false, false, ""},
+ equalsTestCase{float64(-1099511627775.9), false, false, ""},
+ equalsTestCase{complex128(-1099511627775), false, false, ""},
+ equalsTestCase{complex128(-1099511627776 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveInt64() {
+ // 2^40
+ matcher := Equals(int64(1099511627776))
+ ExpectEq("1099511627776", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1099511627776.
+ equalsTestCase{1099511627776.0, true, false, ""},
+ equalsTestCase{1099511627776 + 0i, true, false, ""},
+ equalsTestCase{int64(1099511627776), true, false, ""},
+ equalsTestCase{uint64(1099511627776), true, false, ""},
+ equalsTestCase{float32(1099511627776), true, false, ""},
+ equalsTestCase{float64(1099511627776), true, false, ""},
+ equalsTestCase{complex64(1099511627776), true, false, ""},
+ equalsTestCase{complex128(1099511627776), true, false, ""},
+ equalsTestCase{interface{}(int64(1099511627776)), true, false, ""},
+ equalsTestCase{interface{}(uint64(1099511627776)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(1099511627775), false, false, ""},
+ equalsTestCase{uint64(1099511627775), false, false, ""},
+ equalsTestCase{float64(1099511627776.1), false, false, ""},
+ equalsTestCase{float64(1099511627775.9), false, false, ""},
+ equalsTestCase{complex128(1099511627775), false, false, ""},
+ equalsTestCase{complex128(1099511627776 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Int64NotExactlyRepresentableBySinglePrecision() {
+	// Single-precision floats don't have enough bits to represent the
+	// integers near this one distinctly: every integer in [2^25-1, 2^25+2]
+	// rounds to the same float32 value, exactly 2^25, so all of them should
+	// be treated as equivalent when the comparison is done in single
+	// precision.
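+	// A worked check of that claim, assuming IEEE-754 binary32 (24-bit
+	// significand): adjacent float32 values are 2 apart in [2^24, 2^25) and
+	// 4 apart in [2^25, 2^26), so
+	//
+	//	float32(33554430) == 33554430  // 2^25-2 is exactly representable
+	//	float32(33554431) == 33554432  // 2^25-1 ties, rounds to even
+	//	float32(33554433) == 33554432  // 2^25+1 rounds to nearest
+	//	float32(33554434) == 33554432  // 2^25+2 ties, rounds to even
+	//	float32(33554435) == 33554436  // 2^25+3 rounds to nearest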
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(int64(kTwoTo25 + 1))
+ ExpectEq("33554433", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Int64NotExactlyRepresentableByDoublePrecision() {
+	// Double-precision floats don't have enough bits to represent the
+	// integers near this one distinctly: every integer in [2^54-1, 2^54+2]
+	// rounds to the same float64 value, exactly 2^54, so all of them should
+	// be treated as equivalent when the comparison is done in double
+	// precision.
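+	// A worked check, as in the single-precision test above but with
+	// IEEE-754 binary64 (53-bit significand): adjacent float64 values are 4
+	// apart in [2^54, 2^55), so 2^54-1, 2^54+1, and 2^54+2 all round to
+	// exactly 2^54, while 2^54-2 is exactly representable and 2^54+3 rounds
+	// up to 2^54+4.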
+ const kTwoTo54 = 1 << 54
+ matcher := Equals(int64(kTwoTo54 + 1))
+ ExpectEq("18014398509481985", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// uint
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) SmallUint() {
+ const kExpected = 17
+ matcher := Equals(uint(kExpected))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int8(kExpected), true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint8(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{kExpected + 1, false, false, ""},
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int8(kExpected + 1), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeUint() {
+ const kExpected = (1 << 16) + 17
+ matcher := Equals(uint(kExpected))
+ ExpectEq("65553", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{65553, true, false, ""},
+ equalsTestCase{65553.0, true, false, ""},
+ equalsTestCase{65553 + 0i, true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{int16(17), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(17), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) UintNotExactlyRepresentableBySinglePrecision() {
+	// Single-precision floats don't have enough bits to represent the
+	// integers near this one distinctly: every integer in [2^25-1, 2^25+2]
+	// rounds to the same float32 value, exactly 2^25, so all of them should
+	// be treated as equivalent when the comparison is done in single
+	// precision.
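+	// (See the worked rounding check in
+	// Int64NotExactlyRepresentableBySinglePrecision above.)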
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(uint(kTwoTo25 + 1))
+ ExpectEq("33554433", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// uint8
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) SmallUint8() {
+ const kExpected = 17
+ matcher := Equals(uint8(kExpected))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int8(kExpected), true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint8(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{kExpected + 1, false, false, ""},
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int8(kExpected + 1), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// uint16
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) SmallUint16() {
+ const kExpected = 17
+ matcher := Equals(uint16(kExpected))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int8(kExpected), true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint8(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{kExpected + 1, false, false, ""},
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int8(kExpected + 1), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeUint16() {
+ const kExpected = (1 << 8) + 17
+ matcher := Equals(uint16(kExpected))
+ ExpectEq("273", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{273, true, false, ""},
+ equalsTestCase{273.0, true, false, ""},
+ equalsTestCase{273 + 0i, true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{int8(17), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(17), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// uint32
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) SmallUint32() {
+ const kExpected = 17
+ matcher := Equals(uint32(kExpected))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int8(kExpected), true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint8(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{kExpected + 1, false, false, ""},
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int8(kExpected + 1), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeUint32() {
+ const kExpected = (1 << 16) + 17
+ matcher := Equals(uint32(kExpected))
+ ExpectEq("65553", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{65553, true, false, ""},
+ equalsTestCase{65553.0, true, false, ""},
+ equalsTestCase{65553 + 0i, true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{int16(17), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(17), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Uint32NotExactlyRepresentableBySinglePrecision() {
+	// Single-precision floats don't have enough bits to represent the
+	// integers near this one distinctly: every integer in [2^25-1, 2^25+2]
+	// rounds to the same float32 value, exactly 2^25, so all of them should
+	// be treated as equivalent when the comparison is done in single
+	// precision.
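+	// (See the worked rounding check in
+	// Int64NotExactlyRepresentableBySinglePrecision above.)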
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(uint32(kTwoTo25 + 1))
+ ExpectEq("33554433", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// uint64
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) SmallUint64() {
+ const kExpected = 17
+ matcher := Equals(uint64(kExpected))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int8(kExpected), true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint8(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{kExpected + 1, false, false, ""},
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int8(kExpected + 1), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeUint64() {
+ const kExpected = (1 << 32) + 17
+ matcher := Equals(uint64(kExpected))
+ ExpectEq("4294967313", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{4294967313.0, true, false, ""},
+ equalsTestCase{4294967313 + 0i, true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{int(17), false, false, ""},
+ equalsTestCase{int32(17), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(17), false, false, ""},
+ equalsTestCase{uint32(17), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Uint64NotExactlyRepresentableBySinglePrecision() {
+	// Single-precision floats don't have enough bits to represent the
+	// integers near this one distinctly: every integer in [2^25-1, 2^25+2]
+	// rounds to the same float32 value, exactly 2^25, so all of them should
+	// be treated as equivalent when the comparison is done in single
+	// precision.
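+	// (See the worked rounding check in
+	// Int64NotExactlyRepresentableBySinglePrecision above.)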
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(uint64(kTwoTo25 + 1))
+ ExpectEq("33554433", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Uint64NotExactlyRepresentableByDoublePrecision() {
+	// Double-precision floats don't have enough bits to represent the
+	// integers near this one distinctly: every integer in [2^54-1, 2^54+2]
+	// rounds to the same float64 value, exactly 2^54, so all of them should
+	// be treated as equivalent when the comparison is done in double
+	// precision.
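+	// (See the worked rounding check in
+	// Int64NotExactlyRepresentableByDoublePrecision above.)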
+ const kTwoTo54 = 1 << 54
+ matcher := Equals(uint64(kTwoTo54 + 1))
+ ExpectEq("18014398509481985", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// uintptr
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NilUintptr() {
+ var ptr1 uintptr
+ var ptr2 uintptr
+
+ matcher := Equals(ptr1)
+ ExpectEq("0", matcher.Description())
+
+ cases := []equalsTestCase{
+ // uintptrs
+ equalsTestCase{ptr1, true, false, ""},
+ equalsTestCase{ptr2, true, false, ""},
+ equalsTestCase{uintptr(0), true, false, ""},
+ equalsTestCase{uintptr(17), false, false, ""},
+
+ // Other types.
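+		// The matcher is expected to be type-strict for uintptr:
+		// numerically equal values of any other type are rejected rather
+		// than converted.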
+ equalsTestCase{0, false, true, "which is not a uintptr"},
+ equalsTestCase{bool(false), false, true, "which is not a uintptr"},
+ equalsTestCase{int(0), false, true, "which is not a uintptr"},
+ equalsTestCase{int8(0), false, true, "which is not a uintptr"},
+ equalsTestCase{int16(0), false, true, "which is not a uintptr"},
+ equalsTestCase{int32(0), false, true, "which is not a uintptr"},
+ equalsTestCase{int64(0), false, true, "which is not a uintptr"},
+ equalsTestCase{uint(0), false, true, "which is not a uintptr"},
+ equalsTestCase{uint8(0), false, true, "which is not a uintptr"},
+ equalsTestCase{uint16(0), false, true, "which is not a uintptr"},
+ equalsTestCase{uint32(0), false, true, "which is not a uintptr"},
+ equalsTestCase{uint64(0), false, true, "which is not a uintptr"},
+ equalsTestCase{true, false, true, "which is not a uintptr"},
+ equalsTestCase{[...]int{}, false, true, "which is not a uintptr"},
+ equalsTestCase{make(chan int), false, true, "which is not a uintptr"},
+ equalsTestCase{func() {}, false, true, "which is not a uintptr"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a uintptr"},
+ equalsTestCase{&someInt, false, true, "which is not a uintptr"},
+ equalsTestCase{[]int{}, false, true, "which is not a uintptr"},
+ equalsTestCase{"taco", false, true, "which is not a uintptr"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a uintptr"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonNilUintptr() {
+ matcher := Equals(uintptr(17))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // uintptrs
+ equalsTestCase{uintptr(17), true, false, ""},
+ equalsTestCase{uintptr(16), false, false, ""},
+ equalsTestCase{uintptr(0), false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a uintptr"},
+ equalsTestCase{bool(false), false, true, "which is not a uintptr"},
+ equalsTestCase{int(0), false, true, "which is not a uintptr"},
+ equalsTestCase{int8(0), false, true, "which is not a uintptr"},
+ equalsTestCase{int16(0), false, true, "which is not a uintptr"},
+ equalsTestCase{int32(0), false, true, "which is not a uintptr"},
+ equalsTestCase{int64(0), false, true, "which is not a uintptr"},
+ equalsTestCase{uint(0), false, true, "which is not a uintptr"},
+ equalsTestCase{uint8(0), false, true, "which is not a uintptr"},
+ equalsTestCase{uint16(0), false, true, "which is not a uintptr"},
+ equalsTestCase{uint32(0), false, true, "which is not a uintptr"},
+ equalsTestCase{uint64(0), false, true, "which is not a uintptr"},
+ equalsTestCase{true, false, true, "which is not a uintptr"},
+ equalsTestCase{[...]int{}, false, true, "which is not a uintptr"},
+ equalsTestCase{make(chan int), false, true, "which is not a uintptr"},
+ equalsTestCase{func() {}, false, true, "which is not a uintptr"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a uintptr"},
+ equalsTestCase{&someInt, false, true, "which is not a uintptr"},
+ equalsTestCase{[]int{}, false, true, "which is not a uintptr"},
+ equalsTestCase{"taco", false, true, "which is not a uintptr"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a uintptr"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// float32
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegralFloat32() {
+ matcher := Equals(float32(-32769))
+ ExpectEq("-32769", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -32769.
+ equalsTestCase{-32769.0, true, false, ""},
+ equalsTestCase{-32769 + 0i, true, false, ""},
+ equalsTestCase{int32(-32769), true, false, ""},
+ equalsTestCase{int64(-32769), true, false, ""},
+ equalsTestCase{float32(-32769), true, false, ""},
+ equalsTestCase{float64(-32769), true, false, ""},
+ equalsTestCase{complex64(-32769), true, false, ""},
+ equalsTestCase{complex128(-32769), true, false, ""},
+ equalsTestCase{interface{}(float32(-32769)), true, false, ""},
+ equalsTestCase{interface{}(int64(-32769)), true, false, ""},
+
+ // Values that would be -32769 in two's complement.
+ equalsTestCase{uint64((1 << 64) - 32769), false, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(-32770), false, false, ""},
+ equalsTestCase{float32(-32769.1), false, false, ""},
+ equalsTestCase{float32(-32768.9), false, false, ""},
+ equalsTestCase{float64(-32769.1), false, false, ""},
+ equalsTestCase{float64(-32768.9), false, false, ""},
+ equalsTestCase{complex128(-32768), false, false, ""},
+ equalsTestCase{complex128(-32769 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NegativeNonIntegralFloat32() {
+ matcher := Equals(float32(-32769.1))
+ ExpectEq("-32769.1", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -32769.1.
+ equalsTestCase{-32769.1, true, false, ""},
+ equalsTestCase{-32769.1 + 0i, true, false, ""},
+ equalsTestCase{float32(-32769.1), true, false, ""},
+ equalsTestCase{float64(-32769.1), true, false, ""},
+ equalsTestCase{complex64(-32769.1), true, false, ""},
+ equalsTestCase{complex128(-32769.1), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int32(-32769), false, false, ""},
+ equalsTestCase{int32(-32770), false, false, ""},
+ equalsTestCase{int64(-32769), false, false, ""},
+ equalsTestCase{int64(-32770), false, false, ""},
+ equalsTestCase{float32(-32769.2), false, false, ""},
+ equalsTestCase{float32(-32769.0), false, false, ""},
+ equalsTestCase{float64(-32769.2), false, false, ""},
+ equalsTestCase{complex128(-32769.1 + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeNegativeFloat32() {
+ const kExpected = -1 * (1 << 65)
+ matcher := Equals(float32(kExpected))
+ ExpectEq("-3.689349e+19", matcher.Description())
+
+ floatExpected := float32(kExpected)
+ castedInt := int64(floatExpected)
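+	// Note: floatExpected is roughly -3.69e+19, far outside the int64
+	// range, so per the Go spec the result of this conversion is
+	// implementation-dependent. The test case below only needs castedInt to
+	// differ from the expected value, which holds for any int64, since no
+	// int64 can equal -2^65.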
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroFloat32() {
+ matcher := Equals(float32(0))
+ ExpectEq("0", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of zero.
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(float32(0)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{int64(-1), false, false, ""},
+ equalsTestCase{float32(1), false, false, ""},
+ equalsTestCase{float32(-1), false, false, ""},
+ equalsTestCase{complex128(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralFloat32() {
+ matcher := Equals(float32(32769))
+ ExpectEq("32769", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 32769.
+ equalsTestCase{32769.0, true, false, ""},
+ equalsTestCase{32769 + 0i, true, false, ""},
+ equalsTestCase{int(32769), true, false, ""},
+ equalsTestCase{int32(32769), true, false, ""},
+ equalsTestCase{int64(32769), true, false, ""},
+ equalsTestCase{uint(32769), true, false, ""},
+ equalsTestCase{uint32(32769), true, false, ""},
+ equalsTestCase{uint64(32769), true, false, ""},
+ equalsTestCase{float32(32769), true, false, ""},
+ equalsTestCase{float64(32769), true, false, ""},
+ equalsTestCase{complex64(32769), true, false, ""},
+ equalsTestCase{complex128(32769), true, false, ""},
+ equalsTestCase{interface{}(float32(32769)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(32770), false, false, ""},
+ equalsTestCase{uint64(32770), false, false, ""},
+ equalsTestCase{float32(32769.1), false, false, ""},
+ equalsTestCase{float32(32768.9), false, false, ""},
+ equalsTestCase{float64(32769.1), false, false, ""},
+ equalsTestCase{float64(32768.9), false, false, ""},
+ equalsTestCase{complex128(32768), false, false, ""},
+ equalsTestCase{complex128(32769 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveNonIntegralFloat32() {
+ matcher := Equals(float32(32769.1))
+ ExpectEq("32769.1", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 32769.1.
+ equalsTestCase{32769.1, true, false, ""},
+ equalsTestCase{32769.1 + 0i, true, false, ""},
+ equalsTestCase{float32(32769.1), true, false, ""},
+ equalsTestCase{float64(32769.1), true, false, ""},
+ equalsTestCase{complex64(32769.1), true, false, ""},
+ equalsTestCase{complex128(32769.1), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int32(32769), false, false, ""},
+ equalsTestCase{int32(32770), false, false, ""},
+ equalsTestCase{uint64(32769), false, false, ""},
+ equalsTestCase{uint64(32770), false, false, ""},
+ equalsTestCase{float32(32769.2), false, false, ""},
+ equalsTestCase{float32(32769.0), false, false, ""},
+ equalsTestCase{float64(32769.2), false, false, ""},
+ equalsTestCase{complex128(32769.1 + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargePositiveFloat32() {
+ const kExpected = 1 << 65
+ matcher := Equals(float32(kExpected))
+ ExpectEq("3.689349e+19", matcher.Description())
+
+ floatExpected := float32(kExpected)
+ castedInt := uint64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{uint64(0), false, false, ""},
+ equalsTestCase{uint64(math.MaxUint64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Float32AboveExactIntegerRange() {
+	// Single-precision floats don't have enough bits to represent the integers
+	// near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+	// and should be treated as equivalent when floats are in the mix. (A sketch
+	// after this function demonstrates the rounding directly.)
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(float32(kTwoTo25 + 1))
+ ExpectEq("3.3554432e+07", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 3), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
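+
+// float32CollapseDemo is an illustrative sketch, not part of the upstream
+// suite: it shows directly that the integers bracketing 2^25 used above all
+// round to the same float32, which is why the matcher treats them as equal.
+func float32CollapseDemo() {
+	const kTwoTo25 = 1 << 25
+	a := float32(kTwoTo25 - 1) // halfway case, rounds up to 2^25 (ties-to-even)
+	b := float32(kTwoTo25 + 2) // halfway case, rounds down to 2^25 (ties-to-even)
+	fmt.Println(a == b, a == float32(kTwoTo25)) // true true
+}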
+
+////////////////////////////////////////////////////////////////////////
+// float64
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegralFloat64() {
+ const kExpected = -(1 << 50)
+ matcher := Equals(float64(kExpected))
+ ExpectEq("-1.125899906842624e+15", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{-1125899906842624.0, true, false, ""},
+ equalsTestCase{-1125899906842624.0 + 0i, true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+		// Values whose two's-complement bit pattern matches kExpected (see the
+		// sketch after this function).
+ equalsTestCase{uint64((1 << 64) + kExpected), false, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
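+
+// twosComplementDemo is an illustrative sketch, not part of the upstream
+// suite: uint64((1<<64)+kExpected) in the case above is exactly the unsigned
+// integer sharing kExpected's two's-complement bit pattern, so it must not
+// compare equal to the negative value itself.
+func twosComplementDemo() {
+	const kExpected = -(1 << 50)
+	asBits := uint64((1 << 64) + kExpected)         // 2^64 - 2^50
+	fmt.Println(asBits == uint64(int64(kExpected))) // true: same bit pattern
+	fmt.Println(int64(asBits) == int64(kExpected))  // true: reinterpreted back
+}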
+
+func (t *EqualsTest) NegativeNonIntegralFloat64() {
+ const kTwoTo50 = 1 << 50
+ const kExpected = -kTwoTo50 - 0.25
+
+ matcher := Equals(float64(kExpected))
+ ExpectEq("-1.1258999068426242e+15", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(-kTwoTo50), false, false, ""},
+ equalsTestCase{int64(-kTwoTo50 - 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeNegativeFloat64() {
+ const kExpected = -1 * (1 << 65)
+ matcher := Equals(float64(kExpected))
+ ExpectEq("-3.6893488147419103e+19", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := int64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroFloat64() {
+ matcher := Equals(float64(0))
+ ExpectEq("0", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of zero.
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(float32(0)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{int64(-1), false, false, ""},
+ equalsTestCase{float32(1), false, false, ""},
+ equalsTestCase{float32(-1), false, false, ""},
+ equalsTestCase{complex128(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralFloat64() {
+ const kExpected = 1 << 50
+ matcher := Equals(float64(kExpected))
+ ExpectEq("1.125899906842624e+15", matcher.Description())
+
+ cases := []equalsTestCase{
+		// Various types of the expected value.
+ equalsTestCase{1125899906842624.0, true, false, ""},
+ equalsTestCase{1125899906842624.0 + 0i, true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveNonIntegralFloat64() {
+ const kTwoTo50 = 1 << 50
+ const kExpected = kTwoTo50 + 0.25
+ matcher := Equals(float64(kExpected))
+ ExpectEq("1.1258999068426242e+15", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kTwoTo50), false, false, ""},
+ equalsTestCase{int64(kTwoTo50 - 1), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargePositiveFloat64() {
+ const kExpected = 1 << 65
+ matcher := Equals(float64(kExpected))
+ ExpectEq("3.6893488147419103e+19", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := uint64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{uint64(0), false, false, ""},
+ equalsTestCase{uint64(math.MaxUint64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Float64AboveExactIntegerRange() {
+	// Double-precision floats don't have enough bits to represent the integers
+	// near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+	// and should be treated as equivalent when floats are in the mix. (A sketch
+	// after this function demonstrates the rounding directly.)
+ const kTwoTo54 = 1 << 54
+ matcher := Equals(float64(kTwoTo54 + 1))
+ ExpectEq("1.8014398509481984e+16", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
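+
+// float64CollapseDemo is an illustrative sketch, not part of the upstream
+// suite: the same collapse as float32CollapseDemo, at double precision, where
+// the integers bracketing 2^54 used above all round to the same float64.
+func float64CollapseDemo() {
+	const kTwoTo54 = 1 << 54
+	a := float64(kTwoTo54 - 1) // halfway case, rounds up to 2^54 (ties-to-even)
+	b := float64(kTwoTo54 + 2) // halfway case, rounds down to 2^54 (ties-to-even)
+	fmt.Println(a == b, a == float64(kTwoTo54)) // true true
+}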
+
+////////////////////////////////////////////////////////////////////////
+// complex64
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegralComplex64() {
+ const kExpected = -32769
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(-32769+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{-32769.0, true, false, ""},
+ equalsTestCase{-32769.0 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+		// Values whose two's-complement bit pattern matches kExpected.
+ equalsTestCase{uint32((1 << 32) + kExpected), false, false, ""},
+ equalsTestCase{uint64((1 << 64) + kExpected), false, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NegativeNonIntegralComplex64() {
+ const kTwoTo20 = 1 << 20
+ const kExpected = -kTwoTo20 - 0.25
+
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(-1.0485762e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(-kTwoTo20), false, false, ""},
+ equalsTestCase{int(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{int32(-kTwoTo20), false, false, ""},
+ equalsTestCase{int32(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{int64(-kTwoTo20), false, false, ""},
+ equalsTestCase{int64(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex64(kExpected - 0.75), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 0.75), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeNegativeComplex64() {
+ const kExpected = -1 * (1 << 65)
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(-3.689349e+19+0i)", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := int64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroComplex64() {
+ matcher := Equals(complex64(0))
+ ExpectEq("(0+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of zero.
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(float32(0)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{int64(-1), false, false, ""},
+ equalsTestCase{float32(1), false, false, ""},
+ equalsTestCase{float32(-1), false, false, ""},
+ equalsTestCase{float64(1), false, false, ""},
+ equalsTestCase{float64(-1), false, false, ""},
+ equalsTestCase{complex64(0 + 2i), false, false, ""},
+ equalsTestCase{complex128(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralComplex64() {
+ const kExpected = 1 << 20
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(1.048576e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+		// Various types of the expected value.
+ equalsTestCase{1048576.0, true, false, ""},
+ equalsTestCase{1048576.0 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveNonIntegralComplex64() {
+ const kTwoTo20 = 1 << 20
+ const kExpected = kTwoTo20 + 0.25
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(1.0485762e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kTwoTo20), false, false, ""},
+ equalsTestCase{int64(kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{uint64(kTwoTo20), false, false, ""},
+ equalsTestCase{uint64(kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{float32(kExpected - 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargePositiveComplex64() {
+ const kExpected = 1 << 65
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(3.689349e+19+0i)", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := uint64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{uint64(0), false, false, ""},
+ equalsTestCase{uint64(math.MaxUint64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Complex64AboveExactIntegerRange() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(complex64(kTwoTo25 + 1))
+ ExpectEq("(3.3554432e+07+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 3), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Complex64WithNonZeroImaginaryPart() {
+ const kRealPart = 17
+ const kImagPart = 0.25i
+ const kExpected = kRealPart + kImagPart
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(17+0.25i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kRealPart + kImagPart, true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(kRealPart), false, false, ""},
+ equalsTestCase{int8(kRealPart), false, false, ""},
+ equalsTestCase{int16(kRealPart), false, false, ""},
+ equalsTestCase{int32(kRealPart), false, false, ""},
+ equalsTestCase{int64(kRealPart), false, false, ""},
+ equalsTestCase{uint(kRealPart), false, false, ""},
+ equalsTestCase{uint8(kRealPart), false, false, ""},
+ equalsTestCase{uint16(kRealPart), false, false, ""},
+ equalsTestCase{uint32(kRealPart), false, false, ""},
+ equalsTestCase{uint64(kRealPart), false, false, ""},
+ equalsTestCase{float32(kRealPart), false, false, ""},
+ equalsTestCase{float64(kRealPart), false, false, ""},
+ equalsTestCase{complex64(kRealPart), false, false, ""},
+ equalsTestCase{complex64(kRealPart + kImagPart + 0.5), false, false, ""},
+ equalsTestCase{complex64(kRealPart + kImagPart + 0.5i), false, false, ""},
+ equalsTestCase{complex128(kRealPart), false, false, ""},
+ equalsTestCase{complex128(kRealPart + kImagPart + 0.5), false, false, ""},
+ equalsTestCase{complex128(kRealPart + kImagPart + 0.5i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// complex128
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegralComplex128() {
+ const kExpected = -32769
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(-32769+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{-32769.0, true, false, ""},
+ equalsTestCase{-32769.0 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+		// Values whose two's-complement bit pattern matches kExpected.
+ equalsTestCase{uint32((1 << 32) + kExpected), false, false, ""},
+ equalsTestCase{uint64((1 << 64) + kExpected), false, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NegativeNonIntegralComplex128() {
+ const kTwoTo20 = 1 << 20
+ const kExpected = -kTwoTo20 - 0.25
+
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(-1.04857625e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(-kTwoTo20), false, false, ""},
+ equalsTestCase{int(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{int32(-kTwoTo20), false, false, ""},
+ equalsTestCase{int32(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{int64(-kTwoTo20), false, false, ""},
+ equalsTestCase{int64(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex64(kExpected - 0.75), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 0.75), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeNegativeComplex128() {
+ const kExpected = -1 * (1 << 65)
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(-3.6893488147419103e+19+0i)", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := int64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroComplex128() {
+ matcher := Equals(complex128(0))
+ ExpectEq("(0+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of zero.
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(float32(0)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{int64(-1), false, false, ""},
+ equalsTestCase{float32(1), false, false, ""},
+ equalsTestCase{float32(-1), false, false, ""},
+ equalsTestCase{float64(1), false, false, ""},
+ equalsTestCase{float64(-1), false, false, ""},
+ equalsTestCase{complex64(0 + 2i), false, false, ""},
+ equalsTestCase{complex128(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralComplex128() {
+ const kExpected = 1 << 20
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(1.048576e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+		// Various types of the expected value.
+ equalsTestCase{1048576.0, true, false, ""},
+ equalsTestCase{1048576.0 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{uintptr(0), false, true, "which is not numeric"},
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveNonIntegralComplex128() {
+ const kTwoTo20 = 1 << 20
+ const kExpected = kTwoTo20 + 0.25
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(1.04857625e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kTwoTo20), false, false, ""},
+ equalsTestCase{int64(kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{uint64(kTwoTo20), false, false, ""},
+ equalsTestCase{uint64(kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{float32(kExpected - 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargePositiveComplex128() {
+ const kExpected = 1 << 65
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(3.6893488147419103e+19+0i)", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := uint64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{uint64(0), false, false, ""},
+ equalsTestCase{uint64(math.MaxUint64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Complex128AboveExactIntegerRange() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := Equals(complex128(kTwoTo54 + 1))
+ ExpectEq("(1.8014398509481984e+16+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Complex128WithNonZeroImaginaryPart() {
+ const kRealPart = 17
+ const kImagPart = 0.25i
+ const kExpected = kRealPart + kImagPart
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(17+0.25i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kRealPart + kImagPart, true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(kRealPart), false, false, ""},
+ equalsTestCase{int8(kRealPart), false, false, ""},
+ equalsTestCase{int16(kRealPart), false, false, ""},
+ equalsTestCase{int32(kRealPart), false, false, ""},
+ equalsTestCase{int64(kRealPart), false, false, ""},
+ equalsTestCase{uint(kRealPart), false, false, ""},
+ equalsTestCase{uint8(kRealPart), false, false, ""},
+ equalsTestCase{uint16(kRealPart), false, false, ""},
+ equalsTestCase{uint32(kRealPart), false, false, ""},
+ equalsTestCase{uint64(kRealPart), false, false, ""},
+ equalsTestCase{float32(kRealPart), false, false, ""},
+ equalsTestCase{float64(kRealPart), false, false, ""},
+ equalsTestCase{complex64(kRealPart), false, false, ""},
+ equalsTestCase{complex64(kRealPart + kImagPart + 0.5), false, false, ""},
+ equalsTestCase{complex64(kRealPart + kImagPart + 0.5i), false, false, ""},
+ equalsTestCase{complex128(kRealPart), false, false, ""},
+ equalsTestCase{complex128(kRealPart + kImagPart + 0.5), false, false, ""},
+ equalsTestCase{complex128(kRealPart + kImagPart + 0.5i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Arrays
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) ArrayOfComparableType() {
+ expected := [3]uint{17, 19, 23}
+
+ matcher := Equals(expected)
+ ExpectEq("[17 19 23]", matcher.Description())
+
+ // To defeat constant de-duping by the compiler.
+	makeArray := func(i, j, k uint) [3]uint { return [3]uint{i, j, k} }
+
+ type arrayAlias [3]uint
+ type uintAlias uint
+
+ cases := []equalsTestCase{
+ // Correct types, equal.
+ equalsTestCase{expected, true, false, ""},
+ equalsTestCase{[3]uint{17, 19, 23}, true, false, ""},
+ equalsTestCase{makeArray(17, 19, 23), true, false, ""},
+
+ // Correct types, not equal.
+ equalsTestCase{[3]uint{0, 0, 0}, false, false, ""},
+ equalsTestCase{[3]uint{18, 19, 23}, false, false, ""},
+ equalsTestCase{[3]uint{17, 20, 23}, false, false, ""},
+ equalsTestCase{[3]uint{17, 19, 22}, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not [3]uint"},
+ equalsTestCase{bool(false), false, true, "which is not [3]uint"},
+ equalsTestCase{int(0), false, true, "which is not [3]uint"},
+ equalsTestCase{int8(0), false, true, "which is not [3]uint"},
+ equalsTestCase{int16(0), false, true, "which is not [3]uint"},
+ equalsTestCase{int32(0), false, true, "which is not [3]uint"},
+ equalsTestCase{int64(0), false, true, "which is not [3]uint"},
+ equalsTestCase{uint(0), false, true, "which is not [3]uint"},
+ equalsTestCase{uint8(0), false, true, "which is not [3]uint"},
+ equalsTestCase{uint16(0), false, true, "which is not [3]uint"},
+ equalsTestCase{uint32(0), false, true, "which is not [3]uint"},
+ equalsTestCase{uint64(0), false, true, "which is not [3]uint"},
+ equalsTestCase{true, false, true, "which is not [3]uint"},
+ equalsTestCase{[...]int{}, false, true, "which is not [3]uint"},
+ equalsTestCase{func() {}, false, true, "which is not [3]uint"},
+ equalsTestCase{map[int]int{}, false, true, "which is not [3]uint"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not [3]uint"},
+ equalsTestCase{[2]uint{17, 19}, false, true, "which is not [3]uint"},
+ equalsTestCase{[4]uint{17, 19, 23, 0}, false, true, "which is not [3]uint"},
+ equalsTestCase{arrayAlias{17, 19, 23}, false, true, "which is not [3]uint"},
+ equalsTestCase{[3]uintAlias{17, 19, 23}, false, true, "which is not [3]uint"},
+ equalsTestCase{[3]int32{17, 19, 23}, false, true, "which is not [3]uint"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
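+
+// arrayTypeIdentityDemo is an illustrative sketch, not part of the upstream
+// suite: a named array type and an array of a named element type are distinct
+// from [3]uint even though the layouts match, which is why the alias cases
+// above report a type mismatch rather than inequality.
+func arrayTypeIdentityDemo() {
+	type arrayAlias [3]uint
+	type uintAlias uint
+	// %T prints the dynamic type: three distinct types despite equal layout.
+	fmt.Printf("%T %T %T\n", arrayAlias{}, [3]uintAlias{}, [3]uint{})
+}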
+
+func (t *EqualsTest) ArrayOfNonComparableType() {
+ type nonComparableArray [2]map[string]string
+ f := func() {
+ ExpectEq(nonComparableArray{}, nonComparableArray{})
+ }
+
+ ExpectThat(f, Panics(MatchesRegexp("uncomparable.*nonComparableArray")))
+}
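+
+// uncomparablePanicDemo is an illustrative sketch, not part of the upstream
+// suite, of the behavior the Panics expectation above relies on: comparing
+// interface values whose dynamic type is uncomparable (here an array of maps)
+// compiles but panics at runtime.
+func uncomparablePanicDemo() {
+	type nonComparableArray [2]map[string]string
+	var x, y interface{} = nonComparableArray{}, nonComparableArray{}
+	defer func() { fmt.Println("recovered:", recover()) }()
+	fmt.Println(x == y) // panics: comparing uncomparable type
+}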
+
+////////////////////////////////////////////////////////////////////////
+// chan
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NilChan() {
+ var nilChan1 chan int
+ var nilChan2 chan int
+ var nilChan3 chan uint
+ var nonNilChan1 chan int = make(chan int)
+ var nonNilChan2 chan uint = make(chan uint)
+
+ matcher := Equals(nilChan1)
+ ExpectEq("<nil>", matcher.Description())
+
+ cases := []equalsTestCase{
+ // int channels
+ equalsTestCase{nilChan1, true, false, ""},
+ equalsTestCase{nilChan2, true, false, ""},
+ equalsTestCase{nonNilChan1, false, false, ""},
+
+ // uint channels
+ equalsTestCase{nilChan3, false, true, "which is not a chan int"},
+ equalsTestCase{nonNilChan2, false, true, "which is not a chan int"},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a chan int"},
+ equalsTestCase{bool(false), false, true, "which is not a chan int"},
+ equalsTestCase{int(0), false, true, "which is not a chan int"},
+ equalsTestCase{int8(0), false, true, "which is not a chan int"},
+ equalsTestCase{int16(0), false, true, "which is not a chan int"},
+ equalsTestCase{int32(0), false, true, "which is not a chan int"},
+ equalsTestCase{int64(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint8(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint16(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint32(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint64(0), false, true, "which is not a chan int"},
+ equalsTestCase{true, false, true, "which is not a chan int"},
+ equalsTestCase{[...]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{func() {}, false, true, "which is not a chan int"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{&someInt, false, true, "which is not a chan int"},
+ equalsTestCase{[]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{"taco", false, true, "which is not a chan int"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a chan int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonNilChan() {
+ var nilChan1 chan int
+ var nilChan2 chan uint
+ var nonNilChan1 chan int = make(chan int)
+ var nonNilChan2 chan int = make(chan int)
+ var nonNilChan3 chan uint = make(chan uint)
+
+ matcher := Equals(nonNilChan1)
+ ExpectEq(fmt.Sprintf("%v", nonNilChan1), matcher.Description())
+
+ cases := []equalsTestCase{
+ // int channels
+ equalsTestCase{nonNilChan1, true, false, ""},
+ equalsTestCase{nonNilChan2, false, false, ""},
+ equalsTestCase{nilChan1, false, false, ""},
+
+ // uint channels
+ equalsTestCase{nilChan2, false, true, "which is not a chan int"},
+ equalsTestCase{nonNilChan3, false, true, "which is not a chan int"},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a chan int"},
+ equalsTestCase{bool(false), false, true, "which is not a chan int"},
+ equalsTestCase{int(0), false, true, "which is not a chan int"},
+ equalsTestCase{int8(0), false, true, "which is not a chan int"},
+ equalsTestCase{int16(0), false, true, "which is not a chan int"},
+ equalsTestCase{int32(0), false, true, "which is not a chan int"},
+ equalsTestCase{int64(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint8(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint16(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint32(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint64(0), false, true, "which is not a chan int"},
+ equalsTestCase{true, false, true, "which is not a chan int"},
+ equalsTestCase{[...]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{func() {}, false, true, "which is not a chan int"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{&someInt, false, true, "which is not a chan int"},
+ equalsTestCase{[]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{"taco", false, true, "which is not a chan int"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a chan int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ChanDirection() {
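+	// Send-only, receive-only, and bidirectional channels are distinct types,
+	// so only another chan<- int candidate can match.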
+ var chan1 chan<- int
+ var chan2 <-chan int
+ var chan3 chan int
+
+ matcher := Equals(chan1)
+ ExpectEq(fmt.Sprintf("%v", chan1), matcher.Description())
+
+ cases := []equalsTestCase{
+ equalsTestCase{chan1, true, false, ""},
+ equalsTestCase{chan2, false, true, "which is not a chan<- int"},
+ equalsTestCase{chan3, false, true, "which is not a chan<- int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// func
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) Functions() {
+ func1 := func() {}
+ func2 := func() {}
+ func3 := func(x int) {}
+
+ matcher := Equals(func1)
+ ExpectEq(fmt.Sprintf("%v", func1), matcher.Description())
+
+ cases := []equalsTestCase{
+ // Functions.
+ equalsTestCase{func1, true, false, ""},
+ equalsTestCase{func2, false, false, ""},
+ equalsTestCase{func3, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a function"},
+ equalsTestCase{bool(false), false, true, "which is not a function"},
+ equalsTestCase{int(0), false, true, "which is not a function"},
+ equalsTestCase{int8(0), false, true, "which is not a function"},
+ equalsTestCase{int16(0), false, true, "which is not a function"},
+ equalsTestCase{int32(0), false, true, "which is not a function"},
+ equalsTestCase{int64(0), false, true, "which is not a function"},
+ equalsTestCase{uint(0), false, true, "which is not a function"},
+ equalsTestCase{uint8(0), false, true, "which is not a function"},
+ equalsTestCase{uint16(0), false, true, "which is not a function"},
+ equalsTestCase{uint32(0), false, true, "which is not a function"},
+ equalsTestCase{uint64(0), false, true, "which is not a function"},
+ equalsTestCase{true, false, true, "which is not a function"},
+ equalsTestCase{[...]int{}, false, true, "which is not a function"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a function"},
+ equalsTestCase{&someInt, false, true, "which is not a function"},
+ equalsTestCase{[]int{}, false, true, "which is not a function"},
+ equalsTestCase{"taco", false, true, "which is not a function"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a function"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// map
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NilMap() {
+ var nilMap1 map[int]int
+ var nilMap2 map[int]int
+ var nilMap3 map[int]uint
+ var nonNilMap1 map[int]int = make(map[int]int)
+ var nonNilMap2 map[int]uint = make(map[int]uint)
+
+ matcher := Equals(nilMap1)
+ ExpectEq("map[]", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nilMap1, true, false, ""},
+ equalsTestCase{nilMap2, true, false, ""},
+ equalsTestCase{nilMap3, true, false, ""},
+ equalsTestCase{nonNilMap1, false, false, ""},
+ equalsTestCase{nonNilMap2, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a map"},
+ equalsTestCase{bool(false), false, true, "which is not a map"},
+ equalsTestCase{int(0), false, true, "which is not a map"},
+ equalsTestCase{int8(0), false, true, "which is not a map"},
+ equalsTestCase{int16(0), false, true, "which is not a map"},
+ equalsTestCase{int32(0), false, true, "which is not a map"},
+ equalsTestCase{int64(0), false, true, "which is not a map"},
+ equalsTestCase{uint(0), false, true, "which is not a map"},
+ equalsTestCase{uint8(0), false, true, "which is not a map"},
+ equalsTestCase{uint16(0), false, true, "which is not a map"},
+ equalsTestCase{uint32(0), false, true, "which is not a map"},
+ equalsTestCase{uint64(0), false, true, "which is not a map"},
+ equalsTestCase{true, false, true, "which is not a map"},
+ equalsTestCase{[...]int{}, false, true, "which is not a map"},
+ equalsTestCase{func() {}, false, true, "which is not a map"},
+ equalsTestCase{&someInt, false, true, "which is not a map"},
+ equalsTestCase{[]int{}, false, true, "which is not a map"},
+ equalsTestCase{"taco", false, true, "which is not a map"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a map"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonNilMap() {
+ var nilMap1 map[int]int
+ var nilMap2 map[int]uint
+ var nonNilMap1 map[int]int = make(map[int]int)
+ var nonNilMap2 map[int]int = make(map[int]int)
+ var nonNilMap3 map[int]uint = make(map[int]uint)
+
+ matcher := Equals(nonNilMap1)
+ ExpectEq("map[]", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nonNilMap1, true, false, ""},
+ equalsTestCase{nonNilMap2, false, false, ""},
+ equalsTestCase{nonNilMap3, false, false, ""},
+ equalsTestCase{nilMap1, false, false, ""},
+ equalsTestCase{nilMap2, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a map"},
+ equalsTestCase{bool(false), false, true, "which is not a map"},
+ equalsTestCase{int(0), false, true, "which is not a map"},
+ equalsTestCase{int8(0), false, true, "which is not a map"},
+ equalsTestCase{int16(0), false, true, "which is not a map"},
+ equalsTestCase{int32(0), false, true, "which is not a map"},
+ equalsTestCase{int64(0), false, true, "which is not a map"},
+ equalsTestCase{uint(0), false, true, "which is not a map"},
+ equalsTestCase{uint8(0), false, true, "which is not a map"},
+ equalsTestCase{uint16(0), false, true, "which is not a map"},
+ equalsTestCase{uint32(0), false, true, "which is not a map"},
+ equalsTestCase{uint64(0), false, true, "which is not a map"},
+ equalsTestCase{true, false, true, "which is not a map"},
+ equalsTestCase{[...]int{}, false, true, "which is not a map"},
+ equalsTestCase{func() {}, false, true, "which is not a map"},
+ equalsTestCase{&someInt, false, true, "which is not a map"},
+ equalsTestCase{[]int{}, false, true, "which is not a map"},
+ equalsTestCase{"taco", false, true, "which is not a map"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a map"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Pointers
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NilPointer() {
+ var someInt int = 17
+ var someUint uint = 17
+
+ var nilInt1 *int
+ var nilInt2 *int
+ var nilUint *uint
+ var nonNilInt *int = &someInt
+ var nonNilUint *uint = &someUint
+
+ matcher := Equals(nilInt1)
+ ExpectEq("<nil>", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nilInt1, true, false, ""},
+ equalsTestCase{nilInt2, true, false, ""},
+ equalsTestCase{nonNilInt, false, false, ""},
+
+ // Incorrect type.
+ equalsTestCase{nilUint, false, true, "which is not a *int"},
+ equalsTestCase{nonNilUint, false, true, "which is not a *int"},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a *int"},
+ equalsTestCase{bool(false), false, true, "which is not a *int"},
+ equalsTestCase{int(0), false, true, "which is not a *int"},
+ equalsTestCase{int8(0), false, true, "which is not a *int"},
+ equalsTestCase{int16(0), false, true, "which is not a *int"},
+ equalsTestCase{int32(0), false, true, "which is not a *int"},
+ equalsTestCase{int64(0), false, true, "which is not a *int"},
+ equalsTestCase{uint(0), false, true, "which is not a *int"},
+ equalsTestCase{uint8(0), false, true, "which is not a *int"},
+ equalsTestCase{uint16(0), false, true, "which is not a *int"},
+ equalsTestCase{uint32(0), false, true, "which is not a *int"},
+ equalsTestCase{uint64(0), false, true, "which is not a *int"},
+ equalsTestCase{true, false, true, "which is not a *int"},
+ equalsTestCase{[...]int{}, false, true, "which is not a *int"},
+ equalsTestCase{func() {}, false, true, "which is not a *int"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a *int"},
+ equalsTestCase{[]int{}, false, true, "which is not a *int"},
+ equalsTestCase{"taco", false, true, "which is not a *int"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a *int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonNilPointer() {
+ var someInt int = 17
+ var someOtherInt int = 17
+ var someUint uint = 17
+
+ var nilInt *int
+ var nilUint *uint
+ var nonNilInt1 *int = &someInt
+ var nonNilInt2 *int = &someOtherInt
+ var nonNilUint *uint = &someUint
+
+ matcher := Equals(nonNilInt1)
+ ExpectEq(fmt.Sprintf("%v", nonNilInt1), matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nonNilInt1, true, false, ""},
+ equalsTestCase{nonNilInt2, false, false, ""},
+ equalsTestCase{nilInt, false, false, ""},
+
+ // Incorrect type.
+ equalsTestCase{nilUint, false, true, "which is not a *int"},
+ equalsTestCase{nonNilUint, false, true, "which is not a *int"},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a *int"},
+ equalsTestCase{bool(false), false, true, "which is not a *int"},
+ equalsTestCase{int(0), false, true, "which is not a *int"},
+ equalsTestCase{int8(0), false, true, "which is not a *int"},
+ equalsTestCase{int16(0), false, true, "which is not a *int"},
+ equalsTestCase{int32(0), false, true, "which is not a *int"},
+ equalsTestCase{int64(0), false, true, "which is not a *int"},
+ equalsTestCase{uint(0), false, true, "which is not a *int"},
+ equalsTestCase{uint8(0), false, true, "which is not a *int"},
+ equalsTestCase{uint16(0), false, true, "which is not a *int"},
+ equalsTestCase{uint32(0), false, true, "which is not a *int"},
+ equalsTestCase{uint64(0), false, true, "which is not a *int"},
+ equalsTestCase{true, false, true, "which is not a *int"},
+ equalsTestCase{[...]int{}, false, true, "which is not a *int"},
+ equalsTestCase{func() {}, false, true, "which is not a *int"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a *int"},
+ equalsTestCase{[]int{}, false, true, "which is not a *int"},
+ equalsTestCase{"taco", false, true, "which is not a *int"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a *int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Slices
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NilSlice() {
+ var nilInt1 []int
+ var nilInt2 []int
+ var nilUint []uint
+
+ var nonNilInt []int = make([]int, 0)
+ var nonNilUint []uint = make([]uint, 0)
+
+ matcher := Equals(nilInt1)
+ ExpectEq("[]", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nilInt1, true, false, ""},
+ equalsTestCase{nilInt2, true, false, ""},
+ equalsTestCase{nonNilInt, false, false, ""},
+
+ // Incorrect type.
+ equalsTestCase{nilUint, false, true, "which is not a []int"},
+ equalsTestCase{nonNilUint, false, true, "which is not a []int"},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a []int"},
+ equalsTestCase{bool(false), false, true, "which is not a []int"},
+ equalsTestCase{int(0), false, true, "which is not a []int"},
+ equalsTestCase{int8(0), false, true, "which is not a []int"},
+ equalsTestCase{int16(0), false, true, "which is not a []int"},
+ equalsTestCase{int32(0), false, true, "which is not a []int"},
+ equalsTestCase{int64(0), false, true, "which is not a []int"},
+ equalsTestCase{uint(0), false, true, "which is not a []int"},
+ equalsTestCase{uint8(0), false, true, "which is not a []int"},
+ equalsTestCase{uint16(0), false, true, "which is not a []int"},
+ equalsTestCase{uint32(0), false, true, "which is not a []int"},
+ equalsTestCase{uint64(0), false, true, "which is not a []int"},
+ equalsTestCase{true, false, true, "which is not a []int"},
+ equalsTestCase{[...]int{}, false, true, "which is not a []int"},
+ equalsTestCase{func() {}, false, true, "which is not a []int"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a []int"},
+ equalsTestCase{"taco", false, true, "which is not a []int"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a []int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonNilSlice() {
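+	// Go defines slice comparison only against nil, so Equals accepts only a
+	// nil slice as its expected value and panics otherwise.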
+ nonNil := make([]int, 0)
+ f := func() { Equals(nonNil) }
+ ExpectThat(f, Panics(HasSubstr("non-nil slice")))
+}
+
+////////////////////////////////////////////////////////////////////////
+// string
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) String() {
+ partial := "taco"
+ expected := fmt.Sprintf("%s%d", partial, 1)
+
+ matcher := Equals(expected)
+ ExpectEq("taco1", matcher.Description())
+
+ type stringAlias string
+
+ cases := []equalsTestCase{
+ // Correct types.
+ equalsTestCase{"taco1", true, false, ""},
+ equalsTestCase{"taco" + "1", true, false, ""},
+ equalsTestCase{expected, true, false, ""},
+ equalsTestCase{stringAlias("taco1"), true, false, ""},
+
+ equalsTestCase{"", false, false, ""},
+ equalsTestCase{"taco", false, false, ""},
+ equalsTestCase{"taco1\x00", false, false, ""},
+ equalsTestCase{"taco2", false, false, ""},
+ equalsTestCase{stringAlias("taco2"), false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a string"},
+ equalsTestCase{bool(false), false, true, "which is not a string"},
+ equalsTestCase{int(0), false, true, "which is not a string"},
+ equalsTestCase{int8(0), false, true, "which is not a string"},
+ equalsTestCase{int16(0), false, true, "which is not a string"},
+ equalsTestCase{int32(0), false, true, "which is not a string"},
+ equalsTestCase{int64(0), false, true, "which is not a string"},
+ equalsTestCase{uint(0), false, true, "which is not a string"},
+ equalsTestCase{uint8(0), false, true, "which is not a string"},
+ equalsTestCase{uint16(0), false, true, "which is not a string"},
+ equalsTestCase{uint32(0), false, true, "which is not a string"},
+ equalsTestCase{uint64(0), false, true, "which is not a string"},
+ equalsTestCase{true, false, true, "which is not a string"},
+ equalsTestCase{[...]int{}, false, true, "which is not a string"},
+ equalsTestCase{func() {}, false, true, "which is not a string"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a string"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a string"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) StringAlias() {
+ type stringAlias string
+
+ matcher := Equals(stringAlias("taco"))
+ ExpectEq("taco", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct types.
+ equalsTestCase{stringAlias("taco"), true, false, ""},
+ equalsTestCase{"taco", true, false, ""},
+
+ equalsTestCase{"burrito", false, false, ""},
+ equalsTestCase{stringAlias("burrito"), false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a string"},
+ equalsTestCase{bool(false), false, true, "which is not a string"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// struct
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) Struct() {
+ type someStruct struct{ foo uint }
+ f := func() { Equals(someStruct{17}) }
+ ExpectThat(f, Panics(HasSubstr("unsupported kind struct")))
+}
+
+////////////////////////////////////////////////////////////////////////
+// unsafe.Pointer
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NilUnsafePointer() {
+ someInt := int(17)
+
+ var nilPtr1 unsafe.Pointer
+ var nilPtr2 unsafe.Pointer
+ var nonNilPtr unsafe.Pointer = unsafe.Pointer(&someInt)
+
+ matcher := Equals(nilPtr1)
+ ExpectEq("<nil>", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nilPtr1, true, false, ""},
+ equalsTestCase{nilPtr2, true, false, ""},
+ equalsTestCase{nonNilPtr, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{bool(false), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int8(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int16(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int32(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int64(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint8(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint16(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint32(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint64(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uintptr(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{true, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{[...]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{make(chan int), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{func() {}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{&someInt, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{[]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{"taco", false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a unsafe.Pointer"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonNilUnsafePointer() {
+ someInt := int(17)
+ someOtherInt := int(17)
+
+ var nilPtr unsafe.Pointer
+ var nonNilPtr1 unsafe.Pointer = unsafe.Pointer(&someInt)
+ var nonNilPtr2 unsafe.Pointer = unsafe.Pointer(&someOtherInt)
+
+ matcher := Equals(nonNilPtr1)
+ ExpectEq(fmt.Sprintf("%v", nonNilPtr1), matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nonNilPtr1, true, false, ""},
+ equalsTestCase{nonNilPtr2, false, false, ""},
+ equalsTestCase{nilPtr, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{bool(false), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int8(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int16(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int32(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int64(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint8(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint16(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint32(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint64(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uintptr(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{true, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{[...]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{make(chan int), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{func() {}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{&someInt, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{[]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{"taco", false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a unsafe.Pointer"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/error.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/error.go
new file mode 100644
index 00000000000..8a078e36d86
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/error.go
@@ -0,0 +1,51 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+// Error returns a matcher that matches non-nil values implementing the
+// built-in error interface for which the return value of Error() matches
+// the supplied matcher.
+//
+// For example:
+//
+// err := errors.New("taco burrito")
+//
+// Error(Equals("taco burrito")) // matches err
+// Error(HasSubstr("taco")) // matches err
+// Error(HasSubstr("enchilada")) // doesn't match err
+//
+func Error(m Matcher) Matcher {
+ return &errorMatcher{m}
+}
+
+type errorMatcher struct {
+ wrappedMatcher Matcher
+}
+
+func (m *errorMatcher) Description() string {
+ return "error " + m.wrappedMatcher.Description()
+}
+
+func (m *errorMatcher) Matches(c interface{}) error {
+ // Make sure that c is an error.
+ e, ok := c.(error)
+ if !ok {
+ return NewFatalError("which is not an error")
+ }
+
+ // Pass on the error text to the wrapped matcher.
+ return m.wrappedMatcher.Matches(e.Error())
+}
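+
+// A minimal usage sketch (illustrative; doRequest is a hypothetical call,
+// while Error, HasSubstr, and ogletest's ExpectThat are real):
+//
+//   err := doRequest()
+//   ExpectThat(err, Error(HasSubstr("timeout")))  // asserts on err.Error()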
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/error_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/error_test.go
new file mode 100644
index 00000000000..42f226d95ed
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/error_test.go
@@ -0,0 +1,92 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "errors"
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type ErrorTest struct {
+ matcherCalled bool
+ suppliedCandidate interface{}
+ wrappedError error
+
+ matcher Matcher
+}
+
+func init() { RegisterTestSuite(&ErrorTest{}) }
+
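+// SetUp wires the matcher under test to a fake wrapped matcher that records
+// the candidate it receives and returns t.wrappedError, letting each test
+// observe how Error() delegates.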
+func (t *ErrorTest) SetUp(i *TestInfo) {
+ wrapped := &fakeMatcher{
+ func(c interface{}) error {
+ t.matcherCalled = true
+ t.suppliedCandidate = c
+ return t.wrappedError
+ },
+ "is foo",
+ }
+
+ t.matcher = Error(wrapped)
+}
+
+func isFatal(err error) bool {
+ _, isFatal := err.(*FatalError)
+ return isFatal
+}
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *ErrorTest) Description() {
+ ExpectThat(t.matcher.Description(), Equals("error is foo"))
+}
+
+func (t *ErrorTest) CandidateIsNil() {
+ err := t.matcher.Matches(nil)
+
+ ExpectThat(t.matcherCalled, Equals(false))
+ ExpectThat(err.Error(), Equals("which is not an error"))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *ErrorTest) CandidateIsString() {
+ err := t.matcher.Matches("taco")
+
+ ExpectThat(t.matcherCalled, Equals(false))
+ ExpectThat(err.Error(), Equals("which is not an error"))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *ErrorTest) CallsWrappedMatcher() {
+ candidate := errors.New("taco")
+ t.matcher.Matches(candidate)
+
+ ExpectThat(t.matcherCalled, Equals(true))
+ ExpectThat(t.suppliedCandidate, Equals("taco"))
+}
+
+func (t *ErrorTest) ReturnsWrappedMatcherResult() {
+ t.wrappedError = errors.New("burrito")
+ err := t.matcher.Matches(errors.New(""))
+ ExpectThat(err, Equals(t.wrappedError))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_or_equal.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_or_equal.go
new file mode 100644
index 00000000000..4b9d103a381
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_or_equal.go
@@ -0,0 +1,39 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// GreaterOrEqual returns a matcher that matches integer, floating point, or
+// string values v such that v >= x. Comparison is not defined between numeric
+// and string types, but is defined between all integer and floating point
+// types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// GreaterOrEqual will panic.
+func GreaterOrEqual(x interface{}) Matcher {
+ desc := fmt.Sprintf("greater than or equal to %v", x)
+
+ // Special case: make it clear that strings are strings.
+ if reflect.TypeOf(x).Kind() == reflect.String {
+ desc = fmt.Sprintf("greater than or equal to \"%s\"", x)
+ }
+
+ return transformDescription(Not(LessThan(x)), desc)
+}
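+
+// A minimal sketch of the matcher in use (illustrative only):
+//
+//   m := GreaterOrEqual(150)
+//   m.Matches(151)    // nil: 151 >= 150
+//   m.Matches(149.9)  // non-nil error: numeric kinds compare with each other
+//   m.Matches("151")  // fatal error: strings don't compare with numbers
+//
+// Note that the implementation simply negates LessThan and rewrites the
+// resulting description.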
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_or_equal_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_or_equal_test.go
new file mode 100644
index 00000000000..639f0e0acf9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_or_equal_test.go
@@ -0,0 +1,1059 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+ "math"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type GreaterOrEqualTest struct {
+}
+
+func init() { RegisterTestSuite(&GreaterOrEqualTest{}) }
+
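+// geTestCase pairs a candidate value with the expected outcome: whether it
+// matches, whether a mismatch is fatal, and the expected error text.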
+type geTestCase struct {
+ candidate interface{}
+ expectedResult bool
+ shouldBeFatal bool
+ expectedError string
+}
+
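+// checkTestCases runs the matcher against every candidate in cases and
+// verifies the match result, fatality, and error text against the table.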
+func (t *GreaterOrEqualTest) checkTestCases(matcher Matcher, cases []geTestCase) {
+ for i, c := range cases {
+ err := matcher.Matches(c.candidate)
+
+ ExpectThat(
+ (err == nil),
+ Equals(c.expectedResult),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ if err == nil {
+ continue
+ }
+
+ _, isFatal := err.(*FatalError)
+ ExpectEq(
+ c.shouldBeFatal,
+ isFatal,
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ ExpectThat(
+ err,
+ Error(Equals(c.expectedError)),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+// Bad types and arguments
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterOrEqualTest) IntegerCandidateBadTypes() {
+ matcher := GreaterOrEqual(int(-150))
+
+ cases := []geTestCase{
+ geTestCase{true, false, true, "which is not comparable"},
+ geTestCase{uintptr(17), false, true, "which is not comparable"},
+ geTestCase{complex64(-151), false, true, "which is not comparable"},
+ geTestCase{complex128(-151), false, true, "which is not comparable"},
+ geTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ geTestCase{make(chan int), false, true, "which is not comparable"},
+ geTestCase{func() {}, false, true, "which is not comparable"},
+ geTestCase{map[int]int{}, false, true, "which is not comparable"},
+ geTestCase{&geTestCase{}, false, true, "which is not comparable"},
+ geTestCase{make([]int, 0), false, true, "which is not comparable"},
+ geTestCase{"-151", false, true, "which is not comparable"},
+ geTestCase{geTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) FloatCandidateBadTypes() {
+ matcher := GreaterOrEqual(float32(-150))
+
+ cases := []geTestCase{
+ geTestCase{true, false, true, "which is not comparable"},
+ geTestCase{uintptr(17), false, true, "which is not comparable"},
+ geTestCase{complex64(-151), false, true, "which is not comparable"},
+ geTestCase{complex128(-151), false, true, "which is not comparable"},
+ geTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ geTestCase{make(chan int), false, true, "which is not comparable"},
+ geTestCase{func() {}, false, true, "which is not comparable"},
+ geTestCase{map[int]int{}, false, true, "which is not comparable"},
+ geTestCase{&geTestCase{}, false, true, "which is not comparable"},
+ geTestCase{make([]int, 0), false, true, "which is not comparable"},
+ geTestCase{"-151", false, true, "which is not comparable"},
+ geTestCase{geTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) StringCandidateBadTypes() {
+ matcher := GreaterOrEqual("17")
+
+ cases := []geTestCase{
+ geTestCase{true, false, true, "which is not comparable"},
+ geTestCase{int(0), false, true, "which is not comparable"},
+ geTestCase{int8(0), false, true, "which is not comparable"},
+ geTestCase{int16(0), false, true, "which is not comparable"},
+ geTestCase{int32(0), false, true, "which is not comparable"},
+ geTestCase{int64(0), false, true, "which is not comparable"},
+ geTestCase{uint(0), false, true, "which is not comparable"},
+ geTestCase{uint8(0), false, true, "which is not comparable"},
+ geTestCase{uint16(0), false, true, "which is not comparable"},
+ geTestCase{uint32(0), false, true, "which is not comparable"},
+ geTestCase{uint64(0), false, true, "which is not comparable"},
+ geTestCase{uintptr(17), false, true, "which is not comparable"},
+ geTestCase{float32(0), false, true, "which is not comparable"},
+ geTestCase{float64(0), false, true, "which is not comparable"},
+ geTestCase{complex64(-151), false, true, "which is not comparable"},
+ geTestCase{complex128(-151), false, true, "which is not comparable"},
+ geTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ geTestCase{make(chan int), false, true, "which is not comparable"},
+ geTestCase{func() {}, false, true, "which is not comparable"},
+ geTestCase{map[int]int{}, false, true, "which is not comparable"},
+ geTestCase{&geTestCase{}, false, true, "which is not comparable"},
+ geTestCase{make([]int, 0), false, true, "which is not comparable"},
+ geTestCase{geTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) BadArgument() {
+ panicked := false
+
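+	// Deferred functions run in LIFO order: the recover() defer below fires
+	// first when GreaterOrEqual panics, then the expectation defer verifies
+	// that it did.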
+ defer func() {
+ ExpectThat(panicked, Equals(true))
+ }()
+
+ defer func() {
+ if r := recover(); r != nil {
+ panicked = true
+ }
+ }()
+
+ GreaterOrEqual(complex128(0))
+}
+
+////////////////////////////////////////////////////////////////////////
+// Integer literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterOrEqualTest) NegativeIntegerLiteral() {
+ matcher := GreaterOrEqual(-150)
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to -150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-(1 << 30), false, false, ""},
+ geTestCase{-151, false, false, ""},
+ geTestCase{-150, true, false, ""},
+ geTestCase{0, true, false, ""},
+ geTestCase{17, true, false, ""},
+
+ geTestCase{int(-(1 << 30)), false, false, ""},
+ geTestCase{int(-151), false, false, ""},
+ geTestCase{int(-150), true, false, ""},
+ geTestCase{int(0), true, false, ""},
+ geTestCase{int(17), true, false, ""},
+
+ geTestCase{int8(-127), true, false, ""},
+ geTestCase{int8(0), true, false, ""},
+ geTestCase{int8(17), true, false, ""},
+
+ geTestCase{int16(-(1 << 14)), false, false, ""},
+ geTestCase{int16(-151), false, false, ""},
+ geTestCase{int16(-150), true, false, ""},
+ geTestCase{int16(0), true, false, ""},
+ geTestCase{int16(17), true, false, ""},
+
+ geTestCase{int32(-(1 << 30)), false, false, ""},
+ geTestCase{int32(-151), false, false, ""},
+ geTestCase{int32(-150), true, false, ""},
+ geTestCase{int32(0), true, false, ""},
+ geTestCase{int32(17), true, false, ""},
+
+ geTestCase{int64(-(1 << 30)), false, false, ""},
+ geTestCase{int64(-151), false, false, ""},
+ geTestCase{int64(-150), true, false, ""},
+ geTestCase{int64(0), true, false, ""},
+ geTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint((1 << 32) - 151), true, false, ""},
+ geTestCase{uint(0), true, false, ""},
+ geTestCase{uint(17), true, false, ""},
+
+ geTestCase{uint8(0), true, false, ""},
+ geTestCase{uint8(17), true, false, ""},
+ geTestCase{uint8(253), true, false, ""},
+
+ geTestCase{uint16((1 << 16) - 151), true, false, ""},
+ geTestCase{uint16(0), true, false, ""},
+ geTestCase{uint16(17), true, false, ""},
+
+ geTestCase{uint32((1 << 32) - 151), true, false, ""},
+ geTestCase{uint32(0), true, false, ""},
+ geTestCase{uint32(17), true, false, ""},
+
+ geTestCase{uint64((1 << 64) - 151), true, false, ""},
+ geTestCase{uint64(0), true, false, ""},
+ geTestCase{uint64(17), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-(1 << 30)), false, false, ""},
+ geTestCase{float32(-151), false, false, ""},
+ geTestCase{float32(-150.1), false, false, ""},
+ geTestCase{float32(-150), true, false, ""},
+ geTestCase{float32(-149.9), true, false, ""},
+ geTestCase{float32(0), true, false, ""},
+ geTestCase{float32(17), true, false, ""},
+ geTestCase{float32(160), true, false, ""},
+
+ geTestCase{float64(-(1 << 30)), false, false, ""},
+ geTestCase{float64(-151), false, false, ""},
+ geTestCase{float64(-150.1), false, false, ""},
+ geTestCase{float64(-150), true, false, ""},
+ geTestCase{float64(-149.9), true, false, ""},
+ geTestCase{float64(0), true, false, ""},
+ geTestCase{float64(17), true, false, ""},
+ geTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) ZeroIntegerLiteral() {
+ matcher := GreaterOrEqual(0)
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 0"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-(1 << 30), false, false, ""},
+ geTestCase{-1, false, false, ""},
+ geTestCase{0, true, false, ""},
+ geTestCase{1, true, false, ""},
+ geTestCase{17, true, false, ""},
+ geTestCase{(1 << 30), true, false, ""},
+
+ geTestCase{int(-(1 << 30)), false, false, ""},
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(0), true, false, ""},
+ geTestCase{int(1), true, false, ""},
+ geTestCase{int(17), true, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(0), true, false, ""},
+ geTestCase{int8(1), true, false, ""},
+
+ geTestCase{int16(-(1 << 14)), false, false, ""},
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(0), true, false, ""},
+ geTestCase{int16(1), true, false, ""},
+ geTestCase{int16(17), true, false, ""},
+
+ geTestCase{int32(-(1 << 30)), false, false, ""},
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(0), true, false, ""},
+ geTestCase{int32(1), true, false, ""},
+ geTestCase{int32(17), true, false, ""},
+
+ geTestCase{int64(-(1 << 30)), false, false, ""},
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(0), true, false, ""},
+ geTestCase{int64(1), true, false, ""},
+ geTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint((1 << 32) - 1), true, false, ""},
+ geTestCase{uint(0), true, false, ""},
+ geTestCase{uint(17), true, false, ""},
+
+ geTestCase{uint8(0), true, false, ""},
+ geTestCase{uint8(17), true, false, ""},
+ geTestCase{uint8(253), true, false, ""},
+
+ geTestCase{uint16((1 << 16) - 1), true, false, ""},
+ geTestCase{uint16(0), true, false, ""},
+ geTestCase{uint16(17), true, false, ""},
+
+ geTestCase{uint32((1 << 32) - 1), true, false, ""},
+ geTestCase{uint32(0), true, false, ""},
+ geTestCase{uint32(17), true, false, ""},
+
+ geTestCase{uint64((1 << 64) - 1), true, false, ""},
+ geTestCase{uint64(0), true, false, ""},
+ geTestCase{uint64(17), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-(1 << 30)), false, false, ""},
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(-0.1), false, false, ""},
+ geTestCase{float32(-0.0), true, false, ""},
+ geTestCase{float32(0), true, false, ""},
+ geTestCase{float32(0.1), true, false, ""},
+ geTestCase{float32(17), true, false, ""},
+ geTestCase{float32(160), true, false, ""},
+
+ geTestCase{float64(-(1 << 30)), false, false, ""},
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(-0.1), false, false, ""},
+ geTestCase{float64(-0), true, false, ""},
+ geTestCase{float64(0), true, false, ""},
+ geTestCase{float64(17), true, false, ""},
+ geTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) PositiveIntegerLiteral() {
+ matcher := GreaterOrEqual(150)
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{149, false, false, ""},
+ geTestCase{150, true, false, ""},
+ geTestCase{151, true, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(149), false, false, ""},
+ geTestCase{int(150), true, false, ""},
+ geTestCase{int(151), true, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(0), false, false, ""},
+ geTestCase{int8(17), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(149), false, false, ""},
+ geTestCase{int16(150), true, false, ""},
+ geTestCase{int16(151), true, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(149), false, false, ""},
+ geTestCase{int32(150), true, false, ""},
+ geTestCase{int32(151), true, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(149), false, false, ""},
+ geTestCase{int64(150), true, false, ""},
+ geTestCase{int64(151), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(149), false, false, ""},
+ geTestCase{uint(150), true, false, ""},
+ geTestCase{uint(151), true, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(127), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(149), false, false, ""},
+ geTestCase{uint16(150), true, false, ""},
+ geTestCase{uint16(151), true, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(149), false, false, ""},
+ geTestCase{uint32(150), true, false, ""},
+ geTestCase{uint32(151), true, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(149), false, false, ""},
+ geTestCase{uint64(150), true, false, ""},
+ geTestCase{uint64(151), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(149), false, false, ""},
+ geTestCase{float32(149.9), false, false, ""},
+ geTestCase{float32(150), true, false, ""},
+ geTestCase{float32(150.1), true, false, ""},
+ geTestCase{float32(151), true, false, ""},
+
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(149), false, false, ""},
+ geTestCase{float64(149.9), false, false, ""},
+ geTestCase{float64(150), true, false, ""},
+ geTestCase{float64(150.1), true, false, ""},
+ geTestCase{float64(151), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Float literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterOrEqualTest) NegativeFloatLiteral() {
+ matcher := GreaterOrEqual(-150.1)
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to -150.1"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-(1 << 30), false, false, ""},
+ geTestCase{-151, false, false, ""},
+ geTestCase{-150, true, false, ""},
+ geTestCase{0, true, false, ""},
+ geTestCase{17, true, false, ""},
+
+ geTestCase{int(-(1 << 30)), false, false, ""},
+ geTestCase{int(-151), false, false, ""},
+ geTestCase{int(-150), true, false, ""},
+ geTestCase{int(0), true, false, ""},
+ geTestCase{int(17), true, false, ""},
+
+ geTestCase{int8(-127), true, false, ""},
+ geTestCase{int8(0), true, false, ""},
+ geTestCase{int8(17), true, false, ""},
+
+ geTestCase{int16(-(1 << 14)), false, false, ""},
+ geTestCase{int16(-151), false, false, ""},
+ geTestCase{int16(-150), true, false, ""},
+ geTestCase{int16(0), true, false, ""},
+ geTestCase{int16(17), true, false, ""},
+
+ geTestCase{int32(-(1 << 30)), false, false, ""},
+ geTestCase{int32(-151), false, false, ""},
+ geTestCase{int32(-150), true, false, ""},
+ geTestCase{int32(0), true, false, ""},
+ geTestCase{int32(17), true, false, ""},
+
+ geTestCase{int64(-(1 << 30)), false, false, ""},
+ geTestCase{int64(-151), false, false, ""},
+ geTestCase{int64(-150), true, false, ""},
+ geTestCase{int64(0), true, false, ""},
+ geTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint((1 << 32) - 151), true, false, ""},
+ geTestCase{uint(0), true, false, ""},
+ geTestCase{uint(17), true, false, ""},
+
+ geTestCase{uint8(0), true, false, ""},
+ geTestCase{uint8(17), true, false, ""},
+ geTestCase{uint8(253), true, false, ""},
+
+ geTestCase{uint16((1 << 16) - 151), true, false, ""},
+ geTestCase{uint16(0), true, false, ""},
+ geTestCase{uint16(17), true, false, ""},
+
+ geTestCase{uint32((1 << 32) - 151), true, false, ""},
+ geTestCase{uint32(0), true, false, ""},
+ geTestCase{uint32(17), true, false, ""},
+
+ geTestCase{uint64((1 << 64) - 151), true, false, ""},
+ geTestCase{uint64(0), true, false, ""},
+ geTestCase{uint64(17), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-(1 << 30)), false, false, ""},
+ geTestCase{float32(-151), false, false, ""},
+ geTestCase{float32(-150.2), false, false, ""},
+ geTestCase{float32(-150.1), true, false, ""},
+ geTestCase{float32(-150), true, false, ""},
+ geTestCase{float32(0), true, false, ""},
+ geTestCase{float32(17), true, false, ""},
+ geTestCase{float32(160), true, false, ""},
+
+ geTestCase{float64(-(1 << 30)), false, false, ""},
+ geTestCase{float64(-151), false, false, ""},
+ geTestCase{float64(-150.2), false, false, ""},
+ geTestCase{float64(-150.1), true, false, ""},
+ geTestCase{float64(-150), true, false, ""},
+ geTestCase{float64(0), true, false, ""},
+ geTestCase{float64(17), true, false, ""},
+ geTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) PositiveFloatLiteral() {
+ matcher := GreaterOrEqual(149.9)
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 149.9"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{149, false, false, ""},
+ geTestCase{150, true, false, ""},
+ geTestCase{151, true, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(149), false, false, ""},
+ geTestCase{int(150), true, false, ""},
+ geTestCase{int(151), true, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(0), false, false, ""},
+ geTestCase{int8(17), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(149), false, false, ""},
+ geTestCase{int16(150), true, false, ""},
+ geTestCase{int16(151), true, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(149), false, false, ""},
+ geTestCase{int32(150), true, false, ""},
+ geTestCase{int32(151), true, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(149), false, false, ""},
+ geTestCase{int64(150), true, false, ""},
+ geTestCase{int64(151), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(149), false, false, ""},
+ geTestCase{uint(150), true, false, ""},
+ geTestCase{uint(151), true, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(127), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(149), false, false, ""},
+ geTestCase{uint16(150), true, false, ""},
+ geTestCase{uint16(151), true, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(149), false, false, ""},
+ geTestCase{uint32(150), true, false, ""},
+ geTestCase{uint32(151), true, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(149), false, false, ""},
+ geTestCase{uint64(150), true, false, ""},
+ geTestCase{uint64(151), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(149), false, false, ""},
+ geTestCase{float32(149.8), false, false, ""},
+ geTestCase{float32(149.9), true, false, ""},
+ geTestCase{float32(150), true, false, ""},
+ geTestCase{float32(151), true, false, ""},
+
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(149), false, false, ""},
+ geTestCase{float64(149.8), false, false, ""},
+ geTestCase{float64(149.9), true, false, ""},
+ geTestCase{float64(150), true, false, ""},
+ geTestCase{float64(151), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Subtle cases
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterOrEqualTest) Int64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
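+	// Illustrative: float32(kTwoTo25-1), float32(kTwoTo25), float32(kTwoTo25+1),
+	// and float32(kTwoTo25+2) are all exactly 1<<25.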
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterOrEqual(int64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{kTwoTo25 + 0, false, false, ""},
+ geTestCase{kTwoTo25 + 1, true, false, ""},
+ geTestCase{kTwoTo25 + 2, true, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(0), false, false, ""},
+ geTestCase{int16(32767), false, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int32(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 2), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(255), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(65535), false, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint32(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ geTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) Int64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
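+	// Illustrative: the float64 values of kTwoTo54-1 through kTwoTo54+2 all
+	// round to exactly 1<<54.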
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterOrEqual(int64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{1 << 30, false, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(math.MaxInt32), false, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(0), false, false, ""},
+ geTestCase{int16(32767), false, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(math.MaxInt32), false, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ geTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ geTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 2), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(math.MaxUint32), false, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(255), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(65535), false, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(math.MaxUint32), false, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ geTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ geTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+
+ // Floating point.
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) Uint64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterOrEqual(uint64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{kTwoTo25 + 0, false, false, ""},
+ geTestCase{kTwoTo25 + 1, true, false, ""},
+ geTestCase{kTwoTo25 + 2, true, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(0), false, false, ""},
+ geTestCase{int16(32767), false, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int32(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 2), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(255), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(65535), false, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint32(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ geTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) Uint64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterOrEqual(uint64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{1 << 30, false, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(math.MaxInt32), false, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(0), false, false, ""},
+ geTestCase{int16(32767), false, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(math.MaxInt32), false, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ geTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ geTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 2), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(math.MaxUint32), false, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(255), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(65535), false, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(math.MaxUint32), false, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ geTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ geTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+
+ // Floating point.
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) Float32AboveExactIntegerRange() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterOrEqual(float32(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 3.3554432e+07"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo25 - 2), false, false, ""},
+ geTestCase{int64(kTwoTo25 - 1), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 2), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 3), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo25 - 2), false, false, ""},
+ geTestCase{uint64(kTwoTo25 - 1), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 3), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) Float64AboveExactIntegerRange() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterOrEqual(float64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 1.8014398509481984e+16"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo54 - 2), false, false, ""},
+ geTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 2), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 3), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo54 - 2), false, false, ""},
+ geTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 3), true, false, ""},
+
+ // Floating point.
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// String literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterOrEqualTest) EmptyString() {
+ matcher := GreaterOrEqual("")
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to \"\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ geTestCase{"", true, false, ""},
+ geTestCase{"\x00", true, false, ""},
+ geTestCase{"a", true, false, ""},
+ geTestCase{"foo", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) SingleNullByte() {
+ matcher := GreaterOrEqual("\x00")
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to \"\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ geTestCase{"", false, false, ""},
+ geTestCase{"\x00", true, false, ""},
+ geTestCase{"a", true, false, ""},
+ geTestCase{"foo", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) LongerString() {
+ matcher := GreaterOrEqual("foo\x00")
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to \"foo\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ geTestCase{"", false, false, ""},
+ geTestCase{"\x00", false, false, ""},
+ geTestCase{"bar", false, false, ""},
+ geTestCase{"foo", false, false, ""},
+ geTestCase{"foo\x00", true, false, ""},
+ geTestCase{"fooa", true, false, ""},
+ geTestCase{"qux", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_than.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_than.go
new file mode 100644
index 00000000000..3eef32178f8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_than.go
@@ -0,0 +1,39 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// GreaterThan returns a matcher that matches integer, floating point, or
+// strings values v such that v > x. Comparison is not defined between numeric
+// and string types, but is defined between all integer and floating point
+// types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// GreaterThan will panic.
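+//
+// For example (illustrative; ExpectThat is ogletest's assertion helper):
+//
+//     ExpectThat(17, GreaterThan(16))        // matches
+//     ExpectThat(uint8(17), GreaterThan(16)) // matches across numeric kinds
+//     ExpectThat("foo", GreaterThan("bar"))  // matches lexicographically
+//     ExpectThat(16, GreaterThan(16))        // does not match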
+func GreaterThan(x interface{}) Matcher {
+ desc := fmt.Sprintf("greater than %v", x)
+
+ // Special case: make it clear that strings are strings.
+ if reflect.TypeOf(x).Kind() == reflect.String {
+ desc = fmt.Sprintf("greater than \"%s\"", x)
+ }
+
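+ // GreaterThan is simply the complement of LessOrEqual; transformDescription
+ // relabels the wrapped matcher so that failures read "greater than ..."
+ // rather than the negated form.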
+ return transformDescription(Not(LessOrEqual(x)), desc)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_than_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_than_test.go
new file mode 100644
index 00000000000..784692525cb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/greater_than_test.go
@@ -0,0 +1,1079 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+ "math"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type GreaterThanTest struct {
+}
+
+func init() { RegisterTestSuite(&GreaterThanTest{}) }
+
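+// gtTestCase describes one table entry: candidate is handed to the matcher,
+// expectedResult says whether it should match, and, when it should not,
+// shouldBeFatal and expectedError give the expected error's fatality and
+// exact text.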
+type gtTestCase struct {
+ candidate interface{}
+ expectedResult bool
+ shouldBeFatal bool
+ expectedError string
+}
+
+func (t *GreaterThanTest) checkTestCases(matcher Matcher, cases []gtTestCase) {
+ for i, c := range cases {
+ err := matcher.Matches(c.candidate)
+
+ ExpectThat(
+ (err == nil),
+ Equals(c.expectedResult),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ if err == nil {
+ continue
+ }
+
+ _, isFatal := err.(*FatalError)
+ ExpectEq(
+ c.shouldBeFatal,
+ isFatal,
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ ExpectThat(
+ err,
+ Error(Equals(c.expectedError)),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+// Bad types
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterThanTest) IntegerCandidateBadTypes() {
+ matcher := GreaterThan(int(-150))
+
+ cases := []gtTestCase{
+ gtTestCase{true, false, true, "which is not comparable"},
+ gtTestCase{uintptr(17), false, true, "which is not comparable"},
+ gtTestCase{complex64(-151), false, true, "which is not comparable"},
+ gtTestCase{complex128(-151), false, true, "which is not comparable"},
+ gtTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ gtTestCase{make(chan int), false, true, "which is not comparable"},
+ gtTestCase{func() {}, false, true, "which is not comparable"},
+ gtTestCase{map[int]int{}, false, true, "which is not comparable"},
+ gtTestCase{&gtTestCase{}, false, true, "which is not comparable"},
+ gtTestCase{make([]int, 0), false, true, "which is not comparable"},
+ gtTestCase{"-151", false, true, "which is not comparable"},
+ gtTestCase{gtTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) FloatCandidateBadTypes() {
+ matcher := GreaterThan(float32(-150))
+
+ cases := []gtTestCase{
+ gtTestCase{true, false, true, "which is not comparable"},
+ gtTestCase{uintptr(17), false, true, "which is not comparable"},
+ gtTestCase{complex64(-151), false, true, "which is not comparable"},
+ gtTestCase{complex128(-151), false, true, "which is not comparable"},
+ gtTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ gtTestCase{make(chan int), false, true, "which is not comparable"},
+ gtTestCase{func() {}, false, true, "which is not comparable"},
+ gtTestCase{map[int]int{}, false, true, "which is not comparable"},
+ gtTestCase{&gtTestCase{}, false, true, "which is not comparable"},
+ gtTestCase{make([]int, 0), false, true, "which is not comparable"},
+ gtTestCase{"-151", false, true, "which is not comparable"},
+ gtTestCase{gtTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) StringCandidateBadTypes() {
+ matcher := GreaterThan("17")
+
+ cases := []gtTestCase{
+ gtTestCase{true, false, true, "which is not comparable"},
+ gtTestCase{int(0), false, true, "which is not comparable"},
+ gtTestCase{int8(0), false, true, "which is not comparable"},
+ gtTestCase{int16(0), false, true, "which is not comparable"},
+ gtTestCase{int32(0), false, true, "which is not comparable"},
+ gtTestCase{int64(0), false, true, "which is not comparable"},
+ gtTestCase{uint(0), false, true, "which is not comparable"},
+ gtTestCase{uint8(0), false, true, "which is not comparable"},
+ gtTestCase{uint16(0), false, true, "which is not comparable"},
+ gtTestCase{uint32(0), false, true, "which is not comparable"},
+ gtTestCase{uint64(0), false, true, "which is not comparable"},
+ gtTestCase{uintptr(17), false, true, "which is not comparable"},
+ gtTestCase{float32(0), false, true, "which is not comparable"},
+ gtTestCase{float64(0), false, true, "which is not comparable"},
+ gtTestCase{complex64(-151), false, true, "which is not comparable"},
+ gtTestCase{complex128(-151), false, true, "which is not comparable"},
+ gtTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ gtTestCase{make(chan int), false, true, "which is not comparable"},
+ gtTestCase{func() {}, false, true, "which is not comparable"},
+ gtTestCase{map[int]int{}, false, true, "which is not comparable"},
+ gtTestCase{&gtTestCase{}, false, true, "which is not comparable"},
+ gtTestCase{make([]int, 0), false, true, "which is not comparable"},
+ gtTestCase{gtTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) BadArgument() {
+ panicked := false
+
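+ // Deferred functions run LIFO: the recover() in the second defer fires
+ // first, swallowing the expected panic; the first defer then asserts that
+ // the panic actually occurred.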
+ defer func() {
+ ExpectThat(panicked, Equals(true))
+ }()
+
+ defer func() {
+ if r := recover(); r != nil {
+ panicked = true
+ }
+ }()
+
+ GreaterThan(complex128(0))
+}
+
+////////////////////////////////////////////////////////////////////////
+// Integer literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterThanTest) NegativeIntegerLiteral() {
+ matcher := GreaterThan(-150)
+ desc := matcher.Description()
+ expectedDesc := "greater than -150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-(1 << 30), false, false, ""},
+ gtTestCase{-151, false, false, ""},
+ gtTestCase{-150, false, false, ""},
+ gtTestCase{-149, true, false, ""},
+ gtTestCase{0, true, false, ""},
+ gtTestCase{17, true, false, ""},
+
+ gtTestCase{int(-(1 << 30)), false, false, ""},
+ gtTestCase{int(-151), false, false, ""},
+ gtTestCase{int(-150), false, false, ""},
+ gtTestCase{int(-149), true, false, ""},
+ gtTestCase{int(0), true, false, ""},
+ gtTestCase{int(17), true, false, ""},
+
+ gtTestCase{int8(-127), true, false, ""},
+ gtTestCase{int8(0), true, false, ""},
+ gtTestCase{int8(17), true, false, ""},
+
+ gtTestCase{int16(-(1 << 14)), false, false, ""},
+ gtTestCase{int16(-151), false, false, ""},
+ gtTestCase{int16(-150), false, false, ""},
+ gtTestCase{int16(-149), true, false, ""},
+ gtTestCase{int16(0), true, false, ""},
+ gtTestCase{int16(17), true, false, ""},
+
+ gtTestCase{int32(-(1 << 30)), false, false, ""},
+ gtTestCase{int32(-151), false, false, ""},
+ gtTestCase{int32(-150), false, false, ""},
+ gtTestCase{int32(-149), true, false, ""},
+ gtTestCase{int32(0), true, false, ""},
+ gtTestCase{int32(17), true, false, ""},
+
+ gtTestCase{int64(-(1 << 30)), false, false, ""},
+ gtTestCase{int64(-151), false, false, ""},
+ gtTestCase{int64(-150), false, false, ""},
+ gtTestCase{int64(-149), true, false, ""},
+ gtTestCase{int64(0), true, false, ""},
+ gtTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint((1 << 32) - 151), true, false, ""},
+ gtTestCase{uint(0), true, false, ""},
+ gtTestCase{uint(17), true, false, ""},
+
+ gtTestCase{uint8(0), true, false, ""},
+ gtTestCase{uint8(17), true, false, ""},
+ gtTestCase{uint8(253), true, false, ""},
+
+ gtTestCase{uint16((1 << 16) - 151), true, false, ""},
+ gtTestCase{uint16(0), true, false, ""},
+ gtTestCase{uint16(17), true, false, ""},
+
+ gtTestCase{uint32((1 << 32) - 151), true, false, ""},
+ gtTestCase{uint32(0), true, false, ""},
+ gtTestCase{uint32(17), true, false, ""},
+
+ gtTestCase{uint64((1 << 64) - 151), true, false, ""},
+ gtTestCase{uint64(0), true, false, ""},
+ gtTestCase{uint64(17), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-(1 << 30)), false, false, ""},
+ gtTestCase{float32(-151), false, false, ""},
+ gtTestCase{float32(-150.1), false, false, ""},
+ gtTestCase{float32(-150), false, false, ""},
+ gtTestCase{float32(-149.9), true, false, ""},
+ gtTestCase{float32(0), true, false, ""},
+ gtTestCase{float32(17), true, false, ""},
+ gtTestCase{float32(160), true, false, ""},
+
+ gtTestCase{float64(-(1 << 30)), false, false, ""},
+ gtTestCase{float64(-151), false, false, ""},
+ gtTestCase{float64(-150.1), false, false, ""},
+ gtTestCase{float64(-150), false, false, ""},
+ gtTestCase{float64(-149.9), true, false, ""},
+ gtTestCase{float64(0), true, false, ""},
+ gtTestCase{float64(17), true, false, ""},
+ gtTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) ZeroIntegerLiteral() {
+ matcher := GreaterThan(0)
+ desc := matcher.Description()
+ expectedDesc := "greater than 0"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-(1 << 30), false, false, ""},
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{0, false, false, ""},
+ gtTestCase{1, true, false, ""},
+ gtTestCase{17, true, false, ""},
+ gtTestCase{(1 << 30), true, false, ""},
+
+ gtTestCase{int(-(1 << 30)), false, false, ""},
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(0), false, false, ""},
+ gtTestCase{int(1), true, false, ""},
+ gtTestCase{int(17), true, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(0), false, false, ""},
+ gtTestCase{int8(1), true, false, ""},
+
+ gtTestCase{int16(-(1 << 14)), false, false, ""},
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(0), false, false, ""},
+ gtTestCase{int16(1), true, false, ""},
+ gtTestCase{int16(17), true, false, ""},
+
+ gtTestCase{int32(-(1 << 30)), false, false, ""},
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(0), false, false, ""},
+ gtTestCase{int32(1), true, false, ""},
+ gtTestCase{int32(17), true, false, ""},
+
+ gtTestCase{int64(-(1 << 30)), false, false, ""},
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(0), false, false, ""},
+ gtTestCase{int64(1), true, false, ""},
+ gtTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint((1 << 32) - 1), true, false, ""},
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(1), true, false, ""},
+ gtTestCase{uint(17), true, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(1), true, false, ""},
+ gtTestCase{uint8(17), true, false, ""},
+ gtTestCase{uint8(253), true, false, ""},
+
+ gtTestCase{uint16((1 << 16) - 1), true, false, ""},
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(1), true, false, ""},
+ gtTestCase{uint16(17), true, false, ""},
+
+ gtTestCase{uint32((1 << 32) - 1), true, false, ""},
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(1), true, false, ""},
+ gtTestCase{uint32(17), true, false, ""},
+
+ gtTestCase{uint64((1 << 64) - 1), true, false, ""},
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(1), true, false, ""},
+ gtTestCase{uint64(17), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-(1 << 30)), false, false, ""},
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(-0.1), false, false, ""},
+ gtTestCase{float32(-0.0), false, false, ""},
+ gtTestCase{float32(0), false, false, ""},
+ gtTestCase{float32(0.1), true, false, ""},
+ gtTestCase{float32(17), true, false, ""},
+ gtTestCase{float32(160), true, false, ""},
+
+ gtTestCase{float64(-(1 << 30)), false, false, ""},
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(-0.1), false, false, ""},
+ gtTestCase{float64(-0.0), false, false, ""},
+ gtTestCase{float64(0), false, false, ""},
+ gtTestCase{float64(0.1), true, false, ""},
+ gtTestCase{float64(17), true, false, ""},
+ gtTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) PositiveIntegerLiteral() {
+ matcher := GreaterThan(150)
+ desc := matcher.Description()
+ expectedDesc := "greater than 150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{149, false, false, ""},
+ gtTestCase{150, false, false, ""},
+ gtTestCase{151, true, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(149), false, false, ""},
+ gtTestCase{int(150), false, false, ""},
+ gtTestCase{int(151), true, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(0), false, false, ""},
+ gtTestCase{int8(17), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(149), false, false, ""},
+ gtTestCase{int16(150), false, false, ""},
+ gtTestCase{int16(151), true, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(149), false, false, ""},
+ gtTestCase{int32(150), false, false, ""},
+ gtTestCase{int32(151), true, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(149), false, false, ""},
+ gtTestCase{int64(150), false, false, ""},
+ gtTestCase{int64(151), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(149), false, false, ""},
+ gtTestCase{uint(150), false, false, ""},
+ gtTestCase{uint(151), true, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(127), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(149), false, false, ""},
+ gtTestCase{uint16(150), false, false, ""},
+ gtTestCase{uint16(151), true, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(149), false, false, ""},
+ gtTestCase{uint32(150), false, false, ""},
+ gtTestCase{uint32(151), true, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(149), false, false, ""},
+ gtTestCase{uint64(150), false, false, ""},
+ gtTestCase{uint64(151), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(149), false, false, ""},
+ gtTestCase{float32(149.9), false, false, ""},
+ gtTestCase{float32(150), false, false, ""},
+ gtTestCase{float32(150.1), true, false, ""},
+ gtTestCase{float32(151), true, false, ""},
+
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(149), false, false, ""},
+ gtTestCase{float64(149.9), false, false, ""},
+ gtTestCase{float64(150), false, false, ""},
+ gtTestCase{float64(150.1), true, false, ""},
+ gtTestCase{float64(151), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Float literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterThanTest) NegativeFloatLiteral() {
+ matcher := GreaterThan(-150.1)
+ desc := matcher.Description()
+ expectedDesc := "greater than -150.1"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-(1 << 30), false, false, ""},
+ gtTestCase{-151, false, false, ""},
+ gtTestCase{-150.1, false, false, ""},
+ gtTestCase{-150, true, false, ""},
+ gtTestCase{-149, true, false, ""},
+ gtTestCase{0, true, false, ""},
+ gtTestCase{17, true, false, ""},
+
+ gtTestCase{int(-(1 << 30)), false, false, ""},
+ gtTestCase{int(-151), false, false, ""},
+ gtTestCase{int(-150), true, false, ""},
+ gtTestCase{int(-149), true, false, ""},
+ gtTestCase{int(0), true, false, ""},
+ gtTestCase{int(17), true, false, ""},
+
+ gtTestCase{int8(-127), true, false, ""},
+ gtTestCase{int8(0), true, false, ""},
+ gtTestCase{int8(17), true, false, ""},
+
+ gtTestCase{int16(-(1 << 14)), false, false, ""},
+ gtTestCase{int16(-151), false, false, ""},
+ gtTestCase{int16(-150), true, false, ""},
+ gtTestCase{int16(-149), true, false, ""},
+ gtTestCase{int16(0), true, false, ""},
+ gtTestCase{int16(17), true, false, ""},
+
+ gtTestCase{int32(-(1 << 30)), false, false, ""},
+ gtTestCase{int32(-151), false, false, ""},
+ gtTestCase{int32(-150), true, false, ""},
+ gtTestCase{int32(-149), true, false, ""},
+ gtTestCase{int32(0), true, false, ""},
+ gtTestCase{int32(17), true, false, ""},
+
+ gtTestCase{int64(-(1 << 30)), false, false, ""},
+ gtTestCase{int64(-151), false, false, ""},
+ gtTestCase{int64(-150), true, false, ""},
+ gtTestCase{int64(-149), true, false, ""},
+ gtTestCase{int64(0), true, false, ""},
+ gtTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint((1 << 32) - 151), true, false, ""},
+ gtTestCase{uint(0), true, false, ""},
+ gtTestCase{uint(17), true, false, ""},
+
+ gtTestCase{uint8(0), true, false, ""},
+ gtTestCase{uint8(17), true, false, ""},
+ gtTestCase{uint8(253), true, false, ""},
+
+ gtTestCase{uint16((1 << 16) - 151), true, false, ""},
+ gtTestCase{uint16(0), true, false, ""},
+ gtTestCase{uint16(17), true, false, ""},
+
+ gtTestCase{uint32((1 << 32) - 151), true, false, ""},
+ gtTestCase{uint32(0), true, false, ""},
+ gtTestCase{uint32(17), true, false, ""},
+
+ gtTestCase{uint64((1 << 64) - 151), true, false, ""},
+ gtTestCase{uint64(0), true, false, ""},
+ gtTestCase{uint64(17), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-(1 << 30)), false, false, ""},
+ gtTestCase{float32(-151), false, false, ""},
+ gtTestCase{float32(-150.2), false, false, ""},
+ gtTestCase{float32(-150.1), false, false, ""},
+ gtTestCase{float32(-150), true, false, ""},
+ gtTestCase{float32(0), true, false, ""},
+ gtTestCase{float32(17), true, false, ""},
+ gtTestCase{float32(160), true, false, ""},
+
+ gtTestCase{float64(-(1 << 30)), false, false, ""},
+ gtTestCase{float64(-151), false, false, ""},
+ gtTestCase{float64(-150.2), false, false, ""},
+ gtTestCase{float64(-150.1), false, false, ""},
+ gtTestCase{float64(-150), true, false, ""},
+ gtTestCase{float64(0), true, false, ""},
+ gtTestCase{float64(17), true, false, ""},
+ gtTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) PositiveFloatLiteral() {
+ matcher := GreaterThan(149.9)
+ desc := matcher.Description()
+ expectedDesc := "greater than 149.9"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{149, false, false, ""},
+ gtTestCase{149.9, false, false, ""},
+ gtTestCase{150, true, false, ""},
+ gtTestCase{151, true, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(149), false, false, ""},
+ gtTestCase{int(150), true, false, ""},
+ gtTestCase{int(151), true, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(0), false, false, ""},
+ gtTestCase{int8(17), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(149), false, false, ""},
+ gtTestCase{int16(150), true, false, ""},
+ gtTestCase{int16(151), true, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(149), false, false, ""},
+ gtTestCase{int32(150), true, false, ""},
+ gtTestCase{int32(151), true, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(149), false, false, ""},
+ gtTestCase{int64(150), true, false, ""},
+ gtTestCase{int64(151), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(149), false, false, ""},
+ gtTestCase{uint(150), true, false, ""},
+ gtTestCase{uint(151), true, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(127), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(149), false, false, ""},
+ gtTestCase{uint16(150), true, false, ""},
+ gtTestCase{uint16(151), true, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(149), false, false, ""},
+ gtTestCase{uint32(150), true, false, ""},
+ gtTestCase{uint32(151), true, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(149), false, false, ""},
+ gtTestCase{uint64(150), true, false, ""},
+ gtTestCase{uint64(151), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(149), false, false, ""},
+ gtTestCase{float32(149.8), false, false, ""},
+ gtTestCase{float32(149.9), false, false, ""},
+ gtTestCase{float32(150), true, false, ""},
+ gtTestCase{float32(151), true, false, ""},
+
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(149), false, false, ""},
+ gtTestCase{float64(149.8), false, false, ""},
+ gtTestCase{float64(149.9), false, false, ""},
+ gtTestCase{float64(150), true, false, ""},
+ gtTestCase{float64(151), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Subtle cases
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterThanTest) Int64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterThan(int64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{kTwoTo25 + 0, false, false, ""},
+ gtTestCase{kTwoTo25 + 1, false, false, ""},
+ gtTestCase{kTwoTo25 + 2, true, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(0), false, false, ""},
+ gtTestCase{int16(32767), false, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 2), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(255), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(65535), false, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ gtTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) Int64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterThan(int64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{1 << 30, false, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(math.MaxInt32), false, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(0), false, false, ""},
+ gtTestCase{int16(32767), false, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(math.MaxInt32), false, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 2), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(math.MaxUint32), false, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(255), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(65535), false, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(math.MaxUint32), false, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) Uint64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterThan(uint64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{kTwoTo25 + 0, false, false, ""},
+ gtTestCase{kTwoTo25 + 1, false, false, ""},
+ gtTestCase{kTwoTo25 + 2, true, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(0), false, false, ""},
+ gtTestCase{int16(32767), false, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 2), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(255), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(65535), false, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ gtTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) Uint64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterThan(uint64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{1 << 30, false, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(math.MaxInt32), false, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(0), false, false, ""},
+ gtTestCase{int16(32767), false, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(math.MaxInt32), false, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 2), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(math.MaxUint32), false, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(255), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(65535), false, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(math.MaxUint32), false, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) Float32AboveExactIntegerRange() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterThan(float32(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 3.3554432e+07"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{int64(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 3), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 3), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) Float64AboveExactIntegerRange() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterThan(float64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 1.8014398509481984e+16"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 - 2), false, false, ""},
+ gtTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 2), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 3), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 - 2), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 3), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// String literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterThanTest) EmptyString() {
+ matcher := GreaterThan("")
+ desc := matcher.Description()
+ expectedDesc := "greater than \"\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ gtTestCase{"", false, false, ""},
+ gtTestCase{"\x00", true, false, ""},
+ gtTestCase{"a", true, false, ""},
+ gtTestCase{"foo", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) SingleNullByte() {
+ matcher := GreaterThan("\x00")
+ desc := matcher.Description()
+ expectedDesc := "greater than \"\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ gtTestCase{"", false, false, ""},
+ gtTestCase{"\x00", false, false, ""},
+ gtTestCase{"\x00\x00", true, false, ""},
+ gtTestCase{"a", true, false, ""},
+ gtTestCase{"foo", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) LongerString() {
+ matcher := GreaterThan("foo\x00")
+ desc := matcher.Description()
+ expectedDesc := "greater than \"foo\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ gtTestCase{"", false, false, ""},
+ gtTestCase{"\x00", false, false, ""},
+ gtTestCase{"bar", false, false, ""},
+ gtTestCase{"foo", false, false, ""},
+ gtTestCase{"foo\x00", false, false, ""},
+ gtTestCase{"foo\x00\x00", true, false, ""},
+ gtTestCase{"fooa", true, false, ""},
+ gtTestCase{"qux", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_same_type_as.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_same_type_as.go
new file mode 100644
index 00000000000..3b286f73218
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_same_type_as.go
@@ -0,0 +1,37 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// HasSameTypeAs returns a matcher that matches values with exactly the same
+// type as the supplied prototype.
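+//
+// For example (illustrative):
+//
+//     ExpectThat(int32(1), HasSameTypeAs(int32(0))) // matches
+//     ExpectThat(int64(1), HasSameTypeAs(int32(0))) // fails: "which has type int64"
+//     ExpectThat(nil, HasSameTypeAs(nil))           // matches; both are <nil>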
+func HasSameTypeAs(p interface{}) Matcher {
+ expected := reflect.TypeOf(p)
+ pred := func(c interface{}) error {
+ actual := reflect.TypeOf(c)
+ if actual != expected {
+ return fmt.Errorf("which has type %v", actual)
+ }
+
+ return nil
+ }
+
+ return NewMatcher(pred, fmt.Sprintf("has type %v", expected))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_same_type_as_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_same_type_as_test.go
new file mode 100644
index 00000000000..2030d5f9b1a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_same_type_as_test.go
@@ -0,0 +1,181 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "io"
+ "testing"
+
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+)
+
+func TestHasSameTypeAs(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Boilerplate
+////////////////////////////////////////////////////////////////////////
+
+type HasSameTypeAsTest struct {
+}
+
+func init() { RegisterTestSuite(&HasSameTypeAsTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *HasSameTypeAsTest) CandidateIsLiteralNil() {
+ matcher := HasSameTypeAs(nil)
+ var err error
+
+ // Description
+ ExpectEq("has type <nil>", matcher.Description())
+
+ // Literal nil
+ err = matcher.Matches(nil)
+ ExpectEq(nil, err)
+
+ // nil in interface variable
+ var r io.Reader
+ err = matcher.Matches(r)
+ ExpectEq(nil, err)
+
+ // int
+ err = matcher.Matches(17)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type int")))
+
+ // string
+ err = matcher.Matches("")
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type string")))
+
+ // nil map
+ var m map[string]string
+ err = matcher.Matches(m)
+
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type map[string]string")))
+
+ // Non-nil map
+ m = make(map[string]string)
+ err = matcher.Matches(m)
+
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type map[string]string")))
+}
+
+func (t *HasSameTypeAsTest) CandidateIsNilMap() {
+ var m map[string]string
+ matcher := HasSameTypeAs(m)
+ var err error
+
+ // Description
+ ExpectEq("has type map[string]string", matcher.Description())
+
+ // nil map
+ m = nil
+ err = matcher.Matches(m)
+ ExpectEq(nil, err)
+
+ // Non-nil map
+ m = make(map[string]string)
+ err = matcher.Matches(m)
+ ExpectEq(nil, err)
+
+ // Literal nil
+ err = matcher.Matches(nil)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type <nil>")))
+
+ // int
+ err = matcher.Matches(17)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type int")))
+
+ // string
+ err = matcher.Matches("")
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type string")))
+}
+
+func (t *HasSameTypeAsTest) CandidateIsNilInInterfaceVariable() {
+ var r io.Reader
+ matcher := HasSameTypeAs(r)
+ var err error
+
+ // Description
+ ExpectEq("has type <nil>", matcher.Description())
+
+ // nil in interface variable
+ r = nil
+ err = matcher.Matches(r)
+ ExpectEq(nil, err)
+
+ // Literal nil
+ err = matcher.Matches(nil)
+ ExpectEq(nil, err)
+
+ // int
+ err = matcher.Matches(17)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type int")))
+}
+
+func (t *HasSameTypeAsTest) CandidateIsString() {
+ matcher := HasSameTypeAs("")
+ var err error
+
+ // Description
+ ExpectEq("has type string", matcher.Description())
+
+ // string
+ err = matcher.Matches("taco")
+ ExpectEq(nil, err)
+
+ // string alias
+ type Foo string
+ err = matcher.Matches(Foo("taco"))
+ ExpectThat(err, Error(MatchesRegexp("which has type .*Foo")))
+
+ // Literal nil
+ err = matcher.Matches(nil)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type <nil>")))
+
+ // int
+ err = matcher.Matches(17)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type int")))
+}
+
+func (t *HasSameTypeAsTest) CandidateIsStringAlias() {
+ type Foo string
+ matcher := HasSameTypeAs(Foo(""))
+ var err error
+
+ // Description
+ ExpectThat(matcher.Description(), MatchesRegexp("has type .*Foo"))
+
+ // string alias
+ err = matcher.Matches(Foo("taco"))
+ ExpectEq(nil, err)
+
+ // string
+ err = matcher.Matches("taco")
+ ExpectThat(err, Error(Equals("which has type string")))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_substr.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_substr.go
new file mode 100644
index 00000000000..bf5bd6ae6d3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_substr.go
@@ -0,0 +1,46 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// HasSubstr returns a matcher that matches strings containing s as a
+// substring.
+func HasSubstr(s string) Matcher {
+ return NewMatcher(
+ func(c interface{}) error { return hasSubstr(s, c) },
+ fmt.Sprintf("has substring \"%s\"", s))
+}
+
+func hasSubstr(needle string, c interface{}) error {
+ v := reflect.ValueOf(c)
+ if v.Kind() != reflect.String {
+ return NewFatalError("which is not a string")
+ }
+
+ // Perform the substring search.
+ haystack := v.String()
+ if strings.Contains(haystack, needle) {
+ return nil
+ }
+
+ return errors.New("")
+}
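
As a usage note, a minimal sketch (not part of this commit) of the three-way error protocol hasSubstr implements: nil means the candidate matches, a *FatalError means the candidate can never match any string (wrong type), and a plain error means it simply doesn't contain the substring.

package main

import (
	"fmt"

	"github.com/jacobsa/oglematchers"
)

func main() {
	m := oglematchers.HasSubstr("taco")

	fmt.Println(m.Matches("burritos and tacos")) // <nil>: match
	fmt.Println(m.Matches("burrito"))            // non-nil, non-fatal: no match
	fmt.Println(m.Matches(17))                   // *FatalError: "which is not a string"
}
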
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_substr_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_substr_test.go
new file mode 100644
index 00000000000..e36dcd8f03f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/has_substr_test.go
@@ -0,0 +1,93 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type HasSubstrTest struct {
+}
+
+func init() { RegisterTestSuite(&HasSubstrTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *HasSubstrTest) Description() {
+ matcher := HasSubstr("taco")
+ ExpectThat(matcher.Description(), Equals("has substring \"taco\""))
+}
+
+func (t *HasSubstrTest) CandidateIsNil() {
+ matcher := HasSubstr("")
+ err := matcher.Matches(nil)
+
+ ExpectThat(err, Error(Equals("which is not a string")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *HasSubstrTest) CandidateIsInteger() {
+ matcher := HasSubstr("")
+ err := matcher.Matches(17)
+
+ ExpectThat(err, Error(Equals("which is not a string")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *HasSubstrTest) CandidateIsByteSlice() {
+ matcher := HasSubstr("")
+ err := matcher.Matches([]byte{17})
+
+ ExpectThat(err, Error(Equals("which is not a string")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *HasSubstrTest) CandidateDoesntHaveSubstring() {
+ matcher := HasSubstr("taco")
+ err := matcher.Matches("tac")
+
+ ExpectThat(err, Error(Equals("")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *HasSubstrTest) CandidateEqualsArg() {
+ matcher := HasSubstr("taco")
+ err := matcher.Matches("taco")
+
+ ExpectThat(err, Equals(nil))
+}
+
+func (t *HasSubstrTest) CandidateHasProperSubstring() {
+ matcher := HasSubstr("taco")
+ err := matcher.Matches("burritos and tacos")
+
+ ExpectThat(err, Equals(nil))
+}
+
+func (t *HasSubstrTest) EmptyStringIsAlwaysSubString() {
+ matcher := HasSubstr("")
+ err := matcher.Matches("asdf")
+
+ ExpectThat(err, Equals(nil))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/identical_to.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/identical_to.go
new file mode 100644
index 00000000000..ae6460ed966
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/identical_to.go
@@ -0,0 +1,134 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// Is the type comparable according to the definition here?
+//
+// http://weekly.golang.org/doc/go_spec.html#Comparison_operators
+//
+func isComparable(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Array:
+ return isComparable(t.Elem())
+
+ case reflect.Struct:
+ for i := 0; i < t.NumField(); i++ {
+ if !isComparable(t.Field(i).Type) {
+ return false
+ }
+ }
+
+ return true
+
+ case reflect.Slice, reflect.Map, reflect.Func:
+ return false
+ }
+
+ return true
+}
+
+// Should the supplied type be allowed as an argument to IdenticalTo?
+func isLegalForIdenticalTo(t reflect.Type) (bool, error) {
+ // Allow the zero type.
+ if t == nil {
+ return true, nil
+ }
+
+ // Reference types are always okay; we compare pointers.
+ switch t.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Func, reflect.Chan:
+ return true, nil
+ }
+
+ // Reject other non-comparable types.
+ if !isComparable(t) {
+		return false, fmt.Errorf("%v is not comparable", t)
+ }
+
+ return true, nil
+}
+
+// IdenticalTo(x) returns a matcher that matches values v with type identical
+// to x such that:
+//
+// 1. If v and x are of a reference type (slice, map, function, channel), then
+// they are either both nil or are references to the same object.
+//
+// 2. Otherwise, if v and x are not of a reference type but have a valid type,
+// then v == x.
+//
+// If v and x are both the invalid type (which results from the predeclared nil
+// value, or from nil interface variables), then the matcher is satisfied.
+//
+// This function will panic if x is of a value type that is not comparable. For
+// example, x cannot be an array of functions.
+func IdenticalTo(x interface{}) Matcher {
+ t := reflect.TypeOf(x)
+
+ // Reject illegal arguments.
+ if ok, err := isLegalForIdenticalTo(t); !ok {
+ panic("IdenticalTo: " + err.Error())
+ }
+
+ return &identicalToMatcher{x}
+}
+
+type identicalToMatcher struct {
+ x interface{}
+}
+
+func (m *identicalToMatcher) Description() string {
+ t := reflect.TypeOf(m.x)
+ return fmt.Sprintf("identical to <%v> %v", t, m.x)
+}
+
+func (m *identicalToMatcher) Matches(c interface{}) error {
+ // Make sure the candidate's type is correct.
+ t := reflect.TypeOf(m.x)
+ if ct := reflect.TypeOf(c); t != ct {
+ return NewFatalError(fmt.Sprintf("which is of type %v", ct))
+ }
+
+ // Special case: two values of the invalid type are always identical.
+ if t == nil {
+ return nil
+ }
+
+ // Handle reference types.
+ switch t.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Func, reflect.Chan:
+ xv := reflect.ValueOf(m.x)
+ cv := reflect.ValueOf(c)
+ if xv.Pointer() == cv.Pointer() {
+ return nil
+ }
+
+ return errors.New("which is not an identical reference")
+ }
+
+ // Are the values equal?
+ if m.x == c {
+ return nil
+ }
+
+ return errors.New("")
+}
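
The distinction the doc comment draws — pointer identity for reference types, plain == for comparable value types — is easy to see concretely. A minimal sketch (not part of this commit), using the matcher exactly as defined above:

package main

import (
	"fmt"

	"github.com/jacobsa/oglematchers"
)

func main() {
	s := make([]int, 1)

	// Slices are compared by reference: the same slice matches, while an
	// equal-but-distinct slice does not.
	m := oglematchers.IdenticalTo(s)
	fmt.Println(m.Matches(s))              // <nil>: same backing array
	fmt.Println(m.Matches(make([]int, 1))) // "which is not an identical reference"

	// Comparable value types fall through to ==.
	fmt.Println(oglematchers.IdenticalTo(17).Matches(17)) // <nil>
}
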
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/identical_to_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/identical_to_test.go
new file mode 100644
index 00000000000..3e68652b641
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/identical_to_test.go
@@ -0,0 +1,849 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+	"fmt"
+	"io"
+	"unsafe"
+
+	. "github.com/jacobsa/oglematchers"
+	. "github.com/jacobsa/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type IdenticalToTest struct {
+}
+
+func init() { RegisterTestSuite(&IdenticalToTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *IdenticalToTest) TypesNotIdentical() {
+ var m Matcher
+ var err error
+
+ type intAlias int
+
+ // Type alias expected value
+ m = IdenticalTo(intAlias(17))
+ err = m.Matches(int(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int")))
+
+ // Type alias candidate
+ m = IdenticalTo(int(17))
+ err = m.Matches(intAlias(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.intAlias")))
+
+ // int and uint
+ m = IdenticalTo(int(17))
+ err = m.Matches(uint(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type uint")))
+}
+
+func (t *IdenticalToTest) PredeclaredNilIdentifier() {
+ var m Matcher
+ var err error
+
+ // Nil literal
+ m = IdenticalTo(nil)
+ err = m.Matches(nil)
+ ExpectEq(nil, err)
+
+ // Zero interface var (which is the same as above since IdenticalTo takes an
+ // interface{} as an arg)
+ var nilReader io.Reader
+ var nilWriter io.Writer
+
+ m = IdenticalTo(nilReader)
+ err = m.Matches(nilWriter)
+ ExpectEq(nil, err)
+
+ // Typed nil value.
+ m = IdenticalTo(nil)
+ err = m.Matches((chan int)(nil))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type chan int")))
+
+ // Non-nil value.
+ m = IdenticalTo(nil)
+ err = m.Matches("taco")
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type string")))
+}
+
+func (t *IdenticalToTest) Slices() {
+ var m Matcher
+ var err error
+
+ // Nil expected value
+ m = IdenticalTo(([]int)(nil))
+ ExpectEq("identical to <[]int> []", m.Description())
+
+ err = m.Matches(([]int)(nil))
+ ExpectEq(nil, err)
+
+ err = m.Matches([]int{})
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+
+ // Non-nil expected value
+ o1 := make([]int, 1)
+ o2 := make([]int, 1)
+ m = IdenticalTo(o1)
+ ExpectEq(fmt.Sprintf("identical to <[]int> %v", o1), m.Description())
+
+ err = m.Matches(o1)
+ ExpectEq(nil, err)
+
+ err = m.Matches(o2)
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+}
+
+func (t *IdenticalToTest) Maps() {
+ var m Matcher
+ var err error
+
+ // Nil expected value
+ m = IdenticalTo((map[int]int)(nil))
+ ExpectEq("identical to <map[int]int> map[]", m.Description())
+
+ err = m.Matches((map[int]int)(nil))
+ ExpectEq(nil, err)
+
+ err = m.Matches(map[int]int{})
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+
+ // Non-nil expected value
+ o1 := map[int]int{}
+ o2 := map[int]int{}
+ m = IdenticalTo(o1)
+ ExpectEq(fmt.Sprintf("identical to <map[int]int> %v", o1), m.Description())
+
+ err = m.Matches(o1)
+ ExpectEq(nil, err)
+
+ err = m.Matches(o2)
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+}
+
+func (t *IdenticalToTest) Functions() {
+ var m Matcher
+ var err error
+
+ // Nil expected value
+ m = IdenticalTo((func())(nil))
+ ExpectEq("identical to <func()> <nil>", m.Description())
+
+ err = m.Matches((func())(nil))
+ ExpectEq(nil, err)
+
+	err = m.Matches(func() {})
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+
+ // Non-nil expected value
+ o1 := func() {}
+ o2 := func() {}
+ m = IdenticalTo(o1)
+ ExpectEq(fmt.Sprintf("identical to <func()> %v", o1), m.Description())
+
+ err = m.Matches(o1)
+ ExpectEq(nil, err)
+
+ err = m.Matches(o2)
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+}
+
+func (t *IdenticalToTest) Channels() {
+ var m Matcher
+ var err error
+
+ // Nil expected value
+ m = IdenticalTo((chan int)(nil))
+ ExpectEq("identical to <chan int> <nil>", m.Description())
+
+ err = m.Matches((chan int)(nil))
+ ExpectEq(nil, err)
+
+ err = m.Matches(make(chan int))
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+
+ // Non-nil expected value
+ o1 := make(chan int)
+ o2 := make(chan int)
+ m = IdenticalTo(o1)
+ ExpectEq(fmt.Sprintf("identical to <chan int> %v", o1), m.Description())
+
+ err = m.Matches(o1)
+ ExpectEq(nil, err)
+
+ err = m.Matches(o2)
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+}
+
+func (t *IdenticalToTest) Bools() {
+ var m Matcher
+ var err error
+
+ // false
+ m = IdenticalTo(false)
+ ExpectEq("identical to <bool> false", m.Description())
+
+ err = m.Matches(false)
+ ExpectEq(nil, err)
+
+ err = m.Matches(true)
+ ExpectThat(err, Error(Equals("")))
+
+ // true
+ m = IdenticalTo(true)
+ ExpectEq("identical to <bool> true", m.Description())
+
+ err = m.Matches(false)
+ ExpectThat(err, Error(Equals("")))
+
+ err = m.Matches(true)
+ ExpectEq(nil, err)
+}
+
+func (t *IdenticalToTest) Ints() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(int(17))
+ ExpectEq("identical to <int> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(int(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType int
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Int8s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(int8(17))
+ ExpectEq("identical to <int8> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(int8(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType int8
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Int16s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(int16(17))
+ ExpectEq("identical to <int16> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(int16(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType int16
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Int32s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(int32(17))
+ ExpectEq("identical to <int32> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(int32(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType int32
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int16(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int16")))
+}
+
+func (t *IdenticalToTest) Int64s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(int64(17))
+ ExpectEq("identical to <int64> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(int64(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType int64
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uints() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uint(17))
+ ExpectEq("identical to <uint> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uint(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uint
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uint8s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uint8(17))
+ ExpectEq("identical to <uint8> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uint8(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uint8
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uint16s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uint16(17))
+ ExpectEq("identical to <uint16> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uint16(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uint16
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uint32s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uint32(17))
+ ExpectEq("identical to <uint32> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uint32(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uint32
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uint64s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uint64(17))
+ ExpectEq("identical to <uint64> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uint64(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uint64
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uintptrs() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uintptr(17))
+ ExpectEq("identical to <uintptr> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uintptr(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uintptr
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Float32s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(float32(17))
+ ExpectEq("identical to <float32> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(float32(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType float32
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Float64s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(float64(17))
+ ExpectEq("identical to <float64> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(float64(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType float64
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Complex64s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(complex64(17))
+ ExpectEq("identical to <complex64> (17+0i)", m.Description())
+
+ // Identical value
+ err = m.Matches(complex64(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType complex64
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Complex128s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(complex128(17))
+ ExpectEq("identical to <complex128> (17+0i)", m.Description())
+
+ // Identical value
+ err = m.Matches(complex128(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType complex128
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) EmptyComparableArrays() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo([0]int{})
+ ExpectEq("identical to <[0]int> []", m.Description())
+
+ // Identical value
+ err = m.Matches([0]int{})
+ ExpectEq(nil, err)
+
+ // Length too long
+ err = m.Matches([1]int{17})
+ ExpectThat(err, Error(Equals("which is of type [1]int")))
+
+ // Element type alias
+ type myType int
+ err = m.Matches([0]myType{})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type [0]oglematchers_test.myType")))
+
+ // Completely wrong element type
+ err = m.Matches([0]int32{})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type [0]int32")))
+}
+
+func (t *IdenticalToTest) NonEmptyComparableArrays() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo([2]int{17, 19})
+ ExpectEq("identical to <[2]int> [17 19]", m.Description())
+
+ // Identical value
+ err = m.Matches([2]int{17, 19})
+ ExpectEq(nil, err)
+
+ // Length too short
+ err = m.Matches([1]int{17})
+ ExpectThat(err, Error(Equals("which is of type [1]int")))
+
+ // Length too long
+ err = m.Matches([3]int{17, 19, 23})
+ ExpectThat(err, Error(Equals("which is of type [3]int")))
+
+ // First element different
+ err = m.Matches([2]int{13, 19})
+ ExpectThat(err, Error(Equals("")))
+
+ // Second element different
+ err = m.Matches([2]int{17, 23})
+ ExpectThat(err, Error(Equals("")))
+
+ // Element type alias
+ type myType int
+ err = m.Matches([2]myType{17, 19})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type [2]oglematchers_test.myType")))
+
+ // Completely wrong element type
+ err = m.Matches([2]int32{17, 19})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type [2]int32")))
+}
+
+func (t *IdenticalToTest) NonEmptyArraysOfComparableArrays() {
+ var m Matcher
+ var err error
+
+ x := [2][2]int{
+ [2]int{17, 19},
+ [2]int{23, 29},
+ }
+ m = IdenticalTo(x)
+ ExpectEq("identical to <[2][2]int> [[17 19] [23 29]]", m.Description())
+
+ // Identical value
+ err = m.Matches([2][2]int{[2]int{17, 19}, [2]int{23, 29}})
+ ExpectEq(nil, err)
+
+ // Outer length too short
+ err = m.Matches([1][2]int{[2]int{17, 19}})
+ ExpectThat(err, Error(Equals("which is of type [1][2]int")))
+
+ // Inner length too short
+ err = m.Matches([2][1]int{[1]int{17}, [1]int{23}})
+ ExpectThat(err, Error(Equals("which is of type [2][1]int")))
+
+ // First element different
+ err = m.Matches([2][2]int{[2]int{13, 19}, [2]int{23, 29}})
+ ExpectThat(err, Error(Equals("")))
+
+ // Element type alias
+ type myType int
+ err = m.Matches([2][2]myType{[2]myType{17, 19}, [2]myType{23, 29}})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type [2][2]oglematchers_test.myType")))
+}
+
+func (t *IdenticalToTest) NonComparableArrays() {
+ x := [0]func(){}
+ f := func() { IdenticalTo(x) }
+ ExpectThat(f, Panics(HasSubstr("is not comparable")))
+}
+
+func (t *IdenticalToTest) ArraysOfNonComparableArrays() {
+ x := [0][0]func(){}
+ f := func() { IdenticalTo(x) }
+ ExpectThat(f, Panics(HasSubstr("is not comparable")))
+}
+
+func (t *IdenticalToTest) Strings() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo("taco")
+ ExpectEq("identical to <string> taco", m.Description())
+
+ // Identical value
+ err = m.Matches("ta" + "co")
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType string
+ err = m.Matches(myType("taco"))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) ComparableStructs() {
+ var m Matcher
+ var err error
+
+ type subStruct struct {
+ i int
+ }
+
+ type myStruct struct {
+ u uint
+ s subStruct
+ }
+
+ x := myStruct{17, subStruct{19}}
+ m = IdenticalTo(x)
+ ExpectEq("identical to <oglematchers_test.myStruct> {17 {19}}", m.Description())
+
+ // Identical value
+ err = m.Matches(myStruct{17, subStruct{19}})
+ ExpectEq(nil, err)
+
+ // Wrong outer field
+ err = m.Matches(myStruct{13, subStruct{19}})
+ ExpectThat(err, Error(Equals("")))
+
+ // Wrong inner field
+ err = m.Matches(myStruct{17, subStruct{23}})
+ ExpectThat(err, Error(Equals("")))
+
+ // Type alias
+ type myType myStruct
+ err = m.Matches(myType{17, subStruct{19}})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) NonComparableStructs() {
+ type subStruct struct {
+ s []int
+ }
+
+ type myStruct struct {
+ u uint
+ s subStruct
+ }
+
+ x := myStruct{17, subStruct{[]int{19}}}
+ f := func() { IdenticalTo(x) }
+ ExpectThat(f, Panics(AllOf(HasSubstr("IdenticalTo"), HasSubstr("comparable"))))
+}
+
+func (t *IdenticalToTest) NilUnsafePointer() {
+ var m Matcher
+ var err error
+
+ x := unsafe.Pointer(nil)
+ m = IdenticalTo(x)
+ ExpectEq(fmt.Sprintf("identical to <unsafe.Pointer> %v", x), m.Description())
+
+ // Identical value
+ err = m.Matches(unsafe.Pointer(nil))
+ ExpectEq(nil, err)
+
+ // Wrong value
+ j := 17
+ err = m.Matches(unsafe.Pointer(&j))
+ ExpectThat(err, Error(Equals("")))
+
+ // Type alias
+ type myType unsafe.Pointer
+ err = m.Matches(myType(unsafe.Pointer(nil)))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) NonNilUnsafePointer() {
+ var m Matcher
+ var err error
+
+ i := 17
+ x := unsafe.Pointer(&i)
+ m = IdenticalTo(x)
+ ExpectEq(fmt.Sprintf("identical to <unsafe.Pointer> %v", x), m.Description())
+
+ // Identical value
+ err = m.Matches(unsafe.Pointer(&i))
+ ExpectEq(nil, err)
+
+ // Nil value
+ err = m.Matches(unsafe.Pointer(nil))
+ ExpectThat(err, Error(Equals("")))
+
+ // Wrong value
+ j := 17
+ err = m.Matches(unsafe.Pointer(&j))
+ ExpectThat(err, Error(Equals("")))
+
+ // Type alias
+ type myType unsafe.Pointer
+ err = m.Matches(myType(unsafe.Pointer(&i)))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) IntAlias() {
+ var m Matcher
+ var err error
+
+ type intAlias int
+
+ m = IdenticalTo(intAlias(17))
+ ExpectEq("identical to <oglematchers_test.intAlias> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(intAlias(17))
+ ExpectEq(nil, err)
+
+ // Int
+ err = m.Matches(int(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_or_equal.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_or_equal.go
new file mode 100644
index 00000000000..8402cdeaf09
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_or_equal.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// LessOrEqual returns a matcher that matches integer, floating point, or
+// string values v such that v <= x. Comparison is not defined between numeric
+// and string types, but is defined between all integer and floating point
+// types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// LessOrEqual will panic.
+func LessOrEqual(x interface{}) Matcher {
+ desc := fmt.Sprintf("less than or equal to %v", x)
+
+ // Special case: make it clear that strings are strings.
+ if reflect.TypeOf(x).Kind() == reflect.String {
+ desc = fmt.Sprintf("less than or equal to \"%s\"", x)
+ }
+
+ // Put LessThan last so that its error messages will be used in the event of
+ // failure.
+ return transformDescription(AnyOf(Equals(x), LessThan(x)), desc)
+}
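
Since LessOrEqual is simply AnyOf(Equals(x), LessThan(x)) behind a rewritten description, equality counts as a match and numeric comparison crosses integer and floating point kinds. A minimal sketch (not part of this commit):

package main

import (
	"fmt"

	"github.com/jacobsa/oglematchers"
)

func main() {
	m := oglematchers.LessOrEqual(150)

	fmt.Println(m.Description())           // "less than or equal to 150"
	fmt.Println(m.Matches(150))            // <nil>: equality satisfies the matcher
	fmt.Println(m.Matches(float64(149.9))) // <nil>: mixed numeric kinds compare fine
	fmt.Println(m.Matches(151))            // non-nil: no match
}
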
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_or_equal_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_or_equal_test.go
new file mode 100644
index 00000000000..bdb4a8866fb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_or_equal_test.go
@@ -0,0 +1,1079 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+	"math"
+
+	. "github.com/jacobsa/oglematchers"
+	. "github.com/jacobsa/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type LessOrEqualTest struct {
+}
+
+func init() { RegisterTestSuite(&LessOrEqualTest{}) }
+
+type leTestCase struct {
+ candidate interface{}
+ expectedResult bool
+ shouldBeFatal bool
+ expectedError string
+}
+
+func (t *LessOrEqualTest) checkTestCases(matcher Matcher, cases []leTestCase) {
+ for i, c := range cases {
+ err := matcher.Matches(c.candidate)
+
+ ExpectThat(
+ (err == nil),
+ Equals(c.expectedResult),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ if err == nil {
+ continue
+ }
+
+ _, isFatal := err.(*FatalError)
+ ExpectEq(
+ c.shouldBeFatal,
+ isFatal,
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ ExpectThat(
+ err,
+ Error(Equals(c.expectedError)),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+// Bad candidate types
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessOrEqualTest) IntegerCandidateBadTypes() {
+ matcher := LessOrEqual(int(-150))
+
+ cases := []leTestCase{
+ leTestCase{true, false, true, "which is not comparable"},
+ leTestCase{uintptr(17), false, true, "which is not comparable"},
+ leTestCase{complex64(-151), false, true, "which is not comparable"},
+ leTestCase{complex128(-151), false, true, "which is not comparable"},
+ leTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ leTestCase{make(chan int), false, true, "which is not comparable"},
+ leTestCase{func() {}, false, true, "which is not comparable"},
+ leTestCase{map[int]int{}, false, true, "which is not comparable"},
+ leTestCase{&leTestCase{}, false, true, "which is not comparable"},
+ leTestCase{make([]int, 0), false, true, "which is not comparable"},
+ leTestCase{"-151", false, true, "which is not comparable"},
+ leTestCase{leTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) FloatCandidateBadTypes() {
+ matcher := LessOrEqual(float32(-150))
+
+ cases := []leTestCase{
+ leTestCase{true, false, true, "which is not comparable"},
+ leTestCase{uintptr(17), false, true, "which is not comparable"},
+ leTestCase{complex64(-151), false, true, "which is not comparable"},
+ leTestCase{complex128(-151), false, true, "which is not comparable"},
+ leTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ leTestCase{make(chan int), false, true, "which is not comparable"},
+ leTestCase{func() {}, false, true, "which is not comparable"},
+ leTestCase{map[int]int{}, false, true, "which is not comparable"},
+ leTestCase{&leTestCase{}, false, true, "which is not comparable"},
+ leTestCase{make([]int, 0), false, true, "which is not comparable"},
+ leTestCase{"-151", false, true, "which is not comparable"},
+ leTestCase{leTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) StringCandidateBadTypes() {
+ matcher := LessOrEqual("17")
+
+ cases := []leTestCase{
+ leTestCase{true, false, true, "which is not comparable"},
+ leTestCase{int(0), false, true, "which is not comparable"},
+ leTestCase{int8(0), false, true, "which is not comparable"},
+ leTestCase{int16(0), false, true, "which is not comparable"},
+ leTestCase{int32(0), false, true, "which is not comparable"},
+ leTestCase{int64(0), false, true, "which is not comparable"},
+ leTestCase{uint(0), false, true, "which is not comparable"},
+ leTestCase{uint8(0), false, true, "which is not comparable"},
+ leTestCase{uint16(0), false, true, "which is not comparable"},
+ leTestCase{uint32(0), false, true, "which is not comparable"},
+ leTestCase{uint64(0), false, true, "which is not comparable"},
+ leTestCase{uintptr(17), false, true, "which is not comparable"},
+ leTestCase{float32(0), false, true, "which is not comparable"},
+ leTestCase{float64(0), false, true, "which is not comparable"},
+ leTestCase{complex64(-151), false, true, "which is not comparable"},
+ leTestCase{complex128(-151), false, true, "which is not comparable"},
+ leTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ leTestCase{make(chan int), false, true, "which is not comparable"},
+ leTestCase{func() {}, false, true, "which is not comparable"},
+ leTestCase{map[int]int{}, false, true, "which is not comparable"},
+ leTestCase{&leTestCase{}, false, true, "which is not comparable"},
+ leTestCase{make([]int, 0), false, true, "which is not comparable"},
+ leTestCase{leTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) BadArgument() {
+ panicked := false
+
+ defer func() {
+ ExpectThat(panicked, Equals(true))
+ }()
+
+ defer func() {
+ if r := recover(); r != nil {
+ panicked = true
+ }
+ }()
+
+ LessOrEqual(complex128(0))
+}
+
+////////////////////////////////////////////////////////////////////////
+// Integer literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessOrEqualTest) NegativeIntegerLiteral() {
+ matcher := LessOrEqual(-150)
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to -150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-(1 << 30), true, false, ""},
+ leTestCase{-151, true, false, ""},
+ leTestCase{-150, true, false, ""},
+ leTestCase{-149, false, false, ""},
+ leTestCase{0, false, false, ""},
+ leTestCase{17, false, false, ""},
+
+ leTestCase{int(-(1 << 30)), true, false, ""},
+ leTestCase{int(-151), true, false, ""},
+ leTestCase{int(-150), true, false, ""},
+ leTestCase{int(-149), false, false, ""},
+ leTestCase{int(0), false, false, ""},
+ leTestCase{int(17), false, false, ""},
+
+ leTestCase{int8(-127), false, false, ""},
+ leTestCase{int8(0), false, false, ""},
+ leTestCase{int8(17), false, false, ""},
+
+ leTestCase{int16(-(1 << 14)), true, false, ""},
+ leTestCase{int16(-151), true, false, ""},
+ leTestCase{int16(-150), true, false, ""},
+ leTestCase{int16(-149), false, false, ""},
+ leTestCase{int16(0), false, false, ""},
+ leTestCase{int16(17), false, false, ""},
+
+ leTestCase{int32(-(1 << 30)), true, false, ""},
+ leTestCase{int32(-151), true, false, ""},
+ leTestCase{int32(-150), true, false, ""},
+ leTestCase{int32(-149), false, false, ""},
+ leTestCase{int32(0), false, false, ""},
+ leTestCase{int32(17), false, false, ""},
+
+ leTestCase{int64(-(1 << 30)), true, false, ""},
+ leTestCase{int64(-151), true, false, ""},
+ leTestCase{int64(-150), true, false, ""},
+ leTestCase{int64(-149), false, false, ""},
+ leTestCase{int64(0), false, false, ""},
+ leTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint((1 << 32) - 151), false, false, ""},
+ leTestCase{uint(0), false, false, ""},
+ leTestCase{uint(17), false, false, ""},
+
+ leTestCase{uint8(0), false, false, ""},
+ leTestCase{uint8(17), false, false, ""},
+ leTestCase{uint8(253), false, false, ""},
+
+ leTestCase{uint16((1 << 16) - 151), false, false, ""},
+ leTestCase{uint16(0), false, false, ""},
+ leTestCase{uint16(17), false, false, ""},
+
+ leTestCase{uint32((1 << 32) - 151), false, false, ""},
+ leTestCase{uint32(0), false, false, ""},
+ leTestCase{uint32(17), false, false, ""},
+
+ leTestCase{uint64((1 << 64) - 151), false, false, ""},
+ leTestCase{uint64(0), false, false, ""},
+ leTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-(1 << 30)), true, false, ""},
+ leTestCase{float32(-151), true, false, ""},
+ leTestCase{float32(-150.1), true, false, ""},
+ leTestCase{float32(-150), true, false, ""},
+ leTestCase{float32(-149.9), false, false, ""},
+ leTestCase{float32(0), false, false, ""},
+ leTestCase{float32(17), false, false, ""},
+ leTestCase{float32(160), false, false, ""},
+
+ leTestCase{float64(-(1 << 30)), true, false, ""},
+ leTestCase{float64(-151), true, false, ""},
+ leTestCase{float64(-150.1), true, false, ""},
+ leTestCase{float64(-150), true, false, ""},
+ leTestCase{float64(-149.9), false, false, ""},
+ leTestCase{float64(0), false, false, ""},
+ leTestCase{float64(17), false, false, ""},
+ leTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) ZeroIntegerLiteral() {
+ matcher := LessOrEqual(0)
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 0"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-(1 << 30), true, false, ""},
+ leTestCase{-1, true, false, ""},
+ leTestCase{0, true, false, ""},
+ leTestCase{1, false, false, ""},
+ leTestCase{17, false, false, ""},
+ leTestCase{(1 << 30), false, false, ""},
+
+ leTestCase{int(-(1 << 30)), true, false, ""},
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(0), true, false, ""},
+ leTestCase{int(1), false, false, ""},
+ leTestCase{int(17), false, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(0), true, false, ""},
+ leTestCase{int8(1), false, false, ""},
+
+ leTestCase{int16(-(1 << 14)), true, false, ""},
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(0), true, false, ""},
+ leTestCase{int16(1), false, false, ""},
+ leTestCase{int16(17), false, false, ""},
+
+ leTestCase{int32(-(1 << 30)), true, false, ""},
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(0), true, false, ""},
+ leTestCase{int32(1), false, false, ""},
+ leTestCase{int32(17), false, false, ""},
+
+ leTestCase{int64(-(1 << 30)), true, false, ""},
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(0), true, false, ""},
+ leTestCase{int64(1), false, false, ""},
+ leTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint((1 << 32) - 1), false, false, ""},
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(1), false, false, ""},
+ leTestCase{uint(17), false, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(1), false, false, ""},
+ leTestCase{uint8(17), false, false, ""},
+ leTestCase{uint8(253), false, false, ""},
+
+ leTestCase{uint16((1 << 16) - 1), false, false, ""},
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(1), false, false, ""},
+ leTestCase{uint16(17), false, false, ""},
+
+ leTestCase{uint32((1 << 32) - 1), false, false, ""},
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(1), false, false, ""},
+ leTestCase{uint32(17), false, false, ""},
+
+ leTestCase{uint64((1 << 64) - 1), false, false, ""},
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(1), false, false, ""},
+ leTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-(1 << 30)), true, false, ""},
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(-0.1), true, false, ""},
+ leTestCase{float32(-0.0), true, false, ""},
+ leTestCase{float32(0), true, false, ""},
+ leTestCase{float32(0.1), false, false, ""},
+ leTestCase{float32(17), false, false, ""},
+ leTestCase{float32(160), false, false, ""},
+
+ leTestCase{float64(-(1 << 30)), true, false, ""},
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(-0.1), true, false, ""},
+ leTestCase{float64(-0), true, false, ""},
+ leTestCase{float64(0), true, false, ""},
+ leTestCase{float64(0.1), false, false, ""},
+ leTestCase{float64(17), false, false, ""},
+ leTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) PositiveIntegerLiteral() {
+ matcher := LessOrEqual(150)
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{149, true, false, ""},
+ leTestCase{150, true, false, ""},
+ leTestCase{151, false, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(149), true, false, ""},
+ leTestCase{int(150), true, false, ""},
+ leTestCase{int(151), false, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(0), true, false, ""},
+ leTestCase{int8(17), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(149), true, false, ""},
+ leTestCase{int16(150), true, false, ""},
+ leTestCase{int16(151), false, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(149), true, false, ""},
+ leTestCase{int32(150), true, false, ""},
+ leTestCase{int32(151), false, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(149), true, false, ""},
+ leTestCase{int64(150), true, false, ""},
+ leTestCase{int64(151), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(149), true, false, ""},
+ leTestCase{uint(150), true, false, ""},
+ leTestCase{uint(151), false, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(127), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(149), true, false, ""},
+ leTestCase{uint16(150), true, false, ""},
+ leTestCase{uint16(151), false, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(149), true, false, ""},
+ leTestCase{uint32(150), true, false, ""},
+ leTestCase{uint32(151), false, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(149), true, false, ""},
+ leTestCase{uint64(150), true, false, ""},
+ leTestCase{uint64(151), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(149), true, false, ""},
+ leTestCase{float32(149.9), true, false, ""},
+ leTestCase{float32(150), true, false, ""},
+ leTestCase{float32(150.1), false, false, ""},
+ leTestCase{float32(151), false, false, ""},
+
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(149), true, false, ""},
+ leTestCase{float64(149.9), true, false, ""},
+ leTestCase{float64(150), true, false, ""},
+ leTestCase{float64(150.1), false, false, ""},
+ leTestCase{float64(151), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Float literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessOrEqualTest) NegativeFloatLiteral() {
+ matcher := LessOrEqual(-150.1)
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to -150.1"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-(1 << 30), true, false, ""},
+ leTestCase{-151, true, false, ""},
+ leTestCase{-150.1, true, false, ""},
+ leTestCase{-150, false, false, ""},
+ leTestCase{-149, false, false, ""},
+ leTestCase{0, false, false, ""},
+ leTestCase{17, false, false, ""},
+
+ leTestCase{int(-(1 << 30)), true, false, ""},
+ leTestCase{int(-151), true, false, ""},
+ leTestCase{int(-150), false, false, ""},
+ leTestCase{int(-149), false, false, ""},
+ leTestCase{int(0), false, false, ""},
+ leTestCase{int(17), false, false, ""},
+
+ leTestCase{int8(-127), false, false, ""},
+ leTestCase{int8(0), false, false, ""},
+ leTestCase{int8(17), false, false, ""},
+
+ leTestCase{int16(-(1 << 14)), true, false, ""},
+ leTestCase{int16(-151), true, false, ""},
+ leTestCase{int16(-150), false, false, ""},
+ leTestCase{int16(-149), false, false, ""},
+ leTestCase{int16(0), false, false, ""},
+ leTestCase{int16(17), false, false, ""},
+
+ leTestCase{int32(-(1 << 30)), true, false, ""},
+ leTestCase{int32(-151), true, false, ""},
+ leTestCase{int32(-150), false, false, ""},
+ leTestCase{int32(-149), false, false, ""},
+ leTestCase{int32(0), false, false, ""},
+ leTestCase{int32(17), false, false, ""},
+
+ leTestCase{int64(-(1 << 30)), true, false, ""},
+ leTestCase{int64(-151), true, false, ""},
+ leTestCase{int64(-150), false, false, ""},
+ leTestCase{int64(-149), false, false, ""},
+ leTestCase{int64(0), false, false, ""},
+ leTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint((1 << 32) - 151), false, false, ""},
+ leTestCase{uint(0), false, false, ""},
+ leTestCase{uint(17), false, false, ""},
+
+ leTestCase{uint8(0), false, false, ""},
+ leTestCase{uint8(17), false, false, ""},
+ leTestCase{uint8(253), false, false, ""},
+
+ leTestCase{uint16((1 << 16) - 151), false, false, ""},
+ leTestCase{uint16(0), false, false, ""},
+ leTestCase{uint16(17), false, false, ""},
+
+ leTestCase{uint32((1 << 32) - 151), false, false, ""},
+ leTestCase{uint32(0), false, false, ""},
+ leTestCase{uint32(17), false, false, ""},
+
+ leTestCase{uint64((1 << 64) - 151), false, false, ""},
+ leTestCase{uint64(0), false, false, ""},
+ leTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-(1 << 30)), true, false, ""},
+ leTestCase{float32(-151), true, false, ""},
+ leTestCase{float32(-150.2), true, false, ""},
+ leTestCase{float32(-150.1), true, false, ""},
+ leTestCase{float32(-150), false, false, ""},
+ leTestCase{float32(0), false, false, ""},
+ leTestCase{float32(17), false, false, ""},
+ leTestCase{float32(160), false, false, ""},
+
+ leTestCase{float64(-(1 << 30)), true, false, ""},
+ leTestCase{float64(-151), true, false, ""},
+ leTestCase{float64(-150.2), true, false, ""},
+ leTestCase{float64(-150.1), true, false, ""},
+ leTestCase{float64(-150), false, false, ""},
+ leTestCase{float64(0), false, false, ""},
+ leTestCase{float64(17), false, false, ""},
+ leTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) PositiveFloatLiteral() {
+ matcher := LessOrEqual(149.9)
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 149.9"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{149, true, false, ""},
+ leTestCase{149.9, true, false, ""},
+ leTestCase{150, false, false, ""},
+ leTestCase{151, false, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(149), true, false, ""},
+ leTestCase{int(150), false, false, ""},
+ leTestCase{int(151), false, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(0), true, false, ""},
+ leTestCase{int8(17), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(149), true, false, ""},
+ leTestCase{int16(150), false, false, ""},
+ leTestCase{int16(151), false, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(149), true, false, ""},
+ leTestCase{int32(150), false, false, ""},
+ leTestCase{int32(151), false, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(149), true, false, ""},
+ leTestCase{int64(150), false, false, ""},
+ leTestCase{int64(151), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(149), true, false, ""},
+ leTestCase{uint(150), false, false, ""},
+ leTestCase{uint(151), false, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(127), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(149), true, false, ""},
+ leTestCase{uint16(150), false, false, ""},
+ leTestCase{uint16(151), false, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(149), true, false, ""},
+ leTestCase{uint32(150), false, false, ""},
+ leTestCase{uint32(151), false, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(149), true, false, ""},
+ leTestCase{uint64(150), false, false, ""},
+ leTestCase{uint64(151), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(149), true, false, ""},
+ leTestCase{float32(149.8), true, false, ""},
+ leTestCase{float32(149.9), true, false, ""},
+ leTestCase{float32(150), false, false, ""},
+ leTestCase{float32(151), false, false, ""},
+
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(149), true, false, ""},
+ leTestCase{float64(149.8), true, false, ""},
+ leTestCase{float64(149.9), true, false, ""},
+ leTestCase{float64(150), false, false, ""},
+ leTestCase{float64(151), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Subtle cases
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessOrEqualTest) Int64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := LessOrEqual(int64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{kTwoTo25 + 0, true, false, ""},
+ leTestCase{kTwoTo25 + 1, true, false, ""},
+ leTestCase{kTwoTo25 + 2, false, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(0), true, false, ""},
+ leTestCase{int16(32767), true, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(255), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(65535), true, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ leTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
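
The rounding the comment above relies on can be checked directly: float32 has a 24-bit significand, so integers in [2^25, 2^26) are spaced 4 apart, and round-to-nearest-even sends every integer from 2^25-1 through 2^25+2 to exactly 2^25. A standalone sketch (not part of this commit):

package main

import "fmt"

func main() {
	const kTwoTo25 = 1 << 25

	// All but the last convert to exactly 2^25 == 33554432.
	for _, n := range []int64{kTwoTo25 - 1, kTwoTo25, kTwoTo25 + 1, kTwoTo25 + 2, kTwoTo25 + 3} {
		fmt.Printf("%d -> %.0f\n", n, float32(n))
	}
}
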
+
+func (t *LessOrEqualTest) Int64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessOrEqual(int64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{1 << 30, true, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(math.MaxInt32), true, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(0), true, false, ""},
+ leTestCase{int16(32767), true, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(math.MaxInt32), true, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(math.MaxUint32), true, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(255), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(65535), true, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(math.MaxUint32), true, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Floating point.
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) Uint64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
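+	// Concretely: float32 has a 24-bit significand, so adjacent values lie
+	// 2 apart in [2^24, 2^25) and 4 apart in [2^25, 2^26). 2^25-1 and
+	// 2^25+1 round to 2^25, 2^25+2 is a tie broken toward the even
+	// significand (also 2^25), and 2^25+3 is the first value to round up.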
+ const kTwoTo25 = 1 << 25
+ matcher := LessOrEqual(uint64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{kTwoTo25 + 0, true, false, ""},
+ leTestCase{kTwoTo25 + 1, true, false, ""},
+ leTestCase{kTwoTo25 + 2, false, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(0), true, false, ""},
+ leTestCase{int16(32767), true, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(255), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(65535), true, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ leTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) Uint64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessOrEqual(uint64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{1 << 30, true, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(math.MaxInt32), true, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(0), true, false, ""},
+ leTestCase{int16(32767), true, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(math.MaxInt32), true, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(math.MaxUint32), true, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(255), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(65535), true, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(math.MaxUint32), true, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Floating point.
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) Float32AboveExactIntegerRange() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := LessOrEqual(float32(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 3.3554432e+07"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo25 - 2), true, false, ""},
+ leTestCase{int64(kTwoTo25 - 1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 2), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 3), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 - 2), true, false, ""},
+ leTestCase{uint64(kTwoTo25 - 1), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 3), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) Float64AboveExactIntegerRange() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessOrEqual(float64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 1.8014398509481984e+16"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo54 - 2), true, false, ""},
+ leTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 2), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 3), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 - 2), true, false, ""},
+ leTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 3), false, false, ""},
+
+ // Floating point.
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// String literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessOrEqualTest) EmptyString() {
+ matcher := LessOrEqual("")
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to \"\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ leTestCase{"", true, false, ""},
+ leTestCase{"\x00", false, false, ""},
+ leTestCase{"a", false, false, ""},
+ leTestCase{"foo", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) SingleNullByte() {
+ matcher := LessOrEqual("\x00")
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to \"\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ leTestCase{"", true, false, ""},
+ leTestCase{"\x00", true, false, ""},
+ leTestCase{"\x00\x00", false, false, ""},
+ leTestCase{"a", false, false, ""},
+ leTestCase{"foo", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) LongerString() {
+ matcher := LessOrEqual("foo\x00")
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to \"foo\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ leTestCase{"", true, false, ""},
+ leTestCase{"\x00", true, false, ""},
+ leTestCase{"bar", true, false, ""},
+ leTestCase{"foo", true, false, ""},
+ leTestCase{"foo\x00", true, false, ""},
+ leTestCase{"foo\x00\x00", false, false, ""},
+ leTestCase{"fooa", false, false, ""},
+ leTestCase{"qux", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_than.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_than.go
new file mode 100644
index 00000000000..8258e45d99d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_than.go
@@ -0,0 +1,152 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+)
+
+// LessThan returns a matcher that matches integer, floating point, or string
+// values v such that v < x. Comparison is not defined between numeric and
+// string types, but is defined between all integer and floating point types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// LessThan will panic.
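+//
+// For example:
+//
+//	matcher := LessThan(17)
+//	matcher.Matches(16)   // nil: 16 < 17
+//	matcher.Matches(17)   // non-nil error: 17 is not less than 17
+//	matcher.Matches(16.5) // nil: integer and float types compare freely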
+func LessThan(x interface{}) Matcher {
+ v := reflect.ValueOf(x)
+ kind := v.Kind()
+
+ switch {
+ case isInteger(v):
+ case isFloat(v):
+ case kind == reflect.String:
+
+ default:
+ panic(fmt.Sprintf("LessThan: unexpected kind %v", kind))
+ }
+
+ return &lessThanMatcher{v}
+}
+
+type lessThanMatcher struct {
+ limit reflect.Value
+}
+
+func (m *lessThanMatcher) Description() string {
+ // Special case: make it clear that strings are strings.
+ if m.limit.Kind() == reflect.String {
+ return fmt.Sprintf("less than \"%s\"", m.limit.String())
+ }
+
+ return fmt.Sprintf("less than %v", m.limit.Interface())
+}
+
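+// compareIntegers returns nil if and only if v1 < v2. It takes care with
+// mixed signedness: a negative signed value is less than any unsigned value,
+// and an unsigned value above math.MaxInt64 is never less than any signed
+// value, so neither operand is converted in a way that could overflow.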
+func compareIntegers(v1, v2 reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(v1) && isSignedInteger(v2):
+ if v1.Int() < v2.Int() {
+ err = nil
+ }
+ return
+
+ case isSignedInteger(v1) && isUnsignedInteger(v2):
+ if v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() {
+ err = nil
+ }
+ return
+
+ case isUnsignedInteger(v1) && isSignedInteger(v2):
+ if v1.Uint() <= math.MaxInt64 && int64(v1.Uint()) < v2.Int() {
+ err = nil
+ }
+ return
+
+ case isUnsignedInteger(v1) && isUnsignedInteger(v2):
+ if v1.Uint() < v2.Uint() {
+ err = nil
+ }
+ return
+ }
+
+ panic(fmt.Sprintf("compareIntegers: %v %v", v1, v2))
+}
+
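+// getFloat converts a signed integer, unsigned integer, or floating point
+// value to float64, panicking for any other kind. The conversion may lose
+// precision: for example, int64(1<<54 + 1) converts to exactly 2^54, which
+// is why neighboring integers become equivalent once floats are involved.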
+func getFloat(v reflect.Value) float64 {
+ switch {
+ case isSignedInteger(v):
+ return float64(v.Int())
+
+ case isUnsignedInteger(v):
+ return float64(v.Uint())
+
+ case isFloat(v):
+ return v.Float()
+ }
+
+ panic(fmt.Sprintf("getFloat: %v", v))
+}
+
+func (m *lessThanMatcher) Matches(c interface{}) (err error) {
+ v1 := reflect.ValueOf(c)
+ v2 := m.limit
+
+ err = errors.New("")
+
+ // Handle strings as a special case.
+ if v1.Kind() == reflect.String && v2.Kind() == reflect.String {
+ if v1.String() < v2.String() {
+ err = nil
+ }
+ return
+ }
+
+ // If we get here, we require that we are dealing with integers or floats.
+ v1Legal := isInteger(v1) || isFloat(v1)
+ v2Legal := isInteger(v2) || isFloat(v2)
+ if !v1Legal || !v2Legal {
+ err = NewFatalError("which is not comparable")
+ return
+ }
+
+ // Handle the various comparison cases.
+ switch {
+ // Both integers
+ case isInteger(v1) && isInteger(v2):
+ return compareIntegers(v1, v2)
+
+ // At least one float32
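+	// (Both operands are demoted to float32 before comparing, so a
+	// candidate such as int64(1<<25 - 1) compares equal to, not less
+	// than, a limit of float32(1<<25).)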
+ case v1.Kind() == reflect.Float32 || v2.Kind() == reflect.Float32:
+ if float32(getFloat(v1)) < float32(getFloat(v2)) {
+ err = nil
+ }
+ return
+
+ // At least one float64
+ case v1.Kind() == reflect.Float64 || v2.Kind() == reflect.Float64:
+ if getFloat(v1) < getFloat(v2) {
+ err = nil
+ }
+ return
+ }
+
+ // We shouldn't get here.
+ panic(fmt.Sprintf("lessThanMatcher.Matches: Shouldn't get here: %v %v", v1, v2))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_than_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_than_test.go
new file mode 100644
index 00000000000..6ee6e9f2e63
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/less_than_test.go
@@ -0,0 +1,1059 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+ "math"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type LessThanTest struct {
+}
+
+func init() { RegisterTestSuite(&LessThanTest{}) }
+
+type ltTestCase struct {
+ candidate interface{}
+ expectedResult bool
+ shouldBeFatal bool
+ expectedError string
+}
+
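+// checkTestCases runs the matcher against each case, verifying the match
+// result, whether a failure is a *FatalError, and the reported error text.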
+func (t *LessThanTest) checkTestCases(matcher Matcher, cases []ltTestCase) {
+ for i, c := range cases {
+ err := matcher.Matches(c.candidate)
+
+ ExpectThat(
+ (err == nil),
+ Equals(c.expectedResult),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ if err == nil {
+ continue
+ }
+
+ _, isFatal := err.(*FatalError)
+ ExpectEq(
+ c.shouldBeFatal,
+ isFatal,
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ ExpectThat(
+ err,
+ Error(Equals(c.expectedError)),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+// Bad input
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessThanTest) IntegerCandidateBadTypes() {
+ matcher := LessThan(int(-150))
+
+ cases := []ltTestCase{
+ ltTestCase{true, false, true, "which is not comparable"},
+ ltTestCase{uintptr(17), false, true, "which is not comparable"},
+ ltTestCase{complex64(-151), false, true, "which is not comparable"},
+ ltTestCase{complex128(-151), false, true, "which is not comparable"},
+ ltTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ ltTestCase{make(chan int), false, true, "which is not comparable"},
+ ltTestCase{func() {}, false, true, "which is not comparable"},
+ ltTestCase{map[int]int{}, false, true, "which is not comparable"},
+ ltTestCase{&ltTestCase{}, false, true, "which is not comparable"},
+ ltTestCase{make([]int, 0), false, true, "which is not comparable"},
+ ltTestCase{"-151", false, true, "which is not comparable"},
+ ltTestCase{ltTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) FloatCandidateBadTypes() {
+ matcher := LessThan(float32(-150))
+
+ cases := []ltTestCase{
+ ltTestCase{true, false, true, "which is not comparable"},
+ ltTestCase{uintptr(17), false, true, "which is not comparable"},
+ ltTestCase{complex64(-151), false, true, "which is not comparable"},
+ ltTestCase{complex128(-151), false, true, "which is not comparable"},
+ ltTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ ltTestCase{make(chan int), false, true, "which is not comparable"},
+ ltTestCase{func() {}, false, true, "which is not comparable"},
+ ltTestCase{map[int]int{}, false, true, "which is not comparable"},
+ ltTestCase{&ltTestCase{}, false, true, "which is not comparable"},
+ ltTestCase{make([]int, 0), false, true, "which is not comparable"},
+ ltTestCase{"-151", false, true, "which is not comparable"},
+ ltTestCase{ltTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) StringCandidateBadTypes() {
+ matcher := LessThan("17")
+
+ cases := []ltTestCase{
+ ltTestCase{true, false, true, "which is not comparable"},
+ ltTestCase{int(0), false, true, "which is not comparable"},
+ ltTestCase{int8(0), false, true, "which is not comparable"},
+ ltTestCase{int16(0), false, true, "which is not comparable"},
+ ltTestCase{int32(0), false, true, "which is not comparable"},
+ ltTestCase{int64(0), false, true, "which is not comparable"},
+ ltTestCase{uint(0), false, true, "which is not comparable"},
+ ltTestCase{uint8(0), false, true, "which is not comparable"},
+ ltTestCase{uint16(0), false, true, "which is not comparable"},
+ ltTestCase{uint32(0), false, true, "which is not comparable"},
+ ltTestCase{uint64(0), false, true, "which is not comparable"},
+ ltTestCase{uintptr(17), false, true, "which is not comparable"},
+ ltTestCase{float32(0), false, true, "which is not comparable"},
+ ltTestCase{float64(0), false, true, "which is not comparable"},
+ ltTestCase{complex64(-151), false, true, "which is not comparable"},
+ ltTestCase{complex128(-151), false, true, "which is not comparable"},
+ ltTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ ltTestCase{make(chan int), false, true, "which is not comparable"},
+ ltTestCase{func() {}, false, true, "which is not comparable"},
+ ltTestCase{map[int]int{}, false, true, "which is not comparable"},
+ ltTestCase{&ltTestCase{}, false, true, "which is not comparable"},
+ ltTestCase{make([]int, 0), false, true, "which is not comparable"},
+ ltTestCase{ltTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) BadArgument() {
+ panicked := false
+
+ defer func() {
+ ExpectThat(panicked, Equals(true))
+ }()
+
+ defer func() {
+ if r := recover(); r != nil {
+ panicked = true
+ }
+ }()
+
+ LessThan(complex128(0))
+}
+
+////////////////////////////////////////////////////////////////////////
+// Integer literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessThanTest) NegativeIntegerLiteral() {
+ matcher := LessThan(-150)
+ desc := matcher.Description()
+ expectedDesc := "less than -150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-(1 << 30), true, false, ""},
+ ltTestCase{-151, true, false, ""},
+ ltTestCase{-150, false, false, ""},
+ ltTestCase{0, false, false, ""},
+ ltTestCase{17, false, false, ""},
+
+ ltTestCase{int(-(1 << 30)), true, false, ""},
+ ltTestCase{int(-151), true, false, ""},
+ ltTestCase{int(-150), false, false, ""},
+ ltTestCase{int(0), false, false, ""},
+ ltTestCase{int(17), false, false, ""},
+
+ ltTestCase{int8(-127), false, false, ""},
+ ltTestCase{int8(0), false, false, ""},
+ ltTestCase{int8(17), false, false, ""},
+
+ ltTestCase{int16(-(1 << 14)), true, false, ""},
+ ltTestCase{int16(-151), true, false, ""},
+ ltTestCase{int16(-150), false, false, ""},
+ ltTestCase{int16(0), false, false, ""},
+ ltTestCase{int16(17), false, false, ""},
+
+ ltTestCase{int32(-(1 << 30)), true, false, ""},
+ ltTestCase{int32(-151), true, false, ""},
+ ltTestCase{int32(-150), false, false, ""},
+ ltTestCase{int32(0), false, false, ""},
+ ltTestCase{int32(17), false, false, ""},
+
+ ltTestCase{int64(-(1 << 30)), true, false, ""},
+ ltTestCase{int64(-151), true, false, ""},
+ ltTestCase{int64(-150), false, false, ""},
+ ltTestCase{int64(0), false, false, ""},
+ ltTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint((1 << 32) - 151), false, false, ""},
+ ltTestCase{uint(0), false, false, ""},
+ ltTestCase{uint(17), false, false, ""},
+
+ ltTestCase{uint8(0), false, false, ""},
+ ltTestCase{uint8(17), false, false, ""},
+ ltTestCase{uint8(253), false, false, ""},
+
+ ltTestCase{uint16((1 << 16) - 151), false, false, ""},
+ ltTestCase{uint16(0), false, false, ""},
+ ltTestCase{uint16(17), false, false, ""},
+
+ ltTestCase{uint32((1 << 32) - 151), false, false, ""},
+ ltTestCase{uint32(0), false, false, ""},
+ ltTestCase{uint32(17), false, false, ""},
+
+ ltTestCase{uint64((1 << 64) - 151), false, false, ""},
+ ltTestCase{uint64(0), false, false, ""},
+ ltTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-(1 << 30)), true, false, ""},
+ ltTestCase{float32(-151), true, false, ""},
+ ltTestCase{float32(-150.1), true, false, ""},
+ ltTestCase{float32(-150), false, false, ""},
+ ltTestCase{float32(-149.9), false, false, ""},
+ ltTestCase{float32(0), false, false, ""},
+ ltTestCase{float32(17), false, false, ""},
+ ltTestCase{float32(160), false, false, ""},
+
+ ltTestCase{float64(-(1 << 30)), true, false, ""},
+ ltTestCase{float64(-151), true, false, ""},
+ ltTestCase{float64(-150.1), true, false, ""},
+ ltTestCase{float64(-150), false, false, ""},
+ ltTestCase{float64(-149.9), false, false, ""},
+ ltTestCase{float64(0), false, false, ""},
+ ltTestCase{float64(17), false, false, ""},
+ ltTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) ZeroIntegerLiteral() {
+ matcher := LessThan(0)
+ desc := matcher.Description()
+ expectedDesc := "less than 0"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-(1 << 30), true, false, ""},
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{0, false, false, ""},
+ ltTestCase{1, false, false, ""},
+ ltTestCase{17, false, false, ""},
+ ltTestCase{(1 << 30), false, false, ""},
+
+ ltTestCase{int(-(1 << 30)), true, false, ""},
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(0), false, false, ""},
+ ltTestCase{int(1), false, false, ""},
+ ltTestCase{int(17), false, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(0), false, false, ""},
+ ltTestCase{int8(1), false, false, ""},
+
+ ltTestCase{int16(-(1 << 14)), true, false, ""},
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(0), false, false, ""},
+ ltTestCase{int16(1), false, false, ""},
+ ltTestCase{int16(17), false, false, ""},
+
+ ltTestCase{int32(-(1 << 30)), true, false, ""},
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(0), false, false, ""},
+ ltTestCase{int32(1), false, false, ""},
+ ltTestCase{int32(17), false, false, ""},
+
+ ltTestCase{int64(-(1 << 30)), true, false, ""},
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(0), false, false, ""},
+ ltTestCase{int64(1), false, false, ""},
+ ltTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint((1 << 32) - 1), false, false, ""},
+ ltTestCase{uint(0), false, false, ""},
+ ltTestCase{uint(17), false, false, ""},
+
+ ltTestCase{uint8(0), false, false, ""},
+ ltTestCase{uint8(17), false, false, ""},
+ ltTestCase{uint8(253), false, false, ""},
+
+ ltTestCase{uint16((1 << 16) - 1), false, false, ""},
+ ltTestCase{uint16(0), false, false, ""},
+ ltTestCase{uint16(17), false, false, ""},
+
+ ltTestCase{uint32((1 << 32) - 1), false, false, ""},
+ ltTestCase{uint32(0), false, false, ""},
+ ltTestCase{uint32(17), false, false, ""},
+
+ ltTestCase{uint64((1 << 64) - 1), false, false, ""},
+ ltTestCase{uint64(0), false, false, ""},
+ ltTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-(1 << 30)), true, false, ""},
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(-0.1), true, false, ""},
+ ltTestCase{float32(-0.0), false, false, ""},
+ ltTestCase{float32(0), false, false, ""},
+ ltTestCase{float32(0.1), false, false, ""},
+ ltTestCase{float32(17), false, false, ""},
+ ltTestCase{float32(160), false, false, ""},
+
+ ltTestCase{float64(-(1 << 30)), true, false, ""},
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(-0.1), true, false, ""},
+ ltTestCase{float64(-0), false, false, ""},
+ ltTestCase{float64(0), false, false, ""},
+ ltTestCase{float64(17), false, false, ""},
+ ltTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) PositiveIntegerLiteral() {
+ matcher := LessThan(150)
+ desc := matcher.Description()
+ expectedDesc := "less than 150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{149, true, false, ""},
+ ltTestCase{150, false, false, ""},
+ ltTestCase{151, false, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(149), true, false, ""},
+ ltTestCase{int(150), false, false, ""},
+ ltTestCase{int(151), false, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(0), true, false, ""},
+ ltTestCase{int8(17), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(149), true, false, ""},
+ ltTestCase{int16(150), false, false, ""},
+ ltTestCase{int16(151), false, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(149), true, false, ""},
+ ltTestCase{int32(150), false, false, ""},
+ ltTestCase{int32(151), false, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(149), true, false, ""},
+ ltTestCase{int64(150), false, false, ""},
+ ltTestCase{int64(151), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(149), true, false, ""},
+ ltTestCase{uint(150), false, false, ""},
+ ltTestCase{uint(151), false, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(127), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(149), true, false, ""},
+ ltTestCase{uint16(150), false, false, ""},
+ ltTestCase{uint16(151), false, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(149), true, false, ""},
+ ltTestCase{uint32(150), false, false, ""},
+ ltTestCase{uint32(151), false, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(149), true, false, ""},
+ ltTestCase{uint64(150), false, false, ""},
+ ltTestCase{uint64(151), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(149), true, false, ""},
+ ltTestCase{float32(149.9), true, false, ""},
+ ltTestCase{float32(150), false, false, ""},
+ ltTestCase{float32(150.1), false, false, ""},
+ ltTestCase{float32(151), false, false, ""},
+
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(149), true, false, ""},
+ ltTestCase{float64(149.9), true, false, ""},
+ ltTestCase{float64(150), false, false, ""},
+ ltTestCase{float64(150.1), false, false, ""},
+ ltTestCase{float64(151), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Float literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessThanTest) NegativeFloatLiteral() {
+ matcher := LessThan(-150.1)
+ desc := matcher.Description()
+ expectedDesc := "less than -150.1"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-(1 << 30), true, false, ""},
+ ltTestCase{-151, true, false, ""},
+ ltTestCase{-150, false, false, ""},
+ ltTestCase{0, false, false, ""},
+ ltTestCase{17, false, false, ""},
+
+ ltTestCase{int(-(1 << 30)), true, false, ""},
+ ltTestCase{int(-151), true, false, ""},
+ ltTestCase{int(-150), false, false, ""},
+ ltTestCase{int(0), false, false, ""},
+ ltTestCase{int(17), false, false, ""},
+
+ ltTestCase{int8(-127), false, false, ""},
+ ltTestCase{int8(0), false, false, ""},
+ ltTestCase{int8(17), false, false, ""},
+
+ ltTestCase{int16(-(1 << 14)), true, false, ""},
+ ltTestCase{int16(-151), true, false, ""},
+ ltTestCase{int16(-150), false, false, ""},
+ ltTestCase{int16(0), false, false, ""},
+ ltTestCase{int16(17), false, false, ""},
+
+ ltTestCase{int32(-(1 << 30)), true, false, ""},
+ ltTestCase{int32(-151), true, false, ""},
+ ltTestCase{int32(-150), false, false, ""},
+ ltTestCase{int32(0), false, false, ""},
+ ltTestCase{int32(17), false, false, ""},
+
+ ltTestCase{int64(-(1 << 30)), true, false, ""},
+ ltTestCase{int64(-151), true, false, ""},
+ ltTestCase{int64(-150), false, false, ""},
+ ltTestCase{int64(0), false, false, ""},
+ ltTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint((1 << 32) - 151), false, false, ""},
+ ltTestCase{uint(0), false, false, ""},
+ ltTestCase{uint(17), false, false, ""},
+
+ ltTestCase{uint8(0), false, false, ""},
+ ltTestCase{uint8(17), false, false, ""},
+ ltTestCase{uint8(253), false, false, ""},
+
+ ltTestCase{uint16((1 << 16) - 151), false, false, ""},
+ ltTestCase{uint16(0), false, false, ""},
+ ltTestCase{uint16(17), false, false, ""},
+
+ ltTestCase{uint32((1 << 32) - 151), false, false, ""},
+ ltTestCase{uint32(0), false, false, ""},
+ ltTestCase{uint32(17), false, false, ""},
+
+ ltTestCase{uint64((1 << 64) - 151), false, false, ""},
+ ltTestCase{uint64(0), false, false, ""},
+ ltTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-(1 << 30)), true, false, ""},
+ ltTestCase{float32(-151), true, false, ""},
+ ltTestCase{float32(-150.2), true, false, ""},
+ ltTestCase{float32(-150.1), false, false, ""},
+ ltTestCase{float32(-150), false, false, ""},
+ ltTestCase{float32(0), false, false, ""},
+ ltTestCase{float32(17), false, false, ""},
+ ltTestCase{float32(160), false, false, ""},
+
+ ltTestCase{float64(-(1 << 30)), true, false, ""},
+ ltTestCase{float64(-151), true, false, ""},
+ ltTestCase{float64(-150.2), true, false, ""},
+ ltTestCase{float64(-150.1), false, false, ""},
+ ltTestCase{float64(-150), false, false, ""},
+ ltTestCase{float64(0), false, false, ""},
+ ltTestCase{float64(17), false, false, ""},
+ ltTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) PositiveFloatLiteral() {
+ matcher := LessThan(149.9)
+ desc := matcher.Description()
+ expectedDesc := "less than 149.9"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{149, true, false, ""},
+ ltTestCase{150, false, false, ""},
+ ltTestCase{151, false, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(149), true, false, ""},
+ ltTestCase{int(150), false, false, ""},
+ ltTestCase{int(151), false, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(0), true, false, ""},
+ ltTestCase{int8(17), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(149), true, false, ""},
+ ltTestCase{int16(150), false, false, ""},
+ ltTestCase{int16(151), false, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(149), true, false, ""},
+ ltTestCase{int32(150), false, false, ""},
+ ltTestCase{int32(151), false, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(149), true, false, ""},
+ ltTestCase{int64(150), false, false, ""},
+ ltTestCase{int64(151), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(149), true, false, ""},
+ ltTestCase{uint(150), false, false, ""},
+ ltTestCase{uint(151), false, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(127), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(149), true, false, ""},
+ ltTestCase{uint16(150), false, false, ""},
+ ltTestCase{uint16(151), false, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(149), true, false, ""},
+ ltTestCase{uint32(150), false, false, ""},
+ ltTestCase{uint32(151), false, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(149), true, false, ""},
+ ltTestCase{uint64(150), false, false, ""},
+ ltTestCase{uint64(151), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(149), true, false, ""},
+ ltTestCase{float32(149.8), true, false, ""},
+ ltTestCase{float32(149.9), false, false, ""},
+ ltTestCase{float32(150), false, false, ""},
+ ltTestCase{float32(151), false, false, ""},
+
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(149), true, false, ""},
+ ltTestCase{float64(149.8), true, false, ""},
+ ltTestCase{float64(149.9), false, false, ""},
+ ltTestCase{float64(150), false, false, ""},
+ ltTestCase{float64(151), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Subtle cases
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessThanTest) Int64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := LessThan(int64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{kTwoTo25 + 0, true, false, ""},
+ ltTestCase{kTwoTo25 + 1, false, false, ""},
+ ltTestCase{kTwoTo25 + 2, false, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(0), true, false, ""},
+ ltTestCase{int16(32767), true, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int32(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(255), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(65535), true, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ ltTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) Int64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessThan(int64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{1 << 30, true, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(math.MaxInt32), true, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(0), true, false, ""},
+ ltTestCase{int16(32767), true, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(math.MaxInt32), true, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ ltTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ ltTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(math.MaxUint32), true, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(255), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(65535), true, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(math.MaxUint32), true, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) Uint64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := LessThan(uint64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{kTwoTo25 + 0, true, false, ""},
+ ltTestCase{kTwoTo25 + 1, false, false, ""},
+ ltTestCase{kTwoTo25 + 2, false, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(0), true, false, ""},
+ ltTestCase{int16(32767), true, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int32(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(255), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(65535), true, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ ltTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) Uint64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessThan(uint64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{1 << 30, true, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(math.MaxInt32), true, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(0), true, false, ""},
+ ltTestCase{int16(32767), true, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(math.MaxInt32), true, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ ltTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ ltTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(math.MaxUint32), true, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(255), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(65535), true, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(math.MaxUint32), true, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) Float32AboveExactIntegerRange() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := LessThan(float32(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 3.3554432e+07"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{int64(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 3), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 3), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) Float64AboveExactIntegerRange() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessThan(float64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 1.8014398509481984e+16"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo54 - 2), true, false, ""},
+ ltTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 2), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 3), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 - 2), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 3), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// String literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessThanTest) EmptyString() {
+ matcher := LessThan("")
+ desc := matcher.Description()
+ expectedDesc := "less than \"\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ ltTestCase{"", false, false, ""},
+ ltTestCase{"\x00", false, false, ""},
+ ltTestCase{"a", false, false, ""},
+ ltTestCase{"foo", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) SingleNullByte() {
+ matcher := LessThan("\x00")
+ desc := matcher.Description()
+ expectedDesc := "less than \"\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ ltTestCase{"", true, false, ""},
+ ltTestCase{"\x00", false, false, ""},
+ ltTestCase{"a", false, false, ""},
+ ltTestCase{"foo", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) LongerString() {
+ matcher := LessThan("foo\x00")
+ desc := matcher.Description()
+ expectedDesc := "less than \"foo\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ ltTestCase{"", true, false, ""},
+ ltTestCase{"\x00", true, false, ""},
+ ltTestCase{"bar", true, false, ""},
+ ltTestCase{"foo", true, false, ""},
+ ltTestCase{"foo\x00", false, false, ""},
+ ltTestCase{"fooa", false, false, ""},
+ ltTestCase{"qux", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matcher.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matcher.go
new file mode 100644
index 00000000000..8cf1cbbc963
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matcher.go
@@ -0,0 +1,86 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package oglematchers provides a set of matchers useful in a testing or
+// mocking framework. These matchers are inspired by and mostly compatible with
+// Google Test for C++ and Google JS Test.
+//
+// This package is used by github.com/jacobsa/ogletest and
+// github.com/jacobsa/oglemock, which may be more directly useful if you're not
+// writing your own testing package or defining your own matchers.
+package oglematchers
+
+// A Matcher is some predicate implicitly defining a set of values that it
+// matches. For example, GreaterThan(17) matches all numeric values greater
+// than 17, and HasSubstr("taco") matches all strings with the substring
+// "taco".
+//
+// Matchers are typically exposed to tests via constructor functions like
+// HasSubstr. In order to implement such a function you can either define your
+// own matcher type or use NewMatcher.
+type Matcher interface {
+	// Check whether the supplied value belongs to the set defined by the
+ // matcher. Return a non-nil error if and only if it does not.
+ //
+ // The error describes why the value doesn't match. The error text is a
+ // relative clause that is suitable for being placed after the value. For
+ // example, a predicate that matches strings with a particular substring may,
+ // when presented with a numerical value, return the following error text:
+ //
+ // "which is not a string"
+ //
+ // Then the failure message may look like:
+ //
+ // Expected: has substring "taco"
+ // Actual: 17, which is not a string
+ //
+ // If the error is self-apparent based on the description of the matcher, the
+ // error text may be empty (but the error still non-nil). For example:
+ //
+ // Expected: 17
+ // Actual: 19
+ //
+ // If you are implementing a new matcher, see also the documentation on
+ // FatalError.
+ Matches(candidate interface{}) error
+
+ // Description returns a string describing the property that values matching
+ // this matcher have, as a verb phrase where the subject is the value. For
+	// example, "is greater than 17" or "has substring "taco"".
+ Description() string
+}
+
+// FatalError is an implementation of the error interface that may be returned
+// from matchers, indicating the error should be propagated. Returning a
+// *FatalError indicates that the matcher doesn't process values of the
+// supplied type, or otherwise doesn't know how to handle the value.
+//
+// For example, if GreaterThan(17) returned false for the value "taco" without
+// a fatal error, then Not(GreaterThan(17)) would return true. This is
+// technically correct, but is surprising and may mask failures where the wrong
+// sort of matcher is accidentally used. Instead, GreaterThan(17) can return a
+// fatal error, which will be propagated by Not().
+type FatalError struct {
+ errorText string
+}
+
+// NewFatalError creates a FatalError struct with the supplied error text.
+func NewFatalError(s string) *FatalError {
+ return &FatalError{s}
+}
+
+func (e *FatalError) Error() string {
+ return e.errorText
+}
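
The contract above is worth seeing end to end: Matches signals membership with a nil error, a non-fatal error marks a plain mismatch, and a *FatalError marks a candidate the matcher cannot judge at all. Below is a minimal sketch of a custom matcher honoring that contract; the isEvenMatcher type and its error strings are illustrative, not part of the vendored package.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/jacobsa/oglematchers"
)

// isEvenMatcher is a hypothetical Matcher: it matches even ints, returns an
// empty non-fatal error for odd ints (the failure is self-apparent from the
// description), and a *FatalError for candidates of the wrong type.
type isEvenMatcher struct{}

func (m *isEvenMatcher) Matches(c interface{}) error {
	n, ok := c.(int)
	if !ok {
		// Fatal: combinators like Not propagate this instead of inverting it.
		return oglematchers.NewFatalError("which is not an int")
	}
	if n%2 != 0 {
		return errors.New("")
	}
	return nil
}

func (m *isEvenMatcher) Description() string { return "is even" }

func main() {
	var m oglematchers.Matcher = &isEvenMatcher{}
	fmt.Println(m.Matches(4))      // <nil>
	fmt.Println(m.Matches(5))      // non-nil error with empty text
	fmt.Println(m.Matches("taco")) // which is not an int
}
```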
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matches_regexp.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matches_regexp.go
new file mode 100644
index 00000000000..1ed63f30c4e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matches_regexp.go
@@ -0,0 +1,69 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+)
+
+// MatchesRegexp returns a matcher that matches strings and byte slices whose
+// contents match the supplied regular expression. The semantics are those of
+// regexp.Match. In particular, that means the match is not implicitly anchored
+// to the ends of the string: MatchesRegexp("bar") will match "foo bar baz".
+func MatchesRegexp(pattern string) Matcher {
+ re, err := regexp.Compile(pattern)
+ if err != nil {
+ panic("MatchesRegexp: " + err.Error())
+ }
+
+ return &matchesRegexpMatcher{re}
+}
+
+type matchesRegexpMatcher struct {
+ re *regexp.Regexp
+}
+
+func (m *matchesRegexpMatcher) Description() string {
+ return fmt.Sprintf("matches regexp \"%s\"", m.re.String())
+}
+
+func (m *matchesRegexpMatcher) Matches(c interface{}) (err error) {
+ v := reflect.ValueOf(c)
+ isString := v.Kind() == reflect.String
+	isByteSlice := v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8
+
+ err = errors.New("")
+
+ switch {
+ case isString:
+ if m.re.MatchString(v.String()) {
+ err = nil
+ }
+
+ case isByteSlice:
+ if m.re.Match(v.Bytes()) {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not a string or []byte")
+ }
+
+ return
+}
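
As the code above shows, MatchesRegexp compiles the pattern once (panicking on an invalid pattern) and then applies regexp.MatchString or regexp.Match depending on the candidate's kind, so matching is unanchored. A small usage sketch under those semantics:

```go
package main

import (
	"fmt"

	"github.com/jacobsa/oglematchers"
)

func main() {
	m := oglematchers.MatchesRegexp("fo[op]\\s+x")

	// Matching is unanchored, as with regexp.Match.
	fmt.Println(m.Matches("blah foo x blah")) // <nil>

	// []byte candidates are matched against their contents.
	fmt.Println(m.Matches([]byte("fop  x"))) // <nil>

	// Anything else is a fatal type error.
	fmt.Println(m.Matches(17)) // which is not a string or []byte
}
```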
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matches_regexp_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matches_regexp_test.go
new file mode 100644
index 00000000000..7b69ce80105
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/matches_regexp_test.go
@@ -0,0 +1,92 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type MatchesRegexpTest struct {
+}
+
+func init() { RegisterTestSuite(&MatchesRegexpTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *MatchesRegexpTest) Description() {
+ m := MatchesRegexp("foo.*bar")
+ ExpectEq("matches regexp \"foo.*bar\"", m.Description())
+}
+
+func (t *MatchesRegexpTest) InvalidRegexp() {
+ ExpectThat(
+ func() { MatchesRegexp("(foo") },
+ Panics(HasSubstr("missing closing )")))
+}
+
+func (t *MatchesRegexpTest) CandidateIsNil() {
+ m := MatchesRegexp("")
+ err := m.Matches(nil)
+
+ ExpectThat(err, Error(Equals("which is not a string or []byte")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *MatchesRegexpTest) CandidateIsInteger() {
+ m := MatchesRegexp("")
+ err := m.Matches(17)
+
+ ExpectThat(err, Error(Equals("which is not a string or []byte")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *MatchesRegexpTest) NonMatchingCandidates() {
+ m := MatchesRegexp("fo[op]\\s+x")
+ var err error
+
+ err = m.Matches("fon x")
+ ExpectThat(err, Error(Equals("")))
+ ExpectFalse(isFatal(err))
+
+ err = m.Matches("fopx")
+ ExpectThat(err, Error(Equals("")))
+ ExpectFalse(isFatal(err))
+
+ err = m.Matches("fop ")
+ ExpectThat(err, Error(Equals("")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *MatchesRegexpTest) MatchingCandidates() {
+ m := MatchesRegexp("fo[op]\\s+x")
+ var err error
+
+ err = m.Matches("foo x")
+ ExpectEq(nil, err)
+
+ err = m.Matches("fop x")
+ ExpectEq(nil, err)
+
+ err = m.Matches("blah blah foo x blah blah")
+ ExpectEq(nil, err)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/new_matcher.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/new_matcher.go
new file mode 100644
index 00000000000..c9d8398ee63
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/new_matcher.go
@@ -0,0 +1,43 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+// Create a matcher with the given description and predicate function, which
+// will be invoked to handle calls to Matches.
+//
+// Using this constructor may be a convenience over defining your own type that
+// implements Matcher if you do not need any logic in your Description method.
+func NewMatcher(
+ predicate func(interface{}) error,
+ description string) Matcher {
+ return &predicateMatcher{
+ predicate: predicate,
+ description: description,
+ }
+}
+
+type predicateMatcher struct {
+ predicate func(interface{}) error
+ description string
+}
+
+func (pm *predicateMatcher) Matches(c interface{}) error {
+ return pm.predicate(c)
+}
+
+func (pm *predicateMatcher) Description() string {
+ return pm.description
+}
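
NewMatcher is convenient when the description is a fixed string and no extra state is needed. A minimal sketch (the isPositive name is illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/jacobsa/oglematchers"
)

func main() {
	// Build a one-off matcher without declaring a new type.
	isPositive := oglematchers.NewMatcher(
		func(c interface{}) error {
			n, ok := c.(int)
			if !ok {
				return oglematchers.NewFatalError("which is not an int")
			}
			if n <= 0 {
				return errors.New("")
			}
			return nil
		},
		"is a positive int")

	fmt.Println(isPositive.Description()) // is a positive int
	fmt.Println(isPositive.Matches(3))    // <nil>
	fmt.Println(isPositive.Matches(0))    // non-nil error with empty text
}
```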
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/not.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/not.go
new file mode 100644
index 00000000000..623789fe28a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/not.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Not returns a matcher that inverts the set of values matched by the wrapped
+// matcher. It does not transform the result for values for which the wrapped
+// matcher returns a fatal error.
+func Not(m Matcher) Matcher {
+ return &notMatcher{m}
+}
+
+type notMatcher struct {
+ wrapped Matcher
+}
+
+func (m *notMatcher) Matches(c interface{}) (err error) {
+ err = m.wrapped.Matches(c)
+
+ // Did the wrapped matcher say yes?
+ if err == nil {
+ return errors.New("")
+ }
+
+ // Did the wrapped matcher return a fatal error?
+ if _, isFatal := err.(*FatalError); isFatal {
+ return err
+ }
+
+ // The wrapped matcher returned a non-fatal error.
+ return nil
+}
+
+func (m *notMatcher) Description() string {
+ return fmt.Sprintf("not(%s)", m.wrapped.Description())
+}
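
The three-way branch in notMatcher.Matches is the payoff of the fatal/non-fatal split: success becomes failure, a plain mismatch becomes success, and a fatal type error passes through unchanged. A short sketch of that observable behavior, using matchers from this package:

```go
package main

import (
	"fmt"

	"github.com/jacobsa/oglematchers"
)

func main() {
	notBar := oglematchers.Not(oglematchers.MatchesRegexp("bar"))

	fmt.Println(notBar.Matches("foo")) // <nil>: wrapped mismatch is inverted
	fmt.Println(notBar.Matches("bar")) // non-nil error: wrapped match is inverted

	// A fatal error is propagated, not inverted into a match.
	fmt.Println(notBar.Matches(17)) // which is not a string or []byte
}
```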
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/not_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/not_test.go
new file mode 100644
index 00000000000..d5a12967524
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/not_test.go
@@ -0,0 +1,108 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "errors"
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+ "testing"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type fakeMatcher struct {
+ matchFunc func(interface{}) error
+ description string
+}
+
+func (m *fakeMatcher) Matches(c interface{}) error {
+ return m.matchFunc(c)
+}
+
+func (m *fakeMatcher) Description() string {
+ return m.description
+}
+
+type NotTest struct {
+}
+
+func init() { RegisterTestSuite(&NotTest{}) }
+func TestOgletest(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *NotTest) CallsWrapped() {
+ var suppliedCandidate interface{}
+ matchFunc := func(c interface{}) error {
+ suppliedCandidate = c
+ return nil
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Not(wrapped)
+
+ matcher.Matches(17)
+ ExpectThat(suppliedCandidate, Equals(17))
+}
+
+func (t *NotTest) WrappedReturnsTrue() {
+ matchFunc := func(c interface{}) error {
+ return nil
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Not(wrapped)
+
+ err := matcher.Matches(0)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *NotTest) WrappedReturnsNonFatalError() {
+ matchFunc := func(c interface{}) error {
+ return errors.New("taco")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Not(wrapped)
+
+ err := matcher.Matches(0)
+ ExpectEq(nil, err)
+}
+
+func (t *NotTest) WrappedReturnsFatalError() {
+ matchFunc := func(c interface{}) error {
+ return NewFatalError("taco")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Not(wrapped)
+
+ err := matcher.Matches(0)
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *NotTest) Description() {
+ wrapped := &fakeMatcher{nil, "taco"}
+ matcher := Not(wrapped)
+
+ ExpectEq("not(taco)", matcher.Description())
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/panics.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/panics.go
new file mode 100644
index 00000000000..d2cfc97869b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/panics.go
@@ -0,0 +1,74 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// Panics matches zero-arg functions which, when invoked, panic with an error
+// that matches the supplied matcher.
+//
+// NOTE(jacobsa): This matcher cannot detect the case where the function panics
+// using panic(nil), by design of the language. See here for more info:
+//
+// http://goo.gl/9aIQL
+//
+func Panics(m Matcher) Matcher {
+ return &panicsMatcher{m}
+}
+
+type panicsMatcher struct {
+ wrappedMatcher Matcher
+}
+
+func (m *panicsMatcher) Description() string {
+ return "panics with: " + m.wrappedMatcher.Description()
+}
+
+func (m *panicsMatcher) Matches(c interface{}) (err error) {
+ // Make sure c is a zero-arg function.
+ v := reflect.ValueOf(c)
+ if v.Kind() != reflect.Func || v.Type().NumIn() != 0 {
+ err = NewFatalError("which is not a zero-arg function")
+ return
+ }
+
+ // Call the function and check its panic error.
+ defer func() {
+ if e := recover(); e != nil {
+ err = m.wrappedMatcher.Matches(e)
+
+ // Set a clearer error message if the matcher said no.
+ if err != nil {
+ wrappedClause := ""
+ if err.Error() != "" {
+ wrappedClause = ", " + err.Error()
+ }
+
+ err = errors.New(fmt.Sprintf("which panicked with: %v%s", e, wrappedClause))
+ }
+ }
+ }()
+
+ v.Call([]reflect.Value{})
+
+ // If we get here, the function didn't panic.
+ err = errors.New("which didn't panic")
+ return
+}
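
Panics calls the candidate under a deferred recover and feeds the recovered value to the wrapped matcher, rewriting mismatches into a "which panicked with" message; per the note above, panic(nil) is invisible to it. A usage sketch:

```go
package main

import (
	"fmt"

	"github.com/jacobsa/oglematchers"
)

func main() {
	m := oglematchers.Panics(oglematchers.MatchesRegexp("boom"))

	fmt.Println(m.Matches(func() { panic("boom!") })) // <nil>
	fmt.Println(m.Matches(func() { panic("quiet") })) // which panicked with: quiet
	fmt.Println(m.Matches(func() {}))                 // which didn't panic
	fmt.Println(m.Matches("not a func"))              // which is not a zero-arg function
}
```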
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/panics_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/panics_test.go
new file mode 100644
index 00000000000..a2b494f3232
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/panics_test.go
@@ -0,0 +1,141 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "errors"
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type PanicsTest struct {
+ matcherCalled bool
+ suppliedCandidate interface{}
+ wrappedError error
+
+ matcher Matcher
+}
+
+func init() { RegisterTestSuite(&PanicsTest{}) }
+
+func (t *PanicsTest) SetUp(i *TestInfo) {
+ wrapped := &fakeMatcher{
+ func(c interface{}) error {
+ t.matcherCalled = true
+ t.suppliedCandidate = c
+ return t.wrappedError
+ },
+ "foo",
+ }
+
+ t.matcher = Panics(wrapped)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *PanicsTest) Description() {
+ ExpectThat(t.matcher.Description(), Equals("panics with: foo"))
+}
+
+func (t *PanicsTest) CandidateIsNil() {
+ err := t.matcher.Matches(nil)
+
+ ExpectThat(err, Error(Equals("which is not a zero-arg function")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PanicsTest) CandidateIsString() {
+ err := t.matcher.Matches("taco")
+
+ ExpectThat(err, Error(Equals("which is not a zero-arg function")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PanicsTest) CandidateTakesArgs() {
+ err := t.matcher.Matches(func(i int) string { return "" })
+
+ ExpectThat(err, Error(Equals("which is not a zero-arg function")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PanicsTest) CallsFunction() {
+ callCount := 0
+ t.matcher.Matches(func() string {
+ callCount++
+ return ""
+ })
+
+ ExpectThat(callCount, Equals(1))
+}
+
+func (t *PanicsTest) FunctionDoesntPanic() {
+ err := t.matcher.Matches(func() {})
+
+ ExpectThat(err, Error(Equals("which didn't panic")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *PanicsTest) CallsWrappedMatcher() {
+ expectedErr := 17
+ t.wrappedError = errors.New("")
+ t.matcher.Matches(func() { panic(expectedErr) })
+
+ ExpectThat(t.suppliedCandidate, Equals(expectedErr))
+}
+
+func (t *PanicsTest) WrappedReturnsTrue() {
+ err := t.matcher.Matches(func() { panic("") })
+
+ ExpectEq(nil, err)
+}
+
+func (t *PanicsTest) WrappedReturnsFatalErrorWithoutText() {
+ t.wrappedError = NewFatalError("")
+ err := t.matcher.Matches(func() { panic(17) })
+
+ ExpectThat(err, Error(Equals("which panicked with: 17")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *PanicsTest) WrappedReturnsFatalErrorWithText() {
+ t.wrappedError = NewFatalError("which blah")
+ err := t.matcher.Matches(func() { panic(17) })
+
+ ExpectThat(err, Error(Equals("which panicked with: 17, which blah")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *PanicsTest) WrappedReturnsNonFatalErrorWithoutText() {
+ t.wrappedError = errors.New("")
+ err := t.matcher.Matches(func() { panic(17) })
+
+ ExpectThat(err, Error(Equals("which panicked with: 17")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *PanicsTest) WrappedReturnsNonFatalErrorWithText() {
+ t.wrappedError = errors.New("which blah")
+ err := t.matcher.Matches(func() { panic(17) })
+
+ ExpectThat(err, Error(Equals("which panicked with: 17, which blah")))
+ ExpectFalse(isFatal(err))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/pointee.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/pointee.go
new file mode 100644
index 00000000000..c5383f2402f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/pointee.go
@@ -0,0 +1,65 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// Return a matcher that matches non-nil pointers whose pointee matches the
+// wrapped matcher.
+func Pointee(m Matcher) Matcher {
+ return &pointeeMatcher{m}
+}
+
+type pointeeMatcher struct {
+ wrapped Matcher
+}
+
+func (m *pointeeMatcher) Matches(c interface{}) (err error) {
+ // Make sure the candidate is of the appropriate type.
+ cv := reflect.ValueOf(c)
+ if !cv.IsValid() || cv.Kind() != reflect.Ptr {
+ return NewFatalError("which is not a pointer")
+ }
+
+ // Make sure the candidate is non-nil.
+ if cv.IsNil() {
+ return NewFatalError("")
+ }
+
+ // Defer to the wrapped matcher. Fix up empty errors so that failure messages
+ // are more helpful than just printing a pointer for "Actual".
+ pointee := cv.Elem().Interface()
+ err = m.wrapped.Matches(pointee)
+ if err != nil && err.Error() == "" {
+ s := fmt.Sprintf("whose pointee is %v", pointee)
+
+ if _, ok := err.(*FatalError); ok {
+ err = NewFatalError(s)
+ } else {
+ err = errors.New(s)
+ }
+ }
+
+ return err
+}
+
+func (m *pointeeMatcher) Description() string {
+ return fmt.Sprintf("pointee(%s)", m.wrapped.Description())
+}
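
Pointee dereferences exactly one level and, when the wrapped matcher fails with empty text, substitutes a message naming the pointee so the failure output is more useful than a raw pointer. Sketch:

```go
package main

import (
	"fmt"

	"github.com/jacobsa/oglematchers"
)

func main() {
	m := oglematchers.Pointee(oglematchers.MatchesRegexp("taco"))

	match := "taco stand"
	fmt.Println(m.Matches(&match)) // <nil>

	miss := "burrito"
	fmt.Println(m.Matches(&miss)) // whose pointee is burrito

	fmt.Println(m.Matches(miss))           // which is not a pointer (fatal)
	fmt.Println(m.Matches((*string)(nil))) // fatal error with empty text
}
```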
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/pointee_test.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/pointee_test.go
new file mode 100644
index 00000000000..58a8381c640
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/pointee_test.go
@@ -0,0 +1,152 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "errors"
+ . "github.com/jacobsa/oglematchers"
+ . "github.com/jacobsa/ogletest"
+ "testing"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type PointeeTest struct{}
+
+func init() { RegisterTestSuite(&PointeeTest{}) }
+
+func TestPointee(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *PointeeTest) Description() {
+ wrapped := &fakeMatcher{nil, "taco"}
+ matcher := Pointee(wrapped)
+
+ ExpectEq("pointee(taco)", matcher.Description())
+}
+
+func (t *PointeeTest) CandidateIsNotAPointer() {
+ matcher := Pointee(HasSubstr(""))
+ err := matcher.Matches([]byte{})
+
+ ExpectThat(err, Error(Equals("which is not a pointer")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PointeeTest) CandidateIsANilLiteral() {
+ matcher := Pointee(HasSubstr(""))
+ err := matcher.Matches(nil)
+
+ ExpectThat(err, Error(Equals("which is not a pointer")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PointeeTest) CandidateIsANilPointer() {
+ matcher := Pointee(HasSubstr(""))
+ err := matcher.Matches((*int)(nil))
+
+ ExpectThat(err, Error(Equals("")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PointeeTest) CallsWrapped() {
+ var suppliedCandidate interface{}
+ matchFunc := func(c interface{}) error {
+ suppliedCandidate = c
+ return nil
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ someSlice := []byte{}
+ matcher.Matches(&someSlice)
+ ExpectThat(suppliedCandidate, IdenticalTo(someSlice))
+}
+
+func (t *PointeeTest) WrappedReturnsOkay() {
+ matchFunc := func(c interface{}) error {
+ return nil
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ err := matcher.Matches(new(int))
+ ExpectEq(nil, err)
+}
+
+func (t *PointeeTest) WrappedReturnsNonFatalNonEmptyError() {
+ matchFunc := func(c interface{}) error {
+ return errors.New("taco")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ i := 17
+ err := matcher.Matches(&i)
+ ExpectFalse(isFatal(err))
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *PointeeTest) WrappedReturnsNonFatalEmptyError() {
+ matchFunc := func(c interface{}) error {
+ return errors.New("")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ i := 17
+ err := matcher.Matches(&i)
+ ExpectFalse(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("whose pointee")))
+ ExpectThat(err, Error(HasSubstr("17")))
+}
+
+func (t *PointeeTest) WrappedReturnsFatalNonEmptyError() {
+ matchFunc := func(c interface{}) error {
+ return NewFatalError("taco")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ i := 17
+ err := matcher.Matches(&i)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *PointeeTest) WrappedReturnsFatalEmptyError() {
+ matchFunc := func(c interface{}) error {
+ return NewFatalError("")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ i := 17
+ err := matcher.Matches(&i)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("whose pointee")))
+ ExpectThat(err, Error(HasSubstr("17")))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/transform_description.go b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/transform_description.go
new file mode 100644
index 00000000000..f79d0c03db1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jacobsa/oglematchers/transform_description.go
@@ -0,0 +1,36 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+// transformDescription returns a matcher that is equivalent to the supplied
+// one, except that it has the supplied description instead of the one attached
+// to the existing matcher.
+func transformDescription(m Matcher, newDesc string) Matcher {
+ return &transformDescriptionMatcher{newDesc, m}
+}
+
+type transformDescriptionMatcher struct {
+ desc string
+ wrappedMatcher Matcher
+}
+
+func (m *transformDescriptionMatcher) Description() string {
+ return m.desc
+}
+
+func (m *transformDescriptionMatcher) Matches(c interface{}) error {
+ return m.wrappedMatcher.Matches(c)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/.travis.yml b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/.travis.yml
new file mode 100644
index 00000000000..3165f004261
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/.travis.yml
@@ -0,0 +1,35 @@
+language: go
+
+install:
+ # go-flags
+ - go get -d -v ./...
+ - go build -v ./...
+
+ # linting
+ - go get golang.org/x/tools/cmd/vet
+ - go get github.com/golang/lint
+ - go install github.com/golang/lint/golint
+
+ # code coverage
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/onsi/ginkgo/ginkgo
+ - go get github.com/modocache/gover
+ - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then go get github.com/mattn/goveralls; fi
+
+script:
+ # go-flags
+ - $(exit $(gofmt -l . | wc -l))
+ - go test -v ./...
+
+ # linting
+ - go tool vet -all=true -v=true . || true
+ - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/golint ./...
+
+ # code coverage
+ - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/ginkgo -r -cover
+ - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/gover
+ - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken $COVERALLS_TOKEN; fi
+
+env:
+ # coveralls.io
+ secure: "RCYbiB4P0RjQRIoUx/vG/AjP3mmYCbzOmr86DCww1Z88yNcy3hYr3Cq8rpPtYU5v0g7wTpu4adaKIcqRE9xknYGbqj3YWZiCoBP1/n4Z+9sHW3Dsd9D/GRGeHUus0laJUGARjWoCTvoEtOgTdGQDoX7mH+pUUY0FBltNYUdOiiU="
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/LICENSE b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/LICENSE
new file mode 100644
index 00000000000..bcca0d521be
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/LICENSE
@@ -0,0 +1,26 @@
+Copyright (c) 2012 Jesse van den Kieboom. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/README.md b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/README.md
new file mode 100644
index 00000000000..9378b760bda
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/README.md
@@ -0,0 +1,135 @@
+go-flags: a go library for parsing command line arguments
+=========================================================
+
+[![GoDoc](https://godoc.org/github.com/jessevdk/go-flags?status.png)](https://godoc.org/github.com/jessevdk/go-flags) [![Build Status](https://travis-ci.org/jessevdk/go-flags.svg?branch=master)](https://travis-ci.org/jessevdk/go-flags) [![Coverage Status](https://img.shields.io/coveralls/jessevdk/go-flags.svg)](https://coveralls.io/r/jessevdk/go-flags?branch=master)
+
+This library provides similar functionality to the builtin flag library of
+go, but offers much more functionality and nicer formatting. From the
+documentation:
+
+Package flags provides an extensive command line option parser.
+The flags package is similar in functionality to the go builtin flag package
+but provides more options and uses reflection to provide a convenient and
+succinct way of specifying command line options.
+
+Supported features:
+* Options with short names (-v)
+* Options with long names (--verbose)
+* Options with and without arguments (bool vs. other types)
+* Options with optional arguments and default values
+* Multiple option groups each containing a set of options
+* Generate and print well-formatted help message
+* Passing remaining command line arguments after -- (optional)
+* Ignoring unknown command line options (optional)
+* Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification
+* Supports multiple short options -aux
+* Supports all primitive go types (string, int{8..64}, uint{8..64}, float)
+* Supports same option multiple times (can store in slice or last option counts)
+* Supports maps
+* Supports function callbacks
+* Supports namespaces for (nested) option groups
+
+The flags package uses structs, reflection and struct field tags
+to allow users to specify command line options. This results in very simple
+and concise specification of your application options. For example:
+
+```go
+type Options struct {
+ Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+}
+```
+
+This specifies one option with a short name -v and a long name --verbose.
+When either -v or --verbose is found on the command line, a 'true' value
+will be appended to the Verbose field. For example, when specifying -vvv,
+the resulting value of Verbose will be [true, true, true].
+
+Example:
+--------
+```go
+var opts struct {
+ // Slice of bool will append 'true' each time the option
+ // is encountered (can be set multiple times, like -vvv)
+ Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+
+ // Example of automatic marshalling to desired type (uint)
+ Offset uint `long:"offset" description:"Offset"`
+
+ // Example of a callback, called each time the option is found.
+ Call func(string) `short:"c" description:"Call phone number"`
+
+ // Example of a required flag
+ Name string `short:"n" long:"name" description:"A name" required:"true"`
+
+ // Example of a value name
+ File string `short:"f" long:"file" description:"A file" value-name:"FILE"`
+
+ // Example of a pointer
+ Ptr *int `short:"p" description:"A pointer to an integer"`
+
+ // Example of a slice of strings
+ StringSlice []string `short:"s" description:"A slice of strings"`
+
+ // Example of a slice of pointers
+ PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"`
+
+ // Example of a map
+ IntMap map[string]int `long:"intmap" description:"A map from string to int"`
+}
+
+// Callback which will invoke callto:<argument> to call a number.
+// Note that this works just on OS X (and probably only with
+// Skype) but it shows the idea.
+opts.Call = func(num string) {
+ cmd := exec.Command("open", "callto:"+num)
+ cmd.Start()
+ cmd.Process.Release()
+}
+
+// Make some fake arguments to parse.
+args := []string{
+ "-vv",
+ "--offset=5",
+ "-n", "Me",
+ "-p", "3",
+ "-s", "hello",
+ "-s", "world",
+ "--ptrslice", "hello",
+ "--ptrslice", "world",
+ "--intmap", "a:1",
+ "--intmap", "b:5",
+ "arg1",
+ "arg2",
+ "arg3",
+}
+
+// Parse flags from `args'. Note that here we use flags.ParseArgs for
+// the sake of making a working example. Normally, you would simply use
+// flags.Parse(&opts) which uses os.Args
+args, err := flags.ParseArgs(&opts, args)
+
+if err != nil {
+	panic(err)
+}
+
+fmt.Printf("Verbosity: %v\n", opts.Verbose)
+fmt.Printf("Offset: %d\n", opts.Offset)
+fmt.Printf("Name: %s\n", opts.Name)
+fmt.Printf("Ptr: %d\n", *opts.Ptr)
+fmt.Printf("StringSlice: %v\n", opts.StringSlice)
+fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1])
+fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"])
+fmt.Printf("Remaining args: %s\n", strings.Join(args, " "))
+
+// Output: Verbosity: [true true]
+// Offset: 5
+// Name: Me
+// Ptr: 3
+// StringSlice: [hello world]
+// PtrSlice: [hello world]
+// IntMap: [a:1 b:5]
+// Remaining args: arg1 arg2 arg3
+```
+
+More information can be found in the godocs: <http://godoc.org/github.com/jessevdk/go-flags>
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/arg.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/arg.go
new file mode 100644
index 00000000000..d1606440715
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/arg.go
@@ -0,0 +1,24 @@
+package flags
+
+import (
+ "reflect"
+)
+
+// Arg represents a positional argument on the command line.
+type Arg struct {
+ // The name of the positional argument (used in the help)
+ Name string
+
+ // A description of the positional argument (used in the help)
+ Description string
+
+	// The number of times the positional argument must be provided; a
+	// non-zero value marks it required (for slice arguments this is the
+	// minimum number of occurrences)
+	Required int
+
+ value reflect.Value
+ tag multiTag
+}
+
+func (a *Arg) isRemaining() bool {
+ return a.value.Type().Kind() == reflect.Slice
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/arg_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/arg_test.go
new file mode 100644
index 00000000000..117e90ec698
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/arg_test.go
@@ -0,0 +1,133 @@
+package flags
+
+import (
+ "testing"
+)
+
+func TestPositional(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Positional struct {
+ Command int
+ Filename string
+ Rest []string
+ } `positional-args:"yes" required:"yes"`
+ }{}
+
+ p := NewParser(&opts, Default)
+ ret, err := p.ParseArgs([]string{"10", "arg_test.go", "a", "b"})
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ return
+ }
+
+ if opts.Positional.Command != 10 {
+ t.Fatalf("Expected opts.Positional.Command to be 10, but got %v", opts.Positional.Command)
+ }
+
+ if opts.Positional.Filename != "arg_test.go" {
+ t.Fatalf("Expected opts.Positional.Filename to be \"arg_test.go\", but got %v", opts.Positional.Filename)
+ }
+
+ assertStringArray(t, opts.Positional.Rest, []string{"a", "b"})
+ assertStringArray(t, ret, []string{})
+}
+
+func TestPositionalRequired(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Positional struct {
+ Command int
+ Filename string
+ Rest []string
+ } `positional-args:"yes" required:"yes"`
+ }{}
+
+ p := NewParser(&opts, None)
+ _, err := p.ParseArgs([]string{"10"})
+
+ assertError(t, err, ErrRequired, "the required argument `Filename` was not provided")
+}
+
+func TestPositionalRequiredRest1Fail(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Positional struct {
+ Rest []string `required:"yes"`
+ } `positional-args:"yes"`
+ }{}
+
+ p := NewParser(&opts, None)
+ _, err := p.ParseArgs([]string{})
+
+ assertError(t, err, ErrRequired, "the required argument `Rest (at least 1 argument)` was not provided")
+}
+
+func TestPositionalRequiredRest1Pass(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Positional struct {
+ Rest []string `required:"yes"`
+ } `positional-args:"yes"`
+ }{}
+
+ p := NewParser(&opts, None)
+ _, err := p.ParseArgs([]string{"rest1"})
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ return
+ }
+
+ if len(opts.Positional.Rest) != 1 {
+ t.Fatalf("Expected 1 positional rest argument")
+ }
+
+ assertString(t, opts.Positional.Rest[0], "rest1")
+}
+
+func TestPositionalRequiredRest2Fail(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Positional struct {
+ Rest []string `required:"2"`
+ } `positional-args:"yes"`
+ }{}
+
+ p := NewParser(&opts, None)
+ _, err := p.ParseArgs([]string{"rest1"})
+
+ assertError(t, err, ErrRequired, "the required argument `Rest (at least 2 arguments, but got only 1)` was not provided")
+}
+
+func TestPositionalRequiredRest2Pass(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Positional struct {
+ Rest []string `required:"2"`
+ } `positional-args:"yes"`
+ }{}
+
+ p := NewParser(&opts, None)
+ _, err := p.ParseArgs([]string{"rest1", "rest2", "rest3"})
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ return
+ }
+
+ if len(opts.Positional.Rest) != 3 {
+		t.Fatalf("Expected 3 positional rest arguments")
+ }
+
+ assertString(t, opts.Positional.Rest[0], "rest1")
+ assertString(t, opts.Positional.Rest[1], "rest2")
+ assertString(t, opts.Positional.Rest[2], "rest3")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/assert_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/assert_test.go
new file mode 100644
index 00000000000..8e06636b66d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/assert_test.go
@@ -0,0 +1,177 @@
+package flags
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "runtime"
+ "testing"
+)
+
+func assertCallerInfo() (string, int) {
+ ptr := make([]uintptr, 15)
+ n := runtime.Callers(1, ptr)
+
+ if n == 0 {
+ return "", 0
+ }
+
+ mef := runtime.FuncForPC(ptr[0])
+ mefile, meline := mef.FileLine(ptr[0])
+
+ for i := 2; i < n; i++ {
+ f := runtime.FuncForPC(ptr[i])
+ file, line := f.FileLine(ptr[i])
+
+ if file != mefile {
+ return file, line
+ }
+ }
+
+ return mefile, meline
+}
+
+func assertErrorf(t *testing.T, format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+
+ file, line := assertCallerInfo()
+
+ t.Errorf("%s:%d: %s", path.Base(file), line, msg)
+}
+
+func assertFatalf(t *testing.T, format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+
+ file, line := assertCallerInfo()
+
+ t.Fatalf("%s:%d: %s", path.Base(file), line, msg)
+}
+
+func assertString(t *testing.T, a string, b string) {
+ if a != b {
+ assertErrorf(t, "Expected %#v, but got %#v", b, a)
+ }
+}
+
+func assertStringArray(t *testing.T, a []string, b []string) {
+ if len(a) != len(b) {
+ assertErrorf(t, "Expected %#v, but got %#v", b, a)
+ return
+ }
+
+ for i, v := range a {
+ if b[i] != v {
+ assertErrorf(t, "Expected %#v, but got %#v", b, a)
+ return
+ }
+ }
+}
+
+func assertBoolArray(t *testing.T, a []bool, b []bool) {
+ if len(a) != len(b) {
+ assertErrorf(t, "Expected %#v, but got %#v", b, a)
+ return
+ }
+
+ for i, v := range a {
+ if b[i] != v {
+ assertErrorf(t, "Expected %#v, but got %#v", b, a)
+ return
+ }
+ }
+}
+
+func assertParserSuccess(t *testing.T, data interface{}, args ...string) (*Parser, []string) {
+ parser := NewParser(data, Default&^PrintErrors)
+ ret, err := parser.ParseArgs(args)
+
+ if err != nil {
+ t.Fatalf("Unexpected parse error: %s", err)
+ return nil, nil
+ }
+
+ return parser, ret
+}
+
+func assertParseSuccess(t *testing.T, data interface{}, args ...string) []string {
+ _, ret := assertParserSuccess(t, data, args...)
+ return ret
+}
+
+func assertError(t *testing.T, err error, typ ErrorType, msg string) {
+ if err == nil {
+ assertFatalf(t, "Expected error: %s", msg)
+ return
+ }
+
+ if e, ok := err.(*Error); !ok {
+ assertFatalf(t, "Expected Error type, but got %#v", err)
+ } else {
+ if e.Type != typ {
+ assertErrorf(t, "Expected error type {%s}, but got {%s}", typ, e.Type)
+ }
+
+ if e.Message != msg {
+ assertErrorf(t, "Expected error message %#v, but got %#v", msg, e.Message)
+ }
+ }
+}
+
+func assertParseFail(t *testing.T, typ ErrorType, msg string, data interface{}, args ...string) []string {
+ parser := NewParser(data, Default&^PrintErrors)
+ ret, err := parser.ParseArgs(args)
+
+ assertError(t, err, typ, msg)
+ return ret
+}
+
+func diff(a, b string) (string, error) {
+ atmp, err := ioutil.TempFile("", "help-diff")
+
+ if err != nil {
+ return "", err
+ }
+
+ btmp, err := ioutil.TempFile("", "help-diff")
+
+ if err != nil {
+ return "", err
+ }
+
+ if _, err := io.WriteString(atmp, a); err != nil {
+ return "", err
+ }
+
+ if _, err := io.WriteString(btmp, b); err != nil {
+ return "", err
+ }
+
+ ret, err := exec.Command("diff", "-u", "-d", "--label", "got", atmp.Name(), "--label", "expected", btmp.Name()).Output()
+
+ os.Remove(atmp.Name())
+ os.Remove(btmp.Name())
+
+	if err != nil && err.Error() == "exit status 1" {
+ return string(ret), nil
+ }
+
+ return string(ret), err
+}
+
+func assertDiff(t *testing.T, actual, expected, msg string) {
+ if actual == expected {
+ return
+ }
+
+ ret, err := diff(actual, expected)
+
+ if err != nil {
+ assertErrorf(t, "Unexpected diff error: %s", err)
+ assertErrorf(t, "Unexpected %s, expected:\n\n%s\n\nbut got\n\n%s", msg, expected, actual)
+ } else {
+ assertErrorf(t, "Unexpected %s:\n\n%s", msg, ret)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/check_crosscompile.sh b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/check_crosscompile.sh
new file mode 100755
index 00000000000..c494f6119d3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/check_crosscompile.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -e
+
+echo '# linux arm7'
+GOARM=7 GOARCH=arm GOOS=linux go build
+echo '# linux arm5'
+GOARM=5 GOARCH=arm GOOS=linux go build
+echo '# windows 386'
+GOARCH=386 GOOS=windows go build
+echo '# windows amd64'
+GOARCH=amd64 GOOS=windows go build
+echo '# darwin'
+GOARCH=amd64 GOOS=darwin go build
+echo '# freebsd'
+GOARCH=amd64 GOOS=freebsd go build
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/closest.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/closest.go
new file mode 100644
index 00000000000..3b518757c43
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/closest.go
@@ -0,0 +1,59 @@
+package flags
+
+func levenshtein(s string, t string) int {
+ if len(s) == 0 {
+ return len(t)
+ }
+
+ if len(t) == 0 {
+ return len(s)
+ }
+
+ dists := make([][]int, len(s)+1)
+ for i := range dists {
+ dists[i] = make([]int, len(t)+1)
+ dists[i][0] = i
+ }
+
+	// Initialize the whole first row: turning "" into t[:j] takes j inserts.
+	for j := 0; j <= len(t); j++ {
+		dists[0][j] = j
+	}
+
+ for i, sc := range s {
+ for j, tc := range t {
+ if sc == tc {
+ dists[i+1][j+1] = dists[i][j]
+ } else {
+ dists[i+1][j+1] = dists[i][j] + 1
+ if dists[i+1][j] < dists[i+1][j+1] {
+ dists[i+1][j+1] = dists[i+1][j] + 1
+ }
+ if dists[i][j+1] < dists[i+1][j+1] {
+ dists[i+1][j+1] = dists[i][j+1] + 1
+ }
+ }
+ }
+ }
+
+ return dists[len(s)][len(t)]
+}
+
+func closestChoice(cmd string, choices []string) (string, int) {
+ if len(choices) == 0 {
+ return "", 0
+ }
+
+ mincmd := -1
+ mindist := -1
+
+ for i, c := range choices {
+ l := levenshtein(cmd, c)
+
+ if mincmd < 0 || l < mindist {
+ mindist = l
+ mincmd = i
+ }
+ }
+
+ return choices[mincmd], mindist
+}
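
closest.go is the classic dynamic-programming edit distance with unit costs, used to suggest the nearest command name for a typo. Since levenshtein and closestChoice are unexported, here is a self-contained sketch of the same recurrence (editDistance is an illustrative name; it works byte-wise, whereas the vendored loop ranges over runes):

```go
package main

import "fmt"

// editDistance computes Levenshtein distance: dists[i][j] is the cost of
// transforming s[:i] into t[:j].
func editDistance(s, t string) int {
	dists := make([][]int, len(s)+1)
	for i := range dists {
		dists[i] = make([]int, len(t)+1)
		dists[i][0] = i // delete all of s[:i]
	}
	for j := 0; j <= len(t); j++ {
		dists[0][j] = j // insert all of t[:j]
	}

	for i := 0; i < len(s); i++ {
		for j := 0; j < len(t); j++ {
			if s[i] == t[j] {
				dists[i+1][j+1] = dists[i][j]
				continue
			}
			best := dists[i][j] // substitute s[i] with t[j]
			if dists[i+1][j] < best {
				best = dists[i+1][j] // insert t[j]
			}
			if dists[i][j+1] < best {
				best = dists[i][j+1] // delete s[i]
			}
			dists[i+1][j+1] = best + 1
		}
	}

	return dists[len(s)][len(t)]
}

func main() {
	fmt.Println(editDistance("kitten", "sitting")) // 3
	fmt.Println(editDistance("comit", "commit"))   // 1
}
```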
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/command.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/command.go
new file mode 100644
index 00000000000..a30f5609099
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/command.go
@@ -0,0 +1,441 @@
+package flags
+
+import (
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unsafe"
+)
+
+// Command represents an application command. Commands can be added to the
+// parser (which itself is a command) and are selected/executed when its name
+// is specified on the command line. The Command type embeds a Group and
+// therefore also carries a set of command specific options.
+type Command struct {
+ // Embedded, see Group for more information
+ *Group
+
+ // The name by which the command can be invoked
+ Name string
+
+ // The active sub command (set by parsing) or nil
+ Active *Command
+
+ // Whether subcommands are optional
+ SubcommandsOptional bool
+
+ // Aliases for the command
+ Aliases []string
+
+ // Whether positional arguments are required
+ ArgsRequired bool
+
+ commands []*Command
+ hasBuiltinHelpGroup bool
+ args []*Arg
+}
+
+// Commander is an interface which can be implemented by any command added in
+// the options. When implemented, the Execute method will be called for the last
+// specified (sub)command providing the remaining command line arguments.
+type Commander interface {
+ // Execute will be called for the last active (sub)command. The
+ // args argument contains the remaining command line arguments. The
+ // error that Execute returns will be eventually passed out of the
+ // Parse method of the Parser.
+ Execute(args []string) error
+}
+
+// Usage is an interface which can be implemented to show a custom usage string
+// in the help message shown for a command.
+type Usage interface {
+ // Usage is called for commands to allow customized printing of command
+ // usage in the generated help message.
+ Usage() string
+}
+
+type lookup struct {
+ shortNames map[string]*Option
+ longNames map[string]*Option
+
+ commands map[string]*Command
+}
+
+// AddCommand adds a new command to the parser with the given name and data. The
+// data needs to be a pointer to a struct from which the fields indicate which
+// options are in the command. The provided data can implement the Commander and
+// Usage interfaces.
+func (c *Command) AddCommand(command string, shortDescription string, longDescription string, data interface{}) (*Command, error) {
+ cmd := newCommand(command, shortDescription, longDescription, data)
+
+ cmd.parent = c
+
+ if err := cmd.scan(); err != nil {
+ return nil, err
+ }
+
+ c.commands = append(c.commands, cmd)
+ return cmd, nil
+}
+
+// AddGroup adds a new group to the command with the given name and data. The
+// data needs to be a pointer to a struct from which the fields indicate which
+// options are in the group.
+func (c *Command) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) {
+ group := newGroup(shortDescription, longDescription, data)
+
+ group.parent = c
+
+ if err := group.scanType(c.scanSubcommandHandler(group)); err != nil {
+ return nil, err
+ }
+
+ c.groups = append(c.groups, group)
+ return group, nil
+}
+
+// Commands returns a list of subcommands of this command.
+func (c *Command) Commands() []*Command {
+ return c.commands
+}
+
+// Find locates the subcommand with the given name and returns it. If no such
+// command can be found Find will return nil.
+func (c *Command) Find(name string) *Command {
+ for _, cc := range c.commands {
+ if cc.match(name) {
+ return cc
+ }
+ }
+
+ return nil
+}
+
+// Find an option that is part of the command, or any of its
+// parent commands, by matching its long name
+// (including the option namespace).
+func (c *Command) FindOptionByLongName(longName string) (option *Option) {
+ for option == nil && c != nil {
+ option = c.Group.FindOptionByLongName(longName)
+
+ c, _ = c.parent.(*Command)
+ }
+
+ return option
+}
+
+// Find an option that is part of the command, or any of its
+// parent commands, by matching its short name.
+func (c *Command) FindOptionByShortName(shortName rune) (option *Option) {
+ for option == nil && c != nil {
+ option = c.Group.FindOptionByShortName(shortName)
+
+ c, _ = c.parent.(*Command)
+ }
+
+ return option
+}
+
+// Args returns a list of positional arguments associated with this command.
+func (c *Command) Args() []*Arg {
+ ret := make([]*Arg, len(c.args))
+ copy(ret, c.args)
+
+ return ret
+}
+
+func newCommand(name string, shortDescription string, longDescription string, data interface{}) *Command {
+ return &Command{
+ Group: newGroup(shortDescription, longDescription, data),
+ Name: name,
+ }
+}
+
+func (c *Command) scanSubcommandHandler(parentg *Group) scanHandler {
+ f := func(realval reflect.Value, sfield *reflect.StructField) (bool, error) {
+ mtag := newMultiTag(string(sfield.Tag))
+
+ if err := mtag.Parse(); err != nil {
+ return true, err
+ }
+
+ positional := mtag.Get("positional-args")
+
+ if len(positional) != 0 {
+ stype := realval.Type()
+
+ for i := 0; i < stype.NumField(); i++ {
+ field := stype.Field(i)
+
+			m := newMultiTag(string(field.Tag))
+
+ if err := m.Parse(); err != nil {
+ return true, err
+ }
+
+ name := m.Get("positional-arg-name")
+
+ if len(name) == 0 {
+ name = field.Name
+ }
+
+ var required int
+
+ sreq := m.Get("required")
+
+ if sreq != "" {
+ required = 1
+
+ if preq, err := strconv.ParseInt(sreq, 10, 32); err == nil {
+ required = int(preq)
+ }
+ }
+
+ arg := &Arg{
+ Name: name,
+ Description: m.Get("description"),
+ Required: required,
+
+ value: realval.Field(i),
+ tag: m,
+ }
+
+ c.args = append(c.args, arg)
+
+ if len(mtag.Get("required")) != 0 {
+ c.ArgsRequired = true
+ }
+ }
+
+ return true, nil
+ }
+
+ subcommand := mtag.Get("command")
+
+ if len(subcommand) != 0 {
+ ptrval := reflect.NewAt(realval.Type(), unsafe.Pointer(realval.UnsafeAddr()))
+
+ shortDescription := mtag.Get("description")
+ longDescription := mtag.Get("long-description")
+ subcommandsOptional := mtag.Get("subcommands-optional")
+ aliases := mtag.GetMany("alias")
+
+ subc, err := c.AddCommand(subcommand, shortDescription, longDescription, ptrval.Interface())
+ if err != nil {
+ return true, err
+ }
+
+ subc.Hidden = mtag.Get("hidden") != ""
+
+ if len(subcommandsOptional) > 0 {
+ subc.SubcommandsOptional = true
+ }
+
+ if len(aliases) > 0 {
+ subc.Aliases = aliases
+ }
+
+ return true, nil
+ }
+
+ return parentg.scanSubGroupHandler(realval, sfield)
+ }
+
+ return f
+}
+
+func (c *Command) scan() error {
+ return c.scanType(c.scanSubcommandHandler(c.Group))
+}
+
+func (c *Command) eachOption(f func(*Command, *Group, *Option)) {
+ c.eachCommand(func(c *Command) {
+ c.eachGroup(func(g *Group) {
+ for _, option := range g.options {
+ f(c, g, option)
+ }
+ })
+ }, true)
+}
+
+func (c *Command) eachCommand(f func(*Command), recurse bool) {
+ f(c)
+
+ for _, cc := range c.commands {
+ if recurse {
+ cc.eachCommand(f, true)
+ } else {
+ f(cc)
+ }
+ }
+}
+
+func (c *Command) eachActiveGroup(f func(cc *Command, g *Group)) {
+ c.eachGroup(func(g *Group) {
+ f(c, g)
+ })
+
+ if c.Active != nil {
+ c.Active.eachActiveGroup(f)
+ }
+}
+
+func (c *Command) addHelpGroups(showHelp func() error) {
+ if !c.hasBuiltinHelpGroup {
+ c.addHelpGroup(showHelp)
+ c.hasBuiltinHelpGroup = true
+ }
+
+ for _, cc := range c.commands {
+ cc.addHelpGroups(showHelp)
+ }
+}
+
+func (c *Command) makeLookup() lookup {
+ ret := lookup{
+ shortNames: make(map[string]*Option),
+ longNames: make(map[string]*Option),
+ commands: make(map[string]*Command),
+ }
+
+ parent := c.parent
+
+ var parents []*Command
+
+ for parent != nil {
+ if cmd, ok := parent.(*Command); ok {
+ parents = append(parents, cmd)
+ parent = cmd.parent
+ } else {
+ parent = nil
+ }
+ }
+
+ for i := len(parents) - 1; i >= 0; i-- {
+ parents[i].fillLookup(&ret, true)
+ }
+
+ c.fillLookup(&ret, false)
+ return ret
+}
+
+func (c *Command) fillLookup(ret *lookup, onlyOptions bool) {
+ c.eachGroup(func(g *Group) {
+ for _, option := range g.options {
+ if option.ShortName != 0 {
+ ret.shortNames[string(option.ShortName)] = option
+ }
+
+ if len(option.LongName) > 0 {
+ ret.longNames[option.LongNameWithNamespace()] = option
+ }
+ }
+ })
+
+ if onlyOptions {
+ return
+ }
+
+ for _, subcommand := range c.commands {
+ ret.commands[subcommand.Name] = subcommand
+
+ for _, a := range subcommand.Aliases {
+ ret.commands[a] = subcommand
+ }
+ }
+}
+
+func (c *Command) groupByName(name string) *Group {
+ if grp := c.Group.groupByName(name); grp != nil {
+ return grp
+ }
+
+ for _, subc := range c.commands {
+ prefix := subc.Name + "."
+
+ if strings.HasPrefix(name, prefix) {
+ if grp := subc.groupByName(name[len(prefix):]); grp != nil {
+ return grp
+ }
+ } else if name == subc.Name {
+ return subc.Group
+ }
+ }
+
+ return nil
+}
+
+type commandList []*Command
+
+func (c commandList) Less(i, j int) bool {
+ return c[i].Name < c[j].Name
+}
+
+func (c commandList) Len() int {
+ return len(c)
+}
+
+func (c commandList) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
+
+func (c *Command) sortedVisibleCommands() []*Command {
+ ret := commandList(c.visibleCommands())
+ sort.Sort(ret)
+
+ return []*Command(ret)
+}
+
+func (c *Command) visibleCommands() []*Command {
+ ret := make([]*Command, 0, len(c.commands))
+
+ for _, cmd := range c.commands {
+ if !cmd.Hidden {
+ ret = append(ret, cmd)
+ }
+ }
+
+ return ret
+}
+
+func (c *Command) match(name string) bool {
+ if c.Name == name {
+ return true
+ }
+
+ for _, v := range c.Aliases {
+ if v == name {
+ return true
+ }
+ }
+
+ return false
+}
+
+func (c *Command) hasCliOptions() bool {
+ ret := false
+
+ c.eachGroup(func(g *Group) {
+ if g.isBuiltinHelp {
+ return
+ }
+
+ for _, opt := range g.options {
+ if opt.canCli() {
+ ret = true
+ }
+ }
+ })
+
+ return ret
+}
+
+func (c *Command) fillParseState(s *parseState) {
+ s.positional = make([]*Arg, len(c.args))
+ copy(s.positional, c.args)
+
+ s.lookup = c.makeLookup()
+ s.command = c
+}
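
In practice commands are declared with the `command:"..."` struct tag that scanSubcommandHandler above picks up, and the parser invokes Execute on the active command's data via the Commander interface. A minimal sketch of the declarative form (addCmd and its flags are illustrative):

```go
package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

// addCmd implements Commander; Execute runs when "add" is the active command.
type addCmd struct {
	Force bool `short:"f" long:"force" description:"Force the add"`
}

func (c *addCmd) Execute(args []string) error {
	fmt.Println("add called with", args, "force =", c.Force)
	return nil
}

var opts struct {
	Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
	Add     addCmd `command:"add" description:"Add things" alias:"a"`
}

func main() {
	// Parse selects the subcommand, fills its options, and calls Execute.
	if _, err := flags.Parse(&opts); err != nil {
		os.Exit(1)
	}
}
```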
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/command_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/command_test.go
new file mode 100644
index 00000000000..72d397d2c7c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/command_test.go
@@ -0,0 +1,544 @@
+package flags
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestCommandInline(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Command struct {
+ G bool `short:"g"`
+ } `command:"cmd"`
+ }{}
+
+ p, ret := assertParserSuccess(t, &opts, "-v", "cmd", "-g")
+
+ assertStringArray(t, ret, []string{})
+
+ if p.Active == nil {
+ t.Errorf("Expected active command")
+ }
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ if !opts.Command.G {
+ t.Errorf("Expected Command.G to be true")
+ }
+
+ if p.Command.Find("cmd") != p.Active {
+ t.Errorf("Expected to find command `cmd' to be active")
+ }
+}
+
+func TestCommandInlineMulti(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ C1 struct {
+ } `command:"c1"`
+
+ C2 struct {
+ G bool `short:"g"`
+ } `command:"c2"`
+ }{}
+
+ p, ret := assertParserSuccess(t, &opts, "-v", "c2", "-g")
+
+ assertStringArray(t, ret, []string{})
+
+ if p.Active == nil {
+ t.Errorf("Expected active command")
+ }
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ if !opts.C2.G {
+ t.Errorf("Expected C2.G to be true")
+ }
+
+ if p.Command.Find("c1") == nil {
+ t.Errorf("Expected to find command `c1'")
+ }
+
+ if c2 := p.Command.Find("c2"); c2 == nil {
+ t.Errorf("Expected to find command `c2'")
+ } else if c2 != p.Active {
+ t.Errorf("Expected to find command `c2' to be active")
+ }
+}
+
+func TestCommandFlagOrder1(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Command struct {
+ G bool `short:"g"`
+ } `command:"cmd"`
+ }{}
+
+ assertParseFail(t, ErrUnknownFlag, "unknown flag `g'", &opts, "-v", "-g", "cmd")
+}
+
+func TestCommandFlagOrder2(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Command struct {
+ G bool `short:"g"`
+ } `command:"cmd"`
+ }{}
+
+ assertParseSuccess(t, &opts, "cmd", "-v", "-g")
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ if !opts.Command.G {
+ t.Errorf("Expected Command.G to be true")
+ }
+}
+
+func TestCommandFlagOrderSub(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Command struct {
+ G bool `short:"g"`
+
+ SubCommand struct {
+ B bool `short:"b"`
+ } `command:"sub"`
+ } `command:"cmd"`
+ }{}
+
+ assertParseSuccess(t, &opts, "cmd", "sub", "-v", "-g", "-b")
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ if !opts.Command.G {
+ t.Errorf("Expected Command.G to be true")
+ }
+
+ if !opts.Command.SubCommand.B {
+ t.Errorf("Expected Command.SubCommand.B to be true")
+ }
+}
+
+func TestCommandFlagOverride1(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Command struct {
+ Value bool `short:"v"`
+ } `command:"cmd"`
+ }{}
+
+ assertParseSuccess(t, &opts, "-v", "cmd")
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ if opts.Command.Value {
+ t.Errorf("Expected Command.Value to be false")
+ }
+}
+
+func TestCommandFlagOverride2(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Command struct {
+ Value bool `short:"v"`
+ } `command:"cmd"`
+ }{}
+
+ assertParseSuccess(t, &opts, "cmd", "-v")
+
+ if opts.Value {
+ t.Errorf("Expected Value to be false")
+ }
+
+ if !opts.Command.Value {
+ t.Errorf("Expected Command.Value to be true")
+ }
+}
+
+func TestCommandFlagOverrideSub(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Command struct {
+ Value bool `short:"v"`
+
+ SubCommand struct {
+ Value bool `short:"v"`
+ } `command:"sub"`
+ } `command:"cmd"`
+ }{}
+
+ assertParseSuccess(t, &opts, "cmd", "sub", "-v")
+
+ if opts.Value {
+ t.Errorf("Expected Value to be false")
+ }
+
+ if opts.Command.Value {
+ t.Errorf("Expected Command.Value to be false")
+ }
+
+ if !opts.Command.SubCommand.Value {
+ t.Errorf("Expected Command.Value to be true")
+ }
+}
+
+func TestCommandFlagOverrideSub2(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Command struct {
+ Value bool `short:"v"`
+
+ SubCommand struct {
+ G bool `short:"g"`
+ } `command:"sub"`
+ } `command:"cmd"`
+ }{}
+
+ assertParseSuccess(t, &opts, "cmd", "sub", "-v")
+
+ if opts.Value {
+ t.Errorf("Expected Value to be false")
+ }
+
+ if !opts.Command.Value {
+ t.Errorf("Expected Command.Value to be true")
+ }
+}
+
+func TestCommandEstimate(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Cmd1 struct {
+ } `command:"remove"`
+
+ Cmd2 struct {
+ } `command:"add"`
+ }{}
+
+ p := NewParser(&opts, None)
+ _, err := p.ParseArgs([]string{})
+
+ assertError(t, err, ErrCommandRequired, "Please specify one command of: add or remove")
+}
+
+func TestCommandEstimate2(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Cmd1 struct {
+ } `command:"remove"`
+
+ Cmd2 struct {
+ } `command:"add"`
+ }{}
+
+ p := NewParser(&opts, None)
+ _, err := p.ParseArgs([]string{"rmive"})
+
+ assertError(t, err, ErrUnknownCommand, "Unknown command `rmive', did you mean `remove'?")
+}
+
+type testCommand struct {
+ G bool `short:"g"`
+ Executed bool
+ EArgs []string
+}
+
+func (c *testCommand) Execute(args []string) error {
+ c.Executed = true
+ c.EArgs = args
+
+ return nil
+}
+
+func TestCommandExecute(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Command testCommand `command:"cmd"`
+ }{}
+
+ assertParseSuccess(t, &opts, "-v", "cmd", "-g", "a", "b")
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ if !opts.Command.Executed {
+ t.Errorf("Did not execute command")
+ }
+
+ if !opts.Command.G {
+ t.Errorf("Expected Command.C to be true")
+ }
+
+ assertStringArray(t, opts.Command.EArgs, []string{"a", "b"})
+}
+
+func TestCommandClosest(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Cmd1 struct {
+ } `command:"remove"`
+
+ Cmd2 struct {
+ } `command:"add"`
+ }{}
+
+ args := assertParseFail(t, ErrUnknownCommand, "Unknown command `addd', did you mean `add'?", &opts, "-v", "addd")
+
+ assertStringArray(t, args, []string{"addd"})
+}
+
+func TestCommandAdd(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+ }{}
+
+ var cmd = struct {
+ G bool `short:"g"`
+ }{}
+
+ p := NewParser(&opts, Default)
+ c, err := p.AddCommand("cmd", "", "", &cmd)
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ return
+ }
+
+ ret, err := p.ParseArgs([]string{"-v", "cmd", "-g", "rest"})
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ return
+ }
+
+ assertStringArray(t, ret, []string{"rest"})
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ if !cmd.G {
+ t.Errorf("Expected Command.G to be true")
+ }
+
+ if p.Command.Find("cmd") != c {
+ t.Errorf("Expected to find command `cmd'")
+ }
+
+ if p.Commands()[0] != c {
+ t.Errorf("Expected command %#v, but got %#v", c, p.Commands()[0])
+ }
+
+ if c.Options()[0].ShortName != 'g' {
+ t.Errorf("Expected short name `g' but got %v", c.Options()[0].ShortName)
+ }
+}
+
+func TestCommandNestedInline(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Command struct {
+ G bool `short:"g"`
+
+ Nested struct {
+ N string `long:"n"`
+ } `command:"nested"`
+ } `command:"cmd"`
+ }{}
+
+ p, ret := assertParserSuccess(t, &opts, "-v", "cmd", "-g", "nested", "--n", "n", "rest")
+
+ assertStringArray(t, ret, []string{"rest"})
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ if !opts.Command.G {
+ t.Errorf("Expected Command.G to be true")
+ }
+
+ assertString(t, opts.Command.Nested.N, "n")
+
+ if c := p.Command.Find("cmd"); c == nil {
+ t.Errorf("Expected to find command `cmd'")
+ } else {
+ if c != p.Active {
+ t.Errorf("Expected `cmd' to be the active parser command")
+ }
+
+ if nested := c.Find("nested"); nested == nil {
+ t.Errorf("Expected to find command `nested'")
+ } else if nested != c.Active {
+ t.Errorf("Expected to find command `nested' to be the active `cmd' command")
+ }
+ }
+}
+
+func TestRequiredOnCommand(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v" required:"true"`
+
+ Command struct {
+ G bool `short:"g"`
+ } `command:"cmd"`
+ }{}
+
+ assertParseFail(t, ErrRequired, fmt.Sprintf("the required flag `%cv' was not specified", defaultShortOptDelimiter), &opts, "cmd")
+}
+
+func TestRequiredAllOnCommand(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v" required:"true"`
+ Missing bool `long:"missing" required:"true"`
+
+ Command struct {
+ G bool `short:"g"`
+ } `command:"cmd"`
+ }{}
+
+ assertParseFail(t, ErrRequired, fmt.Sprintf("the required flags `%smissing' and `%cv' were not specified", defaultLongOptDelimiter, defaultShortOptDelimiter), &opts, "cmd")
+}
+
+func TestDefaultOnCommand(t *testing.T) {
+ var opts = struct {
+ Command struct {
+ G bool `short:"g" default:"true"`
+ } `command:"cmd"`
+ }{}
+
+ assertParseSuccess(t, &opts, "cmd")
+
+ if !opts.Command.G {
+ t.Errorf("Expected G to be true")
+ }
+}
+
+func TestSubcommandsOptional(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Cmd1 struct {
+ } `command:"remove"`
+
+ Cmd2 struct {
+ } `command:"add"`
+ }{}
+
+ p := NewParser(&opts, None)
+ p.SubcommandsOptional = true
+
+ _, err := p.ParseArgs([]string{"-v"})
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ return
+ }
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+}
+
+func TestCommandAlias(t *testing.T) {
+ var opts = struct {
+ Command struct {
+ G bool `short:"g" default:"true"`
+ } `command:"cmd" alias:"cm"`
+ }{}
+
+ assertParseSuccess(t, &opts, "cm")
+
+ if !opts.Command.G {
+ t.Errorf("Expected G to be true")
+ }
+}
+
+func TestSubCommandFindOptionByLongFlag(t *testing.T) {
+ var opts struct {
+ Testing bool `long:"testing" description:"Testing"`
+ }
+
+ var cmd struct {
+ Other bool `long:"other" description:"Other"`
+ }
+
+ p := NewParser(&opts, Default)
+ c, _ := p.AddCommand("command", "Short", "Long", &cmd)
+
+ opt := c.FindOptionByLongName("other")
+
+ if opt == nil {
+ t.Errorf("Expected option, but found none")
+ }
+
+ assertString(t, opt.LongName, "other")
+
+ opt = c.FindOptionByLongName("testing")
+
+ if opt == nil {
+ t.Errorf("Expected option, but found none")
+ }
+
+ assertString(t, opt.LongName, "testing")
+}
+
+func TestSubCommandFindOptionByShortFlag(t *testing.T) {
+ var opts struct {
+ Testing bool `short:"t" description:"Testing"`
+ }
+
+ var cmd struct {
+ Other bool `short:"o" description:"Other"`
+ }
+
+ p := NewParser(&opts, Default)
+ c, _ := p.AddCommand("command", "Short", "Long", &cmd)
+
+ opt := c.FindOptionByShortName('o')
+
+ if opt == nil {
+ t.Errorf("Expected option, but found none")
+ }
+
+ if opt.ShortName != 'o' {
+ t.Errorf("Expected 'o', but got %v", opt.ShortName)
+ }
+
+ opt = c.FindOptionByShortName('t')
+
+ if opt == nil {
+ t.Errorf("Expected option, but found none")
+ }
+
+ if opt.ShortName != 't' {
+ t.Errorf("Expected 'o', but got %v", opt.ShortName)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/completion.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/completion.go
new file mode 100644
index 00000000000..894f1d6aeef
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/completion.go
@@ -0,0 +1,300 @@
+package flags
+
+import (
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// Completion is a type containing information about a completion item.
+type Completion struct {
+ // The completed item
+ Item string
+
+ // A description of the completed item (optional)
+ Description string
+}
+
+type completions []Completion
+
+func (c completions) Len() int {
+ return len(c)
+}
+
+func (c completions) Less(i, j int) bool {
+ return c[i].Item < c[j].Item
+}
+
+func (c completions) Swap(i, j int) {
+ c[i], c[j] = c[j], c[i]
+}
+
+// Completer is an interface which can be implemented by types
+// to provide custom command line argument completion.
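+//
+// A minimal sketch of an implementation (the ColorFlag type is illustrative,
+// not part of this package):
+//
+// type ColorFlag string
+//
+// func (c *ColorFlag) Complete(match string) []Completion {
+// var ret []Completion
+// for _, color := range []string{"red", "green", "blue"} {
+// if strings.HasPrefix(color, match) {
+// ret = append(ret, Completion{Item: color})
+// }
+// }
+// return ret
+// }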
+type Completer interface {
+ // Complete receives a prefix representing a (partial) value
+ // for its type and should provide a list of possible valid
+ // completions.
+ Complete(match string) []Completion
+}
+
+type completion struct {
+ parser *Parser
+}
+
+// Filename is a string alias which provides filename completion.
+type Filename string
+
+func completionsWithoutDescriptions(items []string) []Completion {
+ ret := make([]Completion, len(items))
+
+ for i, v := range items {
+ ret[i].Item = v
+ }
+
+ return ret
+}
+
+// Complete returns a list of existing files with the given
+// prefix.
+func (f *Filename) Complete(match string) []Completion {
+ ret, _ := filepath.Glob(match + "*")
+ return completionsWithoutDescriptions(ret)
+}
+
+func (c *completion) skipPositional(s *parseState, n int) {
+ if n >= len(s.positional) {
+ s.positional = nil
+ } else {
+ s.positional = s.positional[n:]
+ }
+}
+
+func (c *completion) completeOptionNames(names map[string]*Option, prefix string, match string) []Completion {
+ n := make([]Completion, 0, len(names))
+
+ for k, opt := range names {
+ if strings.HasPrefix(k, match) {
+ n = append(n, Completion{
+ Item: prefix + k,
+ Description: opt.Description,
+ })
+ }
+ }
+
+ return n
+}
+
+func (c *completion) completeLongNames(s *parseState, prefix string, match string) []Completion {
+ return c.completeOptionNames(s.lookup.longNames, prefix, match)
+}
+
+func (c *completion) completeShortNames(s *parseState, prefix string, match string) []Completion {
+ if len(match) != 0 {
+ return []Completion{
+ Completion{
+ Item: prefix + match,
+ },
+ }
+ }
+
+ return c.completeOptionNames(s.lookup.shortNames, prefix, match)
+}
+
+func (c *completion) completeCommands(s *parseState, match string) []Completion {
+ n := make([]Completion, 0, len(s.command.commands))
+
+ for _, cmd := range s.command.commands {
+ if cmd.data != c && strings.HasPrefix(cmd.Name, match) {
+ n = append(n, Completion{
+ Item: cmd.Name,
+ Description: cmd.ShortDescription,
+ })
+ }
+ }
+
+ return n
+}
+
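+// completeValue asks the value itself for completions when it (or its
+// address) implements Completer, and re-prefixes each returned item so the
+// completion replaces the whole argument being completed.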
+func (c *completion) completeValue(value reflect.Value, prefix string, match string) []Completion {
+ i := value.Interface()
+
+ var ret []Completion
+
+ if cmp, ok := i.(Completer); ok {
+ ret = cmp.Complete(match)
+ } else if value.CanAddr() {
+ if cmp, ok = value.Addr().Interface().(Completer); ok {
+ ret = cmp.Complete(match)
+ }
+ }
+
+ for i, v := range ret {
+ ret[i].Item = prefix + v.Item
+ }
+
+ return ret
+}
+
+func (c *completion) completeArg(arg *Arg, prefix string, match string) []Completion {
+ if arg.isRemaining() {
+ // For remaining positional args (that are parsed into a slice), complete
+ // based on the element type.
+ return c.completeValue(reflect.New(arg.value.Type().Elem()), prefix, match)
+ }
+
+ return c.completeValue(arg.value, prefix, match)
+}
+
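+// complete walks all but the last argument the same way the parser would,
+// tracking the active command, the remaining positional arguments and any
+// option still expecting a value, then produces sorted completions for the
+// final (partial) argument.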
+func (c *completion) complete(args []string) []Completion {
+ if len(args) == 0 {
+ args = []string{""}
+ }
+
+ s := &parseState{
+ args: args,
+ }
+
+ c.parser.fillParseState(s)
+
+ var opt *Option
+
+ for len(s.args) > 1 {
+ arg := s.pop()
+
+ if (c.parser.Options&PassDoubleDash) != None && arg == "--" {
+ opt = nil
+ c.skipPositional(s, len(s.args)-1)
+
+ break
+ }
+
+ if argumentIsOption(arg) {
+ prefix, optname, islong := stripOptionPrefix(arg)
+ optname, _, argument := splitOption(prefix, optname, islong)
+
+ if argument == nil {
+ var o *Option
+ canarg := true
+
+ if islong {
+ o = s.lookup.longNames[optname]
+ } else {
+ for i, r := range optname {
+ sname := string(r)
+ o = s.lookup.shortNames[sname]
+
+ if o == nil {
+ break
+ }
+
+ if i == 0 && o.canArgument() && len(optname) != len(sname) {
+ canarg = false
+ break
+ }
+ }
+ }
+
+ if o == nil && (c.parser.Options&PassAfterNonOption) != None {
+ opt = nil
+ c.skipPositional(s, len(s.args)-1)
+
+ break
+ } else if o != nil && o.canArgument() && !o.OptionalArgument && canarg {
+ if len(s.args) > 1 {
+ s.pop()
+ } else {
+ opt = o
+ }
+ }
+ }
+ } else {
+ if len(s.positional) > 0 {
+ if !s.positional[0].isRemaining() {
+ // Don't advance beyond a remaining positional arg (because
+ // it consumes all subsequent args).
+ s.positional = s.positional[1:]
+ }
+ } else if cmd, ok := s.lookup.commands[arg]; ok {
+ cmd.fillParseState(s)
+ }
+
+ opt = nil
+ }
+ }
+
+ lastarg := s.args[len(s.args)-1]
+ var ret []Completion
+
+ if opt != nil {
+ // Completion for the argument of 'opt'
+ ret = c.completeValue(opt.value, "", lastarg)
+ } else if argumentStartsOption(lastarg) {
+ // Complete the option
+ prefix, optname, islong := stripOptionPrefix(lastarg)
+ optname, split, argument := splitOption(prefix, optname, islong)
+
+ if argument == nil && !islong {
+ rname, n := utf8.DecodeRuneInString(optname)
+ sname := string(rname)
+
+ if opt := s.lookup.shortNames[sname]; opt != nil && opt.canArgument() {
+ ret = c.completeValue(opt.value, prefix+sname, optname[n:])
+ } else {
+ ret = c.completeShortNames(s, prefix, optname)
+ }
+ } else if argument != nil {
+ if islong {
+ opt = s.lookup.longNames[optname]
+ } else {
+ opt = s.lookup.shortNames[optname]
+ }
+
+ if opt != nil {
+ ret = c.completeValue(opt.value, prefix+optname+split, *argument)
+ }
+ } else if islong {
+ ret = c.completeLongNames(s, prefix, optname)
+ } else {
+ ret = c.completeShortNames(s, prefix, optname)
+ }
+ } else if len(s.positional) > 0 {
+ // Complete for positional argument
+ ret = c.completeArg(s.positional[0], "", lastarg)
+ } else if len(s.command.commands) > 0 {
+ // Complete for command
+ ret = c.completeCommands(s, lastarg)
+ }
+
+ sort.Sort(completions(ret))
+ return ret
+}
+
+func (c *completion) print(items []Completion, showDescriptions bool) {
+ if showDescriptions && len(items) > 1 {
+ maxl := 0
+
+ for _, v := range items {
+ if len(v.Item) > maxl {
+ maxl = len(v.Item)
+ }
+ }
+
+ for _, v := range items {
+ fmt.Printf("%s", v.Item)
+
+ if len(v.Description) > 0 {
+ fmt.Printf("%s # %s", strings.Repeat(" ", maxl-len(v.Item)), v.Description)
+ }
+
+ fmt.Printf("\n")
+ }
+ } else {
+ for _, v := range items {
+ fmt.Println(v.Item)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/completion_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/completion_test.go
new file mode 100644
index 00000000000..f440fd70fe5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/completion_test.go
@@ -0,0 +1,294 @@
+package flags
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "path"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+type TestComplete struct {
+}
+
+func (t *TestComplete) Complete(match string) []Completion {
+ options := []string{
+ "hello world",
+ "hello universe",
+ "hello multiverse",
+ }
+
+ ret := make([]Completion, 0, len(options))
+
+ for _, o := range options {
+ if strings.HasPrefix(o, match) {
+ ret = append(ret, Completion{
+ Item: o,
+ })
+ }
+ }
+
+ return ret
+}
+
+var completionTestOptions struct {
+ Verbose bool `short:"v" long:"verbose" description:"Verbose messages"`
+ Debug bool `short:"d" long:"debug" description:"Enable debug"`
+ Version bool `long:"version" description:"Show version"`
+ Required bool `long:"required" required:"true" description:"This is required"`
+
+ AddCommand struct {
+ Positional struct {
+ Filename Filename
+ } `positional-args:"yes"`
+ } `command:"add" description:"add an item"`
+
+ AddMultiCommand struct {
+ Positional struct {
+ Filename []Filename
+ } `positional-args:"yes"`
+ } `command:"add-multi" description:"add multiple items"`
+
+ RemoveCommand struct {
+ Other bool `short:"o"`
+ File Filename `short:"f" long:"filename"`
+ } `command:"rm" description:"remove an item"`
+
+ RenameCommand struct {
+ Completed TestComplete `short:"c" long:"completed"`
+ } `command:"rename" description:"rename an item"`
+}
+
+type completionTest struct {
+ Args []string
+ Completed []string
+ ShowDescriptions bool
+}
+
+var completionTests []completionTest
+
+func init() {
+ _, sourcefile, _, _ := runtime.Caller(0)
+ completionTestSourcedir := filepath.Join(filepath.SplitList(path.Dir(sourcefile))...)
+
+ completionTestFilename := []string{filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion_test.go")}
+
+ completionTests = []completionTest{
+ {
+ // Short names
+ []string{"-"},
+ []string{"-d", "-v"},
+ false,
+ },
+
+ {
+ // Short names concatenated
+ []string{"-dv"},
+ []string{"-dv"},
+ false,
+ },
+
+ {
+ // Long names
+ []string{"--"},
+ []string{"--debug", "--required", "--verbose", "--version"},
+ false,
+ },
+
+ {
+ // Long names with descriptions
+ []string{"--"},
+ []string{
+ "--debug # Enable debug",
+ "--required # This is required",
+ "--verbose # Verbose messages",
+ "--version # Show version",
+ },
+ true,
+ },
+
+ {
+ // Long names partial
+ []string{"--ver"},
+ []string{"--verbose", "--version"},
+ false,
+ },
+
+ {
+ // Commands
+ []string{""},
+ []string{"add", "add-multi", "rename", "rm"},
+ false,
+ },
+
+ {
+ // Commands with descriptions
+ []string{""},
+ []string{
+ "add # add an item",
+ "add-multi # add multiple items",
+ "rename # rename an item",
+ "rm # remove an item",
+ },
+ true,
+ },
+
+ {
+ // Commands partial
+ []string{"r"},
+ []string{"rename", "rm"},
+ false,
+ },
+
+ {
+ // Positional filename
+ []string{"add", filepath.Join(completionTestSourcedir, "completion")},
+ completionTestFilename,
+ false,
+ },
+
+ {
+ // Multiple positional filename (1 arg)
+ []string{"add-multi", filepath.Join(completionTestSourcedir, "completion")},
+ completionTestFilename,
+ false,
+ },
+ {
+ // Multiple positional filename (2 args)
+ []string{"add-multi", filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion")},
+ completionTestFilename,
+ false,
+ },
+ {
+ // Multiple positional filename (3 args)
+ []string{"add-multi", filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion.go"), filepath.Join(completionTestSourcedir, "completion")},
+ completionTestFilename,
+ false,
+ },
+
+ {
+ // Flag filename
+ []string{"rm", "-f", path.Join(completionTestSourcedir, "completion")},
+ completionTestFilename,
+ false,
+ },
+
+ {
+ // Flag short concat last filename
+ []string{"rm", "-of", path.Join(completionTestSourcedir, "completion")},
+ completionTestFilename,
+ false,
+ },
+
+ {
+ // Flag concat filename
+ []string{"rm", "-f" + path.Join(completionTestSourcedir, "completion")},
+ []string{"-f" + completionTestFilename[0], "-f" + completionTestFilename[1]},
+ false,
+ },
+
+ {
+ // Flag equal concat filename
+ []string{"rm", "-f=" + path.Join(completionTestSourcedir, "completion")},
+ []string{"-f=" + completionTestFilename[0], "-f=" + completionTestFilename[1]},
+ false,
+ },
+
+ {
+ // Flag concat long filename
+ []string{"rm", "--filename=" + path.Join(completionTestSourcedir, "completion")},
+ []string{"--filename=" + completionTestFilename[0], "--filename=" + completionTestFilename[1]},
+ false,
+ },
+
+ {
+ // Flag long filename
+ []string{"rm", "--filename", path.Join(completionTestSourcedir, "completion")},
+ completionTestFilename,
+ false,
+ },
+
+ {
+ // Custom completed
+ []string{"rename", "-c", "hello un"},
+ []string{"hello universe"},
+ false,
+ },
+ }
+}
+
+func TestCompletion(t *testing.T) {
+ p := NewParser(&completionTestOptions, Default)
+ c := &completion{parser: p}
+
+ for _, test := range completionTests {
+ if test.ShowDescriptions {
+ continue
+ }
+
+ ret := c.complete(test.Args)
+ items := make([]string, len(ret))
+
+ for i, v := range ret {
+ items[i] = v.Item
+ }
+
+ if !reflect.DeepEqual(items, test.Completed) {
+ t.Errorf("Args: %#v, %#v\n Expected: %#v\n Got: %#v", test.Args, test.ShowDescriptions, test.Completed, items)
+ }
+ }
+}
+
+func TestParserCompletion(t *testing.T) {
+ for _, test := range completionTests {
+ if test.ShowDescriptions {
+ os.Setenv("GO_FLAGS_COMPLETION", "verbose")
+ } else {
+ os.Setenv("GO_FLAGS_COMPLETION", "1")
+ }
+
+ tmp := os.Stdout
+
+ r, w, _ := os.Pipe()
+ os.Stdout = w
+
+ out := make(chan string)
+
+ go func() {
+ var buf bytes.Buffer
+
+ io.Copy(&buf, r)
+
+ out <- buf.String()
+ }()
+
+ p := NewParser(&completionTestOptions, None)
+
+ p.CompletionHandler = func(items []Completion) {
+ comp := &completion{parser: p}
+ comp.print(items, test.ShowDescriptions)
+ }
+
+ _, err := p.ParseArgs(test.Args)
+
+ w.Close()
+
+ os.Stdout = tmp
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+
+ got := strings.Split(strings.Trim(<-out, "\n"), "\n")
+
+ if !reflect.DeepEqual(got, test.Completed) {
+ t.Errorf("Expected: %#v\nGot: %#v", test.Completed, got)
+ }
+ }
+
+ os.Setenv("GO_FLAGS_COMPLETION", "")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/convert.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/convert.go
new file mode 100644
index 00000000000..938c3ac1c2c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/convert.go
@@ -0,0 +1,341 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// to a string representation of the flag.
+type Marshaler interface {
+ // MarshalFlag marshals a flag value to its string representation.
+ MarshalFlag() (string, error)
+}
+
+// Unmarshaler is the interface implemented by types that can unmarshal a flag
+// argument to themselves. The provided value is directly passed from the
+// command line.
+type Unmarshaler interface {
+ // UnmarshalFlag unmarshals a string value representation to the flag
+ // value (which therefore needs to be a pointer receiver).
+ UnmarshalFlag(value string) error
+}
+
+func getBase(options multiTag, base int) (int, error) {
+ sbase := options.Get("base")
+
+ var err error
+ var ivbase int64
+
+ if sbase != "" {
+ ivbase, err = strconv.ParseInt(sbase, 10, 32)
+ base = int(ivbase)
+ }
+
+ return base, err
+}
+
+func convertMarshal(val reflect.Value) (bool, string, error) {
+ // Check first for the Marshaler interface
+ if val.Type().NumMethod() > 0 && val.CanInterface() {
+ if marshaler, ok := val.Interface().(Marshaler); ok {
+ ret, err := marshaler.MarshalFlag()
+ return true, ret, err
+ }
+ }
+
+ return false, "", nil
+}
+
+func convertToString(val reflect.Value, options multiTag) (string, error) {
+ if ok, ret, err := convertMarshal(val); ok {
+ return ret, err
+ }
+
+ tp := val.Type()
+
+ // Support for time.Duration
+ if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() {
+ stringer := val.Interface().(fmt.Stringer)
+ return stringer.String(), nil
+ }
+
+ switch tp.Kind() {
+ case reflect.String:
+ return val.String(), nil
+ case reflect.Bool:
+ if val.Bool() {
+ return "true", nil
+ }
+
+ return "false", nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ base, err := getBase(options, 10)
+
+ if err != nil {
+ return "", err
+ }
+
+ return strconv.FormatInt(val.Int(), base), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ base, err := getBase(options, 10)
+
+ if err != nil {
+ return "", err
+ }
+
+ return strconv.FormatUint(val.Uint(), base), nil
+ case reflect.Float32, reflect.Float64:
+ return strconv.FormatFloat(val.Float(), 'g', -1, tp.Bits()), nil
+ case reflect.Slice:
+ if val.Len() == 0 {
+ return "", nil
+ }
+
+ ret := "["
+
+ for i := 0; i < val.Len(); i++ {
+ if i != 0 {
+ ret += ", "
+ }
+
+ item, err := convertToString(val.Index(i), options)
+
+ if err != nil {
+ return "", err
+ }
+
+ ret += item
+ }
+
+ return ret + "]", nil
+ case reflect.Map:
+ ret := "{"
+
+ for i, key := range val.MapKeys() {
+ if i != 0 {
+ ret += ", "
+ }
+
+ keyitem, err := convertToString(key, options)
+
+ if err != nil {
+ return "", err
+ }
+
+ item, err := convertToString(val.MapIndex(key), options)
+
+ if err != nil {
+ return "", err
+ }
+
+ ret += keyitem + ":" + item
+ }
+
+ return ret + "}", nil
+ case reflect.Ptr:
+ return convertToString(reflect.Indirect(val), options)
+ case reflect.Interface:
+ if !val.IsNil() {
+ return convertToString(val.Elem(), options)
+ }
+ }
+
+ return "", nil
+}
+
+func convertUnmarshal(val string, retval reflect.Value) (bool, error) {
+ if retval.Type().NumMethod() > 0 && retval.CanInterface() {
+ if unmarshaler, ok := retval.Interface().(Unmarshaler); ok {
+ return true, unmarshaler.UnmarshalFlag(val)
+ }
+ }
+
+ if retval.Type().Kind() != reflect.Ptr && retval.CanAddr() {
+ return convertUnmarshal(val, retval.Addr())
+ }
+
+ if retval.Type().Kind() == reflect.Interface && !retval.IsNil() {
+ return convertUnmarshal(val, retval.Elem())
+ }
+
+ return false, nil
+}
+
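+// convert parses the string val into retval, dispatching on the destination
+// type: an Unmarshaler implementation is tried first, then time.Duration,
+// booleans, numeric types (honoring the "base" tag), slices (appending one
+// element per call), maps (a single key:value pair per call), pointers and
+// interfaces.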
+func convert(val string, retval reflect.Value, options multiTag) error {
+ if ok, err := convertUnmarshal(val, retval); ok {
+ return err
+ }
+
+ tp := retval.Type()
+
+ // Support for time.Duration
+ if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() {
+ parsed, err := time.ParseDuration(val)
+
+ if err != nil {
+ return err
+ }
+
+ retval.SetInt(int64(parsed))
+ return nil
+ }
+
+ switch tp.Kind() {
+ case reflect.String:
+ retval.SetString(val)
+ case reflect.Bool:
+ if val == "" {
+ retval.SetBool(true)
+ } else {
+ b, err := strconv.ParseBool(val)
+
+ if err != nil {
+ return err
+ }
+
+ retval.SetBool(b)
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ base, err := getBase(options, 10)
+
+ if err != nil {
+ return err
+ }
+
+ parsed, err := strconv.ParseInt(val, base, tp.Bits())
+
+ if err != nil {
+ return err
+ }
+
+ retval.SetInt(parsed)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ base, err := getBase(options, 10)
+
+ if err != nil {
+ return err
+ }
+
+ parsed, err := strconv.ParseUint(val, base, tp.Bits())
+
+ if err != nil {
+ return err
+ }
+
+ retval.SetUint(parsed)
+ case reflect.Float32, reflect.Float64:
+ parsed, err := strconv.ParseFloat(val, tp.Bits())
+
+ if err != nil {
+ return err
+ }
+
+ retval.SetFloat(parsed)
+ case reflect.Slice:
+ elemtp := tp.Elem()
+
+ elemvalptr := reflect.New(elemtp)
+ elemval := reflect.Indirect(elemvalptr)
+
+ if err := convert(val, elemval, options); err != nil {
+ return err
+ }
+
+ retval.Set(reflect.Append(retval, elemval))
+ case reflect.Map:
+ parts := strings.SplitN(val, ":", 2)
+
+ key := parts[0]
+ var value string
+
+ if len(parts) == 2 {
+ value = parts[1]
+ }
+
+ keytp := tp.Key()
+ keyval := reflect.New(keytp)
+
+ if err := convert(key, keyval, options); err != nil {
+ return err
+ }
+
+ valuetp := tp.Elem()
+ valueval := reflect.New(valuetp)
+
+ if err := convert(value, valueval, options); err != nil {
+ return err
+ }
+
+ if retval.IsNil() {
+ retval.Set(reflect.MakeMap(tp))
+ }
+
+ retval.SetMapIndex(reflect.Indirect(keyval), reflect.Indirect(valueval))
+ case reflect.Ptr:
+ if retval.IsNil() {
+ retval.Set(reflect.New(retval.Type().Elem()))
+ }
+
+ return convert(val, reflect.Indirect(retval), options)
+ case reflect.Interface:
+ if !retval.IsNil() {
+ return convert(val, retval.Elem(), options)
+ }
+ }
+
+ return nil
+}
+
+func isPrint(s string) bool {
+ for _, c := range s {
+ if !strconv.IsPrint(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func quoteIfNeeded(s string) string {
+ if !isPrint(s) {
+ return strconv.Quote(s)
+ }
+
+ return s
+}
+
+func quoteIfNeededV(s []string) []string {
+ ret := make([]string, len(s))
+
+ for i, v := range s {
+ ret[i] = quoteIfNeeded(v)
+ }
+
+ return ret
+}
+
+func quoteV(s []string) []string {
+ ret := make([]string, len(s))
+
+ for i, v := range s {
+ ret[i] = strconv.Quote(v)
+ }
+
+ return ret
+}
+
+func unquoteIfPossible(s string) (string, error) {
+ if len(s) == 0 || s[0] != '"' {
+ return s, nil
+ }
+
+ return strconv.Unquote(s)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/convert_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/convert_test.go
new file mode 100644
index 00000000000..ef131dc8d1e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/convert_test.go
@@ -0,0 +1,159 @@
+package flags
+
+import (
+ "testing"
+ "time"
+)
+
+func expectConvert(t *testing.T, o *Option, expected string) {
+ s, err := convertToString(o.value, o.tag)
+
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ return
+ }
+
+ assertString(t, s, expected)
+}
+
+func TestConvertToString(t *testing.T) {
+ d, _ := time.ParseDuration("1h2m4s")
+
+ var opts = struct {
+ String string `long:"string"`
+
+ Int int `long:"int"`
+ Int8 int8 `long:"int8"`
+ Int16 int16 `long:"int16"`
+ Int32 int32 `long:"int32"`
+ Int64 int64 `long:"int64"`
+
+ Uint uint `long:"uint"`
+ Uint8 uint8 `long:"uint8"`
+ Uint16 uint16 `long:"uint16"`
+ Uint32 uint32 `long:"uint32"`
+ Uint64 uint64 `long:"uint64"`
+
+ Float32 float32 `long:"float32"`
+ Float64 float64 `long:"float64"`
+
+ Duration time.Duration `long:"duration"`
+
+ Bool bool `long:"bool"`
+
+ IntSlice []int `long:"int-slice"`
+ IntFloatMap map[int]float64 `long:"int-float-map"`
+
+ PtrBool *bool `long:"ptr-bool"`
+ Interface interface{} `long:"interface"`
+
+ Int32Base int32 `long:"int32-base" base:"16"`
+ Uint32Base uint32 `long:"uint32-base" base:"16"`
+ }{
+ "string",
+
+ -2,
+ -1,
+ 0,
+ 1,
+ 2,
+
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+
+ 1.2,
+ -3.4,
+
+ d,
+ true,
+
+ []int{-3, 4, -2},
+ map[int]float64{-2: 4.5},
+
+ new(bool),
+ float32(5.2),
+
+ -5823,
+ 4232,
+ }
+
+ p := NewNamedParser("test", Default)
+ grp, _ := p.AddGroup("test group", "", &opts)
+
+ expects := []string{
+ "string",
+ "-2",
+ "-1",
+ "0",
+ "1",
+ "2",
+
+ "1",
+ "2",
+ "3",
+ "4",
+ "5",
+
+ "1.2",
+ "-3.4",
+
+ "1h2m4s",
+ "true",
+
+ "[-3, 4, -2]",
+ "{-2:4.5}",
+
+ "false",
+ "5.2",
+
+ "-16bf",
+ "1088",
+ }
+
+ for i, v := range grp.Options() {
+ expectConvert(t, v, expects[i])
+ }
+}
+
+func TestConvertToStringInvalidIntBase(t *testing.T) {
+ var opts = struct {
+ Int int `long:"int" base:"no"`
+ }{
+ 2,
+ }
+
+ p := NewNamedParser("test", Default)
+ grp, _ := p.AddGroup("test group", "", &opts)
+ o := grp.Options()[0]
+
+ _, err := convertToString(o.value, o.tag)
+
+ if err != nil {
+ err = newErrorf(ErrMarshal, "%v", err)
+ }
+
+ assertError(t, err, ErrMarshal, "strconv.ParseInt: parsing \"no\": invalid syntax")
+}
+
+func TestConvertToStringInvalidUintBase(t *testing.T) {
+ var opts = struct {
+ Uint uint `long:"uint" base:"no"`
+ }{
+ 2,
+ }
+
+ p := NewNamedParser("test", Default)
+ grp, _ := p.AddGroup("test group", "", &opts)
+ o := grp.Options()[0]
+
+ _, err := convertToString(o.value, o.tag)
+
+ if err != nil {
+ err = newErrorf(ErrMarshal, "%v", err)
+ }
+
+ assertError(t, err, ErrMarshal, "strconv.ParseInt: parsing \"no\": invalid syntax")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/error.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/error.go
new file mode 100644
index 00000000000..2f27aeeee2f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/error.go
@@ -0,0 +1,129 @@
+package flags
+
+import (
+ "fmt"
+)
+
+// ErrorType represents the type of error.
+type ErrorType uint
+
+const (
+ // ErrUnknown indicates a generic error.
+ ErrUnknown ErrorType = iota
+
+ // ErrExpectedArgument indicates that an argument was expected.
+ ErrExpectedArgument
+
+ // ErrUnknownFlag indicates an unknown flag.
+ ErrUnknownFlag
+
+ // ErrUnknownGroup indicates an unknown group.
+ ErrUnknownGroup
+
+ // ErrMarshal indicates a marshalling error while converting values.
+ ErrMarshal
+
+ // ErrHelp indicates that the built-in help was shown (the error
+ // contains the help message).
+ ErrHelp
+
+ // ErrNoArgumentForBool indicates that an argument was given for a
+ // boolean flag (which does not take an argument).
+ ErrNoArgumentForBool
+
+ // ErrRequired indicates that a required flag was not provided.
+ ErrRequired
+
+ // ErrShortNameTooLong indicates that a short flag name longer than
+ // one character was specified.
+ ErrShortNameTooLong
+
+ // ErrDuplicatedFlag indicates that a short or long flag has been
+ // defined more than once
+ ErrDuplicatedFlag
+
+ // ErrTag indicates an error while parsing flag tags.
+ ErrTag
+
+ // ErrCommandRequired indicates that a command was required but not
+ // specified
+ ErrCommandRequired
+
+ // ErrUnknownCommand indicates that an unknown command was specified.
+ ErrUnknownCommand
+
+ // ErrInvalidChoice indicates an invalid option value which only allows
+ // a certain number of choices.
+ ErrInvalidChoice
+)
+
+func (e ErrorType) String() string {
+ switch e {
+ case ErrUnknown:
+ return "unknown"
+ case ErrExpectedArgument:
+ return "expected argument"
+ case ErrUnknownFlag:
+ return "unknown flag"
+ case ErrUnknownGroup:
+ return "unknown group"
+ case ErrMarshal:
+ return "marshal"
+ case ErrHelp:
+ return "help"
+ case ErrNoArgumentForBool:
+ return "no argument for bool"
+ case ErrRequired:
+ return "required"
+ case ErrShortNameTooLong:
+ return "short name too long"
+ case ErrDuplicatedFlag:
+ return "duplicated flag"
+ case ErrTag:
+ return "tag"
+ case ErrCommandRequired:
+ return "command required"
+ case ErrUnknownCommand:
+ return "unknown command"
+ case ErrInvalidChoice:
+ return "invalid choice"
+ }
+
+ return "unrecognized error type"
+}
+
+// Error represents a parser error. The error returned from Parse is of this
+// type. The error contains both a Type and Message.
+type Error struct {
+ // The type of error
+ Type ErrorType
+
+ // The error message
+ Message string
+}
+
+// Error returns the error's message
+func (e *Error) Error() string {
+ return e.Message
+}
+
+func newError(tp ErrorType, message string) *Error {
+ return &Error{
+ Type: tp,
+ Message: message,
+ }
+}
+
+func newErrorf(tp ErrorType, format string, args ...interface{}) *Error {
+ return newError(tp, fmt.Sprintf(format, args...))
+}
+
+func wrapError(err error) *Error {
+ ret, ok := err.(*Error)
+
+ if !ok {
+ return newError(ErrUnknown, err.Error())
+ }
+
+ return ret
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/example_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/example_test.go
new file mode 100644
index 00000000000..f7be2bb14f2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/example_test.go
@@ -0,0 +1,110 @@
+// Example of use of the flags package.
+package flags
+
+import (
+ "fmt"
+ "os/exec"
+)
+
+func Example() {
+ var opts struct {
+ // Slice of bool will append 'true' each time the option
+ // is encountered (can be set multiple times, like -vvv)
+ Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+
+ // Example of automatic marshalling to desired type (uint)
+ Offset uint `long:"offset" description:"Offset"`
+
+ // Example of a callback, called each time the option is found.
+ Call func(string) `short:"c" description:"Call phone number"`
+
+ // Example of a required flag
+ Name string `short:"n" long:"name" description:"A name" required:"true"`
+
+ // Example of a value name
+ File string `short:"f" long:"file" description:"A file" value-name:"FILE"`
+
+ // Example of a pointer
+ Ptr *int `short:"p" description:"A pointer to an integer"`
+
+ // Example of a slice of strings
+ StringSlice []string `short:"s" description:"A slice of strings"`
+
+ // Example of a slice of pointers
+ PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"`
+
+ // Example of a map
+ IntMap map[string]int `long:"intmap" description:"A map from string to int"`
+
+ // Example of a filename (useful for completion)
+ Filename Filename `long:"filename" description:"A filename"`
+
+ // Example of positional arguments
+ Args struct {
+ Id string
+ Num int
+ Rest []string
+ } `positional-args:"yes" required:"yes"`
+ }
+
+ // Callback which will invoke callto:<argument> to call a number.
+ // Note that this works only on OS X (and probably only with
+ // Skype) but it shows the idea.
+ opts.Call = func(num string) {
+ cmd := exec.Command("open", "callto:"+num)
+ cmd.Start()
+ cmd.Process.Release()
+ }
+
+ // Make some fake arguments to parse.
+ args := []string{
+ "-vv",
+ "--offset=5",
+ "-n", "Me",
+ "-p", "3",
+ "-s", "hello",
+ "-s", "world",
+ "--ptrslice", "hello",
+ "--ptrslice", "world",
+ "--intmap", "a:1",
+ "--intmap", "b:5",
+ "--filename", "hello.go",
+ "id",
+ "10",
+ "remaining1",
+ "remaining2",
+ }
+
+ // Parse flags from `args'. Note that here we use flags.ParseArgs for
+ // the sake of making a working example. Normally, you would simply use
+ // flags.Parse(&opts) which uses os.Args
+ _, err := ParseArgs(&opts, args)
+
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Printf("Verbosity: %v\n", opts.Verbose)
+ fmt.Printf("Offset: %d\n", opts.Offset)
+ fmt.Printf("Name: %s\n", opts.Name)
+ fmt.Printf("Ptr: %d\n", *opts.Ptr)
+ fmt.Printf("StringSlice: %v\n", opts.StringSlice)
+ fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1])
+ fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"])
+ fmt.Printf("Filename: %v\n", opts.Filename)
+ fmt.Printf("Args.Id: %s\n", opts.Args.Id)
+ fmt.Printf("Args.Num: %d\n", opts.Args.Num)
+ fmt.Printf("Args.Rest: %v\n", opts.Args.Rest)
+
+ // Output: Verbosity: [true true]
+ // Offset: 5
+ // Name: Me
+ // Ptr: 3
+ // StringSlice: [hello world]
+ // PtrSlice: [hello world]
+ // IntMap: [a:1 b:5]
+ // Filename: hello.go
+ // Args.Id: id
+ // Args.Num: 10
+ // Args.Rest: [remaining1 remaining2]
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/add.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/add.go
new file mode 100644
index 00000000000..57d8f232b21
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/add.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+ "fmt"
+)
+
+type AddCommand struct {
+ All bool `short:"a" long:"all" description:"Add all files"`
+}
+
+var addCommand AddCommand
+
+func (x *AddCommand) Execute(args []string) error {
+ fmt.Printf("Adding (all=%v): %#v\n", x.All, args)
+ return nil
+}
+
+func init() {
+ parser.AddCommand("add",
+ "Add a file",
+ "The add command adds a file to the repository. Use -a to add all files.",
+ &addCommand)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/bash-completion b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/bash-completion
new file mode 100644
index 00000000000..974f52ad43f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/bash-completion
@@ -0,0 +1,9 @@
+_examples() {
+ args=("${COMP_WORDS[@]:1:$COMP_CWORD}")
+
+ local IFS=$'\n'
+ COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}"))
+ return 1
+}
+
+complete -F _examples examples
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/main.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/main.go
new file mode 100644
index 00000000000..4a22be6e86d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/main.go
@@ -0,0 +1,75 @@
+package main
+
+import (
+ "errors"
+ "fmt"
+ "github.com/jessevdk/go-flags"
+ "os"
+ "strconv"
+ "strings"
+)
+
+type EditorOptions struct {
+ Input flags.Filename `short:"i" long:"input" description:"Input file" default:"-"`
+ Output flags.Filename `short:"o" long:"output" description:"Output file" default:"-"`
+}
+
+type Point struct {
+ X, Y int
+}
+
+func (p *Point) UnmarshalFlag(value string) error {
+ parts := strings.Split(value, ",")
+
+ if len(parts) != 2 {
+ return errors.New("expected two numbers separated by a ,")
+ }
+
+ x, err := strconv.ParseInt(parts[0], 10, 32)
+
+ if err != nil {
+ return err
+ }
+
+ y, err := strconv.ParseInt(parts[1], 10, 32)
+
+ if err != nil {
+ return err
+ }
+
+ p.X = int(x)
+ p.Y = int(y)
+
+ return nil
+}
+
+func (p Point) MarshalFlag() (string, error) {
+ return fmt.Sprintf("%d,%d", p.X, p.Y), nil
+}
+
+type Options struct {
+ // Example of verbosity with level
+ Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
+
+ // Example of optional value
+ User string `short:"u" long:"user" description:"User name" optional:"yes" optional-value:"pancake"`
+
+ // Example of map with multiple default values
+ Users map[string]string `long:"users" description:"User e-mail map" default:"system:system@example.org" default:"admin:admin@example.org"`
+
+ // Example of option group
+ Editor EditorOptions `group:"Editor Options"`
+
+ // Example of custom type Marshal/Unmarshal
+ Point Point `long:"point" description:"A x,y point" default:"1,2"`
+}
+
+var options Options
+
+var parser = flags.NewParser(&options, flags.Default)
+
+func main() {
+ if _, err := parser.Parse(); err != nil {
+ os.Exit(1)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/rm.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/rm.go
new file mode 100644
index 00000000000..c9c1dd03a02
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/examples/rm.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+ "fmt"
+)
+
+type RmCommand struct {
+ Force bool `short:"f" long:"force" description:"Force removal of files"`
+}
+
+var rmCommand RmCommand
+
+func (x *RmCommand) Execute(args []string) error {
+ fmt.Printf("Removing (force=%v): %#v\n", x.Force, args)
+ return nil
+}
+
+func init() {
+ parser.AddCommand("rm",
+ "Remove a file",
+ "The rm command removes a file to the repository. Use -f to force removal of files.",
+ &rmCommand)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/flags.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/flags.go
new file mode 100644
index 00000000000..757d42a56ea
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/flags.go
@@ -0,0 +1,256 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package flags provides an extensive command line option parser.
+The flags package is similar in functionality to the go built-in flag package
+but provides more options and uses reflection to provide a convenient and
+succinct way of specifying command line options.
+
+
+Supported features
+
+The following features are supported in go-flags:
+
+ Options with short names (-v)
+ Options with long names (--verbose)
+ Options with and without arguments (bool vs. other types)
+ Options with optional arguments and default values
+ Option default values from ENVIRONMENT_VARIABLES, including slice and map values
+ Multiple option groups each containing a set of options
+ Generate and print well-formatted help message
+ Passing remaining command line arguments after -- (optional)
+ Ignoring unknown command line options (optional)
+ Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification
+ Supports multiple short options -aux
+ Supports all primitive go types (string, int{8..64}, uint{8..64}, float)
+ Supports the same option multiple times (can be stored in a slice or the last occurrence counts)
+ Supports maps
+ Supports function callbacks
+ Supports namespaces for (nested) option groups
+
+Additional features specific to Windows:
+ Options with short names (/v)
+ Options with long names (/verbose)
+ Windows-style options with arguments use a colon as the delimiter
+ Modify generated help message with Windows-style / options
+
+
+Basic usage
+
+The flags package uses structs, reflection and struct field tags
+to allow users to specify command line options. This results in very simple
+and concise specification of your application options. For example:
+
+ type Options struct {
+ Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+ }
+
+This specifies one option with a short name -v and a long name --verbose.
+When either -v or --verbose is found on the command line, a 'true' value
+will be appended to the Verbose field. e.g. when specifying -vvv, the
+resulting value of Verbose will be {[true, true, true]}.
+
+Slice options work exactly the same as primitive type options, except that
+whenever the option is encountered, a value is appended to the slice.
+
+Map options from string to primitive type are also supported. On the command
+line, you specify the value for such an option as key:value. For example
+
+ type Options struct {
+ AuthorInfo map[string]string `short:"a"`
+ }
+
+Then, the AuthorInfo map can be filled with something like
+-a name:Jesse -a "surname:van den Kieboom".
+
+Finally, for full control over the conversion between command line argument
+values and options, user defined types can choose to implement the Marshaler
+and Unmarshaler interfaces.
+
+
+Available field tags
+
+The following is a list of tags for struct fields supported by go-flags:
+
+ short: the short name of the option (single character)
+ long: the long name of the option
+ required: whether an option is required to appear on the command
+ line. If a required option is not present, the parser will
+ return ErrRequired (optional)
+ description: the description of the option (optional)
+ long-description: the long description of the option. Currently only
+ displayed in generated man pages (optional)
+ no-flag: if non-empty this field is ignored as an option (optional)
+
+ optional: whether an argument of the option is optional. When an
+ argument is optional it can only be specified using
+ --option=argument (optional)
+ optional-value: the value of an optional option when the option occurs
+ without an argument. This tag can be specified multiple
+ times in the case of maps or slices (optional)
+ default: the default value of an option. This tag can be specified
+ multiple times in the case of slices or maps (optional)
+ default-mask: when specified, this value will be displayed in the help
+ instead of the actual default value. This is useful
+ mostly for hiding otherwise sensitive information from
+ showing up in the help. If default-mask takes the special
+ value "-", then no default value will be shown at all
+ (optional)
+ env: the default value of the option is overridden from the
+ specified environment variable, if one has been defined.
+ (optional)
+ env-delim: the 'env' default value from environment is split into
+ multiple values with the given delimiter string, use with
+ slices and maps (optional)
+ value-name: the name of the argument value (to be shown in the help)
+ (optional)
+ choice: limits the values for an option to a set of values.
+ This tag can be specified multiple times (optional)
+ hidden: the option is not visible in the help or man page.
+
+ base: a base (radix) used to convert strings to integer values, the
+ default base is 10 (i.e. decimal) (optional)
+
+ ini-name: the explicit ini option name (optional)
+ no-ini: if non-empty this field is ignored as an ini option
+ (optional)
+
+ group: when specified on a struct field, makes the struct
+ field a separate group with the given name (optional)
+ namespace: when specified on a group struct field, the namespace
+ gets prepended to every option's long name and
+ subgroup's namespace of this group, separated by
+ the parser's namespace delimiter (optional)
+ command: when specified on a struct field, makes the struct
+ field a (sub)command with the given name (optional)
+ subcommands-optional: when specified on a command struct field, makes
+ any subcommands of that command optional (optional)
+ alias: when specified on a command struct field, adds the
+ specified name as an alias for the command. Can be
+ specified multiple times to add more than one
+ alias (optional)
+ positional-args: when specified on a field with a struct type,
+ uses the fields of that struct to parse remaining
+ positional command line arguments into (in order
+ of the fields). If a field has a slice type,
+ then all remaining arguments will be added to it.
+ Positional arguments are optional by default,
+ unless the "required" tag is specified together
+ with the "positional-args" tag. The "required" tag
+ can also be set on the individual rest argument
+ fields, to require only the first N positional
+ arguments. If the "required" tag is set on the
+ rest arguments slice, then its value determines
+ the minimum number of rest arguments that need to
+ be provided (e.g. `required:"2"`) (optional)
+ positional-arg-name: used on a field in a positional argument struct; name
+ of the positional argument placeholder to be shown in
+ the help (optional)
+
+Either the `short:` or the `long:` tag must be specified to make the field eligible as an
+option.
+
+
+Option groups
+
+Option groups are a simple way to semantically separate your options. All
+options in a particular group are shown together in the help under the name
+of the group. Namespaces can be used to specify option long names more
+precisely and emphasize the options affiliation to their group.
+
+There are currently three ways to specify option groups.
+
+ 1. Use NewNamedParser specifying the various option groups.
+ 2. Use AddGroup to add a group to an existing parser.
+ 3. Add a struct field to the top-level options annotated with the
+ group:"group-name" tag.
+
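+A minimal sketch of the third approach (the Editor field and its tag values
+are illustrative):
+
+ type Options struct {
+ Editor struct {
+ Input string `long:"input" description:"Input file"`
+ } `group:"Editor Options"`
+ }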
+
+
+Commands
+
+The flags package also has basic support for commands. Commands are often
+used in monolithic applications that support various commands or actions.
+Take git for example, all of the add, commit, checkout, etc. are called
+commands. Using commands you can easily separate multiple functions of your
+application.
+
+There are currently two ways to specify a command.
+
+ 1. Use AddCommand on an existing parser.
+ 2. Add a struct field to your options struct annotated with the
+ command:"command-name" tag.
+
+The most common, idiomatic way to implement commands is to define a global
+parser instance and implement each command in a separate file. These
+command files should define a go init function which calls AddCommand on
+the global parser.
+
+When parsing ends and there is an active command and that command implements
+the Commander interface, then its Execute method will be run with the
+remaining command line arguments.
+
+Command structs can have options which become valid to parse after the
+command has been specified on the command line, in addition to the options
+of all the parent commands. I.e. considering a -v flag on the parser and an
+add command, the following are equivalent:
+
+ ./app -v add
+ ./app add -v
+
+However, if the -v flag is defined on the add command, then the first of
+the two examples above would fail since the -v flag is not defined before
+the add command.
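+
+A minimal sketch of such a command (mirroring examples/add.go in this
+repository; parser is the global parser instance described above):
+
+ type AddCommand struct {
+ All bool `short:"a" long:"all" description:"Add all files"`
+ }
+
+ func (c *AddCommand) Execute(args []string) error {
+ fmt.Printf("Adding (all=%v): %v\n", c.All, args)
+ return nil
+ }
+
+ func init() {
+ parser.AddCommand("add", "Add a file", "Add a file to the repository.", &AddCommand{})
+ }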
+
+
+Completion
+
+go-flags has builtin support to provide bash completion of flags, commands
+and argument values. To use completion, the binary which uses go-flags
+can be invoked in a special environment to list completion of the current
+command line argument. It should be noted that this `executes` your application,
+and it is up to the user to make sure there are no negative side effects (for
+example from init functions).
+
+Setting the environment variable `GO_FLAGS_COMPLETION=1` enables completion
+by replacing the argument parsing routine with the completion routine which
+outputs completions for the passed arguments. The basic invocation to
+complete a set of arguments is therefore:
+
+ GO_FLAGS_COMPLETION=1 ./completion-example arg1 arg2 arg3
+
+where `completion-example` is the binary, `arg1` and `arg2` are
+the current arguments, and `arg3` (the last argument) is the argument
+to be completed. If GO_FLAGS_COMPLETION is set to "verbose", then
+descriptions of possible completion items will also be shown when there
+is more than one completion item.
+
+To use this with bash completion, a simple file can be written which
+calls the binary which supports go-flags completion:
+
+ _completion_example() {
+ # All arguments except the first one
+ args=("${COMP_WORDS[@]:1:$COMP_CWORD}")
+
+ # Only split on newlines
+ local IFS=$'\n'
+
+ # Call completion (note that the first element of COMP_WORDS is
+ # the executable itself)
+ COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}"))
+ return 0
+ }
+
+ complete -F _completion_example completion-example
+
+Completion requires the parser option PassDoubleDash and is therefore enforced if the environment variable GO_FLAGS_COMPLETION is set.
+
+Customized completion for argument values is supported by implementing
+the flags.Completer interface for the argument value type. An example
+of a type which does so is the flags.Filename type, an alias of string
+allowing simple filename completion. A slice or array argument value
+whose element type implements flags.Completer will also be completed.
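+
+A minimal sketch of such a completer (the Environment type and candidate
+values are illustrative):
+
+    type Environment string
+
+    func (e *Environment) Complete(match string) []flags.Completion {
+        candidates := []string{"development", "staging", "production"}
+
+        ret := make([]flags.Completion, 0, len(candidates))
+
+        for _, c := range candidates {
+            if strings.HasPrefix(c, match) {
+                ret = append(ret, flags.Completion{Item: c})
+            }
+        }
+
+        return ret
+    }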
+*/
+package flags
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/group.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/group.go
new file mode 100644
index 00000000000..debb8de482f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/group.go
@@ -0,0 +1,379 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+ "errors"
+ "reflect"
+ "strings"
+ "unicode/utf8"
+ "unsafe"
+)
+
+// ErrNotPointerToStruct indicates that a provided data container is not
+// a pointer to a struct. Only pointers to structs are valid data containers
+// for options.
+var ErrNotPointerToStruct = errors.New("provided data is not a pointer to struct")
+
+// Group represents an option group. Option groups can be used to logically
+// group options together under a description. Groups are only used to provide
+// more structure to options both for the user (as displayed in the help message)
+// and for you, since groups can be nested.
+type Group struct {
+ // A short description of the group. The
+ // short description is primarily used in the built-in generated help
+ // message
+ ShortDescription string
+
+ // A long description of the group. The long
+ // description is primarily used to present information on commands
+ // (Command embeds Group) in the built-in generated help and man pages.
+ LongDescription string
+
+ // The namespace of the group
+ Namespace string
+
+ // If true, the group is not displayed in the help or man page
+ Hidden bool
+
+ // The parent of the group or nil if it has no parent
+ parent interface{}
+
+ // All the options in the group
+ options []*Option
+
+ // All the subgroups
+ groups []*Group
+
+ // Whether the group represents the built-in help group
+ isBuiltinHelp bool
+
+ data interface{}
+}
+
+type scanHandler func(reflect.Value, *reflect.StructField) (bool, error)
+
+// AddGroup adds a new group to the command with the given short and long
+// description and group data. The data needs to be a pointer to a struct
+// whose fields indicate which options are in the group.
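+//
+// A short sketch of typical use (the struct and descriptions here are
+// illustrative):
+//
+//     var grp struct {
+//         Verbose bool `short:"v" description:"Verbose output"`
+//     }
+//
+//     _, err := parser.AddGroup("Output Options", "Options controlling output", &grp)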
+func (g *Group) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) {
+ group := newGroup(shortDescription, longDescription, data)
+
+ group.parent = g
+
+ if err := group.scan(); err != nil {
+ return nil, err
+ }
+
+ g.groups = append(g.groups, group)
+ return group, nil
+}
+
+// Groups returns the list of groups embedded in this group.
+func (g *Group) Groups() []*Group {
+ return g.groups
+}
+
+// Options returns the list of options in this group.
+func (g *Group) Options() []*Option {
+ return g.options
+}
+
+// Find locates the subgroup with the given short description and returns it.
+// If no such group can be found, Find will return nil. Note that the description
+// is matched case insensitively.
+func (g *Group) Find(shortDescription string) *Group {
+ lshortDescription := strings.ToLower(shortDescription)
+
+ var ret *Group
+
+ g.eachGroup(func(gg *Group) {
+ if gg != g && strings.ToLower(gg.ShortDescription) == lshortDescription {
+ ret = gg
+ }
+ })
+
+ return ret
+}
+
+func (g *Group) findOption(matcher func(*Option) bool) (option *Option) {
+ g.eachGroup(func(g *Group) {
+ for _, opt := range g.options {
+ if option == nil && matcher(opt) {
+ option = opt
+ }
+ }
+ })
+
+ return option
+}
+
+// FindOptionByLongName finds an option that is part of the group, or any
+// of its subgroups, by matching its long name (including the option
+// namespace).
+func (g *Group) FindOptionByLongName(longName string) *Option {
+ return g.findOption(func(option *Option) bool {
+ return option.LongNameWithNamespace() == longName
+ })
+}
+
+// FindOptionByShortName finds an option that is part of the group, or any
+// of its subgroups, by matching its short name.
+func (g *Group) FindOptionByShortName(shortName rune) *Option {
+ return g.findOption(func(option *Option) bool {
+ return option.ShortName == shortName
+ })
+}
+
+func newGroup(shortDescription string, longDescription string, data interface{}) *Group {
+ return &Group{
+ ShortDescription: shortDescription,
+ LongDescription: longDescription,
+
+ data: data,
+ }
+}
+
+func (g *Group) optionByName(name string, namematch func(*Option, string) bool) *Option {
+ prio := 0
+ var retopt *Option
+
+ g.eachGroup(func(g *Group) {
+ for _, opt := range g.options {
+ if namematch != nil && namematch(opt, name) && prio < 4 {
+ retopt = opt
+ prio = 4
+ }
+
+ if name == opt.field.Name && prio < 3 {
+ retopt = opt
+ prio = 3
+ }
+
+ if name == opt.LongNameWithNamespace() && prio < 2 {
+ retopt = opt
+ prio = 2
+ }
+
+ if opt.ShortName != 0 && name == string(opt.ShortName) && prio < 1 {
+ retopt = opt
+ prio = 1
+ }
+ }
+ })
+
+ return retopt
+}
+
+func (g *Group) eachGroup(f func(*Group)) {
+ f(g)
+
+ for _, gg := range g.groups {
+ gg.eachGroup(f)
+ }
+}
+
+func (g *Group) scanStruct(realval reflect.Value, sfield *reflect.StructField, handler scanHandler) error {
+ stype := realval.Type()
+
+ if sfield != nil {
+ if ok, err := handler(realval, sfield); err != nil {
+ return err
+ } else if ok {
+ return nil
+ }
+ }
+
+ for i := 0; i < stype.NumField(); i++ {
+ field := stype.Field(i)
+
+ // PkgPath is set only for non-exported fields, which we ignore
+ if field.PkgPath != "" && !field.Anonymous {
+ continue
+ }
+
+ mtag := newMultiTag(string(field.Tag))
+
+ if err := mtag.Parse(); err != nil {
+ return err
+ }
+
+ // Skip fields with the no-flag tag
+ if mtag.Get("no-flag") != "" {
+ continue
+ }
+
+ // Dive deep into structs or pointers to structs
+ kind := field.Type.Kind()
+ fld := realval.Field(i)
+
+ if kind == reflect.Struct {
+ if err := g.scanStruct(fld, &field, handler); err != nil {
+ return err
+ }
+ } else if kind == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct {
+ if fld.IsNil() {
+ fld.Set(reflect.New(fld.Type().Elem()))
+ }
+
+ if err := g.scanStruct(reflect.Indirect(fld), &field, handler); err != nil {
+ return err
+ }
+ }
+
+ longname := mtag.Get("long")
+ shortname := mtag.Get("short")
+
+ // Need at least either a short or long name
+ if longname == "" && shortname == "" && mtag.Get("ini-name") == "" {
+ continue
+ }
+
+ short := rune(0)
+ rc := utf8.RuneCountInString(shortname)
+
+ if rc > 1 {
+ return newErrorf(ErrShortNameTooLong,
+ "short names can only be 1 character long, not `%s'",
+ shortname)
+
+ } else if rc == 1 {
+ short, _ = utf8.DecodeRuneInString(shortname)
+ }
+
+ description := mtag.Get("description")
+ def := mtag.GetMany("default")
+
+ optionalValue := mtag.GetMany("optional-value")
+ valueName := mtag.Get("value-name")
+ defaultMask := mtag.Get("default-mask")
+
+ optional := (mtag.Get("optional") != "")
+ required := (mtag.Get("required") != "")
+ choices := mtag.GetMany("choice")
+ hidden := (mtag.Get("hidden") != "")
+
+ option := &Option{
+ Description: description,
+ ShortName: short,
+ LongName: longname,
+ Default: def,
+ EnvDefaultKey: mtag.Get("env"),
+ EnvDefaultDelim: mtag.Get("env-delim"),
+ OptionalArgument: optional,
+ OptionalValue: optionalValue,
+ Required: required,
+ ValueName: valueName,
+ DefaultMask: defaultMask,
+ Choices: choices,
+ Hidden: hidden,
+
+ group: g,
+
+ field: field,
+ value: realval.Field(i),
+ tag: mtag,
+ }
+
+ g.options = append(g.options, option)
+ }
+
+ return nil
+}
+
+func (g *Group) checkForDuplicateFlags() *Error {
+ shortNames := make(map[rune]*Option)
+ longNames := make(map[string]*Option)
+
+ var duplicateError *Error
+
+ g.eachGroup(func(g *Group) {
+ for _, option := range g.options {
+ if option.LongName != "" {
+ longName := option.LongNameWithNamespace()
+
+ if otherOption, ok := longNames[longName]; ok {
+ duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same long name as option `%s'", option, otherOption)
+ return
+ }
+ longNames[longName] = option
+ }
+ if option.ShortName != 0 {
+ if otherOption, ok := shortNames[option.ShortName]; ok {
+ duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same short name as option `%s'", option, otherOption)
+ return
+ }
+ shortNames[option.ShortName] = option
+ }
+ }
+ })
+
+ return duplicateError
+}
+
+func (g *Group) scanSubGroupHandler(realval reflect.Value, sfield *reflect.StructField) (bool, error) {
+ mtag := newMultiTag(string(sfield.Tag))
+
+ if err := mtag.Parse(); err != nil {
+ return true, err
+ }
+
+ subgroup := mtag.Get("group")
+
+ if len(subgroup) != 0 {
+ ptrval := reflect.NewAt(realval.Type(), unsafe.Pointer(realval.UnsafeAddr()))
+ description := mtag.Get("description")
+
+ group, err := g.AddGroup(subgroup, description, ptrval.Interface())
+ if err != nil {
+ return true, err
+ }
+
+ group.Namespace = mtag.Get("namespace")
+ group.Hidden = mtag.Get("hidden") != ""
+
+ return true, nil
+ }
+
+ return false, nil
+}
+
+func (g *Group) scanType(handler scanHandler) error {
+ // Get all the public fields in the data struct
+ ptrval := reflect.ValueOf(g.data)
+
+ if ptrval.Type().Kind() != reflect.Ptr {
+ panic(ErrNotPointerToStruct)
+ }
+
+ stype := ptrval.Type().Elem()
+
+ if stype.Kind() != reflect.Struct {
+ panic(ErrNotPointerToStruct)
+ }
+
+ realval := reflect.Indirect(ptrval)
+
+ if err := g.scanStruct(realval, nil, handler); err != nil {
+ return err
+ }
+
+ if err := g.checkForDuplicateFlags(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (g *Group) scan() error {
+ return g.scanType(g.scanSubGroupHandler)
+}
+
+func (g *Group) groupByName(name string) *Group {
+ if len(name) == 0 {
+ return g
+ }
+
+ return g.Find(name)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/group_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/group_test.go
new file mode 100644
index 00000000000..18cd6c17394
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/group_test.go
@@ -0,0 +1,255 @@
+package flags
+
+import (
+ "testing"
+)
+
+func TestGroupInline(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Group struct {
+ G bool `short:"g"`
+ } `group:"Grouped Options"`
+ }{}
+
+ p, ret := assertParserSuccess(t, &opts, "-v", "-g")
+
+ assertStringArray(t, ret, []string{})
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ if !opts.Group.G {
+ t.Errorf("Expected Group.G to be true")
+ }
+
+ if p.Command.Group.Find("Grouped Options") == nil {
+ t.Errorf("Expected to find group `Grouped Options'")
+ }
+}
+
+func TestGroupAdd(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+ }{}
+
+ var grp = struct {
+ G bool `short:"g"`
+ }{}
+
+ p := NewParser(&opts, Default)
+ g, err := p.AddGroup("Grouped Options", "", &grp)
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ return
+ }
+
+ ret, err := p.ParseArgs([]string{"-v", "-g", "rest"})
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ return
+ }
+
+ assertStringArray(t, ret, []string{"rest"})
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ if !grp.G {
+ t.Errorf("Expected Group.G to be true")
+ }
+
+ if p.Command.Group.Find("Grouped Options") != g {
+ t.Errorf("Expected to find group `Grouped Options'")
+ }
+
+ if p.Groups()[1] != g {
+ t.Errorf("Expected group %#v, but got %#v", g, p.Groups()[0])
+ }
+
+ if g.Options()[0].ShortName != 'g' {
+ t.Errorf("Expected short name `g' but got %v", g.Options()[0].ShortName)
+ }
+}
+
+func TestGroupNestedInline(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+
+ Group struct {
+ G bool `short:"g"`
+
+ Nested struct {
+ N string `long:"n"`
+ } `group:"Nested Options"`
+ } `group:"Grouped Options"`
+ }{}
+
+ p, ret := assertParserSuccess(t, &opts, "-v", "-g", "--n", "n", "rest")
+
+ assertStringArray(t, ret, []string{"rest"})
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ if !opts.Group.G {
+ t.Errorf("Expected Group.G to be true")
+ }
+
+ assertString(t, opts.Group.Nested.N, "n")
+
+ if p.Command.Group.Find("Grouped Options") == nil {
+ t.Errorf("Expected to find group `Grouped Options'")
+ }
+
+ if p.Command.Group.Find("Nested Options") == nil {
+ t.Errorf("Expected to find group `Nested Options'")
+ }
+}
+
+func TestGroupNestedInlineNamespace(t *testing.T) {
+ var opts = struct {
+ Opt string `long:"opt"`
+
+ Group struct {
+ Opt string `long:"opt"`
+ Group struct {
+ Opt string `long:"opt"`
+ } `group:"Subsubgroup" namespace:"sap"`
+ } `group:"Subgroup" namespace:"sip"`
+ }{}
+
+ p, ret := assertParserSuccess(t, &opts, "--opt", "a", "--sip.opt", "b", "--sip.sap.opt", "c", "rest")
+
+ assertStringArray(t, ret, []string{"rest"})
+
+ assertString(t, opts.Opt, "a")
+ assertString(t, opts.Group.Opt, "b")
+ assertString(t, opts.Group.Group.Opt, "c")
+
+ for _, name := range []string{"Subgroup", "Subsubgroup"} {
+ if p.Command.Group.Find(name) == nil {
+ t.Errorf("Expected to find group '%s'", name)
+ }
+ }
+}
+
+func TestDuplicateShortFlags(t *testing.T) {
+ var opts struct {
+ Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+ Variables []string `short:"v" long:"variable" description:"Set a variable value."`
+ }
+
+ args := []string{
+ "--verbose",
+ "-v", "123",
+ "-v", "456",
+ }
+
+ _, err := ParseArgs(&opts, args)
+
+ if err == nil {
+ t.Errorf("Expected an error with type ErrDuplicatedFlag")
+ } else {
+ err2 := err.(*Error)
+ if err2.Type != ErrDuplicatedFlag {
+ t.Errorf("Expected an error with type ErrDuplicatedFlag")
+ }
+ }
+}
+
+func TestDuplicateLongFlags(t *testing.T) {
+ var opts struct {
+ Test1 []bool `short:"a" long:"testing" description:"Test 1"`
+ Test2 []string `short:"b" long:"testing" description:"Test 2."`
+ }
+
+ args := []string{
+ "--testing",
+ }
+
+ _, err := ParseArgs(&opts, args)
+
+ if err == nil {
+ t.Errorf("Expected an error with type ErrDuplicatedFlag")
+ } else {
+ err2 := err.(*Error)
+ if err2.Type != ErrDuplicatedFlag {
+ t.Errorf("Expected an error with type ErrDuplicatedFlag")
+ }
+ }
+}
+
+func TestFindOptionByLongFlag(t *testing.T) {
+ var opts struct {
+ Testing bool `long:"testing" description:"Testing"`
+ }
+
+ p := NewParser(&opts, Default)
+ opt := p.FindOptionByLongName("testing")
+
+ if opt == nil {
+ t.Errorf("Expected option, but found none")
+ }
+
+ assertString(t, opt.LongName, "testing")
+}
+
+func TestFindOptionByShortFlag(t *testing.T) {
+ var opts struct {
+ Testing bool `short:"t" description:"Testing"`
+ }
+
+ p := NewParser(&opts, Default)
+ opt := p.FindOptionByShortName('t')
+
+ if opt == nil {
+ t.Errorf("Expected option, but found none")
+ }
+
+ if opt.ShortName != 't' {
+ t.Errorf("Expected 't', but got %v", opt.ShortName)
+ }
+}
+
+func TestFindOptionByLongFlagInSubGroup(t *testing.T) {
+ var opts struct {
+ Group struct {
+ Testing bool `long:"testing" description:"Testing"`
+ } `group:"sub-group"`
+ }
+
+ p := NewParser(&opts, Default)
+ opt := p.FindOptionByLongName("testing")
+
+ if opt == nil {
+ t.Errorf("Expected option, but found none")
+ }
+
+ assertString(t, opt.LongName, "testing")
+}
+
+func TestFindOptionByShortFlagInSubGroup(t *testing.T) {
+ var opts struct {
+ Group struct {
+ Testing bool `short:"t" description:"Testing"`
+ } `group:"sub-group"`
+ }
+
+ p := NewParser(&opts, Default)
+ opt := p.FindOptionByShortName('t')
+
+ if opt == nil {
+ t.Errorf("Expected option, but found none")
+ }
+
+ if opt.ShortName != 't' {
+ t.Errorf("Expected 't', but got %v", opt.ShortName)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/help.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/help.go
new file mode 100644
index 00000000000..c0b808d84fb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/help.go
@@ -0,0 +1,466 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+ "unicode/utf8"
+)
+
+type alignmentInfo struct {
+ maxLongLen int
+ hasShort bool
+ hasValueName bool
+ terminalColumns int
+ indent bool
+}
+
+const (
+ paddingBeforeOption = 2
+ distanceBetweenOptionAndDescription = 2
+)
+
+func (a *alignmentInfo) descriptionStart() int {
+ ret := a.maxLongLen + distanceBetweenOptionAndDescription
+
+ if a.hasShort {
+ ret += 2
+ }
+
+ if a.maxLongLen > 0 {
+ ret += 4
+ }
+
+ if a.hasValueName {
+ ret += 3
+ }
+
+ return ret
+}
+
+func (a *alignmentInfo) updateLen(name string, indent bool) {
+ l := utf8.RuneCountInString(name)
+
+ if indent {
+ l = l + 4
+ }
+
+ if l > a.maxLongLen {
+ a.maxLongLen = l
+ }
+}
+
+func (p *Parser) getAlignmentInfo() alignmentInfo {
+ ret := alignmentInfo{
+ maxLongLen: 0,
+ hasShort: false,
+ hasValueName: false,
+ terminalColumns: getTerminalColumns(),
+ }
+
+ if ret.terminalColumns <= 0 {
+ ret.terminalColumns = 80
+ }
+
+ var prevcmd *Command
+
+ p.eachActiveGroup(func(c *Command, grp *Group) {
+ if c != prevcmd {
+ for _, arg := range c.args {
+ ret.updateLen(arg.Name, c != p.Command)
+ }
+ }
+
+ for _, info := range grp.options {
+ if !info.canCli() {
+ continue
+ }
+
+ if info.ShortName != 0 {
+ ret.hasShort = true
+ }
+
+ if len(info.ValueName) > 0 {
+ ret.hasValueName = true
+ }
+
+ l := info.LongNameWithNamespace() + info.ValueName
+
+ if len(info.Choices) != 0 {
+ l += "[" + strings.Join(info.Choices, "|") + "]"
+ }
+
+ ret.updateLen(l, c != p.Command)
+ }
+ })
+
+ return ret
+}
+
+func wrapText(s string, l int, prefix string) string {
+ var ret string
+
+ // Basic text wrapping of s at spaces to fit in l
+ lines := strings.Split(s, "\n")
+
+ for _, line := range lines {
+ var retline string
+
+ line = strings.TrimSpace(line)
+
+ for len(line) > l {
+ // Try to split on space
+ suffix := ""
+
+ pos := strings.LastIndex(line[:l], " ")
+
+ if pos < 0 {
+ pos = l - 1
+ suffix = "-\n"
+ }
+
+ if len(retline) != 0 {
+ retline += "\n" + prefix
+ }
+
+ retline += strings.TrimSpace(line[:pos]) + suffix
+ line = strings.TrimSpace(line[pos:])
+ }
+
+ if len(line) > 0 {
+ if len(retline) != 0 {
+ retline += "\n" + prefix
+ }
+
+ retline += line
+ }
+
+ if len(ret) > 0 {
+ ret += "\n"
+
+ if len(retline) > 0 {
+ ret += prefix
+ }
+ }
+
+ ret += retline
+ }
+
+ return ret
+}
+
+func (p *Parser) writeHelpOption(writer *bufio.Writer, option *Option, info alignmentInfo) {
+ line := &bytes.Buffer{}
+
+ prefix := paddingBeforeOption
+
+ if info.indent {
+ prefix += 4
+ }
+
+ if option.Hidden {
+ return
+ }
+
+ line.WriteString(strings.Repeat(" ", prefix))
+
+ if option.ShortName != 0 {
+ line.WriteRune(defaultShortOptDelimiter)
+ line.WriteRune(option.ShortName)
+ } else if info.hasShort {
+ line.WriteString(" ")
+ }
+
+ descstart := info.descriptionStart() + paddingBeforeOption
+
+ if len(option.LongName) > 0 {
+ if option.ShortName != 0 {
+ line.WriteString(", ")
+ } else if info.hasShort {
+ line.WriteString(" ")
+ }
+
+ line.WriteString(defaultLongOptDelimiter)
+ line.WriteString(option.LongNameWithNamespace())
+ }
+
+ if option.canArgument() {
+ line.WriteRune(defaultNameArgDelimiter)
+
+ if len(option.ValueName) > 0 {
+ line.WriteString(option.ValueName)
+ }
+
+ if len(option.Choices) > 0 {
+ line.WriteString("[" + strings.Join(option.Choices, "|") + "]")
+ }
+ }
+
+ written := line.Len()
+ line.WriteTo(writer)
+
+ if option.Description != "" {
+ dw := descstart - written
+ writer.WriteString(strings.Repeat(" ", dw))
+
+ var def string
+
+ if len(option.DefaultMask) != 0 && option.DefaultMask != "-" {
+ def = option.DefaultMask
+ } else {
+ def = option.defaultLiteral
+ }
+
+ var envDef string
+ if option.EnvDefaultKey != "" {
+ var envPrintable string
+ if runtime.GOOS == "windows" {
+ envPrintable = "%" + option.EnvDefaultKey + "%"
+ } else {
+ envPrintable = "$" + option.EnvDefaultKey
+ }
+ envDef = fmt.Sprintf(" [%s]", envPrintable)
+ }
+
+ var desc string
+
+ if def != "" {
+ desc = fmt.Sprintf("%s (default: %v)%s", option.Description, def, envDef)
+ } else {
+ desc = option.Description + envDef
+ }
+
+ writer.WriteString(wrapText(desc,
+ info.terminalColumns-descstart,
+ strings.Repeat(" ", descstart)))
+ }
+
+ writer.WriteString("\n")
+}
+
+func maxCommandLength(s []*Command) int {
+ if len(s) == 0 {
+ return 0
+ }
+
+ ret := len(s[0].Name)
+
+ for _, v := range s[1:] {
+ l := len(v.Name)
+
+ if l > ret {
+ ret = l
+ }
+ }
+
+ return ret
+}
+
+// WriteHelp writes a help message containing all the possible options and
+// their descriptions to the provided writer. Note that the HelpFlag parser
+// option provides a convenient way to add a -h/--help option group to the
+// command line parser which will automatically show the help messages using
+// this method.
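+//
+// A minimal sketch of calling it directly:
+//
+//     parser.WriteHelp(os.Stderr)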
+func (p *Parser) WriteHelp(writer io.Writer) {
+ if writer == nil {
+ return
+ }
+
+ wr := bufio.NewWriter(writer)
+ aligninfo := p.getAlignmentInfo()
+
+ cmd := p.Command
+
+ for cmd.Active != nil {
+ cmd = cmd.Active
+ }
+
+ if p.Name != "" {
+ wr.WriteString("Usage:\n")
+ wr.WriteString(" ")
+
+ allcmd := p.Command
+
+ for allcmd != nil {
+ var usage string
+
+ if allcmd == p.Command {
+ if len(p.Usage) != 0 {
+ usage = p.Usage
+ } else if p.Options&HelpFlag != 0 {
+ usage = "[OPTIONS]"
+ }
+ } else if us, ok := allcmd.data.(Usage); ok {
+ usage = us.Usage()
+ } else if allcmd.hasCliOptions() {
+ usage = fmt.Sprintf("[%s-OPTIONS]", allcmd.Name)
+ }
+
+ if len(usage) != 0 {
+ fmt.Fprintf(wr, " %s %s", allcmd.Name, usage)
+ } else {
+ fmt.Fprintf(wr, " %s", allcmd.Name)
+ }
+
+ if len(allcmd.args) > 0 {
+ fmt.Fprintf(wr, " ")
+ }
+
+ for i, arg := range allcmd.args {
+ if i != 0 {
+ fmt.Fprintf(wr, " ")
+ }
+
+ name := arg.Name
+
+ if arg.isRemaining() {
+ name = name + "..."
+ }
+
+ if !allcmd.ArgsRequired {
+ fmt.Fprintf(wr, "[%s]", name)
+ } else {
+ fmt.Fprintf(wr, "%s", name)
+ }
+ }
+
+ if allcmd.Active == nil && len(allcmd.commands) > 0 {
+ var co, cc string
+
+ if allcmd.SubcommandsOptional {
+ co, cc = "[", "]"
+ } else {
+ co, cc = "<", ">"
+ }
+
+ visibleCommands := allcmd.visibleCommands()
+
+ if len(visibleCommands) > 3 {
+ fmt.Fprintf(wr, " %scommand%s", co, cc)
+ } else {
+ subcommands := allcmd.sortedVisibleCommands()
+ names := make([]string, len(subcommands))
+
+ for i, subc := range subcommands {
+ names[i] = subc.Name
+ }
+
+ fmt.Fprintf(wr, " %s%s%s", co, strings.Join(names, " | "), cc)
+ }
+ }
+
+ allcmd = allcmd.Active
+ }
+
+ fmt.Fprintln(wr)
+
+ if len(cmd.LongDescription) != 0 {
+ fmt.Fprintln(wr)
+
+ t := wrapText(cmd.LongDescription,
+ aligninfo.terminalColumns,
+ "")
+
+ fmt.Fprintln(wr, t)
+ }
+ }
+
+ c := p.Command
+
+ for c != nil {
+ printcmd := c != p.Command
+
+ c.eachGroup(func(grp *Group) {
+ first := true
+
+ // Skip built-in help group for all commands except the top-level
+ // parser
+ if grp.Hidden || (grp.isBuiltinHelp && c != p.Command) {
+ return
+ }
+
+ for _, info := range grp.options {
+ if !info.canCli() || info.Hidden {
+ continue
+ }
+
+ if printcmd {
+ fmt.Fprintf(wr, "\n[%s command options]\n", c.Name)
+ aligninfo.indent = true
+ printcmd = false
+ }
+
+ if first && cmd.Group != grp {
+ fmt.Fprintln(wr)
+
+ if aligninfo.indent {
+ wr.WriteString(" ")
+ }
+
+ fmt.Fprintf(wr, "%s:\n", grp.ShortDescription)
+ first = false
+ }
+
+ p.writeHelpOption(wr, info, aligninfo)
+ }
+ })
+
+ if len(c.args) > 0 {
+ if c == p.Command {
+ fmt.Fprintf(wr, "\nArguments:\n")
+ } else {
+ fmt.Fprintf(wr, "\n[%s command arguments]\n", c.Name)
+ }
+
+ maxlen := aligninfo.descriptionStart()
+
+ for _, arg := range c.args {
+ prefix := strings.Repeat(" ", paddingBeforeOption)
+ fmt.Fprintf(wr, "%s%s", prefix, arg.Name)
+
+ if len(arg.Description) > 0 {
+ align := strings.Repeat(" ", maxlen-len(arg.Name)-1)
+ fmt.Fprintf(wr, ":%s%s", align, arg.Description)
+ }
+
+ fmt.Fprintln(wr)
+ }
+ }
+
+ c = c.Active
+ }
+
+ scommands := cmd.sortedVisibleCommands()
+
+ if len(scommands) > 0 {
+ maxnamelen := maxCommandLength(scommands)
+
+ fmt.Fprintln(wr)
+ fmt.Fprintln(wr, "Available commands:")
+
+ for _, c := range scommands {
+ fmt.Fprintf(wr, " %s", c.Name)
+
+ if len(c.ShortDescription) > 0 {
+ pad := strings.Repeat(" ", maxnamelen-len(c.Name))
+ fmt.Fprintf(wr, "%s %s", pad, c.ShortDescription)
+
+ if len(c.Aliases) > 0 {
+ fmt.Fprintf(wr, " (aliases: %s)", strings.Join(c.Aliases, ", "))
+ }
+
+ }
+
+ fmt.Fprintln(wr)
+ }
+ }
+
+ wr.Flush()
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/help_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/help_test.go
new file mode 100644
index 00000000000..33d21bf86df
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/help_test.go
@@ -0,0 +1,460 @@
+package flags
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "runtime"
+ "testing"
+ "time"
+)
+
+type helpOptions struct {
+ Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information" ini-name:"verbose"`
+ Call func(string) `short:"c" description:"Call phone number" ini-name:"call"`
+ PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"`
+ EmptyDescription bool `long:"empty-description"`
+
+ Default string `long:"default" default:"Some\nvalue" description:"Test default value"`
+ DefaultArray []string `long:"default-array" default:"Some value" default:"Other\tvalue" description:"Test default array value"`
+ DefaultMap map[string]string `long:"default-map" default:"some:value" default:"another:value" description:"Testdefault map value"`
+ EnvDefault1 string `long:"env-default1" default:"Some value" env:"ENV_DEFAULT" description:"Test env-default1 value"`
+ EnvDefault2 string `long:"env-default2" env:"ENV_DEFAULT" description:"Test env-default2 value"`
+ OptionWithArgName string `long:"opt-with-arg-name" value-name:"something" description:"Option with named argument"`
+ OptionWithChoices string `long:"opt-with-choices" value-name:"choice" choice:"dog" choice:"cat" description:"Option with choices"`
+ Hidden string `long:"hidden" description:"Hidden option" hidden:"yes"`
+
+ OnlyIni string `ini-name:"only-ini" description:"Option only available in ini"`
+
+ Other struct {
+ StringSlice []string `short:"s" default:"some" default:"value" description:"A slice of strings"`
+ IntMap map[string]int `long:"intmap" default:"a:1" description:"A map from string to int" ini-name:"int-map"`
+ } `group:"Other Options"`
+
+ HiddenGroup struct {
+ InsideHiddenGroup string `long:"inside-hidden-group" description:"Inside hidden group"`
+ } `group:"Hidden group" hidden:"yes"`
+
+ Group struct {
+ Opt string `long:"opt" description:"This is a subgroup option"`
+ HiddenInsideGroup string `long:"hidden-inside-group" description:"Hidden inside group" hidden:"yes"`
+
+ Group struct {
+ Opt string `long:"opt" description:"This is a subsubgroup option"`
+ } `group:"Subsubgroup" namespace:"sap"`
+ } `group:"Subgroup" namespace:"sip"`
+
+ Command struct {
+ ExtraVerbose []bool `long:"extra-verbose" description:"Use for extra verbosity"`
+ } `command:"command" alias:"cm" alias:"cmd" description:"A command"`
+
+ HiddenCommand struct {
+ ExtraVerbose []bool `long:"extra-verbose" description:"Use for extra verbosity"`
+ } `command:"hidden-command" description:"A hidden command" hidden:"yes"`
+
+ Args struct {
+ Filename string `positional-arg-name:"filename" description:"A filename"`
+ Number int `positional-arg-name:"num" description:"A number"`
+ } `positional-args:"yes"`
+}
+
+func TestHelp(t *testing.T) {
+ oldEnv := EnvSnapshot()
+ defer oldEnv.Restore()
+ os.Setenv("ENV_DEFAULT", "env-def")
+
+ var opts helpOptions
+ p := NewNamedParser("TestHelp", HelpFlag)
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ _, err := p.ParseArgs([]string{"--help"})
+
+ if err == nil {
+ t.Fatalf("Expected help error")
+ }
+
+ if e, ok := err.(*Error); !ok {
+ t.Fatalf("Expected flags.Error, but got %T", err)
+ } else {
+ if e.Type != ErrHelp {
+ t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type)
+ }
+
+ var expected string
+
+ if runtime.GOOS == "windows" {
+ expected = `Usage:
+ TestHelp [OPTIONS] [filename] [num] <command>
+
+Application Options:
+ /v, /verbose Show verbose debug information
+ /c: Call phone number
+ /ptrslice: A slice of pointers to string
+ /empty-description
+ /default: Test default value (default:
+ "Some\nvalue")
+ /default-array: Test default array value (default:
+ Some value, "Other\tvalue")
+ /default-map: Testdefault map value (default:
+ some:value, another:value)
+ /env-default1: Test env-default1 value (default:
+ Some value) [%ENV_DEFAULT%]
+ /env-default2: Test env-default2 value
+ [%ENV_DEFAULT%]
+ /opt-with-arg-name:something Option with named argument
+ /opt-with-choices:choice[dog|cat] Option with choices
+
+Other Options:
+ /s: A slice of strings (default: some,
+ value)
+ /intmap: A map from string to int (default:
+ a:1)
+
+Subgroup:
+ /sip.opt: This is a subgroup option
+
+Subsubgroup:
+ /sip.sap.opt: This is a subsubgroup option
+
+Help Options:
+ /? Show this help message
+ /h, /help Show this help message
+
+Arguments:
+ filename: A filename
+ num: A number
+
+Available commands:
+ command A command (aliases: cm, cmd)
+`
+ } else {
+ expected = `Usage:
+ TestHelp [OPTIONS] [filename] [num] <command>
+
+Application Options:
+ -v, --verbose Show verbose debug information
+ -c= Call phone number
+ --ptrslice= A slice of pointers to string
+ --empty-description
+ --default= Test default value (default:
+ "Some\nvalue")
+ --default-array= Test default array value (default:
+ Some value, "Other\tvalue")
+ --default-map= Testdefault map value (default:
+ some:value, another:value)
+ --env-default1= Test env-default1 value (default:
+ Some value) [$ENV_DEFAULT]
+ --env-default2= Test env-default2 value
+ [$ENV_DEFAULT]
+ --opt-with-arg-name=something Option with named argument
+ --opt-with-choices=choice[dog|cat] Option with choices
+
+Other Options:
+ -s= A slice of strings (default: some,
+ value)
+ --intmap= A map from string to int (default:
+ a:1)
+
+Subgroup:
+ --sip.opt= This is a subgroup option
+
+Subsubgroup:
+ --sip.sap.opt= This is a subsubgroup option
+
+Help Options:
+ -h, --help Show this help message
+
+Arguments:
+ filename: A filename
+ num: A number
+
+Available commands:
+ command A command (aliases: cm, cmd)
+`
+ }
+
+ assertDiff(t, e.Message, expected, "help message")
+ }
+}
+
+func TestMan(t *testing.T) {
+ oldEnv := EnvSnapshot()
+ defer oldEnv.Restore()
+ os.Setenv("ENV_DEFAULT", "env-def")
+
+ var opts helpOptions
+ p := NewNamedParser("TestMan", HelpFlag)
+ p.ShortDescription = "Test manpage generation"
+ p.LongDescription = "This is a somewhat `longer' description of what this does"
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ p.Commands()[0].LongDescription = "Longer `command' description"
+
+ var buf bytes.Buffer
+ p.WriteManPage(&buf)
+
+ got := buf.String()
+
+ tt := time.Now()
+
+ var envDefaultName string
+
+ if runtime.GOOS == "windows" {
+ envDefaultName = "%ENV_DEFAULT%"
+ } else {
+ envDefaultName = "$ENV_DEFAULT"
+ }
+
+ expected := fmt.Sprintf(`.TH TestMan 1 "%s"
+.SH NAME
+TestMan \- Test manpage generation
+.SH SYNOPSIS
+\fBTestMan\fP [OPTIONS]
+.SH DESCRIPTION
+This is a somewhat \fBlonger\fP description of what this does
+.SH OPTIONS
+.TP
+\fB\fB\-v\fR, \fB\-\-verbose\fR\fP
+Show verbose debug information
+.TP
+\fB\fB\-c\fR\fP
+Call phone number
+.TP
+\fB\fB\-\-ptrslice\fR\fP
+A slice of pointers to string
+.TP
+\fB\fB\-\-empty-description\fR\fP
+.TP
+\fB\fB\-\-default\fR <default: \fI"Some\\nvalue"\fR>\fP
+Test default value
+.TP
+\fB\fB\-\-default-array\fR <default: \fI"Some value", "Other\\tvalue"\fR>\fP
+Test default array value
+.TP
+\fB\fB\-\-default-map\fR <default: \fI"some:value", "another:value"\fR>\fP
+Testdefault map value
+.TP
+\fB\fB\-\-env-default1\fR <default: \fI"Some value"\fR>\fP
+Test env-default1 value
+.TP
+\fB\fB\-\-env-default2\fR <default: \fI%s\fR>\fP
+Test env-default2 value
+.TP
+\fB\fB\-\-opt-with-arg-name\fR \fIsomething\fR\fP
+Option with named argument
+.TP
+\fB\fB\-\-opt-with-choices\fR \fIchoice\fR\fP
+Option with choices
+.TP
+\fB\fB\-s\fR <default: \fI"some", "value"\fR>\fP
+A slice of strings
+.TP
+\fB\fB\-\-intmap\fR <default: \fI"a:1"\fR>\fP
+A map from string to int
+.TP
+\fB\fB\-\-sip.opt\fR\fP
+This is a subgroup option
+.TP
+\fB\fB\-\-sip.sap.opt\fR\fP
+This is a subsubgroup option
+.SH COMMANDS
+.SS command
+A command
+
+Longer \fBcommand\fP description
+
+\fBUsage\fP: TestMan [OPTIONS] command [command-OPTIONS]
+.TP
+
+\fBAliases\fP: cm, cmd
+
+.TP
+\fB\fB\-\-extra-verbose\fR\fP
+Use for extra verbosity
+`, tt.Format("2 January 2006"), envDefaultName)
+
+ assertDiff(t, got, expected, "man page")
+}
+
+type helpCommandNoOptions struct {
+ Command struct {
+ } `command:"command" description:"A command"`
+}
+
+func TestHelpCommand(t *testing.T) {
+ oldEnv := EnvSnapshot()
+ defer oldEnv.Restore()
+ os.Setenv("ENV_DEFAULT", "env-def")
+
+ var opts helpCommandNoOptions
+ p := NewNamedParser("TestHelpCommand", HelpFlag)
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ _, err := p.ParseArgs([]string{"command", "--help"})
+
+ if err == nil {
+ t.Fatalf("Expected help error")
+ }
+
+ if e, ok := err.(*Error); !ok {
+ t.Fatalf("Expected flags.Error, but got %T", err)
+ } else {
+ if e.Type != ErrHelp {
+ t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type)
+ }
+
+ var expected string
+
+ if runtime.GOOS == "windows" {
+ expected = `Usage:
+ TestHelpCommand [OPTIONS] command
+
+Help Options:
+ /? Show this help message
+ /h, /help Show this help message
+`
+ } else {
+ expected = `Usage:
+ TestHelpCommand [OPTIONS] command
+
+Help Options:
+ -h, --help Show this help message
+`
+ }
+
+ assertDiff(t, e.Message, expected, "help message")
+ }
+}
+
+func TestHelpDefaults(t *testing.T) {
+ var expected string
+
+ if runtime.GOOS == "windows" {
+ expected = `Usage:
+ TestHelpDefaults [OPTIONS]
+
+Application Options:
+ /with-default: With default (default: default-value)
+ /without-default: Without default
+ /with-programmatic-default: With programmatic default (default:
+ default-value)
+
+Help Options:
+ /? Show this help message
+ /h, /help Show this help message
+`
+ } else {
+ expected = `Usage:
+ TestHelpDefaults [OPTIONS]
+
+Application Options:
+ --with-default= With default (default: default-value)
+ --without-default= Without default
+ --with-programmatic-default= With programmatic default (default:
+ default-value)
+
+Help Options:
+ -h, --help Show this help message
+`
+ }
+
+ tests := []struct {
+ Args []string
+ Output string
+ }{
+ {
+ Args: []string{"-h"},
+ Output: expected,
+ },
+ {
+ Args: []string{"--with-default", "other-value", "--with-programmatic-default", "other-value", "-h"},
+ Output: expected,
+ },
+ }
+
+ for _, test := range tests {
+ var opts struct {
+ WithDefault string `long:"with-default" default:"default-value" description:"With default"`
+ WithoutDefault string `long:"without-default" description:"Without default"`
+ WithProgrammaticDefault string `long:"with-programmatic-default" description:"With programmatic default"`
+ }
+
+ opts.WithProgrammaticDefault = "default-value"
+
+ p := NewNamedParser("TestHelpDefaults", HelpFlag)
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ _, err := p.ParseArgs(test.Args)
+
+ if err == nil {
+ t.Fatalf("Expected help error")
+ }
+
+ if e, ok := err.(*Error); !ok {
+ t.Fatalf("Expected flags.Error, but got %T", err)
+ } else {
+ if e.Type != ErrHelp {
+ t.Errorf("Expected flags.ErrHelp type, but got %s", e.Type)
+ }
+
+ assertDiff(t, e.Message, test.Output, "help message")
+ }
+ }
+}
+
+func TestHelpRestArgs(t *testing.T) {
+ opts := struct {
+ Verbose bool `short:"v"`
+ }{}
+
+ p := NewNamedParser("TestHelpDefaults", HelpFlag)
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ retargs, err := p.ParseArgs([]string{"-h", "-v", "rest"})
+
+ if err == nil {
+ t.Fatalf("Expected help error")
+ }
+
+ assertStringArray(t, retargs, []string{"-v", "rest"})
+}
+
+func TestWrapText(t *testing.T) {
+ s := "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
+
+ got := wrapText(s, 60, " ")
+ expected := `Lorem ipsum dolor sit amet, consectetur adipisicing elit,
+ sed do eiusmod tempor incididunt ut labore et dolore magna
+ aliqua. Ut enim ad minim veniam, quis nostrud exercitation
+ ullamco laboris nisi ut aliquip ex ea commodo consequat.
+ Duis aute irure dolor in reprehenderit in voluptate velit
+ esse cillum dolore eu fugiat nulla pariatur. Excepteur sint
+ occaecat cupidatat non proident, sunt in culpa qui officia
+ deserunt mollit anim id est laborum.`
+
+ assertDiff(t, got, expected, "wrapped text")
+}
+
+func TestWrapParagraph(t *testing.T) {
+ s := "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\n\n"
+ s += "Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\n\n"
+ s += "Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\n\n"
+ s += "Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"
+
+ got := wrapText(s, 60, " ")
+ expected := `Lorem ipsum dolor sit amet, consectetur adipisicing elit,
+ sed do eiusmod tempor incididunt ut labore et dolore magna
+ aliqua.
+
+ Ut enim ad minim veniam, quis nostrud exercitation ullamco
+ laboris nisi ut aliquip ex ea commodo consequat.
+
+ Duis aute irure dolor in reprehenderit in voluptate velit
+ esse cillum dolore eu fugiat nulla pariatur.
+
+ Excepteur sint occaecat cupidatat non proident, sunt in
+ culpa qui officia deserunt mollit anim id est laborum.
+`
+
+ assertDiff(t, got, expected, "wrapped paragraph")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/ini.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/ini.go
new file mode 100644
index 00000000000..cfdf57cc2e0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/ini.go
@@ -0,0 +1,593 @@
+package flags
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// IniError contains location information on where an error occurred.
+type IniError struct {
+ // The error message.
+ Message string
+
+ // The filename of the file in which the error occurred.
+ File string
+
+ // The line number at which the error occurred.
+ LineNumber uint
+}
+
+// Error provides a "file:line: message" formatted message of the ini error.
+func (x *IniError) Error() string {
+ return fmt.Sprintf(
+ "%s:%d: %s",
+ x.File,
+ x.LineNumber,
+ x.Message,
+ )
+}
+
+// IniOptions for writing
+type IniOptions uint
+
+const (
+ // IniNone indicates no options.
+ IniNone IniOptions = 0
+
+ // IniIncludeDefaults indicates that default values should be written.
+ IniIncludeDefaults = 1 << iota
+
+ // IniCommentDefaults indicates that if IniIncludeDefaults is used
+ // options with default values are written but commented out.
+ IniCommentDefaults
+
+ // IniIncludeComments indicates that comments containing the description
+ // of an option should be written.
+ IniIncludeComments
+
+ // IniDefault provides a default set of options.
+ IniDefault = IniIncludeComments
+)
+
+// IniParser is a utility to read and write flags options from and to ini
+// formatted strings.
+type IniParser struct {
+ parser *Parser
+}
+
+type iniValue struct {
+ Name string
+ Value string
+ Quoted bool
+ LineNumber uint
+}
+
+type iniSection []iniValue
+
+type ini struct {
+ File string
+ Sections map[string]iniSection
+}
+
+// NewIniParser creates a new ini parser for a given Parser.
+func NewIniParser(p *Parser) *IniParser {
+ return &IniParser{
+ parser: p,
+ }
+}
+
+// IniParse is a convenience function to parse command line options with default
+// settings from an ini formatted file. The provided data is a pointer to a struct
+// representing the default option group (named "Application Options"). For
+// more control, use flags.NewParser.
+func IniParse(filename string, data interface{}) error {
+ p := NewParser(data, Default)
+
+ return NewIniParser(p).ParseFile(filename)
+}
+
+// ParseFile parses flags from an ini formatted file. See Parse for more
+// information on the ini file format. The returned errors can be of the type
+// flags.Error or flags.IniError.
+func (i *IniParser) ParseFile(filename string) error {
+ i.parser.clearIsSet()
+
+ ini, err := readIniFromFile(filename)
+
+ if err != nil {
+ return err
+ }
+
+ return i.parse(ini)
+}
+
+// Parse parses flags from an ini format. You can use ParseFile as a
+// convenience function to parse from a filename instead of a general
+// io.Reader.
+//
+// The format of the ini file is as follows:
+//
+// [Option group name]
+// option = value
+//
+// Each section in the ini file represents an option group or command in the
+// flags parser. The default flags parser option group (i.e. when using
+// flags.Parse) is named 'Application Options'. The ini option name is matched
+// in the following order:
+//
+// 1. Compared to the ini-name tag on the option struct field (if present)
+// 2. Compared to the struct field name
+// 3. Compared to the option long name (if present)
+// 4. Compared to the option short name (if present)
+//
+// Sections for nested groups and commands can be addressed using a dot `.'
+// namespacing notation (e.g. [subcommand.Options]). Group section names are
+// matched case insensitively.
+//
+// The returned errors can be of the type flags.Error or flags.IniError.
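+//
+// A minimal sketch of reading options from an ini source (the option and
+// value are illustrative):
+//
+//     var opts struct {
+//         Name string `long:"name"`
+//     }
+//
+//     p := flags.NewParser(&opts, flags.Default)
+//     inip := flags.NewIniParser(p)
+//     err := inip.Parse(strings.NewReader("[Application Options]\nname = value\n"))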
+func (i *IniParser) Parse(reader io.Reader) error {
+ i.parser.clearIsSet()
+
+ ini, err := readIni(reader, "")
+
+ if err != nil {
+ return err
+ }
+
+ return i.parse(ini)
+}
+
+// WriteFile writes the flags as ini format into a file. See WriteIni
+// for more information. The returned error occurs when the specified file
+// could not be opened for writing.
+func (i *IniParser) WriteFile(filename string, options IniOptions) error {
+ return writeIniToFile(i, filename, options)
+}
+
+// Write writes the current values of all the flags to an ini format.
+// See Parse for more information on the ini file format. You typically
+// call this only after settings have been parsed, since the default values
+// of each option are stored just before parsing the flags (this is only
+// relevant when IniIncludeDefaults is _not_ set in options).
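+//
+// For example, to dump the current configuration including defaults and
+// comments (sketch):
+//
+//     inip := flags.NewIniParser(p)
+//     inip.Write(os.Stdout, flags.IniIncludeDefaults|flags.IniIncludeComments)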
+func (i *IniParser) Write(writer io.Writer, options IniOptions) {
+ writeIni(i, writer, options)
+}
+
+func readFullLine(reader *bufio.Reader) (string, error) {
+ var line []byte
+
+ for {
+ l, more, err := reader.ReadLine()
+
+ if err != nil {
+ return "", err
+ }
+
+ if line == nil && !more {
+ return string(l), nil
+ }
+
+ line = append(line, l...)
+
+ if !more {
+ break
+ }
+ }
+
+ return string(line), nil
+}
+
+func optionIniName(option *Option) string {
+ name := option.tag.Get("_read-ini-name")
+
+ if len(name) != 0 {
+ return name
+ }
+
+ name = option.tag.Get("ini-name")
+
+ if len(name) != 0 {
+ return name
+ }
+
+ return option.field.Name
+}
+
+func writeGroupIni(cmd *Command, group *Group, namespace string, writer io.Writer, options IniOptions) {
+ var sname string
+
+ if len(namespace) != 0 {
+ sname = namespace
+ }
+
+ if cmd.Group != group && len(group.ShortDescription) != 0 {
+ if len(sname) != 0 {
+ sname += "."
+ }
+
+ sname += group.ShortDescription
+ }
+
+ sectionwritten := false
+ comments := (options & IniIncludeComments) != IniNone
+
+ for _, option := range group.options {
+ if option.isFunc() || option.Hidden {
+ continue
+ }
+
+ if len(option.tag.Get("no-ini")) != 0 {
+ continue
+ }
+
+ val := option.value
+
+ if (options&IniIncludeDefaults) == IniNone && option.valueIsDefault() {
+ continue
+ }
+
+ if !sectionwritten {
+ fmt.Fprintf(writer, "[%s]\n", sname)
+ sectionwritten = true
+ }
+
+ if comments && len(option.Description) != 0 {
+ fmt.Fprintf(writer, "; %s\n", option.Description)
+ }
+
+ oname := optionIniName(option)
+
+ commentOption := (options&(IniIncludeDefaults|IniCommentDefaults)) == IniIncludeDefaults|IniCommentDefaults && option.valueIsDefault()
+
+ kind := val.Type().Kind()
+ switch kind {
+ case reflect.Slice:
+ kind = val.Type().Elem().Kind()
+
+ if val.Len() == 0 {
+ writeOption(writer, oname, kind, "", "", true, option.iniQuote)
+ } else {
+ for idx := 0; idx < val.Len(); idx++ {
+ v, _ := convertToString(val.Index(idx), option.tag)
+
+ writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote)
+ }
+ }
+ case reflect.Map:
+ kind = val.Type().Elem().Kind()
+
+ if val.Len() == 0 {
+ writeOption(writer, oname, kind, "", "", true, option.iniQuote)
+ } else {
+ mkeys := val.MapKeys()
+ keys := make([]string, len(val.MapKeys()))
+ kkmap := make(map[string]reflect.Value)
+
+ for i, k := range mkeys {
+ keys[i], _ = convertToString(k, option.tag)
+ kkmap[keys[i]] = k
+ }
+
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v, _ := convertToString(val.MapIndex(kkmap[k]), option.tag)
+
+ writeOption(writer, oname, kind, k, v, commentOption, option.iniQuote)
+ }
+ }
+ default:
+ v, _ := convertToString(val, option.tag)
+
+ writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote)
+ }
+
+ if comments {
+ fmt.Fprintln(writer)
+ }
+ }
+
+ if sectionwritten && !comments {
+ fmt.Fprintln(writer)
+ }
+}
+
+func writeOption(writer io.Writer, optionName string, optionType reflect.Kind, optionKey string, optionValue string, commentOption bool, forceQuote bool) {
+ if forceQuote || (optionType == reflect.String && !isPrint(optionValue)) {
+ optionValue = strconv.Quote(optionValue)
+ }
+
+ comment := ""
+ if commentOption {
+ comment = "; "
+ }
+
+ fmt.Fprintf(writer, "%s%s =", comment, optionName)
+
+ if optionKey != "" {
+ fmt.Fprintf(writer, " %s:%s", optionKey, optionValue)
+ } else if optionValue != "" {
+ fmt.Fprintf(writer, " %s", optionValue)
+ }
+
+ fmt.Fprintln(writer)
+}
+
+func writeCommandIni(command *Command, namespace string, writer io.Writer, options IniOptions) {
+ command.eachGroup(func(group *Group) {
+ if !group.Hidden {
+ writeGroupIni(command, group, namespace, writer, options)
+ }
+ })
+
+ for _, c := range command.commands {
+ var nns string
+
+ if c.Hidden {
+ continue
+ }
+
+ if len(namespace) != 0 {
+ nns = c.Name + "." + nns
+ } else {
+ nns = c.Name
+ }
+
+ writeCommandIni(c, nns, writer, options)
+ }
+}
+
+func writeIni(parser *IniParser, writer io.Writer, options IniOptions) {
+ writeCommandIni(parser.parser.Command, "", writer, options)
+}
+
+func writeIniToFile(parser *IniParser, filename string, options IniOptions) error {
+ file, err := os.Create(filename)
+
+ if err != nil {
+ return err
+ }
+
+ defer file.Close()
+
+ writeIni(parser, file, options)
+
+ return nil
+}
+
+func readIniFromFile(filename string) (*ini, error) {
+ file, err := os.Open(filename)
+
+ if err != nil {
+ return nil, err
+ }
+
+ defer file.Close()
+
+ return readIni(file, filename)
+}
+
+func readIni(contents io.Reader, filename string) (*ini, error) {
+ ret := &ini{
+ File: filename,
+ Sections: make(map[string]iniSection),
+ }
+
+ reader := bufio.NewReader(contents)
+
+ // Empty global section
+ section := make(iniSection, 0, 10)
+ sectionname := ""
+
+ ret.Sections[sectionname] = section
+
+ var lineno uint
+
+ for {
+ line, err := readFullLine(reader)
+
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return nil, err
+ }
+
+ lineno++
+ line = strings.TrimSpace(line)
+
+ // Skip empty lines and lines starting with ; or # (comments)
+ if len(line) == 0 || line[0] == ';' || line[0] == '#' {
+ continue
+ }
+
+ if line[0] == '[' {
+ if line[0] != '[' || line[len(line)-1] != ']' {
+ return nil, &IniError{
+ Message: "malformed section header",
+ File: filename,
+ LineNumber: lineno,
+ }
+ }
+
+ name := strings.TrimSpace(line[1 : len(line)-1])
+
+ if len(name) == 0 {
+ return nil, &IniError{
+ Message: "empty section name",
+ File: filename,
+ LineNumber: lineno,
+ }
+ }
+
+ sectionname = name
+ section = ret.Sections[name]
+
+ if section == nil {
+ section = make(iniSection, 0, 10)
+ ret.Sections[name] = section
+ }
+
+ continue
+ }
+
+ // Parse option here
+ keyval := strings.SplitN(line, "=", 2)
+
+ if len(keyval) != 2 {
+ return nil, &IniError{
+ Message: fmt.Sprintf("malformed key=value (%s)", line),
+ File: filename,
+ LineNumber: lineno,
+ }
+ }
+
+ name := strings.TrimSpace(keyval[0])
+ value := strings.TrimSpace(keyval[1])
+ quoted := false
+
+ if len(value) != 0 && value[0] == '"' {
+ if v, err := strconv.Unquote(value); err == nil {
+ value = v
+
+ quoted = true
+ } else {
+ return nil, &IniError{
+ Message: err.Error(),
+ File: filename,
+ LineNumber: lineno,
+ }
+ }
+ }
+
+ section = append(section, iniValue{
+ Name: name,
+ Value: value,
+ Quoted: quoted,
+ LineNumber: lineno,
+ })
+
+ ret.Sections[sectionname] = section
+ }
+
+ return ret, nil
+}
+
+func (i *IniParser) matchingGroups(name string) []*Group {
+ if len(name) == 0 {
+ var ret []*Group
+
+ i.parser.eachGroup(func(g *Group) {
+ ret = append(ret, g)
+ })
+
+ return ret
+ }
+
+ g := i.parser.groupByName(name)
+
+ if g != nil {
+ return []*Group{g}
+ }
+
+ return nil
+}
+
+func (i *IniParser) parse(ini *ini) error {
+ p := i.parser
+
+ var quotesLookup = make(map[*Option]bool)
+
+ for name, section := range ini.Sections {
+ groups := i.matchingGroups(name)
+
+ if len(groups) == 0 {
+ return newErrorf(ErrUnknownGroup, "could not find option group `%s'", name)
+ }
+
+ for _, inival := range section {
+ var opt *Option
+
+ for _, group := range groups {
+ opt = group.optionByName(inival.Name, func(o *Option, n string) bool {
+ return strings.ToLower(o.tag.Get("ini-name")) == strings.ToLower(n)
+ })
+
+ if opt != nil && len(opt.tag.Get("no-ini")) != 0 {
+ opt = nil
+ }
+
+ if opt != nil {
+ break
+ }
+ }
+
+ if opt == nil {
+ if (p.Options & IgnoreUnknown) == None {
+ return &IniError{
+ Message: fmt.Sprintf("unknown option: %s", inival.Name),
+ File: ini.File,
+ LineNumber: inival.LineNumber,
+ }
+ }
+
+ continue
+ }
+
+ pval := &inival.Value
+
+ if !opt.canArgument() && len(inival.Value) == 0 {
+ pval = nil
+ } else {
+ if opt.value.Type().Kind() == reflect.Map {
+ parts := strings.SplitN(inival.Value, ":", 2)
+
+ // only handle unquoting
+ if len(parts) == 2 && parts[1][0] == '"' {
+ if v, err := strconv.Unquote(parts[1]); err == nil {
+ parts[1] = v
+
+ inival.Quoted = true
+ } else {
+ return &IniError{
+ Message: err.Error(),
+ File: ini.File,
+ LineNumber: inival.LineNumber,
+ }
+ }
+
+ s := parts[0] + ":" + parts[1]
+
+ pval = &s
+ }
+ }
+ }
+
+ if err := opt.set(pval); err != nil {
+ return &IniError{
+ Message: err.Error(),
+ File: ini.File,
+ LineNumber: inival.LineNumber,
+ }
+ }
+
+ // either all INI values are quoted or only values that need quoting
+ if _, ok := quotesLookup[opt]; !inival.Quoted || !ok {
+ quotesLookup[opt] = inival.Quoted
+ }
+
+ opt.tag.Set("_read-ini-name", inival.Name)
+ }
+ }
+
+ for opt, quoted := range quotesLookup {
+ opt.iniQuote = quoted
+ }
+
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/ini_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/ini_test.go
new file mode 100644
index 00000000000..dd7fe331209
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/ini_test.go
@@ -0,0 +1,950 @@
+package flags
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestWriteIni(t *testing.T) {
+ oldEnv := EnvSnapshot()
+ defer oldEnv.Restore()
+ os.Setenv("ENV_DEFAULT", "env-def")
+
+ var tests = []struct {
+ args []string
+ options IniOptions
+ expected string
+ }{
+ {
+ []string{"-vv", "--intmap=a:2", "--intmap", "b:3", "filename", "0", "command"},
+ IniDefault,
+ `[Application Options]
+; Show verbose debug information
+verbose = true
+verbose = true
+
+; Test env-default1 value
+EnvDefault1 = env-def
+
+; Test env-default2 value
+EnvDefault2 = env-def
+
+[Other Options]
+; A map from string to int
+int-map = a:2
+int-map = b:3
+
+`,
+ },
+ {
+ []string{"-vv", "--intmap=a:2", "--intmap", "b:3", "filename", "0", "command"},
+ IniDefault | IniIncludeDefaults,
+ `[Application Options]
+; Show verbose debug information
+verbose = true
+verbose = true
+
+; A slice of pointers to string
+; PtrSlice =
+
+EmptyDescription = false
+
+; Test default value
+Default = "Some\nvalue"
+
+; Test default array value
+DefaultArray = Some value
+DefaultArray = "Other\tvalue"
+
+; Testdefault map value
+DefaultMap = another:value
+DefaultMap = some:value
+
+; Test env-default1 value
+EnvDefault1 = env-def
+
+; Test env-default2 value
+EnvDefault2 = env-def
+
+; Option with named argument
+OptionWithArgName =
+
+; Option with choices
+OptionWithChoices =
+
+; Option only available in ini
+only-ini =
+
+[Other Options]
+; A slice of strings
+StringSlice = some
+StringSlice = value
+
+; A map from string to int
+int-map = a:2
+int-map = b:3
+
+[Subgroup]
+; This is a subgroup option
+Opt =
+
+[Subsubgroup]
+; This is a subsubgroup option
+Opt =
+
+[command]
+; Use for extra verbosity
+; ExtraVerbose =
+
+`,
+ },
+ {
+ []string{"filename", "0", "command"},
+ IniDefault | IniIncludeDefaults | IniCommentDefaults,
+ `[Application Options]
+; Show verbose debug information
+; verbose =
+
+; A slice of pointers to string
+; PtrSlice =
+
+; EmptyDescription = false
+
+; Test default value
+; Default = "Some\nvalue"
+
+; Test default array value
+; DefaultArray = Some value
+; DefaultArray = "Other\tvalue"
+
+; Testdefault map value
+; DefaultMap = another:value
+; DefaultMap = some:value
+
+; Test env-default1 value
+EnvDefault1 = env-def
+
+; Test env-default2 value
+EnvDefault2 = env-def
+
+; Option with named argument
+; OptionWithArgName =
+
+; Option with choices
+; OptionWithChoices =
+
+; Option only available in ini
+; only-ini =
+
+[Other Options]
+; A slice of strings
+; StringSlice = some
+; StringSlice = value
+
+; A map from string to int
+; int-map = a:1
+
+[Subgroup]
+; This is a subgroup option
+; Opt =
+
+[Subsubgroup]
+; This is a subsubgroup option
+; Opt =
+
+[command]
+; Use for extra verbosity
+; ExtraVerbose =
+
+`,
+ },
+ {
+ []string{"--default=New value", "--default-array=New value", "--default-map=new:value", "filename", "0", "command"},
+ IniDefault | IniIncludeDefaults | IniCommentDefaults,
+ `[Application Options]
+; Show verbose debug information
+; verbose =
+
+; A slice of pointers to string
+; PtrSlice =
+
+; EmptyDescription = false
+
+; Test default value
+Default = New value
+
+; Test default array value
+DefaultArray = New value
+
+; Testdefault map value
+DefaultMap = new:value
+
+; Test env-default1 value
+EnvDefault1 = env-def
+
+; Test env-default2 value
+EnvDefault2 = env-def
+
+; Option with named argument
+; OptionWithArgName =
+
+; Option with choices
+; OptionWithChoices =
+
+; Option only available in ini
+; only-ini =
+
+[Other Options]
+; A slice of strings
+; StringSlice = some
+; StringSlice = value
+
+; A map from string to int
+; int-map = a:1
+
+[Subgroup]
+; This is a subgroup option
+; Opt =
+
+[Subsubgroup]
+; This is a subsubgroup option
+; Opt =
+
+[command]
+; Use for extra verbosity
+; ExtraVerbose =
+
+`,
+ },
+ }
+
+ for _, test := range tests {
+ var opts helpOptions
+
+ p := NewNamedParser("TestIni", Default)
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ _, err := p.ParseArgs(test.args)
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+
+ inip := NewIniParser(p)
+
+ var b bytes.Buffer
+ inip.Write(&b, test.options)
+
+ got := b.String()
+ expected := test.expected
+
+ msg := fmt.Sprintf("with arguments %+v and ini options %b", test.args, test.options)
+ assertDiff(t, got, expected, msg)
+ }
+}
+
+func TestReadIni_flagEquivalent(t *testing.T) {
+ type options struct {
+ Opt1 bool `long:"opt1"`
+
+ Group1 struct {
+ Opt2 bool `long:"opt2"`
+ } `group:"group1"`
+
+ Group2 struct {
+ Opt3 bool `long:"opt3"`
+ } `group:"group2" namespace:"ns1"`
+
+ Cmd1 struct {
+ Opt4 bool `long:"opt4"`
+ Opt5 bool `long:"foo.opt5"`
+
+ Group1 struct {
+ Opt6 bool `long:"opt6"`
+ Opt7 bool `long:"foo.opt7"`
+ } `group:"group1"`
+
+ Group2 struct {
+ Opt8 bool `long:"opt8"`
+ } `group:"group2" namespace:"ns1"`
+ } `command:"cmd1"`
+ }
+
+ a := `
+opt1=true
+
+[group1]
+opt2=true
+
+[group2]
+ns1.opt3=true
+
+[cmd1]
+opt4=true
+foo.opt5=true
+
+[cmd1.group1]
+opt6=true
+foo.opt7=true
+
+[cmd1.group2]
+ns1.opt8=true
+`
+ b := `
+opt1=true
+opt2=true
+ns1.opt3=true
+
+[cmd1]
+opt4=true
+foo.opt5=true
+opt6=true
+foo.opt7=true
+ns1.opt8=true
+`
+
+ parse := func(readIni string) (opts options, writeIni string) {
+ p := NewNamedParser("TestIni", Default)
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ inip := NewIniParser(p)
+ err := inip.Parse(strings.NewReader(readIni))
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %s\n\nFile:\n%s", err, readIni)
+ }
+
+ var b bytes.Buffer
+ inip.Write(&b, Default)
+
+ return opts, b.String()
+ }
+
+ aOpt, aIni := parse(a)
+ bOpt, bIni := parse(b)
+
+ assertDiff(t, aIni, bIni, "")
+ if !reflect.DeepEqual(aOpt, bOpt) {
+ t.Errorf("not equal")
+ }
+}
+
+func TestReadIni(t *testing.T) {
+ var opts helpOptions
+
+ p := NewNamedParser("TestIni", Default)
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ inip := NewIniParser(p)
+
+ inic := `
+; Show verbose debug information
+verbose = true
+verbose = true
+
+DefaultMap = another:"value\n1"
+DefaultMap = some:value 2
+
+[Application Options]
+; A slice of pointers to string
+; PtrSlice =
+
+; Test default value
+Default = "New\nvalue"
+
+; Test env-default1 value
+EnvDefault1 = New value
+
+[Other Options]
+# A slice of strings
+StringSlice = "some\nvalue"
+StringSlice = another value
+
+; A map from string to int
+int-map = a:2
+int-map = b:3
+
+`
+
+ b := strings.NewReader(inic)
+ err := inip.Parse(b)
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+
+ assertBoolArray(t, opts.Verbose, []bool{true, true})
+
+ if v := map[string]string{"another": "value\n1", "some": "value 2"}; !reflect.DeepEqual(opts.DefaultMap, v) {
+ t.Fatalf("Expected %#v for DefaultMap but got %#v", v, opts.DefaultMap)
+ }
+
+ assertString(t, opts.Default, "New\nvalue")
+
+ assertString(t, opts.EnvDefault1, "New value")
+
+ assertStringArray(t, opts.Other.StringSlice, []string{"some\nvalue", "another value"})
+
+ if v, ok := opts.Other.IntMap["a"]; !ok {
+ t.Errorf("Expected \"a\" in Other.IntMap")
+ } else if v != 2 {
+ t.Errorf("Expected Other.IntMap[\"a\"] = 2, but got %v", v)
+ }
+
+ if v, ok := opts.Other.IntMap["b"]; !ok {
+ t.Errorf("Expected \"b\" in Other.IntMap")
+ } else if v != 3 {
+ t.Errorf("Expected Other.IntMap[\"b\"] = 3, but got %v", v)
+ }
+}
+
+func TestReadAndWriteIni(t *testing.T) {
+ var tests = []struct {
+ options IniOptions
+ read string
+ write string
+ }{
+ {
+ IniIncludeComments,
+ `[Application Options]
+; Show verbose debug information
+verbose = true
+verbose = true
+
+; Test default value
+Default = "quote me"
+
+; Test default array value
+DefaultArray = 1
+DefaultArray = "2"
+DefaultArray = 3
+
+; Testdefault map value
+; DefaultMap =
+
+; Test env-default1 value
+EnvDefault1 = env-def
+
+; Test env-default2 value
+EnvDefault2 = env-def
+
+[Other Options]
+; A slice of strings
+; StringSlice =
+
+; A map from string to int
+int-map = a:2
+int-map = b:"3"
+
+`,
+ `[Application Options]
+; Show verbose debug information
+verbose = true
+verbose = true
+
+; Test default value
+Default = "quote me"
+
+; Test default array value
+DefaultArray = 1
+DefaultArray = 2
+DefaultArray = 3
+
+; Testdefault map value
+; DefaultMap =
+
+; Test env-default1 value
+EnvDefault1 = env-def
+
+; Test env-default2 value
+EnvDefault2 = env-def
+
+[Other Options]
+; A slice of strings
+; StringSlice =
+
+; A map from string to int
+int-map = a:2
+int-map = b:3
+
+`,
+ },
+ {
+ IniIncludeComments,
+ `[Application Options]
+; Show verbose debug information
+verbose = true
+verbose = true
+
+; Test default value
+Default = "quote me"
+
+; Test default array value
+DefaultArray = "1"
+DefaultArray = "2"
+DefaultArray = "3"
+
+; Testdefault map value
+; DefaultMap =
+
+; Test env-default1 value
+EnvDefault1 = env-def
+
+; Test env-default2 value
+EnvDefault2 = env-def
+
+[Other Options]
+; A slice of strings
+; StringSlice =
+
+; A map from string to int
+int-map = a:"2"
+int-map = b:"3"
+
+`,
+ `[Application Options]
+; Show verbose debug information
+verbose = true
+verbose = true
+
+; Test default value
+Default = "quote me"
+
+; Test default array value
+DefaultArray = "1"
+DefaultArray = "2"
+DefaultArray = "3"
+
+; Testdefault map value
+; DefaultMap =
+
+; Test env-default1 value
+EnvDefault1 = env-def
+
+; Test env-default2 value
+EnvDefault2 = env-def
+
+[Other Options]
+; A slice of strings
+; StringSlice =
+
+; A map from string to int
+int-map = a:"2"
+int-map = b:"3"
+
+`,
+ },
+ }
+
+ for _, test := range tests {
+ var opts helpOptions
+
+ p := NewNamedParser("TestIni", Default)
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ inip := NewIniParser(p)
+
+ read := strings.NewReader(test.read)
+ err := inip.Parse(read)
+ if err != nil {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+
+ var write bytes.Buffer
+ inip.Write(&write, test.options)
+
+ got := write.String()
+
+ msg := fmt.Sprintf("with ini options %b", test.options)
+ assertDiff(t, got, test.write, msg)
+ }
+}
+
+func TestReadIniWrongQuoting(t *testing.T) {
+ var tests = []struct {
+ iniFile string
+ lineNumber uint
+ }{
+ {
+ iniFile: `Default = "New\nvalue`,
+ lineNumber: 1,
+ },
+ {
+ iniFile: `StringSlice = "New\nvalue`,
+ lineNumber: 1,
+ },
+ {
+ iniFile: `StringSlice = "New\nvalue"
+ StringSlice = "Second\nvalue`,
+ lineNumber: 2,
+ },
+ {
+ iniFile: `DefaultMap = some:"value`,
+ lineNumber: 1,
+ },
+ {
+ iniFile: `DefaultMap = some:value
+ DefaultMap = another:"value`,
+ lineNumber: 2,
+ },
+ }
+
+ for _, test := range tests {
+ var opts helpOptions
+
+ p := NewNamedParser("TestIni", Default)
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ inip := NewIniParser(p)
+
+ inic := test.iniFile
+
+ b := strings.NewReader(inic)
+ err := inip.Parse(b)
+
+ if err == nil {
+ t.Fatalf("Expect error")
+ }
+
+ iniError := err.(*IniError)
+
+ if iniError.LineNumber != test.lineNumber {
+ t.Fatalf("Expect error on line %d", test.lineNumber)
+ }
+ }
+}
+
+func TestIniCommands(t *testing.T) {
+ var opts struct {
+ Value string `short:"v" long:"value"`
+
+ Add struct {
+ Name int `short:"n" long:"name" ini-name:"AliasName"`
+
+ Other struct {
+ O string `short:"o" long:"other"`
+ } `group:"Other Options"`
+ } `command:"add"`
+ }
+
+ p := NewNamedParser("TestIni", Default)
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ inip := NewIniParser(p)
+
+ inic := `[Application Options]
+value = some value
+
+[add]
+AliasName = 5
+
+[add.Other Options]
+other = subgroup
+
+`
+
+ b := strings.NewReader(inic)
+ err := inip.Parse(b)
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+
+ assertString(t, opts.Value, "some value")
+
+ if opts.Add.Name != 5 {
+ t.Errorf("Expected opts.Add.Name to be 5, but got %v", opts.Add.Name)
+ }
+
+ assertString(t, opts.Add.Other.O, "subgroup")
+
+ // Test writing it back
+ buf := &bytes.Buffer{}
+
+ inip.Write(buf, IniDefault)
+
+ assertDiff(t, buf.String(), inic, "ini contents")
+}
+
+func TestIniNoIni(t *testing.T) {
+ var opts struct {
+ NoValue string `short:"n" long:"novalue" no-ini:"yes"`
+ Value string `short:"v" long:"value"`
+ }
+
+ p := NewNamedParser("TestIni", Default)
+ p.AddGroup("Application Options", "The application options", &opts)
+
+ inip := NewIniParser(p)
+
+ // read INI
+ inic := `[Application Options]
+novalue = some value
+value = some other value
+`
+
+ b := strings.NewReader(inic)
+ err := inip.Parse(b)
+
+ if err == nil {
+ t.Fatalf("Expected error")
+ }
+
+ iniError := err.(*IniError)
+
+ if v := uint(2); iniError.LineNumber != v {
+ t.Errorf("Expected opts.Add.Name to be %d, but got %d", v, iniError.LineNumber)
+ }
+
+ if v := "unknown option: novalue"; iniError.Message != v {
+ t.Errorf("Expected opts.Add.Name to be %s, but got %s", v, iniError.Message)
+ }
+
+ // write INI
+ opts.NoValue = "some value"
+ opts.Value = "some other value"
+
+ file, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("Cannot create temporary file: %s", err)
+ }
+ defer os.Remove(file.Name())
+
+ err = inip.WriteFile(file.Name(), IniIncludeDefaults)
+ if err != nil {
+ t.Fatalf("Could not write ini file: %s", err)
+ }
+
+ found, err := ioutil.ReadFile(file.Name())
+ if err != nil {
+ t.Fatalf("Could not read written ini file: %s", err)
+ }
+
+ expected := "[Application Options]\nValue = some other value\n\n"
+
+ assertDiff(t, string(found), expected, "ini content")
+}
+
+func TestIniParse(t *testing.T) {
+ file, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("Cannot create temporary file: %s", err)
+ }
+ defer os.Remove(file.Name())
+
+ _, err = file.WriteString("value = 123")
+ if err != nil {
+ t.Fatalf("Cannot write to temporary file: %s", err)
+ }
+
+ file.Close()
+
+ var opts struct {
+ Value int `long:"value"`
+ }
+
+ err = IniParse(file.Name(), &opts)
+ if err != nil {
+ t.Fatalf("Could not parse ini: %s", err)
+ }
+
+ if opts.Value != 123 {
+ t.Fatalf("Expected Value to be \"123\" but was \"%d\"", opts.Value)
+ }
+}
+
+func TestIniCliOverrides(t *testing.T) {
+ file, err := ioutil.TempFile("", "")
+
+ if err != nil {
+ t.Fatalf("Cannot create temporary file: %s", err)
+ }
+
+ defer os.Remove(file.Name())
+
+ _, err = file.WriteString("values = 123\n")
+ _, err = file.WriteString("values = 456\n")
+
+ if err != nil {
+ t.Fatalf("Cannot write to temporary file: %s", err)
+ }
+
+ file.Close()
+
+ var opts struct {
+ Values []int `long:"values"`
+ }
+
+ p := NewParser(&opts, Default)
+ err = NewIniParser(p).ParseFile(file.Name())
+
+ if err != nil {
+ t.Fatalf("Could not parse ini: %s", err)
+ }
+
+ _, err = p.ParseArgs([]string{"--values", "111", "--values", "222"})
+
+ if err != nil {
+ t.Fatalf("Failed to parse arguments: %s", err)
+ }
+
+ if len(opts.Values) != 2 {
+ t.Fatalf("Expected Values to contain two elements, but got %d", len(opts.Values))
+ }
+
+ if opts.Values[0] != 111 {
+ t.Fatalf("Expected Values[0] to be 111, but got '%d'", opts.Values[0])
+ }
+
+ if opts.Values[1] != 222 {
+ t.Fatalf("Expected Values[0] to be 222, but got '%d'", opts.Values[1])
+ }
+}
+
+func TestIniOverrides(t *testing.T) {
+ file, err := ioutil.TempFile("", "")
+
+ if err != nil {
+ t.Fatalf("Cannot create temporary file: %s", err)
+ }
+
+ defer os.Remove(file.Name())
+
+ _, err = file.WriteString("value-with-default = \"ini-value\"\n")
+ _, err = file.WriteString("value-with-default-override-cli = \"ini-value\"\n")
+
+ if err != nil {
+ t.Fatalf("Cannot write to temporary file: %s", err)
+ }
+
+ file.Close()
+
+ var opts struct {
+ ValueWithDefault string `long:"value-with-default" default:"value"`
+ ValueWithDefaultOverrideCli string `long:"value-with-default-override-cli" default:"value"`
+ }
+
+ p := NewParser(&opts, Default)
+ err = NewIniParser(p).ParseFile(file.Name())
+
+ if err != nil {
+ t.Fatalf("Could not parse ini: %s", err)
+ }
+
+ _, err = p.ParseArgs([]string{"--value-with-default-override-cli", "cli-value"})
+
+ if err != nil {
+ t.Fatalf("Failed to parse arguments: %s", err)
+ }
+
+ assertString(t, opts.ValueWithDefault, "ini-value")
+ assertString(t, opts.ValueWithDefaultOverrideCli, "cli-value")
+}
+
+func TestWriteFile(t *testing.T) {
+ file, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("Cannot create temporary file: %s", err)
+ }
+ defer os.Remove(file.Name())
+
+ var opts struct {
+ Value int `long:"value"`
+ }
+
+ opts.Value = 123
+
+ p := NewParser(&opts, Default)
+ ini := NewIniParser(p)
+
+ err = ini.WriteFile(file.Name(), IniIncludeDefaults)
+ if err != nil {
+ t.Fatalf("Could not write ini file: %s", err)
+ }
+
+ found, err := ioutil.ReadFile(file.Name())
+ if err != nil {
+ t.Fatalf("Could not read written ini file: %s", err)
+ }
+
+ expected := "[Application Options]\nValue = 123\n\n"
+
+ assertDiff(t, string(found), expected, "ini content")
+}
+
+func TestOverwriteRequiredOptions(t *testing.T) {
+ var tests = []struct {
+ args []string
+ expected []string
+ }{
+ {
+ args: []string{"--value", "from CLI"},
+ expected: []string{
+ "from CLI",
+ "from default",
+ },
+ },
+ {
+ args: []string{"--value", "from CLI", "--default", "from CLI"},
+ expected: []string{
+ "from CLI",
+ "from CLI",
+ },
+ },
+ {
+ args: []string{"--config", "no file name"},
+ expected: []string{
+ "from INI",
+ "from INI",
+ },
+ },
+ {
+ args: []string{"--value", "from CLI before", "--default", "from CLI before", "--config", "no file name"},
+ expected: []string{
+ "from INI",
+ "from INI",
+ },
+ },
+ {
+ args: []string{"--value", "from CLI before", "--default", "from CLI before", "--config", "no file name", "--value", "from CLI after", "--default", "from CLI after"},
+ expected: []string{
+ "from CLI after",
+ "from CLI after",
+ },
+ },
+ }
+
+ for _, test := range tests {
+ var opts struct {
+ Config func(s string) error `long:"config" no-ini:"true"`
+ Value string `long:"value" required:"true"`
+ Default string `long:"default" required:"true" default:"from default"`
+ }
+
+ p := NewParser(&opts, Default)
+
+ opts.Config = func(s string) error {
+ ini := NewIniParser(p)
+
+ return ini.Parse(bytes.NewBufferString("value = from INI\ndefault = from INI"))
+ }
+
+ _, err := p.ParseArgs(test.args)
+ if err != nil {
+ t.Fatalf("Unexpected error %s with args %+v", err, test.args)
+ }
+
+ if opts.Value != test.expected[0] {
+ t.Fatalf("Expected Value to be \"%s\" but was \"%s\" with args %+v", test.expected[0], opts.Value, test.args)
+ }
+
+ if opts.Default != test.expected[1] {
+ t.Fatalf("Expected Default to be \"%s\" but was \"%s\" with args %+v", test.expected[1], opts.Default, test.args)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/long_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/long_test.go
new file mode 100644
index 00000000000..02fc8c70129
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/long_test.go
@@ -0,0 +1,85 @@
+package flags
+
+import (
+ "testing"
+)
+
+func TestLong(t *testing.T) {
+ var opts = struct {
+ Value bool `long:"value"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "--value")
+
+ assertStringArray(t, ret, []string{})
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+}
+
+func TestLongArg(t *testing.T) {
+ var opts = struct {
+ Value string `long:"value"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "--value", "value")
+
+ assertStringArray(t, ret, []string{})
+ assertString(t, opts.Value, "value")
+}
+
+func TestLongArgEqual(t *testing.T) {
+ var opts = struct {
+ Value string `long:"value"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "--value=value")
+
+ assertStringArray(t, ret, []string{})
+ assertString(t, opts.Value, "value")
+}
+
+func TestLongDefault(t *testing.T) {
+ var opts = struct {
+ Value string `long:"value" default:"value"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts)
+
+ assertStringArray(t, ret, []string{})
+ assertString(t, opts.Value, "value")
+}
+
+func TestLongOptional(t *testing.T) {
+ var opts = struct {
+ Value string `long:"value" optional:"yes" optional-value:"value"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "--value")
+
+ assertStringArray(t, ret, []string{})
+ assertString(t, opts.Value, "value")
+}
+
+func TestLongOptionalArg(t *testing.T) {
+ var opts = struct {
+ Value string `long:"value" optional:"yes" optional-value:"value"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "--value", "no")
+
+ assertStringArray(t, ret, []string{"no"})
+ assertString(t, opts.Value, "value")
+}
+
+func TestLongOptionalArgEqual(t *testing.T) {
+ var opts = struct {
+ Value string `long:"value" optional:"yes" optional-value:"value"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "--value=value", "no")
+
+ assertStringArray(t, ret, []string{"no"})
+ assertString(t, opts.Value, "value")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/man.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/man.go
new file mode 100644
index 00000000000..8e4a8b72671
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/man.go
@@ -0,0 +1,194 @@
+package flags
+
+import (
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+ "time"
+)
+
+func manQuote(s string) string {
+ return strings.Replace(s, "\\", "\\\\", -1)
+}
+
+func formatForMan(wr io.Writer, s string) {
+ for {
+ idx := strings.IndexRune(s, '`')
+
+ if idx < 0 {
+ fmt.Fprintf(wr, "%s", manQuote(s))
+ break
+ }
+
+ fmt.Fprintf(wr, "%s", manQuote(s[:idx]))
+
+ s = s[idx+1:]
+ idx = strings.IndexRune(s, '\'')
+
+ if idx < 0 {
+ fmt.Fprintf(wr, "%s", manQuote(s))
+ break
+ }
+
+ fmt.Fprintf(wr, "\\fB%s\\fP", manQuote(s[:idx]))
+ s = s[idx+1:]
+ }
+}
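+
+// A minimal sketch (editor's example, not from the upstream source) of what
+// formatForMan produces: a description span written as `text' is turned into
+// bold groff markup.
+//
+//    var b bytes.Buffer
+//    formatForMan(&b, "Use the `verbose' flag")
+//    // b.String() == "Use the \\fBverbose\\fP flag"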
+
+func writeManPageOptions(wr io.Writer, grp *Group) {
+ grp.eachGroup(func(group *Group) {
+ if group.Hidden {
+ return
+ }
+
+ for _, opt := range group.options {
+ if !opt.canCli() || opt.Hidden {
+ continue
+ }
+
+ fmt.Fprintln(wr, ".TP")
+ fmt.Fprintf(wr, "\\fB")
+
+ if opt.ShortName != 0 {
+ fmt.Fprintf(wr, "\\fB\\-%c\\fR", opt.ShortName)
+ }
+
+ if len(opt.LongName) != 0 {
+ if opt.ShortName != 0 {
+ fmt.Fprintf(wr, ", ")
+ }
+
+ fmt.Fprintf(wr, "\\fB\\-\\-%s\\fR", manQuote(opt.LongNameWithNamespace()))
+ }
+
+ if len(opt.ValueName) != 0 || opt.OptionalArgument {
+ if opt.OptionalArgument {
+ fmt.Fprintf(wr, " [\\fI%s=%s\\fR]", manQuote(opt.ValueName), manQuote(strings.Join(quoteV(opt.OptionalValue), ", ")))
+ } else {
+ fmt.Fprintf(wr, " \\fI%s\\fR", manQuote(opt.ValueName))
+ }
+ }
+
+ if len(opt.Default) != 0 {
+ fmt.Fprintf(wr, " <default: \\fI%s\\fR>", manQuote(strings.Join(quoteV(opt.Default), ", ")))
+ } else if len(opt.EnvDefaultKey) != 0 {
+ if runtime.GOOS == "windows" {
+ fmt.Fprintf(wr, " <default: \\fI%%%s%%\\fR>", manQuote(opt.EnvDefaultKey))
+ } else {
+ fmt.Fprintf(wr, " <default: \\fI$%s\\fR>", manQuote(opt.EnvDefaultKey))
+ }
+ }
+
+ if opt.Required {
+ fmt.Fprintf(wr, " (\\fIrequired\\fR)")
+ }
+
+ fmt.Fprintln(wr, "\\fP")
+
+ if len(opt.Description) != 0 {
+ formatForMan(wr, opt.Description)
+ fmt.Fprintln(wr, "")
+ }
+ }
+ })
+}
+
+func writeManPageSubcommands(wr io.Writer, name string, root *Command) {
+ commands := root.sortedVisibleCommands()
+
+ for _, c := range commands {
+ var nn string
+
+ if c.Hidden {
+ continue
+ }
+
+ if len(name) != 0 {
+ nn = name + " " + c.Name
+ } else {
+ nn = c.Name
+ }
+
+ writeManPageCommand(wr, nn, root, c)
+ }
+}
+
+func writeManPageCommand(wr io.Writer, name string, root *Command, command *Command) {
+ fmt.Fprintf(wr, ".SS %s\n", name)
+ fmt.Fprintln(wr, command.ShortDescription)
+
+ if len(command.LongDescription) > 0 {
+ fmt.Fprintln(wr, "")
+
+ cmdstart := fmt.Sprintf("The %s command", manQuote(command.Name))
+
+ if strings.HasPrefix(command.LongDescription, cmdstart) {
+ fmt.Fprintf(wr, "The \\fI%s\\fP command", manQuote(command.Name))
+
+ formatForMan(wr, command.LongDescription[len(cmdstart):])
+ fmt.Fprintln(wr, "")
+ } else {
+ formatForMan(wr, command.LongDescription)
+ fmt.Fprintln(wr, "")
+ }
+ }
+
+ var usage string
+ if us, ok := command.data.(Usage); ok {
+ usage = us.Usage()
+ } else if command.hasCliOptions() {
+ usage = fmt.Sprintf("[%s-OPTIONS]", command.Name)
+ }
+
+ var pre string
+ if root.hasCliOptions() {
+ pre = fmt.Sprintf("%s [OPTIONS] %s", root.Name, command.Name)
+ } else {
+ pre = fmt.Sprintf("%s %s", root.Name, command.Name)
+ }
+
+ if len(usage) > 0 {
+ fmt.Fprintf(wr, "\n\\fBUsage\\fP: %s %s\n.TP\n", manQuote(pre), manQuote(usage))
+ }
+
+ if len(command.Aliases) > 0 {
+ fmt.Fprintf(wr, "\n\\fBAliases\\fP: %s\n\n", manQuote(strings.Join(command.Aliases, ", ")))
+ }
+
+ writeManPageOptions(wr, command.Group)
+ writeManPageSubcommands(wr, name, command)
+}
+
+// WriteManPage writes a basic man page in groff format to the specified
+// writer.
+func (p *Parser) WriteManPage(wr io.Writer) {
+ t := time.Now()
+
+ fmt.Fprintf(wr, ".TH %s 1 \"%s\"\n", manQuote(p.Name), t.Format("2 January 2006"))
+ fmt.Fprintln(wr, ".SH NAME")
+ fmt.Fprintf(wr, "%s \\- %s\n", manQuote(p.Name), manQuote(p.ShortDescription))
+ fmt.Fprintln(wr, ".SH SYNOPSIS")
+
+ usage := p.Usage
+
+ if len(usage) == 0 {
+ usage = "[OPTIONS]"
+ }
+
+ fmt.Fprintf(wr, "\\fB%s\\fP %s\n", manQuote(p.Name), manQuote(usage))
+ fmt.Fprintln(wr, ".SH DESCRIPTION")
+
+ formatForMan(wr, p.LongDescription)
+ fmt.Fprintln(wr, "")
+
+ fmt.Fprintln(wr, ".SH OPTIONS")
+
+ writeManPageOptions(wr, p.Command.Group)
+
+ if len(p.visibleCommands()) > 0 {
+ fmt.Fprintln(wr, ".SH COMMANDS")
+
+ writeManPageSubcommands(wr, "", p.Command)
+ }
+}
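+
+// Hypothetical usage sketch (the parser name and descriptions below are
+// assumptions, not from the upstream source):
+//
+//    p := flags.NewNamedParser("mytool", flags.Default)
+//    p.ShortDescription = "do things"
+//    p.LongDescription = "mytool does things."
+//    p.WriteManPage(os.Stdout)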
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/marshal_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/marshal_test.go
new file mode 100644
index 00000000000..59c9ccefb96
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/marshal_test.go
@@ -0,0 +1,97 @@
+package flags
+
+import (
+ "fmt"
+ "testing"
+)
+
+type marshalled bool
+
+func (m *marshalled) UnmarshalFlag(value string) error {
+ if value == "yes" {
+ *m = true
+ } else if value == "no" {
+ *m = false
+ } else {
+ return fmt.Errorf("`%s' is not a valid value, please specify `yes' or `no'", value)
+ }
+
+ return nil
+}
+
+func (m marshalled) MarshalFlag() (string, error) {
+ if m {
+ return "yes", nil
+ }
+
+ return "no", nil
+}
+
+type marshalledError bool
+
+func (m marshalledError) MarshalFlag() (string, error) {
+ return "", newErrorf(ErrMarshal, "Failed to marshal")
+}
+
+func TestUnmarshal(t *testing.T) {
+ var opts = struct {
+ Value marshalled `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-v=yes")
+
+ assertStringArray(t, ret, []string{})
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+}
+
+func TestUnmarshalDefault(t *testing.T) {
+ var opts = struct {
+ Value marshalled `short:"v" default:"yes"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts)
+
+ assertStringArray(t, ret, []string{})
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+}
+
+func TestUnmarshalOptional(t *testing.T) {
+ var opts = struct {
+ Value marshalled `short:"v" optional:"yes" optional-value:"yes"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-v")
+
+ assertStringArray(t, ret, []string{})
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+}
+
+func TestUnmarshalError(t *testing.T) {
+ var opts = struct {
+ Value marshalled `short:"v"`
+ }{}
+
+ assertParseFail(t, ErrMarshal, fmt.Sprintf("invalid argument for flag `%cv' (expected flags.marshalled): `invalid' is not a valid value, please specify `yes' or `no'", defaultShortOptDelimiter), &opts, "-vinvalid")
+}
+
+func TestMarshalError(t *testing.T) {
+ var opts = struct {
+ Value marshalledError `short:"v"`
+ }{}
+
+ p := NewParser(&opts, Default)
+ o := p.Command.Groups()[0].Options()[0]
+
+ _, err := convertToString(o.value, o.tag)
+
+ assertError(t, err, ErrMarshal, "Failed to marshal")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/multitag.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/multitag.go
new file mode 100644
index 00000000000..96bb1a31dee
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/multitag.go
@@ -0,0 +1,140 @@
+package flags
+
+import (
+ "strconv"
+)
+
+type multiTag struct {
+ value string
+ cache map[string][]string
+}
+
+func newMultiTag(v string) multiTag {
+ return multiTag{
+ value: v,
+ }
+}
+
+func (x *multiTag) scan() (map[string][]string, error) {
+ v := x.value
+
+ ret := make(map[string][]string)
+
+ // This is mostly copied from reflect.StructTag.Get
+ for v != "" {
+ i := 0
+
+ // Skip whitespace
+ for i < len(v) && v[i] == ' ' {
+ i++
+ }
+
+ v = v[i:]
+
+ if v == "" {
+ break
+ }
+
+ // Scan to colon to find key
+ i = 0
+
+ for i < len(v) && v[i] != ' ' && v[i] != ':' && v[i] != '"' {
+ i++
+ }
+
+ if i >= len(v) {
+ return nil, newErrorf(ErrTag, "expected `:' after key name, but got end of tag (in `%v`)", x.value)
+ }
+
+ if v[i] != ':' {
+ return nil, newErrorf(ErrTag, "expected `:' after key name, but got `%v' (in `%v`)", v[i], x.value)
+ }
+
+ if i+1 >= len(v) {
+ return nil, newErrorf(ErrTag, "expected `\"' to start tag value at end of tag (in `%v`)", x.value)
+ }
+
+ if v[i+1] != '"' {
+ return nil, newErrorf(ErrTag, "expected `\"' to start tag value, but got `%v' (in `%v`)", v[i+1], x.value)
+ }
+
+ name := v[:i]
+ v = v[i+1:]
+
+ // Scan quoted string to find value
+ i = 1
+
+ for i < len(v) && v[i] != '"' {
+ if v[i] == '\n' {
+ return nil, newErrorf(ErrTag, "unexpected newline in tag value `%v' (in `%v`)", name, x.value)
+ }
+
+ if v[i] == '\\' {
+ i++
+ }
+ i++
+ }
+
+ if i >= len(v) {
+ return nil, newErrorf(ErrTag, "expected end of tag value `\"' at end of tag (in `%v`)", x.value)
+ }
+
+ val, err := strconv.Unquote(v[:i+1])
+
+ if err != nil {
+ return nil, newErrorf(ErrTag, "Malformed value of tag `%v:%v` => %v (in `%v`)", name, v[:i+1], err, x.value)
+ }
+
+ v = v[i+1:]
+
+ ret[name] = append(ret[name], val)
+ }
+
+ return ret, nil
+}
+
+func (x *multiTag) Parse() error {
+ vals, err := x.scan()
+ x.cache = vals
+
+ return err
+}
+
+func (x *multiTag) cached() map[string][]string {
+ if x.cache == nil {
+ cache, _ := x.scan()
+
+ if cache == nil {
+ cache = make(map[string][]string)
+ }
+
+ x.cache = cache
+ }
+
+ return x.cache
+}
+
+func (x *multiTag) Get(key string) string {
+ c := x.cached()
+
+ if v, ok := c[key]; ok {
+ return v[len(v)-1]
+ }
+
+ return ""
+}
+
+func (x *multiTag) GetMany(key string) []string {
+ c := x.cached()
+ return c[key]
+}
+
+func (x *multiTag) Set(key string, value string) {
+ c := x.cached()
+ c[key] = []string{value}
+}
+
+func (x *multiTag) SetMany(key string, value []string) {
+ c := x.cached()
+ c[key] = value
+}
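+
+// A minimal sketch (editor's example) of how multiTag behaves on a
+// reflect-style struct tag:
+//
+//    tag := newMultiTag(`short:"v" long:"verbose" description:"Show more"`)
+//    tag.Get("long")      // "verbose" (the last value for a key wins)
+//    tag.GetMany("short") // []string{"v"}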
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/option.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/option.go
new file mode 100644
index 00000000000..a7f4f9a9adc
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/option.go
@@ -0,0 +1,414 @@
+package flags
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "syscall"
+ "unicode/utf8"
+)
+
+// Option flag information. Contains a description of the option, short and
+// long name as well as a default value and whether an argument for this
+// flag is optional.
+type Option struct {
+ // The description of the option flag. This description is shown
+ // automatically in the built-in help.
+ Description string
+
+ // The short name of the option (a single character). If not 0, the
+ // option flag can be 'activated' using -<ShortName>. Either ShortName
+ // or LongName needs to be non-empty.
+ ShortName rune
+
+ // The long name of the option. If not "", the option flag can be
+ // activated using --<LongName>. Either ShortName or LongName needs
+ // to be non-empty.
+ LongName string
+
+ // The default value of the option.
+ Default []string
+
+ // The optional environment default value key name.
+ EnvDefaultKey string
+
+ // The optional delimiter string for EnvDefaultKey values.
+ EnvDefaultDelim string
+
+ // If true, specifies that the argument to an option flag is optional.
+ // When no argument to the flag is specified on the command line, the
+ // value of OptionalValue will be set in the field this option represents.
+ // This is only valid for non-boolean options.
+ OptionalArgument bool
+
+ // The optional value of the option. The optional value is used when
+ // the option flag is marked as having an OptionalArgument. This means
+ // that when the flag is specified, but no option argument is given,
+ // the value of the field this option represents will be set to
+ // OptionalValue. This is only valid for non-boolean options.
+ OptionalValue []string
+
+ // If true, the option _must_ be specified on the command line. If the
+ // option is not specified, the parser will generate an ErrRequired type
+ // error.
+ Required bool
+
+ // A name for the value of an option shown in the Help as --flag [ValueName]
+ ValueName string
+
+ // A mask value to show in the help instead of the default value. This
+ // is useful for hiding sensitive information in the help, such as
+ // passwords.
+ DefaultMask string
+
+ // If non-empty, only this set of values is allowed for the option.
+ Choices []string
+
+ // If true, the option is not displayed in the help or man page
+ Hidden bool
+
+ // The group which the option belongs to
+ group *Group
+
+ // The struct field which the option represents.
+ field reflect.StructField
+
+ // The struct field value which the option represents.
+ value reflect.Value
+
+ // Determines if the option will be always quoted in the INI output
+ iniQuote bool
+
+ tag multiTag
+ isSet bool
+ preventDefault bool
+
+ defaultLiteral string
+}
+
+// LongNameWithNamespace returns the option's long name with the group namespaces
+// prepended by walking up the option's group tree. Namespaces and the long name
+// itself are separated by the parser's namespace delimiter. If the long name is
+// empty an empty string is returned.
+func (option *Option) LongNameWithNamespace() string {
+ if len(option.LongName) == 0 {
+ return ""
+ }
+
+ // fetch the namespace delimiter from the parser which is always at the
+ // end of the group hierarchy
+ namespaceDelimiter := ""
+ g := option.group
+
+ for {
+ if p, ok := g.parent.(*Parser); ok {
+ namespaceDelimiter = p.NamespaceDelimiter
+
+ break
+ }
+
+ switch i := g.parent.(type) {
+ case *Command:
+ g = i.Group
+ case *Group:
+ g = i
+ }
+ }
+
+ // concatenate long name with namespace
+ longName := option.LongName
+ g = option.group
+
+ for g != nil {
+ if g.Namespace != "" {
+ longName = g.Namespace + namespaceDelimiter + longName
+ }
+
+ switch i := g.parent.(type) {
+ case *Command:
+ g = i.Group
+ case *Group:
+ g = i
+ case *Parser:
+ g = nil
+ }
+ }
+
+ return longName
+}
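+
+// A minimal sketch (editor's example, hypothetical struct): with the default
+// "." namespace delimiter, the option below is exposed on the command line
+// as --ns1.opt3, which is exactly what LongNameWithNamespace returns
+// ("ns1.opt3").
+//
+//    var opts struct {
+//        Group struct {
+//            Opt3 bool `long:"opt3"`
+//        } `group:"group2" namespace:"ns1"`
+//    }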
+
+// String converts an option to a human friendly readable string describing the
+// option.
+func (option *Option) String() string {
+ var s string
+ var short string
+
+ if option.ShortName != 0 {
+ data := make([]byte, utf8.RuneLen(option.ShortName))
+ utf8.EncodeRune(data, option.ShortName)
+ short = string(data)
+
+ if len(option.LongName) != 0 {
+ s = fmt.Sprintf("%s%s, %s%s",
+ string(defaultShortOptDelimiter), short,
+ defaultLongOptDelimiter, option.LongNameWithNamespace())
+ } else {
+ s = fmt.Sprintf("%s%s", string(defaultShortOptDelimiter), short)
+ }
+ } else if len(option.LongName) != 0 {
+ s = fmt.Sprintf("%s%s", defaultLongOptDelimiter, option.LongNameWithNamespace())
+ }
+
+ return s
+}
+
+// Value returns the option value as an interface{}.
+func (option *Option) Value() interface{} {
+ return option.value.Interface()
+}
+
+// IsSet returns true if the option has been set.
+func (option *Option) IsSet() bool {
+ return option.isSet
+}
+
+// Set the value of an option to the specified value. An error will be returned
+// if the specified value could not be converted to the corresponding option
+// value type.
+func (option *Option) set(value *string) error {
+ kind := option.value.Type().Kind()
+
+ if (kind == reflect.Map || kind == reflect.Slice) && !option.isSet {
+ option.empty()
+ }
+
+ option.isSet = true
+ option.preventDefault = true
+
+ if len(option.Choices) != 0 {
+ found := false
+
+ for _, choice := range option.Choices {
+ if choice == *value {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ allowed := strings.Join(option.Choices[0:len(option.Choices)-1], ", ")
+
+ if len(option.Choices) > 1 {
+ allowed += " or " + option.Choices[len(option.Choices)-1]
+ }
+
+ return newErrorf(ErrInvalidChoice,
+ "Invalid value `%s' for option `%s'. Allowed values are: %s",
+ *value, option, allowed)
+ }
+ }
+
+ if option.isFunc() {
+ return option.call(value)
+ } else if value != nil {
+ return convert(*value, option.value, option.tag)
+ }
+
+ return convert("", option.value, option.tag)
+}
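+
+// A minimal sketch (editor's example, assuming the repeated `choice` struct
+// tag supported by this package): Choices is populated from those tags, and
+// set rejects anything outside them with ErrInvalidChoice.
+//
+//    var opts struct {
+//        Level string `long:"level" choice:"debug" choice:"info" choice:"warn"`
+//    }
+//    // parsing "--level=trace" would fail with roughly:
+//    // Invalid value `trace' for option `--level'. Allowed values are: debug, info or warn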
+
+func (option *Option) canCli() bool {
+ return option.ShortName != 0 || len(option.LongName) != 0
+}
+
+func (option *Option) canArgument() bool {
+ if u := option.isUnmarshaler(); u != nil {
+ return true
+ }
+
+ return !option.isBool()
+}
+
+func (option *Option) emptyValue() reflect.Value {
+ tp := option.value.Type()
+
+ if tp.Kind() == reflect.Map {
+ return reflect.MakeMap(tp)
+ }
+
+ return reflect.Zero(tp)
+}
+
+func (option *Option) empty() {
+ if !option.isFunc() {
+ option.value.Set(option.emptyValue())
+ }
+}
+
+func (option *Option) clearDefault() {
+ usedDefault := option.Default
+
+ if envKey := option.EnvDefaultKey; envKey != "" {
+ // os.Getenv() makes no distinction between undefined and
+ // empty values, so we use syscall.Getenv()
+ if value, ok := syscall.Getenv(envKey); ok {
+ if option.EnvDefaultDelim != "" {
+ usedDefault = strings.Split(value,
+ option.EnvDefaultDelim)
+ } else {
+ usedDefault = []string{value}
+ }
+ }
+ }
+
+ if len(usedDefault) > 0 {
+ option.empty()
+
+ for _, d := range usedDefault {
+ option.set(&d)
+ }
+ } else {
+ tp := option.value.Type()
+
+ switch tp.Kind() {
+ case reflect.Map:
+ if option.value.IsNil() {
+ option.empty()
+ }
+ case reflect.Slice:
+ if option.value.IsNil() {
+ option.empty()
+ }
+ }
+ }
+}
+
+func (option *Option) valueIsDefault() bool {
+ // Check if the value of the option corresponds to its
+ // default value
+ emptyval := option.emptyValue()
+
+ checkvalptr := reflect.New(emptyval.Type())
+ checkval := reflect.Indirect(checkvalptr)
+
+ checkval.Set(emptyval)
+
+ if len(option.Default) != 0 {
+ for _, v := range option.Default {
+ convert(v, checkval, option.tag)
+ }
+ }
+
+ return reflect.DeepEqual(option.value.Interface(), checkval.Interface())
+}
+
+func (option *Option) isUnmarshaler() Unmarshaler {
+ v := option.value
+
+ for {
+ if !v.CanInterface() {
+ break
+ }
+
+ i := v.Interface()
+
+ if u, ok := i.(Unmarshaler); ok {
+ return u
+ }
+
+ if !v.CanAddr() {
+ break
+ }
+
+ v = v.Addr()
+ }
+
+ return nil
+}
+
+func (option *Option) isBool() bool {
+ tp := option.value.Type()
+
+ for {
+ switch tp.Kind() {
+ case reflect.Bool:
+ return true
+ case reflect.Slice:
+ return (tp.Elem().Kind() == reflect.Bool)
+ case reflect.Func:
+ return tp.NumIn() == 0
+ case reflect.Ptr:
+ tp = tp.Elem()
+ default:
+ return false
+ }
+ }
+}
+
+func (option *Option) isFunc() bool {
+ return option.value.Type().Kind() == reflect.Func
+}
+
+func (option *Option) call(value *string) error {
+ var retval []reflect.Value
+
+ if value == nil {
+ retval = option.value.Call(nil)
+ } else {
+ tp := option.value.Type().In(0)
+
+ val := reflect.New(tp)
+ val = reflect.Indirect(val)
+
+ if err := convert(*value, val, option.tag); err != nil {
+ return err
+ }
+
+ retval = option.value.Call([]reflect.Value{val})
+ }
+
+ if len(retval) == 1 && retval[0].Type() == reflect.TypeOf((*error)(nil)).Elem() {
+ if retval[0].Interface() == nil {
+ return nil
+ }
+
+ return retval[0].Interface().(error)
+ }
+
+ return nil
+}
+
+func (option *Option) updateDefaultLiteral() {
+ defs := option.Default
+ def := ""
+
+ if len(defs) == 0 && option.canArgument() {
+ var showdef bool
+
+ switch option.field.Type.Kind() {
+ case reflect.Func, reflect.Ptr:
+ showdef = !option.value.IsNil()
+ case reflect.Slice, reflect.String, reflect.Array:
+ showdef = option.value.Len() > 0
+ case reflect.Map:
+ showdef = !option.value.IsNil() && option.value.Len() > 0
+ default:
+ zeroval := reflect.Zero(option.field.Type)
+ showdef = !reflect.DeepEqual(zeroval.Interface(), option.value.Interface())
+ }
+
+ if showdef {
+ def, _ = convertToString(option.value, option.tag)
+ }
+ } else if len(defs) != 0 {
+ l := len(defs) - 1
+
+ for i := 0; i < l; i++ {
+ def += quoteIfNeeded(defs[i]) + ", "
+ }
+
+ def += quoteIfNeeded(defs[l])
+ }
+
+ option.defaultLiteral = def
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/options_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/options_test.go
new file mode 100644
index 00000000000..b0fe9f4565c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/options_test.go
@@ -0,0 +1,45 @@
+package flags
+
+import (
+ "testing"
+)
+
+func TestPassDoubleDash(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+ }{}
+
+ p := NewParser(&opts, PassDoubleDash)
+ ret, err := p.ParseArgs([]string{"-v", "--", "-v", "-g"})
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ return
+ }
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ assertStringArray(t, ret, []string{"-v", "-g"})
+}
+
+func TestPassAfterNonOption(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+ }{}
+
+ p := NewParser(&opts, PassAfterNonOption)
+ ret, err := p.ParseArgs([]string{"-v", "arg", "-v", "-g"})
+
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ return
+ }
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+
+ assertStringArray(t, ret, []string{"arg", "-v", "-g"})
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/optstyle_other.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/optstyle_other.go
new file mode 100644
index 00000000000..29ca4b6063b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/optstyle_other.go
@@ -0,0 +1,67 @@
+// +build !windows
+
+package flags
+
+import (
+ "strings"
+)
+
+const (
+ defaultShortOptDelimiter = '-'
+ defaultLongOptDelimiter = "--"
+ defaultNameArgDelimiter = '='
+)
+
+func argumentStartsOption(arg string) bool {
+ return len(arg) > 0 && arg[0] == '-'
+}
+
+func argumentIsOption(arg string) bool {
+ if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' {
+ return true
+ }
+
+ if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' {
+ return true
+ }
+
+ return false
+}
+
+// stripOptionPrefix returns the option without its prefix and whether
+// the option is a long option.
+func stripOptionPrefix(optname string) (prefix string, name string, islong bool) {
+ if strings.HasPrefix(optname, "--") {
+ return "--", optname[2:], true
+ } else if strings.HasPrefix(optname, "-") {
+ return "-", optname[1:], false
+ }
+
+ return "", optname, false
+}
+
+// splitOption attempts to split the passed option into a name and an argument.
+// When there is no argument specified, nil will be returned for it.
+func splitOption(prefix string, option string, islong bool) (string, string, *string) {
+ pos := strings.Index(option, "=")
+
+ if (islong && pos >= 0) || (!islong && pos == 1) {
+ rest := option[pos+1:]
+ return option[:pos], "=", &rest
+ }
+
+ return option, "", nil
+}
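+
+// A minimal sketch (editor's example) of splitOption on POSIX-style input:
+//
+//    splitOption("--", "foo=bar", true) // ("foo", "=", pointer to "bar")
+//    splitOption("-", "v", false)       // ("v", "", nil): no argument present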
+
+// addHelpGroup adds a new group that contains default help parameters.
+func (c *Command) addHelpGroup(showHelp func() error) *Group {
+ var help struct {
+ ShowHelp func() error `short:"h" long:"help" description:"Show this help message"`
+ }
+
+ help.ShowHelp = showHelp
+ ret, _ := c.AddGroup("Help Options", "", &help)
+ ret.isBuiltinHelp = true
+
+ return ret
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/optstyle_windows.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/optstyle_windows.go
new file mode 100644
index 00000000000..a51de9cb29c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/optstyle_windows.go
@@ -0,0 +1,106 @@
+package flags
+
+import (
+ "strings"
+)
+
+// Windows uses a forward slash for both short and long options. It also uses
+// a colon as the name/argument delimiter.
+const (
+ defaultShortOptDelimiter = '/'
+ defaultLongOptDelimiter = "/"
+ defaultNameArgDelimiter = ':'
+)
+
+func argumentStartsOption(arg string) bool {
+ return len(arg) > 0 && (arg[0] == '-' || arg[0] == '/')
+}
+
+func argumentIsOption(arg string) bool {
+ // Windows-style options allow a forward slash as the option
+ // delimiter.
+ if len(arg) > 1 && arg[0] == '/' {
+ return true
+ }
+
+ if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' {
+ return true
+ }
+
+ if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' {
+ return true
+ }
+
+ return false
+}
+
+// stripOptionPrefix returns the option without its prefix and whether
+// the option is a long option.
+func stripOptionPrefix(optname string) (prefix string, name string, islong bool) {
+ // Determine if the argument is a long option or not. Windows
+ // typically supports both long and short options with a single
+ // forward slash as the option delimiter, so handle this situation
+ // nicely.
+ possplit := 0
+
+ if strings.HasPrefix(optname, "--") {
+ possplit = 2
+ islong = true
+ } else if strings.HasPrefix(optname, "-") {
+ possplit = 1
+ islong = false
+ } else if strings.HasPrefix(optname, "/") {
+ possplit = 1
+ islong = len(optname) > 2
+ }
+
+ return optname[:possplit], optname[possplit:], islong
+}
+
+// splitOption attempts to split the passed option into a name and an argument.
+// When there is no argument specified, nil will be returned for it.
+func splitOption(prefix string, option string, islong bool) (string, string, *string) {
+ if len(option) == 0 {
+ return option, "", nil
+ }
+
+ // Windows typically uses a colon for the option name and argument
+ // delimiter while POSIX typically uses an equals. Support both styles,
+ // but don't allow the two to be mixed. That is to say /foo:bar and
+ // --foo=bar are acceptable, but /foo=bar and --foo:bar are not.
+ var pos int
+ var sp string
+
+ if prefix == "/" {
+ sp = ":"
+ pos = strings.Index(option, sp)
+ } else if len(prefix) > 0 {
+ sp = "="
+ pos = strings.Index(option, sp)
+ }
+
+ if (islong && pos >= 0) || (!islong && pos == 1) {
+ rest := option[pos+1:]
+ return option[:pos], sp, &rest
+ }
+
+ return option, "", nil
+}
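+
+// A minimal sketch (editor's example): both delimiter styles work on
+// Windows, but they cannot be mixed.
+//
+//    splitOption("/", "foo:bar", true) // ("foo", ":", pointer to "bar")
+//    splitOption("/", "foo=bar", true) // ("foo=bar", "", nil): '=' is not valid after '/'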
+
+// addHelpGroup adds a new group that contains default help parameters.
+func (c *Command) addHelpGroup(showHelp func() error) *Group {
+ // Windows CLI applications typically use /? for help, so make that
+ // available in addition to the POSIX-style -h and --help.
+ var help struct {
+ ShowHelpWindows func() error `short:"?" description:"Show this help message"`
+ ShowHelpPosix func() error `short:"h" long:"help" description:"Show this help message"`
+ }
+
+ help.ShowHelpWindows = showHelp
+ help.ShowHelpPosix = showHelp
+
+ ret, _ := c.AddGroup("Help Options", "", &help)
+ ret.isBuiltinHelp = true
+
+ return ret
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/parser.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/parser.go
new file mode 100644
index 00000000000..f9e07ee10e3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/parser.go
@@ -0,0 +1,652 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// A Parser provides command line option parsing. It can contain several
+// option groups each with their own set of options.
+type Parser struct {
+ // Embedded, see Command for more information
+ *Command
+
+ // A usage string to be displayed in the help message.
+ Usage string
+
+ // Option flags changing the behavior of the parser.
+ Options Options
+
+ // NamespaceDelimiter separates group namespaces and option long names
+ NamespaceDelimiter string
+
+ // UnknownOptionHandler is a function which gets called when the parser
+ // encounters an unknown option. The function receives the unknown option
+ // name, a SplitArgument which specifies its value if set with an argument
+ // separator, and the remaining command line arguments.
+ // It should return a new list of remaining arguments to continue parsing,
+ // or an error to indicate a parse failure.
+ UnknownOptionHandler func(option string, arg SplitArgument, args []string) ([]string, error)
+
+ // CompletionHandler is a function that gets called to handle the completion of
+ // items. By default, the items are printed and the application is exited.
+ // You can override this default behavior by specifying a custom CompletionHandler.
+ CompletionHandler func(items []Completion)
+
+ internalError error
+}
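+
+// A minimal sketch (editor's example) of an UnknownOptionHandler that skips
+// unknown options instead of failing; the surrounding parser setup is an
+// assumption.
+//
+//    p := flags.NewParser(&opts, flags.Default)
+//    p.UnknownOptionHandler = func(option string, arg flags.SplitArgument, args []string) ([]string, error) {
+//        return args, nil // drop the unknown option and keep parsing
+//    }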
+
+// SplitArgument represents the argument value of an option that was passed using
+// an argument separator.
+type SplitArgument interface {
+ // String returns the option's value as a string, and a boolean indicating
+ // if the option was present.
+ Value() (string, bool)
+}
+
+type strArgument struct {
+ value *string
+}
+
+func (s strArgument) Value() (string, bool) {
+ if s.value == nil {
+ return "", false
+ }
+
+ return *s.value, true
+}
+
+// Options provides parser options that change the behavior of the option
+// parser.
+type Options uint
+
+const (
+ // None indicates no options.
+ None Options = 0
+
+ // HelpFlag adds a default Help Options group to the parser containing
+ // -h and --help options. When either -h or --help is specified on the
+ // command line, the parser will return the special error of type
+ // ErrHelp. When PrintErrors is also specified, then the help message
+ // will also be automatically printed to os.Stderr.
+ HelpFlag = 1 << iota
+
+ // PassDoubleDash passes all arguments after a double dash, --, as
+ // remaining command line arguments (i.e. they will not be parsed for
+ // flags).
+ PassDoubleDash
+
+ // IgnoreUnknown ignores any unknown options and passes them as
+ // remaining command line arguments instead of generating an error.
+ IgnoreUnknown
+
+ // PrintErrors prints any errors which occurred during parsing to
+ // os.Stderr.
+ PrintErrors
+
+ // PassAfterNonOption passes all arguments after the first non option
+ // as remaining command line arguments. This is equivalent to strict
+ // POSIX processing.
+ PassAfterNonOption
+
+ // Default is a convenient default set of options which should cover
+ // most of the uses of the flags package.
+ Default = HelpFlag | PrintErrors | PassDoubleDash
+)
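+
+// A minimal sketch (editor's example): Options values form a bit mask and
+// combine with bitwise OR, e.g. the Default behavior minus automatic error
+// printing:
+//
+//    p := flags.NewParser(&opts, flags.HelpFlag|flags.PassDoubleDash)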
+
+type parseState struct {
+ arg string
+ args []string
+ retargs []string
+ positional []*Arg
+ err error
+
+ command *Command
+ lookup lookup
+}
+
+// Parse is a convenience function to parse command line options with default
+// settings. The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"). For more control, use
+// flags.NewParser.
+func Parse(data interface{}) ([]string, error) {
+ return NewParser(data, Default).Parse()
+}
+
+// ParseArgs is a convenience function to parse command line options with default
+// settings. The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"). The args argument is
+// the list of command line arguments to parse. If you just want to parse the
+// default program command line arguments (i.e. os.Args), then use flags.Parse
+// instead. For more control, use flags.NewParser.
+func ParseArgs(data interface{}, args []string) ([]string, error) {
+ return NewParser(data, Default).ParseArgs(args)
+}
+
+// NewParser creates a new parser. It uses os.Args[0] as the application
+// name and then calls NewNamedParser (see NewNamedParser for more
+// details). The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"), or nil if the default
+// group should not be added. The options parameter specifies a set of options
+// for the parser.
+func NewParser(data interface{}, options Options) *Parser {
+ p := NewNamedParser(path.Base(os.Args[0]), options)
+
+ if data != nil {
+ g, err := p.AddGroup("Application Options", "", data)
+
+ if err == nil {
+ g.parent = p
+ }
+
+ p.internalError = err
+ }
+
+ return p
+}
+
+// NewNamedParser creates a new parser. The appname is used to display the
+// executable name in the built-in help message. Option groups and commands can
+// be added to this parser by using AddGroup and AddCommand.
+func NewNamedParser(appname string, options Options) *Parser {
+ p := &Parser{
+ Command: newCommand(appname, "", "", nil),
+ Options: options,
+ NamespaceDelimiter: ".",
+ }
+
+ p.Command.parent = p
+
+ return p
+}
+
+// Parse parses the command line arguments from os.Args using Parser.ParseArgs.
+// For more detailed information see ParseArgs.
+func (p *Parser) Parse() ([]string, error) {
+ return p.ParseArgs(os.Args[1:])
+}
+
+// ParseArgs parses the command line arguments according to the option groups that
+// were added to the parser. On successful parsing of the arguments, the
+// remaining, non-option, arguments (if any) are returned. The returned error
+// indicates a parsing error and can be used with PrintError to display
+// contextual information on where the error occurred exactly.
+//
+// When the common help group has been added (AddHelp) and either -h or --help
+// was specified in the command line arguments, a help message will be
+// automatically printed if the PrintErrors option is enabled.
+// Furthermore, the special error type ErrHelp is returned.
+// It is up to the caller to exit the program if so desired.
+func (p *Parser) ParseArgs(args []string) ([]string, error) {
+ if p.internalError != nil {
+ return nil, p.internalError
+ }
+
+ p.eachOption(func(c *Command, g *Group, option *Option) {
+ option.isSet = false
+ option.updateDefaultLiteral()
+ })
+
+ // Add built-in help group to all commands if necessary
+ if (p.Options & HelpFlag) != None {
+ p.addHelpGroups(p.showBuiltinHelp)
+ }
+
+ compval := os.Getenv("GO_FLAGS_COMPLETION")
+
+ if len(compval) != 0 {
+ comp := &completion{parser: p}
+ items := comp.complete(args)
+
+ if p.CompletionHandler != nil {
+ p.CompletionHandler(items)
+ } else {
+ comp.print(items, compval == "verbose")
+ os.Exit(0)
+ }
+
+ return nil, nil
+ }
+
+ s := &parseState{
+ args: args,
+ retargs: make([]string, 0, len(args)),
+ }
+
+ p.fillParseState(s)
+
+ for !s.eof() {
+ arg := s.pop()
+
+ // When PassDoubleDash is set and we encounter a --, then
+ // simply append all the rest as arguments and break out
+ if (p.Options&PassDoubleDash) != None && arg == "--" {
+ s.addArgs(s.args...)
+ break
+ }
+
+ if !argumentIsOption(arg) {
+ // Note: this also sets s.err, so we can just check for
+ // nil here and use s.err later
+ if p.parseNonOption(s) != nil {
+ break
+ }
+
+ continue
+ }
+
+ var err error
+
+ prefix, optname, islong := stripOptionPrefix(arg)
+ optname, _, argument := splitOption(prefix, optname, islong)
+
+ if islong {
+ err = p.parseLong(s, optname, argument)
+ } else {
+ err = p.parseShort(s, optname, argument)
+ }
+
+ if err != nil {
+ ignoreUnknown := (p.Options & IgnoreUnknown) != None
+ parseErr := wrapError(err)
+
+ if parseErr.Type != ErrUnknownFlag || (!ignoreUnknown && p.UnknownOptionHandler == nil) {
+ s.err = parseErr
+ break
+ }
+
+ if ignoreUnknown {
+ s.addArgs(arg)
+ } else if p.UnknownOptionHandler != nil {
+ modifiedArgs, err := p.UnknownOptionHandler(optname, strArgument{argument}, s.args)
+
+ if err != nil {
+ s.err = err
+ break
+ }
+
+ s.args = modifiedArgs
+ }
+ }
+ }
+
+ if s.err == nil {
+ p.eachOption(func(c *Command, g *Group, option *Option) {
+ if option.preventDefault {
+ return
+ }
+
+ option.clearDefault()
+ })
+
+ s.checkRequired(p)
+ }
+
+ var reterr error
+
+ if s.err != nil {
+ reterr = s.err
+ } else if len(s.command.commands) != 0 && !s.command.SubcommandsOptional {
+ reterr = s.estimateCommand()
+ } else if cmd, ok := s.command.data.(Commander); ok {
+ reterr = cmd.Execute(s.retargs)
+ }
+
+ if reterr != nil {
+ var retargs []string
+
+ if ourErr, ok := reterr.(*Error); !ok || ourErr.Type != ErrHelp {
+ retargs = append([]string{s.arg}, s.args...)
+ } else {
+ retargs = s.args
+ }
+
+ return retargs, p.printError(reterr)
+ }
+
+ return s.retargs, nil
+}
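+
+// A minimal sketch (editor's example) of the usual caller-side handling of
+// the special ErrHelp error returned by ParseArgs:
+//
+//    if _, err := p.ParseArgs(os.Args[1:]); err != nil {
+//        if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
+//            os.Exit(0) // help was printed; not a failure
+//        }
+//        os.Exit(1)
+//    }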
+
+func (p *parseState) eof() bool {
+ return len(p.args) == 0
+}
+
+func (p *parseState) pop() string {
+ if p.eof() {
+ return ""
+ }
+
+ p.arg = p.args[0]
+ p.args = p.args[1:]
+
+ return p.arg
+}
+
+func (p *parseState) peek() string {
+ if p.eof() {
+ return ""
+ }
+
+ return p.args[0]
+}
+
+func (p *parseState) checkRequired(parser *Parser) error {
+ c := parser.Command
+
+ var required []*Option
+
+ for c != nil {
+ c.eachGroup(func(g *Group) {
+ for _, option := range g.options {
+ if !option.isSet && option.Required {
+ required = append(required, option)
+ }
+ }
+ })
+
+ c = c.Active
+ }
+
+ if len(required) == 0 {
+ if len(p.positional) > 0 {
+ var reqnames []string
+
+ for _, arg := range p.positional {
+ argRequired := (!arg.isRemaining() && p.command.ArgsRequired) || arg.Required != 0
+
+ if !argRequired {
+ continue
+ }
+
+ if arg.isRemaining() {
+ if arg.value.Len() < arg.Required {
+ var arguments string
+
+ if arg.Required > 1 {
+ arguments = "arguments, but got only " + fmt.Sprintf("%d", arg.value.Len())
+ } else {
+ arguments = "argument"
+ }
+
+ reqnames = append(reqnames, "`"+arg.Name+" (at least "+fmt.Sprintf("%d", arg.Required)+" "+arguments+")`")
+ }
+ } else {
+ reqnames = append(reqnames, "`"+arg.Name+"`")
+ }
+ }
+
+ if len(reqnames) == 0 {
+ return nil
+ }
+
+ var msg string
+
+ if len(reqnames) == 1 {
+ msg = fmt.Sprintf("the required argument %s was not provided", reqnames[0])
+ } else {
+ msg = fmt.Sprintf("the required arguments %s and %s were not provided",
+ strings.Join(reqnames[:len(reqnames)-1], ", "), reqnames[len(reqnames)-1])
+ }
+
+ p.err = newError(ErrRequired, msg)
+ return p.err
+ }
+
+ return nil
+ }
+
+ names := make([]string, 0, len(required))
+
+ for _, k := range required {
+ names = append(names, "`"+k.String()+"'")
+ }
+
+ sort.Strings(names)
+
+ var msg string
+
+ if len(names) == 1 {
+ msg = fmt.Sprintf("the required flag %s was not specified", names[0])
+ } else {
+ msg = fmt.Sprintf("the required flags %s and %s were not specified",
+ strings.Join(names[:len(names)-1], ", "), names[len(names)-1])
+ }
+
+ p.err = newError(ErrRequired, msg)
+ return p.err
+}
+
+func (p *parseState) estimateCommand() error {
+ commands := p.command.sortedVisibleCommands()
+ cmdnames := make([]string, len(commands))
+
+ for i, v := range commands {
+ cmdnames[i] = v.Name
+ }
+
+ var msg string
+ var errtype ErrorType
+
+ if len(p.retargs) != 0 {
+ c, l := closestChoice(p.retargs[0], cmdnames)
+ msg = fmt.Sprintf("Unknown command `%s'", p.retargs[0])
+ errtype = ErrUnknownCommand
+
+ if float32(l)/float32(len(c)) < 0.5 {
+ msg = fmt.Sprintf("%s, did you mean `%s'?", msg, c)
+ } else if len(cmdnames) == 1 {
+ msg = fmt.Sprintf("%s. You should use the %s command",
+ msg,
+ cmdnames[0])
+ } else {
+ msg = fmt.Sprintf("%s. Please specify one command of: %s or %s",
+ msg,
+ strings.Join(cmdnames[:len(cmdnames)-1], ", "),
+ cmdnames[len(cmdnames)-1])
+ }
+ } else {
+ errtype = ErrCommandRequired
+
+ if len(cmdnames) == 1 {
+ msg = fmt.Sprintf("Please specify the %s command", cmdnames[0])
+ } else {
+ msg = fmt.Sprintf("Please specify one command of: %s or %s",
+ strings.Join(cmdnames[:len(cmdnames)-1], ", "),
+ cmdnames[len(cmdnames)-1])
+ }
+ }
+
+ return newError(errtype, msg)
+}
+
+func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg bool, argument *string) (err error) {
+ if !option.canArgument() {
+ if argument != nil {
+ return newErrorf(ErrNoArgumentForBool, "bool flag `%s' cannot have an argument", option)
+ }
+
+ err = option.set(nil)
+ } else if argument != nil || (canarg && !s.eof()) {
+ var arg string
+
+ if argument != nil {
+ arg = *argument
+ } else {
+ arg = s.pop()
+
+ if argumentIsOption(arg) {
+ return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got option `%s'", option, arg)
+ } else if p.Options&PassDoubleDash != 0 && arg == "--" {
+ return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got double dash `--'", option)
+ }
+ }
+
+ if option.tag.Get("unquote") != "false" {
+ arg, err = unquoteIfPossible(arg)
+ }
+
+ if err == nil {
+ err = option.set(&arg)
+ }
+ } else if option.OptionalArgument {
+ option.empty()
+
+ for _, v := range option.OptionalValue {
+ err = option.set(&v)
+
+ if err != nil {
+ break
+ }
+ }
+ } else {
+ err = newErrorf(ErrExpectedArgument, "expected argument for flag `%s'", option)
+ }
+
+ if err != nil {
+ if _, ok := err.(*Error); !ok {
+ err = newErrorf(ErrMarshal, "invalid argument for flag `%s' (expected %s): %s",
+ option,
+ option.value.Type(),
+ err.Error())
+ }
+ }
+
+ return err
+}
+
+func (p *Parser) parseLong(s *parseState, name string, argument *string) error {
+ if option := s.lookup.longNames[name]; option != nil {
+ // Only long options whose argument is not optional can consume
+ // an argument from the argument list
+ canarg := !option.OptionalArgument
+
+ return p.parseOption(s, name, option, canarg, argument)
+ }
+
+ return newErrorf(ErrUnknownFlag, "unknown flag `%s'", name)
+}
+
+func (p *Parser) splitShortConcatArg(s *parseState, optname string) (string, *string) {
+ c, n := utf8.DecodeRuneInString(optname)
+
+ if n == len(optname) {
+ return optname, nil
+ }
+
+ first := string(c)
+
+ if option := s.lookup.shortNames[first]; option != nil && option.canArgument() {
+ arg := optname[n:]
+ return first, &arg
+ }
+
+ return optname, nil
+}
+
+func (p *Parser) parseShort(s *parseState, optname string, argument *string) error {
+ if argument == nil {
+ optname, argument = p.splitShortConcatArg(s, optname)
+ }
+
+ for i, c := range optname {
+ shortname := string(c)
+
+ if option := s.lookup.shortNames[shortname]; option != nil {
+			// Only the last short option can consume an argument from
+			// the argument list, and only if its argument is not optional
+ canarg := (i+utf8.RuneLen(c) == len(optname)) && !option.OptionalArgument
+
+ if err := p.parseOption(s, shortname, option, canarg, argument); err != nil {
+ return err
+ }
+ } else {
+ return newErrorf(ErrUnknownFlag, "unknown flag `%s'", shortname)
+ }
+
+		// Only the first option can have a concatenated argument, so just
+		// clear argument here
+ argument = nil
+ }
+
+ return nil
+}
+
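+// addArgs feeds args into the remaining positional arguments, keeping a
+// "remaining"-style positional in place so it can absorb several values;
+// whatever is left over is collected into retargs.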
+func (p *parseState) addArgs(args ...string) error {
+ for len(p.positional) > 0 && len(args) > 0 {
+ arg := p.positional[0]
+
+ if err := convert(args[0], arg.value, arg.tag); err != nil {
+ return err
+ }
+
+ if !arg.isRemaining() {
+ p.positional = p.positional[1:]
+ }
+
+ args = args[1:]
+ }
+
+ p.retargs = append(p.retargs, args...)
+ return nil
+}
+
+func (p *Parser) parseNonOption(s *parseState) error {
+ if len(s.positional) > 0 {
+ return s.addArgs(s.arg)
+ }
+
+ if cmd := s.lookup.commands[s.arg]; cmd != nil {
+ s.command.Active = cmd
+ cmd.fillParseState(s)
+ } else if (p.Options & PassAfterNonOption) != None {
+ // If PassAfterNonOption is set then all remaining arguments
+ // are considered positional
+ if err := s.addArgs(s.arg); err != nil {
+ return err
+ }
+
+ if err := s.addArgs(s.args...); err != nil {
+ return err
+ }
+
+ s.args = []string{}
+ } else {
+ return s.addArgs(s.arg)
+ }
+
+ return nil
+}
+
+func (p *Parser) showBuiltinHelp() error {
+ var b bytes.Buffer
+
+ p.WriteHelp(&b)
+ return newError(ErrHelp, b.String())
+}
+
+func (p *Parser) printError(err error) error {
+ if err != nil && (p.Options&PrintErrors) != None {
+ fmt.Fprintln(os.Stderr, err)
+ }
+
+ return err
+}
+
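+// clearIsSet resets the isSet flag of every option in every command and
+// group, so the parser can be run again on a fresh argument list.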
+func (p *Parser) clearIsSet() {
+ p.eachCommand(func(c *Command) {
+ c.eachGroup(func(g *Group) {
+ for _, option := range g.options {
+ option.isSet = false
+ }
+ })
+ }, true)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/parser_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/parser_test.go
new file mode 100644
index 00000000000..b57dbee9a25
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/parser_test.go
@@ -0,0 +1,487 @@
+package flags
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+type defaultOptions struct {
+ Int int `long:"i"`
+ IntDefault int `long:"id" default:"1"`
+
+ Float64 float64 `long:"f"`
+ Float64Default float64 `long:"fd" default:"-3.14"`
+
+ NumericFlag bool `short:"3" default:"false"`
+
+ String string `long:"str"`
+ StringDefault string `long:"strd" default:"abc"`
+ StringNotUnquoted string `long:"strnot" unquote:"false"`
+
+ Time time.Duration `long:"t"`
+ TimeDefault time.Duration `long:"td" default:"1m"`
+
+ Map map[string]int `long:"m"`
+ MapDefault map[string]int `long:"md" default:"a:1"`
+
+ Slice []int `long:"s"`
+ SliceDefault []int `long:"sd" default:"1" default:"2"`
+}
+
+func TestDefaults(t *testing.T) {
+ var tests = []struct {
+ msg string
+ args []string
+ expected defaultOptions
+ }{
+ {
+ msg: "no arguments, expecting default values",
+ args: []string{},
+ expected: defaultOptions{
+ Int: 0,
+ IntDefault: 1,
+
+ Float64: 0.0,
+ Float64Default: -3.14,
+
+ NumericFlag: false,
+
+ String: "",
+ StringDefault: "abc",
+
+ Time: 0,
+ TimeDefault: time.Minute,
+
+ Map: map[string]int{},
+ MapDefault: map[string]int{"a": 1},
+
+ Slice: []int{},
+ SliceDefault: []int{1, 2},
+ },
+ },
+ {
+ msg: "non-zero value arguments, expecting overwritten arguments",
+ args: []string{"--i=3", "--id=3", "--f=-2.71", "--fd=2.71", "-3", "--str=def", "--strd=def", "--t=3ms", "--td=3ms", "--m=c:3", "--md=c:3", "--s=3", "--sd=3"},
+ expected: defaultOptions{
+ Int: 3,
+ IntDefault: 3,
+
+ Float64: -2.71,
+ Float64Default: 2.71,
+
+ NumericFlag: true,
+
+ String: "def",
+ StringDefault: "def",
+
+ Time: 3 * time.Millisecond,
+ TimeDefault: 3 * time.Millisecond,
+
+ Map: map[string]int{"c": 3},
+ MapDefault: map[string]int{"c": 3},
+
+ Slice: []int{3},
+ SliceDefault: []int{3},
+ },
+ },
+ {
+ msg: "zero value arguments, expecting overwritten arguments",
+ args: []string{"--i=0", "--id=0", "--f=0", "--fd=0", "--str", "", "--strd=\"\"", "--t=0ms", "--td=0s", "--m=:0", "--md=:0", "--s=0", "--sd=0"},
+ expected: defaultOptions{
+ Int: 0,
+ IntDefault: 0,
+
+ Float64: 0,
+ Float64Default: 0,
+
+ String: "",
+ StringDefault: "",
+
+ Time: 0,
+ TimeDefault: 0,
+
+ Map: map[string]int{"": 0},
+ MapDefault: map[string]int{"": 0},
+
+ Slice: []int{0},
+ SliceDefault: []int{0},
+ },
+ },
+ }
+
+ for _, test := range tests {
+ var opts defaultOptions
+
+ _, err := ParseArgs(&opts, test.args)
+ if err != nil {
+ t.Fatalf("%s:\nUnexpected error: %v", test.msg, err)
+ }
+
+ if opts.Slice == nil {
+ opts.Slice = []int{}
+ }
+
+ if !reflect.DeepEqual(opts, test.expected) {
+ t.Errorf("%s:\nUnexpected options with arguments %+v\nexpected\n%+v\nbut got\n%+v\n", test.msg, test.args, test.expected, opts)
+ }
+ }
+}
+
+func TestUnquoting(t *testing.T) {
+ var tests = []struct {
+ arg string
+ err error
+ value string
+ }{
+ {
+ arg: "\"abc",
+ err: strconv.ErrSyntax,
+ value: "",
+ },
+ {
+ arg: "\"\"abc\"",
+ err: strconv.ErrSyntax,
+ value: "",
+ },
+ {
+ arg: "\"abc\"",
+ err: nil,
+ value: "abc",
+ },
+ {
+ arg: "\"\\\"abc\\\"\"",
+ err: nil,
+ value: "\"abc\"",
+ },
+ {
+ arg: "\"\\\"abc\"",
+ err: nil,
+ value: "\"abc",
+ },
+ }
+
+ for _, test := range tests {
+ var opts defaultOptions
+
+ for _, delimiter := range []bool{false, true} {
+ p := NewParser(&opts, None)
+
+ var err error
+ if delimiter {
+ _, err = p.ParseArgs([]string{"--str=" + test.arg, "--strnot=" + test.arg})
+ } else {
+ _, err = p.ParseArgs([]string{"--str", test.arg, "--strnot", test.arg})
+ }
+
+ if test.err == nil {
+ if err != nil {
+ t.Fatalf("Expected no error but got: %v", err)
+ }
+
+ if test.value != opts.String {
+ t.Fatalf("Expected String to be %q but got %q", test.value, opts.String)
+ }
+ if q := strconv.Quote(test.value); q != opts.StringNotUnquoted {
+				t.Fatalf("Expected StringNotUnquoted to be %q but got %q", q, opts.StringNotUnquoted)
+ }
+ } else {
+ if err == nil {
+ t.Fatalf("Expected error")
+ } else if e, ok := err.(*Error); ok {
+				if !strings.HasSuffix(e.Message, test.err.Error()) {
+ t.Fatalf("Expected error message to end with %q but got %v", test.err.Error(), e.Message)
+ }
+ }
+ }
+ }
+ }
+}
+
+// envRestorer keeps a copy of a set of env variables and can restore the env from them
+type envRestorer struct {
+ env map[string]string
+}
+
+func (r *envRestorer) Restore() {
+ os.Clearenv()
+ for k, v := range r.env {
+ os.Setenv(k, v)
+ }
+}
+
+// EnvSnapshot returns a snapshot of the currently set env variables
+func EnvSnapshot() *envRestorer {
+ r := envRestorer{make(map[string]string)}
+ for _, kv := range os.Environ() {
+ parts := strings.SplitN(kv, "=", 2)
+ if len(parts) != 2 {
+ panic("got a weird env variable: " + kv)
+ }
+ r.env[parts[0]] = parts[1]
+ }
+ return &r
+}
+
+type envDefaultOptions struct {
+ Int int `long:"i" default:"1" env:"TEST_I"`
+ Time time.Duration `long:"t" default:"1m" env:"TEST_T"`
+ Map map[string]int `long:"m" default:"a:1" env:"TEST_M" env-delim:";"`
+ Slice []int `long:"s" default:"1" default:"2" env:"TEST_S" env-delim:","`
+}
+
+func TestEnvDefaults(t *testing.T) {
+ var tests = []struct {
+ msg string
+ args []string
+ expected envDefaultOptions
+ env map[string]string
+ }{
+ {
+ msg: "no arguments, no env, expecting default values",
+ args: []string{},
+ expected: envDefaultOptions{
+ Int: 1,
+ Time: time.Minute,
+ Map: map[string]int{"a": 1},
+ Slice: []int{1, 2},
+ },
+ },
+ {
+ msg: "no arguments, env defaults, expecting env default values",
+ args: []string{},
+ expected: envDefaultOptions{
+ Int: 2,
+ Time: 2 * time.Minute,
+ Map: map[string]int{"a": 2, "b": 3},
+ Slice: []int{4, 5, 6},
+ },
+ env: map[string]string{
+ "TEST_I": "2",
+ "TEST_T": "2m",
+ "TEST_M": "a:2;b:3",
+ "TEST_S": "4,5,6",
+ },
+ },
+ {
+ msg: "non-zero value arguments, expecting overwritten arguments",
+ args: []string{"--i=3", "--t=3ms", "--m=c:3", "--s=3"},
+ expected: envDefaultOptions{
+ Int: 3,
+ Time: 3 * time.Millisecond,
+ Map: map[string]int{"c": 3},
+ Slice: []int{3},
+ },
+ env: map[string]string{
+ "TEST_I": "2",
+ "TEST_T": "2m",
+ "TEST_M": "a:2;b:3",
+ "TEST_S": "4,5,6",
+ },
+ },
+ {
+ msg: "zero value arguments, expecting overwritten arguments",
+ args: []string{"--i=0", "--t=0ms", "--m=:0", "--s=0"},
+ expected: envDefaultOptions{
+ Int: 0,
+ Time: 0,
+ Map: map[string]int{"": 0},
+ Slice: []int{0},
+ },
+ env: map[string]string{
+ "TEST_I": "2",
+ "TEST_T": "2m",
+ "TEST_M": "a:2;b:3",
+ "TEST_S": "4,5,6",
+ },
+ },
+ }
+
+ oldEnv := EnvSnapshot()
+ defer oldEnv.Restore()
+
+ for _, test := range tests {
+ var opts envDefaultOptions
+ oldEnv.Restore()
+ for envKey, envValue := range test.env {
+ os.Setenv(envKey, envValue)
+ }
+ _, err := ParseArgs(&opts, test.args)
+ if err != nil {
+ t.Fatalf("%s:\nUnexpected error: %v", test.msg, err)
+ }
+
+ if opts.Slice == nil {
+ opts.Slice = []int{}
+ }
+
+ if !reflect.DeepEqual(opts, test.expected) {
+ t.Errorf("%s:\nUnexpected options with arguments %+v\nexpected\n%+v\nbut got\n%+v\n", test.msg, test.args, test.expected, opts)
+ }
+ }
+}
+
+func TestOptionAsArgument(t *testing.T) {
+ var tests = []struct {
+ args []string
+ expectError bool
+ errType ErrorType
+ errMsg string
+ rest []string
+ }{
+ {
+ // short option must not be accepted as argument
+ args: []string{"--string-slice", "foobar", "--string-slice", "-o"},
+ expectError: true,
+ errType: ErrExpectedArgument,
+ errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-o'",
+ },
+ {
+ // long option must not be accepted as argument
+ args: []string{"--string-slice", "foobar", "--string-slice", "--other-option"},
+ expectError: true,
+ errType: ErrExpectedArgument,
+ errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `--other-option'",
+ },
+ {
+			// double dash must not be accepted as argument
+ args: []string{"--string-slice", "--"},
+ expectError: true,
+ errType: ErrExpectedArgument,
+ errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got double dash `--'",
+ },
+ {
+ // quoted and appended option should be accepted as argument (even if it looks like an option)
+ args: []string{"--string-slice", "foobar", "--string-slice=\"--other-option\""},
+ },
+ {
+ // Accept any single character arguments including '-'
+ args: []string{"--string-slice", "-"},
+ },
+ {
+ // Do not accept arguments which start with '-' even if the next character is a digit
+ args: []string{"--string-slice", "-3.14"},
+ expectError: true,
+ errType: ErrExpectedArgument,
+ errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-3.14'",
+ },
+ {
+ // Do not accept arguments which start with '-' if the next character is not a digit
+ args: []string{"--string-slice", "-character"},
+ expectError: true,
+ errType: ErrExpectedArgument,
+ errMsg: "expected argument for flag `" + defaultLongOptDelimiter + "string-slice', but got option `-character'",
+ },
+ {
+ args: []string{"-o", "-", "-"},
+ rest: []string{"-", "-"},
+ },
+ }
+ var opts struct {
+ StringSlice []string `long:"string-slice"`
+ OtherOption bool `long:"other-option" short:"o"`
+ }
+
+ for _, test := range tests {
+ if test.expectError {
+ assertParseFail(t, test.errType, test.errMsg, &opts, test.args...)
+ } else {
+ args := assertParseSuccess(t, &opts, test.args...)
+
+ assertStringArray(t, args, test.rest)
+ }
+ }
+}
+
+func TestUnknownFlagHandler(t *testing.T) {
+
+ var opts struct {
+ Flag1 string `long:"flag1"`
+ Flag2 string `long:"flag2"`
+ }
+
+ p := NewParser(&opts, None)
+
+ var unknownFlag1 string
+ var unknownFlag2 bool
+ var unknownFlag3 string
+
+ // Set up a callback to intercept unknown options during parsing
+ p.UnknownOptionHandler = func(option string, arg SplitArgument, args []string) ([]string, error) {
+ if option == "unknownFlag1" {
+ if argValue, ok := arg.Value(); ok {
+ unknownFlag1 = argValue
+ return args, nil
+ }
+ // consume a value from remaining args list
+ unknownFlag1 = args[0]
+ return args[1:], nil
+ } else if option == "unknownFlag2" {
+ // treat this one as a bool switch, don't consume any args
+ unknownFlag2 = true
+ return args, nil
+ } else if option == "unknownFlag3" {
+ if argValue, ok := arg.Value(); ok {
+ unknownFlag3 = argValue
+ return args, nil
+ }
+ // consume a value from remaining args list
+ unknownFlag3 = args[0]
+ return args[1:], nil
+ }
+
+ return args, fmt.Errorf("Unknown flag: %v", option)
+ }
+
+ // Parse args containing some unknown flags, verify that
+ // our callback can handle all of them
+ _, err := p.ParseArgs([]string{"--flag1=stuff", "--unknownFlag1", "blah", "--unknownFlag2", "--unknownFlag3=baz", "--flag2=foo"})
+
+ if err != nil {
+ assertErrorf(t, "Parser returned unexpected error %v", err)
+ }
+
+ assertString(t, opts.Flag1, "stuff")
+ assertString(t, opts.Flag2, "foo")
+ assertString(t, unknownFlag1, "blah")
+ assertString(t, unknownFlag3, "baz")
+
+ if !unknownFlag2 {
+ assertErrorf(t, "Flag should have been set by unknown handler, but had value: %v", unknownFlag2)
+ }
+
+ // Parse args with unknown flags that callback doesn't handle, verify it returns error
+ _, err = p.ParseArgs([]string{"--flag1=stuff", "--unknownFlagX", "blah", "--flag2=foo"})
+
+ if err == nil {
+ assertErrorf(t, "Parser should have returned error, but returned nil")
+ }
+}
+
+func TestChoices(t *testing.T) {
+ var opts struct {
+ Choice string `long:"choose" choice:"v1" choice:"v2"`
+ }
+
+ assertParseFail(t, ErrInvalidChoice, "Invalid value `invalid' for option `"+defaultLongOptDelimiter+"choose'. Allowed values are: v1 or v2", &opts, "--choose", "invalid")
+ assertParseSuccess(t, &opts, "--choose", "v2")
+ assertString(t, opts.Choice, "v2")
+}
+
+func TestEmbedded(t *testing.T) {
+ type embedded struct {
+ V bool `short:"v"`
+ }
+ var opts struct {
+ embedded
+ }
+
+ assertParseSuccess(t, &opts, "-v")
+ if !opts.V {
+ t.Errorf("Expected V to be true")
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/pointer_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/pointer_test.go
new file mode 100644
index 00000000000..e17445f696c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/pointer_test.go
@@ -0,0 +1,81 @@
+package flags
+
+import (
+ "testing"
+)
+
+func TestPointerBool(t *testing.T) {
+ var opts = struct {
+ Value *bool `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-v")
+
+ assertStringArray(t, ret, []string{})
+
+ if !*opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+}
+
+func TestPointerString(t *testing.T) {
+ var opts = struct {
+ Value *string `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-v", "value")
+
+ assertStringArray(t, ret, []string{})
+ assertString(t, *opts.Value, "value")
+}
+
+func TestPointerSlice(t *testing.T) {
+ var opts = struct {
+ Value *[]string `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-v", "value1", "-v", "value2")
+
+ assertStringArray(t, ret, []string{})
+ assertStringArray(t, *opts.Value, []string{"value1", "value2"})
+}
+
+func TestPointerMap(t *testing.T) {
+ var opts = struct {
+ Value *map[string]int `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-v", "k1:2", "-v", "k2:-5")
+
+ assertStringArray(t, ret, []string{})
+
+ if v, ok := (*opts.Value)["k1"]; !ok {
+ t.Errorf("Expected key \"k1\" to exist")
+ } else if v != 2 {
+ t.Errorf("Expected \"k1\" to be 2, but got %#v", v)
+ }
+
+ if v, ok := (*opts.Value)["k2"]; !ok {
+ t.Errorf("Expected key \"k2\" to exist")
+ } else if v != -5 {
+ t.Errorf("Expected \"k2\" to be -5, but got %#v", v)
+ }
+}
+
+type PointerGroup struct {
+ Value bool `short:"v"`
+}
+
+func TestPointerGroup(t *testing.T) {
+ var opts = struct {
+ Group *PointerGroup `group:"Group Options"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-v")
+
+ assertStringArray(t, ret, []string{})
+
+ if !opts.Group.Value {
+ t.Errorf("Expected Group.Value to be true")
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/short_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/short_test.go
new file mode 100644
index 00000000000..95712c16238
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/short_test.go
@@ -0,0 +1,194 @@
+package flags
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestShort(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-v")
+
+ assertStringArray(t, ret, []string{})
+
+ if !opts.Value {
+ t.Errorf("Expected Value to be true")
+ }
+}
+
+func TestShortTooLong(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"vv"`
+ }{}
+
+ assertParseFail(t, ErrShortNameTooLong, "short names can only be 1 character long, not `vv'", &opts)
+}
+
+func TestShortRequired(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v" required:"true"`
+ }{}
+
+ assertParseFail(t, ErrRequired, fmt.Sprintf("the required flag `%cv' was not specified", defaultShortOptDelimiter), &opts)
+}
+
+func TestShortMultiConcat(t *testing.T) {
+ var opts = struct {
+ V bool `short:"v"`
+ O bool `short:"o"`
+ F bool `short:"f"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-vo", "-f")
+
+ assertStringArray(t, ret, []string{})
+
+ if !opts.V {
+ t.Errorf("Expected V to be true")
+ }
+
+ if !opts.O {
+ t.Errorf("Expected O to be true")
+ }
+
+ if !opts.F {
+ t.Errorf("Expected F to be true")
+ }
+}
+
+func TestShortMultiRequiredConcat(t *testing.T) {
+ var opts = struct {
+ V bool `short:"v" required:"true"`
+ O bool `short:"o" required:"true"`
+ F bool `short:"f" required:"true"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-vo", "-f")
+
+ assertStringArray(t, ret, []string{})
+
+ if !opts.V {
+ t.Errorf("Expected V to be true")
+ }
+
+ if !opts.O {
+ t.Errorf("Expected O to be true")
+ }
+
+ if !opts.F {
+ t.Errorf("Expected F to be true")
+ }
+}
+
+func TestShortMultiSlice(t *testing.T) {
+ var opts = struct {
+ Values []bool `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-v", "-v")
+
+ assertStringArray(t, ret, []string{})
+ assertBoolArray(t, opts.Values, []bool{true, true})
+}
+
+func TestShortMultiSliceConcat(t *testing.T) {
+ var opts = struct {
+ Values []bool `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-vvv")
+
+ assertStringArray(t, ret, []string{})
+ assertBoolArray(t, opts.Values, []bool{true, true, true})
+}
+
+func TestShortWithEqualArg(t *testing.T) {
+ var opts = struct {
+ Value string `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-v=value")
+
+ assertStringArray(t, ret, []string{})
+ assertString(t, opts.Value, "value")
+}
+
+func TestShortWithArg(t *testing.T) {
+ var opts = struct {
+ Value string `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-vvalue")
+
+ assertStringArray(t, ret, []string{})
+ assertString(t, opts.Value, "value")
+}
+
+func TestShortArg(t *testing.T) {
+ var opts = struct {
+ Value string `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-v", "value")
+
+ assertStringArray(t, ret, []string{})
+ assertString(t, opts.Value, "value")
+}
+
+func TestShortMultiWithEqualArg(t *testing.T) {
+ var opts = struct {
+ F []bool `short:"f"`
+ Value string `short:"v"`
+ }{}
+
+ assertParseFail(t, ErrExpectedArgument, fmt.Sprintf("expected argument for flag `%cv'", defaultShortOptDelimiter), &opts, "-ffv=value")
+}
+
+func TestShortMultiArg(t *testing.T) {
+ var opts = struct {
+ F []bool `short:"f"`
+ Value string `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-ffv", "value")
+
+ assertStringArray(t, ret, []string{})
+ assertBoolArray(t, opts.F, []bool{true, true})
+ assertString(t, opts.Value, "value")
+}
+
+func TestShortMultiArgConcatFail(t *testing.T) {
+ var opts = struct {
+ F []bool `short:"f"`
+ Value string `short:"v"`
+ }{}
+
+ assertParseFail(t, ErrExpectedArgument, fmt.Sprintf("expected argument for flag `%cv'", defaultShortOptDelimiter), &opts, "-ffvvalue")
+}
+
+func TestShortMultiArgConcat(t *testing.T) {
+ var opts = struct {
+ F []bool `short:"f"`
+ Value string `short:"v"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-vff")
+
+ assertStringArray(t, ret, []string{})
+ assertString(t, opts.Value, "ff")
+}
+
+func TestShortOptional(t *testing.T) {
+ var opts = struct {
+ F []bool `short:"f"`
+ Value string `short:"v" optional:"yes" optional-value:"value"`
+ }{}
+
+ ret := assertParseSuccess(t, &opts, "-fv", "f")
+
+ assertStringArray(t, ret, []string{"f"})
+ assertString(t, opts.Value, "value")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/tag_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/tag_test.go
new file mode 100644
index 00000000000..9daa7401b91
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/tag_test.go
@@ -0,0 +1,38 @@
+package flags
+
+import (
+ "testing"
+)
+
+func TestTagMissingColon(t *testing.T) {
+ var opts = struct {
+ Value bool `short`
+ }{}
+
+ assertParseFail(t, ErrTag, "expected `:' after key name, but got end of tag (in `short`)", &opts, "")
+}
+
+func TestTagMissingValue(t *testing.T) {
+ var opts = struct {
+ Value bool `short:`
+ }{}
+
+ assertParseFail(t, ErrTag, "expected `\"' to start tag value at end of tag (in `short:`)", &opts, "")
+}
+
+func TestTagMissingQuote(t *testing.T) {
+ var opts = struct {
+ Value bool `short:"v`
+ }{}
+
+ assertParseFail(t, ErrTag, "expected end of tag value `\"' at end of tag (in `short:\"v`)", &opts, "")
+}
+
+func TestTagNewline(t *testing.T) {
+ var opts = struct {
+ Value bool `long:"verbose" description:"verbose
+something"`
+ }{}
+
+ assertParseFail(t, ErrTag, "unexpected newline in tag value `description' (in `long:\"verbose\" description:\"verbose\nsomething\"`)", &opts, "")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize.go
new file mode 100644
index 00000000000..df97e7e821d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize.go
@@ -0,0 +1,28 @@
+// +build !windows,!plan9,!solaris
+
+package flags
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+type winsize struct {
+ row, col uint16
+ xpixel, ypixel uint16
+}
+
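+// getTerminalColumns queries the terminal width with the TIOCGWINSZ ioctl on
+// stdin, falling back to 80 columns on platforms where the ioctl number is
+// not known.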
+func getTerminalColumns() int {
+ ws := winsize{}
+
+ if tIOCGWINSZ != 0 {
+ syscall.Syscall(syscall.SYS_IOCTL,
+ uintptr(0),
+ uintptr(tIOCGWINSZ),
+ uintptr(unsafe.Pointer(&ws)))
+
+ return int(ws.col)
+ }
+
+ return 80
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_linux.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_linux.go
new file mode 100644
index 00000000000..e3975e2835f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_linux.go
@@ -0,0 +1,7 @@
+// +build linux
+
+package flags
+
+const (
+ tIOCGWINSZ = 0x5413
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_nosysioctl.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_nosysioctl.go
new file mode 100644
index 00000000000..2a9bbe005cb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_nosysioctl.go
@@ -0,0 +1,7 @@
+// +build windows plan9 solaris
+
+package flags
+
+func getTerminalColumns() int {
+ return 80
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_other.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_other.go
new file mode 100644
index 00000000000..308215155ea
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_other.go
@@ -0,0 +1,7 @@
+// +build !darwin,!freebsd,!netbsd,!openbsd,!linux
+
+package flags
+
+const (
+ tIOCGWINSZ = 0
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_unix.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_unix.go
new file mode 100644
index 00000000000..fcc11860101
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/termsize_unix.go
@@ -0,0 +1,7 @@
+// +build darwin freebsd netbsd openbsd
+
+package flags
+
+const (
+ tIOCGWINSZ = 0x40087468
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/unknown_test.go b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/unknown_test.go
new file mode 100644
index 00000000000..858be45885e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jessevdk/go-flags/unknown_test.go
@@ -0,0 +1,66 @@
+package flags
+
+import (
+ "testing"
+)
+
+func TestUnknownFlags(t *testing.T) {
+ var opts = struct {
+ Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
+ }{}
+
+ args := []string{
+ "-f",
+ }
+
+ p := NewParser(&opts, 0)
+	_, err := p.ParseArgs(args)
+
+ if err == nil {
+ t.Fatal("Expected error for unknown argument")
+ }
+}
+
+func TestIgnoreUnknownFlags(t *testing.T) {
+ var opts = struct {
+ Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
+ }{}
+
+ args := []string{
+ "hello",
+ "world",
+ "-v",
+ "--foo=bar",
+ "--verbose",
+ "-f",
+ }
+
+ p := NewParser(&opts, IgnoreUnknown)
+ args, err := p.ParseArgs(args)
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ exargs := []string{
+ "hello",
+ "world",
+ "--foo=bar",
+ "-f",
+ }
+
+ issame := (len(args) == len(exargs))
+
+ if issame {
+ for i := 0; i < len(args); i++ {
+ if args[i] != exargs[i] {
+ issame = false
+ break
+ }
+ }
+ }
+
+ if !issame {
+ t.Fatalf("Expected %v but got %v", exargs, args)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jtolds/gls/LICENSE b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/LICENSE
new file mode 100644
index 00000000000..9b4a822d92c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2013, Space Monkey, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/mongo/gotools/vendor/src/github.com/jtolds/gls/README.md b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/README.md
new file mode 100644
index 00000000000..4ebb692fb18
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/README.md
@@ -0,0 +1,89 @@
+gls
+===
+
+Goroutine local storage
+
+### IMPORTANT NOTE ###
+
+It is my duty to point you to https://blog.golang.org/context, which is how
+Google solves all of the problems you'd perhaps consider using this package
+for at scale.
+
+One downside to Google's approach is that *all* of your functions must have
+a new first argument, but after clearing that hurdle everything else is much
+better.
+
+If you aren't interested in this warning, read on.
+
+### Huhwaht? Why? ###
+
+Every so often, a thread shows up on the
+[golang-nuts](https://groups.google.com/d/forum/golang-nuts) asking for some
+form of goroutine-local-storage, or some kind of goroutine id, or some kind of
+context. There are a few valid use cases for goroutine-local-storage, one of
+the most prominent being log line context. One poster was interested in being
+able to log an HTTP request context id in every log line in the same goroutine
+as the incoming HTTP request, without having to change every library and
+function call he was interested in logging.
+
+This would be pretty useful. Provided that you could get some kind of
+goroutine-local-storage, you could call
+[log.SetOutput](http://golang.org/pkg/log/#SetOutput) with your own logging
+writer that checks goroutine-local-storage for some context information and
+adds that context to your log lines.
+
+But alas, Andrew Gerrand's typically diplomatic answer to the question of
+goroutine-local variables was:
+
+> We wouldn't even be having this discussion if thread local storage wasn't
+> useful. But every feature comes at a cost, and in my opinion the cost of
+> threadlocals far outweighs their benefits. They're just not a good fit for
+> Go.
+
+So, yeah, that makes sense. That's a pretty good reason why the language
+won't support a specific and (relatively) unuseful feature that requires some
+runtime changes, just for the sake of a little bit of log improvement.
+
+But does Go require runtime changes?
+
+### How it works ###
+
+Go has pretty fantastic introspective and reflective features, but one thing Go
+doesn't give you is any kind of access to the stack pointer, or frame pointer,
+or goroutine id, or anything contextual about your current stack. It gives you
+access to your list of callers, but only along with program counters, which are
+fixed at compile time.
+
+But it does give you the stack.
+
+So, we define 16 special functions and embed base-16 tags into the stack using
+the call order of those 16 functions. Then, we can read our tags back out of
+the stack looking at the callers list.
+
+We then use these tags as an index into a traditional map for implementing
+this library.
+
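+To make that concrete, here is a minimal, self-contained sketch of the
+mechanism (a hypothetical toy, not this package's actual code: it uses base-2
+with two marker functions instead of the base-16 and sixteen markers used
+here, and has no tag pool or locking):
+
+```go
+package main
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+)
+
+var digits = map[uintptr]uint{} // marker entry point -> digit value
+var marks []func(uint, func())
+
+// Each marker just leaves an identifiable frame on the stack and recurses.
+func mark0(n uint, cb func()) { dispatch(n, cb) }
+func mark1(n uint, cb func()) { dispatch(n, cb) }
+
+func dispatch(n uint, cb func()) {
+	if n == 0 {
+		cb() // no digits left to encode: run the wrapped function
+		return
+	}
+	marks[n&1](n>>1, cb) // encode the lowest bit as a marker frame
+}
+
+// readTag recovers the encoded integer from the caller list.
+func readTag() (tag uint) {
+	pcs := make([]uintptr, 64)
+	for _, pc := range pcs[:runtime.Callers(1, pcs)] {
+		if f := runtime.FuncForPC(pc); f != nil {
+			if d, ok := digits[f.Entry()]; ok {
+				tag = tag<<1 | d // innermost marker holds the highest bit
+			}
+		}
+	}
+	return tag
+}
+
+func main() {
+	for i, f := range []func(uint, func()){mark0, mark1} {
+		marks = append(marks, f)
+		digits[reflect.ValueOf(f).Pointer()] = uint(i)
+	}
+	dispatch(6, func() { fmt.Println(readTag()) }) // prints 6
+}
+```
+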
+### What are people saying? ###
+
+"Wow, that's horrifying."
+
+"This is the most terrible thing I have seen in a very long time."
+
+"Where is it getting a context from? Is this serializing all the requests?
+What the heck is the client being bound to? What are these tags? Why does he
+need callers? Oh god no. No no no."
+
+### Docs ###
+
+Please see the docs at http://godoc.org/github.com/jtolds/gls
+
+### Related ###
+
+If you're okay relying on the string format of the current runtime stacktrace
+including a unique goroutine id (not guaranteed by the spec or anything, but
+very unlikely to change within a Go release), you might be able to squeeze
+out a bit more performance by using this similar library, inspired by some
+code Brad Fitzpatrick wrote for debugging his HTTP/2 library:
+https://github.com/tylerb/gls (in contrast, jtolds/gls doesn't rely on
+the string format of the runtime stacktrace at all, which is likely where
+its extra overhead comes from).
diff --git a/src/mongo/gotools/vendor/src/github.com/jtolds/gls/context.go b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/context.go
new file mode 100644
index 00000000000..90cfcf7db17
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/context.go
@@ -0,0 +1,144 @@
+// Package gls implements goroutine-local storage.
+package gls
+
+import (
+ "sync"
+)
+
+const (
+ maxCallers = 64
+)
+
+var (
+ stackTagPool = &idPool{}
+ mgrRegistry = make(map[*ContextManager]bool)
+ mgrRegistryMtx sync.RWMutex
+)
+
+// Values is simply a map of key types to value types. Used by SetValues to
+// set multiple values at once.
+type Values map[interface{}]interface{}
+
+// ContextManager is the main entrypoint for interacting with
+// Goroutine-local-storage. You can have multiple independent ContextManagers
+// at any given time. ContextManagers are usually declared globally for a given
+// class of context variables. You should use NewContextManager for
+// construction.
+type ContextManager struct {
+ mtx sync.RWMutex
+ values map[uint]Values
+}
+
+// NewContextManager returns a brand new ContextManager. It also registers the
+// new ContextManager in the ContextManager registry which is used by the Go
+// method. ContextManagers are typically defined globally at package scope.
+func NewContextManager() *ContextManager {
+ mgr := &ContextManager{values: make(map[uint]Values)}
+ mgrRegistryMtx.Lock()
+ defer mgrRegistryMtx.Unlock()
+ mgrRegistry[mgr] = true
+ return mgr
+}
+
+// Unregister removes a ContextManager from the global registry, used by the
+// Go method. Only intended for use when you're completely done with a
+// ContextManager. Use of Unregister at all is rare.
+func (m *ContextManager) Unregister() {
+ mgrRegistryMtx.Lock()
+ defer mgrRegistryMtx.Unlock()
+ delete(mgrRegistry, m)
+}
+
+// SetValues takes a collection of values and a function to call with those
+// values set. Anything further down the stack will have the set
+// values available through GetValue. SetValues will add new values or replace
+// existing values of the same key and will not mutate or change values for
+// previous stack frames.
+// SetValues is slow (makes a copy of all current and new values for the new
+// gls-context) in order to reduce the amount of lookups GetValue requires.
+func (m *ContextManager) SetValues(new_values Values, context_call func()) {
+ if len(new_values) == 0 {
+ context_call()
+ return
+ }
+
+ tags := readStackTags(1)
+
+ m.mtx.Lock()
+ values := new_values
+ for _, tag := range tags {
+ if existing_values, ok := m.values[tag]; ok {
+ // oh, we found existing values, let's make a copy
+ values = make(Values, len(existing_values)+len(new_values))
+ for key, val := range existing_values {
+ values[key] = val
+ }
+ for key, val := range new_values {
+ values[key] = val
+ }
+ break
+ }
+ }
+ new_tag := stackTagPool.Acquire()
+ m.values[new_tag] = values
+ m.mtx.Unlock()
+ defer func() {
+ m.mtx.Lock()
+ delete(m.values, new_tag)
+ m.mtx.Unlock()
+ stackTagPool.Release(new_tag)
+ }()
+
+ addStackTag(new_tag, context_call)
+}
+
+// GetValue will return a previously set value, provided that the value was set
+// by SetValues somewhere higher up the stack. If the value is not found, ok
+// will be false.
+func (m *ContextManager) GetValue(key interface{}) (value interface{}, ok bool) {
+
+ tags := readStackTags(1)
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+ for _, tag := range tags {
+ if values, ok := m.values[tag]; ok {
+ value, ok := values[key]
+ return value, ok
+ }
+ }
+	return nil, false
+}
+
+func (m *ContextManager) getValues() Values {
+ tags := readStackTags(2)
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+ for _, tag := range tags {
+ if values, ok := m.values[tag]; ok {
+ return values
+ }
+ }
+ return nil
+}
+
+// Go preserves ContextManager values and Goroutine-local-storage across new
+// goroutine invocations. The Go method makes a copy of all existing values on
+// all registered context managers and makes sure they are still set after
+// kicking off the provided function in a new goroutine. If you don't use this
+// Go method instead of the standard 'go' keyword, you will lose values in
+// ContextManagers, as goroutines have brand new stacks.
+func Go(cb func()) {
+ mgrRegistryMtx.RLock()
+ defer mgrRegistryMtx.RUnlock()
+
+	for mgr := range mgrRegistry {
+ values := mgr.getValues()
+ if len(values) > 0 {
+ mgr_copy := mgr
+ cb_copy := cb
+ cb = func() { mgr_copy.SetValues(values, cb_copy) }
+ }
+ }
+
+ go cb()
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jtolds/gls/context_test.go b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/context_test.go
new file mode 100644
index 00000000000..ae5bde4aede
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/context_test.go
@@ -0,0 +1,139 @@
+package gls
+
+import (
+ "fmt"
+ "sync"
+ "testing"
+)
+
+func TestContexts(t *testing.T) {
+ mgr1 := NewContextManager()
+ mgr2 := NewContextManager()
+
+ CheckVal := func(mgr *ContextManager, key, exp_val string) {
+ val, ok := mgr.GetValue(key)
+ if len(exp_val) == 0 {
+ if ok {
+ t.Fatalf("expected no value for key %s, got %s", key, val)
+ }
+ return
+ }
+ if !ok {
+ t.Fatalf("expected value %s for key %s, got no value",
+ exp_val, key)
+ }
+ if exp_val != val {
+ t.Fatalf("expected value %s for key %s, got %s", exp_val, key,
+ val)
+ }
+
+ }
+
+ Check := func(exp_m1v1, exp_m1v2, exp_m2v1, exp_m2v2 string) {
+ CheckVal(mgr1, "key1", exp_m1v1)
+ CheckVal(mgr1, "key2", exp_m1v2)
+ CheckVal(mgr2, "key1", exp_m2v1)
+ CheckVal(mgr2, "key2", exp_m2v2)
+ }
+
+ Check("", "", "", "")
+ mgr2.SetValues(Values{"key1": "val1c"}, func() {
+ Check("", "", "val1c", "")
+ mgr1.SetValues(Values{"key1": "val1a"}, func() {
+ Check("val1a", "", "val1c", "")
+ mgr1.SetValues(Values{"key2": "val1b"}, func() {
+ Check("val1a", "val1b", "val1c", "")
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ Check("", "", "", "")
+ }()
+ Go(func() {
+ defer wg.Done()
+ Check("val1a", "val1b", "val1c", "")
+ })
+ wg.Wait()
+ })
+ })
+ })
+}
+
+func ExampleContextManager_SetValues() {
+ var (
+ mgr = NewContextManager()
+ request_id_key = GenSym()
+ )
+
+ MyLog := func() {
+ if request_id, ok := mgr.GetValue(request_id_key); ok {
+ fmt.Println("My request id is:", request_id)
+ } else {
+ fmt.Println("No request id found")
+ }
+ }
+
+ mgr.SetValues(Values{request_id_key: "12345"}, func() {
+ MyLog()
+ })
+ MyLog()
+
+ // Output: My request id is: 12345
+ // No request id found
+}
+
+func ExampleGo() {
+ var (
+ mgr = NewContextManager()
+ request_id_key = GenSym()
+ )
+
+ MyLog := func() {
+ if request_id, ok := mgr.GetValue(request_id_key); ok {
+ fmt.Println("My request id is:", request_id)
+ } else {
+ fmt.Println("No request id found")
+ }
+ }
+
+ mgr.SetValues(Values{request_id_key: "12345"}, func() {
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ MyLog()
+ }()
+ wg.Wait()
+ wg.Add(1)
+ Go(func() {
+ defer wg.Done()
+ MyLog()
+ })
+ wg.Wait()
+ })
+
+ // Output: No request id found
+ // My request id is: 12345
+}
+
+func BenchmarkGetValue(b *testing.B) {
+ mgr := NewContextManager()
+ mgr.SetValues(Values{"test_key": "test_val"}, func() {
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ val, ok := mgr.GetValue("test_key")
+ if !ok || val != "test_val" {
+ b.FailNow()
+ }
+ }
+ })
+}
+
+func BenchmarkSetValues(b *testing.B) {
+ mgr := NewContextManager()
+ for i := 0; i < b.N/2; i++ {
+ mgr.SetValues(Values{"test_key": "test_val"}, func() {
+ mgr.SetValues(Values{"test_key2": "test_val2"}, func() {})
+ })
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jtolds/gls/gen_sym.go b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/gen_sym.go
new file mode 100644
index 00000000000..8d5fc24d4a4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/gen_sym.go
@@ -0,0 +1,13 @@
+package gls
+
+var (
+ symPool = &idPool{}
+)
+
+// ContextKey is a throwaway value you can use as a key to a ContextManager
+type ContextKey struct{ id uint }
+
+// GenSym will return a brand new, never-before-used ContextKey
+func GenSym() ContextKey {
+ return ContextKey{id: symPool.Acquire()}
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jtolds/gls/id_pool.go b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/id_pool.go
new file mode 100644
index 00000000000..b7974ae0026
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/id_pool.go
@@ -0,0 +1,34 @@
+package gls
+
+// though this could probably be better at keeping ids small, the goal of
+// this type is to keep a per-process registry of the smallest unique
+// integer ids possible
+
+import (
+ "sync"
+)
+
+type idPool struct {
+ mtx sync.Mutex
+ released []uint
+ max_id uint
+}
+
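+// Acquire hands out an id, reusing released ids (most recently released
+// first) before minting a new one.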
+func (p *idPool) Acquire() (id uint) {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+ if len(p.released) > 0 {
+ id = p.released[len(p.released)-1]
+ p.released = p.released[:len(p.released)-1]
+ return id
+ }
+ id = p.max_id
+ p.max_id++
+ return id
+}
+
+func (p *idPool) Release(id uint) {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+ p.released = append(p.released, id)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags.go b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags.go
new file mode 100644
index 00000000000..9b8e39ba7c2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags.go
@@ -0,0 +1,43 @@
+package gls
+
+// so, basically, we're going to encode integer tags in base-16 on the stack
+
+const (
+ bitWidth = 4
+)
+
+func addStackTag(tag uint, context_call func()) {
+ if context_call == nil {
+ return
+ }
+ markS(tag, context_call)
+}
+
+func markS(tag uint, cb func()) { _m(tag, cb) }
+func mark0(tag uint, cb func()) { _m(tag, cb) }
+func mark1(tag uint, cb func()) { _m(tag, cb) }
+func mark2(tag uint, cb func()) { _m(tag, cb) }
+func mark3(tag uint, cb func()) { _m(tag, cb) }
+func mark4(tag uint, cb func()) { _m(tag, cb) }
+func mark5(tag uint, cb func()) { _m(tag, cb) }
+func mark6(tag uint, cb func()) { _m(tag, cb) }
+func mark7(tag uint, cb func()) { _m(tag, cb) }
+func mark8(tag uint, cb func()) { _m(tag, cb) }
+func mark9(tag uint, cb func()) { _m(tag, cb) }
+func markA(tag uint, cb func()) { _m(tag, cb) }
+func markB(tag uint, cb func()) { _m(tag, cb) }
+func markC(tag uint, cb func()) { _m(tag, cb) }
+func markD(tag uint, cb func()) { _m(tag, cb) }
+func markE(tag uint, cb func()) { _m(tag, cb) }
+func markF(tag uint, cb func()) { _m(tag, cb) }
+
+var pc_lookup = make(map[uintptr]int8, 17)
+var mark_lookup [16]func(uint, func())
+
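+// _m peels off one base-16 digit per call: it dispatches through mark_lookup
+// on the lowest four bits, leaving that mark function's frame on the stack,
+// and recurses on the remaining bits; once no bits are left it invokes the
+// callback.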
+func _m(tag_remainder uint, cb func()) {
+ if tag_remainder == 0 {
+ cb()
+ } else {
+ mark_lookup[tag_remainder&0xf](tag_remainder>>bitWidth, cb)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags_js.go b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags_js.go
new file mode 100644
index 00000000000..21d5595926b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags_js.go
@@ -0,0 +1,101 @@
+// +build js
+
+package gls
+
+// This file is used for GopherJS builds, which don't have normal runtime support
+
+import (
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/gopherjs/gopherjs/js"
+)
+
+var stackRE = regexp.MustCompile("\\s+at (\\S*) \\([^:]+:(\\d+):(\\d+)")
+
+func findPtr() uintptr {
+ jsStack := js.Global.Get("Error").New().Get("stack").Call("split", "\n")
+ for i := 1; i < jsStack.Get("length").Int(); i++ {
+ item := jsStack.Index(i).String()
+ matches := stackRE.FindAllStringSubmatch(item, -1)
+ if matches == nil {
+ return 0
+ }
+ pkgPath := matches[0][1]
+ if strings.HasPrefix(pkgPath, "$packages.github.com/jtolds/gls.mark") {
+ line, _ := strconv.Atoi(matches[0][2])
+ char, _ := strconv.Atoi(matches[0][3])
+			x := (uintptr(line) << 16) | uintptr(char)&0xffff
+ return x
+ }
+ }
+
+ return 0
+}
+
+func init() {
+ setEntries := func(f func(uint, func()), v int8) {
+ var ptr uintptr
+ f(0, func() {
+ ptr = findPtr()
+ })
+ pc_lookup[ptr] = v
+ if v >= 0 {
+ mark_lookup[v] = f
+ }
+ }
+ setEntries(markS, -0x1)
+ setEntries(mark0, 0x0)
+ setEntries(mark1, 0x1)
+ setEntries(mark2, 0x2)
+ setEntries(mark3, 0x3)
+ setEntries(mark4, 0x4)
+ setEntries(mark5, 0x5)
+ setEntries(mark6, 0x6)
+ setEntries(mark7, 0x7)
+ setEntries(mark8, 0x8)
+ setEntries(mark9, 0x9)
+ setEntries(markA, 0xa)
+ setEntries(markB, 0xb)
+ setEntries(markC, 0xc)
+ setEntries(markD, 0xd)
+ setEntries(markE, 0xe)
+ setEntries(markF, 0xf)
+}
+
+func currentStack(skip int) (stack []uintptr) {
+ jsStack := js.Global.Get("Error").New().Get("stack").Call("split", "\n")
+ for i := skip + 2; i < jsStack.Get("length").Int(); i++ {
+ item := jsStack.Index(i).String()
+ matches := stackRE.FindAllStringSubmatch(item, -1)
+ if matches == nil {
+ return stack
+ }
+ line, _ := strconv.Atoi(matches[0][2])
+ char, _ := strconv.Atoi(matches[0][3])
+ x := (uintptr(line) << 16) | uintptr(char)&0xffff
+ stack = append(stack, x)
+ }
+
+ return stack
+}
+
+func readStackTags(skip int) (tags []uint) {
+ stack := currentStack(skip)
+ var current_tag uint
+ for _, pc := range stack {
+ val, ok := pc_lookup[pc]
+ if !ok {
+ continue
+ }
+ if val < 0 {
+ tags = append(tags, current_tag)
+ current_tag = 0
+ continue
+ }
+ current_tag <<= bitWidth
+ current_tag += uint(val)
+ }
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags_main.go b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags_main.go
new file mode 100644
index 00000000000..cb302b9ef63
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/jtolds/gls/stack_tags_main.go
@@ -0,0 +1,61 @@
+// +build !js
+
+package gls
+
+// This file is used for standard Go builds, which have the expected runtime support
+
+import (
+ "reflect"
+ "runtime"
+)
+
+func init() {
+ setEntries := func(f func(uint, func()), v int8) {
+ pc_lookup[reflect.ValueOf(f).Pointer()] = v
+ if v >= 0 {
+ mark_lookup[v] = f
+ }
+ }
+ setEntries(markS, -0x1)
+ setEntries(mark0, 0x0)
+ setEntries(mark1, 0x1)
+ setEntries(mark2, 0x2)
+ setEntries(mark3, 0x3)
+ setEntries(mark4, 0x4)
+ setEntries(mark5, 0x5)
+ setEntries(mark6, 0x6)
+ setEntries(mark7, 0x7)
+ setEntries(mark8, 0x8)
+ setEntries(mark9, 0x9)
+ setEntries(markA, 0xa)
+ setEntries(markB, 0xb)
+ setEntries(markC, 0xc)
+ setEntries(markD, 0xd)
+ setEntries(markE, 0xe)
+ setEntries(markF, 0xf)
+}
+
+func currentStack(skip int) []uintptr {
+ stack := make([]uintptr, maxCallers)
+ return stack[:runtime.Callers(3+skip, stack)]
+}
+
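+// readStackTags walks the current goroutine's callers and translates runs of
+// mark function frames back into the integer tags they encode; each markS
+// sentinel frame closes off one tag.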
+func readStackTags(skip int) (tags []uint) {
+ stack := currentStack(skip)
+ var current_tag uint
+ for _, pc := range stack {
+ pc = runtime.FuncForPC(pc).Entry()
+ val, ok := pc_lookup[pc]
+ if !ok {
+ continue
+ }
+ if val < 0 {
+ tags = append(tags, current_tag)
+ current_tag = 0
+ continue
+ }
+ current_tag <<= bitWidth
+ current_tag += uint(val)
+ }
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/.travis.yml b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/.travis.yml
new file mode 100644
index 00000000000..5c9c2a30f07
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+ - tip
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+script:
+ - $HOME/gopath/bin/goveralls -repotoken lAKAWPzcGsD3A8yBX3BGGtRUdJ6CaGERL
diff --git a/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/README.mkd b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/README.mkd
new file mode 100644
index 00000000000..ffb0edd2c4e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/README.mkd
@@ -0,0 +1,26 @@
+go-runewidth
+============
+
+[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
+[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD)
+[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
+
+Provides functions to get the fixed width of a character or string.
+
+Usage
+-----
+
+```go
+runewidth.StringWidth("つのだ☆HIRO") == 12
+```
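+
+The package also provides width-aware truncation and padding. A small sketch,
+sticking to characters whose width does not depend on the East Asian
+ambiguous-width setting:
+
+```go
+runewidth.RuneWidth('界') == 2
+runewidth.Truncate("abcdefgh", 5, "...") == "ab..."
+runewidth.FillRight("あ", 4) == "あ  " // padded with spaces to 4 cells
+```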
+
+
+Author
+------
+
+Yasuhiro Matsumoto
+
+License
+-------
+
+under the MIT License: http://mattn.mit-license.org/2013
diff --git a/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth.go b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth.go
new file mode 100644
index 00000000000..3fbf33d595e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth.go
@@ -0,0 +1,464 @@
+package runewidth
+
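+// EastAsianWidth is the default ambiguous-width setting, detected from the
+// environment's locale by IsEastAsian; DefaultCondition applies it for the
+// package-level helper functions.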
+var EastAsianWidth = IsEastAsian()
+var DefaultCondition = &Condition{EastAsianWidth}
+
+type interval struct {
+ first rune
+ last rune
+}
+
+var combining = []interval{
+ {0x0300, 0x036F}, {0x0483, 0x0486}, {0x0488, 0x0489},
+ {0x0591, 0x05BD}, {0x05BF, 0x05BF}, {0x05C1, 0x05C2},
+ {0x05C4, 0x05C5}, {0x05C7, 0x05C7}, {0x0600, 0x0603},
+ {0x0610, 0x0615}, {0x064B, 0x065E}, {0x0670, 0x0670},
+ {0x06D6, 0x06E4}, {0x06E7, 0x06E8}, {0x06EA, 0x06ED},
+ {0x070F, 0x070F}, {0x0711, 0x0711}, {0x0730, 0x074A},
+ {0x07A6, 0x07B0}, {0x07EB, 0x07F3}, {0x0901, 0x0902},
+ {0x093C, 0x093C}, {0x0941, 0x0948}, {0x094D, 0x094D},
+ {0x0951, 0x0954}, {0x0962, 0x0963}, {0x0981, 0x0981},
+ {0x09BC, 0x09BC}, {0x09C1, 0x09C4}, {0x09CD, 0x09CD},
+ {0x09E2, 0x09E3}, {0x0A01, 0x0A02}, {0x0A3C, 0x0A3C},
+ {0x0A41, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D},
+ {0x0A70, 0x0A71}, {0x0A81, 0x0A82}, {0x0ABC, 0x0ABC},
+ {0x0AC1, 0x0AC5}, {0x0AC7, 0x0AC8}, {0x0ACD, 0x0ACD},
+ {0x0AE2, 0x0AE3}, {0x0B01, 0x0B01}, {0x0B3C, 0x0B3C},
+ {0x0B3F, 0x0B3F}, {0x0B41, 0x0B43}, {0x0B4D, 0x0B4D},
+ {0x0B56, 0x0B56}, {0x0B82, 0x0B82}, {0x0BC0, 0x0BC0},
+ {0x0BCD, 0x0BCD}, {0x0C3E, 0x0C40}, {0x0C46, 0x0C48},
+ {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, {0x0CBC, 0x0CBC},
+ {0x0CBF, 0x0CBF}, {0x0CC6, 0x0CC6}, {0x0CCC, 0x0CCD},
+ {0x0CE2, 0x0CE3}, {0x0D41, 0x0D43}, {0x0D4D, 0x0D4D},
+ {0x0DCA, 0x0DCA}, {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6},
+ {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E},
+ {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC},
+ {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35},
+ {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F71, 0x0F7E},
+ {0x0F80, 0x0F84}, {0x0F86, 0x0F87}, {0x0F90, 0x0F97},
+ {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102D, 0x1030},
+ {0x1032, 0x1032}, {0x1036, 0x1037}, {0x1039, 0x1039},
+ {0x1058, 0x1059}, {0x1160, 0x11FF}, {0x135F, 0x135F},
+ {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753},
+ {0x1772, 0x1773}, {0x17B4, 0x17B5}, {0x17B7, 0x17BD},
+ {0x17C6, 0x17C6}, {0x17C9, 0x17D3}, {0x17DD, 0x17DD},
+ {0x180B, 0x180D}, {0x18A9, 0x18A9}, {0x1920, 0x1922},
+ {0x1927, 0x1928}, {0x1932, 0x1932}, {0x1939, 0x193B},
+ {0x1A17, 0x1A18}, {0x1B00, 0x1B03}, {0x1B34, 0x1B34},
+ {0x1B36, 0x1B3A}, {0x1B3C, 0x1B3C}, {0x1B42, 0x1B42},
+ {0x1B6B, 0x1B73}, {0x1DC0, 0x1DCA}, {0x1DFE, 0x1DFF},
+ {0x200B, 0x200F}, {0x202A, 0x202E}, {0x2060, 0x2063},
+ {0x206A, 0x206F}, {0x20D0, 0x20EF}, {0x302A, 0x302F},
+ {0x3099, 0x309A}, {0xA806, 0xA806}, {0xA80B, 0xA80B},
+ {0xA825, 0xA826}, {0xFB1E, 0xFB1E}, {0xFE00, 0xFE0F},
+ {0xFE20, 0xFE23}, {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB},
+ {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F},
+ {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x1D167, 0x1D169},
+ {0x1D173, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
+ {0x1D242, 0x1D244}, {0xE0001, 0xE0001}, {0xE0020, 0xE007F},
+ {0xE0100, 0xE01EF},
+}
+
+type ctype int
+
+const (
+ narrow ctype = iota
+ ambiguous
+ wide
+ halfwidth
+ fullwidth
+ neutral
+)
+
+type intervalType struct {
+ first rune
+ last rune
+ ctype ctype
+}
+
+var ctypes = []intervalType{
+ {0x0020, 0x007E, narrow},
+ {0x00A1, 0x00A1, ambiguous},
+ {0x00A2, 0x00A3, narrow},
+ {0x00A4, 0x00A4, ambiguous},
+ {0x00A5, 0x00A6, narrow},
+ {0x00A7, 0x00A8, ambiguous},
+ {0x00AA, 0x00AA, ambiguous},
+ {0x00AC, 0x00AC, narrow},
+ {0x00AD, 0x00AE, ambiguous},
+ {0x00AF, 0x00AF, narrow},
+ {0x00B0, 0x00B4, ambiguous},
+ {0x00B6, 0x00BA, ambiguous},
+ {0x00BC, 0x00BF, ambiguous},
+ {0x00C6, 0x00C6, ambiguous},
+ {0x00D0, 0x00D0, ambiguous},
+ {0x00D7, 0x00D8, ambiguous},
+ {0x00DE, 0x00E1, ambiguous},
+ {0x00E6, 0x00E6, ambiguous},
+ {0x00E8, 0x00EA, ambiguous},
+ {0x00EC, 0x00ED, ambiguous},
+ {0x00F0, 0x00F0, ambiguous},
+ {0x00F2, 0x00F3, ambiguous},
+ {0x00F7, 0x00FA, ambiguous},
+ {0x00FC, 0x00FC, ambiguous},
+ {0x00FE, 0x00FE, ambiguous},
+ {0x0101, 0x0101, ambiguous},
+ {0x0111, 0x0111, ambiguous},
+ {0x0113, 0x0113, ambiguous},
+ {0x011B, 0x011B, ambiguous},
+ {0x0126, 0x0127, ambiguous},
+ {0x012B, 0x012B, ambiguous},
+ {0x0131, 0x0133, ambiguous},
+ {0x0138, 0x0138, ambiguous},
+ {0x013F, 0x0142, ambiguous},
+ {0x0144, 0x0144, ambiguous},
+ {0x0148, 0x014B, ambiguous},
+ {0x014D, 0x014D, ambiguous},
+ {0x0152, 0x0153, ambiguous},
+ {0x0166, 0x0167, ambiguous},
+ {0x016B, 0x016B, ambiguous},
+ {0x01CE, 0x01CE, ambiguous},
+ {0x01D0, 0x01D0, ambiguous},
+ {0x01D2, 0x01D2, ambiguous},
+ {0x01D4, 0x01D4, ambiguous},
+ {0x01D6, 0x01D6, ambiguous},
+ {0x01D8, 0x01D8, ambiguous},
+ {0x01DA, 0x01DA, ambiguous},
+ {0x01DC, 0x01DC, ambiguous},
+ {0x0251, 0x0251, ambiguous},
+ {0x0261, 0x0261, ambiguous},
+ {0x02C4, 0x02C4, ambiguous},
+ {0x02C7, 0x02C7, ambiguous},
+ {0x02C9, 0x02CB, ambiguous},
+ {0x02CD, 0x02CD, ambiguous},
+ {0x02D0, 0x02D0, ambiguous},
+ {0x02D8, 0x02DB, ambiguous},
+ {0x02DD, 0x02DD, ambiguous},
+ {0x02DF, 0x02DF, ambiguous},
+ {0x0300, 0x036F, ambiguous},
+ {0x0391, 0x03A2, ambiguous},
+ {0x03A3, 0x03A9, ambiguous},
+ {0x03B1, 0x03C1, ambiguous},
+ {0x03C3, 0x03C9, ambiguous},
+ {0x0401, 0x0401, ambiguous},
+ {0x0410, 0x044F, ambiguous},
+ {0x0451, 0x0451, ambiguous},
+ {0x1100, 0x115F, wide},
+ {0x2010, 0x2010, ambiguous},
+ {0x2013, 0x2016, ambiguous},
+ {0x2018, 0x2019, ambiguous},
+ {0x201C, 0x201D, ambiguous},
+ {0x2020, 0x2022, ambiguous},
+ {0x2024, 0x2027, ambiguous},
+ {0x2030, 0x2030, ambiguous},
+ {0x2032, 0x2033, ambiguous},
+ {0x2035, 0x2035, ambiguous},
+ {0x203B, 0x203B, ambiguous},
+ {0x203E, 0x203E, ambiguous},
+ {0x2074, 0x2074, ambiguous},
+ {0x207F, 0x207F, ambiguous},
+ {0x2081, 0x2084, ambiguous},
+ {0x20A9, 0x20A9, halfwidth},
+ {0x20AC, 0x20AC, ambiguous},
+ {0x2103, 0x2103, ambiguous},
+ {0x2105, 0x2105, ambiguous},
+ {0x2109, 0x2109, ambiguous},
+ {0x2113, 0x2113, ambiguous},
+ {0x2116, 0x2116, ambiguous},
+ {0x2121, 0x2122, ambiguous},
+ {0x2126, 0x2126, ambiguous},
+ {0x212B, 0x212B, ambiguous},
+ {0x2153, 0x2154, ambiguous},
+ {0x215B, 0x215E, ambiguous},
+ {0x2160, 0x216B, ambiguous},
+ {0x2170, 0x2179, ambiguous},
+ {0x2189, 0x218A, ambiguous},
+ {0x2190, 0x2199, ambiguous},
+ {0x21B8, 0x21B9, ambiguous},
+ {0x21D2, 0x21D2, ambiguous},
+ {0x21D4, 0x21D4, ambiguous},
+ {0x21E7, 0x21E7, ambiguous},
+ {0x2200, 0x2200, ambiguous},
+ {0x2202, 0x2203, ambiguous},
+ {0x2207, 0x2208, ambiguous},
+ {0x220B, 0x220B, ambiguous},
+ {0x220F, 0x220F, ambiguous},
+ {0x2211, 0x2211, ambiguous},
+ {0x2215, 0x2215, ambiguous},
+ {0x221A, 0x221A, ambiguous},
+ {0x221D, 0x2220, ambiguous},
+ {0x2223, 0x2223, ambiguous},
+ {0x2225, 0x2225, ambiguous},
+ {0x2227, 0x222C, ambiguous},
+ {0x222E, 0x222E, ambiguous},
+ {0x2234, 0x2237, ambiguous},
+ {0x223C, 0x223D, ambiguous},
+ {0x2248, 0x2248, ambiguous},
+ {0x224C, 0x224C, ambiguous},
+ {0x2252, 0x2252, ambiguous},
+ {0x2260, 0x2261, ambiguous},
+ {0x2264, 0x2267, ambiguous},
+ {0x226A, 0x226B, ambiguous},
+ {0x226E, 0x226F, ambiguous},
+ {0x2282, 0x2283, ambiguous},
+ {0x2286, 0x2287, ambiguous},
+ {0x2295, 0x2295, ambiguous},
+ {0x2299, 0x2299, ambiguous},
+ {0x22A5, 0x22A5, ambiguous},
+ {0x22BF, 0x22BF, ambiguous},
+ {0x2312, 0x2312, ambiguous},
+ {0x2329, 0x232A, wide},
+ {0x2460, 0x24E9, ambiguous},
+ {0x24EB, 0x254B, ambiguous},
+ {0x2550, 0x2573, ambiguous},
+ {0x2580, 0x258F, ambiguous},
+ {0x2592, 0x2595, ambiguous},
+ {0x25A0, 0x25A1, ambiguous},
+ {0x25A3, 0x25A9, ambiguous},
+ {0x25B2, 0x25B3, ambiguous},
+ {0x25B6, 0x25B7, ambiguous},
+ {0x25BC, 0x25BD, ambiguous},
+ {0x25C0, 0x25C1, ambiguous},
+ {0x25C6, 0x25C8, ambiguous},
+ {0x25CB, 0x25CB, ambiguous},
+ {0x25CE, 0x25D1, ambiguous},
+ {0x25E2, 0x25E5, ambiguous},
+ {0x25EF, 0x25EF, ambiguous},
+ {0x2605, 0x2606, ambiguous},
+ {0x2609, 0x2609, ambiguous},
+ {0x260E, 0x260F, ambiguous},
+ {0x2614, 0x2615, ambiguous},
+ {0x261C, 0x261C, ambiguous},
+ {0x261E, 0x261E, ambiguous},
+ {0x2640, 0x2640, ambiguous},
+ {0x2642, 0x2642, ambiguous},
+ {0x2660, 0x2661, ambiguous},
+ {0x2663, 0x2665, ambiguous},
+ {0x2667, 0x266A, ambiguous},
+ {0x266C, 0x266D, ambiguous},
+ {0x266F, 0x266F, ambiguous},
+ {0x269E, 0x269F, ambiguous},
+ {0x26BE, 0x26BF, ambiguous},
+ {0x26C4, 0x26CD, ambiguous},
+ {0x26CF, 0x26E1, ambiguous},
+ {0x26E3, 0x26E3, ambiguous},
+ {0x26E8, 0x26FF, ambiguous},
+ {0x273D, 0x273D, ambiguous},
+ {0x2757, 0x2757, ambiguous},
+ {0x2776, 0x277F, ambiguous},
+ {0x27E6, 0x27ED, narrow},
+ {0x2985, 0x2986, narrow},
+ {0x2B55, 0x2B59, ambiguous},
+ {0x2E80, 0x2E9A, wide},
+ {0x2E9B, 0x2EF4, wide},
+ {0x2F00, 0x2FD6, wide},
+ {0x2FF0, 0x2FFC, wide},
+ {0x3000, 0x3000, fullwidth},
+ {0x3001, 0x303E, wide},
+ {0x3041, 0x3097, wide},
+ {0x3099, 0x3100, wide},
+ {0x3105, 0x312E, wide},
+ {0x3131, 0x318F, wide},
+ {0x3190, 0x31BB, wide},
+ {0x31C0, 0x31E4, wide},
+ {0x31F0, 0x321F, wide},
+ {0x3220, 0x3247, wide},
+ {0x3248, 0x324F, ambiguous},
+ {0x3250, 0x32FF, wide},
+ {0x3300, 0x4DBF, wide},
+ {0x4E00, 0xA48D, wide},
+ {0xA490, 0xA4C7, wide},
+ {0xA960, 0xA97D, wide},
+ {0xAC00, 0xD7A4, wide},
+ {0xE000, 0xF8FF, ambiguous},
+ {0xF900, 0xFAFF, wide},
+ {0xFE00, 0xFE0F, ambiguous},
+ {0xFE10, 0xFE1A, wide},
+ {0xFE30, 0xFE53, wide},
+ {0xFE54, 0xFE67, wide},
+ {0xFE68, 0xFE6C, wide},
+ {0xFF01, 0xFF60, fullwidth},
+ {0xFF61, 0xFFBF, halfwidth},
+ {0xFFC2, 0xFFC8, halfwidth},
+ {0xFFCA, 0xFFD0, halfwidth},
+ {0xFFD2, 0xFFD8, halfwidth},
+ {0xFFDA, 0xFFDD, halfwidth},
+ {0xFFE0, 0xFFE7, fullwidth},
+ {0xFFE8, 0xFFEF, halfwidth},
+ {0xFFFD, 0xFFFE, ambiguous},
+ {0x1B000, 0x1B002, wide},
+ {0x1F100, 0x1F10A, ambiguous},
+ {0x1F110, 0x1F12D, ambiguous},
+ {0x1F130, 0x1F169, ambiguous},
+ {0x1F170, 0x1F19B, ambiguous},
+ {0x1F200, 0x1F203, wide},
+ {0x1F210, 0x1F23B, wide},
+ {0x1F240, 0x1F249, wide},
+ {0x1F250, 0x1F252, wide},
+ {0x20000, 0x2FFFE, wide},
+ {0x30000, 0x3FFFE, wide},
+ {0xE0100, 0xE01F0, ambiguous},
+ {0xF0000, 0xFFFFD, ambiguous},
+ {0x100000, 0x10FFFE, ambiguous},
+}
+
+type Condition struct {
+ EastAsianWidth bool
+}
+
+func NewCondition() *Condition {
+ return &Condition{EastAsianWidth}
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func (c *Condition) RuneWidth(r rune) int {
+ if r == 0 {
+ return 0
+ }
+ if r < 32 || (r >= 0x7f && r < 0xa0) {
+ return 1
+ }
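+	// Combining characters (e.g. U+0300) attach to the preceding rune and
+	// occupy no cells of their own.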
+ for _, iv := range combining {
+ if iv.first <= r && r <= iv.last {
+ return 0
+ }
+ }
+
+ if c.EastAsianWidth && IsAmbiguousWidth(r) {
+ return 2
+ }
+
+ if r >= 0x1100 &&
+ (r <= 0x115f || r == 0x2329 || r == 0x232a ||
+ (r >= 0x2e80 && r <= 0xa4cf && r != 0x303f) ||
+ (r >= 0xac00 && r <= 0xd7a3) ||
+ (r >= 0xf900 && r <= 0xfaff) ||
+ (r >= 0xfe30 && r <= 0xfe6f) ||
+ (r >= 0xff00 && r <= 0xff60) ||
+ (r >= 0xffe0 && r <= 0xffe6) ||
+ (r >= 0x20000 && r <= 0x2fffd) ||
+ (r >= 0x30000 && r <= 0x3fffd)) {
+ return 2
+ }
+ return 1
+}
+
+func (c *Condition) StringWidth(s string) (width int) {
+ for _, r := range []rune(s) {
+ width += c.RuneWidth(r)
+ }
+ return width
+}
+
+func (c *Condition) Truncate(s string, w int, tail string) string {
+ if c.StringWidth(s) <= w {
+ return s
+ }
+ r := []rune(s)
+ tw := c.StringWidth(tail)
+ w -= tw
+ width := 0
+ i := 0
+ for ; i < len(r); i++ {
+ cw := c.RuneWidth(r[i])
+ if width+cw > w {
+ break
+ }
+ width += cw
+ }
+ return string(r[0:i]) + tail
+}
+
+func (c *Condition) Wrap(s string, w int) string {
+ width := 0
+ out := ""
+ for _, r := range []rune(s) {
+		cw := c.RuneWidth(r) // use this Condition's EastAsianWidth setting, not the package default
+ if r == '\n' {
+ out += string(r)
+ width = 0
+ continue
+ } else if width+cw > w {
+ out += "\n"
+ width = 0
+ out += string(r)
+ width += cw
+ continue
+ }
+ out += string(r)
+ width += cw
+ }
+ return out
+}
+
+func (c *Condition) FillLeft(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return string(b) + s
+ }
+ return s
+}
+
+func (c *Condition) FillRight(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return s + string(b)
+ }
+ return s
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func RuneWidth(r rune) int {
+ return DefaultCondition.RuneWidth(r)
+}
+
+func ct(r rune) ctype {
+ for _, iv := range ctypes {
+ if iv.first <= r && r <= iv.last {
+ return iv.ctype
+ }
+ }
+ return neutral
+}
+
+// IsAmbiguousWidth reports whether r has ambiguous width under the Unicode
+// East Asian Width property.
+func IsAmbiguousWidth(r rune) bool {
+ return ct(r) == ambiguous
+}
+
+// IsNeutralWidth reports whether r has neutral width.
+func IsNeutralWidth(r rune) bool {
+ return ct(r) == neutral
+}
+
+func StringWidth(s string) (width int) {
+ return DefaultCondition.StringWidth(s)
+}
+
+func Truncate(s string, w int, tail string) string {
+ return DefaultCondition.Truncate(s, w, tail)
+}
+
+func Wrap(s string, w int) string {
+ return DefaultCondition.Wrap(s, w)
+}
+
+func FillLeft(s string, w int) string {
+ return DefaultCondition.FillLeft(s, w)
+}
+
+func FillRight(s string, w int) string {
+ return DefaultCondition.FillRight(s, w)
+}
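+
+// Usage sketch (illustrative only, not part of the package API): the
+// package-level helpers above delegate to DefaultCondition, while a custom
+// Condition lets callers opt in to treating ambiguous-width runes as wide,
+// as the tests in runewidth_test.go do:
+//
+//	c := runewidth.NewCondition()
+//	c.EastAsianWidth = true
+//	_ = c.StringWidth("スター☆")              // 8 cells under this condition
+//	_ = runewidth.Truncate("hello", 4, "...") // "h...", at most 4 cells wide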
diff --git a/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_js.go b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_js.go
new file mode 100644
index 00000000000..0ce32c5e7b7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_js.go
@@ -0,0 +1,8 @@
+// +build js
+
+package runewidth
+
+func IsEastAsian() bool {
+	// TODO: Implement this for the web. Detect East Asian locales in a compatible way, and return true for them.
+ return false
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_posix.go b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_posix.go
new file mode 100644
index 00000000000..a4495909d88
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_posix.go
@@ -0,0 +1,69 @@
+// +build !windows,!js
+
+package runewidth
+
+import (
+ "os"
+ "regexp"
+ "strings"
+)
+
+var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
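+// reLoc matches locale names such as "ja_JP.UTF-8", capturing the charset
+// component after the dot ("UTF-8").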
+
+func IsEastAsian() bool {
+ locale := os.Getenv("LC_CTYPE")
+ if locale == "" {
+ locale = os.Getenv("LANG")
+ }
+
+ // ignore C locale
+ if locale == "POSIX" || locale == "C" {
+ return false
+ }
+ if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
+ return false
+ }
+
+ charset := strings.ToLower(locale)
+ r := reLoc.FindStringSubmatch(locale)
+ if len(r) == 2 {
+ charset = strings.ToLower(r[1])
+ }
+
+ if strings.HasSuffix(charset, "@cjk_narrow") {
+ return false
+ }
+
+ for pos, b := range []byte(charset) {
+ if b == '@' {
+ charset = charset[:pos]
+ break
+ }
+ }
+
+ mbc_max := 1
+ switch charset {
+ case "utf-8", "utf8":
+ mbc_max = 6
+ case "jis":
+ mbc_max = 8
+ case "eucjp":
+ mbc_max = 3
+ case "euckr", "euccn":
+ mbc_max = 2
+ case "sjis", "cp932", "cp51932", "cp936", "cp949", "cp950":
+ mbc_max = 2
+ case "big5":
+ mbc_max = 2
+ case "gbk", "gb2312":
+ mbc_max = 2
+ }
+
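+	// A multibyte charset implies an East Asian locale, unless the charset is
+	// Unicode (utf-8/utf8) and the language is not ja, ko, or zh.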
+ if mbc_max > 1 && (charset[0] != 'u' ||
+ strings.HasPrefix(locale, "ja") ||
+ strings.HasPrefix(locale, "ko") ||
+ strings.HasPrefix(locale, "zh")) {
+ return true
+ }
+ return false
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_test.go b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_test.go
new file mode 100644
index 00000000000..f9431282c76
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_test.go
@@ -0,0 +1,229 @@
+package runewidth
+
+import (
+ "testing"
+)
+
+var runewidthtests = []struct {
+ in rune
+ out int
+}{
+ {'世', 2},
+ {'界', 2},
+ {'セ', 1},
+ {'カ', 1},
+ {'イ', 1},
+	{'☆', 2}, // ambiguous width: counts as double width when EastAsianWidth is set
+ {'\x00', 0},
+ {'\x01', 1},
+ {'\u0300', 0},
+}
+
+func TestRuneWidth(t *testing.T) {
+ c := NewCondition()
+ c.EastAsianWidth = true
+ for _, tt := range runewidthtests {
+ if out := c.RuneWidth(tt.in); out != tt.out {
+ t.Errorf("Width(%q) = %q, want %q", tt.in, out, tt.out)
+ }
+ }
+}
+
+var isambiguouswidthtests = []struct {
+ in rune
+ out bool
+}{
+ {'世', false},
+ {'■', true},
+ {'界', false},
+ {'○', true},
+ {'㈱', false},
+ {'①', true},
+ {'②', true},
+ {'③', true},
+ {'④', true},
+ {'⑤', true},
+ {'⑥', true},
+ {'⑦', true},
+ {'⑧', true},
+ {'⑨', true},
+ {'⑩', true},
+ {'⑪', true},
+ {'⑫', true},
+ {'⑬', true},
+ {'⑭', true},
+ {'⑮', true},
+ {'⑯', true},
+ {'⑰', true},
+ {'⑱', true},
+ {'⑲', true},
+ {'⑳', true},
+ {'☆', true},
+}
+
+func TestIsAmbiguousWidth(t *testing.T) {
+ for _, tt := range isambiguouswidthtests {
+ if out := IsAmbiguousWidth(tt.in); out != tt.out {
+ t.Errorf("IsAmbiguousWidth(%q) = %q, want %q", tt.in, out, tt.out)
+ }
+ }
+}
+
+var stringwidthtests = []struct {
+ in string
+ out int
+}{
+ {"■㈱の世界①", 12},
+ {"スター☆", 8},
+}
+
+func TestStringWidth(t *testing.T) {
+ c := NewCondition()
+ c.EastAsianWidth = true
+ for _, tt := range stringwidthtests {
+ if out := c.StringWidth(tt.in); out != tt.out {
+ t.Errorf("StringWidth(%q) = %q, want %q", tt.in, out, tt.out)
+ }
+ }
+}
+
+func TestStringWidthInvalid(t *testing.T) {
+ s := "こんにちわ\x00世界"
+ if out := StringWidth(s); out != 14 {
+ t.Errorf("StringWidth(%q) = %q, want %q", s, out, 14)
+ }
+}
+
+func TestTruncateSmaller(t *testing.T) {
+ s := "あいうえお"
+ expected := "あいうえお"
+
+ if out := Truncate(s, 10, "..."); out != expected {
+ t.Errorf("Truncate(%q) = %q, want %q", s, out, expected)
+ }
+}
+
+func TestTruncate(t *testing.T) {
+ s := "あいうえおあいうえおえおおおおおおおおおおおおおおおおおおおおおおおおおおおおおお"
+ expected := "あいうえおあいうえおえおおおおおおおおおおおおおおおおおおおおおおおおおおお..."
+
+ out := Truncate(s, 80, "...")
+ if out != expected {
+ t.Errorf("Truncate(%q) = %q, want %q", s, out, expected)
+ }
+ width := StringWidth(out)
+ if width != 79 {
+ t.Errorf("width of Truncate(%q) should be %d, but %d", s, 79, width)
+ }
+}
+
+func TestTruncateFit(t *testing.T) {
+ s := "aあいうえおあいうえおえおおおおおおおおおおおおおおおおおおおおおおおおおおおおおお"
+ expected := "aあいうえおあいうえおえおおおおおおおおおおおおおおおおおおおおおおおおおおお..."
+
+ out := Truncate(s, 80, "...")
+ if out != expected {
+ t.Errorf("Truncate(%q) = %q, want %q", s, out, expected)
+ }
+ width := StringWidth(out)
+ if width != 80 {
+ t.Errorf("width of Truncate(%q) should be %d, but %d", s, 80, width)
+ }
+}
+
+func TestTruncateJustFit(t *testing.T) {
+ s := "あいうえおあいうえおえおおおおおおおおおおおおおおおおおおおおおおおおおおおおお"
+ expected := "あいうえおあいうえおえおおおおおおおおおおおおおおおおおおおおおおおおおおおおお"
+
+ out := Truncate(s, 80, "...")
+ if out != expected {
+ t.Errorf("Truncate(%q) = %q, want %q", s, out, expected)
+ }
+ width := StringWidth(out)
+ if width != 80 {
+ t.Errorf("width of Truncate(%q) should be %d, but %d", s, 80, width)
+ }
+}
+
+func TestWrap(t *testing.T) {
+ s := `東京特許許可局局長はよく柿喰う客だ/東京特許許可局局長はよく柿喰う客だ
+123456789012345678901234567890
+
+END`
+ expected := `東京特許許可局局長はよく柿喰う
+客だ/東京特許許可局局長はよく
+柿喰う客だ
+123456789012345678901234567890
+
+END`
+
+ if out := Wrap(s, 30); out != expected {
+ t.Errorf("Wrap(%q) = %q, want %q", s, out, expected)
+ }
+}
+
+func TestTruncateNoNeeded(t *testing.T) {
+ s := "あいうえおあい"
+ expected := "あいうえおあい"
+
+ if out := Truncate(s, 80, "..."); out != expected {
+ t.Errorf("Truncate(%q) = %q, want %q", s, out, expected)
+ }
+}
+
+var isneutralwidthtests = []struct {
+ in rune
+ out bool
+}{
+ {'→', false},
+ {'┊', false},
+ {'┈', false},
+ {'~', false},
+ {'└', false},
+ {'⣀', true},
+ {'⣀', true},
+}
+
+func TestIsNeutralWidth(t *testing.T) {
+ for _, tt := range isneutralwidthtests {
+ if out := IsNeutralWidth(tt.in); out != tt.out {
+ t.Errorf("IsNeutralWidth(%q) = %q, want %q", tt.in, out, tt.out)
+ }
+ }
+}
+
+func TestFillLeft(t *testing.T) {
+ s := "あxいうえお"
+ expected := " あxいうえお"
+
+ if out := FillLeft(s, 15); out != expected {
+ t.Errorf("FillLeft(%q) = %q, want %q", s, out, expected)
+ }
+}
+
+func TestFillLeftFit(t *testing.T) {
+ s := "あいうえお"
+ expected := "あいうえお"
+
+ if out := FillLeft(s, 10); out != expected {
+ t.Errorf("FillLeft(%q) = %q, want %q", s, out, expected)
+ }
+}
+
+func TestFillRight(t *testing.T) {
+ s := "あxいうえお"
+ expected := "あxいうえお "
+
+ if out := FillRight(s, 15); out != expected {
+ t.Errorf("FillRight(%q) = %q, want %q", s, out, expected)
+ }
+}
+
+func TestFillRightFit(t *testing.T) {
+ s := "あいうえお"
+ expected := "あいうえお"
+
+ if out := FillRight(s, 10); out != expected {
+ t.Errorf("FillRight(%q) = %q, want %q", s, out, expected)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_windows.go b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_windows.go
new file mode 100644
index 00000000000..bdd84454bec
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/mattn/go-runewidth/runewidth_windows.go
@@ -0,0 +1,24 @@
+package runewidth
+
+import (
+ "syscall"
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32")
+ procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
+)
+
+func IsEastAsian() bool {
+ r1, _, _ := procGetConsoleOutputCP.Call()
+ if r1 == 0 {
+ return false
+ }
+
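+	// CJK console output code pages: 932 and 51932 (Japanese), 936 (Simplified
+	// Chinese), 949 (Korean), 950 (Traditional Chinese).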
+ switch int(r1) {
+ case 932, 51932, 936, 949, 950:
+ return true
+ }
+
+ return false
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/AUTHORS b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/AUTHORS
new file mode 100644
index 00000000000..fe26fb0fb05
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/AUTHORS
@@ -0,0 +1,4 @@
+# Please keep this file sorted.
+
+Georg Reinke <guelfey@googlemail.com>
+nsf <no.smile.face@gmail.com>
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/LICENSE b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/LICENSE
new file mode 100644
index 00000000000..d9bc068ce74
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/LICENSE
@@ -0,0 +1,19 @@
+Copyright (C) 2012 termbox-go authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/README.md b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/README.md
new file mode 100644
index 00000000000..9a7b35602ed
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/README.md
@@ -0,0 +1,28 @@
+## Termbox
+Termbox is a library that provides a minimalistic API for writing text-based user interfaces. The library is cross-platform: it has terminal-based implementations for *nix operating systems and a WinAPI console-based implementation for Windows. The basic idea is an abstraction of the greatest common subset of features available on all major terminals and other terminal-like APIs, in a minimalistic fashion. A small API is easy to implement, test, maintain, and learn, which is what makes termbox a distinct library in its area.
+
+### Installation
+Install and update this go package with `go get -u github.com/nsf/termbox-go`
+
+### Examples
+For examples of what can be done, take a look at the demos in the _demos directory. You can try them with go run: `go run _demos/keyboard.go`
+
+There are also some interesting projects using termbox-go:
+ - [godit](https://github.com/nsf/godit) is an emacsish lightweight text editor written using termbox.
+ - [gomatrix](https://github.com/GeertJohan/gomatrix) connects to The Matrix and displays its data streams in your terminal.
+ - [gotetris](https://github.com/jjinux/gotetris) is an implementation of Tetris.
+ - [sokoban-go](https://github.com/rn2dy/sokoban-go) is an implementation of the Sokoban game.
+ - [hecate](https://github.com/evanmiller/hecate) is a hex editor designed by Satan.
+ - [httopd](https://github.com/verdverm/httopd) is top for httpd logs.
+ - [mop](https://github.com/michaeldv/mop) is a stock market tracker for hackers.
+ - [termui](https://github.com/gizak/termui) is a terminal dashboard.
+ - [termloop](https://github.com/JoelOtter/termloop) is a terminal game engine.
+ - [xterm-color-chart](https://github.com/kutuluk/xterm-color-chart) is an XTerm 256 color chart.
+ - [gocui](https://github.com/jroimartin/gocui) is a minimalist Go library aimed at creating console user interfaces.
+ - [dry](https://github.com/moncho/dry) is an interactive CLI to manage Docker containers.
+ - [pxl](https://github.com/ichinaski/pxl) displays images in the terminal.
+ - [snake-game](https://github.com/DyegoCosta/snake-game) is an implementation of the Snake game.
+ - [gone](https://github.com/guillaumebreton/gone) is a CLI pomodoro® timer.
+
+### API reference
+[godoc.org/github.com/nsf/termbox-go](http://godoc.org/github.com/nsf/termbox-go)
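+
+### Minimal example
+A minimal sketch (not from the upstream README) showing the core loop with only the `Init`/`SetCell`/`Flush`/`PollEvent` API used by the demos above:
+
+```go
+package main
+
+import "github.com/nsf/termbox-go"
+
+func main() {
+	if err := termbox.Init(); err != nil {
+		panic(err)
+	}
+	defer termbox.Close()
+
+	// Draw a message one cell at a time, then block until ESC is pressed.
+	for i, c := range "Hello, termbox! Press ESC to quit." {
+		termbox.SetCell(i, 0, c, termbox.ColorDefault, termbox.ColorDefault)
+	}
+	termbox.Flush()
+
+	for {
+		if ev := termbox.PollEvent(); ev.Type == termbox.EventKey && ev.Key == termbox.KeyEsc {
+			return
+		}
+	}
+}
+```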
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/editbox.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/editbox.go
new file mode 100644
index 00000000000..e429080d313
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/editbox.go
@@ -0,0 +1,300 @@
+package main
+
+import (
+ "github.com/mattn/go-runewidth"
+ "github.com/nsf/termbox-go"
+ "unicode/utf8"
+)
+
+func tbprint(x, y int, fg, bg termbox.Attribute, msg string) {
+ for _, c := range msg {
+ termbox.SetCell(x, y, c, fg, bg)
+ x += runewidth.RuneWidth(c)
+ }
+}
+
+func fill(x, y, w, h int, cell termbox.Cell) {
+ for ly := 0; ly < h; ly++ {
+ for lx := 0; lx < w; lx++ {
+ termbox.SetCell(x+lx, y+ly, cell.Ch, cell.Fg, cell.Bg)
+ }
+ }
+}
+
+func rune_advance_len(r rune, pos int) int {
+ if r == '\t' {
+ return tabstop_length - pos%tabstop_length
+ }
+ return runewidth.RuneWidth(r)
+}
+
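+// voffset_coffset converts a byte offset into text into the corresponding
+// visual offset (in cells, accounting for tabs and wide runes) and code-point
+// offset.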
+func voffset_coffset(text []byte, boffset int) (voffset, coffset int) {
+ text = text[:boffset]
+ for len(text) > 0 {
+ r, size := utf8.DecodeRune(text)
+ text = text[size:]
+ coffset += 1
+ voffset += rune_advance_len(r, voffset)
+ }
+ return
+}
+
+func byte_slice_grow(s []byte, desired_cap int) []byte {
+ if cap(s) < desired_cap {
+ ns := make([]byte, len(s), desired_cap)
+ copy(ns, s)
+ return ns
+ }
+ return s
+}
+
+func byte_slice_remove(text []byte, from, to int) []byte {
+ size := to - from
+ copy(text[from:], text[to:])
+ text = text[:len(text)-size]
+ return text
+}
+
+func byte_slice_insert(text []byte, offset int, what []byte) []byte {
+ n := len(text) + len(what)
+ text = byte_slice_grow(text, n)
+ text = text[:n]
+ copy(text[offset+len(what):], text[offset:])
+ copy(text[offset:], what)
+ return text
+}
+
+const preferred_horizontal_threshold = 5
+const tabstop_length = 8
+
+type EditBox struct {
+ text []byte
+ line_voffset int
+ cursor_boffset int // cursor offset in bytes
+ cursor_voffset int // visual cursor offset in termbox cells
+ cursor_coffset int // cursor offset in unicode code points
+}
+
+// Draw draws the EditBox at the given location; 'h' is currently unused.
+func (eb *EditBox) Draw(x, y, w, h int) {
+ eb.AdjustVOffset(w)
+
+ const coldef = termbox.ColorDefault
+ fill(x, y, w, h, termbox.Cell{Ch: ' '})
+
+ t := eb.text
+ lx := 0
+ tabstop := 0
+ for {
+ rx := lx - eb.line_voffset
+ if len(t) == 0 {
+ break
+ }
+
+ if lx == tabstop {
+ tabstop += tabstop_length
+ }
+
+ if rx >= w {
+ termbox.SetCell(x+w-1, y, '→',
+ coldef, coldef)
+ break
+ }
+
+ r, size := utf8.DecodeRune(t)
+ if r == '\t' {
+ for ; lx < tabstop; lx++ {
+ rx = lx - eb.line_voffset
+ if rx >= w {
+ goto next
+ }
+
+ if rx >= 0 {
+ termbox.SetCell(x+rx, y, ' ', coldef, coldef)
+ }
+ }
+ } else {
+ if rx >= 0 {
+ termbox.SetCell(x+rx, y, r, coldef, coldef)
+ }
+ lx += runewidth.RuneWidth(r)
+ }
+ next:
+ t = t[size:]
+ }
+
+ if eb.line_voffset != 0 {
+ termbox.SetCell(x, y, '←', coldef, coldef)
+ }
+}
+
+// AdjustVOffset adjusts the line's visual offset so that the cursor stays within the given width.
+func (eb *EditBox) AdjustVOffset(width int) {
+ ht := preferred_horizontal_threshold
+ max_h_threshold := (width - 1) / 2
+ if ht > max_h_threshold {
+ ht = max_h_threshold
+ }
+
+ threshold := width - 1
+ if eb.line_voffset != 0 {
+ threshold = width - ht
+ }
+ if eb.cursor_voffset-eb.line_voffset >= threshold {
+ eb.line_voffset = eb.cursor_voffset + (ht - width + 1)
+ }
+
+ if eb.line_voffset != 0 && eb.cursor_voffset-eb.line_voffset < ht {
+ eb.line_voffset = eb.cursor_voffset - ht
+ if eb.line_voffset < 0 {
+ eb.line_voffset = 0
+ }
+ }
+}
+
+func (eb *EditBox) MoveCursorTo(boffset int) {
+ eb.cursor_boffset = boffset
+ eb.cursor_voffset, eb.cursor_coffset = voffset_coffset(eb.text, boffset)
+}
+
+func (eb *EditBox) RuneUnderCursor() (rune, int) {
+ return utf8.DecodeRune(eb.text[eb.cursor_boffset:])
+}
+
+func (eb *EditBox) RuneBeforeCursor() (rune, int) {
+ return utf8.DecodeLastRune(eb.text[:eb.cursor_boffset])
+}
+
+func (eb *EditBox) MoveCursorOneRuneBackward() {
+ if eb.cursor_boffset == 0 {
+ return
+ }
+ _, size := eb.RuneBeforeCursor()
+ eb.MoveCursorTo(eb.cursor_boffset - size)
+}
+
+func (eb *EditBox) MoveCursorOneRuneForward() {
+ if eb.cursor_boffset == len(eb.text) {
+ return
+ }
+ _, size := eb.RuneUnderCursor()
+ eb.MoveCursorTo(eb.cursor_boffset + size)
+}
+
+func (eb *EditBox) MoveCursorToBeginningOfTheLine() {
+ eb.MoveCursorTo(0)
+}
+
+func (eb *EditBox) MoveCursorToEndOfTheLine() {
+ eb.MoveCursorTo(len(eb.text))
+}
+
+func (eb *EditBox) DeleteRuneBackward() {
+ if eb.cursor_boffset == 0 {
+ return
+ }
+
+ eb.MoveCursorOneRuneBackward()
+ _, size := eb.RuneUnderCursor()
+ eb.text = byte_slice_remove(eb.text, eb.cursor_boffset, eb.cursor_boffset+size)
+}
+
+func (eb *EditBox) DeleteRuneForward() {
+ if eb.cursor_boffset == len(eb.text) {
+ return
+ }
+ _, size := eb.RuneUnderCursor()
+ eb.text = byte_slice_remove(eb.text, eb.cursor_boffset, eb.cursor_boffset+size)
+}
+
+func (eb *EditBox) DeleteTheRestOfTheLine() {
+ eb.text = eb.text[:eb.cursor_boffset]
+}
+
+func (eb *EditBox) InsertRune(r rune) {
+ var buf [utf8.UTFMax]byte
+ n := utf8.EncodeRune(buf[:], r)
+ eb.text = byte_slice_insert(eb.text, eb.cursor_boffset, buf[:n])
+ eb.MoveCursorOneRuneForward()
+}
+
+// Keep in mind that the cursor position depends on line_voffset, which is set
+// during Draw(), so call this method only after calling Draw().
+func (eb *EditBox) CursorX() int {
+ return eb.cursor_voffset - eb.line_voffset
+}
+
+var edit_box EditBox
+
+const edit_box_width = 30
+
+func redraw_all() {
+ const coldef = termbox.ColorDefault
+ termbox.Clear(coldef, coldef)
+ w, h := termbox.Size()
+
+ midy := h / 2
+ midx := (w - edit_box_width) / 2
+
+ // unicode box drawing chars around the edit box
+ termbox.SetCell(midx-1, midy, '│', coldef, coldef)
+ termbox.SetCell(midx+edit_box_width, midy, '│', coldef, coldef)
+ termbox.SetCell(midx-1, midy-1, '┌', coldef, coldef)
+ termbox.SetCell(midx-1, midy+1, '└', coldef, coldef)
+ termbox.SetCell(midx+edit_box_width, midy-1, '┐', coldef, coldef)
+ termbox.SetCell(midx+edit_box_width, midy+1, '┘', coldef, coldef)
+ fill(midx, midy-1, edit_box_width, 1, termbox.Cell{Ch: '─'})
+ fill(midx, midy+1, edit_box_width, 1, termbox.Cell{Ch: '─'})
+
+ edit_box.Draw(midx, midy, edit_box_width, 1)
+ termbox.SetCursor(midx+edit_box.CursorX(), midy)
+
+ tbprint(midx+6, midy+3, coldef, coldef, "Press ESC to quit")
+ termbox.Flush()
+}
+
+func main() {
+ err := termbox.Init()
+ if err != nil {
+ panic(err)
+ }
+ defer termbox.Close()
+ termbox.SetInputMode(termbox.InputEsc)
+
+ redraw_all()
+mainloop:
+ for {
+ switch ev := termbox.PollEvent(); ev.Type {
+ case termbox.EventKey:
+ switch ev.Key {
+ case termbox.KeyEsc:
+ break mainloop
+ case termbox.KeyArrowLeft, termbox.KeyCtrlB:
+ edit_box.MoveCursorOneRuneBackward()
+ case termbox.KeyArrowRight, termbox.KeyCtrlF:
+ edit_box.MoveCursorOneRuneForward()
+ case termbox.KeyBackspace, termbox.KeyBackspace2:
+ edit_box.DeleteRuneBackward()
+ case termbox.KeyDelete, termbox.KeyCtrlD:
+ edit_box.DeleteRuneForward()
+ case termbox.KeyTab:
+ edit_box.InsertRune('\t')
+ case termbox.KeySpace:
+ edit_box.InsertRune(' ')
+ case termbox.KeyCtrlK:
+ edit_box.DeleteTheRestOfTheLine()
+ case termbox.KeyHome, termbox.KeyCtrlA:
+ edit_box.MoveCursorToBeginningOfTheLine()
+ case termbox.KeyEnd, termbox.KeyCtrlE:
+ edit_box.MoveCursorToEndOfTheLine()
+ default:
+ if ev.Ch != 0 {
+ edit_box.InsertRune(ev.Ch)
+ }
+ }
+ case termbox.EventError:
+ panic(ev.Err)
+ }
+ redraw_all()
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/interrupt.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/interrupt.go
new file mode 100644
index 00000000000..55345219fd0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/interrupt.go
@@ -0,0 +1,69 @@
+package main
+
+import (
+ "fmt"
+ "github.com/nsf/termbox-go"
+ "time"
+)
+
+func tbPrint(x, y int, fg, bg termbox.Attribute, msg string) {
+ for _, c := range msg {
+ termbox.SetCell(x, y, c, fg, bg)
+ x++
+ }
+}
+
+func draw(i int) {
+ termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
+ defer termbox.Flush()
+
+ w, h := termbox.Size()
+ s := fmt.Sprintf("count = %d", i)
+
+ tbPrint((w/2)-(len(s)/2), h/2, termbox.ColorRed, termbox.ColorDefault, s)
+}
+
+func main() {
+ err := termbox.Init()
+ if err != nil {
+ panic(err)
+ }
+ termbox.SetInputMode(termbox.InputEsc)
+
+ go func() {
+ time.Sleep(5 * time.Second)
+ termbox.Interrupt()
+
+		// This should never run: the Interrupt() call above should cause the
+		// event loop below to exit and the process to finish. If something goes
+		// wrong, this panic will trigger and show what happened.
+ time.Sleep(1 * time.Second)
+ panic("this should never run")
+ }()
+
+ var count int
+
+ draw(count)
+mainloop:
+ for {
+ switch ev := termbox.PollEvent(); ev.Type {
+ case termbox.EventKey:
+ if ev.Ch == '+' {
+ count++
+ } else if ev.Ch == '-' {
+ count--
+ }
+
+ case termbox.EventError:
+ panic(ev.Err)
+
+ case termbox.EventInterrupt:
+ break mainloop
+ }
+
+ draw(count)
+ }
+ termbox.Close()
+
+ fmt.Println("Finished")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/keyboard.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/keyboard.go
new file mode 100644
index 00000000000..b6a258e4ca2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/keyboard.go
@@ -0,0 +1,722 @@
+package main
+
+import "github.com/nsf/termbox-go"
+import "fmt"
+
+type key struct {
+ x int
+ y int
+ ch rune
+}
+
+var K_ESC = []key{{1, 1, 'E'}, {2, 1, 'S'}, {3, 1, 'C'}}
+var K_F1 = []key{{6, 1, 'F'}, {7, 1, '1'}}
+var K_F2 = []key{{9, 1, 'F'}, {10, 1, '2'}}
+var K_F3 = []key{{12, 1, 'F'}, {13, 1, '3'}}
+var K_F4 = []key{{15, 1, 'F'}, {16, 1, '4'}}
+var K_F5 = []key{{19, 1, 'F'}, {20, 1, '5'}}
+var K_F6 = []key{{22, 1, 'F'}, {23, 1, '6'}}
+var K_F7 = []key{{25, 1, 'F'}, {26, 1, '7'}}
+var K_F8 = []key{{28, 1, 'F'}, {29, 1, '8'}}
+var K_F9 = []key{{33, 1, 'F'}, {34, 1, '9'}}
+var K_F10 = []key{{36, 1, 'F'}, {37, 1, '1'}, {38, 1, '0'}}
+var K_F11 = []key{{40, 1, 'F'}, {41, 1, '1'}, {42, 1, '1'}}
+var K_F12 = []key{{44, 1, 'F'}, {45, 1, '1'}, {46, 1, '2'}}
+var K_PRN = []key{{50, 1, 'P'}, {51, 1, 'R'}, {52, 1, 'N'}}
+var K_SCR = []key{{54, 1, 'S'}, {55, 1, 'C'}, {56, 1, 'R'}}
+var K_BRK = []key{{58, 1, 'B'}, {59, 1, 'R'}, {60, 1, 'K'}}
+var K_LED1 = []key{{66, 1, '-'}}
+var K_LED2 = []key{{70, 1, '-'}}
+var K_LED3 = []key{{74, 1, '-'}}
+var K_TILDE = []key{{1, 4, '`'}}
+var K_TILDE_SHIFT = []key{{1, 4, '~'}}
+var K_1 = []key{{4, 4, '1'}}
+var K_1_SHIFT = []key{{4, 4, '!'}}
+var K_2 = []key{{7, 4, '2'}}
+var K_2_SHIFT = []key{{7, 4, '@'}}
+var K_3 = []key{{10, 4, '3'}}
+var K_3_SHIFT = []key{{10, 4, '#'}}
+var K_4 = []key{{13, 4, '4'}}
+var K_4_SHIFT = []key{{13, 4, '$'}}
+var K_5 = []key{{16, 4, '5'}}
+var K_5_SHIFT = []key{{16, 4, '%'}}
+var K_6 = []key{{19, 4, '6'}}
+var K_6_SHIFT = []key{{19, 4, '^'}}
+var K_7 = []key{{22, 4, '7'}}
+var K_7_SHIFT = []key{{22, 4, '&'}}
+var K_8 = []key{{25, 4, '8'}}
+var K_8_SHIFT = []key{{25, 4, '*'}}
+var K_9 = []key{{28, 4, '9'}}
+var K_9_SHIFT = []key{{28, 4, '('}}
+var K_0 = []key{{31, 4, '0'}}
+var K_0_SHIFT = []key{{31, 4, ')'}}
+var K_MINUS = []key{{34, 4, '-'}}
+var K_MINUS_SHIFT = []key{{34, 4, '_'}}
+var K_EQUALS = []key{{37, 4, '='}}
+var K_EQUALS_SHIFT = []key{{37, 4, '+'}}
+var K_BACKSLASH = []key{{40, 4, '\\'}}
+var K_BACKSLASH_SHIFT = []key{{40, 4, '|'}}
+var K_BACKSPACE = []key{{44, 4, 0x2190}, {45, 4, 0x2500}, {46, 4, 0x2500}}
+var K_INS = []key{{50, 4, 'I'}, {51, 4, 'N'}, {52, 4, 'S'}}
+var K_HOM = []key{{54, 4, 'H'}, {55, 4, 'O'}, {56, 4, 'M'}}
+var K_PGU = []key{{58, 4, 'P'}, {59, 4, 'G'}, {60, 4, 'U'}}
+var K_K_NUMLOCK = []key{{65, 4, 'N'}}
+var K_K_SLASH = []key{{68, 4, '/'}}
+var K_K_STAR = []key{{71, 4, '*'}}
+var K_K_MINUS = []key{{74, 4, '-'}}
+var K_TAB = []key{{1, 6, 'T'}, {2, 6, 'A'}, {3, 6, 'B'}}
+var K_q = []key{{6, 6, 'q'}}
+var K_Q = []key{{6, 6, 'Q'}}
+var K_w = []key{{9, 6, 'w'}}
+var K_W = []key{{9, 6, 'W'}}
+var K_e = []key{{12, 6, 'e'}}
+var K_E = []key{{12, 6, 'E'}}
+var K_r = []key{{15, 6, 'r'}}
+var K_R = []key{{15, 6, 'R'}}
+var K_t = []key{{18, 6, 't'}}
+var K_T = []key{{18, 6, 'T'}}
+var K_y = []key{{21, 6, 'y'}}
+var K_Y = []key{{21, 6, 'Y'}}
+var K_u = []key{{24, 6, 'u'}}
+var K_U = []key{{24, 6, 'U'}}
+var K_i = []key{{27, 6, 'i'}}
+var K_I = []key{{27, 6, 'I'}}
+var K_o = []key{{30, 6, 'o'}}
+var K_O = []key{{30, 6, 'O'}}
+var K_p = []key{{33, 6, 'p'}}
+var K_P = []key{{33, 6, 'P'}}
+var K_LSQB = []key{{36, 6, '['}}
+var K_LCUB = []key{{36, 6, '{'}}
+var K_RSQB = []key{{39, 6, ']'}}
+var K_RCUB = []key{{39, 6, '}'}}
+var K_ENTER = []key{
+ {43, 6, 0x2591}, {44, 6, 0x2591}, {45, 6, 0x2591}, {46, 6, 0x2591},
+ {43, 7, 0x2591}, {44, 7, 0x2591}, {45, 7, 0x21B5}, {46, 7, 0x2591},
+ {41, 8, 0x2591}, {42, 8, 0x2591}, {43, 8, 0x2591}, {44, 8, 0x2591},
+ {45, 8, 0x2591}, {46, 8, 0x2591},
+}
+var K_DEL = []key{{50, 6, 'D'}, {51, 6, 'E'}, {52, 6, 'L'}}
+var K_END = []key{{54, 6, 'E'}, {55, 6, 'N'}, {56, 6, 'D'}}
+var K_PGD = []key{{58, 6, 'P'}, {59, 6, 'G'}, {60, 6, 'D'}}
+var K_K_7 = []key{{65, 6, '7'}}
+var K_K_8 = []key{{68, 6, '8'}}
+var K_K_9 = []key{{71, 6, '9'}}
+var K_K_PLUS = []key{{74, 6, ' '}, {74, 7, '+'}, {74, 8, ' '}}
+var K_CAPS = []key{{1, 8, 'C'}, {2, 8, 'A'}, {3, 8, 'P'}, {4, 8, 'S'}}
+var K_a = []key{{7, 8, 'a'}}
+var K_A = []key{{7, 8, 'A'}}
+var K_s = []key{{10, 8, 's'}}
+var K_S = []key{{10, 8, 'S'}}
+var K_d = []key{{13, 8, 'd'}}
+var K_D = []key{{13, 8, 'D'}}
+var K_f = []key{{16, 8, 'f'}}
+var K_F = []key{{16, 8, 'F'}}
+var K_g = []key{{19, 8, 'g'}}
+var K_G = []key{{19, 8, 'G'}}
+var K_h = []key{{22, 8, 'h'}}
+var K_H = []key{{22, 8, 'H'}}
+var K_j = []key{{25, 8, 'j'}}
+var K_J = []key{{25, 8, 'J'}}
+var K_k = []key{{28, 8, 'k'}}
+var K_K = []key{{28, 8, 'K'}}
+var K_l = []key{{31, 8, 'l'}}
+var K_L = []key{{31, 8, 'L'}}
+var K_SEMICOLON = []key{{34, 8, ';'}}
+var K_PARENTHESIS = []key{{34, 8, ':'}}
+var K_QUOTE = []key{{37, 8, '\''}}
+var K_DOUBLEQUOTE = []key{{37, 8, '"'}}
+var K_K_4 = []key{{65, 8, '4'}}
+var K_K_5 = []key{{68, 8, '5'}}
+var K_K_6 = []key{{71, 8, '6'}}
+var K_LSHIFT = []key{{1, 10, 'S'}, {2, 10, 'H'}, {3, 10, 'I'}, {4, 10, 'F'}, {5, 10, 'T'}}
+var K_z = []key{{9, 10, 'z'}}
+var K_Z = []key{{9, 10, 'Z'}}
+var K_x = []key{{12, 10, 'x'}}
+var K_X = []key{{12, 10, 'X'}}
+var K_c = []key{{15, 10, 'c'}}
+var K_C = []key{{15, 10, 'C'}}
+var K_v = []key{{18, 10, 'v'}}
+var K_V = []key{{18, 10, 'V'}}
+var K_b = []key{{21, 10, 'b'}}
+var K_B = []key{{21, 10, 'B'}}
+var K_n = []key{{24, 10, 'n'}}
+var K_N = []key{{24, 10, 'N'}}
+var K_m = []key{{27, 10, 'm'}}
+var K_M = []key{{27, 10, 'M'}}
+var K_COMMA = []key{{30, 10, ','}}
+var K_LANB = []key{{30, 10, '<'}}
+var K_PERIOD = []key{{33, 10, '.'}}
+var K_RANB = []key{{33, 10, '>'}}
+var K_SLASH = []key{{36, 10, '/'}}
+var K_QUESTION = []key{{36, 10, '?'}}
+var K_RSHIFT = []key{{42, 10, 'S'}, {43, 10, 'H'}, {44, 10, 'I'}, {45, 10, 'F'}, {46, 10, 'T'}}
+var K_ARROW_UP = []key{{54, 10, '('}, {55, 10, 0x2191}, {56, 10, ')'}}
+var K_K_1 = []key{{65, 10, '1'}}
+var K_K_2 = []key{{68, 10, '2'}}
+var K_K_3 = []key{{71, 10, '3'}}
+var K_K_ENTER = []key{{74, 10, 0x2591}, {74, 11, 0x2591}, {74, 12, 0x2591}}
+var K_LCTRL = []key{{1, 12, 'C'}, {2, 12, 'T'}, {3, 12, 'R'}, {4, 12, 'L'}}
+var K_LWIN = []key{{6, 12, 'W'}, {7, 12, 'I'}, {8, 12, 'N'}}
+var K_LALT = []key{{10, 12, 'A'}, {11, 12, 'L'}, {12, 12, 'T'}}
+var K_SPACE = []key{
+ {14, 12, ' '}, {15, 12, ' '}, {16, 12, ' '}, {17, 12, ' '}, {18, 12, ' '},
+ {19, 12, 'S'}, {20, 12, 'P'}, {21, 12, 'A'}, {22, 12, 'C'}, {23, 12, 'E'},
+ {24, 12, ' '}, {25, 12, ' '}, {26, 12, ' '}, {27, 12, ' '}, {28, 12, ' '},
+}
+var K_RALT = []key{{30, 12, 'A'}, {31, 12, 'L'}, {32, 12, 'T'}}
+var K_RWIN = []key{{34, 12, 'W'}, {35, 12, 'I'}, {36, 12, 'N'}}
+var K_RPROP = []key{{38, 12, 'P'}, {39, 12, 'R'}, {40, 12, 'O'}, {41, 12, 'P'}}
+var K_RCTRL = []key{{43, 12, 'C'}, {44, 12, 'T'}, {45, 12, 'R'}, {46, 12, 'L'}}
+var K_ARROW_LEFT = []key{{50, 12, '('}, {51, 12, 0x2190}, {52, 12, ')'}}
+var K_ARROW_DOWN = []key{{54, 12, '('}, {55, 12, 0x2193}, {56, 12, ')'}}
+var K_ARROW_RIGHT = []key{{58, 12, '('}, {59, 12, 0x2192}, {60, 12, ')'}}
+var K_K_0 = []key{{65, 12, ' '}, {66, 12, '0'}, {67, 12, ' '}, {68, 12, ' '}}
+var K_K_PERIOD = []key{{71, 12, '.'}}
+
+type combo struct {
+ keys [][]key
+}
+
+var combos = []combo{
+ {[][]key{K_TILDE, K_2, K_SPACE, K_LCTRL, K_RCTRL}},
+ {[][]key{K_A, K_LCTRL, K_RCTRL}},
+ {[][]key{K_B, K_LCTRL, K_RCTRL}},
+ {[][]key{K_C, K_LCTRL, K_RCTRL}},
+ {[][]key{K_D, K_LCTRL, K_RCTRL}},
+ {[][]key{K_E, K_LCTRL, K_RCTRL}},
+ {[][]key{K_F, K_LCTRL, K_RCTRL}},
+ {[][]key{K_G, K_LCTRL, K_RCTRL}},
+ {[][]key{K_H, K_BACKSPACE, K_LCTRL, K_RCTRL}},
+ {[][]key{K_I, K_TAB, K_LCTRL, K_RCTRL}},
+ {[][]key{K_J, K_LCTRL, K_RCTRL}},
+ {[][]key{K_K, K_LCTRL, K_RCTRL}},
+ {[][]key{K_L, K_LCTRL, K_RCTRL}},
+ {[][]key{K_M, K_ENTER, K_K_ENTER, K_LCTRL, K_RCTRL}},
+ {[][]key{K_N, K_LCTRL, K_RCTRL}},
+ {[][]key{K_O, K_LCTRL, K_RCTRL}},
+ {[][]key{K_P, K_LCTRL, K_RCTRL}},
+ {[][]key{K_Q, K_LCTRL, K_RCTRL}},
+ {[][]key{K_R, K_LCTRL, K_RCTRL}},
+ {[][]key{K_S, K_LCTRL, K_RCTRL}},
+ {[][]key{K_T, K_LCTRL, K_RCTRL}},
+ {[][]key{K_U, K_LCTRL, K_RCTRL}},
+ {[][]key{K_V, K_LCTRL, K_RCTRL}},
+ {[][]key{K_W, K_LCTRL, K_RCTRL}},
+ {[][]key{K_X, K_LCTRL, K_RCTRL}},
+ {[][]key{K_Y, K_LCTRL, K_RCTRL}},
+ {[][]key{K_Z, K_LCTRL, K_RCTRL}},
+ {[][]key{K_LSQB, K_ESC, K_3, K_LCTRL, K_RCTRL}},
+ {[][]key{K_4, K_BACKSLASH, K_LCTRL, K_RCTRL}},
+ {[][]key{K_RSQB, K_5, K_LCTRL, K_RCTRL}},
+ {[][]key{K_6, K_LCTRL, K_RCTRL}},
+ {[][]key{K_7, K_SLASH, K_MINUS_SHIFT, K_LCTRL, K_RCTRL}},
+ {[][]key{K_SPACE}},
+ {[][]key{K_1_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_DOUBLEQUOTE, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_3_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_4_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_5_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_7_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_QUOTE}},
+ {[][]key{K_9_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_0_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_8_SHIFT, K_K_STAR, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_EQUALS_SHIFT, K_K_PLUS, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_COMMA}},
+ {[][]key{K_MINUS, K_K_MINUS}},
+ {[][]key{K_PERIOD, K_K_PERIOD}},
+ {[][]key{K_SLASH, K_K_SLASH}},
+ {[][]key{K_0, K_K_0}},
+ {[][]key{K_1, K_K_1}},
+ {[][]key{K_2, K_K_2}},
+ {[][]key{K_3, K_K_3}},
+ {[][]key{K_4, K_K_4}},
+ {[][]key{K_5, K_K_5}},
+ {[][]key{K_6, K_K_6}},
+ {[][]key{K_7, K_K_7}},
+ {[][]key{K_8, K_K_8}},
+ {[][]key{K_9, K_K_9}},
+ {[][]key{K_PARENTHESIS, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_SEMICOLON}},
+ {[][]key{K_LANB, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_EQUALS}},
+ {[][]key{K_RANB, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_QUESTION, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_2_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_A, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_B, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_C, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_D, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_E, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_F, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_G, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_H, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_I, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_J, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_K, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_L, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_M, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_N, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_O, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_P, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_Q, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_R, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_S, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_T, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_U, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_V, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_W, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_X, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_Y, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_Z, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_LSQB}},
+ {[][]key{K_BACKSLASH}},
+ {[][]key{K_RSQB}},
+ {[][]key{K_6_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_MINUS_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_TILDE}},
+ {[][]key{K_a}},
+ {[][]key{K_b}},
+ {[][]key{K_c}},
+ {[][]key{K_d}},
+ {[][]key{K_e}},
+ {[][]key{K_f}},
+ {[][]key{K_g}},
+ {[][]key{K_h}},
+ {[][]key{K_i}},
+ {[][]key{K_j}},
+ {[][]key{K_k}},
+ {[][]key{K_l}},
+ {[][]key{K_m}},
+ {[][]key{K_n}},
+ {[][]key{K_o}},
+ {[][]key{K_p}},
+ {[][]key{K_q}},
+ {[][]key{K_r}},
+ {[][]key{K_s}},
+ {[][]key{K_t}},
+ {[][]key{K_u}},
+ {[][]key{K_v}},
+ {[][]key{K_w}},
+ {[][]key{K_x}},
+ {[][]key{K_y}},
+ {[][]key{K_z}},
+ {[][]key{K_LCUB, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_BACKSLASH_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_RCUB, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_TILDE_SHIFT, K_LSHIFT, K_RSHIFT}},
+ {[][]key{K_8, K_BACKSPACE, K_LCTRL, K_RCTRL}},
+}
+
+var func_combos = []combo{
+ {[][]key{K_F1}},
+ {[][]key{K_F2}},
+ {[][]key{K_F3}},
+ {[][]key{K_F4}},
+ {[][]key{K_F5}},
+ {[][]key{K_F6}},
+ {[][]key{K_F7}},
+ {[][]key{K_F8}},
+ {[][]key{K_F9}},
+ {[][]key{K_F10}},
+ {[][]key{K_F11}},
+ {[][]key{K_F12}},
+ {[][]key{K_INS}},
+ {[][]key{K_DEL}},
+ {[][]key{K_HOM}},
+ {[][]key{K_END}},
+ {[][]key{K_PGU}},
+ {[][]key{K_PGD}},
+ {[][]key{K_ARROW_UP}},
+ {[][]key{K_ARROW_DOWN}},
+ {[][]key{K_ARROW_LEFT}},
+ {[][]key{K_ARROW_RIGHT}},
+}
+
+func print_tb(x, y int, fg, bg termbox.Attribute, msg string) {
+ for _, c := range msg {
+ termbox.SetCell(x, y, c, fg, bg)
+ x++
+ }
+}
+
+func printf_tb(x, y int, fg, bg termbox.Attribute, format string, args ...interface{}) {
+ s := fmt.Sprintf(format, args...)
+ print_tb(x, y, fg, bg, s)
+}
+
+func draw_key(k []key, fg, bg termbox.Attribute) {
+ for _, k := range k {
+ termbox.SetCell(k.x+2, k.y+4, k.ch, fg, bg)
+ }
+}
+
+func draw_keyboard() {
+ termbox.SetCell(0, 0, 0x250C, termbox.ColorWhite, termbox.ColorBlack)
+ termbox.SetCell(79, 0, 0x2510, termbox.ColorWhite, termbox.ColorBlack)
+ termbox.SetCell(0, 23, 0x2514, termbox.ColorWhite, termbox.ColorBlack)
+ termbox.SetCell(79, 23, 0x2518, termbox.ColorWhite, termbox.ColorBlack)
+
+ for i := 1; i < 79; i++ {
+ termbox.SetCell(i, 0, 0x2500, termbox.ColorWhite, termbox.ColorBlack)
+ termbox.SetCell(i, 23, 0x2500, termbox.ColorWhite, termbox.ColorBlack)
+ termbox.SetCell(i, 17, 0x2500, termbox.ColorWhite, termbox.ColorBlack)
+ termbox.SetCell(i, 4, 0x2500, termbox.ColorWhite, termbox.ColorBlack)
+ }
+ for i := 1; i < 23; i++ {
+ termbox.SetCell(0, i, 0x2502, termbox.ColorWhite, termbox.ColorBlack)
+ termbox.SetCell(79, i, 0x2502, termbox.ColorWhite, termbox.ColorBlack)
+ }
+ termbox.SetCell(0, 17, 0x251C, termbox.ColorWhite, termbox.ColorBlack)
+ termbox.SetCell(79, 17, 0x2524, termbox.ColorWhite, termbox.ColorBlack)
+ termbox.SetCell(0, 4, 0x251C, termbox.ColorWhite, termbox.ColorBlack)
+ termbox.SetCell(79, 4, 0x2524, termbox.ColorWhite, termbox.ColorBlack)
+ for i := 5; i < 17; i++ {
+ termbox.SetCell(1, i, 0x2588, termbox.ColorYellow, termbox.ColorYellow)
+ termbox.SetCell(78, i, 0x2588, termbox.ColorYellow, termbox.ColorYellow)
+ }
+
+ draw_key(K_ESC, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F1, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F2, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F3, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F4, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F5, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F6, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F7, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F8, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F9, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F10, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F11, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_F12, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_PRN, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_SCR, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_BRK, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_LED1, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_LED2, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_LED3, termbox.ColorWhite, termbox.ColorBlue)
+
+ draw_key(K_TILDE, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_1, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_2, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_3, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_4, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_5, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_6, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_7, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_8, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_9, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_0, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_MINUS, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_EQUALS, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_BACKSLASH, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_BACKSPACE, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_INS, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_HOM, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_PGU, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_NUMLOCK, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_SLASH, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_STAR, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_MINUS, termbox.ColorWhite, termbox.ColorBlue)
+
+ draw_key(K_TAB, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_q, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_w, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_e, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_r, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_t, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_y, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_u, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_i, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_o, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_p, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_LSQB, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_RSQB, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_ENTER, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_DEL, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_END, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_PGD, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_7, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_8, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_9, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_PLUS, termbox.ColorWhite, termbox.ColorBlue)
+
+ draw_key(K_CAPS, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_a, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_s, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_d, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_f, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_g, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_h, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_j, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_k, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_l, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_SEMICOLON, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_QUOTE, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_4, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_5, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_6, termbox.ColorWhite, termbox.ColorBlue)
+
+ draw_key(K_LSHIFT, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_z, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_x, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_c, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_v, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_b, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_n, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_m, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_COMMA, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_PERIOD, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_SLASH, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_RSHIFT, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_ARROW_UP, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_1, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_2, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_3, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_ENTER, termbox.ColorWhite, termbox.ColorBlue)
+
+ draw_key(K_LCTRL, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_LWIN, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_LALT, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_SPACE, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_RCTRL, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_RPROP, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_RWIN, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_RALT, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_ARROW_LEFT, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_ARROW_DOWN, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_ARROW_RIGHT, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_0, termbox.ColorWhite, termbox.ColorBlue)
+ draw_key(K_K_PERIOD, termbox.ColorWhite, termbox.ColorBlue)
+
+ printf_tb(33, 1, termbox.ColorMagenta|termbox.AttrBold, termbox.ColorBlack, "Keyboard demo!")
+ printf_tb(21, 2, termbox.ColorMagenta, termbox.ColorBlack, "(press CTRL+X and then CTRL+Q to exit)")
+ printf_tb(15, 3, termbox.ColorMagenta, termbox.ColorBlack, "(press CTRL+X and then CTRL+C to change input mode)")
+
+ inputmode := termbox.SetInputMode(termbox.InputCurrent)
+ inputmode_str := ""
+ switch {
+ case inputmode&termbox.InputEsc != 0:
+ inputmode_str = "termbox.InputEsc"
+ case inputmode&termbox.InputAlt != 0:
+ inputmode_str = "termbox.InputAlt"
+ }
+
+ if inputmode&termbox.InputMouse != 0 {
+ inputmode_str += " | termbox.InputMouse"
+ }
+ printf_tb(3, 18, termbox.ColorWhite, termbox.ColorBlack, "Input mode: %s", inputmode_str)
+}
+
+var fcmap = []string{
+ "CTRL+2, CTRL+~",
+ "CTRL+A",
+ "CTRL+B",
+ "CTRL+C",
+ "CTRL+D",
+ "CTRL+E",
+ "CTRL+F",
+ "CTRL+G",
+ "CTRL+H, BACKSPACE",
+ "CTRL+I, TAB",
+ "CTRL+J",
+ "CTRL+K",
+ "CTRL+L",
+ "CTRL+M, ENTER",
+ "CTRL+N",
+ "CTRL+O",
+ "CTRL+P",
+ "CTRL+Q",
+ "CTRL+R",
+ "CTRL+S",
+ "CTRL+T",
+ "CTRL+U",
+ "CTRL+V",
+ "CTRL+W",
+ "CTRL+X",
+ "CTRL+Y",
+ "CTRL+Z",
+ "CTRL+3, ESC, CTRL+[",
+ "CTRL+4, CTRL+\\",
+ "CTRL+5, CTRL+]",
+ "CTRL+6",
+ "CTRL+7, CTRL+/, CTRL+_",
+ "SPACE",
+}
+
+var fkmap = []string{
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "INSERT",
+ "DELETE",
+ "HOME",
+ "END",
+ "PGUP",
+ "PGDN",
+ "ARROW UP",
+ "ARROW DOWN",
+ "ARROW LEFT",
+ "ARROW RIGHT",
+}
+
+func funckeymap(k termbox.Key) string {
+ if k == termbox.KeyCtrl8 {
+ return "CTRL+8, BACKSPACE 2" /* 0x7F */
+ } else if k >= termbox.KeyArrowRight && k <= 0xFFFF {
+ return fkmap[0xFFFF-k]
+ } else if k <= termbox.KeySpace {
+ return fcmap[k]
+ }
+ return "UNKNOWN"
+}
+
+func pretty_print_press(ev *termbox.Event) {
+ printf_tb(3, 19, termbox.ColorWhite, termbox.ColorBlack, "Key: ")
+ printf_tb(8, 19, termbox.ColorYellow, termbox.ColorBlack, "decimal: %d", ev.Key)
+ printf_tb(8, 20, termbox.ColorGreen, termbox.ColorBlack, "hex: 0x%X", ev.Key)
+ printf_tb(8, 21, termbox.ColorCyan, termbox.ColorBlack, "octal: 0%o", ev.Key)
+ printf_tb(8, 22, termbox.ColorRed, termbox.ColorBlack, "string: %s", funckeymap(ev.Key))
+
+ printf_tb(54, 19, termbox.ColorWhite, termbox.ColorBlack, "Char: ")
+ printf_tb(60, 19, termbox.ColorYellow, termbox.ColorBlack, "decimal: %d", ev.Ch)
+ printf_tb(60, 20, termbox.ColorGreen, termbox.ColorBlack, "hex: 0x%X", ev.Ch)
+ printf_tb(60, 21, termbox.ColorCyan, termbox.ColorBlack, "octal: 0%o", ev.Ch)
+ printf_tb(60, 22, termbox.ColorRed, termbox.ColorBlack, "string: %s", string(ev.Ch))
+
+ modifier := "none"
+ if ev.Mod != 0 {
+ modifier = "termbox.ModAlt"
+ }
+ printf_tb(54, 18, termbox.ColorWhite, termbox.ColorBlack, "Modifier: %s", modifier)
+}
+
+func pretty_print_resize(ev *termbox.Event) {
+ printf_tb(3, 19, termbox.ColorWhite, termbox.ColorBlack, "Resize event: %d x %d", ev.Width, ev.Height)
+}
+
+var counter = 0
+
+func pretty_print_mouse(ev *termbox.Event) {
+ printf_tb(3, 19, termbox.ColorWhite, termbox.ColorBlack, "Mouse event: %d x %d", ev.MouseX, ev.MouseY)
+ button := ""
+ switch ev.Key {
+ case termbox.MouseLeft:
+ button = "MouseLeft: %d"
+ case termbox.MouseMiddle:
+ button = "MouseMiddle: %d"
+ case termbox.MouseRight:
+ button = "MouseRight: %d"
+ case termbox.MouseWheelUp:
+ button = "MouseWheelUp: %d"
+ case termbox.MouseWheelDown:
+ button = "MouseWheelDown: %d"
+ case termbox.MouseRelease:
+ button = "MouseRelease: %d"
+ }
+ if ev.Mod&termbox.ModMotion != 0 {
+ button += "*"
+ }
+ counter++
+ printf_tb(43, 19, termbox.ColorWhite, termbox.ColorBlack, "Key: ")
+ printf_tb(48, 19, termbox.ColorYellow, termbox.ColorBlack, button, counter)
+}
+
+func dispatch_press(ev *termbox.Event) {
+ if ev.Mod&termbox.ModAlt != 0 {
+ draw_key(K_LALT, termbox.ColorWhite, termbox.ColorRed)
+ draw_key(K_RALT, termbox.ColorWhite, termbox.ColorRed)
+ }
+
+ var k *combo
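+	// Function-key constants count down from 0xFFFF (compare funckeymap), so
+	// 0xFFFF-ev.Key indexes func_combos; plain keys and ASCII chars index combos.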
+ if ev.Key >= termbox.KeyArrowRight {
+ k = &func_combos[0xFFFF-ev.Key]
+ } else if ev.Ch < 128 {
+ if ev.Ch == 0 && ev.Key < 128 {
+ k = &combos[ev.Key]
+ } else {
+ k = &combos[ev.Ch]
+ }
+ }
+ if k == nil {
+ return
+ }
+
+ keys := k.keys
+ for _, k := range keys {
+ draw_key(k, termbox.ColorWhite, termbox.ColorRed)
+ }
+}
+
+func main() {
+ err := termbox.Init()
+ if err != nil {
+ panic(err)
+ }
+ defer termbox.Close()
+
+ termbox.SetInputMode(termbox.InputEsc | termbox.InputMouse)
+
+ termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
+ draw_keyboard()
+ termbox.Flush()
+ inputmode := 0
+ ctrlxpressed := false
+loop:
+ for {
+ switch ev := termbox.PollEvent(); ev.Type {
+ case termbox.EventKey:
+ if ev.Key == termbox.KeyCtrlS && ctrlxpressed {
+ termbox.Sync()
+ }
+ if ev.Key == termbox.KeyCtrlQ && ctrlxpressed {
+ break loop
+ }
+ if ev.Key == termbox.KeyCtrlC && ctrlxpressed {
+ chmap := []termbox.InputMode{
+ termbox.InputEsc | termbox.InputMouse,
+ termbox.InputAlt | termbox.InputMouse,
+ termbox.InputEsc,
+ termbox.InputAlt,
+ }
+ inputmode++
+ if inputmode >= len(chmap) {
+ inputmode = 0
+ }
+ termbox.SetInputMode(chmap[inputmode])
+ }
+ if ev.Key == termbox.KeyCtrlX {
+ ctrlxpressed = true
+ } else {
+ ctrlxpressed = false
+ }
+
+ termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
+ draw_keyboard()
+ dispatch_press(&ev)
+ pretty_print_press(&ev)
+ termbox.Flush()
+ case termbox.EventResize:
+ termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
+ draw_keyboard()
+ pretty_print_resize(&ev)
+ termbox.Flush()
+ case termbox.EventMouse:
+ termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
+ draw_keyboard()
+ pretty_print_mouse(&ev)
+ termbox.Flush()
+ case termbox.EventError:
+ panic(ev.Err)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/output.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/output.go
new file mode 100644
index 00000000000..2b9479b078d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/output.go
@@ -0,0 +1,228 @@
+package main
+
+import "github.com/mattn/go-runewidth"
+import "github.com/nsf/termbox-go"
+
+const chars = "nnnnnnnnnbbbbbbbbbuuuuuuuuuBBBBBBBBB"
+
+var output_mode = termbox.OutputNormal
+
+func next_char(current int) int {
+ current++
+ if current >= len(chars) {
+ return 0
+ }
+ return current
+}
+
+func print_combinations_table(sx, sy int, attrs []termbox.Attribute) {
+ var bg termbox.Attribute
+ current_char := 0
+ y := sy
+
+ all_attrs := []termbox.Attribute{
+ 0,
+ termbox.AttrBold,
+ termbox.AttrUnderline,
+ termbox.AttrBold | termbox.AttrUnderline,
+ }
+
+ draw_line := func() {
+ x := sx
+ for _, a := range all_attrs {
+ for c := termbox.ColorDefault; c <= termbox.ColorWhite; c++ {
+ fg := a | c
+ termbox.SetCell(x, y, rune(chars[current_char]), fg, bg)
+ current_char = next_char(current_char)
+ x++
+ }
+ }
+ }
+
+ for _, a := range attrs {
+ for c := termbox.ColorDefault; c <= termbox.ColorWhite; c++ {
+ bg = a | c
+ draw_line()
+ y++
+ }
+ }
+}
+
+func print_wide(x, y int, s string) {
+ red := false
+ for _, r := range s {
+ c := termbox.ColorDefault
+ if red {
+ c = termbox.ColorRed
+ }
+ termbox.SetCell(x, y, r, termbox.ColorDefault, c)
+ w := runewidth.RuneWidth(r)
+ if w == 0 || (w == 2 && runewidth.IsAmbiguousWidth(r)) {
+ w = 1
+ }
+ x += w
+
+ red = !red
+ }
+}
+
+const hello_world = "こんにちは世界"
+
+func draw_all() {
+ termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
+
+ switch output_mode {
+
+ case termbox.OutputNormal:
+ print_combinations_table(1, 1, []termbox.Attribute{
+ 0,
+ termbox.AttrBold,
+ })
+ print_combinations_table(2+len(chars), 1, []termbox.Attribute{
+ termbox.AttrReverse,
+ })
+ print_wide(2+len(chars), 11, hello_world)
+
+ case termbox.OutputGrayscale:
+ for y := 0; y < 26; y++ {
+ for x := 0; x < 26; x++ {
+ termbox.SetCell(x, y, 'n',
+ termbox.Attribute(x+1),
+ termbox.Attribute(y+1))
+ termbox.SetCell(x+27, y, 'b',
+ termbox.Attribute(x+1)|termbox.AttrBold,
+ termbox.Attribute(26-y))
+ termbox.SetCell(x+54, y, 'u',
+ termbox.Attribute(x+1)|termbox.AttrUnderline,
+ termbox.Attribute(y+1))
+ }
+ termbox.SetCell(82, y, 'd',
+ termbox.Attribute(y+1),
+ termbox.ColorDefault)
+ termbox.SetCell(83, y, 'd',
+ termbox.ColorDefault,
+ termbox.Attribute(26-y))
+ }
+
+ case termbox.Output216:
+ for r := 0; r < 6; r++ {
+ for g := 0; g < 6; g++ {
+ for b := 0; b < 6; b++ {
+ y := r
+ x := g + 6*b
+ c1 := termbox.Attribute(1 + r*36 + g*6 + b)
+ bg := termbox.Attribute(1 + g*36 + b*6 + r)
+ c2 := termbox.Attribute(1 + b*36 + r*6 + g)
+ bc1 := c1 | termbox.AttrBold
+ uc1 := c1 | termbox.AttrUnderline
+ bc2 := c2 | termbox.AttrBold
+ uc2 := c2 | termbox.AttrUnderline
+ termbox.SetCell(x, y, 'n', c1, bg)
+ termbox.SetCell(x, y+6, 'b', bc1, bg)
+ termbox.SetCell(x, y+12, 'u', uc1, bg)
+ termbox.SetCell(x, y+18, 'B', bc1|uc1, bg)
+ termbox.SetCell(x+37, y, 'n', c2, bg)
+ termbox.SetCell(x+37, y+6, 'b', bc2, bg)
+ termbox.SetCell(x+37, y+12, 'u', uc2, bg)
+ termbox.SetCell(x+37, y+18, 'B', bc2|uc2, bg)
+ }
+ c1 := termbox.Attribute(1 + g*6 + r*36)
+ c2 := termbox.Attribute(6 + g*6 + r*36)
+ termbox.SetCell(74+g, r, 'd', c1, termbox.ColorDefault)
+ termbox.SetCell(74+g, r+6, 'd', c2, termbox.ColorDefault)
+ termbox.SetCell(74+g, r+12, 'd', termbox.ColorDefault, c1)
+ termbox.SetCell(74+g, r+18, 'd', termbox.ColorDefault, c2)
+ }
+ }
+
+ case termbox.Output256:
+ for y := 0; y < 4; y++ {
+ for x := 0; x < 8; x++ {
+ for z := 0; z < 8; z++ {
+ bg := termbox.Attribute(1 + y*64 + x*8 + z)
+ c1 := termbox.Attribute(256 - y*64 - x*8 - z)
+ c2 := termbox.Attribute(1 + y*64 + z*8 + x)
+ c3 := termbox.Attribute(256 - y*64 - z*8 - x)
+ c4 := termbox.Attribute(1 + y*64 + x*4 + z*4)
+ bold := c2 | termbox.AttrBold
+ under := c3 | termbox.AttrUnderline
+ both := c1 | termbox.AttrBold | termbox.AttrUnderline
+ termbox.SetCell(z+8*x, y, ' ', 0, bg)
+ termbox.SetCell(z+8*x, y+5, 'n', c4, bg)
+ termbox.SetCell(z+8*x, y+10, 'b', bold, bg)
+ termbox.SetCell(z+8*x, y+15, 'u', under, bg)
+ termbox.SetCell(z+8*x, y+20, 'B', both, bg)
+ }
+ }
+ }
+ for x := 0; x < 12; x++ {
+ for y := 0; y < 2; y++ {
+ c1 := termbox.Attribute(233 + y*12 + x)
+ termbox.SetCell(66+x, y, 'd', c1, termbox.ColorDefault)
+ termbox.SetCell(66+x, 2+y, 'd', termbox.ColorDefault, c1)
+ }
+ }
+ for x := 0; x < 6; x++ {
+ for y := 0; y < 6; y++ {
+ c1 := termbox.Attribute(17 + x*6 + y*36)
+ c2 := termbox.Attribute(17 + 5 + x*6 + y*36)
+ termbox.SetCell(66+x, 6+y, 'd', c1, termbox.ColorDefault)
+ termbox.SetCell(66+x, 12+y, 'd', c2, termbox.ColorDefault)
+ termbox.SetCell(72+x, 6+y, 'd', termbox.ColorDefault, c1)
+ termbox.SetCell(72+x, 12+y, 'd', termbox.ColorDefault, c2)
+ }
+ }
+
+ }
+
+ termbox.Flush()
+}
+
+var available_modes = []termbox.OutputMode{
+ termbox.OutputNormal,
+ termbox.OutputGrayscale,
+ termbox.Output216,
+ termbox.Output256,
+}
+
+var output_mode_index = 0
+
+func switch_output_mode(direction int) {
+ output_mode_index += direction
+ if output_mode_index < 0 {
+ output_mode_index = len(available_modes) - 1
+ } else if output_mode_index >= len(available_modes) {
+ output_mode_index = 0
+ }
+ output_mode = termbox.SetOutputMode(available_modes[output_mode_index])
+ termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
+ termbox.Sync()
+}
+
+func main() {
+ err := termbox.Init()
+ if err != nil {
+ panic(err)
+ }
+ defer termbox.Close()
+
+ draw_all()
+loop:
+ for {
+ switch ev := termbox.PollEvent(); ev.Type {
+ case termbox.EventKey:
+ switch ev.Key {
+ case termbox.KeyEsc:
+ break loop
+ case termbox.KeyArrowUp, termbox.KeyArrowRight:
+ switch_output_mode(1)
+ draw_all()
+ case termbox.KeyArrowDown, termbox.KeyArrowLeft:
+ switch_output_mode(-1)
+ draw_all()
+ }
+ case termbox.EventResize:
+ draw_all()
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/paint.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/paint.go
new file mode 100644
index 00000000000..fbafd18ae99
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/paint.go
@@ -0,0 +1,105 @@
+package main
+
+import (
+ "github.com/nsf/termbox-go"
+)
+
+var curCol = 0
+var curRune = 0
+var backbuf []termbox.Cell
+var bbw, bbh int
+
+var runes = []rune{' ', '░', '▒', '▓', '█'}
+var colors = []termbox.Attribute{
+ termbox.ColorBlack,
+ termbox.ColorRed,
+ termbox.ColorGreen,
+ termbox.ColorYellow,
+ termbox.ColorBlue,
+ termbox.ColorMagenta,
+ termbox.ColorCyan,
+ termbox.ColorWhite,
+}
+
+type attrFunc func(int) (rune, termbox.Attribute, termbox.Attribute)
+
+func updateAndDrawButtons(current *int, x, y int, mx, my int, n int, attrf attrFunc) {
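+ // each button is a 4x2 block of cells; a mouse hit (mx,my) inside a
+ // block selects it, and a '^^^^' marker is drawn under the selection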
+ lx, ly := x, y
+ for i := 0; i < n; i++ {
+ if lx <= mx && mx <= lx+3 && ly <= my && my <= ly+1 {
+ *current = i
+ }
+ r, fg, bg := attrf(i)
+ termbox.SetCell(lx+0, ly+0, r, fg, bg)
+ termbox.SetCell(lx+1, ly+0, r, fg, bg)
+ termbox.SetCell(lx+2, ly+0, r, fg, bg)
+ termbox.SetCell(lx+3, ly+0, r, fg, bg)
+ termbox.SetCell(lx+0, ly+1, r, fg, bg)
+ termbox.SetCell(lx+1, ly+1, r, fg, bg)
+ termbox.SetCell(lx+2, ly+1, r, fg, bg)
+ termbox.SetCell(lx+3, ly+1, r, fg, bg)
+ lx += 4
+ }
+ lx, ly = x, y
+ for i := 0; i < n; i++ {
+ if *current == i {
+ fg := termbox.ColorRed | termbox.AttrBold
+ bg := termbox.ColorDefault
+ termbox.SetCell(lx+0, ly+2, '^', fg, bg)
+ termbox.SetCell(lx+1, ly+2, '^', fg, bg)
+ termbox.SetCell(lx+2, ly+2, '^', fg, bg)
+ termbox.SetCell(lx+3, ly+2, '^', fg, bg)
+ }
+ lx += 4
+ }
+}
+
+func update_and_redraw_all(mx, my int) {
+ termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
+ if mx != -1 && my != -1 {
+ backbuf[bbw*my+mx] = termbox.Cell{Ch: runes[curRune], Fg: colors[curCol]}
+ }
+ copy(termbox.CellBuffer(), backbuf)
+ _, h := termbox.Size()
+ updateAndDrawButtons(&curRune, 0, 0, mx, my, len(runes), func(i int) (rune, termbox.Attribute, termbox.Attribute) {
+ return runes[i], termbox.ColorDefault, termbox.ColorDefault
+ })
+ updateAndDrawButtons(&curCol, 0, h-3, mx, my, len(colors), func(i int) (rune, termbox.Attribute, termbox.Attribute) {
+ return ' ', termbox.ColorDefault, colors[i]
+ })
+ termbox.Flush()
+}
+
+func reallocBackBuffer(w, h int) {
+ bbw, bbh = w, h
+ backbuf = make([]termbox.Cell, w*h)
+}
+
+func main() {
+ err := termbox.Init()
+ if err != nil {
+ panic(err)
+ }
+ defer termbox.Close()
+ termbox.SetInputMode(termbox.InputEsc | termbox.InputMouse)
+ reallocBackBuffer(termbox.Size())
+ update_and_redraw_all(-1, -1)
+
+mainloop:
+ for {
+ mx, my := -1, -1
+ switch ev := termbox.PollEvent(); ev.Type {
+ case termbox.EventKey:
+ if ev.Key == termbox.KeyEsc {
+ break mainloop
+ }
+ case termbox.EventMouse:
+ if ev.Key == termbox.MouseLeft {
+ mx, my = ev.MouseX, ev.MouseY
+ }
+ case termbox.EventResize:
+ reallocBackBuffer(ev.Width, ev.Height)
+ }
+ update_and_redraw_all(mx, my)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/random_output.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/random_output.go
new file mode 100644
index 00000000000..efcf0b7c9de
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/random_output.go
@@ -0,0 +1,46 @@
+package main
+
+import "github.com/nsf/termbox-go"
+import "math/rand"
+import "time"
+
+func draw() {
+ w, h := termbox.Size()
+ termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
+ for y := 0; y < h; y++ {
+ for x := 0; x < w; x++ {
+ termbox.SetCell(x, y, ' ', termbox.ColorDefault,
+ termbox.Attribute(rand.Int()%8)+1)
+ }
+ }
+ termbox.Flush()
+}
+
+func main() {
+ err := termbox.Init()
+ if err != nil {
+ panic(err)
+ }
+ defer termbox.Close()
+
+ event_queue := make(chan termbox.Event)
+ go func() {
+ for {
+ event_queue <- termbox.PollEvent()
+ }
+ }()
+
+ draw()
+loop:
+ for {
+ select {
+ case ev := <-event_queue:
+ if ev.Type == termbox.EventKey && ev.Key == termbox.KeyEsc {
+ break loop
+ }
+ default:
+ draw()
+ time.Sleep(10 * time.Millisecond)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/raw_input.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/raw_input.go
new file mode 100644
index 00000000000..97a489758f4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/_demos/raw_input.go
@@ -0,0 +1,109 @@
+package main
+
+import (
+ "fmt"
+ "github.com/nsf/termbox-go"
+ "strings"
+)
+
+func tbprint(x, y int, fg, bg termbox.Attribute, msg string) {
+ for _, c := range msg {
+ termbox.SetCell(x, y, c, fg, bg)
+ x++
+ }
+}
+
+var current string
+var curev termbox.Event
+
+func mouse_button_str(k termbox.Key) string {
+ switch k {
+ case termbox.MouseLeft:
+ return "MouseLeft"
+ case termbox.MouseMiddle:
+ return "MouseMiddle"
+ case termbox.MouseRight:
+ return "MouseRight"
+ case termbox.MouseRelease:
+ return "MouseRelease"
+ case termbox.MouseWheelUp:
+ return "MouseWheelUp"
+ case termbox.MouseWheelDown:
+ return "MouseWheelDown"
+ }
+ return "Key"
+}
+
+func mod_str(m termbox.Modifier) string {
+ var out []string
+ if m&termbox.ModAlt != 0 {
+ out = append(out, "ModAlt")
+ }
+ if m&termbox.ModMotion != 0 {
+ out = append(out, "ModMotion")
+ }
+ return strings.Join(out, " | ")
+}
+
+func redraw_all() {
+ const coldef = termbox.ColorDefault
+ termbox.Clear(coldef, coldef)
+ tbprint(0, 0, termbox.ColorMagenta, coldef, "Press 'q' to quit")
+ tbprint(0, 1, coldef, coldef, current)
+ switch curev.Type {
+ case termbox.EventKey:
+ tbprint(0, 2, coldef, coldef,
+ fmt.Sprintf("EventKey: k: %d, c: %c, mod: %s", curev.Key, curev.Ch, mod_str(curev.Mod)))
+ case termbox.EventMouse:
+ tbprint(0, 2, coldef, coldef,
+ fmt.Sprintf("EventMouse: x: %d, y: %d, b: %s, mod: %s",
+ curev.MouseX, curev.MouseY, mouse_button_str(curev.Key), mod_str(curev.Mod)))
+ case termbox.EventNone:
+ tbprint(0, 2, coldef, coldef, "EventNone")
+ }
+ tbprint(0, 3, coldef, coldef, fmt.Sprintf("%d", curev.N))
+ termbox.Flush()
+}
+
+func main() {
+ err := termbox.Init()
+ if err != nil {
+ panic(err)
+ }
+ defer termbox.Close()
+ termbox.SetInputMode(termbox.InputAlt | termbox.InputMouse)
+ redraw_all()
+
+ data := make([]byte, 0, 64)
+mainloop:
+ for {
+ if cap(data)-len(data) < 32 {
+ newdata := make([]byte, len(data), len(data)+32)
+ copy(newdata, data)
+ data = newdata
+ }
+ beg := len(data)
+ d := data[beg : beg+32]
+ switch ev := termbox.PollRawEvent(d); ev.Type {
+ case termbox.EventRaw:
+ data = data[:beg+ev.N]
+ current = fmt.Sprintf("%q", data)
+ if current == `"q"` {
+ break mainloop
+ }
+
+ for {
+ ev := termbox.ParseEvent(data)
+ if ev.N == 0 {
+ break
+ }
+ curev = ev
+ copy(data, data[curev.N:])
+ data = data[:len(data)-curev.N]
+ }
+ case termbox.EventError:
+ panic(ev.Err)
+ }
+ redraw_all()
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api.go
new file mode 100644
index 00000000000..b339e532f8e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api.go
@@ -0,0 +1,458 @@
+// +build !windows
+
+package termbox
+
+import "github.com/mattn/go-runewidth"
+import "fmt"
+import "os"
+import "os/signal"
+import "syscall"
+import "runtime"
+
+// public API
+
+// Initializes the termbox library. This function should be called before any
+// other function. After successful initialization, the library must be
+// finalized using the 'Close' function.
+//
+// Example usage:
+// err := termbox.Init()
+// if err != nil {
+// panic(err)
+// }
+// defer termbox.Close()
+func Init() error {
+ var err error
+
+ out, err = os.OpenFile("/dev/tty", syscall.O_WRONLY, 0)
+ if err != nil {
+ return err
+ }
+ in, err = syscall.Open("/dev/tty", syscall.O_RDONLY, 0)
+ if err != nil {
+ return err
+ }
+
+ err = setup_term()
+ if err != nil {
+ return fmt.Errorf("termbox: error while reading terminfo data: %v", err)
+ }
+
+ signal.Notify(sigwinch, syscall.SIGWINCH)
+ signal.Notify(sigio, syscall.SIGIO)
+
+ _, err = fcntl(in, syscall.F_SETFL, syscall.O_ASYNC|syscall.O_NONBLOCK)
+ if err != nil {
+ return err
+ }
+ _, err = fcntl(in, syscall.F_SETOWN, syscall.Getpid())
+ if runtime.GOOS != "darwin" && err != nil {
+ return err
+ }
+ err = tcgetattr(out.Fd(), &orig_tios)
+ if err != nil {
+ return err
+ }
+
+ tios := orig_tios
+ tios.Iflag &^= syscall_IGNBRK | syscall_BRKINT | syscall_PARMRK |
+ syscall_ISTRIP | syscall_INLCR | syscall_IGNCR |
+ syscall_ICRNL | syscall_IXON
+ tios.Lflag &^= syscall_ECHO | syscall_ECHONL | syscall_ICANON |
+ syscall_ISIG | syscall_IEXTEN
+ tios.Cflag &^= syscall_CSIZE | syscall_PARENB
+ tios.Cflag |= syscall_CS8
+ tios.Cc[syscall_VMIN] = 1
+ tios.Cc[syscall_VTIME] = 0
+
+ err = tcsetattr(out.Fd(), &tios)
+ if err != nil {
+ return err
+ }
+
+ out.WriteString(funcs[t_enter_ca])
+ out.WriteString(funcs[t_enter_keypad])
+ out.WriteString(funcs[t_hide_cursor])
+ out.WriteString(funcs[t_clear_screen])
+
+ termw, termh = get_term_size(out.Fd())
+ back_buffer.init(termw, termh)
+ front_buffer.init(termw, termh)
+ back_buffer.clear()
+ front_buffer.clear()
+
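+ // input reader goroutine: woken by SIGIO, it drains the tty in
+ // non-blocking mode and hands each chunk to the poller via input_comm;
+ // the consumer sends the buffer back on the same channel, a simple
+ // ownership handshake that lets the 128-byte buffer be reused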
+ go func() {
+ buf := make([]byte, 128)
+ for {
+ select {
+ case <-sigio:
+ for {
+ n, err := syscall.Read(in, buf)
+ if err == syscall.EAGAIN || err == syscall.EWOULDBLOCK {
+ break
+ }
+ select {
+ case input_comm <- input_event{buf[:n], err}:
+ ie := <-input_comm
+ buf = ie.data[:128]
+ case <-quit:
+ return
+ }
+ }
+ case <-quit:
+ return
+ }
+ }
+ }()
+
+ IsInit = true
+ return nil
+}
+
+// Interrupt an in-progress call to PollEvent by causing it to return
+// EventInterrupt. Note that this function will block until the PollEvent
+// function has successfully been interrupted.
+func Interrupt() {
+ interrupt_comm <- struct{}{}
+}
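+
+// A minimal usage sketch (illustrative, not part of the vendored source):
+// waking a blocked PollEvent from another goroutine:
+//
+// go func() {
+// time.Sleep(time.Second) // assumes the "time" package is imported
+// termbox.Interrupt()
+// }()
+// if termbox.PollEvent().Type == termbox.EventInterrupt {
+// // e.g. trigger a redraw or a clean shutdown
+// }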
+
+// Finalizes the termbox library; it should be called after successful
+// initialization when termbox's functionality isn't required anymore.
+func Close() {
+ quit <- 1
+ out.WriteString(funcs[t_show_cursor])
+ out.WriteString(funcs[t_sgr0])
+ out.WriteString(funcs[t_clear_screen])
+ out.WriteString(funcs[t_exit_ca])
+ out.WriteString(funcs[t_exit_keypad])
+ out.WriteString(funcs[t_exit_mouse])
+ tcsetattr(out.Fd(), &orig_tios)
+
+ out.Close()
+ syscall.Close(in)
+
+ // reset the state, so that on next Init() it will work again
+ termw = 0
+ termh = 0
+ input_mode = InputEsc
+ out = nil
+ in = 0
+ lastfg = attr_invalid
+ lastbg = attr_invalid
+ lastx = coord_invalid
+ lasty = coord_invalid
+ cursor_x = cursor_hidden
+ cursor_y = cursor_hidden
+ foreground = ColorDefault
+ background = ColorDefault
+ IsInit = false
+}
+
+// Synchronizes the internal back buffer with the terminal.
+func Flush() error {
+ // invalidate cursor position
+ lastx = coord_invalid
+ lasty = coord_invalid
+
+ update_size_maybe()
+
+ for y := 0; y < front_buffer.height; y++ {
+ line_offset := y * front_buffer.width
+ for x := 0; x < front_buffer.width; {
+ cell_offset := line_offset + x
+ back := &back_buffer.cells[cell_offset]
+ front := &front_buffer.cells[cell_offset]
+ if back.Ch < ' ' {
+ back.Ch = ' '
+ }
+ w := runewidth.RuneWidth(back.Ch)
+ if w == 0 || (w == 2 && runewidth.IsAmbiguousWidth(back.Ch)) {
+ w = 1
+ }
+ if *back == *front {
+ x += w
+ continue
+ }
+ *front = *back
+ send_attr(back.Fg, back.Bg)
+
+ if w == 2 && x == front_buffer.width-1 {
+ // there's not enough space for 2-cells rune,
+ // let's just put a space in there
+ send_char(x, y, ' ')
+ } else {
+ send_char(x, y, back.Ch)
+ if w == 2 {
+ next := cell_offset + 1
+ front_buffer.cells[next] = Cell{
+ Ch: 0,
+ Fg: back.Fg,
+ Bg: back.Bg,
+ }
+ }
+ }
+ x += w
+ }
+ }
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ write_cursor(cursor_x, cursor_y)
+ }
+ return flush()
+}
+
+// Sets the position of the cursor. See also HideCursor().
+func SetCursor(x, y int) {
+ if is_cursor_hidden(cursor_x, cursor_y) && !is_cursor_hidden(x, y) {
+ outbuf.WriteString(funcs[t_show_cursor])
+ }
+
+ if !is_cursor_hidden(cursor_x, cursor_y) && is_cursor_hidden(x, y) {
+ outbuf.WriteString(funcs[t_hide_cursor])
+ }
+
+ cursor_x, cursor_y = x, y
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ write_cursor(cursor_x, cursor_y)
+ }
+}
+
+// The shortcut for SetCursor(-1, -1).
+func HideCursor() {
+ SetCursor(cursor_hidden, cursor_hidden)
+}
+
+// Changes cell's parameters in the internal back buffer at the specified
+// position.
+func SetCell(x, y int, ch rune, fg, bg Attribute) {
+ if x < 0 || x >= back_buffer.width {
+ return
+ }
+ if y < 0 || y >= back_buffer.height {
+ return
+ }
+
+ back_buffer.cells[y*back_buffer.width+x] = Cell{ch, fg, bg}
+}
+
+// Returns a slice into the termbox's back buffer. You can get its dimensions
+// using the 'Size' function. The slice remains valid as long as no 'Clear' or
+// 'Flush' calls were made after calling this function.
+func CellBuffer() []Cell {
+ return back_buffer.cells
+}
+
+// After getting a raw event from a PollRawEvent function call, you can parse
+// it again into an ordinary one using termbox logic; that is, parse the event
+// as termbox would. In addition to the usual Event struct fields, the
+// returned event sets the N field to the number of bytes consumed from the
+// 'data' slice. If the length of the 'data' slice is zero or the event cannot
+// be parsed for some other reason, the function returns a special event type:
+// EventNone.
+//
+// IMPORTANT: EventNone may contain a non-zero N, which means you should skip
+// these bytes, because termbox cannot recognize them.
+//
+// NOTE: This API is experimental and may change in future.
+func ParseEvent(data []byte) Event {
+ event := Event{Type: EventKey}
+ ok := extract_event(data, &event)
+ if !ok {
+ return Event{Type: EventNone, N: event.N}
+ }
+ return event
+}
+
+// Wait for an event and return it. This is a blocking function call. Instead
+// of EventKey and EventMouse it returns EventRaw events. The raw event is
+// written into the `data` slice and the Event's N field is set to the number
+// of bytes written. The minimum required length of the 'data' slice is 1;
+// this requirement may vary on different platforms.
+//
+// NOTE: This API is experimental and may change in future.
+func PollRawEvent(data []byte) Event {
+ if len(data) == 0 {
+ panic("len(data) >= 1 is a requirement")
+ }
+
+ var event Event
+ if extract_raw_event(data, &event) {
+ return event
+ }
+
+ for {
+ select {
+ case ev := <-input_comm:
+ if ev.err != nil {
+ return Event{Type: EventError, Err: ev.err}
+ }
+
+ inbuf = append(inbuf, ev.data...)
+ input_comm <- ev
+ if extract_raw_event(data, &event) {
+ return event
+ }
+ case <-interrupt_comm:
+ event.Type = EventInterrupt
+ return event
+
+ case <-sigwinch:
+ event.Type = EventResize
+ event.Width, event.Height = get_term_size(out.Fd())
+ return event
+ }
+ }
+}
+
+// Wait for an event and return it. This is a blocking function call.
+func PollEvent() Event {
+ var event Event
+
+ // try to extract event from input buffer, return on success
+ event.Type = EventKey
+ ok := extract_event(inbuf, &event)
+ if event.N != 0 {
+ copy(inbuf, inbuf[event.N:])
+ inbuf = inbuf[:len(inbuf)-event.N]
+ }
+ if ok {
+ return event
+ }
+
+ for {
+ select {
+ case ev := <-input_comm:
+ if ev.err != nil {
+ return Event{Type: EventError, Err: ev.err}
+ }
+
+ inbuf = append(inbuf, ev.data...)
+ input_comm <- ev
+ ok := extract_event(inbuf, &event)
+ if event.N != 0 {
+ copy(inbuf, inbuf[event.N:])
+ inbuf = inbuf[:len(inbuf)-event.N]
+ }
+ if ok {
+ return event
+ }
+ case <-interrupt_comm:
+ event.Type = EventInterrupt
+ return event
+
+ case <-sigwinch:
+ event.Type = EventResize
+ event.Width, event.Height = get_term_size(out.Fd())
+ return event
+ }
+ }
+ panic("unreachable")
+}
+
+// Returns the size of the internal back buffer (which is mostly the same as
+// the terminal's window size in characters), but it doesn't always match the
+// size of the terminal window: after the terminal size has changed, the
+// internal back buffer gets back in sync only after a Clear or Flush call.
+func Size() (width int, height int) {
+ return termw, termh
+}
+
+// Clears the internal back buffer.
+func Clear(fg, bg Attribute) error {
+ foreground, background = fg, bg
+ err := update_size_maybe()
+ back_buffer.clear()
+ return err
+}
+
+// Sets termbox input mode. Termbox has two input modes:
+//
+// 1. Esc input mode. When an ESC sequence is in the buffer and it doesn't
+// match any known sequence, ESC means KeyEsc. This is the default input mode.
+//
+// 2. Alt input mode. When an ESC sequence is in the buffer and it doesn't
+// match any known sequence, ESC enables the ModAlt modifier for the next
+// keyboard event.
+//
+// Both input modes can be OR'ed with Mouse mode. Setting the Mouse mode bit
+// enables mouse button press/release and drag events.
+//
+// If 'mode' is InputCurrent, returns the current input mode. See also the
+// Input* constants.
+func SetInputMode(mode InputMode) InputMode {
+ if mode == InputCurrent {
+ return input_mode
+ }
+ if mode&(InputEsc|InputAlt) == 0 {
+ mode |= InputEsc
+ }
+ if mode&(InputEsc|InputAlt) == InputEsc|InputAlt {
+ mode &^= InputAlt
+ }
+ if mode&InputMouse != 0 {
+ out.WriteString(funcs[t_enter_mouse])
+ } else {
+ out.WriteString(funcs[t_exit_mouse])
+ }
+
+ input_mode = mode
+ return input_mode
+}
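+
+// A minimal sketch (illustrative, not part of the vendored source) of
+// enabling mouse reporting on top of the default Esc mode:
+//
+// termbox.SetInputMode(termbox.InputEsc | termbox.InputMouse)
+// ev := termbox.PollEvent() // may now be EventMouse with MouseX/MouseY set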
+
+// Sets the termbox output mode. Termbox has four output options:
+//
+// 1. OutputNormal => [1..8]
+// This mode provides 8 different colors:
+// black, red, green, yellow, blue, magenta, cyan, white
+// Shortcut: ColorBlack, ColorRed, ...
+// Attributes: AttrBold, AttrUnderline, AttrReverse
+//
+// Example usage:
+// SetCell(x, y, '@', ColorBlack | AttrBold, ColorRed);
+//
+// 2. Output256 => [1..256]
+// In this mode you can leverage the 256 terminal mode:
+// 0x01 - 0x08: the 8 colors as in OutputNormal
+// 0x09 - 0x10: Color* | AttrBold
+// 0x11 - 0xe8: 216 different colors
+// 0xe9 - 0x100: 24 different shades of grey
+//
+// Example usage:
+// SetCell(x, y, '@', 184, 240);
+// SetCell(x, y, '@', 0xb8, 0xf0);
+//
+// 3. Output216 => [1..216]
+// This mode supports the 3rd range of the 256 mode only.
+// But you don't need to provide an offset.
+//
+// 4. OutputGrayscale => [1..26]
+// This mode supports the 4th range of the 256 mode
+// and the black and white colors from the 3rd range of the 256 mode,
+// but you don't need to provide an offset.
+//
+// In all modes, 0x00 represents the default color.
+//
+// `go run _demos/output.go` to see its impact on your terminal.
+//
+// If 'mode' is OutputCurrent, it returns the current output mode.
+//
+// Note that this may return a different OutputMode than the one requested,
+// as the requested mode may not be available on the target platform.
+func SetOutputMode(mode OutputMode) OutputMode {
+ if mode == OutputCurrent {
+ return output_mode
+ }
+
+ output_mode = mode
+ return output_mode
+}
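+
+// A minimal sketch (illustrative, not part of the vendored source):
+// requesting 256-color output and handling the documented fallback (e.g.
+// the Windows implementation always reports OutputNormal):
+//
+// if termbox.SetOutputMode(termbox.Output256) != termbox.Output256 {
+// // stick to the 8-color OutputNormal palette
+// }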
+
+// Sync comes in handy when something, such as a third-party process, causes a
+// desync between termbox's understanding of the terminal buffer and reality.
+// Sync forces a complete resync between termbox and the terminal; it may not
+// be visually pretty, though.
+func Sync() error {
+ front_buffer.clear()
+ err := send_clear()
+ if err != nil {
+ return err
+ }
+
+ return Flush()
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api_common.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api_common.go
new file mode 100644
index 00000000000..9f23661f561
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api_common.go
@@ -0,0 +1,187 @@
+// termbox is a library for creating cross-platform text-based interfaces
+package termbox
+
+// public API, common OS agnostic part
+
+type (
+ InputMode int
+ OutputMode int
+ EventType uint8
+ Modifier uint8
+ Key uint16
+ Attribute uint16
+)
+
+// This type represents a termbox event. The 'Mod', 'Key' and 'Ch' fields are
+// valid if 'Type' is EventKey. The 'Width' and 'Height' fields are valid if
+// 'Type' is EventResize. The 'Err' field is valid if 'Type' is EventError.
+type Event struct {
+ Type EventType // one of Event* constants
+ Mod Modifier // one of Mod* constants or 0
+ Key Key // one of Key* constants, invalid if 'Ch' is not 0
+ Ch rune // a unicode character
+ Width int // width of the screen
+ Height int // height of the screen
+ Err error // error in case input failed
+ MouseX int // x coord of mouse
+ MouseY int // y coord of mouse
+ N int // number of bytes written when getting a raw event
+}
+
+// A cell, a single conceptual entity on the screen. The screen is basically a 2d
+// array of cells. 'Ch' is a unicode character, 'Fg' and 'Bg' are foreground
+// and background attributes respectively.
+type Cell struct {
+ Ch rune
+ Fg Attribute
+ Bg Attribute
+}
+
+// Reports whether termbox has been initialized or not
+var (
+ IsInit bool = false
+)
+
+// Key constants, see Event.Key field.
+const (
+ KeyF1 Key = 0xFFFF - iota
+ KeyF2
+ KeyF3
+ KeyF4
+ KeyF5
+ KeyF6
+ KeyF7
+ KeyF8
+ KeyF9
+ KeyF10
+ KeyF11
+ KeyF12
+ KeyInsert
+ KeyDelete
+ KeyHome
+ KeyEnd
+ KeyPgup
+ KeyPgdn
+ KeyArrowUp
+ KeyArrowDown
+ KeyArrowLeft
+ KeyArrowRight
+ key_min // see terminfo
+ MouseLeft
+ MouseMiddle
+ MouseRight
+ MouseRelease
+ MouseWheelUp
+ MouseWheelDown
+)
+
+const (
+ KeyCtrlTilde Key = 0x00
+ KeyCtrl2 Key = 0x00
+ KeyCtrlSpace Key = 0x00
+ KeyCtrlA Key = 0x01
+ KeyCtrlB Key = 0x02
+ KeyCtrlC Key = 0x03
+ KeyCtrlD Key = 0x04
+ KeyCtrlE Key = 0x05
+ KeyCtrlF Key = 0x06
+ KeyCtrlG Key = 0x07
+ KeyBackspace Key = 0x08
+ KeyCtrlH Key = 0x08
+ KeyTab Key = 0x09
+ KeyCtrlI Key = 0x09
+ KeyCtrlJ Key = 0x0A
+ KeyCtrlK Key = 0x0B
+ KeyCtrlL Key = 0x0C
+ KeyEnter Key = 0x0D
+ KeyCtrlM Key = 0x0D
+ KeyCtrlN Key = 0x0E
+ KeyCtrlO Key = 0x0F
+ KeyCtrlP Key = 0x10
+ KeyCtrlQ Key = 0x11
+ KeyCtrlR Key = 0x12
+ KeyCtrlS Key = 0x13
+ KeyCtrlT Key = 0x14
+ KeyCtrlU Key = 0x15
+ KeyCtrlV Key = 0x16
+ KeyCtrlW Key = 0x17
+ KeyCtrlX Key = 0x18
+ KeyCtrlY Key = 0x19
+ KeyCtrlZ Key = 0x1A
+ KeyEsc Key = 0x1B
+ KeyCtrlLsqBracket Key = 0x1B
+ KeyCtrl3 Key = 0x1B
+ KeyCtrl4 Key = 0x1C
+ KeyCtrlBackslash Key = 0x1C
+ KeyCtrl5 Key = 0x1D
+ KeyCtrlRsqBracket Key = 0x1D
+ KeyCtrl6 Key = 0x1E
+ KeyCtrl7 Key = 0x1F
+ KeyCtrlSlash Key = 0x1F
+ KeyCtrlUnderscore Key = 0x1F
+ KeySpace Key = 0x20
+ KeyBackspace2 Key = 0x7F
+ KeyCtrl8 Key = 0x7F
+)
+
+// Alt modifier constant, see Event.Mod field and SetInputMode function.
+const (
+ ModAlt Modifier = 1 << iota
+ ModMotion
+)
+
+// Cell colors, you can combine a color with multiple attributes using bitwise
+// OR ('|').
+const (
+ ColorDefault Attribute = iota
+ ColorBlack
+ ColorRed
+ ColorGreen
+ ColorYellow
+ ColorBlue
+ ColorMagenta
+ ColorCyan
+ ColorWhite
+)
+
+// Cell attributes. It is possible to use multiple attributes by combining
+// them using bitwise OR ('|'); colors cannot be combined, but you can combine
+// attributes with a single color.
+//
+// It's worth mentioning that some platforms don't support certain attributes.
+// For example, the Windows console doesn't support AttrUnderline, and on some
+// terminals applying AttrBold to the background may result in blinking text.
+// Use them with caution and test your code on various terminals.
+const (
+ AttrBold Attribute = 1 << (iota + 9)
+ AttrUnderline
+ AttrReverse
+)
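+
+// An illustrative combination (not part of the vendored source): a single
+// color OR'ed with any number of attributes is a valid Attribute value:
+//
+// fg := termbox.ColorGreen | termbox.AttrBold | termbox.AttrUnderline
+// termbox.SetCell(0, 0, 'X', fg, termbox.ColorDefault)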
+
+// Input mode. See SetInputMode function.
+const (
+ InputEsc InputMode = 1 << iota
+ InputAlt
+ InputMouse
+ InputCurrent InputMode = 0
+)
+
+// Output mode. See SetOutputMode function.
+const (
+ OutputCurrent OutputMode = iota
+ OutputNormal
+ Output256
+ Output216
+ OutputGrayscale
+)
+
+// Event type. See Event.Type field.
+const (
+ EventKey EventType = iota
+ EventResize
+ EventMouse
+ EventError
+ EventInterrupt
+ EventRaw
+ EventNone
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api_windows.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api_windows.go
new file mode 100644
index 00000000000..7def30a67df
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/api_windows.go
@@ -0,0 +1,239 @@
+package termbox
+
+import (
+ "syscall"
+)
+
+// public API
+
+// Initializes the termbox library. This function should be called before any
+// other function. After successful initialization, the library must be
+// finalized using the 'Close' function.
+//
+// Example usage:
+// err := termbox.Init()
+// if err != nil {
+// panic(err)
+// }
+// defer termbox.Close()
+func Init() error {
+ var err error
+
+ interrupt, err = create_event()
+ if err != nil {
+ return err
+ }
+
+ in, err = syscall.Open("CONIN$", syscall.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+ out, err = syscall.Open("CONOUT$", syscall.O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+
+ err = get_console_mode(in, &orig_mode)
+ if err != nil {
+ return err
+ }
+
+ err = set_console_mode(in, enable_window_input)
+ if err != nil {
+ return err
+ }
+
+ orig_size = get_term_size(out)
+ win_size := get_win_size(out)
+
+ err = set_console_screen_buffer_size(out, win_size)
+ if err != nil {
+ return err
+ }
+
+ err = get_console_cursor_info(out, &orig_cursor_info)
+ if err != nil {
+ return err
+ }
+
+ show_cursor(false)
+ term_size = get_term_size(out)
+ back_buffer.init(int(term_size.x), int(term_size.y))
+ front_buffer.init(int(term_size.x), int(term_size.y))
+ back_buffer.clear()
+ front_buffer.clear()
+ clear()
+
+ diffbuf = make([]diff_msg, 0, 32)
+
+ go input_event_producer()
+ IsInit = true
+ return nil
+}
+
+// Finalizes the termbox library; it should be called after successful
+// initialization when termbox's functionality isn't required anymore.
+func Close() {
+ // we ignore errors here, because we can't really do anything about them
+ Clear(0, 0)
+ Flush()
+
+ // stop event producer
+ cancel_comm <- true
+ set_event(interrupt)
+ select {
+ case <-input_comm:
+ default:
+ }
+ <-cancel_done_comm
+
+ set_console_cursor_info(out, &orig_cursor_info)
+ set_console_cursor_position(out, coord{})
+ set_console_screen_buffer_size(out, orig_size)
+ set_console_mode(in, orig_mode)
+ syscall.Close(in)
+ syscall.Close(out)
+ syscall.Close(interrupt)
+ IsInit = false
+}
+
+// Interrupt an in-progress call to PollEvent by causing it to return
+// EventInterrupt. Note that this function will block until the PollEvent
+// function has successfully been interrupted.
+func Interrupt() {
+ interrupt_comm <- struct{}{}
+}
+
+// Synchronizes the internal back buffer with the terminal.
+func Flush() error {
+ update_size_maybe()
+ prepare_diff_messages()
+ for _, diff := range diffbuf {
+ r := small_rect{
+ left: 0,
+ top: diff.pos,
+ right: term_size.x - 1,
+ bottom: diff.pos + diff.lines - 1,
+ }
+ write_console_output(out, diff.chars, r)
+ }
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ move_cursor(cursor_x, cursor_y)
+ }
+ return nil
+}
+
+// Sets the position of the cursor. See also HideCursor().
+func SetCursor(x, y int) {
+ if is_cursor_hidden(cursor_x, cursor_y) && !is_cursor_hidden(x, y) {
+ show_cursor(true)
+ }
+
+ if !is_cursor_hidden(cursor_x, cursor_y) && is_cursor_hidden(x, y) {
+ show_cursor(false)
+ }
+
+ cursor_x, cursor_y = x, y
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ move_cursor(cursor_x, cursor_y)
+ }
+}
+
+// The shortcut for SetCursor(-1, -1).
+func HideCursor() {
+ SetCursor(cursor_hidden, cursor_hidden)
+}
+
+// Changes cell's parameters in the internal back buffer at the specified
+// position.
+func SetCell(x, y int, ch rune, fg, bg Attribute) {
+ if x < 0 || x >= back_buffer.width {
+ return
+ }
+ if y < 0 || y >= back_buffer.height {
+ return
+ }
+
+ back_buffer.cells[y*back_buffer.width+x] = Cell{ch, fg, bg}
+}
+
+// Returns a slice into the termbox's back buffer. You can get its dimensions
+// using the 'Size' function. The slice remains valid as long as no 'Clear' or
+// 'Flush' calls were made after calling this function.
+func CellBuffer() []Cell {
+ return back_buffer.cells
+}
+
+// Wait for an event and return it. This is a blocking function call.
+func PollEvent() Event {
+ select {
+ case ev := <-input_comm:
+ return ev
+ case <-interrupt_comm:
+ return Event{Type: EventInterrupt}
+ }
+}
+
+// Returns the size of the internal back buffer (which is mostly the same as
+// the console's window size in characters), but it doesn't always match the
+// size of the console window: after the console size has changed, the
+// internal back buffer gets back in sync only after a Clear or Flush call.
+func Size() (int, int) {
+ return int(term_size.x), int(term_size.y)
+}
+
+// Clears the internal back buffer.
+func Clear(fg, bg Attribute) error {
+ foreground, background = fg, bg
+ update_size_maybe()
+ back_buffer.clear()
+ return nil
+}
+
+// Sets termbox input mode. Termbox has two input modes:
+//
+// 1. Esc input mode. When an ESC sequence is in the buffer and it doesn't
+// match any known sequence, ESC means KeyEsc. This is the default input mode.
+//
+// 2. Alt input mode. When an ESC sequence is in the buffer and it doesn't
+// match any known sequence, ESC enables the ModAlt modifier for the next
+// keyboard event.
+//
+// Both input modes can be OR'ed with Mouse mode. Setting the Mouse mode bit
+// enables mouse button press/release and drag events.
+//
+// If 'mode' is InputCurrent, returns the current input mode. See also the
+// Input* constants.
+func SetInputMode(mode InputMode) InputMode {
+ if mode == InputCurrent {
+ return input_mode
+ }
+ if mode&InputMouse != 0 {
+ err := set_console_mode(in, enable_window_input|enable_mouse_input|enable_extended_flags)
+ if err != nil {
+ panic(err)
+ }
+ } else {
+ err := set_console_mode(in, enable_window_input)
+ if err != nil {
+ panic(err)
+ }
+ }
+
+ input_mode = mode
+ return input_mode
+}
+
+// Sets the termbox output mode.
+//
+// Windows console does not support extra colour modes,
+// so this will always set and return OutputNormal.
+func SetOutputMode(mode OutputMode) OutputMode {
+ return OutputNormal
+}
+
+// Sync comes in handy when something, such as a third-party process, causes a
+// desync between termbox's understanding of the terminal buffer and reality.
+// Sync forces a complete resync between termbox and the terminal; it may not
+// be visually pretty, though. At the moment on Windows it does nothing.
+func Sync() error {
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/collect_terminfo.py b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/collect_terminfo.py
new file mode 100755
index 00000000000..5e50975e63a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/collect_terminfo.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+
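+# Emits a Go source file on stdout with the terminfo key/function sequences
+# for the terminals listed below; redirect the output into the package,
+# e.g. ./collect_terminfo.py > terminfo_builtin.go (the target file name
+# here is illustrative).
+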
+import sys, os, subprocess
+
+def escaped(s):
+ return repr(s)[1:-1]
+
+def tput(term, name):
+ try:
+ return subprocess.check_output(['tput', '-T%s' % term, name]).decode()
+ except subprocess.CalledProcessError as e:
+ return e.output.decode()
+
+
+def w(s):
+ if s is None:
+ return
+ sys.stdout.write(s)
+
+terminals = {
+ 'xterm' : 'xterm',
+ 'rxvt-256color' : 'rxvt_256color',
+ 'rxvt-unicode' : 'rxvt_unicode',
+ 'linux' : 'linux',
+ 'Eterm' : 'eterm',
+ 'screen' : 'screen'
+}
+
+keys = [
+ "F1", "kf1",
+ "F2", "kf2",
+ "F3", "kf3",
+ "F4", "kf4",
+ "F5", "kf5",
+ "F6", "kf6",
+ "F7", "kf7",
+ "F8", "kf8",
+ "F9", "kf9",
+ "F10", "kf10",
+ "F11", "kf11",
+ "F12", "kf12",
+ "INSERT", "kich1",
+ "DELETE", "kdch1",
+ "HOME", "khome",
+ "END", "kend",
+ "PGUP", "kpp",
+ "PGDN", "knp",
+ "KEY_UP", "kcuu1",
+ "KEY_DOWN", "kcud1",
+ "KEY_LEFT", "kcub1",
+ "KEY_RIGHT", "kcuf1"
+]
+
+funcs = [
+ "T_ENTER_CA", "smcup",
+ "T_EXIT_CA", "rmcup",
+ "T_SHOW_CURSOR", "cnorm",
+ "T_HIDE_CURSOR", "civis",
+ "T_CLEAR_SCREEN", "clear",
+ "T_SGR0", "sgr0",
+ "T_UNDERLINE", "smul",
+ "T_BOLD", "bold",
+ "T_BLINK", "blink",
+ "T_REVERSE", "rev",
+ "T_ENTER_KEYPAD", "smkx",
+ "T_EXIT_KEYPAD", "rmkx"
+]
+
+def iter_pairs(iterable):
+ iterable = iter(iterable)
+ while True:
+ try:
+ yield (next(iterable), next(iterable))
+ except StopIteration:
+ return # PEP 479: don't let StopIteration escape the generator
+
+def do_term(term, nick):
+ w("// %s\n" % term)
+ w("var %s_keys = []string{\n\t" % nick)
+ for k, v in iter_pairs(keys):
+ w('"')
+ w(escaped(tput(term, v)))
+ w('",')
+ w("\n}\n")
+ w("var %s_funcs = []string{\n\t" % nick)
+ for k,v in iter_pairs(funcs):
+ w('"')
+ if v == "sgr":
+ w("\\033[3%d;4%dm")
+ elif v == "cup":
+ w("\\033[%d;%dH")
+ else:
+ w(escaped(tput(term, v)))
+ w('", ')
+ w("\n}\n\n")
+
+def do_terms(d):
+ w("var terms = []struct {\n")
+ w("\tname string\n")
+ w("\tkeys []string\n")
+ w("\tfuncs []string\n")
+ w("}{\n")
+ for k, v in d.items():
+ w('\t{"%s", %s_keys, %s_funcs},\n' % (k, v, v))
+ w("}\n\n")
+
+w("// +build !windows\n\npackage termbox\n\n")
+
+for k,v in terminals.items():
+ do_term(k, v)
+
+do_terms(terminals)
+
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls.go
new file mode 100644
index 00000000000..4f52bb9af9a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls.go
@@ -0,0 +1,39 @@
+// +build ignore
+
+package termbox
+
+/*
+#include <termios.h>
+#include <sys/ioctl.h>
+*/
+import "C"
+
+type syscall_Termios C.struct_termios
+
+const (
+ syscall_IGNBRK = C.IGNBRK
+ syscall_BRKINT = C.BRKINT
+ syscall_PARMRK = C.PARMRK
+ syscall_ISTRIP = C.ISTRIP
+ syscall_INLCR = C.INLCR
+ syscall_IGNCR = C.IGNCR
+ syscall_ICRNL = C.ICRNL
+ syscall_IXON = C.IXON
+ syscall_OPOST = C.OPOST
+ syscall_ECHO = C.ECHO
+ syscall_ECHONL = C.ECHONL
+ syscall_ICANON = C.ICANON
+ syscall_ISIG = C.ISIG
+ syscall_IEXTEN = C.IEXTEN
+ syscall_CSIZE = C.CSIZE
+ syscall_PARENB = C.PARENB
+ syscall_CS8 = C.CS8
+ syscall_VMIN = C.VMIN
+ syscall_VTIME = C.VTIME
+
+ // on darwin change these to (on *bsd too?):
+ // C.TIOCGETA
+ // C.TIOCSETA
+ syscall_TCGETS = C.TCGETS
+ syscall_TCSETS = C.TCSETS
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_darwin.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_darwin.go
new file mode 100644
index 00000000000..25b78f7ab70
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_darwin.go
@@ -0,0 +1,41 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+// +build !amd64
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_darwin_amd64.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_darwin_amd64.go
new file mode 100644
index 00000000000..11f25be79a4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_darwin_amd64.go
@@ -0,0 +1,40 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint64
+ Oflag uint64
+ Cflag uint64
+ Lflag uint64
+ Cc [20]uint8
+ Pad_cgo_0 [4]byte
+ Ispeed uint64
+ Ospeed uint64
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x40487413
+ syscall_TCSETS = 0x80487414
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_freebsd.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_freebsd.go
new file mode 100644
index 00000000000..e03624ebc71
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_freebsd.go
@@ -0,0 +1,39 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed uint32
+ Ospeed uint32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_linux.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_linux.go
new file mode 100644
index 00000000000..b88960de617
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_linux.go
@@ -0,0 +1,33 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+import "syscall"
+
+type syscall_Termios syscall.Termios
+
+const (
+ syscall_IGNBRK = syscall.IGNBRK
+ syscall_BRKINT = syscall.BRKINT
+ syscall_PARMRK = syscall.PARMRK
+ syscall_ISTRIP = syscall.ISTRIP
+ syscall_INLCR = syscall.INLCR
+ syscall_IGNCR = syscall.IGNCR
+ syscall_ICRNL = syscall.ICRNL
+ syscall_IXON = syscall.IXON
+ syscall_OPOST = syscall.OPOST
+ syscall_ECHO = syscall.ECHO
+ syscall_ECHONL = syscall.ECHONL
+ syscall_ICANON = syscall.ICANON
+ syscall_ISIG = syscall.ISIG
+ syscall_IEXTEN = syscall.IEXTEN
+ syscall_CSIZE = syscall.CSIZE
+ syscall_PARENB = syscall.PARENB
+ syscall_CS8 = syscall.CS8
+ syscall_VMIN = syscall.VMIN
+ syscall_VTIME = syscall.VTIME
+
+ syscall_TCGETS = syscall.TCGETS
+ syscall_TCSETS = syscall.TCSETS
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_netbsd.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_netbsd.go
new file mode 100644
index 00000000000..49a3355b9a2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_netbsd.go
@@ -0,0 +1,39 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed int32
+ Ospeed int32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_openbsd.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_openbsd.go
new file mode 100644
index 00000000000..49a3355b9a2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_openbsd.go
@@ -0,0 +1,39 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs syscalls.go
+
+package termbox
+
+type syscall_Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Cc [20]uint8
+ Ispeed int32
+ Ospeed int32
+}
+
+const (
+ syscall_IGNBRK = 0x1
+ syscall_BRKINT = 0x2
+ syscall_PARMRK = 0x8
+ syscall_ISTRIP = 0x20
+ syscall_INLCR = 0x40
+ syscall_IGNCR = 0x80
+ syscall_ICRNL = 0x100
+ syscall_IXON = 0x200
+ syscall_OPOST = 0x1
+ syscall_ECHO = 0x8
+ syscall_ECHONL = 0x10
+ syscall_ICANON = 0x100
+ syscall_ISIG = 0x80
+ syscall_IEXTEN = 0x400
+ syscall_CSIZE = 0x300
+ syscall_PARENB = 0x1000
+ syscall_CS8 = 0x300
+ syscall_VMIN = 0x10
+ syscall_VTIME = 0x11
+
+ syscall_TCGETS = 0x402c7413
+ syscall_TCSETS = 0x802c7414
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_windows.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_windows.go
new file mode 100644
index 00000000000..472d002a56a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/syscalls_windows.go
@@ -0,0 +1,61 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- -DUNICODE syscalls.go
+
+package termbox
+
+const (
+ foreground_blue = 0x1
+ foreground_green = 0x2
+ foreground_red = 0x4
+ foreground_intensity = 0x8
+ background_blue = 0x10
+ background_green = 0x20
+ background_red = 0x40
+ background_intensity = 0x80
+ std_input_handle = -0xa
+ std_output_handle = -0xb
+ key_event = 0x1
+ mouse_event = 0x2
+ window_buffer_size_event = 0x4
+ enable_window_input = 0x8
+ enable_mouse_input = 0x10
+ enable_extended_flags = 0x80
+
+ vk_f1 = 0x70
+ vk_f2 = 0x71
+ vk_f3 = 0x72
+ vk_f4 = 0x73
+ vk_f5 = 0x74
+ vk_f6 = 0x75
+ vk_f7 = 0x76
+ vk_f8 = 0x77
+ vk_f9 = 0x78
+ vk_f10 = 0x79
+ vk_f11 = 0x7a
+ vk_f12 = 0x7b
+ vk_insert = 0x2d
+ vk_delete = 0x2e
+ vk_home = 0x24
+ vk_end = 0x23
+ vk_pgup = 0x21
+ vk_pgdn = 0x22
+ vk_arrow_up = 0x26
+ vk_arrow_down = 0x28
+ vk_arrow_left = 0x25
+ vk_arrow_right = 0x27
+ vk_backspace = 0x8
+ vk_tab = 0x9
+ vk_enter = 0xd
+ vk_esc = 0x1b
+ vk_space = 0x20
+
+ left_alt_pressed = 0x2
+ left_ctrl_pressed = 0x8
+ right_alt_pressed = 0x1
+ right_ctrl_pressed = 0x4
+ shift_pressed = 0x10
+
+ generic_read = 0x80000000
+ generic_write = 0x40000000
+ console_textmode_buffer = 0x1
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox.go
new file mode 100644
index 00000000000..6e5ba6c8fa5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox.go
@@ -0,0 +1,514 @@
+// +build !windows
+
+package termbox
+
+import "unicode/utf8"
+import "bytes"
+import "syscall"
+import "unsafe"
+import "strings"
+import "strconv"
+import "os"
+import "io"
+
+// private API
+
+const (
+ t_enter_ca = iota
+ t_exit_ca
+ t_show_cursor
+ t_hide_cursor
+ t_clear_screen
+ t_sgr0
+ t_underline
+ t_bold
+ t_blink
+ t_reverse
+ t_enter_keypad
+ t_exit_keypad
+ t_enter_mouse
+ t_exit_mouse
+ t_max_funcs
+)
+
+const (
+ coord_invalid = -2
+ attr_invalid = Attribute(0xFFFF)
+)
+
+type input_event struct {
+ data []byte
+ err error
+}
+
+var (
+ // term specific sequences
+ keys []string
+ funcs []string
+
+ // termbox inner state
+ orig_tios syscall_Termios
+ back_buffer cellbuf
+ front_buffer cellbuf
+ termw int
+ termh int
+ input_mode = InputEsc
+ output_mode = OutputNormal
+ out *os.File
+ in int
+ lastfg = attr_invalid
+ lastbg = attr_invalid
+ lastx = coord_invalid
+ lasty = coord_invalid
+ cursor_x = cursor_hidden
+ cursor_y = cursor_hidden
+ foreground = ColorDefault
+ background = ColorDefault
+ inbuf = make([]byte, 0, 64)
+ outbuf bytes.Buffer
+ sigwinch = make(chan os.Signal, 1)
+ sigio = make(chan os.Signal, 1)
+ quit = make(chan int)
+ input_comm = make(chan input_event)
+ interrupt_comm = make(chan struct{})
+ intbuf = make([]byte, 0, 16)
+
+ // grayscale indexes
+ grayscale = []Attribute{
+ 0, 17, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244,
+ 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 232,
+ }
+)
+
+func write_cursor(x, y int) {
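+ // emits the ANSI cursor-position (CUP) sequence ESC [ row ; col H;
+ // terminal coordinates are 1-based, hence the +1 below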
+ outbuf.WriteString("\033[")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(y+1), 10))
+ outbuf.WriteString(";")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(x+1), 10))
+ outbuf.WriteString("H")
+}
+
+func write_sgr_fg(a Attribute) {
+ switch output_mode {
+ case Output256, Output216, OutputGrayscale:
+ outbuf.WriteString("\033[38;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ default:
+ outbuf.WriteString("\033[3")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ }
+}
+
+func write_sgr_bg(a Attribute) {
+ switch output_mode {
+ case Output256, Output216, OutputGrayscale:
+ outbuf.WriteString("\033[48;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ default:
+ outbuf.WriteString("\033[4")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(a-1), 10))
+ outbuf.WriteString("m")
+ }
+}
+
+func write_sgr(fg, bg Attribute) {
+ switch output_mode {
+ case Output256, Output216, OutputGrayscale:
+ outbuf.WriteString("\033[38;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(fg-1), 10))
+ outbuf.WriteString("m")
+ outbuf.WriteString("\033[48;5;")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(bg-1), 10))
+ outbuf.WriteString("m")
+ default:
+ outbuf.WriteString("\033[3")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(fg-1), 10))
+ outbuf.WriteString(";4")
+ outbuf.Write(strconv.AppendUint(intbuf, uint64(bg-1), 10))
+ outbuf.WriteString("m")
+ }
+}
+
+type winsize struct {
+ rows uint16
+ cols uint16
+ xpixels uint16
+ ypixels uint16
+}
+
+func get_term_size(fd uintptr) (int, int) {
+ var sz winsize
+ _, _, _ = syscall.Syscall(syscall.SYS_IOCTL,
+ fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&sz)))
+ return int(sz.cols), int(sz.rows)
+}
+
+func send_attr(fg, bg Attribute) {
+ if fg == lastfg && bg == lastbg {
+ return
+ }
+
+ outbuf.WriteString(funcs[t_sgr0])
+
+ var fgcol, bgcol Attribute
+
+ switch output_mode {
+ case Output256:
+ fgcol = fg & 0x1FF
+ bgcol = bg & 0x1FF
+ case Output216:
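+ // [1..216] maps onto 256-color palette indices 0x11..0xe8 by
+ // adding 0x10 below, skipping the 16 base colors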
+ fgcol = fg & 0xFF
+ bgcol = bg & 0xFF
+ if fgcol > 216 {
+ fgcol = ColorDefault
+ }
+ if bgcol > 216 {
+ bgcol = ColorDefault
+ }
+ if fgcol != ColorDefault {
+ fgcol += 0x10
+ }
+ if bgcol != ColorDefault {
+ bgcol += 0x10
+ }
+ case OutputGrayscale:
+ fgcol = fg & 0x1F
+ bgcol = bg & 0x1F
+ if fgcol > 26 {
+ fgcol = ColorDefault
+ }
+ if bgcol > 26 {
+ bgcol = ColorDefault
+ }
+ if fgcol != ColorDefault {
+ fgcol = grayscale[fgcol]
+ }
+ if bgcol != ColorDefault {
+ bgcol = grayscale[bgcol]
+ }
+ default:
+ fgcol = fg & 0x0F
+ bgcol = bg & 0x0F
+ }
+
+ if fgcol != ColorDefault {
+ if bgcol != ColorDefault {
+ write_sgr(fgcol, bgcol)
+ } else {
+ write_sgr_fg(fgcol)
+ }
+ } else if bgcol != ColorDefault {
+ write_sgr_bg(bgcol)
+ }
+
+ if fg&AttrBold != 0 {
+ outbuf.WriteString(funcs[t_bold])
+ }
+ if bg&AttrBold != 0 {
+ outbuf.WriteString(funcs[t_blink])
+ }
+ if fg&AttrUnderline != 0 {
+ outbuf.WriteString(funcs[t_underline])
+ }
+ if fg&AttrReverse|bg&AttrReverse != 0 {
+ outbuf.WriteString(funcs[t_reverse])
+ }
+
+ lastfg, lastbg = fg, bg
+}
+
+func send_char(x, y int, ch rune) {
+ var buf [8]byte
+ n := utf8.EncodeRune(buf[:], ch)
+ if x-1 != lastx || y != lasty {
+ write_cursor(x, y)
+ }
+ lastx, lasty = x, y
+ outbuf.Write(buf[:n])
+}
+
+func flush() error {
+ _, err := io.Copy(out, &outbuf)
+ outbuf.Reset()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func send_clear() error {
+ send_attr(foreground, background)
+ outbuf.WriteString(funcs[t_clear_screen])
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ write_cursor(cursor_x, cursor_y)
+ }
+
+ // We need to invalidate the cursor position too. These two vars are
+ // used only for a simple cursor-positioning optimization; the cursor
+ // may actually already be in the correct place, but discarding the
+ // optimization once gives us a simple solution for the case when the
+ // cursor has moved.
+ lastx = coord_invalid
+ lasty = coord_invalid
+
+ return flush()
+}
+
+func update_size_maybe() error {
+ w, h := get_term_size(out.Fd())
+ if w != termw || h != termh {
+ termw, termh = w, h
+ back_buffer.resize(termw, termh)
+ front_buffer.resize(termw, termh)
+ front_buffer.clear()
+ return send_clear()
+ }
+ return nil
+}
+
+func tcsetattr(fd uintptr, termios *syscall_Termios) error {
+ r, _, e := syscall.Syscall(syscall.SYS_IOCTL,
+ fd, uintptr(syscall_TCSETS), uintptr(unsafe.Pointer(termios)))
+ if r != 0 {
+ return os.NewSyscallError("SYS_IOCTL", e)
+ }
+ return nil
+}
+
+func tcgetattr(fd uintptr, termios *syscall_Termios) error {
+ r, _, e := syscall.Syscall(syscall.SYS_IOCTL,
+ fd, uintptr(syscall_TCGETS), uintptr(unsafe.Pointer(termios)))
+ if r != 0 {
+ return os.NewSyscallError("SYS_IOCTL", e)
+ }
+ return nil
+}
+
+func parse_mouse_event(event *Event, buf string) (int, bool) {
+ if strings.HasPrefix(buf, "\033[M") && len(buf) >= 6 {
+ // X10 mouse encoding, the simplest one
+ // \033 [ M Cb Cx Cy
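+ // e.g. bytes "\033[M !!": Cb=' ' (32) gives b=0, a left-button
+ // press; Cx=Cy='!' (33) gives cell (0,0) after the -1-32 below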
+ b := buf[3] - 32
+ switch b & 3 {
+ case 0:
+ if b&64 != 0 {
+ event.Key = MouseWheelUp
+ } else {
+ event.Key = MouseLeft
+ }
+ case 1:
+ if b&64 != 0 {
+ event.Key = MouseWheelDown
+ } else {
+ event.Key = MouseMiddle
+ }
+ case 2:
+ event.Key = MouseRight
+ case 3:
+ event.Key = MouseRelease
+ default:
+ return 6, false
+ }
+ event.Type = EventMouse // KeyEvent by default
+ if b&32 != 0 {
+ event.Mod |= ModMotion
+ }
+
+ // the coord is 1,1 for upper left
+ event.MouseX = int(buf[4]) - 1 - 32
+ event.MouseY = int(buf[5]) - 1 - 32
+ return 6, true
+ } else if strings.HasPrefix(buf, "\033[<") || strings.HasPrefix(buf, "\033[") {
+ // xterm 1006 extended mode or urxvt 1015 extended mode
+ // xterm: \033 [ < Cb ; Cx ; Cy (M or m)
+ // urxvt: \033 [ Cb ; Cx ; Cy M
+
+ // find the first M or m, that's where we stop
+ mi := strings.IndexAny(buf, "Mm")
+ if mi == -1 {
+ return 0, false
+ }
+
+ // whether it's a capital M or not
+ isM := buf[mi] == 'M'
+
+ // whether it's urxvt or not
+ isU := false
+
+ // buf[2] is safe here, because having M or m found means we have at
+ // least 3 bytes in a string
+ if buf[2] == '<' {
+ buf = buf[3:mi]
+ } else {
+ isU = true
+ buf = buf[2:mi]
+ }
+
+ s1 := strings.Index(buf, ";")
+ s2 := strings.LastIndex(buf, ";")
+ // not found or only one ';'
+ if s1 == -1 || s2 == -1 || s1 == s2 {
+ return 0, false
+ }
+
+ n1, err := strconv.ParseInt(buf[0:s1], 10, 64)
+ if err != nil {
+ return 0, false
+ }
+ n2, err := strconv.ParseInt(buf[s1+1:s2], 10, 64)
+ if err != nil {
+ return 0, false
+ }
+ n3, err := strconv.ParseInt(buf[s2+1:], 10, 64)
+ if err != nil {
+ return 0, false
+ }
+
+ // on urxvt, the first number is encoded exactly as in X10, but we
+ // need to make it zero-based; on xterm it is zero-based already
+ if isU {
+ n1 -= 32
+ }
+ switch n1 & 3 {
+ case 0:
+ if n1&64 != 0 {
+ event.Key = MouseWheelUp
+ } else {
+ event.Key = MouseLeft
+ }
+ case 1:
+ if n1&64 != 0 {
+ event.Key = MouseWheelDown
+ } else {
+ event.Key = MouseMiddle
+ }
+ case 2:
+ event.Key = MouseRight
+ case 3:
+ event.Key = MouseRelease
+ default:
+ return mi + 1, false
+ }
+ if !isM {
+ // on xterm mouse release is signaled by lowercase m
+ event.Key = MouseRelease
+ }
+
+ event.Type = EventMouse // KeyEvent by default
+ if n1&32 != 0 {
+ event.Mod |= ModMotion
+ }
+
+ event.MouseX = int(n2) - 1
+ event.MouseY = int(n3) - 1
+ return mi + 1, true
+ }
+
+ return 0, false
+}
+
+func parse_escape_sequence(event *Event, buf []byte) (int, bool) {
+ bufstr := string(buf)
+ for i, key := range keys {
+ if strings.HasPrefix(bufstr, key) {
+ event.Ch = 0
+ event.Key = Key(0xFFFF - i)
+ return len(key), true
+ }
+ }
+
+ // if none of the keys match, let's try mouse sequences
+ return parse_mouse_event(event, bufstr)
+}
+
+func extract_raw_event(data []byte, event *Event) bool {
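+ // moves up to len(data) buffered bytes from inbuf into data and
+ // records how many were transferred in event.N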
+ if len(inbuf) == 0 {
+ return false
+ }
+
+ n := len(data)
+ if n == 0 {
+ return false
+ }
+
+ n = copy(data, inbuf)
+ copy(inbuf, inbuf[n:])
+ inbuf = inbuf[:len(inbuf)-n]
+
+ event.N = n
+ event.Type = EventRaw
+ return true
+}
+
+func extract_event(inbuf []byte, event *Event) bool {
+ if len(inbuf) == 0 {
+ event.N = 0
+ return false
+ }
+
+ if inbuf[0] == '\033' {
+ // possible escape sequence
+ if n, ok := parse_escape_sequence(event, inbuf); n != 0 {
+ event.N = n
+ return ok
+ }
+
+ // it's not an escape sequence, so it's Alt or Esc; check input_mode
+ switch {
+ case input_mode&InputEsc != 0:
+ // if we're in escape mode, fill Esc event, pop buffer, return success
+ event.Ch = 0
+ event.Key = KeyEsc
+ event.Mod = 0
+ event.N = 1
+ return true
+ case input_mode&InputAlt != 0:
+ // if we're in alt mode, set Alt modifier to event and redo parsing
+ event.Mod = ModAlt
+ ok := extract_event(inbuf[1:], event)
+ if ok {
+ event.N++
+ } else {
+ event.N = 0
+ }
+ return ok
+ default:
+ panic("unreachable")
+ }
+ }
+
+ // if we're here, this is not an escape sequence and not an alt sequence
+ // so, it's a FUNCTIONAL KEY or a UNICODE character
+
+ // first of all check if it's a functional key
+ if Key(inbuf[0]) <= KeySpace || Key(inbuf[0]) == KeyBackspace2 {
+ // fill event, pop buffer, return success
+ event.Ch = 0
+ event.Key = Key(inbuf[0])
+ event.N = 1
+ return true
+ }
+
+ // the only possible option is utf8 rune
+ if r, n := utf8.DecodeRune(inbuf); r != utf8.RuneError {
+ event.Ch = r
+ event.Key = 0
+ event.N = n
+ return true
+ }
+
+ return false
+}
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+ r, _, e := syscall.Syscall(syscall.SYS_FCNTL, uintptr(fd), uintptr(cmd),
+ uintptr(arg))
+ val = int(r)
+ if e != 0 {
+ err = e
+ }
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox_common.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox_common.go
new file mode 100644
index 00000000000..c3355cc25e3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox_common.go
@@ -0,0 +1,59 @@
+package termbox
+
+// private API, common OS agnostic part
+
+type cellbuf struct {
+ width int
+ height int
+ cells []Cell
+}
+
+func (this *cellbuf) init(width, height int) {
+ this.width = width
+ this.height = height
+ this.cells = make([]Cell, width*height)
+}
+
+func (this *cellbuf) resize(width, height int) {
+ if this.width == width && this.height == height {
+ return
+ }
+
+ oldw := this.width
+ oldh := this.height
+ oldcells := this.cells
+
+ this.init(width, height)
+ this.clear()
+
+ minw, minh := oldw, oldh
+
+ if width < minw {
+ minw = width
+ }
+ if height < minh {
+ minh = height
+ }
+
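+ // copy the overlapping region row by row from the old buffer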
+ for i := 0; i < minh; i++ {
+ srco, dsto := i*oldw, i*width
+ src := oldcells[srco : srco+minw]
+ dst := this.cells[dsto : dsto+minw]
+ copy(dst, src)
+ }
+}
+
+func (this *cellbuf) clear() {
+ for i := range this.cells {
+ c := &this.cells[i]
+ c.Ch = ' '
+ c.Fg = foreground
+ c.Bg = background
+ }
+}
+
+const cursor_hidden = -1
+
+func is_cursor_hidden(x, y int) bool {
+ return x == cursor_hidden || y == cursor_hidden
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox_windows.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox_windows.go
new file mode 100644
index 00000000000..f7dad7b8a5f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/termbox_windows.go
@@ -0,0 +1,856 @@
+package termbox
+
+import "syscall"
+import "unsafe"
+import "unicode/utf16"
+import "github.com/mattn/go-runewidth"
+
+type (
+ wchar uint16
+ short int16
+ dword uint32
+ word uint16
+ char_info struct {
+ char wchar
+ attr word
+ }
+ coord struct {
+ x short
+ y short
+ }
+ small_rect struct {
+ left short
+ top short
+ right short
+ bottom short
+ }
+ console_screen_buffer_info struct {
+ size coord
+ cursor_position coord
+ attributes word
+ window small_rect
+ maximum_window_size coord
+ }
+ console_cursor_info struct {
+ size dword
+ visible int32
+ }
+ input_record struct {
+ event_type word
+ _ [2]byte
+ event [16]byte
+ }
+ key_event_record struct {
+ key_down int32
+ repeat_count word
+ virtual_key_code word
+ virtual_scan_code word
+ unicode_char wchar
+ control_key_state dword
+ }
+ window_buffer_size_record struct {
+ size coord
+ }
+ mouse_event_record struct {
+ mouse_pos coord
+ button_state dword
+ control_key_state dword
+ event_flags dword
+ }
+)
+
+const (
+ mouse_lmb = 0x1
+ mouse_rmb = 0x2
+ mouse_mmb = 0x4 | 0x8 | 0x10
+)
+
+func (this coord) uintptr() uintptr {
+ return uintptr(*(*int32)(unsafe.Pointer(&this)))
+}
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+var is_cjk = runewidth.IsEastAsian()
+
+var (
+ proc_set_console_active_screen_buffer = kernel32.NewProc("SetConsoleActiveScreenBuffer")
+ proc_set_console_screen_buffer_size = kernel32.NewProc("SetConsoleScreenBufferSize")
+ proc_create_console_screen_buffer = kernel32.NewProc("CreateConsoleScreenBuffer")
+ proc_get_console_screen_buffer_info = kernel32.NewProc("GetConsoleScreenBufferInfo")
+ proc_write_console_output = kernel32.NewProc("WriteConsoleOutputW")
+ proc_write_console_output_character = kernel32.NewProc("WriteConsoleOutputCharacterW")
+ proc_write_console_output_attribute = kernel32.NewProc("WriteConsoleOutputAttribute")
+ proc_set_console_cursor_info = kernel32.NewProc("SetConsoleCursorInfo")
+ proc_set_console_cursor_position = kernel32.NewProc("SetConsoleCursorPosition")
+ proc_get_console_cursor_info = kernel32.NewProc("GetConsoleCursorInfo")
+ proc_read_console_input = kernel32.NewProc("ReadConsoleInputW")
+ proc_get_console_mode = kernel32.NewProc("GetConsoleMode")
+ proc_set_console_mode = kernel32.NewProc("SetConsoleMode")
+ proc_fill_console_output_character = kernel32.NewProc("FillConsoleOutputCharacterW")
+ proc_fill_console_output_attribute = kernel32.NewProc("FillConsoleOutputAttribute")
+ proc_create_event = kernel32.NewProc("CreateEventW")
+ proc_wait_for_multiple_objects = kernel32.NewProc("WaitForMultipleObjects")
+ proc_set_event = kernel32.NewProc("SetEvent")
+)
+
+func set_console_active_screen_buffer(h syscall.Handle) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_active_screen_buffer.Addr(),
+ 1, uintptr(h), 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_screen_buffer_size(h syscall.Handle, size coord) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_screen_buffer_size.Addr(),
+ 2, uintptr(h), size.uintptr(), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func create_console_screen_buffer() (h syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall6(proc_create_console_screen_buffer.Addr(),
+ 5, uintptr(generic_read|generic_write), 0, 0, console_textmode_buffer, 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return syscall.Handle(r0), err
+}
+
+func get_console_screen_buffer_info(h syscall.Handle, info *console_screen_buffer_info) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_get_console_screen_buffer_info.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func write_console_output(h syscall.Handle, chars []char_info, dst small_rect) (err error) {
+ tmp_coord = coord{dst.right - dst.left + 1, dst.bottom - dst.top + 1}
+ tmp_rect = dst
+ r0, _, e1 := syscall.Syscall6(proc_write_console_output.Addr(),
+ 5, uintptr(h), uintptr(unsafe.Pointer(&chars[0])), tmp_coord.uintptr(),
+ tmp_coord0.uintptr(), uintptr(unsafe.Pointer(&tmp_rect)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func write_console_output_character(h syscall.Handle, chars []wchar, pos coord) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_write_console_output_character.Addr(),
+ 5, uintptr(h), uintptr(unsafe.Pointer(&chars[0])), uintptr(len(chars)),
+ pos.uintptr(), uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func write_console_output_attribute(h syscall.Handle, attrs []word, pos coord) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_write_console_output_attribute.Addr(),
+ 5, uintptr(h), uintptr(unsafe.Pointer(&attrs[0])), uintptr(len(attrs)),
+ pos.uintptr(), uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_cursor_info(h syscall.Handle, info *console_cursor_info) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_cursor_info.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func get_console_cursor_info(h syscall.Handle, info *console_cursor_info) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_get_console_cursor_info.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(info)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_cursor_position(h syscall.Handle, pos coord) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_cursor_position.Addr(),
+ 2, uintptr(h), pos.uintptr(), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func read_console_input(h syscall.Handle, record *input_record) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_read_console_input.Addr(),
+ 4, uintptr(h), uintptr(unsafe.Pointer(record)), 1, uintptr(unsafe.Pointer(&tmp_arg)), 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func get_console_mode(h syscall.Handle, mode *dword) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_get_console_mode.Addr(),
+ 2, uintptr(h), uintptr(unsafe.Pointer(mode)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_console_mode(h syscall.Handle, mode dword) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_console_mode.Addr(),
+ 2, uintptr(h), uintptr(mode), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func fill_console_output_character(h syscall.Handle, char wchar, n int) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_fill_console_output_character.Addr(),
+ 5, uintptr(h), uintptr(char), uintptr(n), tmp_coord.uintptr(),
+ uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func fill_console_output_attribute(h syscall.Handle, attr word, n int) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_fill_console_output_attribute.Addr(),
+ 5, uintptr(h), uintptr(attr), uintptr(n), tmp_coord.uintptr(),
+ uintptr(unsafe.Pointer(&tmp_arg)), 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func create_event() (out syscall.Handle, err error) {
+ r0, _, e1 := syscall.Syscall6(proc_create_event.Addr(),
+ 4, 0, 0, 0, 0, 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return syscall.Handle(r0), err
+}
+
+func wait_for_multiple_objects(objects []syscall.Handle) (err error) {
+ r0, _, e1 := syscall.Syscall6(proc_wait_for_multiple_objects.Addr(),
+ 4, uintptr(len(objects)), uintptr(unsafe.Pointer(&objects[0])),
+ 0, 0xFFFFFFFF, 0, 0)
+ if uint32(r0) == 0xFFFFFFFF {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+func set_event(ev syscall.Handle) (err error) {
+ r0, _, e1 := syscall.Syscall(proc_set_event.Addr(),
+ 1, uintptr(ev), 0, 0)
+ if int(r0) == 0 {
+ if e1 != 0 {
+ err = error(e1)
+ } else {
+ err = syscall.EINVAL
+ }
+ }
+ return
+}
+
+type diff_msg struct {
+ pos short
+ lines short
+ chars []char_info
+}
+
+type input_event struct {
+ event Event
+ err error
+}
+
+var (
+ orig_cursor_info console_cursor_info
+ orig_size coord
+ orig_mode dword
+ orig_screen syscall.Handle
+ back_buffer cellbuf
+ front_buffer cellbuf
+ term_size coord
+ input_mode = InputEsc
+ cursor_x = cursor_hidden
+ cursor_y = cursor_hidden
+ foreground = ColorDefault
+ background = ColorDefault
+ in syscall.Handle
+ out syscall.Handle
+ interrupt syscall.Handle
+ charbuf []char_info
+ diffbuf []diff_msg
+ beg_x = -1
+ beg_y = -1
+ beg_i = -1
+ input_comm = make(chan Event)
+ interrupt_comm = make(chan struct{})
+ cancel_comm = make(chan bool, 1)
+ cancel_done_comm = make(chan bool)
+ alt_mode_esc = false
+
+	// these exist just to prevent heap allocs at all costs
+ tmp_info console_screen_buffer_info
+ tmp_arg dword
+ tmp_coord0 = coord{0, 0}
+ tmp_coord = coord{0, 0}
+ tmp_rect = small_rect{0, 0, 0, 0}
+)
+
+func get_cursor_position(out syscall.Handle) coord {
+ err := get_console_screen_buffer_info(out, &tmp_info)
+ if err != nil {
+ panic(err)
+ }
+ return tmp_info.cursor_position
+}
+
+func get_term_size(out syscall.Handle) coord {
+ err := get_console_screen_buffer_info(out, &tmp_info)
+ if err != nil {
+ panic(err)
+ }
+ return tmp_info.size
+}
+
+func get_win_size(out syscall.Handle) coord {
+ err := get_console_screen_buffer_info(out, &tmp_info)
+ if err != nil {
+ panic(err)
+ }
+ return coord{
+ x: tmp_info.window.right - tmp_info.window.left + 1,
+ y: tmp_info.window.bottom - tmp_info.window.top + 1,
+ }
+}
+
+func update_size_maybe() {
+ size := get_term_size(out)
+ if size.x != term_size.x || size.y != term_size.y {
+ term_size = size
+ back_buffer.resize(int(size.x), int(size.y))
+ front_buffer.resize(int(size.x), int(size.y))
+ front_buffer.clear()
+ clear()
+
+ area := int(size.x) * int(size.y)
+ if cap(charbuf) < area {
+ charbuf = make([]char_info, 0, area)
+ }
+ }
+}
+
+var color_table_bg = []word{
+ 0, // default (black)
+ 0, // black
+ background_red,
+ background_green,
+ background_red | background_green, // yellow
+ background_blue,
+ background_red | background_blue, // magenta
+ background_green | background_blue, // cyan
+ background_red | background_blue | background_green, // white
+}
+
+var color_table_fg = []word{
+ foreground_red | foreground_blue | foreground_green, // default (white)
+ 0,
+ foreground_red,
+ foreground_green,
+ foreground_red | foreground_green, // yellow
+ foreground_blue,
+ foreground_red | foreground_blue, // magenta
+ foreground_green | foreground_blue, // cyan
+ foreground_red | foreground_blue | foreground_green, // white
+}
+
+const (
+ replacement_char = '\uFFFD'
+ max_rune = '\U0010FFFF'
+ surr1 = 0xd800
+ surr2 = 0xdc00
+ surr3 = 0xe000
+ surr_self = 0x10000
+)
+
+func append_diff_line(y int) int {
+ n := 0
+ for x := 0; x < front_buffer.width; {
+ cell_offset := y*front_buffer.width + x
+ back := &back_buffer.cells[cell_offset]
+ front := &front_buffer.cells[cell_offset]
+ attr, char := cell_to_char_info(*back)
+ charbuf = append(charbuf, char_info{attr: attr, char: char[0]})
+ *front = *back
+ n++
+ w := runewidth.RuneWidth(back.Ch)
+ if w == 0 || w == 2 && runewidth.IsAmbiguousWidth(back.Ch) {
+ w = 1
+ }
+ x += w
+ // If not CJK, fill trailing space with whitespace
+ if !is_cjk && w == 2 {
+ charbuf = append(charbuf, char_info{attr: attr, char: ' '})
+ }
+ }
+ return n
+}
+
+// compares 'back_buffer' with 'front_buffer' and prepares all changes in the
+// form of 'diff_msg's in the 'diffbuf'
+func prepare_diff_messages() {
+ // clear buffers
+ diffbuf = diffbuf[:0]
+ charbuf = charbuf[:0]
+
+ var diff diff_msg
+ gbeg := 0
+ for y := 0; y < front_buffer.height; y++ {
+ same := true
+ line_offset := y * front_buffer.width
+ for x := 0; x < front_buffer.width; x++ {
+ cell_offset := line_offset + x
+ back := &back_buffer.cells[cell_offset]
+ front := &front_buffer.cells[cell_offset]
+ if *back != *front {
+ same = false
+ break
+ }
+ }
+ if same && diff.lines > 0 {
+ diffbuf = append(diffbuf, diff)
+ diff = diff_msg{}
+ }
+ if !same {
+ beg := len(charbuf)
+ end := beg + append_diff_line(y)
+ if diff.lines == 0 {
+ diff.pos = short(y)
+ gbeg = beg
+ }
+ diff.lines++
+ diff.chars = charbuf[gbeg:end]
+ }
+ }
+ if diff.lines > 0 {
+ diffbuf = append(diffbuf, diff)
+ diff = diff_msg{}
+ }
+}
+
+func get_ct(table []word, idx int) word {
+ idx = idx & 0x0F
+ if idx >= len(table) {
+ idx = len(table) - 1
+ }
+ return table[idx]
+}
+
+func cell_to_char_info(c Cell) (attr word, wc [2]wchar) {
+ attr = get_ct(color_table_fg, int(c.Fg)) | get_ct(color_table_bg, int(c.Bg))
+ if c.Fg&AttrReverse|c.Bg&AttrReverse != 0 {
+ attr = (attr&0xF0)>>4 | (attr&0x0F)<<4
+ }
+ if c.Fg&AttrBold != 0 {
+ attr |= foreground_intensity
+ }
+ if c.Bg&AttrBold != 0 {
+ attr |= background_intensity
+ }
+
+ r0, r1 := utf16.EncodeRune(c.Ch)
+ if r0 == 0xFFFD {
+ wc[0] = wchar(c.Ch)
+ wc[1] = ' '
+ } else {
+ wc[0] = wchar(r0)
+ wc[1] = wchar(r1)
+ }
+ return
+}
+
+func move_cursor(x, y int) {
+ err := set_console_cursor_position(out, coord{short(x), short(y)})
+ if err != nil {
+ panic(err)
+ }
+}
+
+func show_cursor(visible bool) {
+ var v int32
+ if visible {
+ v = 1
+ }
+
+ var info console_cursor_info
+ info.size = 100
+ info.visible = v
+ err := set_console_cursor_info(out, &info)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func clear() {
+ var err error
+ attr, char := cell_to_char_info(Cell{
+ ' ',
+ foreground,
+ background,
+ })
+
+ area := int(term_size.x) * int(term_size.y)
+ err = fill_console_output_attribute(out, attr, area)
+ if err != nil {
+ panic(err)
+ }
+ err = fill_console_output_character(out, char[0], area)
+ if err != nil {
+ panic(err)
+ }
+ if !is_cursor_hidden(cursor_x, cursor_y) {
+ move_cursor(cursor_x, cursor_y)
+ }
+}
+
+func key_event_record_to_event(r *key_event_record) (Event, bool) {
+ if r.key_down == 0 {
+ return Event{}, false
+ }
+
+ e := Event{Type: EventKey}
+ if input_mode&InputAlt != 0 {
+ if alt_mode_esc {
+ e.Mod = ModAlt
+ alt_mode_esc = false
+ }
+ if r.control_key_state&(left_alt_pressed|right_alt_pressed) != 0 {
+ e.Mod = ModAlt
+ }
+ }
+
+ ctrlpressed := r.control_key_state&(left_ctrl_pressed|right_ctrl_pressed) != 0
+
+ if r.virtual_key_code >= vk_f1 && r.virtual_key_code <= vk_f12 {
+ switch r.virtual_key_code {
+ case vk_f1:
+ e.Key = KeyF1
+ case vk_f2:
+ e.Key = KeyF2
+ case vk_f3:
+ e.Key = KeyF3
+ case vk_f4:
+ e.Key = KeyF4
+ case vk_f5:
+ e.Key = KeyF5
+ case vk_f6:
+ e.Key = KeyF6
+ case vk_f7:
+ e.Key = KeyF7
+ case vk_f8:
+ e.Key = KeyF8
+ case vk_f9:
+ e.Key = KeyF9
+ case vk_f10:
+ e.Key = KeyF10
+ case vk_f11:
+ e.Key = KeyF11
+ case vk_f12:
+ e.Key = KeyF12
+ default:
+ panic("unreachable")
+ }
+
+ return e, true
+ }
+
+ if r.virtual_key_code <= vk_delete {
+ switch r.virtual_key_code {
+ case vk_insert:
+ e.Key = KeyInsert
+ case vk_delete:
+ e.Key = KeyDelete
+ case vk_home:
+ e.Key = KeyHome
+ case vk_end:
+ e.Key = KeyEnd
+ case vk_pgup:
+ e.Key = KeyPgup
+ case vk_pgdn:
+ e.Key = KeyPgdn
+ case vk_arrow_up:
+ e.Key = KeyArrowUp
+ case vk_arrow_down:
+ e.Key = KeyArrowDown
+ case vk_arrow_left:
+ e.Key = KeyArrowLeft
+ case vk_arrow_right:
+ e.Key = KeyArrowRight
+ case vk_backspace:
+ if ctrlpressed {
+ e.Key = KeyBackspace2
+ } else {
+ e.Key = KeyBackspace
+ }
+ case vk_tab:
+ e.Key = KeyTab
+ case vk_enter:
+ e.Key = KeyEnter
+ case vk_esc:
+ switch {
+ case input_mode&InputEsc != 0:
+ e.Key = KeyEsc
+ case input_mode&InputAlt != 0:
+ alt_mode_esc = true
+ return Event{}, false
+ }
+ case vk_space:
+ if ctrlpressed {
+ // manual return here, because KeyCtrlSpace is zero
+ e.Key = KeyCtrlSpace
+ return e, true
+ } else {
+ e.Key = KeySpace
+ }
+ }
+
+ if e.Key != 0 {
+ return e, true
+ }
+ }
+
+ if ctrlpressed {
+ if Key(r.unicode_char) >= KeyCtrlA && Key(r.unicode_char) <= KeyCtrlRsqBracket {
+ e.Key = Key(r.unicode_char)
+ if input_mode&InputAlt != 0 && e.Key == KeyEsc {
+ alt_mode_esc = true
+ return Event{}, false
+ }
+ return e, true
+ }
+ switch r.virtual_key_code {
+ case 192, 50:
+ // manual return here, because KeyCtrl2 is zero
+ e.Key = KeyCtrl2
+ return e, true
+ case 51:
+ if input_mode&InputAlt != 0 {
+ alt_mode_esc = true
+ return Event{}, false
+ }
+ e.Key = KeyCtrl3
+ case 52:
+ e.Key = KeyCtrl4
+ case 53:
+ e.Key = KeyCtrl5
+ case 54:
+ e.Key = KeyCtrl6
+ case 189, 191, 55:
+ e.Key = KeyCtrl7
+ case 8, 56:
+ e.Key = KeyCtrl8
+ }
+
+ if e.Key != 0 {
+ return e, true
+ }
+ }
+
+ if r.unicode_char != 0 {
+ e.Ch = rune(r.unicode_char)
+ return e, true
+ }
+
+ return Event{}, false
+}
+
+func input_event_producer() {
+ var r input_record
+ var err error
+ var last_button Key
+ var last_button_pressed Key
+ var last_state = dword(0)
+ var last_x, last_y = -1, -1
+ handles := []syscall.Handle{in, interrupt}
+ for {
+ err = wait_for_multiple_objects(handles)
+ if err != nil {
+ input_comm <- Event{Type: EventError, Err: err}
+ }
+
+ select {
+ case <-cancel_comm:
+ cancel_done_comm <- true
+ return
+ default:
+ }
+
+ err = read_console_input(in, &r)
+ if err != nil {
+ input_comm <- Event{Type: EventError, Err: err}
+ }
+
+ switch r.event_type {
+ case key_event:
+ kr := (*key_event_record)(unsafe.Pointer(&r.event))
+ ev, ok := key_event_record_to_event(kr)
+ if ok {
+ for i := 0; i < int(kr.repeat_count); i++ {
+ input_comm <- ev
+ }
+ }
+ case window_buffer_size_event:
+ sr := *(*window_buffer_size_record)(unsafe.Pointer(&r.event))
+ input_comm <- Event{
+ Type: EventResize,
+ Width: int(sr.size.x),
+ Height: int(sr.size.y),
+ }
+ case mouse_event:
+ mr := *(*mouse_event_record)(unsafe.Pointer(&r.event))
+ ev := Event{Type: EventMouse}
+ switch mr.event_flags {
+ case 0, 2:
+ // single or double click
+ cur_state := mr.button_state
+ switch {
+ case last_state&mouse_lmb == 0 && cur_state&mouse_lmb != 0:
+ last_button = MouseLeft
+ last_button_pressed = last_button
+ case last_state&mouse_rmb == 0 && cur_state&mouse_rmb != 0:
+ last_button = MouseRight
+ last_button_pressed = last_button
+ case last_state&mouse_mmb == 0 && cur_state&mouse_mmb != 0:
+ last_button = MouseMiddle
+ last_button_pressed = last_button
+ case last_state&mouse_lmb != 0 && cur_state&mouse_lmb == 0:
+ last_button = MouseRelease
+ case last_state&mouse_rmb != 0 && cur_state&mouse_rmb == 0:
+ last_button = MouseRelease
+ case last_state&mouse_mmb != 0 && cur_state&mouse_mmb == 0:
+ last_button = MouseRelease
+ default:
+ last_state = cur_state
+ continue
+ }
+ last_state = cur_state
+ ev.Key = last_button
+ last_x, last_y = int(mr.mouse_pos.x), int(mr.mouse_pos.y)
+ ev.MouseX = last_x
+ ev.MouseY = last_y
+ case 1:
+ // mouse motion
+ x, y := int(mr.mouse_pos.x), int(mr.mouse_pos.y)
+ if last_state != 0 && (last_x != x || last_y != y) {
+ ev.Key = last_button_pressed
+ ev.Mod = ModMotion
+ ev.MouseX = x
+ ev.MouseY = y
+ last_x, last_y = x, y
+ } else {
+ ev.Type = EventNone
+ }
+ case 4:
+ // mouse wheel
+ n := int16(mr.button_state >> 16)
+ if n > 0 {
+ ev.Key = MouseWheelUp
+ } else {
+ ev.Key = MouseWheelDown
+ }
+ last_x, last_y = int(mr.mouse_pos.x), int(mr.mouse_pos.y)
+ ev.MouseX = last_x
+ ev.MouseY = last_y
+ default:
+ ev.Type = EventNone
+ }
+ if ev.Type != EventNone {
+ input_comm <- ev
+ }
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/terminfo.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/terminfo.go
new file mode 100644
index 00000000000..35dbd70b894
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/terminfo.go
@@ -0,0 +1,221 @@
+// +build !windows
+
+// This file contains a simple and incomplete implementation of the terminfo
+// database. Information was taken from the ncurses manpages term(5) and
+// terminfo(5). Currently, only the string capabilities for special keys and
+// for functions without parameters are actually used. Colors are still done
+// with ANSI escape sequences. Other special features that are not (yet?)
+// supported are the Berkeley database format and extended capabilities.
+
+package termbox
+
+import (
+ "bytes"
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+const (
+ ti_magic = 0432
+ ti_header_length = 12
+ ti_mouse_enter = "\x1b[?1000h\x1b[?1002h\x1b[?1015h\x1b[?1006h"
+ ti_mouse_leave = "\x1b[?1006l\x1b[?1015l\x1b[?1002l\x1b[?1000l"
+)
+
+func load_terminfo() ([]byte, error) {
+ var data []byte
+ var err error
+
+ term := os.Getenv("TERM")
+ if term == "" {
+ return nil, fmt.Errorf("termbox: TERM not set")
+ }
+
+ // The following behaviour follows the one described in terminfo(5) as
+ // distributed by ncurses.
+
+ terminfo := os.Getenv("TERMINFO")
+ if terminfo != "" {
+ // if TERMINFO is set, no other directory should be searched
+ return ti_try_path(terminfo)
+ }
+
+ // next, consider ~/.terminfo
+ home := os.Getenv("HOME")
+ if home != "" {
+ data, err = ti_try_path(home + "/.terminfo")
+ if err == nil {
+ return data, nil
+ }
+ }
+
+ // next, TERMINFO_DIRS
+ dirs := os.Getenv("TERMINFO_DIRS")
+ if dirs != "" {
+ for _, dir := range strings.Split(dirs, ":") {
+ if dir == "" {
+ // "" -> "/usr/share/terminfo"
+ dir = "/usr/share/terminfo"
+ }
+ data, err = ti_try_path(dir)
+ if err == nil {
+ return data, nil
+ }
+ }
+ }
+
+ // fall back to /usr/share/terminfo
+ return ti_try_path("/usr/share/terminfo")
+}
+
+func ti_try_path(path string) (data []byte, err error) {
+ // load_terminfo already made sure it is set
+ term := os.Getenv("TERM")
+
+ // first try, the typical *nix path
+ terminfo := path + "/" + term[0:1] + "/" + term
+ data, err = ioutil.ReadFile(terminfo)
+ if err == nil {
+ return
+ }
+
+ // fallback to darwin specific dirs structure
+ terminfo = path + "/" + hex.EncodeToString([]byte(term[:1])) + "/" + term
+ data, err = ioutil.ReadFile(terminfo)
+ return
+}
+
+func setup_term_builtin() error {
+ name := os.Getenv("TERM")
+ if name == "" {
+ return errors.New("termbox: TERM environment variable not set")
+ }
+
+ for _, t := range terms {
+ if t.name == name {
+ keys = t.keys
+ funcs = t.funcs
+ return nil
+ }
+ }
+
+ compat_table := []struct {
+ partial string
+ keys []string
+ funcs []string
+ }{
+ {"xterm", xterm_keys, xterm_funcs},
+ {"rxvt", rxvt_unicode_keys, rxvt_unicode_funcs},
+ {"linux", linux_keys, linux_funcs},
+ {"Eterm", eterm_keys, eterm_funcs},
+ {"screen", screen_keys, screen_funcs},
+ // let's assume that 'cygwin' is xterm compatible
+ {"cygwin", xterm_keys, xterm_funcs},
+ {"st", xterm_keys, xterm_funcs},
+ }
+
+ // try compatibility variants
+ for _, it := range compat_table {
+ if strings.Contains(name, it.partial) {
+ keys = it.keys
+ funcs = it.funcs
+ return nil
+ }
+ }
+
+ return errors.New("termbox: unsupported terminal")
+}
+
+func setup_term() (err error) {
+ var data []byte
+ var header [6]int16
+ var str_offset, table_offset int16
+
+ data, err = load_terminfo()
+ if err != nil {
+ return setup_term_builtin()
+ }
+
+ rd := bytes.NewReader(data)
+ // 0: magic number, 1: size of names section, 2: size of boolean section, 3:
+ // size of numbers section (in integers), 4: size of the strings section (in
+ // integers), 5: size of the string table
+
+ err = binary.Read(rd, binary.LittleEndian, header[:])
+ if err != nil {
+ return
+ }
+
+ if (header[1]+header[2])%2 != 0 {
+ // old quirk to align everything on word boundaries
+ header[2] += 1
+ }
+ str_offset = ti_header_length + header[1] + header[2] + 2*header[3]
+ table_offset = str_offset + 2*header[4]
+
+ keys = make([]string, 0xFFFF-key_min)
+	for i := range keys {
+ keys[i], err = ti_read_string(rd, str_offset+2*ti_keys[i], table_offset)
+ if err != nil {
+ return
+ }
+ }
+ funcs = make([]string, t_max_funcs)
+	// the last two entries are reserved for mouse. because the table offset is
+	// not there, the two entries have to be filled in manually
+	for i := range funcs[:len(funcs)-2] {
+ funcs[i], err = ti_read_string(rd, str_offset+2*ti_funcs[i], table_offset)
+ if err != nil {
+ return
+ }
+ }
+ funcs[t_max_funcs-2] = ti_mouse_enter
+ funcs[t_max_funcs-1] = ti_mouse_leave
+ return nil
+}
+
+func ti_read_string(rd *bytes.Reader, str_off, table int16) (string, error) {
+ var off int16
+
+ _, err := rd.Seek(int64(str_off), 0)
+ if err != nil {
+ return "", err
+ }
+ err = binary.Read(rd, binary.LittleEndian, &off)
+ if err != nil {
+ return "", err
+ }
+ _, err = rd.Seek(int64(table+off), 0)
+ if err != nil {
+ return "", err
+ }
+ var bs []byte
+ for {
+ b, err := rd.ReadByte()
+ if err != nil {
+ return "", err
+ }
+ if b == byte(0x00) {
+ break
+ }
+ bs = append(bs, b)
+ }
+ return string(bs), nil
+}
+
+// "Maps" the function constants from termbox.go to the number of the respective
+// string capability in the terminfo file. Taken from (ncurses) term.h.
+var ti_funcs = []int16{
+ 28, 40, 16, 13, 5, 39, 36, 27, 26, 34, 89, 88,
+}
+
+// Same as above for the special keys.
+var ti_keys = []int16{
+ 66, 68 /* apparently not a typo; 67 is F10 for whatever reason */, 69, 70,
+ 71, 72, 73, 74, 75, 67, 216, 217, 77, 59, 76, 164, 82, 81, 87, 61, 79, 83,
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/terminfo_builtin.go b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/terminfo_builtin.go
new file mode 100644
index 00000000000..a94866067ba
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/nsf/termbox-go/terminfo_builtin.go
@@ -0,0 +1,64 @@
+// +build !windows
+
+package termbox
+
+// Eterm
+var eterm_keys = []string{
+ "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var eterm_funcs = []string{
+ "\x1b7\x1b[?47h", "\x1b[2J\x1b[?47l\x1b8", "\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "", "", "", "",
+}
+
+// screen
+var screen_keys = []string{
+ "\x1bOP", "\x1bOQ", "\x1bOR", "\x1bOS", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[1~", "\x1b[4~", "\x1b[5~", "\x1b[6~", "\x1bOA", "\x1bOB", "\x1bOD", "\x1bOC",
+}
+var screen_funcs = []string{
+ "\x1b[?1049h", "\x1b[?1049l", "\x1b[34h\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b[?1h\x1b=", "\x1b[?1l\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+// xterm
+var xterm_keys = []string{
+ "\x1bOP", "\x1bOQ", "\x1bOR", "\x1bOS", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1bOH", "\x1bOF", "\x1b[5~", "\x1b[6~", "\x1bOA", "\x1bOB", "\x1bOD", "\x1bOC",
+}
+var xterm_funcs = []string{
+ "\x1b[?1049h", "\x1b[?1049l", "\x1b[?12l\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b(B\x1b[m", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b[?1h\x1b=", "\x1b[?1l\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+// rxvt-unicode
+var rxvt_unicode_keys = []string{
+ "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var rxvt_unicode_funcs = []string{
+ "\x1b[?1049h", "\x1b[r\x1b[?1049l", "\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x1b(B", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b=", "\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+// linux
+var linux_keys = []string{
+ "\x1b[[A", "\x1b[[B", "\x1b[[C", "\x1b[[D", "\x1b[[E", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[1~", "\x1b[4~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var linux_funcs = []string{
+ "", "", "\x1b[?25h\x1b[?0c", "\x1b[?25l\x1b[?1c", "\x1b[H\x1b[J", "\x1b[0;10m", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "", "", "", "",
+}
+
+// rxvt-256color
+var rxvt_256color_keys = []string{
+ "\x1b[11~", "\x1b[12~", "\x1b[13~", "\x1b[14~", "\x1b[15~", "\x1b[17~", "\x1b[18~", "\x1b[19~", "\x1b[20~", "\x1b[21~", "\x1b[23~", "\x1b[24~", "\x1b[2~", "\x1b[3~", "\x1b[7~", "\x1b[8~", "\x1b[5~", "\x1b[6~", "\x1b[A", "\x1b[B", "\x1b[D", "\x1b[C",
+}
+var rxvt_256color_funcs = []string{
+ "\x1b7\x1b[?47h", "\x1b[2J\x1b[?47l\x1b8", "\x1b[?25h", "\x1b[?25l", "\x1b[H\x1b[2J", "\x1b[m\x0f", "\x1b[4m", "\x1b[1m", "\x1b[5m", "\x1b[7m", "\x1b=", "\x1b>", ti_mouse_enter, ti_mouse_leave,
+}
+
+var terms = []struct {
+ name string
+ keys []string
+ funcs []string
+}{
+ {"Eterm", eterm_keys, eterm_funcs},
+ {"screen", screen_keys, screen_funcs},
+ {"xterm", xterm_keys, xterm_funcs},
+ {"rxvt-unicode", rxvt_unicode_keys, rxvt_unicode_funcs},
+ {"linux", linux_keys, linux_funcs},
+ {"rxvt-256color", rxvt_256color_keys, rxvt_256color_funcs},
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/.gitignore b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/.gitignore
new file mode 100644
index 00000000000..6ad551742d3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/.gitignore
@@ -0,0 +1,3 @@
+.DS_Store
+Thumbs.db
+/.idea
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/.travis.yml b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/.travis.yml
new file mode 100644
index 00000000000..44217c97335
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+
+install:
+ - go get -t ./...
+
+script: go test -v
+
+sudo: false
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/LICENSE.md b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/LICENSE.md
new file mode 100644
index 00000000000..48a3731c01a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/LICENSE.md
@@ -0,0 +1,23 @@
+Copyright (c) 2015 SmartyStreets, LLC
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+NOTE: Various optional and subordinate components carry their own licensing
+requirements and restrictions. Use of those components is subject to the terms
+and conditions outlined the respective license of each component.
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/README.md b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/README.md
new file mode 100644
index 00000000000..58383bb00af
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/README.md
@@ -0,0 +1,575 @@
+# assertions
+--
+ import "github.com/smartystreets/assertions"
+
+Package assertions contains the implementations for all assertions which are
+referenced in goconvey's `convey` package
+(github.com/smartystreets/goconvey/convey) and gunit
+(github.com/smartystreets/gunit) for use with the So(...) method. They can also
+be used in traditional Go test functions and even in applications.
+
+Many of the assertions lean heavily on work done by Aaron Jacobs in his
+excellent oglematchers library. (https://github.com/jacobsa/oglematchers) The
+ShouldResemble assertion leans heavily on work done by Daniel Jacques in his
+very helpful go-render library. (https://github.com/luci/go-render)
+
+## Usage
+
+#### func GoConveyMode
+
+```go
+func GoConveyMode(yes bool)
+```
+GoConveyMode provides control over JSON serialization of failures. When the
+assertions in this package are used from the convey package, JSON results are
+very helpful and can be rendered in a DIFF view. In that case, this function
+will be called with a true value to enable the JSON serialization. By default,
+the assertions in this package will not serialize a JSON result, making
+standalone usage more convenient.
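+
+For instance, a standalone program can log any failure message directly. A
+minimal sketch (assuming the `log` package is imported):
+
+```go
+GoConveyMode(false) // keep plain failure messages for log output
+if ok, message := So(1, ShouldEqual, 2); !ok {
+	log.Println(message)
+}
+```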
+
+#### func ShouldAlmostEqual
+
+```go
+func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldAlmostEqual makes sure that two parameters are close enough to being
+equal. The acceptable delta may be specified with a third argument, or a very
+small default delta will be used.
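+
+For example, a hypothetical check with an explicit delta (an empty return
+value means the assertion passed):
+
+```go
+// |3.14159 - 3.1416| = 0.00001, well within the 0.001 delta, so msg is "".
+if msg := ShouldAlmostEqual(3.14159, 3.1416, 0.001); msg != "" {
+	log.Println(msg)
+}
+```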
+
+#### func ShouldBeBetween
+
+```go
+func ShouldBeBetween(actual interface{}, expected ...interface{}) string
+```
+ShouldBeBetween receives exactly three parameters: an actual value, a lower
+bound, and an upper bound. It ensures that the actual value is between both
+bounds (but not equal to either of them).
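+
+A quick sketch of the exclusive-bounds behavior:
+
+```go
+So(5, ShouldBeBetween, 1, 10) // passes
+So(1, ShouldBeBetween, 1, 10) // fails: the bounds themselves are excluded
+```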
+
+#### func ShouldBeBetweenOrEqual
+
+```go
+func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a
+lower bound, and an upper bound. It ensures that the actual value is between
+both bounds or equal to one of them.
+
+#### func ShouldBeBlank
+
+```go
+func ShouldBeBlank(actual interface{}, expected ...interface{}) string
+```
+ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal
+to "".
+
+#### func ShouldBeChronological
+
+```go
+func ShouldBeChronological(actual interface{}, expected ...interface{}) string
+```
+ShouldBeChronological receives a []time.Time slice and asserts that they are in
+chronological order, starting with the first time.Time as the earliest.
+
+#### func ShouldBeEmpty
+
+```go
+func ShouldBeEmpty(actual interface{}, expected ...interface{}) string
+```
+ShouldBeEmpty receives a single parameter (actual) and determines whether or not
+calling len(actual) would return `0`. It obeys the rules specified by the len
+function for determining length: http://golang.org/pkg/builtin/#len
+
+#### func ShouldBeFalse
+
+```go
+func ShouldBeFalse(actual interface{}, expected ...interface{}) string
+```
+ShouldBeFalse receives a single parameter and ensures that it is false.
+
+#### func ShouldBeGreaterThan
+
+```go
+func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string
+```
+ShouldBeGreaterThan receives exactly two parameters and ensures that the first
+is greater than the second.
+
+#### func ShouldBeGreaterThanOrEqualTo
+
+```go
+func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string
+```
+ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that
+the first is greater than or equal to the second.
+
+#### func ShouldBeIn
+
+```go
+func ShouldBeIn(actual interface{}, expected ...interface{}) string
+```
+ShouldBeIn receives at least 2 parameters. The first is a proposed member of the
+collection that is passed in either as the second parameter, or of the
+collection that is comprised of all the remaining parameters. This assertion
+ensures that the proposed member is in the collection (using ShouldEqual).
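+
+Both calling conventions, sketched:
+
+```go
+So(2, ShouldBeIn, []int{1, 2, 3}) // passes: collection passed as one argument
+So(4, ShouldBeIn, 1, 2, 3)        // fails: 4 is not among the remaining arguments
+```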
+
+#### func ShouldBeLessThan
+
+```go
+func ShouldBeLessThan(actual interface{}, expected ...interface{}) string
+```
+ShouldBeLessThan receives exactly two parameters and ensures that the first is
+less than the second.
+
+#### func ShouldBeLessThanOrEqualTo
+
+```go
+func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string
+```
+ShouldBeLessThanOrEqualTo receives exactly two parameters and ensures that the
+first is less than or equal to the second.
+
+#### func ShouldBeNil
+
+```go
+func ShouldBeNil(actual interface{}, expected ...interface{}) string
+```
+ShouldBeNil receives a single parameter and ensures that it is nil.
+
+#### func ShouldBeTrue
+
+```go
+func ShouldBeTrue(actual interface{}, expected ...interface{}) string
+```
+ShouldBeTrue receives a single parameter and ensures that it is true.
+
+#### func ShouldBeZeroValue
+
+```go
+func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string
+```
+ShouldBeZeroValue receives a single parameter and ensures that it is the Go
+equivalent of the default value, or "zero" value.
+
+#### func ShouldContain
+
+```go
+func ShouldContain(actual interface{}, expected ...interface{}) string
+```
+ShouldContain receives exactly two parameters. The first is a slice and the
+second is a proposed member. Membership is determined using ShouldEqual.
+
+#### func ShouldContainKey
+
+```go
+func ShouldContainKey(actual interface{}, expected ...interface{}) string
+```
+ShouldContainKey receives exactly two parameters. The first is a map and the
+second is a proposed key. Keys are compared with a simple '=='.
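+
+For example, with a hypothetical map:
+
+```go
+scores := map[string]int{"alice": 1}
+So(scores, ShouldContainKey, "alice") // passes
+So(scores, ShouldContainKey, "bob")   // fails
+```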
+
+#### func ShouldContainSubstring
+
+```go
+func ShouldContainSubstring(actual interface{}, expected ...interface{}) string
+```
+ShouldContainSubstring receives exactly 2 string parameters and ensures that the
+first contains the second as a substring.
+
+#### func ShouldEndWith
+
+```go
+func ShouldEndWith(actual interface{}, expected ...interface{}) string
+```
+ShouldEndWith receives exactly 2 string parameters and ensures that the first
+ends with the second.
+
+#### func ShouldEqual
+
+```go
+func ShouldEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldEqual receives exactly two parameters and does an equality check.
+
+#### func ShouldEqualTrimSpace
+
+```go
+func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string
+```
+ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the
+first is equal to the second after removing all leading and trailing whitespace
+using strings.TrimSpace(first).
+
+#### func ShouldEqualWithout
+
+```go
+func ShouldEqualWithout(actual interface{}, expected ...interface{}) string
+```
+ShouldEqualWithout receives exactly 3 string parameters and ensures that the
+first is equal to the second after removing all instances of the third from the
+first using strings.Replace(first, third, "", -1).
+
+#### func ShouldHappenAfter
+
+```go
+func ShouldHappenAfter(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the
+first happens after the second.
+
+#### func ShouldHappenBefore
+
+```go
+func ShouldHappenBefore(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the
+first happens before the second.
+
+#### func ShouldHappenBetween
+
+```go
+func ShouldHappenBetween(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the
+first happens between (not on) the second and third.
+
+#### func ShouldHappenOnOrAfter
+
+```go
+func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that
+the first happens on or after the second.
+
+#### func ShouldHappenOnOrBefore
+
+```go
+func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that
+the first happens on or before the second.
+
+#### func ShouldHappenOnOrBetween
+
+```go
+func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that
+the first happens between or on the second and third.
+
+#### func ShouldHappenWithin
+
+```go
+func ShouldHappenWithin(actual interface{}, expected ...interface{}) string
+```
+ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3
+arguments) and asserts that the first time.Time happens within or on the
+duration specified relative to the other time.Time.
+
+#### func ShouldHaveLength
+
+```go
+func ShouldHaveLength(actual interface{}, expected ...interface{}) string
+```
+ShouldHaveLength receives 2 parameters. The first is a collection to check the
+length of, the second being the expected length. It obeys the rules specified by
+the len function for determining length: http://golang.org/pkg/builtin/#len
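+
+A sketch showing that any value with a length works:
+
+```go
+So([]int{1, 2, 3}, ShouldHaveLength, 3)         // passes
+So("ab", ShouldHaveLength, 3)                   // fails
+So(map[string]int{"a": 1}, ShouldHaveLength, 1) // passes
+```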
+
+#### func ShouldHaveSameTypeAs
+
+```go
+func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string
+```
+ShouldHaveSameTypeAs receives exactly two parameters and compares their
+underlying types for equality.
+
+#### func ShouldImplement
+
+```go
+func ShouldImplement(actual interface{}, expectedList ...interface{}) string
+```
+ShouldImplement receives exactly two parameters and ensures that the first
+implements the interface type of the second.
+
+#### func ShouldNotAlmostEqual
+
+```go
+func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual
+
+#### func ShouldNotBeBetween
+
+```go
+func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeBetween receives exactly three parameters: an actual value, a lower
+bound, and an upper bound. It ensures that the actual value is NOT between both
+bounds.
+
+#### func ShouldNotBeBetweenOrEqual
+
+```go
+func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a
+lower bound, and an upper bound. It ensures that the actual value is neither
+between the bounds nor equal to either of them.
+
+#### func ShouldNotBeBlank
+
+```go
+func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is NOT
+equal to "".
+
+#### func ShouldNotBeEmpty
+
+```go
+func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeEmpty receives a single parameter (actual) and determines whether or
+not calling len(actual) would return a value greater than zero. It obeys the
+rules specified by the `len` function for determining length:
+http://golang.org/pkg/builtin/#len
+
+#### func ShouldNotBeIn
+
+```go
+func ShouldNotBeIn(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of
+the collection that is passed in either as the second parameter, or of the
+collection that is comprised of all the remaining parameters. This assertion
+ensures that the proposed member is NOT in the collection (using ShouldEqual).
+
+#### func ShouldNotBeNil
+
+```go
+func ShouldNotBeNil(actual interface{}, expected ...interface{}) string
+```
+ShouldNotBeNil receives a single parameter and ensures that it is not nil.
+
+#### func ShouldNotContain
+
+```go
+func ShouldNotContain(actual interface{}, expected ...interface{}) string
+```
+ShouldNotContain receives exactly two parameters. The first is a slice and the
+second is a proposed member. Membership is determined using ShouldEqual.
+
+#### func ShouldNotContainKey
+
+```go
+func ShouldNotContainKey(actual interface{}, expected ...interface{}) string
+```
+ShouldNotContainKey receives exactly two parameters. The first is a map and the
+second is a proposed absent key. Keys are compared with a simple '=='.
+
+#### func ShouldNotContainSubstring
+
+```go
+func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string
+```
+ShouldNotContainSubstring receives exactly 2 string parameters and ensures that
+the first does NOT contain the second as a substring.
+
+#### func ShouldNotEndWith
+
+```go
+func ShouldNotEndWith(actual interface{}, expected ...interface{}) string
+```
+ShouldNotEndWith receives exactly 2 string parameters and ensures that the
+first does not end with the second.
+
+#### func ShouldNotEqual
+
+```go
+func ShouldNotEqual(actual interface{}, expected ...interface{}) string
+```
+ShouldNotEqual receives exactly two parameters and does an inequality check.
+
+#### func ShouldNotHappenOnOrBetween
+
+```go
+func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string
+```
+ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts
+that the first does NOT happen between or on the second or third.
+
+#### func ShouldNotHappenWithin
+
+```go
+func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string
+```
+ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3
+arguments) and asserts that the first time.Time does NOT happen within or on the
+duration specified relative to the other time.Time.
+
+#### func ShouldNotHaveSameTypeAs
+
+```go
+func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string
+```
+ShouldNotHaveSameTypeAs receives exactly two parameters and compares their
+underlying types for inequality.
+
+#### func ShouldNotImplement
+
+```go
+func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string
+```
+ShouldNotImplement receives exactly two parameters and ensures that the first
+does NOT implement the interface type of the second.
+
+#### func ShouldNotPanic
+
+```go
+func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string)
+```
+ShouldNotPanic receives a void, niladic function and expects to execute the
+function without any panic.
+
+#### func ShouldNotPanicWith
+
+```go
+func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string)
+```
+ShouldNotPanicWith receives a void, niladic function and expects to recover a
+panic whose content differs from the second argument.
+
+#### func ShouldNotPointTo
+
+```go
+func ShouldNotPointTo(actual interface{}, expected ...interface{}) string
+```
+ShouldNotPointTo receives exactly two parameters and checks to see that they
+point to different addresses.
+
+#### func ShouldNotResemble
+
+```go
+func ShouldNotResemble(actual interface{}, expected ...interface{}) string
+```
+ShouldNotResemble receives exactly two parameters and does an inverse deep equal
+check (see reflect.DeepEqual)
+
+#### func ShouldNotStartWith
+
+```go
+func ShouldNotStartWith(actual interface{}, expected ...interface{}) string
+```
+ShouldNotStartWith receives exactly 2 string parameters and ensures that the
+first does not start with the second.
+
+#### func ShouldPanic
+
+```go
+func ShouldPanic(actual interface{}, expected ...interface{}) (message string)
+```
+ShouldPanic receives a void, niladic function and expects to recover a panic.
+
+#### func ShouldPanicWith
+
+```go
+func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string)
+```
+ShouldPanicWith receives a void, niladic function and expects to recover a panic
+with the second argument as the content.
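+
+For example:
+
+```go
+So(func() { panic("boom") }, ShouldPanicWith, "boom") // passes
+So(func() {}, ShouldPanicWith, "boom")                // fails: nothing panicked
+```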
+
+#### func ShouldPointTo
+
+```go
+func ShouldPointTo(actual interface{}, expected ...interface{}) string
+```
+ShouldPointTo receives exactly two parameters and checks to see that they point
+to the same address.
+
+#### func ShouldResemble
+
+```go
+func ShouldResemble(actual interface{}, expected ...interface{}) string
+```
+ShouldResemble receives exactly two parameters and does a deep equal check (see
+reflect.DeepEqual)
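+
+A sketch with distinct but identical slices:
+
+```go
+a := []int{1, 2, 3}
+b := []int{1, 2, 3}
+So(a, ShouldResemble, b) // passes, even though a and b are distinct slices
+```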
+
+#### func ShouldStartWith
+
+```go
+func ShouldStartWith(actual interface{}, expected ...interface{}) string
+```
+ShouldStartWith receives exactly 2 string parameters and ensures that the first
+starts with the second.
+
+#### func So
+
+```go
+func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string)
+```
+So is a convenience function (as opposed to an inconvenience function?) for
+running assertions on arbitrary arguments in any context, be it for testing or
+even application logging. It allows you to perform assertion-like behavior (and
+get nicely formatted messages detailing discrepancies) but without the program
+blowing up or panicking. All that is required is to import this package and call
+`So` with one of the assertions exported by this package as the second
+parameter. The first return parameter is a boolean indicating if the assertion
+was true. The second return parameter is the well-formatted message showing why
+an assertion was incorrect, or blank if the assertion was correct.
+
+Example:
+
+ if ok, message := So(x, ShouldBeGreaterThan, y); !ok {
+ log.Println(message)
+ }
+
+#### type Assertion
+
+```go
+type Assertion struct {
+}
+```
+
+
+#### func New
+
+```go
+func New(t testingT) *Assertion
+```
+New swallows the *testing.T struct and prints failed assertions using t.Error.
+Example: assertions.New(t).So(1, should.Equal, 1)
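+
+A slightly longer sketch inside a standard Go test, assuming this package is
+imported under its default name:
+
+```go
+func TestArithmetic(t *testing.T) {
+	assert := assertions.New(t)
+	assert.So(1+1, assertions.ShouldEqual, 2)
+	if assert.Failed() {
+		t.Log("at least one assertion failed")
+	}
+}
+```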
+
+#### func (*Assertion) Failed
+
+```go
+func (this *Assertion) Failed() bool
+```
+Failed reports whether any calls to So (on this Assertion instance) have failed.
+
+#### func (*Assertion) So
+
+```go
+func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool
+```
+So calls the standalone So function and, additionally, calls t.Error in
+failure scenarios.
+
+#### type FailureView
+
+```go
+type FailureView struct {
+ Message string `json:"Message"`
+ Expected string `json:"Expected"`
+ Actual string `json:"Actual"`
+}
+```
+
+This struct is also declared in
+github.com/smartystreets/goconvey/convey/reporting. The json struct tags should
+be equal in both declarations.
+
+#### type Serializer
+
+```go
+type Serializer interface {
+ // contains filtered or unexported methods
+}
+```
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/assertions.goconvey b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/assertions.goconvey
new file mode 100644
index 00000000000..e76cf275d47
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/assertions.goconvey
@@ -0,0 +1,3 @@
+#ignore
+-timeout=1s
+-coverpkg=github.com/smartystreets/assertions,github.com/smartystreets/assertions/internal/oglematchers \ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/collections.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/collections.go
new file mode 100644
index 00000000000..d7f407e913f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/collections.go
@@ -0,0 +1,244 @@
+package assertions
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/smartystreets/assertions/internal/oglematchers"
+)
+
+// ShouldContain receives exactly two parameters. The first is a slice and the
+// second is a proposed member. Membership is determined using ShouldEqual.
+func ShouldContain(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil {
+ typeName := reflect.TypeOf(actual)
+
+ if fmt.Sprintf("%v", matchError) == "which is not a slice or array" {
+ return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName)
+ }
+ return fmt.Sprintf(shouldHaveContained, typeName, expected[0])
+ }
+ return success
+}
+
+// ShouldNotContain receives exactly two parameters. The first is a slice and the
+// second is a proposed member. Membership is determined using ShouldEqual.
+func ShouldNotContain(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+ typeName := reflect.TypeOf(actual)
+
+ if matchError := oglematchers.Contains(expected[0]).Matches(actual); matchError != nil {
+ if fmt.Sprintf("%v", matchError) == "which is not a slice or array" {
+ return fmt.Sprintf(shouldHaveBeenAValidCollection, typeName)
+ }
+ return success
+ }
+ return fmt.Sprintf(shouldNotHaveContained, typeName, expected[0])
+}
+
+// ShouldContainKey receives exactly two parameters. The first is a map and the
+// second is a proposed key. Keys are compared with a simple '=='.
+func ShouldContainKey(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ keys, isMap := mapKeys(actual)
+ if !isMap {
+ return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual))
+ }
+
+ if !keyFound(keys, expected[0]) {
+ return fmt.Sprintf(shouldHaveContainedKey, reflect.TypeOf(actual), expected)
+ }
+
+	return success
+}
+
+// ShouldNotContainKey receives exactly two parameters. The first is a map and the
+// second is a proposed absent key. Keys are compared with a simple '=='.
+func ShouldNotContainKey(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ keys, isMap := mapKeys(actual)
+ if !isMap {
+ return fmt.Sprintf(shouldHaveBeenAValidMap, reflect.TypeOf(actual))
+ }
+
+ if keyFound(keys, expected[0]) {
+ return fmt.Sprintf(shouldNotHaveContainedKey, reflect.TypeOf(actual), expected)
+ }
+
+	return success
+}
+
+func mapKeys(m interface{}) ([]reflect.Value, bool) {
+ value := reflect.ValueOf(m)
+ if value.Kind() != reflect.Map {
+ return nil, false
+ }
+ return value.MapKeys(), true
+}
+func keyFound(keys []reflect.Value, expectedKey interface{}) bool {
+ found := false
+ for _, key := range keys {
+ if key.Interface() == expectedKey {
+ found = true
+ }
+ }
+ return found
+}
+
+// ShouldBeIn receives at least 2 parameters. The first is a proposed member of the collection
+// that is passed in either as the second parameter, or of the collection that is comprised
+// of all the remaining parameters. This assertion ensures that the proposed member is in
+// the collection (using ShouldEqual).
+func ShouldBeIn(actual interface{}, expected ...interface{}) string {
+ if fail := atLeast(1, expected); fail != success {
+ return fail
+ }
+
+ if len(expected) == 1 {
+ return shouldBeIn(actual, expected[0])
+ }
+ return shouldBeIn(actual, expected)
+}
+func shouldBeIn(actual interface{}, expected interface{}) string {
+ if matchError := oglematchers.Contains(actual).Matches(expected); matchError != nil {
+ return fmt.Sprintf(shouldHaveBeenIn, actual, reflect.TypeOf(expected))
+ }
+ return success
+}
+
+// ShouldNotBeIn receives at least 2 parameters. The first is a proposed member of the collection
+// that is passed in either as the second parameter, or of the collection that is comprised
+// of all the remaining parameters. This assertion ensures that the proposed member is NOT in
+// the collection (using ShouldEqual).
+func ShouldNotBeIn(actual interface{}, expected ...interface{}) string {
+ if fail := atLeast(1, expected); fail != success {
+ return fail
+ }
+
+ if len(expected) == 1 {
+ return shouldNotBeIn(actual, expected[0])
+ }
+ return shouldNotBeIn(actual, expected)
+}
+func shouldNotBeIn(actual interface{}, expected interface{}) string {
+ if matchError := oglematchers.Contains(actual).Matches(expected); matchError == nil {
+ return fmt.Sprintf(shouldNotHaveBeenIn, actual, reflect.TypeOf(expected))
+ }
+ return success
+}
+
+// ShouldBeEmpty receives a single parameter (actual) and determines whether or not
+// calling len(actual) would return `0`. It obeys the rules specified by the len
+// function for determining length: http://golang.org/pkg/builtin/#len
+func ShouldBeEmpty(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+
+ if actual == nil {
+ return success
+ }
+
+ value := reflect.ValueOf(actual)
+ switch value.Kind() {
+	case reflect.Slice, reflect.Chan, reflect.Map, reflect.String:
+		if value.Len() == 0 {
+			return success
+		}
+ case reflect.Ptr:
+ elem := value.Elem()
+ kind := elem.Kind()
+ if (kind == reflect.Slice || kind == reflect.Array) && elem.Len() == 0 {
+ return success
+ }
+ }
+
+ return fmt.Sprintf(shouldHaveBeenEmpty, actual)
+}
+
+// ShouldNotBeEmpty receives a single parameter (actual) and determines whether or not
+// calling len(actual) would return a value greater than zero. It obeys the rules
+// specified by the `len` function for determining length: http://golang.org/pkg/builtin/#len
+func ShouldNotBeEmpty(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+
+ if empty := ShouldBeEmpty(actual, expected...); empty != success {
+ return success
+ }
+ return fmt.Sprintf(shouldNotHaveBeenEmpty, actual)
+}
+
+// ShouldHaveLength receives 2 parameters. The first is a collection to check
+// the length of, the second being the expected length. It obeys the rules
+// specified by the len function for determining length:
+// http://golang.org/pkg/builtin/#len
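+//
+// For example (per the accompanying tests):
+//
+// So("hi", ShouldHaveLength, 2) // passes
+// So([]int{1}, ShouldHaveLength, 1) // passes
+// So([]int{}, ShouldHaveLength, 1) // fails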
+func ShouldHaveLength(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ var expectedLen int64
+ lenValue := reflect.ValueOf(expected[0])
+ switch lenValue.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ expectedLen = lenValue.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ expectedLen = int64(lenValue.Uint())
+ default:
+ return fmt.Sprintf(shouldHaveBeenAValidInteger, reflect.TypeOf(expected[0]))
+ }
+
+ if expectedLen < 0 {
+ return fmt.Sprintf(shouldHaveBeenAValidLength, expected[0])
+ }
+
+ value := reflect.ValueOf(actual)
+ switch value.Kind() {
+ case reflect.Slice,
+ reflect.Chan,
+ reflect.Map,
+ reflect.String:
+ if int64(value.Len()) == expectedLen {
+ return success
+ } else {
+ return fmt.Sprintf(shouldHaveHadLength, actual, value.Len(), expectedLen)
+ }
+ case reflect.Ptr:
+ elem := value.Elem()
+ kind := elem.Kind()
+ if kind == reflect.Slice || kind == reflect.Array {
+ if int64(elem.Len()) == expectedLen {
+ return success
+ } else {
+ return fmt.Sprintf(shouldHaveHadLength, actual, elem.Len(), expectedLen)
+ }
+ }
+ }
+ return fmt.Sprintf(shouldHaveBeenAValidCollection, reflect.TypeOf(actual))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/collections_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/collections_test.go
new file mode 100644
index 00000000000..6c7948f3c7a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/collections_test.go
@@ -0,0 +1,157 @@
+package assertions
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+func TestShouldContainKey(t *testing.T) {
+ fail(t, so(map[int]int{}, ShouldContainKey), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(map[int]int{}, ShouldContainKey, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(Thing1{}, ShouldContainKey, 1), "You must provide a valid map type (was assertions.Thing1)!")
+ fail(t, so(nil, ShouldContainKey, 1), "You must provide a valid map type (was <nil>)!")
+ fail(t, so(map[int]int{1: 41}, ShouldContainKey, 2), "Expected the map[int]int to contain the key: [2] (but it didn't)!")
+
+ pass(t, so(map[int]int{1: 41}, ShouldContainKey, 1))
+ pass(t, so(map[int]int{1: 41, 2: 42, 3: 43}, ShouldContainKey, 2))
+}
+
+func TestShouldNotContainKey(t *testing.T) {
+ fail(t, so(map[int]int{}, ShouldNotContainKey), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(map[int]int{}, ShouldNotContainKey, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(Thing1{}, ShouldNotContainKey, 1), "You must provide a valid map type (was assertions.Thing1)!")
+ fail(t, so(nil, ShouldNotContainKey, 1), "You must provide a valid map type (was <nil>)!")
+ fail(t, so(map[int]int{1: 41}, ShouldNotContainKey, 1), "Expected the map[int]int NOT to contain the key: [1] (but it did)!")
+ pass(t, so(map[int]int{1: 41}, ShouldNotContainKey, 2))
+}
+
+func TestShouldContain(t *testing.T) {
+ fail(t, so([]int{}, ShouldContain), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so([]int{}, ShouldContain, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(Thing1{}, ShouldContain, 1), "You must provide a valid container (was assertions.Thing1)!")
+ fail(t, so(nil, ShouldContain, 1), "You must provide a valid container (was <nil>)!")
+ fail(t, so([]int{1}, ShouldContain, 2), "Expected the container ([]int) to contain: '2' (but it didn't)!")
+
+ pass(t, so([]int{1}, ShouldContain, 1))
+ pass(t, so([]int{1, 2, 3}, ShouldContain, 2))
+}
+
+func TestShouldNotContain(t *testing.T) {
+ fail(t, so([]int{}, ShouldNotContain), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so([]int{}, ShouldNotContain, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(Thing1{}, ShouldNotContain, 1), "You must provide a valid container (was assertions.Thing1)!")
+ fail(t, so(nil, ShouldNotContain, 1), "You must provide a valid container (was <nil>)!")
+
+ fail(t, so([]int{1}, ShouldNotContain, 1), "Expected the container ([]int) NOT to contain: '1' (but it did)!")
+ fail(t, so([]int{1, 2, 3}, ShouldNotContain, 2), "Expected the container ([]int) NOT to contain: '2' (but it did)!")
+
+ pass(t, so([]int{1}, ShouldNotContain, 2))
+}
+
+func TestShouldBeIn(t *testing.T) {
+ fail(t, so(4, ShouldBeIn), needNonEmptyCollection)
+
+ container := []int{1, 2, 3, 4}
+ pass(t, so(4, ShouldBeIn, container))
+ pass(t, so(4, ShouldBeIn, 1, 2, 3, 4))
+
+ fail(t, so(4, ShouldBeIn, 1, 2, 3), "Expected '4' to be in the container ([]interface {}), but it wasn't!")
+ fail(t, so(4, ShouldBeIn, []int{1, 2, 3}), "Expected '4' to be in the container ([]int), but it wasn't!")
+}
+
+func TestShouldNotBeIn(t *testing.T) {
+ fail(t, so(4, ShouldNotBeIn), needNonEmptyCollection)
+
+ container := []int{1, 2, 3, 4}
+ pass(t, so(42, ShouldNotBeIn, container))
+ pass(t, so(42, ShouldNotBeIn, 1, 2, 3, 4))
+
+ fail(t, so(2, ShouldNotBeIn, 1, 2, 3), "Expected '2' NOT to be in the container ([]interface {}), but it was!")
+ fail(t, so(2, ShouldNotBeIn, []int{1, 2, 3}), "Expected '2' NOT to be in the container ([]int), but it was!")
+}
+
+func TestShouldBeEmpty(t *testing.T) {
+ fail(t, so(1, ShouldBeEmpty, 2, 3), "This assertion requires exactly 0 comparison values (you provided 2).")
+
+ pass(t, so([]int{}, ShouldBeEmpty)) // empty slice
+ pass(t, so([]interface{}{}, ShouldBeEmpty)) // empty slice
+ pass(t, so(map[string]int{}, ShouldBeEmpty)) // empty map
+ pass(t, so("", ShouldBeEmpty)) // empty string
+ pass(t, so(&[]int{}, ShouldBeEmpty)) // pointer to empty slice
+ pass(t, so(&[0]int{}, ShouldBeEmpty)) // pointer to empty array
+ pass(t, so(nil, ShouldBeEmpty)) // nil
+ pass(t, so(make(chan string), ShouldBeEmpty)) // empty channel
+
+ fail(t, so([]int{1}, ShouldBeEmpty), "Expected [1] to be empty (but it wasn't)!") // non-empty slice
+ fail(t, so([]interface{}{1}, ShouldBeEmpty), "Expected [1] to be empty (but it wasn't)!") // non-empty slice
+ fail(t, so(map[string]int{"hi": 0}, ShouldBeEmpty), "Expected map[hi:0] to be empty (but it wasn't)!") // non-empty map
+ fail(t, so("hi", ShouldBeEmpty), "Expected hi to be empty (but it wasn't)!") // non-empty string
+ fail(t, so(&[]int{1}, ShouldBeEmpty), "Expected &[1] to be empty (but it wasn't)!") // pointer to non-empty slice
+ fail(t, so(&[1]int{1}, ShouldBeEmpty), "Expected &[1] to be empty (but it wasn't)!") // pointer to non-empty array
+ c := make(chan int, 1) // non-empty channel
+ go func() { c <- 1 }()
+ time.Sleep(time.Millisecond)
+ fail(t, so(c, ShouldBeEmpty), fmt.Sprintf("Expected %+v to be empty (but it wasn't)!", c))
+}
+
+func TestShouldNotBeEmpty(t *testing.T) {
+ fail(t, so(1, ShouldNotBeEmpty, 2, 3), "This assertion requires exactly 0 comparison values (you provided 2).")
+
+ fail(t, so([]int{}, ShouldNotBeEmpty), "Expected [] to NOT be empty (but it was)!") // empty slice
+ fail(t, so([]interface{}{}, ShouldNotBeEmpty), "Expected [] to NOT be empty (but it was)!") // empty slice
+ fail(t, so(map[string]int{}, ShouldNotBeEmpty), "Expected map[] to NOT be empty (but it was)!") // empty map
+ fail(t, so("", ShouldNotBeEmpty), "Expected to NOT be empty (but it was)!") // empty string
+ fail(t, so(&[]int{}, ShouldNotBeEmpty), "Expected &[] to NOT be empty (but it was)!") // pointer to empty slice
+ fail(t, so(&[0]int{}, ShouldNotBeEmpty), "Expected &[] to NOT be empty (but it was)!") // pointer to empty array
+ fail(t, so(nil, ShouldNotBeEmpty), "Expected <nil> to NOT be empty (but it was)!") // nil
+ c := make(chan int, 0) // empty channel
+ fail(t, so(c, ShouldNotBeEmpty), fmt.Sprintf("Expected %+v to NOT be empty (but it was)!", c)) // empty channel
+
+ pass(t, so([]int{1}, ShouldNotBeEmpty)) // non-empty slice
+ pass(t, so([]interface{}{1}, ShouldNotBeEmpty)) // non-empty slice
+ pass(t, so(map[string]int{"hi": 0}, ShouldNotBeEmpty)) // non-empty map
+ pass(t, so("hi", ShouldNotBeEmpty)) // non-empty string
+ pass(t, so(&[]int{1}, ShouldNotBeEmpty)) // pointer to non-empty slice
+ pass(t, so(&[1]int{1}, ShouldNotBeEmpty)) // pointer to non-empty array
+ c = make(chan int, 1)
+ go func() { c <- 1 }()
+ time.Sleep(time.Millisecond)
+ pass(t, so(c, ShouldNotBeEmpty))
+}
+
+func TestShouldHaveLength(t *testing.T) {
+ fail(t, so(1, ShouldHaveLength, 2), "You must provide a valid container (was int)!")
+ fail(t, so(nil, ShouldHaveLength, 1), "You must provide a valid container (was <nil>)!")
+ fail(t, so("hi", ShouldHaveLength, float64(1.0)), "You must provide a valid integer (was float64)!")
+ fail(t, so([]string{}, ShouldHaveLength), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so([]string{}, ShouldHaveLength, 1, 2), "This assertion requires exactly 1 comparison values (you provided 2).")
+ fail(t, so([]string{}, ShouldHaveLength, -10), "You must provide a valid positive integer (was -10)!")
+
+ fail(t, so([]int{}, ShouldHaveLength, 1), "Expected [] (length: 0) to have length equal to '1', but it wasn't!") // empty slice
+ fail(t, so([]interface{}{}, ShouldHaveLength, 1), "Expected [] (length: 0) to have length equal to '1', but it wasn't!") // empty slice
+ fail(t, so(map[string]int{}, ShouldHaveLength, 1), "Expected map[] (length: 0) to have length equal to '1', but it wasn't!") // empty map
+ fail(t, so("", ShouldHaveLength, 1), "Expected (length: 0) to have length equal to '1', but it wasn't!") // empty string
+ fail(t, so(&[]int{}, ShouldHaveLength, 1), "Expected &[] (length: 0) to have length equal to '1', but it wasn't!") // pointer to empty slice
+ fail(t, so(&[0]int{}, ShouldHaveLength, 1), "Expected &[] (length: 0) to have length equal to '1', but it wasn't!") // pointer to empty array
+ c := make(chan int, 0) // empty channel (explicit zero capacity)
+ fail(t, so(c, ShouldHaveLength, 1), fmt.Sprintf("Expected %+v (length: 0) to have length equal to '1', but it wasn't!", c))
+ c = make(chan int) // empty channel
+ fail(t, so(c, ShouldHaveLength, 1), fmt.Sprintf("Expected %+v (length: 0) to have length equal to '1', but it wasn't!", c))
+
+ pass(t, so([]int{1}, ShouldHaveLength, 1)) // non-empty slice
+ pass(t, so([]interface{}{1}, ShouldHaveLength, 1)) // non-empty slice
+ pass(t, so(map[string]int{"hi": 0}, ShouldHaveLength, 1)) // non-empty map
+ pass(t, so("hi", ShouldHaveLength, 2)) // non-empty string
+ pass(t, so(&[]int{1}, ShouldHaveLength, 1)) // pointer to non-empty slice
+ pass(t, so(&[1]int{1}, ShouldHaveLength, 1)) // pointer to non-empty array
+ c = make(chan int, 1)
+ go func() { c <- 1 }()
+ time.Sleep(time.Millisecond)
+ pass(t, so(c, ShouldHaveLength, 1))
+
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/doc.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/doc.go
new file mode 100644
index 00000000000..5720fc298c6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/doc.go
@@ -0,0 +1,105 @@
+// Package assertions contains the implementations for all assertions which
+// are referenced in goconvey's `convey` package
+// (github.com/smartystreets/goconvey/convey) and gunit (github.com/smartystreets/gunit)
+// for use with the So(...) method.
+// They can also be used in traditional Go test functions and even in
+// applications.
+//
+// Many of the assertions lean heavily on work done by Aaron Jacobs in his excellent oglematchers library.
+// (https://github.com/jacobsa/oglematchers)
+// The ShouldResemble assertion leans heavily on work done by Daniel Jacques in his very helpful go-render library.
+// (https://github.com/luci/go-render)
+package assertions
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// By default we use a no-op serializer. The actual Serializer provides a JSON
+// representation of failure results on selected assertions so the goconvey
+// web UI can display a convenient diff.
+var serializer Serializer = new(noopSerializer)
+
+// GoConveyMode provides control over JSON serialization of failures. When
+// using the assertions in this package from the convey package, JSON results
+// are very helpful and can be rendered in a DIFF view. In that case, this
+// function will be called with a true value to enable JSON serialization. By
+// default, the assertions in this package will not serialize a JSON result,
+// making standalone usage more convenient.
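+//
+// A minimal usage sketch (not part of the upstream source):
+//
+// GoConveyMode(true) // failures carry a JSON payload for the web UI's diff view
+// GoConveyMode(false) // restore the default no-op serializer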
+func GoConveyMode(yes bool) {
+ if yes {
+ serializer = newSerializer()
+ } else {
+ serializer = new(noopSerializer)
+ }
+}
+
+type testingT interface {
+ Error(args ...interface{})
+}
+
+type Assertion struct {
+ t testingT
+ failed bool
+}
+
+// New swallows the *testing.T struct and prints failed assertions using t.Error.
+// Example: assertions.New(t).So(1, should.Equal, 1)
+func New(t testingT) *Assertion {
+ return &Assertion{t: t}
+}
+
+// Failed reports whether any calls to So (on this Assertion instance) have failed.
+func (this *Assertion) Failed() bool {
+ return this.failed
+}
+
+// So calls the standalone So function and additionally, calls t.Error in failure scenarios.
+func (this *Assertion) So(actual interface{}, assert assertion, expected ...interface{}) bool {
+ ok, result := So(actual, assert, expected...)
+ if !ok {
+ this.failed = true
+ _, file, line, _ := runtime.Caller(1)
+ this.t.Error(fmt.Sprintf("\n%s:%d\n%s", file, line, result))
+ }
+ return ok
+}
+
+// So is a convenience function (as opposed to an inconvenience function?)
+// for running assertions on arbitrary arguments in any context, be it for testing or even
+// application logging. It allows you to perform assertion-like behavior (and get nicely
+// formatted messages detailing discrepancies) but without the program blowing up or panicking.
+// All that is required is to import this package and call `So` with one of the assertions
+// exported by this package as the second parameter.
+// The first return parameter is a boolean indicating if the assertion was true. The second
+// return parameter is the well-formatted message showing why an assertion was incorrect, or
+// blank if the assertion was correct.
+//
+// Example:
+//
+// if ok, message := So(x, ShouldBeGreaterThan, y); !ok {
+// log.Println(message)
+// }
+//
+func So(actual interface{}, assert assertion, expected ...interface{}) (bool, string) {
+ if result := so(actual, assert, expected...); len(result) == 0 {
+ return true, result
+ } else {
+ return false, result
+ }
+}
+
+// so is like So, except that it only returns the string message, which is blank if the
+// assertion passed. Used to facilitate testing.
+func so(actual interface{}, assert func(interface{}, ...interface{}) string, expected ...interface{}) string {
+ return assert(actual, expected...)
+}
+
+// assertion is an alias for a function with a signature that the So()
+// function can handle. Any future or custom assertions should conform to this
+// method signature. The return value should be an empty string if the assertion
+// passes and a well-formed failure message if not.
+type assertion func(actual interface{}, expected ...interface{}) string
+
+////////////////////////////////////////////////////////////////////////////
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/doc_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/doc_test.go
new file mode 100644
index 00000000000..041faaffcb2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/doc_test.go
@@ -0,0 +1,57 @@
+package assertions
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+)
+
+func TestPassingAssertion(t *testing.T) {
+ fake := &FakeT{buffer: new(bytes.Buffer)}
+ assertion := New(fake)
+ passed := assertion.So(1, ShouldEqual, 1)
+
+ if !passed {
+ t.Error("Assertion failed when it should have passed.")
+ }
+ if fake.buffer.Len() > 0 {
+ t.Error("Unexpected error message was printed.")
+ }
+}
+
+func TestFailingAssertion(t *testing.T) {
+ fake := &FakeT{buffer: new(bytes.Buffer)}
+ assertion := New(fake)
+ passed := assertion.So(1, ShouldEqual, 2)
+
+ if passed {
+ t.Error("Assertion passed when it should have failed.")
+ }
+ if fake.buffer.Len() == 0 {
+ t.Error("Expected error message not printed.")
+ }
+}
+
+func TestFailingGroupsOfAssertions(t *testing.T) {
+ fake := &FakeT{buffer: new(bytes.Buffer)}
+ assertion1 := New(fake)
+ assertion2 := New(fake)
+
+ assertion1.So(1, ShouldEqual, 2) // fail
+ assertion2.So(1, ShouldEqual, 1) // pass
+
+ if !assertion1.Failed() {
+ t.Error("Expected the first assertion to have been marked as failed.")
+ }
+ if assertion2.Failed() {
+ t.Error("Expected the second assertion to NOT have been marked as failed.")
+ }
+}
+
+type FakeT struct {
+ buffer *bytes.Buffer
+}
+
+func (this *FakeT) Error(args ...interface{}) {
+ fmt.Fprint(this.buffer, args...)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/equality.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/equality.go
new file mode 100644
index 00000000000..2b6049c37d9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/equality.go
@@ -0,0 +1,280 @@
+package assertions
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+
+ "github.com/smartystreets/assertions/internal/oglematchers"
+ "github.com/smartystreets/assertions/internal/go-render/render"
+)
+
+// default acceptable delta for ShouldAlmostEqual
+const defaultDelta = 0.0000000001
+
+// ShouldEqual receives exactly two parameters and does an equality check.
+func ShouldEqual(actual interface{}, expected ...interface{}) string {
+ if message := need(1, expected); message != success {
+ return message
+ }
+ return shouldEqual(actual, expected[0])
+}
+func shouldEqual(actual, expected interface{}) (message string) {
+ defer func() {
+ if r := recover(); r != nil {
+ message = serializer.serialize(expected, actual, fmt.Sprintf(shouldHaveBeenEqual, expected, actual))
+ return
+ }
+ }()
+
+ if matchError := oglematchers.Equals(expected).Matches(actual); matchError != nil {
+ expectedSyntax := fmt.Sprintf("%v", expected)
+ actualSyntax := fmt.Sprintf("%v", actual)
+ if expectedSyntax == actualSyntax && reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+ message = fmt.Sprintf(shouldHaveBeenEqualTypeMismatch, expected, expected, actual, actual)
+ } else {
+ message = fmt.Sprintf(shouldHaveBeenEqual, expected, actual)
+ }
+ message = serializer.serialize(expected, actual, message)
+ return
+ }
+
+ return success
+}
+
+// ShouldNotEqual receives exactly two parameters and does an inequality check.
+func ShouldNotEqual(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ } else if ShouldEqual(actual, expected[0]) == success {
+ return fmt.Sprintf(shouldNotHaveBeenEqual, actual, expected[0])
+ }
+ return success
+}
+
+// ShouldAlmostEqual makes sure that two parameters are close enough to being equal.
+// The acceptable delta may be specified with a third argument,
+// or a very small default delta will be used.
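+//
+// A sketch mirroring the tests in equality_test.go later in this diff:
+//
+// So(1.0, ShouldAlmostEqual, 0.99999999999999) // passes with the default delta
+// So(100.0, ShouldAlmostEqual, 110.0, 10.0) // passes with an explicit delta of 10
+// So(1.0, ShouldAlmostEqual, 0.99) // fails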
+func ShouldAlmostEqual(actual interface{}, expected ...interface{}) string {
+ actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...)
+
+ if err != "" {
+ return err
+ }
+
+ if math.Abs(actualFloat-expectedFloat) <= deltaFloat {
+ return success
+ } else {
+ return fmt.Sprintf(shouldHaveBeenAlmostEqual, actualFloat, expectedFloat)
+ }
+}
+
+// ShouldNotAlmostEqual is the inverse of ShouldAlmostEqual
+func ShouldNotAlmostEqual(actual interface{}, expected ...interface{}) string {
+ actualFloat, expectedFloat, deltaFloat, err := cleanAlmostEqualInput(actual, expected...)
+
+ if err != "" {
+ return err
+ }
+
+ if math.Abs(actualFloat-expectedFloat) > deltaFloat {
+ return success
+ } else {
+ return fmt.Sprintf(shouldHaveNotBeenAlmostEqual, actualFloat, expectedFloat)
+ }
+}
+
+func cleanAlmostEqualInput(actual interface{}, expected ...interface{}) (float64, float64, float64, string) {
+ deltaFloat := defaultDelta
+
+ if len(expected) == 0 {
+ return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided neither)"
+ } else if len(expected) == 2 {
+ delta, err := getFloat(expected[1])
+
+ if err != nil {
+ return 0.0, 0.0, 0.0, "delta must be a numerical type"
+ }
+
+ deltaFloat = delta
+ } else if len(expected) > 2 {
+ return 0.0, 0.0, 0.0, "This assertion requires exactly one comparison value and an optional delta (you provided more values)"
+ }
+
+ actualFloat, err := getFloat(actual)
+
+ if err != nil {
+ return 0.0, 0.0, 0.0, err.Error()
+ }
+
+ expectedFloat, err := getFloat(expected[0])
+
+ if err != nil {
+ return 0.0, 0.0, 0.0, err.Error()
+ }
+
+ return actualFloat, expectedFloat, deltaFloat, ""
+}
+
+// returns the float value of any real number, or error if it is not a numerical type
+func getFloat(num interface{}) (float64, error) {
+ numValue := reflect.ValueOf(num)
+ numKind := numValue.Kind()
+
+ if numKind == reflect.Int ||
+ numKind == reflect.Int8 ||
+ numKind == reflect.Int16 ||
+ numKind == reflect.Int32 ||
+ numKind == reflect.Int64 {
+ return float64(numValue.Int()), nil
+ } else if numKind == reflect.Uint ||
+ numKind == reflect.Uint8 ||
+ numKind == reflect.Uint16 ||
+ numKind == reflect.Uint32 ||
+ numKind == reflect.Uint64 {
+ return float64(numValue.Uint()), nil
+ } else if numKind == reflect.Float32 ||
+ numKind == reflect.Float64 {
+ return numValue.Float(), nil
+ } else {
+ return 0.0, errors.New("must be a numerical type, but was " + numKind.String())
+ }
+}
+
+// ShouldResemble receives exactly two parameters and does a deep equal check (see reflect.DeepEqual)
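+//
+// For illustration (per the accompanying tests); note that a nil slice does
+// not resemble an empty one:
+//
+// So(map[string]string{"hi": "bye"}, ShouldResemble, map[string]string{"hi": "bye"}) // passes
+// So([]int(nil), ShouldResemble, []int{}) // fails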
+func ShouldResemble(actual interface{}, expected ...interface{}) string {
+ if message := need(1, expected); message != success {
+ return message
+ }
+
+ if matchError := oglematchers.DeepEquals(expected[0]).Matches(actual); matchError != nil {
+ return serializer.serializeDetailed(expected[0], actual,
+ fmt.Sprintf(shouldHaveResembled, render.Render(expected[0]), render.Render(actual)))
+ }
+
+ return success
+}
+
+// ShouldNotResemble receives exactly two parameters and does an inverse deep equal check (see reflect.DeepEqual)
+func ShouldNotResemble(actual interface{}, expected ...interface{}) string {
+ if message := need(1, expected); message != success {
+ return message
+ } else if ShouldResemble(actual, expected[0]) == success {
+ return fmt.Sprintf(shouldNotHaveResembled, render.Render(actual), render.Render(expected[0]))
+ }
+ return success
+}
+
+// ShouldPointTo receives exactly two parameters and checks to see that they point to the same address.
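+//
+// A sketch mirroring the accompanying tests (Thing1 is the tests' helper type):
+//
+// t1 := &Thing1{}
+// t2 := t1
+// So(t1, ShouldPointTo, t2) // passes: same address
+// So(t1, ShouldPointTo, &Thing1{}) // fails: distinct allocations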
+func ShouldPointTo(actual interface{}, expected ...interface{}) string {
+ if message := need(1, expected); message != success {
+ return message
+ }
+ return shouldPointTo(actual, expected[0])
+
+}
+func shouldPointTo(actual, expected interface{}) string {
+ actualValue := reflect.ValueOf(actual)
+ expectedValue := reflect.ValueOf(expected)
+
+ if ShouldNotBeNil(actual) != success {
+ return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "nil")
+ } else if ShouldNotBeNil(expected) != success {
+ return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "nil")
+ } else if actualValue.Kind() != reflect.Ptr {
+ return fmt.Sprintf(shouldHaveBeenNonNilPointer, "first", "not")
+ } else if expectedValue.Kind() != reflect.Ptr {
+ return fmt.Sprintf(shouldHaveBeenNonNilPointer, "second", "not")
+ } else if ShouldEqual(actualValue.Pointer(), expectedValue.Pointer()) != success {
+ actualAddress := reflect.ValueOf(actual).Pointer()
+ expectedAddress := reflect.ValueOf(expected).Pointer()
+ return serializer.serialize(expectedAddress, actualAddress, fmt.Sprintf(shouldHavePointedTo,
+ actual, actualAddress,
+ expected, expectedAddress))
+ }
+ return success
+}
+
+// ShouldNotPointTo receives exactly two parameters and checks to see that they point to different addresses.
+func ShouldNotPointTo(actual interface{}, expected ...interface{}) string {
+ if message := need(1, expected); message != success {
+ return message
+ }
+ compare := ShouldPointTo(actual, expected[0])
+ if strings.HasPrefix(compare, shouldBePointers) {
+ return compare
+ } else if compare == success {
+ return fmt.Sprintf(shouldNotHavePointedTo, actual, expected[0], reflect.ValueOf(actual).Pointer())
+ }
+ return success
+}
+
+// ShouldBeNil receives a single parameter and ensures that it is nil.
+func ShouldBeNil(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ } else if actual == nil {
+ return success
+ } else if interfaceHasNilValue(actual) {
+ return success
+ }
+ return fmt.Sprintf(shouldHaveBeenNil, actual)
+}
+func interfaceHasNilValue(actual interface{}) bool {
+ value := reflect.ValueOf(actual)
+ kind := value.Kind()
+ nilable := kind == reflect.Slice ||
+ kind == reflect.Chan ||
+ kind == reflect.Func ||
+ kind == reflect.Ptr ||
+ kind == reflect.Map
+
+ // Careful: reflect.Value.IsNil() will panic unless it's an interface, chan, map, func, slice, or ptr
+ // Reference: http://golang.org/pkg/reflect/#Value.IsNil
+ return nilable && value.IsNil()
+}
+
+// ShouldNotBeNil receives a single parameter and ensures that it is not nil.
+func ShouldNotBeNil(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ } else if ShouldBeNil(actual) == success {
+ return fmt.Sprintf(shouldNotHaveBeenNil, actual)
+ }
+ return success
+}
+
+// ShouldBeTrue receives a single parameter and ensures that it is true.
+func ShouldBeTrue(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ } else if actual != true {
+ return fmt.Sprintf(shouldHaveBeenTrue, actual)
+ }
+ return success
+}
+
+// ShouldBeFalse receives a single parameter and ensures that it is false.
+func ShouldBeFalse(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ } else if actual != false {
+ return fmt.Sprintf(shouldHaveBeenFalse, actual)
+ }
+ return success
+}
+
+// ShouldBeZeroValue receives a single parameter and ensures that it is
+// the Go equivalent of the default value, or "zero" value.
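+//
+// For example (per the accompanying tests):
+//
+// So(0, ShouldBeZeroValue) // passes
+// So("", ShouldBeZeroValue) // passes
+// So(1, ShouldBeZeroValue) // fails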
+func ShouldBeZeroValue(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+ zeroVal := reflect.Zero(reflect.TypeOf(actual)).Interface()
+ if !reflect.DeepEqual(zeroVal, actual) {
+ return serializer.serialize(zeroVal, actual, fmt.Sprintf(shouldHaveBeenZeroValue, actual))
+ }
+ return success
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/equality_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/equality_test.go
new file mode 100644
index 00000000000..5050e4b1619
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/equality_test.go
@@ -0,0 +1,269 @@
+package assertions
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+func TestShouldEqual(t *testing.T) {
+ serializer = newFakeSerializer()
+
+ fail(t, so(1, ShouldEqual), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(1, ShouldEqual, 1, 2), "This assertion requires exactly 1 comparison values (you provided 2).")
+ fail(t, so(1, ShouldEqual, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ pass(t, so(1, ShouldEqual, 1))
+ fail(t, so(1, ShouldEqual, 2), "2|1|Expected: '2' Actual: '1' (Should be equal)")
+ fail(t, so(1, ShouldEqual, "1"), "1|1|Expected: '1' (string) Actual: '1' (int) (Should be equal, type mismatch)")
+
+ pass(t, so(true, ShouldEqual, true))
+ fail(t, so(true, ShouldEqual, false), "false|true|Expected: 'false' Actual: 'true' (Should be equal)")
+
+ pass(t, so("hi", ShouldEqual, "hi"))
+ fail(t, so("hi", ShouldEqual, "bye"), "bye|hi|Expected: 'bye' Actual: 'hi' (Should be equal)")
+
+ pass(t, so(42, ShouldEqual, uint(42)))
+
+ fail(t, so(Thing1{"hi"}, ShouldEqual, Thing1{}), "{}|{hi}|Expected: '{}' Actual: '{hi}' (Should be equal)")
+ fail(t, so(Thing1{"hi"}, ShouldEqual, Thing1{"hi"}), "{hi}|{hi}|Expected: '{hi}' Actual: '{hi}' (Should be equal)")
+ fail(t, so(&Thing1{"hi"}, ShouldEqual, &Thing1{"hi"}), "&{hi}|&{hi}|Expected: '&{hi}' Actual: '&{hi}' (Should be equal)")
+
+ fail(t, so(Thing1{}, ShouldEqual, Thing2{}), "{}|{}|Expected: '{}' Actual: '{}' (Should be equal)")
+}
+
+func TestShouldNotEqual(t *testing.T) {
+ fail(t, so(1, ShouldNotEqual), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(1, ShouldNotEqual, 1, 2), "This assertion requires exactly 1 comparison values (you provided 2).")
+ fail(t, so(1, ShouldNotEqual, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ pass(t, so(1, ShouldNotEqual, 2))
+ pass(t, so(1, ShouldNotEqual, "1"))
+ fail(t, so(1, ShouldNotEqual, 1), "Expected '1' to NOT equal '1' (but it did)!")
+
+ pass(t, so(true, ShouldNotEqual, false))
+ fail(t, so(true, ShouldNotEqual, true), "Expected 'true' to NOT equal 'true' (but it did)!")
+
+ pass(t, so("hi", ShouldNotEqual, "bye"))
+ fail(t, so("hi", ShouldNotEqual, "hi"), "Expected 'hi' to NOT equal 'hi' (but it did)!")
+
+ pass(t, so(&Thing1{"hi"}, ShouldNotEqual, &Thing1{"hi"}))
+ pass(t, so(Thing1{"hi"}, ShouldNotEqual, Thing1{"hi"}))
+ pass(t, so(Thing1{}, ShouldNotEqual, Thing1{}))
+ pass(t, so(Thing1{}, ShouldNotEqual, Thing2{}))
+}
+
+func TestShouldAlmostEqual(t *testing.T) {
+ fail(t, so(1, ShouldAlmostEqual), "This assertion requires exactly one comparison value and an optional delta (you provided neither)")
+ fail(t, so(1, ShouldAlmostEqual, 1, 2, 3), "This assertion requires exactly one comparison value and an optional delta (you provided more values)")
+
+ // with the default delta
+ pass(t, so(1, ShouldAlmostEqual, .99999999999999))
+ pass(t, so(1.3612499999999996, ShouldAlmostEqual, 1.36125))
+ pass(t, so(0.7285312499999999, ShouldAlmostEqual, 0.72853125))
+ fail(t, so(1, ShouldAlmostEqual, .99), "Expected '1' to almost equal '0.99' (but it didn't)!")
+
+ // with a different delta
+ pass(t, so(100.0, ShouldAlmostEqual, 110.0, 10.0))
+ fail(t, so(100.0, ShouldAlmostEqual, 111.0, 10.5), "Expected '100' to almost equal '111' (but it didn't)!")
+
+ // ints should work
+ pass(t, so(100, ShouldAlmostEqual, 100.0))
+ fail(t, so(100, ShouldAlmostEqual, 99.0), "Expected '100' to almost equal '99' (but it didn't)!")
+
+ // float32 should work
+ pass(t, so(float64(100.0), ShouldAlmostEqual, float32(100.0)))
+ fail(t, so(float32(100.0), ShouldAlmostEqual, 99.0, float32(0.1)), "Expected '100' to almost equal '99' (but it didn't)!")
+}
+
+func TestShouldNotAlmostEqual(t *testing.T) {
+ fail(t, so(1, ShouldNotAlmostEqual), "This assertion requires exactly one comparison value and an optional delta (you provided neither)")
+ fail(t, so(1, ShouldNotAlmostEqual, 1, 2, 3), "This assertion requires exactly one comparison value and an optional delta (you provided more values)")
+
+ // with the default delta
+ fail(t, so(1, ShouldNotAlmostEqual, .99999999999999), "Expected '1' to NOT almost equal '0.99999999999999' (but it did)!")
+ fail(t, so(1.3612499999999996, ShouldNotAlmostEqual, 1.36125), "Expected '1.3612499999999996' to NOT almost equal '1.36125' (but it did)!")
+ pass(t, so(1, ShouldNotAlmostEqual, .99))
+
+ // with a different delta
+ fail(t, so(100.0, ShouldNotAlmostEqual, 110.0, 10.0), "Expected '100' to NOT almost equal '110' (but it did)!")
+ pass(t, so(100.0, ShouldNotAlmostEqual, 111.0, 10.5))
+
+ // ints should work
+ fail(t, so(100, ShouldNotAlmostEqual, 100.0), "Expected '100' to NOT almost equal '100' (but it did)!")
+ pass(t, so(100, ShouldNotAlmostEqual, 99.0))
+
+ // float32 should work
+ fail(t, so(float64(100.0), ShouldNotAlmostEqual, float32(100.0)), "Expected '100' to NOT almost equal '100' (but it did)!")
+ pass(t, so(float32(100.0), ShouldNotAlmostEqual, 99.0, float32(0.1)))
+}
+
+func TestShouldResemble(t *testing.T) {
+ serializer = newFakeSerializer()
+
+ fail(t, so(Thing1{"hi"}, ShouldResemble), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(Thing1{"hi"}, ShouldResemble, Thing1{"hi"}, Thing1{"hi"}), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ pass(t, so(Thing1{"hi"}, ShouldResemble, Thing1{"hi"}))
+ fail(t, so(Thing1{"hi"}, ShouldResemble, Thing1{"bye"}), `{bye}|{hi}|Expected: '"assertions.Thing1{a:\"bye\"}"' Actual: '"assertions.Thing1{a:\"hi\"}"' (Should resemble)!`)
+
+ var (
+ a []int
+ b []int = []int{}
+ )
+
+ fail(t, so(a, ShouldResemble, b), `[]|[]|Expected: '"[]int{}"' Actual: '"[]int(nil)"' (Should resemble)!`)
+ fail(t, so(2, ShouldResemble, 1), `1|2|Expected: '"1"' Actual: '"2"' (Should resemble)!`)
+
+ fail(t, so(StringStringMapAlias{"hi": "bye"}, ShouldResemble, map[string]string{"hi": "bye"}),
+ `map[hi:bye]|map[hi:bye]|Expected: '"map[string]string{\"hi\":\"bye\"}"' Actual: '"assertions.StringStringMapAlias{\"hi\":\"bye\"}"' (Should resemble)!`)
+ fail(t, so(StringSliceAlias{"hi", "bye"}, ShouldResemble, []string{"hi", "bye"}),
+ `[hi bye]|[hi bye]|Expected: '"[]string{\"hi\", \"bye\"}"' Actual: '"assertions.StringSliceAlias{\"hi\", \"bye\"}"' (Should resemble)!`)
+
+ // some types come out looking the same when represented with "%#v" so we show type mismatch info:
+ fail(t, so(StringAlias("hi"), ShouldResemble, "hi"), `hi|hi|Expected: '"\"hi\""' Actual: '"assertions.StringAlias(\"hi\")"' (Should resemble)!`)
+ fail(t, so(IntAlias(42), ShouldResemble, 42), `42|42|Expected: '"42"' Actual: '"assertions.IntAlias(42)"' (Should resemble)!`)
+}
+
+func TestShouldNotResemble(t *testing.T) {
+ fail(t, so(Thing1{"hi"}, ShouldNotResemble), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(Thing1{"hi"}, ShouldNotResemble, Thing1{"hi"}, Thing1{"hi"}), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ pass(t, so(Thing1{"hi"}, ShouldNotResemble, Thing1{"bye"}))
+ fail(t, so(Thing1{"hi"}, ShouldNotResemble, Thing1{"hi"}),
+ `Expected '"assertions.Thing1{a:\"hi\"}"' to NOT resemble '"assertions.Thing1{a:\"hi\"}"' (but it did)!`)
+
+ pass(t, so(map[string]string{"hi": "bye"}, ShouldResemble, map[string]string{"hi": "bye"}))
+ pass(t, so(IntAlias(42), ShouldNotResemble, 42))
+
+ pass(t, so(StringSliceAlias{"hi", "bye"}, ShouldNotResemble, []string{"hi", "bye"}))
+}
+
+func TestShouldPointTo(t *testing.T) {
+ serializer = newFakeSerializer()
+
+ t1 := &Thing1{}
+ t2 := t1
+ t3 := &Thing1{}
+
+ pointer1 := reflect.ValueOf(t1).Pointer()
+ pointer3 := reflect.ValueOf(t3).Pointer()
+
+ fail(t, so(t1, ShouldPointTo), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(t1, ShouldPointTo, t2, t3), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ pass(t, so(t1, ShouldPointTo, t2))
+ fail(t, so(t1, ShouldPointTo, t3), fmt.Sprintf(
+ "%v|%v|Expected '&{a:}' (address: '%v') and '&{a:}' (address: '%v') to be the same address (but their weren't)!",
+ pointer3, pointer1, pointer1, pointer3))
+
+ t4 := Thing1{}
+ t5 := t4
+
+ fail(t, so(t4, ShouldPointTo, t5), "Both arguments should be pointers (the first was not)!")
+ fail(t, so(&t4, ShouldPointTo, t5), "Both arguments should be pointers (the second was not)!")
+ fail(t, so(nil, ShouldPointTo, nil), "Both arguments should be pointers (the first was nil)!")
+ fail(t, so(&t4, ShouldPointTo, nil), "Both arguments should be pointers (the second was nil)!")
+}
+
+func TestShouldNotPointTo(t *testing.T) {
+ t1 := &Thing1{}
+ t2 := t1
+ t3 := &Thing1{}
+
+ pointer1 := reflect.ValueOf(t1).Pointer()
+
+ fail(t, so(t1, ShouldNotPointTo), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(t1, ShouldNotPointTo, t2, t3), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ pass(t, so(t1, ShouldNotPointTo, t3))
+ fail(t, so(t1, ShouldNotPointTo, t2), fmt.Sprintf("Expected '&{a:}' and '&{a:}' to be different references (but they matched: '%v')!", pointer1))
+
+ t4 := Thing1{}
+ t5 := t4
+
+ fail(t, so(t4, ShouldNotPointTo, t5), "Both arguments should be pointers (the first was not)!")
+ fail(t, so(&t4, ShouldNotPointTo, t5), "Both arguments should be pointers (the second was not)!")
+ fail(t, so(nil, ShouldNotPointTo, nil), "Both arguments should be pointers (the first was nil)!")
+ fail(t, so(&t4, ShouldNotPointTo, nil), "Both arguments should be pointers (the second was nil)!")
+}
+
+func TestShouldBeNil(t *testing.T) {
+ fail(t, so(nil, ShouldBeNil, nil, nil, nil), "This assertion requires exactly 0 comparison values (you provided 3).")
+ fail(t, so(nil, ShouldBeNil, nil), "This assertion requires exactly 0 comparison values (you provided 1).")
+
+ pass(t, so(nil, ShouldBeNil))
+ fail(t, so(1, ShouldBeNil), "Expected: nil Actual: '1'")
+
+ var thing Thinger
+ pass(t, so(thing, ShouldBeNil))
+ thing = &Thing{}
+ fail(t, so(thing, ShouldBeNil), "Expected: nil Actual: '&{}'")
+
+ var thingOne *Thing1
+ pass(t, so(thingOne, ShouldBeNil))
+
+ var nilSlice []int = nil
+ pass(t, so(nilSlice, ShouldBeNil))
+
+ var nilMap map[string]string = nil
+ pass(t, so(nilMap, ShouldBeNil))
+
+ var nilChannel chan int = nil
+ pass(t, so(nilChannel, ShouldBeNil))
+
+ var nilFunc func() = nil
+ pass(t, so(nilFunc, ShouldBeNil))
+
+ var nilInterface interface{} = nil
+ pass(t, so(nilInterface, ShouldBeNil))
+}
+
+func TestShouldNotBeNil(t *testing.T) {
+ fail(t, so(nil, ShouldNotBeNil, nil, nil, nil), "This assertion requires exactly 0 comparison values (you provided 3).")
+ fail(t, so(nil, ShouldNotBeNil, nil), "This assertion requires exactly 0 comparison values (you provided 1).")
+
+ fail(t, so(nil, ShouldNotBeNil), "Expected '<nil>' to NOT be nil (but it was)!")
+ pass(t, so(1, ShouldNotBeNil))
+
+ var thing Thinger
+ fail(t, so(thing, ShouldNotBeNil), "Expected '<nil>' to NOT be nil (but it was)!")
+ thing = &Thing{}
+ pass(t, so(thing, ShouldNotBeNil))
+}
+
+func TestShouldBeTrue(t *testing.T) {
+ fail(t, so(true, ShouldBeTrue, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).")
+ fail(t, so(true, ShouldBeTrue, 1), "This assertion requires exactly 0 comparison values (you provided 1).")
+
+ fail(t, so(false, ShouldBeTrue), "Expected: true Actual: false")
+ fail(t, so(1, ShouldBeTrue), "Expected: true Actual: 1")
+ pass(t, so(true, ShouldBeTrue))
+}
+
+func TestShouldBeFalse(t *testing.T) {
+ fail(t, so(false, ShouldBeFalse, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).")
+ fail(t, so(false, ShouldBeFalse, 1), "This assertion requires exactly 0 comparison values (you provided 1).")
+
+ fail(t, so(true, ShouldBeFalse), "Expected: false Actual: true")
+ fail(t, so(1, ShouldBeFalse), "Expected: false Actual: 1")
+ pass(t, so(false, ShouldBeFalse))
+}
+
+func TestShouldBeZeroValue(t *testing.T) {
+ serializer = newFakeSerializer()
+
+ fail(t, so(0, ShouldBeZeroValue, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).")
+ fail(t, so(false, ShouldBeZeroValue, true), "This assertion requires exactly 0 comparison values (you provided 1).")
+
+ fail(t, so(1, ShouldBeZeroValue), "0|1|'1' should have been the zero value") //"Expected: (zero value) Actual: 1")
+ fail(t, so(true, ShouldBeZeroValue), "false|true|'true' should have been the zero value") //"Expected: (zero value) Actual: true")
+ fail(t, so("123", ShouldBeZeroValue), "|123|'123' should have been the zero value") //"Expected: (zero value) Actual: 123")
+ fail(t, so(" ", ShouldBeZeroValue), "| |' ' should have been the zero value") //"Expected: (zero value) Actual: ")
+ fail(t, so([]string{"Nonempty"}, ShouldBeZeroValue), "[]|[Nonempty]|'[Nonempty]' should have been the zero value") //"Expected: (zero value) Actual: [Nonempty]")
+ fail(t, so(struct{ a string }{a: "asdf"}, ShouldBeZeroValue), "{}|{asdf}|'{a:asdf}' should have been the zero value")
+ pass(t, so(0, ShouldBeZeroValue))
+ pass(t, so(false, ShouldBeZeroValue))
+ pass(t, so("", ShouldBeZeroValue))
+ pass(t, so(struct{}{}, ShouldBeZeroValue))
+}
\ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/filter.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/filter.go
new file mode 100644
index 00000000000..ee368a97ed7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/filter.go
@@ -0,0 +1,23 @@
+package assertions
+
+import "fmt"
+
+const (
+ success = ""
+ needExactValues = "This assertion requires exactly %d comparison values (you provided %d)."
+ needNonEmptyCollection = "This assertion requires at least 1 comparison value (you provided 0)."
+)
+
+func need(needed int, expected []interface{}) string {
+ if len(expected) != needed {
+ return fmt.Sprintf(needExactValues, needed, len(expected))
+ }
+ return success
+}
+
+func atLeast(minimum int, expected []interface{}) string {
+ if len(expected) < minimum {
+ return needNonEmptyCollection
+ }
+ return success
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/Makefile b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/Makefile
new file mode 100644
index 00000000000..0894b82bd81
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/Makefile
@@ -0,0 +1,23 @@
+# This Makefile pulls the latest oglematchers (with dependencies),
+# rewrites the imports to match this location,
+# and ensures that all the tests pass.
+
+go: clean clone rewrite
+
+clean:
+ rm -rf ogle*
+ rm -rf reqtrace
+ rm -rf go-render
+
+clone:
+ git clone https://github.com/jacobsa/ogletest.git && rm -rf ogletest/.git
+ git clone https://github.com/jacobsa/oglemock.git && rm -rf oglemock/.git
+ git clone https://github.com/jacobsa/oglematchers.git && rm -rf oglematchers/.git
+ git clone https://github.com/jacobsa/reqtrace.git && rm -rf reqtrace/.git
+ git clone https://github.com/luci/go-render.git && rm -rf go-render/.git
+
+rewrite:
+ grep -rl --exclude Makefile 'github.com/jacobsa' . | xargs sed -i '' 's#github.com/jacobsa#github.com/smartystreets/assertions/internal#g'
+
+test:
+ go test github.com/smartystreets/assertions/...
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/.travis.yml b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/.travis.yml
new file mode 100644
index 00000000000..5a19a5faf38
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/.travis.yml
@@ -0,0 +1,21 @@
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# {sudo: required, dist: trusty} is the magic incantation to pick the trusty
+# beta environment, which is the only environment we can get that has >4GB
+# memory. Currently the `go test -race` tests that we run will peak at just
+# over 4GB, which results in everything getting OOM-killed.
+sudo: required
+dist: trusty
+
+language: go
+
+go:
+- 1.4.2
+
+before_install:
+ - go get github.com/maruel/pre-commit-go/cmd/pcg
+
+script:
+ - pcg
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/LICENSE b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/LICENSE
new file mode 100644
index 00000000000..6280ff0e06b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/LICENSE
@@ -0,0 +1,27 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/PRESUBMIT.py b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/PRESUBMIT.py
new file mode 100644
index 00000000000..d05f0cd8734
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/PRESUBMIT.py
@@ -0,0 +1,109 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Top-level presubmit script.
+
+See https://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
+details on the presubmit API built into depot_tools.
+"""
+
+import os
+import sys
+
+
+def PreCommitGo(input_api, output_api, pcg_mode):
+ """Run go-specific checks via pre-commit-go (pcg) if it's in PATH."""
+ if input_api.is_committing:
+ error_type = output_api.PresubmitError
+ else:
+ error_type = output_api.PresubmitPromptWarning
+
+ exe = 'pcg.exe' if sys.platform == 'win32' else 'pcg'
+ pcg = None
+ for p in os.environ['PATH'].split(os.pathsep):
+ pcg = os.path.join(p, exe)
+ if os.access(pcg, os.X_OK):
+ break
+ else:
+ return [
+ error_type(
+ 'pre-commit-go executable (pcg) could not be found in PATH. All Go '
+ 'checks are skipped. See https://github.com/maruel/pre-commit-go.')
+ ]
+
+ cmd = [pcg, 'run', '-m', ','.join(pcg_mode)]
+ if input_api.verbose:
+ cmd.append('-v')
+ # pcg can figure out what files to check on its own based on upstream ref,
+ # but on PRESUBMIT try builder upstream isn't set, and it's just 1 commit.
+ if os.getenv('PRESUBMIT_BUILDER', ''):
+ cmd.extend(['-r', 'HEAD~1'])
+ return input_api.RunTests([
+ input_api.Command(
+ name='pre-commit-go: %s' % ', '.join(pcg_mode),
+ cmd=cmd,
+ kwargs={},
+ message=error_type),
+ ])
+
+
+def header(input_api):
+ """Returns the expected license header regexp for this project."""
+ current_year = int(input_api.time.strftime('%Y'))
+ allowed_years = (str(s) for s in reversed(xrange(2011, current_year + 1)))
+ years_re = '(' + '|'.join(allowed_years) + ')'
+ license_header = (
+ r'.*? Copyright %(year)s The Chromium Authors\. '
+ r'All rights reserved\.\n'
+ r'.*? Use of this source code is governed by a BSD-style license '
+ r'that can be\n'
+ r'.*? found in the LICENSE file\.(?: \*/)?\n'
+ ) % {
+ 'year': years_re,
+ }
+ return license_header
+
+
+def source_file_filter(input_api):
+ """Returns filter that selects source code files only."""
+ bl = list(input_api.DEFAULT_BLACK_LIST) + [
+ r'.+\.pb\.go$',
+ r'.+_string\.go$',
+ ]
+ wl = list(input_api.DEFAULT_WHITE_LIST) + [
+ r'.+\.go$',
+ ]
+ return lambda x: input_api.FilterSourceFile(x, white_list=wl, black_list=bl)
+
+
+def CommonChecks(input_api, output_api):
+ results = []
+ results.extend(
+ input_api.canned_checks.CheckChangeHasNoStrayWhitespace(
+ input_api, output_api,
+ source_file_filter=source_file_filter(input_api)))
+ results.extend(
+ input_api.canned_checks.CheckLicense(
+ input_api, output_api, header(input_api),
+ source_file_filter=source_file_filter(input_api)))
+ return results
+
+
+def CheckChangeOnUpload(input_api, output_api):
+ results = CommonChecks(input_api, output_api)
+ results.extend(PreCommitGo(input_api, output_api, ['lint', 'pre-commit']))
+ return results
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ results = CommonChecks(input_api, output_api)
+ results.extend(input_api.canned_checks.CheckChangeHasDescription(
+ input_api, output_api))
+ results.extend(input_api.canned_checks.CheckDoNotSubmitInDescription(
+ input_api, output_api))
+ results.extend(input_api.canned_checks.CheckDoNotSubmitInFiles(
+ input_api, output_api))
+ results.extend(PreCommitGo(
+ input_api, output_api, ['continuous-integration']))
+ return results
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/README.md b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/README.md
new file mode 100644
index 00000000000..a85380c421a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/README.md
@@ -0,0 +1,78 @@
+go-render: A verbose recursive Go type-to-string conversion library.
+====================================================================
+
+[![GoDoc](https://godoc.org/github.com/luci/go-render?status.svg)](https://godoc.org/github.com/luci/go-render)
+[![Build Status](https://travis-ci.org/luci/go-render.svg)](https://travis-ci.org/luci/go-render)
+
+This is not an official Google product.
+
+## Overview
+
+The *render* package implements a more verbose form of the standard Go string
+formatter, `fmt.Sprintf("%#v", value)`, adding:
+ - Pointer recursion. Normally, Go stops at the first pointer and prints its
+ address. The *render* package will recurse and continue to render pointer
+ values.
+ - Recursion loop detection. Recursion is nice, but if a recursion path detects
+ a loop, *render* will note this and move on.
+ - Custom type name rendering.
+ - Deterministic key sorting for `string`- and `int`-keyed maps.
+ - Testing!
+
+Call `render.Render` and pass it an `interface{}`.
+
+For example:
+
+```Go
+type customType int
+type testStruct struct {
+ S string
+ V *map[string]int
+ I interface{}
+}
+
+a := testStruct{
+ S: "hello",
+ V: &map[string]int{"foo": 0, "bar": 1},
+ I: customType(42),
+}
+
+fmt.Println("Render test:")
+fmt.Printf("fmt.Printf: %#v\n", a)))
+fmt.Printf("render.Render: %s\n", Render(a))
+```
+
+Yields:
+```
+fmt.Printf: render.testStruct{S:"hello", V:(*map[string]int)(0x600dd065), I:42}
+render.Render: render.testStruct{S:"hello", V:(*map[string]int){"bar":1, "foo":0}, I:render.customType(42)}
+```
+
+This is not intended to be a high-performance library, but it's not terrible
+either.
+
+Contributing
+------------
+
+ * Sign the [Google CLA](https://cla.developers.google.com/clas).
+ * Make sure your `user.email` and `user.name` are configured in `git config`.
+ * Install the [pcg](https://github.com/maruel/pre-commit-go) git hook:
+ `go get -u github.com/maruel/pre-commit-go/cmd/... && pcg`
+
+Run the following to set up the code review tool and create your first review:
+
+ git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git $HOME/src/depot_tools
+ export PATH="$PATH:$HOME/src/depot_tools"
+ cd $GOROOT/github.com/luci/go-render
+ git checkout -b work origin/master
+
+ # hack hack
+
+ git commit -a -m "This is awesome\nR=joe@example.com"
+ # This will ask for your Google Account credentials.
+ git cl upload -s
+ # Wait for LGTM over email.
+ # Check the commit queue box in codereview website.
+ # Wait for the change to be tested and landed automatically.
+
+Use `git cl help` and `git cl help <cmd>` for more details.
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/WATCHLISTS b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/WATCHLISTS
new file mode 100644
index 00000000000..e4172088dd3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/WATCHLISTS
@@ -0,0 +1,26 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Watchlist Rules
+# Refer: http://dev.chromium.org/developers/contributing-code/watchlists
+
+{
+
+ 'WATCHLIST_DEFINITIONS': {
+ 'all': {
+ 'filepath': '.+',
+ },
+ },
+
+ 'WATCHLISTS': {
+ 'all': [
+ # Add yourself here to get explicitly spammed.
+ 'maruel@chromium.org',
+ 'tandrii+luci-go@chromium.org',
+ 'todd@cloudera.com',
+ 'andrew.wang@cloudera.com',
+ ],
+ },
+
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/pre-commit-go.yml b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/pre-commit-go.yml
new file mode 100644
index 00000000000..074ee1f84df
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/pre-commit-go.yml
@@ -0,0 +1,78 @@
+# https://github.com/maruel/pre-commit-go configuration file to run checks
+# automatically on commit, on push and on continuous integration service after
+# a push or on merge of a pull request.
+#
+# See https://godoc.org/github.com/maruel/pre-commit-go/checks for more
+# information.
+
+min_version: 0.4.7
+modes:
+ continuous-integration:
+ checks:
+ build:
+ - build_all: false
+ extra_args: []
+ coverage:
+ - use_global_inference: false
+ use_coveralls: true
+ global:
+ min_coverage: 50
+ max_coverage: 100
+ per_dir_default:
+ min_coverage: 1
+ max_coverage: 100
+ per_dir: {}
+ gofmt:
+ - {}
+ goimports:
+ - {}
+ test:
+ - extra_args:
+ - -v
+ - -race
+ max_duration: 600
+ lint:
+ checks:
+ golint:
+ - blacklist: []
+ govet:
+ - blacklist:
+ - ' composite literal uses unkeyed fields'
+ max_duration: 15
+ pre-commit:
+ checks:
+ build:
+ - build_all: false
+ extra_args: []
+ gofmt:
+ - {}
+ test:
+ - extra_args:
+ - -short
+ max_duration: 35
+ pre-push:
+ checks:
+ coverage:
+ - use_global_inference: false
+ use_coveralls: false
+ global:
+ min_coverage: 50
+ max_coverage: 100
+ per_dir_default:
+ min_coverage: 1
+ max_coverage: 100
+ per_dir: {}
+ goimports:
+ - {}
+ test:
+ - extra_args:
+ - -v
+ - -race
+ max_duration: 35
+
+ignore_patterns:
+- .*
+- _*
+- '*.pb.go'
+- '*_string.go'
+- '*-gen.go'
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/render/render.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/render/render.go
new file mode 100644
index 00000000000..e070a6b3b58
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/render/render.go
@@ -0,0 +1,327 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package render
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+var implicitTypeMap = map[reflect.Kind]string{
+ reflect.Bool: "bool",
+ reflect.String: "string",
+ reflect.Int: "int",
+ reflect.Int8: "int8",
+ reflect.Int16: "int16",
+ reflect.Int32: "int32",
+ reflect.Int64: "int64",
+ reflect.Uint: "uint",
+ reflect.Uint8: "uint8",
+ reflect.Uint16: "uint16",
+ reflect.Uint32: "uint32",
+ reflect.Uint64: "uint64",
+ reflect.Float32: "float32",
+ reflect.Float64: "float64",
+ reflect.Complex64: "complex64",
+ reflect.Complex128: "complex128",
+}
+
+// Render converts a structure to a string representation. Unlike the "%#v"
+// format string, this resolves pointer types' contents in structs, maps, and
+// slices/arrays and prints their field values.
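+//
+// A sketch of the expected output (consistent with the package README shown
+// earlier in this diff):
+//
+// Render(&map[string]int{"a": 1}) // -> (*map[string]int){"a":1}
+// Render([]int(nil)) // -> []int(nil)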
+func Render(v interface{}) string {
+ buf := bytes.Buffer{}
+ s := (*traverseState)(nil)
+ s.render(&buf, 0, reflect.ValueOf(v))
+ return buf.String()
+}
+
+// renderPointer is called to render a pointer value.
+//
+// This is overridable so that the test suite can have deterministic pointer
+// values in its expectations.
+var renderPointer = func(buf *bytes.Buffer, p uintptr) {
+ fmt.Fprintf(buf, "0x%016x", p)
+}
+
+// traverseState is used to note and avoid recursion as struct members are being
+// traversed.
+//
+// traverseState is allowed to be nil. Specifically, the root state is nil.
+type traverseState struct {
+ parent *traverseState
+ ptr uintptr
+}
+
+func (s *traverseState) forkFor(ptr uintptr) *traverseState {
+ for cur := s; cur != nil; cur = cur.parent {
+ if ptr == cur.ptr {
+ return nil
+ }
+ }
+
+ fs := &traverseState{
+ parent: s,
+ ptr: ptr,
+ }
+ return fs
+}
+
+func (s *traverseState) render(buf *bytes.Buffer, ptrs int, v reflect.Value) {
+ if v.Kind() == reflect.Invalid {
+ buf.WriteString("nil")
+ return
+ }
+ vt := v.Type()
+
+ // If the type being rendered is a potentially recursive type (a type that
+ // can contain itself as a member), we need to avoid recursion.
+ //
+// If we've already seen this pointer before, mark that this is the case and
+ // write a recursion placeholder instead of actually rendering it.
+ //
+ // If we haven't seen it before, fork our `seen` tracking so any higher-up
+ // renderers will also render it at least once, then mark that we've seen it
+ // to avoid recursing on lower layers.
+ pe := uintptr(0)
+ vk := vt.Kind()
+ switch vk {
+ case reflect.Ptr:
+ // Since structs and arrays aren't pointers, they can't directly be
+ // recursed, but they can contain pointers to themselves. Record their
+ // pointer to avoid this.
+ switch v.Elem().Kind() {
+ case reflect.Struct, reflect.Array:
+ pe = v.Pointer()
+ }
+
+ case reflect.Slice, reflect.Map:
+ pe = v.Pointer()
+ }
+ if pe != 0 {
+ s = s.forkFor(pe)
+ if s == nil {
+ buf.WriteString("<REC(")
+ writeType(buf, ptrs, vt)
+ buf.WriteString(")>")
+ return
+ }
+ }
+
+ switch vk {
+ case reflect.Struct:
+ writeType(buf, ptrs, vt)
+ buf.WriteRune('{')
+ for i := 0; i < vt.NumField(); i++ {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(vt.Field(i).Name)
+ buf.WriteRune(':')
+
+ s.render(buf, 0, v.Field(i))
+ }
+ buf.WriteRune('}')
+
+ case reflect.Slice:
+ if v.IsNil() {
+ writeType(buf, ptrs, vt)
+ buf.WriteString("(nil)")
+ return
+ }
+ fallthrough
+
+ case reflect.Array:
+ writeType(buf, ptrs, vt)
+ buf.WriteString("{")
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+
+ s.render(buf, 0, v.Index(i))
+ }
+ buf.WriteRune('}')
+
+ case reflect.Map:
+ writeType(buf, ptrs, vt)
+ if v.IsNil() {
+ buf.WriteString("(nil)")
+ } else {
+ buf.WriteString("{")
+
+ mkeys := v.MapKeys()
+ tryAndSortMapKeys(vt, mkeys)
+
+ for i, mk := range mkeys {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+
+ s.render(buf, 0, mk)
+ buf.WriteString(":")
+ s.render(buf, 0, v.MapIndex(mk))
+ }
+ buf.WriteRune('}')
+ }
+
+ case reflect.Ptr:
+ ptrs++
+ fallthrough
+ case reflect.Interface:
+ if v.IsNil() {
+ writeType(buf, ptrs, v.Type())
+ buf.WriteRune('(')
+ fmt.Fprint(buf, "nil")
+ buf.WriteRune(')')
+ } else {
+ s.render(buf, ptrs, v.Elem())
+ }
+
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ writeType(buf, ptrs, vt)
+ buf.WriteRune('(')
+ renderPointer(buf, v.Pointer())
+ buf.WriteRune(')')
+
+ default:
+ tstr := vt.String()
+ implicit := ptrs == 0 && implicitTypeMap[vk] == tstr
+ if !implicit {
+ writeType(buf, ptrs, vt)
+ buf.WriteRune('(')
+ }
+
+ switch vk {
+ case reflect.String:
+ fmt.Fprintf(buf, "%q", v.String())
+ case reflect.Bool:
+ fmt.Fprintf(buf, "%v", v.Bool())
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ fmt.Fprintf(buf, "%d", v.Int())
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ fmt.Fprintf(buf, "%d", v.Uint())
+
+ case reflect.Float32, reflect.Float64:
+ fmt.Fprintf(buf, "%g", v.Float())
+
+ case reflect.Complex64, reflect.Complex128:
+ fmt.Fprintf(buf, "%g", v.Complex())
+ }
+
+ if !implicit {
+ buf.WriteRune(')')
+ }
+ }
+}
+
+func writeType(buf *bytes.Buffer, ptrs int, t reflect.Type) {
+ parens := ptrs > 0
+ switch t.Kind() {
+ case reflect.Chan, reflect.Func, reflect.UnsafePointer:
+ parens = true
+ }
+
+ if parens {
+ buf.WriteRune('(')
+ for i := 0; i < ptrs; i++ {
+ buf.WriteRune('*')
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Ptr:
+ if ptrs == 0 {
+ // This pointer was referenced from within writeType (e.g., as part of
+ // rendering a list), and so hasn't had its pointer asterisk accounted
+ // for.
+ buf.WriteRune('*')
+ }
+ writeType(buf, 0, t.Elem())
+
+ case reflect.Interface:
+ if n := t.Name(); n != "" {
+ buf.WriteString(t.String())
+ } else {
+ buf.WriteString("interface{}")
+ }
+
+ case reflect.Array:
+ buf.WriteRune('[')
+ buf.WriteString(strconv.FormatInt(int64(t.Len()), 10))
+ buf.WriteRune(']')
+ writeType(buf, 0, t.Elem())
+
+ case reflect.Slice:
+ if t == reflect.SliceOf(t.Elem()) {
+ buf.WriteString("[]")
+ writeType(buf, 0, t.Elem())
+ } else {
+ // Custom slice type, use type name.
+ buf.WriteString(t.String())
+ }
+
+ case reflect.Map:
+ if t == reflect.MapOf(t.Key(), t.Elem()) {
+ buf.WriteString("map[")
+ writeType(buf, 0, t.Key())
+ buf.WriteRune(']')
+ writeType(buf, 0, t.Elem())
+ } else {
+ // Custom map type, use type name.
+ buf.WriteString(t.String())
+ }
+
+ default:
+ buf.WriteString(t.String())
+ }
+
+ if parens {
+ buf.WriteRune(')')
+ }
+}
+
+type sortableValueSlice struct {
+ kind reflect.Kind
+ elements []reflect.Value
+}
+
+func (s *sortableValueSlice) Len() int {
+ return len(s.elements)
+}
+
+func (s *sortableValueSlice) Less(i, j int) bool {
+ switch s.kind {
+ case reflect.String:
+ return s.elements[i].String() < s.elements[j].String()
+
+ case reflect.Int:
+ return s.elements[i].Int() < s.elements[j].Int()
+
+ default:
+ panic(fmt.Errorf("unsupported sort kind: %s", s.kind))
+ }
+}
+
+func (s *sortableValueSlice) Swap(i, j int) {
+ s.elements[i], s.elements[j] = s.elements[j], s.elements[i]
+}
+
+func tryAndSortMapKeys(mt reflect.Type, k []reflect.Value) {
+ // Try our stock sortable values.
+ switch mt.Key().Kind() {
+ case reflect.String, reflect.Int:
+ vs := &sortableValueSlice{
+ kind: mt.Key().Kind(),
+ elements: k,
+ }
+ sort.Sort(vs)
+ }
+}
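
To make the renderer's behavior concrete, here is a minimal usage sketch (the import path is the vendored one added above; `node` is a hypothetical type, and the output comments are indicative rather than verbatim):

```go
package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/go-render/render"
)

// node is a self-referential type, the case Render is built to handle.
type node struct {
	Name string
	Next *node
}

func main() {
	// Unlike "%#v", Render follows pointers and prints their contents.
	n := &node{Name: "a", Next: &node{Name: "b"}}
	fmt.Println(render.Render(n))
	// e.g. (*main.node){Name:"a", Next:(*main.node){Name:"b", Next:(*main.node)(nil)}}

	// Cycles are detected via traverseState and cut off with a placeholder.
	n.Next.Next = n
	fmt.Println(render.Render(n))
	// e.g. (*main.node){Name:"a", Next:(*main.node){Name:"b", Next:<REC(*main.node)>}}
}
```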
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/render/render_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/render/render_test.go
new file mode 100644
index 00000000000..1737cb702ae
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/go-render/render/render_test.go
@@ -0,0 +1,170 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package render
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "runtime"
+ "testing"
+)
+
+func init() {
+ // For testing purposes, pointers will render as "PTR" so that they are
+ // deterministic.
+ renderPointer = func(buf *bytes.Buffer, p uintptr) {
+ buf.WriteString("PTR")
+ }
+}
+
+func assertRendersLike(t *testing.T, name string, v interface{}, exp string) {
+ act := Render(v)
+ if act != exp {
+ _, _, line, _ := runtime.Caller(1)
+ t.Errorf("On line #%d, [%s] did not match expectations:\nExpected: %s\nActual : %s\n", line, name, exp, act)
+ }
+}
+
+func TestRenderList(t *testing.T) {
+ t.Parallel()
+
+ // Note that we make some of the fields exported. This is to avoid a fun case
+ // where the first reflect.Value has a read-only bit set, but follow-on values
+ // do not, so recursion tests are off by one.
+ type testStruct struct {
+ Name string
+ I interface{}
+
+ m string
+ }
+
+ type myStringSlice []string
+ type myStringMap map[string]string
+ type myIntType int
+ type myStringType string
+
+ s0 := "string0"
+ s0P := &s0
+ mit := myIntType(42)
+ stringer := fmt.Stringer(nil)
+
+ for i, tc := range []struct {
+ a interface{}
+ s string
+ }{
+ {nil, `nil`},
+ {make(chan int), `(chan int)(PTR)`},
+ {&stringer, `(*fmt.Stringer)(nil)`},
+ {123, `123`},
+ {"hello", `"hello"`},
+ {(*testStruct)(nil), `(*render.testStruct)(nil)`},
+ {(**testStruct)(nil), `(**render.testStruct)(nil)`},
+ {[]***testStruct(nil), `[]***render.testStruct(nil)`},
+ {testStruct{Name: "foo", I: &testStruct{Name: "baz"}},
+ `render.testStruct{Name:"foo", I:(*render.testStruct){Name:"baz", I:interface{}(nil), m:""}, m:""}`},
+ {[]byte(nil), `[]uint8(nil)`},
+ {[]byte{}, `[]uint8{}`},
+ {map[string]string(nil), `map[string]string(nil)`},
+ {[]*testStruct{
+ {Name: "foo"},
+ {Name: "bar"},
+ }, `[]*render.testStruct{(*render.testStruct){Name:"foo", I:interface{}(nil), m:""}, ` +
+ `(*render.testStruct){Name:"bar", I:interface{}(nil), m:""}}`},
+ {myStringSlice{"foo", "bar"}, `render.myStringSlice{"foo", "bar"}`},
+ {myStringMap{"foo": "bar"}, `render.myStringMap{"foo":"bar"}`},
+ {myIntType(12), `render.myIntType(12)`},
+ {&mit, `(*render.myIntType)(42)`},
+ {myStringType("foo"), `render.myStringType("foo")`},
+ {struct {
+ a int
+ b string
+ }{123, "foo"}, `struct { a int; b string }{a:123, b:"foo"}`},
+ {[]string{"foo", "foo", "bar", "baz", "qux", "qux"},
+ `[]string{"foo", "foo", "bar", "baz", "qux", "qux"}`},
+ {[...]int{1, 2, 3}, `[3]int{1, 2, 3}`},
+ {map[string]bool{
+ "foo": true,
+ "bar": false,
+ }, `map[string]bool{"bar":false, "foo":true}`},
+ {map[int]string{1: "foo", 2: "bar"}, `map[int]string{1:"foo", 2:"bar"}`},
+ {uint32(1337), `1337`},
+ {3.14, `3.14`},
+ {complex(3, 0.14), `(3+0.14i)`},
+ {&s0, `(*string)("string0")`},
+ {&s0P, `(**string)("string0")`},
+ {[]interface{}{nil, 1, 2, nil}, `[]interface{}{interface{}(nil), 1, 2, interface{}(nil)}`},
+ } {
+ assertRendersLike(t, fmt.Sprintf("Input #%d", i), tc.a, tc.s)
+ }
+}
+
+func TestRenderRecursiveStruct(t *testing.T) {
+ type testStruct struct {
+ Name string
+ I interface{}
+ }
+
+ s := &testStruct{
+ Name: "recursive",
+ }
+ s.I = s
+
+ assertRendersLike(t, "Recursive struct", s,
+ `(*render.testStruct){Name:"recursive", I:<REC(*render.testStruct)>}`)
+}
+
+func TestRenderRecursiveArray(t *testing.T) {
+ a := [2]interface{}{}
+ a[0] = &a
+ a[1] = &a
+
+ assertRendersLike(t, "Recursive array", &a,
+ `(*[2]interface{}){<REC(*[2]interface{})>, <REC(*[2]interface{})>}`)
+}
+
+func TestRenderRecursiveMap(t *testing.T) {
+ m := map[string]interface{}{}
+ foo := "foo"
+ m["foo"] = m
+ m["bar"] = [](*string){&foo, &foo}
+ v := []map[string]interface{}{m, m}
+
+ assertRendersLike(t, "Recursive map", v,
+ `[]map[string]interface{}{map[string]interface{}{`+
+ `"bar":[]*string{(*string)("foo"), (*string)("foo")}, `+
+ `"foo":<REC(map[string]interface{})>}, `+
+ `map[string]interface{}{`+
+ `"bar":[]*string{(*string)("foo"), (*string)("foo")}, `+
+ `"foo":<REC(map[string]interface{})>}}`)
+}
+
+func ExampleInReadme() {
+ type customType int
+ type testStruct struct {
+ S string
+ V *map[string]int
+ I interface{}
+ }
+
+ a := testStruct{
+ S: "hello",
+ V: &map[string]int{"foo": 0, "bar": 1},
+ I: customType(42),
+ }
+
+ fmt.Println("Render test:")
+ fmt.Printf("fmt.Printf: %s\n", sanitizePointer(fmt.Sprintf("%#v", a)))
+ fmt.Printf("render.Render: %s\n", Render(a))
+ // Output: Render test:
+ // fmt.Printf: render.testStruct{S:"hello", V:(*map[string]int)(0x600dd065), I:42}
+ // render.Render: render.testStruct{S:"hello", V:(*map[string]int){"bar":1, "foo":0}, I:render.customType(42)}
+}
+
+var pointerRE = regexp.MustCompile(`\(0x[a-f0-9]+\)`)
+
+func sanitizePointer(s string) string {
+ return pointerRE.ReplaceAllString(s, "(0x600dd065)")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/.gitignore b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/.gitignore
new file mode 100644
index 00000000000..dd8fc7468f4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/.gitignore
@@ -0,0 +1,5 @@
+*.6
+6.out
+_obj/
+_test/
+_testmain.go
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml
new file mode 100644
index 00000000000..b97211926e8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml
@@ -0,0 +1,4 @@
+# Cf. http://docs.travis-ci.com/user/getting-started/
+# Cf. http://docs.travis-ci.com/user/languages/go/
+
+language: go
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/LICENSE b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/README.md b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/README.md
new file mode 100644
index 00000000000..215a2bb7a8b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/README.md
@@ -0,0 +1,58 @@
+[![GoDoc](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers?status.svg)](https://godoc.org/github.com/smartystreets/assertions/internal/oglematchers)
+
+`oglematchers` is a package for the Go programming language containing a set of
+matchers, useful in a testing or mocking framework, inspired by and mostly
+compatible with [Google Test][googletest] for C++ and
+[Google JS Test][google-js-test]. The package is used by the
+[ogletest][ogletest] testing framework and the [oglemock][oglemock] mocking
+framework, which may be more directly useful to you, but it can also be used
+generically elsewhere.
+
+A "matcher" is simply an object with a `Matches` method defining the set of Go
+values matched by the matcher, and a `Description` method describing that set.
+For example, here are some matchers:
+
+```go
+// Numbers
+Equals(17.13)
+LessThan(19)
+
+// Strings
+Equals("taco")
+HasSubstr("burrito")
+MatchesRegex("t.*o")
+
+// Combining matchers
+AnyOf(LessThan(17), GreaterThan(19))
+```
+
+There are lots more; see [here][reference] for a reference. You can also add
+your own simply by implementing the `oglematchers.Matcher` interface.
+
+
+Installation
+------------
+
+First, make sure you have installed Go 1.0.2 or newer. See
+[here][golang-install] for instructions.
+
+Use the following command to install `oglematchers` and keep it up to date:
+
+ go get -u github.com/smartystreets/assertions/internal/oglematchers
+
+
+Documentation
+-------------
+
+See [here][reference] for documentation. Alternatively, you can install the
+package and then use `godoc`:
+
+ godoc github.com/smartystreets/assertions/internal/oglematchers
+
+
+[reference]: http://godoc.org/github.com/smartystreets/assertions/internal/oglematchers
+[golang-install]: http://golang.org/doc/install.html
+[googletest]: http://code.google.com/p/googletest/
+[google-js-test]: http://code.google.com/p/google-js-test/
+[ogletest]: http://github.com/smartystreets/assertions/internal/ogletest
+[oglemock]: http://github.com/smartystreets/assertions/internal/oglemock
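
The README's closing note says you can add your own matcher by implementing `oglematchers.Matcher`. A hedged sketch of what that looks like, based on the two-method shape used throughout this vendored package (`isEven` is a made-up example; `NewFatalError` appears in the package's own matchers below):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

// isEven reports whether a candidate int is even. By this package's
// convention, Matches returns nil on a match, a non-fatal error on a
// plain miss, and a *FatalError when the candidate is unusable.
type isEven struct{}

func (m *isEven) Description() string { return "is even" }

func (m *isEven) Matches(c interface{}) error {
	n, ok := c.(int)
	if !ok {
		return oglematchers.NewFatalError("which is not an int")
	}
	if n%2 != 0 {
		return errors.New("")
	}
	return nil
}

func main() {
	var m oglematchers.Matcher = &isEven{}
	fmt.Println(m.Matches(4) == nil) // true
	fmt.Println(m.Matches(5) == nil) // false
	fmt.Println(m.Matches("x"))      // which is not an int
}
```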
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/all_of.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/all_of.go
new file mode 100644
index 00000000000..d93a9740443
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/all_of.go
@@ -0,0 +1,70 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "strings"
+)
+
+// AllOf accepts a set of matchers S and returns a matcher that follows the
+// algorithm below when considering a candidate c:
+//
+// 1. Return true if for every Matcher m in S, m matches c.
+//
+// 2. Otherwise, if there is a matcher m in S such that m returns a fatal
+// error for c, return that matcher's error message.
+//
+// 3. Otherwise, return false with the error from some wrapped matcher.
+//
+// This is akin to a logical AND operation for matchers.
+func AllOf(matchers ...Matcher) Matcher {
+ return &allOfMatcher{matchers}
+}
+
+type allOfMatcher struct {
+ wrappedMatchers []Matcher
+}
+
+func (m *allOfMatcher) Description() string {
+ // Special case: the empty set.
+ if len(m.wrappedMatchers) == 0 {
+ return "is anything"
+ }
+
+ // Join the descriptions for the wrapped matchers.
+ wrappedDescs := make([]string, len(m.wrappedMatchers))
+ for i, wrappedMatcher := range m.wrappedMatchers {
+ wrappedDescs[i] = wrappedMatcher.Description()
+ }
+
+ return strings.Join(wrappedDescs, ", and ")
+}
+
+func (m *allOfMatcher) Matches(c interface{}) (err error) {
+ for _, wrappedMatcher := range m.wrappedMatchers {
+ if wrappedErr := wrappedMatcher.Matches(c); wrappedErr != nil {
+ err = wrappedErr
+
+ // If the error is fatal, return immediately with this error.
+ _, ok := wrappedErr.(*FatalError)
+ if ok {
+ return
+ }
+ }
+ }
+
+ return
+}
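
A brief usage sketch of the AND semantics above (assuming the vendored import path; `GreaterThan` and `LessThan` are matchers this package's README advertises):

```go
package main

import (
	"fmt"

	m "github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	// AllOf matches only when every wrapped matcher matches; a fatal
	// error from any wrapped matcher short-circuits with that error.
	between := m.AllOf(m.GreaterThan(17), m.LessThan(19))
	fmt.Println(between.Matches(18) == nil) // true: both accept 18
	fmt.Println(between.Matches(20) == nil) // false: LessThan(19) rejects 20
}
```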
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/all_of_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/all_of_test.go
new file mode 100644
index 00000000000..0f9d198fcbe
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/all_of_test.go
@@ -0,0 +1,110 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "errors"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type allOfFakeMatcher struct {
+ desc string
+ err error
+}
+
+func (m *allOfFakeMatcher) Matches(c interface{}) error {
+ return m.err
+}
+
+func (m *allOfFakeMatcher) Description() string {
+ return m.desc
+}
+
+type AllOfTest struct {
+}
+
+func init() { RegisterTestSuite(&AllOfTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *AllOfTest) DescriptionWithEmptySet() {
+ m := AllOf()
+ ExpectEq("is anything", m.Description())
+}
+
+func (t *AllOfTest) DescriptionWithOneMatcher() {
+ m := AllOf(&allOfFakeMatcher{"taco", errors.New("")})
+ ExpectEq("taco", m.Description())
+}
+
+func (t *AllOfTest) DescriptionWithMultipleMatchers() {
+ m := AllOf(
+ &allOfFakeMatcher{"taco", errors.New("")},
+ &allOfFakeMatcher{"burrito", errors.New("")},
+ &allOfFakeMatcher{"enchilada", errors.New("")})
+
+ ExpectEq("taco, and burrito, and enchilada", m.Description())
+}
+
+func (t *AllOfTest) EmptySet() {
+ m := AllOf()
+ err := m.Matches(17)
+
+ ExpectEq(nil, err)
+}
+
+func (t *AllOfTest) OneMatcherReturnsFatalErrorAndSomeOthersFail() {
+ m := AllOf(
+ &allOfFakeMatcher{"", errors.New("")},
+ &allOfFakeMatcher{"", NewFatalError("taco")},
+ &allOfFakeMatcher{"", errors.New("")},
+ &allOfFakeMatcher{"", nil})
+
+ err := m.Matches(17)
+
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *AllOfTest) OneMatcherReturnsNonFatalAndOthersSayTrue() {
+ m := AllOf(
+ &allOfFakeMatcher{"", nil},
+ &allOfFakeMatcher{"", errors.New("taco")},
+ &allOfFakeMatcher{"", nil})
+
+ err := m.Matches(17)
+
+ ExpectFalse(isFatal(err))
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *AllOfTest) AllMatchersSayTrue() {
+ m := AllOf(
+ &allOfFakeMatcher{"", nil},
+ &allOfFakeMatcher{"", nil},
+ &allOfFakeMatcher{"", nil})
+
+ err := m.Matches(17)
+
+ ExpectEq(nil, err)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any.go
new file mode 100644
index 00000000000..f6991ec1020
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any.go
@@ -0,0 +1,32 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+// Any returns a matcher that matches any value.
+func Any() Matcher {
+ return &anyMatcher{}
+}
+
+type anyMatcher struct {
+}
+
+func (m *anyMatcher) Description() string {
+ return "is anything"
+}
+
+func (m *anyMatcher) Matches(c interface{}) error {
+ return nil
+}
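
Any is the trivial matcher; a one-line sketch for completeness (same assumed import path as above):

```go
package main

import (
	"fmt"

	m "github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	anything := m.Any()
	// Every candidate matches, including nil.
	fmt.Println(anything.Matches(nil) == nil, anything.Matches("taco") == nil) // true true
}
```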
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_of.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_of.go
new file mode 100644
index 00000000000..2918b51f21a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_of.go
@@ -0,0 +1,94 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// AnyOf accepts a set of values S and returns a matcher that follows the
+// algorithm below when considering a candidate c:
+//
+// 1. If there exists a value m in S such that m implements the Matcher
+// interface and m matches c, return true.
+//
+// 2. Otherwise, if there exists a value v in S such that v does not implement
+// the Matcher interface and the matcher Equals(v) matches c, return true.
+//
+// 3. Otherwise, if there is a value m in S such that m implements the Matcher
+// interface and m returns a fatal error for c, return that fatal error.
+//
+// 4. Otherwise, return false.
+//
+// This is akin to a logical OR operation for matchers, with non-matchers x
+// being treated as Equals(x).
+func AnyOf(vals ...interface{}) Matcher {
+ // Get ahold of a type variable for the Matcher interface.
+ var dummy *Matcher
+ matcherType := reflect.TypeOf(dummy).Elem()
+
+ // Create a matcher for each value, or use the value itself if it's already a
+ // matcher.
+ wrapped := make([]Matcher, len(vals))
+ for i, v := range vals {
+ t := reflect.TypeOf(v)
+ if t != nil && t.Implements(matcherType) {
+ wrapped[i] = v.(Matcher)
+ } else {
+ wrapped[i] = Equals(v)
+ }
+ }
+
+ return &anyOfMatcher{wrapped}
+}
+
+type anyOfMatcher struct {
+ wrapped []Matcher
+}
+
+func (m *anyOfMatcher) Description() string {
+ wrappedDescs := make([]string, len(m.wrapped))
+ for i, matcher := range m.wrapped {
+ wrappedDescs[i] = matcher.Description()
+ }
+
+ return fmt.Sprintf("or(%s)", strings.Join(wrappedDescs, ", "))
+}
+
+func (m *anyOfMatcher) Matches(c interface{}) (err error) {
+ err = errors.New("")
+
+ // Try each matcher in turn.
+ for _, matcher := range m.wrapped {
+ wrappedErr := matcher.Matches(c)
+
+ // Return immediately if there's a match.
+ if wrappedErr == nil {
+ err = nil
+ return
+ }
+
+ // Note the fatal error, if any.
+ if _, isFatal := wrappedErr.(*FatalError); isFatal {
+ err = wrappedErr
+ }
+ }
+
+ return
+}
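
The mixed matcher/value behavior described in the AnyOf comment, sketched (hedged; same assumed import path):

```go
package main

import (
	"fmt"

	m "github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	// Plain values like "taco" are wrapped as Equals("taco"); matchers
	// are used as-is. AnyOf succeeds if any element accepts the candidate.
	anyOf := m.AnyOf(m.LessThan(10), "taco")
	fmt.Println(anyOf.Matches(5) == nil)      // true: LessThan(10) accepts 5
	fmt.Println(anyOf.Matches("taco") == nil) // true: equals the wrapped literal
	fmt.Println(anyOf.Matches(12) == nil)     // false: no element matches
}
```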
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_of_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_of_test.go
new file mode 100644
index 00000000000..f0b5025406f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_of_test.go
@@ -0,0 +1,139 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "errors"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type fakeAnyOfMatcher struct {
+ desc string
+ err error
+}
+
+func (m *fakeAnyOfMatcher) Matches(c interface{}) error {
+ return m.err
+}
+
+func (m *fakeAnyOfMatcher) Description() string {
+ return m.desc
+}
+
+type AnyOfTest struct {
+}
+
+func init() { RegisterTestSuite(&AnyOfTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *AnyOfTest) EmptySet() {
+ matcher := AnyOf()
+
+ err := matcher.Matches(0)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *AnyOfTest) OneTrue() {
+ matcher := AnyOf(
+ &fakeAnyOfMatcher{"", NewFatalError("foo")},
+ 17,
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ &fakeAnyOfMatcher{"", nil},
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ )
+
+ err := matcher.Matches(0)
+ ExpectEq(nil, err)
+}
+
+func (t *AnyOfTest) OneEqual() {
+ matcher := AnyOf(
+ &fakeAnyOfMatcher{"", NewFatalError("foo")},
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ 13,
+ "taco",
+ 19,
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ )
+
+ err := matcher.Matches("taco")
+ ExpectEq(nil, err)
+}
+
+func (t *AnyOfTest) OneFatal() {
+ matcher := AnyOf(
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ 17,
+ &fakeAnyOfMatcher{"", NewFatalError("taco")},
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ )
+
+ err := matcher.Matches(0)
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *AnyOfTest) OneNil() {
+ var err error
+ matcher := AnyOf(
+ 13,
+ nil,
+ 19,
+ )
+
+ // No match
+ err = matcher.Matches(14)
+ ExpectNe(nil, err)
+
+ // Match
+ err = matcher.Matches(nil)
+ ExpectEq(nil, err)
+}
+
+func (t *AnyOfTest) AllFalseAndNotEqual() {
+ matcher := AnyOf(
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ 17,
+ &fakeAnyOfMatcher{"", errors.New("foo")},
+ 19,
+ )
+
+ err := matcher.Matches(0)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *AnyOfTest) DescriptionForEmptySet() {
+ matcher := AnyOf()
+ ExpectEq("or()", matcher.Description())
+}
+
+func (t *AnyOfTest) DescriptionForNonEmptySet() {
+ matcher := AnyOf(
+ &fakeAnyOfMatcher{"taco", nil},
+ "burrito",
+ &fakeAnyOfMatcher{"enchilada", nil},
+ )
+
+ ExpectEq("or(taco, burrito, enchilada)", matcher.Description())
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_test.go
new file mode 100644
index 00000000000..410cc12825e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/any_test.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type AnyTest struct {
+}
+
+func init() { RegisterTestSuite(&AnyTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *AnyTest) Description() {
+ m := Any()
+ ExpectEq("is anything", m.Description())
+}
+
+func (t *AnyTest) Matches() {
+ var err error
+ m := Any()
+
+ err = m.Matches(nil)
+ ExpectEq(nil, err)
+
+ err = m.Matches(17)
+ ExpectEq(nil, err)
+
+ err = m.Matches("taco")
+ ExpectEq(nil, err)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/contains.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/contains.go
new file mode 100644
index 00000000000..2f326dbc5d6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/contains.go
@@ -0,0 +1,61 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Contains returns a matcher that matches arrays and slices with at least one
+// element that matches the supplied argument. If the argument x is not itself
+// a Matcher, this is equivalent to Contains(Equals(x)).
+func Contains(x interface{}) Matcher {
+ var result containsMatcher
+ var ok bool
+
+ if result.elementMatcher, ok = x.(Matcher); !ok {
+ result.elementMatcher = Equals(x)
+ }
+
+ return &result
+}
+
+type containsMatcher struct {
+ elementMatcher Matcher
+}
+
+func (m *containsMatcher) Description() string {
+ return fmt.Sprintf("contains: %s", m.elementMatcher.Description())
+}
+
+func (m *containsMatcher) Matches(candidate interface{}) error {
+ // The candidate must be a slice or an array.
+ v := reflect.ValueOf(candidate)
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+ return NewFatalError("which is not a slice or array")
+ }
+
+ // Check each element.
+ for i := 0; i < v.Len(); i++ {
+ elem := v.Index(i)
+ if matchErr := m.elementMatcher.Matches(elem.Interface()); matchErr == nil {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("")
+}
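
A short sketch of Contains against slices and a non-sequence candidate (hedged; same assumed import path):

```go
package main

import (
	"fmt"

	m "github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	contains := m.Contains("taco")
	fmt.Println(contains.Matches([]string{"burrito", "taco"}) == nil) // true
	fmt.Println(contains.Matches([]string{"burrito"}) == nil)         // false
	fmt.Println(contains.Matches(42)) // which is not a slice or array (fatal)
}
```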
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/contains_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/contains_test.go
new file mode 100644
index 00000000000..dfc981c1488
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/contains_test.go
@@ -0,0 +1,233 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type ContainsTest struct {}
+func init() { RegisterTestSuite(&ContainsTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *ContainsTest) WrongTypeCandidates() {
+ m := Contains("")
+ ExpectEq("contains: ", m.Description())
+
+ var err error
+
+ // Nil candidate
+ err = m.Matches(nil)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+
+ // String candidate
+ err = m.Matches("")
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+
+ // Map candidate
+ err = m.Matches(make(map[string]string))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+}
+
+func (t *ContainsTest) NilArgument() {
+ m := Contains(nil)
+ ExpectEq("contains: is nil", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Empty array of pointers
+ c = [...]*int{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Empty slice of pointers
+ c = []*int{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-empty array of integers
+ c = [...]int{17, 0, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-empty slice of integers
+ c = []int{17, 0, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching array of pointers
+ c = [...]*int{new(int), new(int)}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching slice of pointers
+ c = []*int{new(int), new(int)}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Matching array of pointers
+ c = [...]*int{new(int), nil, new(int)}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching slice of pointers
+ c = []*int{new(int), nil, new(int)}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching slice of pointers from matching array
+ someArray := [...]*int{new(int), nil, new(int)}
+ c = someArray[0:1]
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *ContainsTest) StringArgument() {
+ m := Contains("taco")
+ ExpectEq("contains: taco", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Non-matching array of strings
+ c = [...]string{"burrito", "enchilada"}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching slice of strings
+ c = []string{"burrito", "enchilada"}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Matching array of strings
+ c = [...]string{"burrito", "taco", "enchilada"}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching slice of strings
+ c = []string{"burrito", "taco", "enchilada"}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching slice of strings from matching array
+ someArray := [...]string{"burrito", "taco", "enchilada"}
+ c = someArray[0:1]
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *ContainsTest) IntegerArgument() {
+ m := Contains(int(17))
+ ExpectEq("contains: 17", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Non-matching array of integers
+ c = [...]int{13, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching slice of integers
+ c = []int{13, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Matching array of integers
+ c = [...]int{13, 17, 19}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching slice of integers
+ c = []int{13, 17, 19}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching slice of integers from matching array
+ someArray := [...]int{13, 17, 19}
+ c = someArray[0:1]
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching array of floats
+ c = [...]float32{13, 17.5, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching slice of floats
+ c = []float32{13, 17.5, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Matching array of floats
+ c = [...]float32{13, 17, 19}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching slice of floats
+ c = []float32{13, 17, 19}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+}
+
+func (t *ContainsTest) MatcherArgument() {
+ m := Contains(HasSubstr("ac"))
+ ExpectEq("contains: has substring \"ac\"", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Non-matching array of strings
+ c = [...]string{"burrito", "enchilada"}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Non-matching slice of strings
+ c = []string{"burrito", "enchilada"}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Matching array of strings
+ c = [...]string{"burrito", "taco", "enchilada"}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching slice of strings
+ c = []string{"burrito", "taco", "enchilada"}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching slice of strings from matching array
+ someArray := [...]string{"burrito", "taco", "enchilada"}
+ c = someArray[0:1]
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go
new file mode 100644
index 00000000000..1d91baef32e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/deep_equals.go
@@ -0,0 +1,88 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+var byteSliceType reflect.Type = reflect.TypeOf([]byte{})
+
+// DeepEquals returns a matcher that matches based on 'deep equality', as
+// defined by the reflect package. This matcher requires that values have
+// identical types to x.
+func DeepEquals(x interface{}) Matcher {
+ return &deepEqualsMatcher{x}
+}
+
+type deepEqualsMatcher struct {
+ x interface{}
+}
+
+func (m *deepEqualsMatcher) Description() string {
+ xDesc := fmt.Sprintf("%v", m.x)
+ xValue := reflect.ValueOf(m.x)
+
+ // Special case: fmt.Sprintf presents nil slices as "[]", but
+ // reflect.DeepEqual makes a distinction between nil and empty slices. Make
+ // this less confusing.
+ if xValue.Kind() == reflect.Slice && xValue.IsNil() {
+ xDesc = "<nil slice>"
+ }
+
+ return fmt.Sprintf("deep equals: %s", xDesc)
+}
+
+func (m *deepEqualsMatcher) Matches(c interface{}) error {
+ // Make sure the types match.
+ ct := reflect.TypeOf(c)
+ xt := reflect.TypeOf(m.x)
+
+ if ct != xt {
+ return NewFatalError(fmt.Sprintf("which is of type %v", ct))
+ }
+
+ // Special case: handle byte slices more efficiently.
+ cValue := reflect.ValueOf(c)
+ xValue := reflect.ValueOf(m.x)
+
+ if ct == byteSliceType && !cValue.IsNil() && !xValue.IsNil() {
+ xBytes := m.x.([]byte)
+ cBytes := c.([]byte)
+
+ if bytes.Equal(cBytes, xBytes) {
+ return nil
+ }
+
+ return errors.New("")
+ }
+
+ // Defer to the reflect package.
+ if reflect.DeepEqual(m.x, c) {
+ return nil
+ }
+
+ // Special case: if the comparison failed because c is the nil slice, give
+ // an indication of this (since its value is printed as "[]").
+ if cValue.Kind() == reflect.Slice && cValue.IsNil() {
+ return errors.New("which is nil")
+ }
+
+ return errors.New("")
+}
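
A sketch of the three outcomes DeepEquals distinguishes: a match via the byte-slice fast path, the special nil-slice message, and a fatal type mismatch (hedged; same assumed import path):

```go
package main

import (
	"fmt"

	m "github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	deep := m.DeepEquals([]byte{17, 19})
	fmt.Println(deep.Matches([]byte{17, 19}) == nil) // true, via bytes.Equal
	fmt.Println(deep.Matches([]byte(nil)))           // which is nil
	fmt.Println(deep.Matches([]uint16{17, 19}))      // which is of type []uint16 (fatal)
}
```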
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/deep_equals_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/deep_equals_test.go
new file mode 100644
index 00000000000..a28113aaa6b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/deep_equals_test.go
@@ -0,0 +1,343 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "bytes"
+ "testing"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type DeepEqualsTest struct {}
+func init() { RegisterTestSuite(&DeepEqualsTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *DeepEqualsTest) WrongTypeCandidateWithScalarValue() {
+ var x int = 17
+ m := DeepEquals(x)
+
+ var err error
+
+ // Nil candidate.
+ err = m.Matches(nil)
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("<nil>")))
+
+ // Int alias candidate.
+ type intAlias int
+ err = m.Matches(intAlias(x))
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("intAlias")))
+
+ // String candidate.
+ err = m.Matches("taco")
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("string")))
+
+ // Byte slice candidate.
+ err = m.Matches([]byte{})
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint8")))
+
+ // Other slice candidate.
+ err = m.Matches([]uint16{})
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint16")))
+
+ // Unsigned int candidate.
+ err = m.Matches(uint(17))
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("uint")))
+}
+
+func (t *DeepEqualsTest) WrongTypeCandidateWithByteSliceValue() {
+ x := []byte{}
+ m := DeepEquals(x)
+
+ var err error
+
+ // Nil candidate.
+ err = m.Matches(nil)
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("<nil>")))
+
+ // String candidate.
+ err = m.Matches("taco")
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("string")))
+
+ // Slice candidate with wrong value type.
+ err = m.Matches([]uint16{})
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint16")))
+}
+
+func (t *DeepEqualsTest) WrongTypeCandidateWithOtherSliceValue() {
+ x := []uint16{}
+ m := DeepEquals(x)
+
+ var err error
+
+ // Nil candidate.
+ err = m.Matches(nil)
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("<nil>")))
+
+ // String candidate.
+ err = m.Matches("taco")
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("string")))
+
+ // Byte slice candidate with wrong value type.
+ err = m.Matches([]byte{})
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint8")))
+
+ // Other slice candidate with wrong value type.
+ err = m.Matches([]uint32{})
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint32")))
+}
+
+func (t *DeepEqualsTest) WrongTypeCandidateWithNilLiteralValue() {
+ m := DeepEquals(nil)
+
+ var err error
+
+ // String candidate.
+ err = m.Matches("taco")
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("string")))
+
+ // Nil byte slice candidate.
+ err = m.Matches([]byte(nil))
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint8")))
+
+ // Nil other slice candidate.
+ err = m.Matches([]uint16(nil))
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("type")))
+ ExpectThat(err, Error(HasSubstr("[]uint16")))
+}
+
+func (t *DeepEqualsTest) NilLiteralValue() {
+ m := DeepEquals(nil)
+ ExpectEq("deep equals: <nil>", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Nil literal candidate.
+ c = nil
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+}
+
+func (t *DeepEqualsTest) IntValue() {
+ m := DeepEquals(int(17))
+ ExpectEq("deep equals: 17", m.Description())
+
+ var c interface{}
+ var err error
+
+ // Matching int.
+ c = int(17)
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching int.
+ c = int(18)
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *DeepEqualsTest) ByteSliceValue() {
+ x := []byte{17, 19}
+ m := DeepEquals(x)
+ ExpectEq("deep equals: [17 19]", m.Description())
+
+ var c []byte
+ var err error
+
+ // Matching.
+ c = make([]byte, len(x))
+ AssertEq(len(x), copy(c, x))
+
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Nil slice.
+ c = []byte(nil)
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("which is nil")))
+
+ // Prefix.
+ AssertGt(len(x), 1)
+ c = make([]byte, len(x)-1)
+ AssertEq(len(x)-1, copy(c, x))
+
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Suffix.
+ c = make([]byte, len(x)+1)
+ AssertEq(len(x), copy(c, x))
+
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *DeepEqualsTest) OtherSliceValue() {
+ x := []uint16{17, 19}
+ m := DeepEquals(x)
+ ExpectEq("deep equals: [17 19]", m.Description())
+
+ var c []uint16
+ var err error
+
+ // Matching.
+ c = make([]uint16, len(x))
+ AssertEq(len(x), copy(c, x))
+
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Nil slice.
+ c = []uint16(nil)
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("which is nil")))
+
+ // Prefix.
+ AssertGt(len(x), 1)
+ c = make([]uint16, len(x)-1)
+ AssertEq(len(x)-1, copy(c, x))
+
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+
+ // Suffix.
+ c = make([]uint16, len(x)+1)
+ AssertEq(len(x), copy(c, x))
+
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *DeepEqualsTest) NilByteSliceValue() {
+ x := []byte(nil)
+ m := DeepEquals(x)
+ ExpectEq("deep equals: <nil slice>", m.Description())
+
+ var c []byte
+ var err error
+
+ // Nil slice.
+ c = []byte(nil)
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-nil slice.
+ c = []byte{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *DeepEqualsTest) NilOtherSliceValue() {
+ x := []uint16(nil)
+ m := DeepEquals(x)
+ ExpectEq("deep equals: <nil slice>", m.Description())
+
+ var c []uint16
+ var err error
+
+ // Nil slice.
+ c = []uint16(nil)
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-nil slice.
+ c = []uint16{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("")))
+}
+
+////////////////////////////////////////////////////////////////////////
+// Benchmarks
+////////////////////////////////////////////////////////////////////////
+
+func benchmarkWithSize(b *testing.B, size int) {
+ b.StopTimer()
+ buf := bytes.Repeat([]byte{0x01}, size)
+ bufCopy := make([]byte, size)
+ copy(bufCopy, buf)
+
+ matcher := DeepEquals(buf)
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ matcher.Matches(bufCopy)
+ }
+
+ b.SetBytes(int64(size))
+}
+
+func BenchmarkShortByteSlice(b *testing.B) {
+ benchmarkWithSize(b, 256)
+}
+
+func BenchmarkLongByteSlice(b *testing.B) {
+ benchmarkWithSize(b, 1<<24)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/elements_are.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/elements_are.go
new file mode 100644
index 00000000000..2941847c705
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/elements_are.go
@@ -0,0 +1,91 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// Given a list of arguments M, ElementsAre returns a matcher that matches
+// arrays and slices A where all of the following hold:
+//
+// * A is the same length as M.
+//
+// * For each i < len(A) where M[i] is a matcher, A[i] matches M[i].
+//
+// * For each i < len(A) where M[i] is not a matcher, A[i] matches
+// Equals(M[i]).
+//
+func ElementsAre(M ...interface{}) Matcher {
+ // Copy over matchers, or convert to Equals(x) for non-matcher x.
+ subMatchers := make([]Matcher, len(M))
+ for i, x := range M {
+ if matcher, ok := x.(Matcher); ok {
+ subMatchers[i] = matcher
+ continue
+ }
+
+ subMatchers[i] = Equals(x)
+ }
+
+ return &elementsAreMatcher{subMatchers}
+}
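+
+// Editor's sketch (illustrative, not part of the upstream library): matcher
+// arguments are used as-is while bare values are wrapped in Equals, so the
+// two styles mix freely:
+//
+//   m := ElementsAre("taco", LessThan(17))
+//   m.Matches([]interface{}{"taco", 16}) // nil: both elements match
+//   m.Matches([]interface{}{"taco"})     // error "which is of length 1"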
+
+type elementsAreMatcher struct {
+ subMatchers []Matcher
+}
+
+func (m *elementsAreMatcher) Description() string {
+ subDescs := make([]string, len(m.subMatchers))
+ for i, sm := range m.subMatchers {
+ subDescs[i] = sm.Description()
+ }
+
+ return fmt.Sprintf("elements are: [%s]", strings.Join(subDescs, ", "))
+}
+
+func (m *elementsAreMatcher) Matches(candidates interface{}) error {
+ // The candidate must be a slice or an array.
+ v := reflect.ValueOf(candidates)
+ if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+ return NewFatalError("which is not a slice or array")
+ }
+
+ // The length must be correct.
+ if v.Len() != len(m.subMatchers) {
+ return fmt.Errorf("which is of length %d", v.Len())
+ }
+
+ // Check each element.
+ for i, subMatcher := range m.subMatchers {
+ c := v.Index(i)
+ if matchErr := subMatcher.Matches(c.Interface()); matchErr != nil {
+ // Return an error indicating which element doesn't match. If the
+ // matcher error was fatal, make this one fatal too.
+ err := fmt.Errorf("whose element %d doesn't match", i)
+ if _, isFatal := matchErr.(*FatalError); isFatal {
+ err = NewFatalError(err.Error())
+ }
+
+ return err
+ }
+ }
+
+ return nil
+}
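+
+// Editor's sketch (illustrative, not part of the upstream library): the
+// fatality propagation above keeps type errors fatal while plain mismatches
+// stay non-fatal:
+//
+//   m := ElementsAre(LessThan(17))
+//   m.Matches([]interface{}{19})     // non-fatal: ordinary mismatch
+//   m.Matches([]interface{}{"taco"}) // fatal: LessThan(17) can't compare a string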
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/elements_are_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/elements_are_test.go
new file mode 100644
index 00000000000..172584fa140
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/elements_are_test.go
@@ -0,0 +1,208 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type ElementsAreTest struct {
+}
+
+func init() { RegisterTestSuite(&ElementsAreTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *ElementsAreTest) EmptySet() {
+ m := ElementsAre()
+ ExpectEq("elements are: []", m.Description())
+
+ var c []interface{}
+ var err error
+
+ // No candidates.
+ c = []interface{}{}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // One candidate.
+ c = []interface{}{17}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 1")))
+}
+
+func (t *ElementsAreTest) OneMatcher() {
+ m := ElementsAre(LessThan(17))
+ ExpectEq("elements are: [less than 17]", m.Description())
+
+ var c []interface{}
+ var err error
+
+ // No candidates.
+ c = []interface{}{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 0")))
+
+ // Matching candidate.
+ c = []interface{}{16}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching candidate.
+ c = []interface{}{19}
+ err = m.Matches(c)
+ ExpectNe(nil, err)
+
+ // Two candidates.
+ c = []interface{}{17, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 2")))
+}
+
+func (t *ElementsAreTest) OneValue() {
+ m := ElementsAre(17)
+ ExpectEq("elements are: [17]", m.Description())
+
+ var c []interface{}
+ var err error
+
+ // No candidates.
+ c = []interface{}{}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 0")))
+
+ // Matching int.
+ c = []interface{}{int(17)}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Matching float.
+ c = []interface{}{float32(17)}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // Non-matching candidate.
+ c = []interface{}{19}
+ err = m.Matches(c)
+ ExpectNe(nil, err)
+
+ // Two candidates.
+ c = []interface{}{17, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 2")))
+}
+
+func (t *ElementsAreTest) MultipleElements() {
+ m := ElementsAre("taco", LessThan(17))
+ ExpectEq("elements are: [taco, less than 17]", m.Description())
+
+ var c []interface{}
+ var err error
+
+ // One candidate.
+ c = []interface{}{17}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 1")))
+
+ // Both matching.
+ c = []interface{}{"taco", 16}
+ err = m.Matches(c)
+ ExpectEq(nil, err)
+
+ // First non-matching.
+ c = []interface{}{"burrito", 16}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("whose element 0 doesn't match")))
+
+ // Second non-matching.
+ c = []interface{}{"taco", 17}
+ err = m.Matches(c)
+ ExpectThat(err, Error(Equals("whose element 1 doesn't match")))
+
+ // Three candidates.
+ c = []interface{}{"taco", 17, 19}
+ err = m.Matches(c)
+ ExpectThat(err, Error(HasSubstr("length 3")))
+}
+
+func (t *ElementsAreTest) ArrayCandidates() {
+ m := ElementsAre("taco", LessThan(17))
+
+ var err error
+
+ // One candidate.
+ err = m.Matches([1]interface{}{"taco"})
+ ExpectThat(err, Error(HasSubstr("length 1")))
+
+ // Both matching.
+ err = m.Matches([2]interface{}{"taco", 16})
+ ExpectEq(nil, err)
+
+ // First non-matching.
+ err = m.Matches([2]interface{}{"burrito", 16})
+ ExpectThat(err, Error(Equals("whose element 0 doesn't match")))
+}
+
+func (t *ElementsAreTest) WrongTypeCandidate() {
+ m := ElementsAre("taco")
+
+ var err error
+
+ // String candidate.
+ err = m.Matches("taco")
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+
+ // Map candidate.
+ err = m.Matches(map[string]string{})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+
+ // Nil candidate.
+ err = m.Matches(nil)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("array")))
+ ExpectThat(err, Error(HasSubstr("slice")))
+}
+
+func (t *ElementsAreTest) PropagatesFatality() {
+ m := ElementsAre(LessThan(17))
+ ExpectEq("elements are: [less than 17]", m.Description())
+
+ var c []interface{}
+ var err error
+
+ // Non-fatal error.
+ c = []interface{}{19}
+ err = m.Matches(c)
+ AssertNe(nil, err)
+ ExpectFalse(isFatal(err))
+
+ // Fatal error.
+ c = []interface{}{"taco"}
+ err = m.Matches(c)
+ AssertNe(nil, err)
+ ExpectTrue(isFatal(err))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/equals.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/equals.go
new file mode 100644
index 00000000000..a510707b3c7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/equals.go
@@ -0,0 +1,541 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+)
+
+// Equals(x) returns a matcher that matches values v such that v and x are
+// equivalent. This includes the case when the comparison v == x using Go's
+// built-in comparison operator is legal (except for structs, which this
+// matcher does not support), but for convenience the following rules also
+// apply:
+//
+// * Type checking is done based on underlying types rather than actual
+// types, so that e.g. two aliases for string can be compared:
+//
+// type stringAlias1 string
+// type stringAlias2 string
+//
+// a := "taco"
+// b := stringAlias1("taco")
+// c := stringAlias2("taco")
+//
+// ExpectTrue(a == b) // Illegal, doesn't compile
+// ExpectTrue(b == c) // Illegal, doesn't compile
+//
+// ExpectThat(a, Equals(b)) // Passes
+// ExpectThat(b, Equals(c)) // Passes
+//
+// * Values of numeric type are treated as if they were abstract numbers, and
+// compared accordingly. Therefore Equals(17) will match int(17),
+// int16(17), uint(17), float32(17), complex64(17), and so on.
+//
+// If you want a stricter matcher that contains no such cleverness, see
+// IdenticalTo instead.
+//
+// Arrays are supported by this matcher, but do not participate in the
+// exceptions above. Two arrays compared with this matcher must have identical
+// types, and their element type must itself be comparable according to Go's ==
+// operator.
+func Equals(x interface{}) Matcher {
+ v := reflect.ValueOf(x)
+
+ // This matcher doesn't support structs.
+ if v.Kind() == reflect.Struct {
+ panic(fmt.Sprintf("oglematchers.Equals: unsupported kind %v", v.Kind()))
+ }
+
+ // The == operator is not defined for non-nil slices.
+ if v.Kind() == reflect.Slice && v.Pointer() != uintptr(0) {
+ panic(fmt.Sprintf("oglematchers.Equals: non-nil slice"))
+ }
+
+ return &equalsMatcher{v}
+}
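+
+// Editor's sketch (illustrative, not part of the upstream library): because
+// numeric values are treated as abstract numbers, one matcher covers many
+// concrete types, while non-numeric candidates fail fatally:
+//
+//   m := Equals(17)
+//   m.Matches(int16(17))   // nil
+//   m.Matches(uint64(17))  // nil
+//   m.Matches(float32(17)) // nil
+//   m.Matches("17")        // fatal error "which is not numeric"
+//
+// By contrast, Equals([]int{1}) panics: non-nil slices are rejected up front.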
+
+type equalsMatcher struct {
+ expectedValue reflect.Value
+}
+
+////////////////////////////////////////////////////////////////////////
+// Numeric types
+////////////////////////////////////////////////////////////////////////
+
+func isSignedInteger(v reflect.Value) bool {
+ k := v.Kind()
+ return k >= reflect.Int && k <= reflect.Int64
+}
+
+func isUnsignedInteger(v reflect.Value) bool {
+ k := v.Kind()
+ return k >= reflect.Uint && k <= reflect.Uintptr
+}
+
+func isInteger(v reflect.Value) bool {
+ return isSignedInteger(v) || isUnsignedInteger(v)
+}
+
+func isFloat(v reflect.Value) bool {
+ k := v.Kind()
+ return k == reflect.Float32 || k == reflect.Float64
+}
+
+func isComplex(v reflect.Value) bool {
+ k := v.Kind()
+ return k == reflect.Complex64 || k == reflect.Complex128
+}
+
+func checkAgainstInt64(e int64, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(c):
+ if c.Int() == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ u := c.Uint()
+ if u <= math.MaxInt64 && int64(u) == e {
+ err = nil
+ }
+
+ // Turn the comparison around for floating point and complex candidates so
+ // that the checkAgainst* functions for those types can deal with precision
+ // issues.
+ case isFloat(c), isComplex(c):
+ return Equals(c.Interface()).Matches(e)
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstUint64(e uint64, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(c):
+ i := c.Int()
+ if i >= 0 && uint64(i) == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ if c.Uint() == e {
+ err = nil
+ }
+
+ // Turn the comparison around for floating point and complex candidates so
+ // that the checkAgainst* functions for those types can deal with precision
+ // issues.
+ case isFloat(c), isComplex(c):
+ return Equals(c.Interface()).Matches(e)
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
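+
+// Editor's sketch (illustrative, not part of the upstream library): the
+// sign and range guards above (e.g. u <= math.MaxInt64) prevent
+// two's-complement false positives across the signed/unsigned boundary:
+//
+//   Equals(int8(-17)).Matches(uint8(239))            // mismatch, not a match
+//   Equals(uint64(1 << 63)).Matches(int64(-1 << 63)) // mismatch likewise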
+
+func checkAgainstFloat32(e float32, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(c):
+ if float32(c.Int()) == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ if float32(c.Uint()) == e {
+ err = nil
+ }
+
+ case isFloat(c):
+ // Compare using float32 to avoid a false sense of precision; otherwise
+ // e.g. Equals(float32(0.1)) won't match the float64 value 0.1.
+ if float32(c.Float()) == e {
+ err = nil
+ }
+
+ case isComplex(c):
+ comp := c.Complex()
+ rl := real(comp)
+ im := imag(comp)
+
+ // Compare using float32 to avoid a false sense of precision; otherwise
+ // e.g. Equals(float32(0.1)) won't match (0.1 + 0i).
+ if im == 0 && float32(rl) == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstFloat64(e float64, c reflect.Value) (err error) {
+ err = errors.New("")
+
+ ck := c.Kind()
+
+ switch {
+ case isSignedInteger(c):
+ if float64(c.Int()) == e {
+ err = nil
+ }
+
+ case isUnsignedInteger(c):
+ if float64(c.Uint()) == e {
+ err = nil
+ }
+
+ // If the actual value is lower precision, turn the comparison around so we
+ // apply the low-precision rules. Otherwise, e.g. Equals(0.1) may not match
+ // float32(0.1).
+ case ck == reflect.Float32 || ck == reflect.Complex64:
+ return Equals(c.Interface()).Matches(e)
+
+ // Otherwise, compare with double precision.
+ case isFloat(c):
+ if c.Float() == e {
+ err = nil
+ }
+
+ case isComplex(c):
+ comp := c.Complex()
+ rl := real(comp)
+ im := imag(comp)
+
+ if im == 0 && rl == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
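+
+// Editor's sketch (illustrative, not part of the upstream library): turning
+// the comparison around means that when either side is a float32, equality
+// is decided at float32 precision:
+//
+//   Equals(0.1).Matches(float32(0.1)) // nil, even though
+//                                     // float64(float32(0.1)) != 0.1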
+
+func checkAgainstComplex64(e complex64, c reflect.Value) (err error) {
+ err = errors.New("")
+ realPart := real(e)
+ imaginaryPart := imag(e)
+
+ switch {
+ case isInteger(c) || isFloat(c):
+ // If we have no imaginary part, then we should just compare against the
+ // real part. Otherwise, we can't be equal.
+ if imaginaryPart != 0 {
+ return
+ }
+
+ return checkAgainstFloat32(realPart, c)
+
+ case isComplex(c):
+ // Compare using complex64 to avoid a false sense of precision; otherwise
+ // e.g. Equals(0.1 + 0i) won't match float32(0.1).
+ if complex64(c.Complex()) == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
+
+func checkAgainstComplex128(e complex128, c reflect.Value) (err error) {
+ err = errors.New("")
+ realPart := real(e)
+ imaginaryPart := imag(e)
+
+ switch {
+ case isInteger(c) || isFloat(c):
+ // If we have no imaginary part, then we should just compare against the
+ // real part. Otherwise, we can't be equal.
+ if imaginaryPart != 0 {
+ return
+ }
+
+ return checkAgainstFloat64(realPart, c)
+
+ case isComplex(c):
+ if c.Complex() == e {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not numeric")
+ }
+
+ return
+}
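+
+// Editor's sketch (illustrative, not part of the upstream library): a
+// complex expectation with a zero imaginary part degrades to the real
+// comparison, so it still matches plain numbers:
+//
+//   Equals(17 + 0i).Matches(17)     // nil
+//   Equals(17 + 2i).Matches(17)     // mismatch: nonzero imaginary part
+//   Equals(17 + 0i).Matches("taco") // fatal "which is not numeric"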
+
+////////////////////////////////////////////////////////////////////////
+// Other types
+////////////////////////////////////////////////////////////////////////
+
+func checkAgainstBool(e bool, c reflect.Value) (err error) {
+ if c.Kind() != reflect.Bool {
+ err = NewFatalError("which is not a bool")
+ return
+ }
+
+ err = errors.New("")
+ if c.Bool() == e {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstChan(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "chan int".
+ typeStr := fmt.Sprintf("%s %s", e.Type().ChanDir(), e.Type().Elem())
+
+ // Make sure c is a chan of the correct type.
+ if c.Kind() != reflect.Chan ||
+ c.Type().ChanDir() != e.Type().ChanDir() ||
+ c.Type().Elem() != e.Type().Elem() {
+ err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr))
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstFunc(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is a function.
+ if c.Kind() != reflect.Func {
+ err = NewFatalError("which is not a function")
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstMap(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is a map.
+ if c.Kind() != reflect.Map {
+ err = NewFatalError("which is not a map")
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstPtr(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "*int".
+ typeStr := fmt.Sprintf("*%v", e.Type().Elem())
+
+ // Make sure c is a pointer of the correct type.
+ if c.Kind() != reflect.Ptr ||
+ c.Type().Elem() != e.Type().Elem() {
+ err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr))
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstSlice(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "[]int".
+ typeStr := fmt.Sprintf("[]%v", e.Type().Elem())
+
+ // Make sure c is a slice of the correct type.
+ if c.Kind() != reflect.Slice ||
+ c.Type().Elem() != e.Type().Elem() {
+ err = NewFatalError(fmt.Sprintf("which is not a %s", typeStr))
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstString(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is a string.
+ if c.Kind() != reflect.String {
+ err = NewFatalError("which is not a string")
+ return
+ }
+
+ err = errors.New("")
+ if c.String() == e.String() {
+ err = nil
+ }
+ return
+}
+
+func checkAgainstArray(e reflect.Value, c reflect.Value) (err error) {
+ // Create a description of e's type, e.g. "[2]int".
+ typeStr := fmt.Sprintf("%v", e.Type())
+
+ // Make sure c is the correct type.
+ if c.Type() != e.Type() {
+ err = NewFatalError(fmt.Sprintf("which is not %s", typeStr))
+ return
+ }
+
+ // Check for equality.
+ if e.Interface() != c.Interface() {
+ err = errors.New("")
+ return
+ }
+
+ return
+}
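+
+// Editor's sketch (illustrative, not part of the upstream library): arrays
+// get none of the numeric leniency; candidate and expected types must be
+// identical:
+//
+//   Equals([2]int{17, 19}).Matches([2]int{17, 19})   // nil
+//   Equals([2]int{17, 19}).Matches([2]int64{17, 19}) // fatal "which is not [2]int"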
+
+func checkAgainstUnsafePointer(e reflect.Value, c reflect.Value) (err error) {
+ // Make sure c is an unsafe.Pointer.
+ if c.Kind() != reflect.UnsafePointer {
+ err = NewFatalError("which is not a unsafe.Pointer")
+ return
+ }
+
+ err = errors.New("")
+ if c.Pointer() == e.Pointer() {
+ err = nil
+ }
+ return
+}
+
+func checkForNil(c reflect.Value) (err error) {
+ err = errors.New("")
+
+ // Make sure it is legal to call IsNil.
+ switch c.Kind() {
+ case reflect.Invalid:
+ case reflect.Chan:
+ case reflect.Func:
+ case reflect.Interface:
+ case reflect.Map:
+ case reflect.Ptr:
+ case reflect.Slice:
+
+ default:
+ err = NewFatalError("which cannot be compared to nil")
+ return
+ }
+
+ // Ask whether the value is nil. Handle a nil literal (kind Invalid)
+ // specially, since it's not legal to call IsNil there.
+ if c.Kind() == reflect.Invalid || c.IsNil() {
+ err = nil
+ }
+ return
+}
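+
+// Editor's sketch (illustrative, not part of the upstream library):
+// Equals(nil) therefore matches only nil values of nilable kinds:
+//
+//   m := Equals(nil)
+//   m.Matches(nil)           // nil: kind Invalid is handled specially
+//   m.Matches([]int(nil))    // nil: a nil slice
+//   m.Matches(map[int]int{}) // mismatch: a non-nil map
+//   m.Matches(17)            // fatal "which cannot be compared to nil"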
+
+////////////////////////////////////////////////////////////////////////
+// Public implementation
+////////////////////////////////////////////////////////////////////////
+
+func (m *equalsMatcher) Matches(candidate interface{}) error {
+ e := m.expectedValue
+ c := reflect.ValueOf(candidate)
+ ek := e.Kind()
+
+ switch {
+ case ek == reflect.Bool:
+ return checkAgainstBool(e.Bool(), c)
+
+ case isSignedInteger(e):
+ return checkAgainstInt64(e.Int(), c)
+
+ case isUnsignedInteger(e):
+ return checkAgainstUint64(e.Uint(), c)
+
+ case ek == reflect.Float32:
+ return checkAgainstFloat32(float32(e.Float()), c)
+
+ case ek == reflect.Float64:
+ return checkAgainstFloat64(e.Float(), c)
+
+ case ek == reflect.Complex64:
+ return checkAgainstComplex64(complex64(e.Complex()), c)
+
+ case ek == reflect.Complex128:
+ return checkAgainstComplex128(e.Complex(), c)
+
+ case ek == reflect.Chan:
+ return checkAgainstChan(e, c)
+
+ case ek == reflect.Func:
+ return checkAgainstFunc(e, c)
+
+ case ek == reflect.Map:
+ return checkAgainstMap(e, c)
+
+ case ek == reflect.Ptr:
+ return checkAgainstPtr(e, c)
+
+ case ek == reflect.Slice:
+ return checkAgainstSlice(e, c)
+
+ case ek == reflect.String:
+ return checkAgainstString(e, c)
+
+ case ek == reflect.Array:
+ return checkAgainstArray(e, c)
+
+ case ek == reflect.UnsafePointer:
+ return checkAgainstUnsafePointer(e, c)
+
+ case ek == reflect.Invalid:
+ return checkForNil(c)
+ }
+
+ panic(fmt.Sprintf("equalsMatcher.Matches: unexpected kind: %v", ek))
+}
+
+func (m *equalsMatcher) Description() string {
+ // Special case: handle nil.
+ if !m.expectedValue.IsValid() {
+ return "is nil"
+ }
+
+ return fmt.Sprintf("%v", m.expectedValue.Interface())
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/equals_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/equals_test.go
new file mode 100644
index 00000000000..6ac5df27329
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/equals_test.go
@@ -0,0 +1,3864 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "fmt"
+ "math"
+ "unsafe"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+var someInt int = -17
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type EqualsTest struct {
+}
+
+func init() { RegisterTestSuite(&EqualsTest{}) }
+
+type equalsTestCase struct {
+ candidate interface{}
+ expectedResult bool
+ shouldBeFatal bool
+ expectedError string
+}
+
+func (t *EqualsTest) checkTestCases(matcher Matcher, cases []equalsTestCase) {
+ for i, c := range cases {
+ err := matcher.Matches(c.candidate)
+ ExpectEq(
+ c.expectedResult,
+ (err == nil),
+ "Result for case %d: %v (Error: %v)", i, c, err)
+
+ if err == nil {
+ continue
+ }
+
+ _, isFatal := err.(*FatalError)
+ ExpectEq(c.shouldBeFatal, isFatal, "Fatality for case %d: %v", i, c)
+
+ ExpectThat(err, Error(Equals(c.expectedError)), "Case %d: %v", i, c)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+// nil
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) EqualsNil() {
+ matcher := Equals(nil)
+ ExpectEq("is nil", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Legal types
+ equalsTestCase{nil, true, false, ""},
+ equalsTestCase{chan int(nil), true, false, ""},
+ equalsTestCase{(func())(nil), true, false, ""},
+ equalsTestCase{interface{}(nil), true, false, ""},
+ equalsTestCase{map[int]int(nil), true, false, ""},
+ equalsTestCase{(*int)(nil), true, false, ""},
+ equalsTestCase{[]int(nil), true, false, ""},
+
+ equalsTestCase{make(chan int), false, false, ""},
+ equalsTestCase{func() {}, false, false, ""},
+ equalsTestCase{map[int]int{}, false, false, ""},
+ equalsTestCase{&someInt, false, false, ""},
+ equalsTestCase{[]int{}, false, false, ""},
+
+ // Illegal types
+ equalsTestCase{17, false, true, "which cannot be compared to nil"},
+ equalsTestCase{int8(17), false, true, "which cannot be compared to nil"},
+ equalsTestCase{uintptr(17), false, true, "which cannot be compared to nil"},
+ equalsTestCase{[...]int{}, false, true, "which cannot be compared to nil"},
+ equalsTestCase{"taco", false, true, "which cannot be compared to nil"},
+ equalsTestCase{equalsTestCase{}, false, true, "which cannot be compared to nil"},
+ equalsTestCase{unsafe.Pointer(&someInt), false, true, "which cannot be compared to nil"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Integer literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegerLiteral() {
+ // -2^30
+ matcher := Equals(-1073741824)
+ ExpectEq("-1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -1073741824.
+ equalsTestCase{-1073741824, true, false, ""},
+ equalsTestCase{-1073741824.0, true, false, ""},
+ equalsTestCase{-1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(-1073741824), true, false, ""},
+ equalsTestCase{int32(-1073741824), true, false, ""},
+ equalsTestCase{int64(-1073741824), true, false, ""},
+ equalsTestCase{float32(-1073741824), true, false, ""},
+ equalsTestCase{float64(-1073741824), true, false, ""},
+ equalsTestCase{complex64(-1073741824), true, false, ""},
+ equalsTestCase{complex128(-1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(-1073741824)), true, false, ""},
+
+ // Values that would be -1073741824 in two's complement.
+ equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""},
+ equalsTestCase{uintptr((1 << 64) - 1073741824), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-1073741823), false, false, ""},
+ equalsTestCase{int32(-1073741823), false, false, ""},
+ equalsTestCase{int64(-1073741823), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float64(-1073741824.1), false, false, ""},
+ equalsTestCase{float64(-1073741823.9), false, false, ""},
+ equalsTestCase{complex128(-1073741823), false, false, ""},
+ equalsTestCase{complex128(-1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegerLiteral() {
+ // 2^30
+ matcher := Equals(1073741824)
+ ExpectEq("1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1073741824.
+ equalsTestCase{1073741824, true, false, ""},
+ equalsTestCase{1073741824.0, true, false, ""},
+ equalsTestCase{1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(1073741824), true, false, ""},
+ equalsTestCase{uint(1073741824), true, false, ""},
+ equalsTestCase{int32(1073741824), true, false, ""},
+ equalsTestCase{int64(1073741824), true, false, ""},
+ equalsTestCase{uint32(1073741824), true, false, ""},
+ equalsTestCase{uint64(1073741824), true, false, ""},
+ equalsTestCase{uintptr(1073741824), true, false, ""},
+ equalsTestCase{float32(1073741824), true, false, ""},
+ equalsTestCase{float64(1073741824), true, false, ""},
+ equalsTestCase{complex64(1073741824), true, false, ""},
+ equalsTestCase{complex128(1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(1073741824)), true, false, ""},
+ equalsTestCase{interface{}(uint(1073741824)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1073741823), false, false, ""},
+ equalsTestCase{int32(1073741823), false, false, ""},
+ equalsTestCase{int64(1073741823), false, false, ""},
+ equalsTestCase{float64(1073741824.1), false, false, ""},
+ equalsTestCase{float64(1073741823.9), false, false, ""},
+ equalsTestCase{complex128(1073741823), false, false, ""},
+ equalsTestCase{complex128(1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Floating point literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegralFloatingPointLiteral() {
+ // -2^30
+ matcher := Equals(-1073741824.0)
+ ExpectEq("-1.073741824e+09", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -1073741824.
+ equalsTestCase{-1073741824, true, false, ""},
+ equalsTestCase{-1073741824.0, true, false, ""},
+ equalsTestCase{-1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(-1073741824), true, false, ""},
+ equalsTestCase{int32(-1073741824), true, false, ""},
+ equalsTestCase{int64(-1073741824), true, false, ""},
+ equalsTestCase{float32(-1073741824), true, false, ""},
+ equalsTestCase{float64(-1073741824), true, false, ""},
+ equalsTestCase{complex64(-1073741824), true, false, ""},
+ equalsTestCase{complex128(-1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(-1073741824)), true, false, ""},
+ equalsTestCase{interface{}(float64(-1073741824)), true, false, ""},
+
+ // Values that would be -1073741824 in two's complement.
+ equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""},
+ equalsTestCase{uintptr((1 << 64) - 1073741824), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-1073741823), false, false, ""},
+ equalsTestCase{int32(-1073741823), false, false, ""},
+ equalsTestCase{int64(-1073741823), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float64(-1073741824.1), false, false, ""},
+ equalsTestCase{float64(-1073741823.9), false, false, ""},
+ equalsTestCase{complex128(-1073741823), false, false, ""},
+ equalsTestCase{complex128(-1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralFloatingPointLiteral() {
+ // 2^30
+ matcher := Equals(1073741824.0)
+ ExpectEq("1.073741824e+09", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1073741824.
+ equalsTestCase{1073741824, true, false, ""},
+ equalsTestCase{1073741824.0, true, false, ""},
+ equalsTestCase{1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(1073741824), true, false, ""},
+ equalsTestCase{int32(1073741824), true, false, ""},
+ equalsTestCase{int64(1073741824), true, false, ""},
+ equalsTestCase{uint(1073741824), true, false, ""},
+ equalsTestCase{uint32(1073741824), true, false, ""},
+ equalsTestCase{uint64(1073741824), true, false, ""},
+ equalsTestCase{float32(1073741824), true, false, ""},
+ equalsTestCase{float64(1073741824), true, false, ""},
+ equalsTestCase{complex64(1073741824), true, false, ""},
+ equalsTestCase{complex128(1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(1073741824)), true, false, ""},
+ equalsTestCase{interface{}(float64(1073741824)), true, false, ""},
+
+ // Values that would be -1073741824 in two's complement.
+ equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""},
+ equalsTestCase{uintptr((1 << 64) - 1073741824), false, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1073741823), false, false, ""},
+ equalsTestCase{int32(1073741823), false, false, ""},
+ equalsTestCase{int64(1073741823), false, false, ""},
+ equalsTestCase{uint(1073741823), false, false, ""},
+ equalsTestCase{uint32(1073741823), false, false, ""},
+ equalsTestCase{uint64(1073741823), false, false, ""},
+ equalsTestCase{float64(1073741824.1), false, false, ""},
+ equalsTestCase{float64(1073741823.9), false, false, ""},
+ equalsTestCase{complex128(1073741823), false, false, ""},
+ equalsTestCase{complex128(1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonIntegralFloatingPointLiteral() {
+ matcher := Equals(17.1)
+ ExpectEq("17.1", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 17.1.
+ equalsTestCase{17.1, true, false, ""},
+ equalsTestCase{17.1, true, false, ""},
+ equalsTestCase{17.1 + 0i, true, false, ""},
+ equalsTestCase{float32(17.1), true, false, ""},
+ equalsTestCase{float64(17.1), true, false, ""},
+ equalsTestCase{complex64(17.1), true, false, ""},
+ equalsTestCase{complex128(17.1), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{17, false, false, ""},
+ equalsTestCase{17.2, false, false, ""},
+ equalsTestCase{18, false, false, ""},
+ equalsTestCase{int(17), false, false, ""},
+ equalsTestCase{int(18), false, false, ""},
+ equalsTestCase{int32(17), false, false, ""},
+ equalsTestCase{int64(17), false, false, ""},
+ equalsTestCase{uint(17), false, false, ""},
+ equalsTestCase{uint32(17), false, false, ""},
+ equalsTestCase{uint64(17), false, false, ""},
+ equalsTestCase{uintptr(17), false, false, ""},
+ equalsTestCase{complex128(17.1 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// bool
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) False() {
+ matcher := Equals(false)
+ ExpectEq("false", matcher.Description())
+
+ cases := []equalsTestCase{
+ // bools
+ equalsTestCase{false, true, false, ""},
+ equalsTestCase{bool(false), true, false, ""},
+
+ equalsTestCase{true, false, false, ""},
+ equalsTestCase{bool(true), false, false, ""},
+
+ // Other types.
+ equalsTestCase{int(0), false, true, "which is not a bool"},
+ equalsTestCase{int8(0), false, true, "which is not a bool"},
+ equalsTestCase{int16(0), false, true, "which is not a bool"},
+ equalsTestCase{int32(0), false, true, "which is not a bool"},
+ equalsTestCase{int64(0), false, true, "which is not a bool"},
+ equalsTestCase{uint(0), false, true, "which is not a bool"},
+ equalsTestCase{uint8(0), false, true, "which is not a bool"},
+ equalsTestCase{uint16(0), false, true, "which is not a bool"},
+ equalsTestCase{uint32(0), false, true, "which is not a bool"},
+ equalsTestCase{uint64(0), false, true, "which is not a bool"},
+ equalsTestCase{uintptr(0), false, true, "which is not a bool"},
+ equalsTestCase{[...]int{}, false, true, "which is not a bool"},
+ equalsTestCase{make(chan int), false, true, "which is not a bool"},
+ equalsTestCase{func() {}, false, true, "which is not a bool"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a bool"},
+ equalsTestCase{&someInt, false, true, "which is not a bool"},
+ equalsTestCase{[]int{}, false, true, "which is not a bool"},
+ equalsTestCase{"taco", false, true, "which is not a bool"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a bool"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) True() {
+ matcher := Equals(true)
+ ExpectEq("true", matcher.Description())
+
+ cases := []equalsTestCase{
+ // bools
+ equalsTestCase{true, true, false, ""},
+ equalsTestCase{bool(true), true, false, ""},
+
+ equalsTestCase{false, false, false, ""},
+ equalsTestCase{bool(false), false, false, ""},
+
+ // Other types.
+ equalsTestCase{int(1), false, true, "which is not a bool"},
+ equalsTestCase{int8(1), false, true, "which is not a bool"},
+ equalsTestCase{int16(1), false, true, "which is not a bool"},
+ equalsTestCase{int32(1), false, true, "which is not a bool"},
+ equalsTestCase{int64(1), false, true, "which is not a bool"},
+ equalsTestCase{uint(1), false, true, "which is not a bool"},
+ equalsTestCase{uint8(1), false, true, "which is not a bool"},
+ equalsTestCase{uint16(1), false, true, "which is not a bool"},
+ equalsTestCase{uint32(1), false, true, "which is not a bool"},
+ equalsTestCase{uint64(1), false, true, "which is not a bool"},
+ equalsTestCase{uintptr(1), false, true, "which is not a bool"},
+ equalsTestCase{[...]int{}, false, true, "which is not a bool"},
+ equalsTestCase{make(chan int), false, true, "which is not a bool"},
+ equalsTestCase{func() {}, false, true, "which is not a bool"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a bool"},
+ equalsTestCase{&someInt, false, true, "which is not a bool"},
+ equalsTestCase{[]int{}, false, true, "which is not a bool"},
+ equalsTestCase{"taco", false, true, "which is not a bool"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a bool"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// int
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeInt() {
+ // -2^30
+ matcher := Equals(int(-1073741824))
+ ExpectEq("-1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -1073741824.
+ equalsTestCase{-1073741824, true, false, ""},
+ equalsTestCase{-1073741824.0, true, false, ""},
+ equalsTestCase{-1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(-1073741824), true, false, ""},
+ equalsTestCase{int32(-1073741824), true, false, ""},
+ equalsTestCase{int64(-1073741824), true, false, ""},
+ equalsTestCase{float32(-1073741824), true, false, ""},
+ equalsTestCase{float64(-1073741824), true, false, ""},
+ equalsTestCase{complex64(-1073741824), true, false, ""},
+ equalsTestCase{complex128(-1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(-1073741824)), true, false, ""},
+
+ // Values that would be -1073741824 in two's complement.
+ equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""},
+ equalsTestCase{uintptr((1 << 64) - 1073741824), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-1073741823), false, false, ""},
+ equalsTestCase{int32(-1073741823), false, false, ""},
+ equalsTestCase{int64(-1073741823), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float64(-1073741824.1), false, false, ""},
+ equalsTestCase{float64(-1073741823.9), false, false, ""},
+ equalsTestCase{complex128(-1073741823), false, false, ""},
+ equalsTestCase{complex128(-1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveInt() {
+ // 2^30
+ matcher := Equals(int(1073741824))
+ ExpectEq("1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1073741824.
+ equalsTestCase{1073741824, true, false, ""},
+ equalsTestCase{1073741824.0, true, false, ""},
+ equalsTestCase{1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(1073741824), true, false, ""},
+ equalsTestCase{uint(1073741824), true, false, ""},
+ equalsTestCase{int32(1073741824), true, false, ""},
+ equalsTestCase{int64(1073741824), true, false, ""},
+ equalsTestCase{uint32(1073741824), true, false, ""},
+ equalsTestCase{uint64(1073741824), true, false, ""},
+ equalsTestCase{uintptr(1073741824), true, false, ""},
+ equalsTestCase{float32(1073741824), true, false, ""},
+ equalsTestCase{float64(1073741824), true, false, ""},
+ equalsTestCase{complex64(1073741824), true, false, ""},
+ equalsTestCase{complex128(1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(1073741824)), true, false, ""},
+ equalsTestCase{interface{}(uint(1073741824)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1073741823), false, false, ""},
+ equalsTestCase{int32(1073741823), false, false, ""},
+ equalsTestCase{int64(1073741823), false, false, ""},
+ equalsTestCase{float64(1073741824.1), false, false, ""},
+ equalsTestCase{float64(1073741823.9), false, false, ""},
+ equalsTestCase{complex128(1073741823), false, false, ""},
+ equalsTestCase{complex128(1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// int8
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeInt8() {
+ matcher := Equals(int8(-17))
+ ExpectEq("-17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -17.
+ equalsTestCase{-17, true, false, ""},
+ equalsTestCase{-17.0, true, false, ""},
+ equalsTestCase{-17 + 0i, true, false, ""},
+ equalsTestCase{int(-17), true, false, ""},
+ equalsTestCase{int8(-17), true, false, ""},
+ equalsTestCase{int16(-17), true, false, ""},
+ equalsTestCase{int32(-17), true, false, ""},
+ equalsTestCase{int64(-17), true, false, ""},
+ equalsTestCase{float32(-17), true, false, ""},
+ equalsTestCase{float64(-17), true, false, ""},
+ equalsTestCase{complex64(-17), true, false, ""},
+ equalsTestCase{complex128(-17), true, false, ""},
+ equalsTestCase{interface{}(int(-17)), true, false, ""},
+
+ // Values that would be -17 in two's complement.
+ equalsTestCase{uint((1 << 32) - 17), false, false, ""},
+ equalsTestCase{uint8((1 << 8) - 17), false, false, ""},
+ equalsTestCase{uint16((1 << 16) - 17), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 17), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 17), false, false, ""},
+ equalsTestCase{uintptr((1 << 64) - 17), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-16), false, false, ""},
+ equalsTestCase{int8(-16), false, false, ""},
+ equalsTestCase{int16(-16), false, false, ""},
+ equalsTestCase{int32(-16), false, false, ""},
+ equalsTestCase{int64(-16), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float32(-17.1), false, false, ""},
+ equalsTestCase{float32(-16.9), false, false, ""},
+ equalsTestCase{complex64(-16), false, false, ""},
+ equalsTestCase{complex64(-17 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{-17}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{-17}, false, true, "which is not numeric"},
+ equalsTestCase{"-17", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroInt8() {
+ matcher := Equals(int8(0))
+ ExpectEq("0", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 0.
+ equalsTestCase{0, true, false, ""},
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(int(0)), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+ equalsTestCase{uintptr(0), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1), false, false, ""},
+ equalsTestCase{int8(1), false, false, ""},
+ equalsTestCase{int16(1), false, false, ""},
+ equalsTestCase{int32(1), false, false, ""},
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{float32(-0.1), false, false, ""},
+ equalsTestCase{float32(0.1), false, false, ""},
+ equalsTestCase{complex64(1), false, false, ""},
+ equalsTestCase{complex64(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{0}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{0}, false, true, "which is not numeric"},
+ equalsTestCase{"0", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveInt8() {
+ matcher := Equals(int8(17))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 17.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(17), true, false, ""},
+ equalsTestCase{int8(17), true, false, ""},
+ equalsTestCase{int16(17), true, false, ""},
+ equalsTestCase{int32(17), true, false, ""},
+ equalsTestCase{int64(17), true, false, ""},
+ equalsTestCase{float32(17), true, false, ""},
+ equalsTestCase{float64(17), true, false, ""},
+ equalsTestCase{complex64(17), true, false, ""},
+ equalsTestCase{complex128(17), true, false, ""},
+ equalsTestCase{interface{}(int(17)), true, false, ""},
+ equalsTestCase{uint(17), true, false, ""},
+ equalsTestCase{uint8(17), true, false, ""},
+ equalsTestCase{uint16(17), true, false, ""},
+ equalsTestCase{uint32(17), true, false, ""},
+ equalsTestCase{uint64(17), true, false, ""},
+ equalsTestCase{uintptr(17), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(16), false, false, ""},
+ equalsTestCase{int8(16), false, false, ""},
+ equalsTestCase{int16(16), false, false, ""},
+ equalsTestCase{int32(16), false, false, ""},
+ equalsTestCase{int64(16), false, false, ""},
+ equalsTestCase{float32(16.9), false, false, ""},
+ equalsTestCase{float32(17.1), false, false, ""},
+ equalsTestCase{complex64(16), false, false, ""},
+ equalsTestCase{complex64(17 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{17}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{17}, false, true, "which is not numeric"},
+ equalsTestCase{"17", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// int16
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeInt16() {
+ matcher := Equals(int16(-32766))
+ ExpectEq("-32766", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -32766.
+ equalsTestCase{-32766, true, false, ""},
+ equalsTestCase{-32766.0, true, false, ""},
+ equalsTestCase{-32766 + 0i, true, false, ""},
+ equalsTestCase{int(-32766), true, false, ""},
+ equalsTestCase{int16(-32766), true, false, ""},
+ equalsTestCase{int32(-32766), true, false, ""},
+ equalsTestCase{int64(-32766), true, false, ""},
+ equalsTestCase{float32(-32766), true, false, ""},
+ equalsTestCase{float64(-32766), true, false, ""},
+ equalsTestCase{complex64(-32766), true, false, ""},
+ equalsTestCase{complex128(-32766), true, false, ""},
+ equalsTestCase{interface{}(int(-32766)), true, false, ""},
+
+ // Values that would be -32766 in two's complement (see the sketch after
+ // this function).
+ equalsTestCase{uint((1 << 32) - 32766), false, false, ""},
+ equalsTestCase{uint16((1 << 16) - 32766), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 32766), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 32766), false, false, ""},
+ equalsTestCase{uintptr((1 << 64) - 32766), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-16), false, false, ""},
+ equalsTestCase{int8(-16), false, false, ""},
+ equalsTestCase{int16(-16), false, false, ""},
+ equalsTestCase{int32(-16), false, false, ""},
+ equalsTestCase{int64(-16), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float32(-32766.1), false, false, ""},
+ equalsTestCase{float32(-32765.9), false, false, ""},
+ equalsTestCase{complex64(-32766.1), false, false, ""},
+ equalsTestCase{complex64(-32766 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{-32766}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{-32766}, false, true, "which is not numeric"},
+ equalsTestCase{"-32766", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
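+
+// twosComplementValueSketch is a hypothetical sketch added in review, not
+// part of the upstream suite. It illustrates why the "two's complement"
+// cases above must not match: Equals compares mathematical values, while
+// converting the int16 value -32766 to uint16 merely reinterprets the bit
+// pattern, producing the distinct value (1<<16)-32766 == 32770.
+func twosComplementValueSketch() (samePattern, sameValue bool) {
+ var i int16 = -32766
+ u := uint16(i) // reinterprets the bits: u == 32770
+ samePattern = u == uint16((1<<16)-32766) // true: identical bit patterns
+ sameValue = int64(u) == int64(i)         // false: 32770 != -32766
+ return samePattern, sameValue
+}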
+
+func (t *EqualsTest) ZeroInt16() {
+ matcher := Equals(int16(0))
+ ExpectEq("0", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 0.
+ equalsTestCase{0, true, false, ""},
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(int(0)), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+ equalsTestCase{uintptr(0), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1), false, false, ""},
+ equalsTestCase{int8(1), false, false, ""},
+ equalsTestCase{int16(1), false, false, ""},
+ equalsTestCase{int32(1), false, false, ""},
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{float32(-0.1), false, false, ""},
+ equalsTestCase{float32(0.1), false, false, ""},
+ equalsTestCase{complex64(1), false, false, ""},
+ equalsTestCase{complex64(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{0}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{0}, false, true, "which is not numeric"},
+ equalsTestCase{"0", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveInt16() {
+ matcher := Equals(int16(32765))
+ ExpectEq("32765", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 32765.
+ equalsTestCase{32765, true, false, ""},
+ equalsTestCase{32765.0, true, false, ""},
+ equalsTestCase{32765 + 0i, true, false, ""},
+ equalsTestCase{int(32765), true, false, ""},
+ equalsTestCase{int16(32765), true, false, ""},
+ equalsTestCase{int32(32765), true, false, ""},
+ equalsTestCase{int64(32765), true, false, ""},
+ equalsTestCase{float32(32765), true, false, ""},
+ equalsTestCase{float64(32765), true, false, ""},
+ equalsTestCase{complex64(32765), true, false, ""},
+ equalsTestCase{complex128(32765), true, false, ""},
+ equalsTestCase{interface{}(int(32765)), true, false, ""},
+ equalsTestCase{uint(32765), true, false, ""},
+ equalsTestCase{uint16(32765), true, false, ""},
+ equalsTestCase{uint32(32765), true, false, ""},
+ equalsTestCase{uint64(32765), true, false, ""},
+ equalsTestCase{uintptr(32765), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(32764), false, false, ""},
+ equalsTestCase{int16(32764), false, false, ""},
+ equalsTestCase{int32(32764), false, false, ""},
+ equalsTestCase{int64(32764), false, false, ""},
+ equalsTestCase{float32(32764.9), false, false, ""},
+ equalsTestCase{float32(32765.1), false, false, ""},
+ equalsTestCase{complex64(32765.9), false, false, ""},
+ equalsTestCase{complex64(32765 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{32765}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{32765}, false, true, "which is not numeric"},
+ equalsTestCase{"32765", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// int32
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeInt32() {
+ // -2^30
+ matcher := Equals(int32(-1073741824))
+ ExpectEq("-1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -1073741824.
+ equalsTestCase{-1073741824, true, false, ""},
+ equalsTestCase{-1073741824.0, true, false, ""},
+ equalsTestCase{-1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(-1073741824), true, false, ""},
+ equalsTestCase{int32(-1073741824), true, false, ""},
+ equalsTestCase{int64(-1073741824), true, false, ""},
+ equalsTestCase{float32(-1073741824), true, false, ""},
+ equalsTestCase{float64(-1073741824), true, false, ""},
+ equalsTestCase{complex64(-1073741824), true, false, ""},
+ equalsTestCase{complex128(-1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(-1073741824)), true, false, ""},
+
+ // Values that would be -1073741824 in two's complement.
+ equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""},
+ equalsTestCase{uintptr((1 << 64) - 1073741824), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int(-1073741823), false, false, ""},
+ equalsTestCase{int32(-1073741823), false, false, ""},
+ equalsTestCase{int64(-1073741823), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float64(-1073741824.1), false, false, ""},
+ equalsTestCase{float64(-1073741823.9), false, false, ""},
+ equalsTestCase{complex128(-1073741823), false, false, ""},
+ equalsTestCase{complex128(-1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveInt32() {
+ // 2^30
+ matcher := Equals(int32(1073741824))
+ ExpectEq("1073741824", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1073741824.
+ equalsTestCase{1073741824, true, false, ""},
+ equalsTestCase{1073741824.0, true, false, ""},
+ equalsTestCase{1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(1073741824), true, false, ""},
+ equalsTestCase{uint(1073741824), true, false, ""},
+ equalsTestCase{int32(1073741824), true, false, ""},
+ equalsTestCase{int64(1073741824), true, false, ""},
+ equalsTestCase{uint32(1073741824), true, false, ""},
+ equalsTestCase{uint64(1073741824), true, false, ""},
+ equalsTestCase{uintptr(1073741824), true, false, ""},
+ equalsTestCase{float32(1073741824), true, false, ""},
+ equalsTestCase{float64(1073741824), true, false, ""},
+ equalsTestCase{complex64(1073741824), true, false, ""},
+ equalsTestCase{complex128(1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(1073741824)), true, false, ""},
+ equalsTestCase{interface{}(uint(1073741824)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1073741823), false, false, ""},
+ equalsTestCase{int32(1073741823), false, false, ""},
+ equalsTestCase{int64(1073741823), false, false, ""},
+ equalsTestCase{float64(1073741824.1), false, false, ""},
+ equalsTestCase{float64(1073741823.9), false, false, ""},
+ equalsTestCase{complex128(1073741823), false, false, ""},
+ equalsTestCase{complex128(1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// int64
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeInt64() {
+ // -2^40
+ matcher := Equals(int64(-1099511627776))
+ ExpectEq("-1099511627776", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -1099511627776.
+ equalsTestCase{-1099511627776.0, true, false, ""},
+ equalsTestCase{-1099511627776 + 0i, true, false, ""},
+ equalsTestCase{int64(-1099511627776), true, false, ""},
+ equalsTestCase{float32(-1099511627776), true, false, ""},
+ equalsTestCase{float64(-1099511627776), true, false, ""},
+ equalsTestCase{complex64(-1099511627776), true, false, ""},
+ equalsTestCase{complex128(-1099511627776), true, false, ""},
+ equalsTestCase{interface{}(int64(-1099511627776)), true, false, ""},
+
+ // Values that would be -1099511627776 in two's complement.
+ equalsTestCase{uint64((1 << 64) - 1099511627776), false, false, ""},
+
+ // Non-equal values of signed integer type.
+ equalsTestCase{int64(-1099511627775), false, false, ""},
+
+ // Non-equal values of other numeric types.
+ equalsTestCase{float64(-1099511627776.1), false, false, ""},
+ equalsTestCase{float64(-1099511627775.9), false, false, ""},
+ equalsTestCase{complex128(-1099511627775), false, false, ""},
+ equalsTestCase{complex128(-1099511627776 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveInt64() {
+ // 2^40
+ matcher := Equals(int64(1099511627776))
+ ExpectEq("1099511627776", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1099511627776.
+ equalsTestCase{1099511627776.0, true, false, ""},
+ equalsTestCase{1099511627776 + 0i, true, false, ""},
+ equalsTestCase{int64(1099511627776), true, false, ""},
+ equalsTestCase{uint64(1099511627776), true, false, ""},
+ equalsTestCase{uintptr(1099511627776), true, false, ""},
+ equalsTestCase{float32(1099511627776), true, false, ""},
+ equalsTestCase{float64(1099511627776), true, false, ""},
+ equalsTestCase{complex64(1099511627776), true, false, ""},
+ equalsTestCase{complex128(1099511627776), true, false, ""},
+ equalsTestCase{interface{}(int64(1099511627776)), true, false, ""},
+ equalsTestCase{interface{}(uint64(1099511627776)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(1099511627775), false, false, ""},
+ equalsTestCase{uint64(1099511627775), false, false, ""},
+ equalsTestCase{float64(1099511627776.1), false, false, ""},
+ equalsTestCase{float64(1099511627775.9), false, false, ""},
+ equalsTestCase{complex128(1099511627775), false, false, ""},
+ equalsTestCase{complex128(1099511627776 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Int64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix; see the
+ // sketch after this function.
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(int64(kTwoTo25 + 1))
+ ExpectEq("33554433", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
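+
+// float32CollapseSketch is a hypothetical sketch added in review, not part
+// of the upstream suite. float32 carries a 24-bit significand, so integers
+// near 2^25 fall between representable values and, under IEEE round-to-even,
+// every integer in [2^25-1, 2^25+2] converts to exactly 2^25, which is what
+// the float32/complex64 expectations above encode.
+func float32CollapseSketch() bool {
+ const kTwoTo25 = 1 << 25
+ lo := float32(kTwoTo25 - 1)  // halfway case; ties-to-even rounds up to 2^25
+ mid := float32(kTwoTo25 + 2) // halfway case; ties-to-even rounds down to 2^25
+ out := float32(kTwoTo25 + 3) // rounds up to 2^25 + 4
+ return lo == mid && out != mid // true
+}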
+
+func (t *EqualsTest) Int64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix; see the
+ // sketch after this function.
+ const kTwoTo54 = 1 << 54
+ matcher := Equals(int64(kTwoTo54 + 1))
+ ExpectEq("18014398509481985", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
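+
+// float64CollapseSketch mirrors float32CollapseSketch for double precision
+// (again a hypothetical, editor-added sketch): with a 53-bit significand,
+// every integer in [2^54-1, 2^54+2] converts to exactly 2^54, matching the
+// float64/complex128 expectations above.
+func float64CollapseSketch() bool {
+ const kTwoTo54 = 1 << 54
+ lo := float64(kTwoTo54 - 1)  // ties-to-even rounds up to 2^54
+ mid := float64(kTwoTo54 + 2) // ties-to-even rounds down to 2^54
+ out := float64(kTwoTo54 + 3) // rounds up to 2^54 + 4
+ return lo == mid && out != mid // true
+}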
+
+////////////////////////////////////////////////////////////////////////
+// uint
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) SmallUint() {
+ const kExpected = 17
+ matcher := Equals(uint(kExpected))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int8(kExpected), true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint8(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{uintptr(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{kExpected + 1, false, false, ""},
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int8(kExpected + 1), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{uintptr(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeUint() {
+ const kExpected = (1 << 16) + 17
+ matcher := Equals(uint(kExpected))
+ ExpectEq("65553", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{65553, true, false, ""},
+ equalsTestCase{65553.0, true, false, ""},
+ equalsTestCase{65553 + 0i, true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{int16(17), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(17), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) UintNotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(uint(kTwoTo25 + 1))
+ ExpectEq("33554433", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// uint8
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) SmallUint8() {
+ const kExpected = 17
+ matcher := Equals(uint8(kExpected))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int8(kExpected), true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint8(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{uintptr(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{kExpected + 1, false, false, ""},
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int8(kExpected + 1), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{uintptr(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// uint16
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) SmallUint16() {
+ const kExpected = 17
+ matcher := Equals(uint16(kExpected))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int8(kExpected), true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint8(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{uintptr(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{kExpected + 1, false, false, ""},
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int8(kExpected + 1), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{uintptr(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeUint16() {
+ const kExpected = (1 << 8) + 17
+ matcher := Equals(uint16(kExpected))
+ ExpectEq("273", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{273, true, false, ""},
+ equalsTestCase{273.0, true, false, ""},
+ equalsTestCase{273 + 0i, true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{int8(17), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(17), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// uint32
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) SmallUint32() {
+ const kExpected = 17
+ matcher := Equals(uint32(kExpected))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int8(kExpected), true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint8(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{uintptr(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{kExpected + 1, false, false, ""},
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int8(kExpected + 1), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{uintptr(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeUint32() {
+ const kExpected = (1 << 16) + 17
+ matcher := Equals(uint32(kExpected))
+ ExpectEq("65553", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{65553, true, false, ""},
+ equalsTestCase{65553.0, true, false, ""},
+ equalsTestCase{65553 + 0i, true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{int16(17), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(17), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Uint32NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(uint32(kTwoTo25 + 1))
+ ExpectEq("33554433", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// uint64
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) SmallUint64() {
+ const kExpected = 17
+ matcher := Equals(uint64(kExpected))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int8(kExpected), true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint8(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{uintptr(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{kExpected + 1, false, false, ""},
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int8(kExpected + 1), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{uintptr(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeUint64() {
+ const kExpected = (1 << 32) + 17
+ matcher := Equals(uint64(kExpected))
+ ExpectEq("4294967313", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{4294967313.0, true, false, ""},
+ equalsTestCase{4294967313 + 0i, true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{int(17), false, false, ""},
+ equalsTestCase{int32(17), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(17), false, false, ""},
+ equalsTestCase{uint32(17), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Uint64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(uint64(kTwoTo25 + 1))
+ ExpectEq("33554433", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Uint64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := Equals(uint64(kTwoTo54 + 1))
+ ExpectEq("18014398509481985", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// uintptr
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) SmallUintptr() {
+ const kExpected = 17
+ matcher := Equals(uintptr(kExpected))
+ ExpectEq("17", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{17, true, false, ""},
+ equalsTestCase{17.0, true, false, ""},
+ equalsTestCase{17 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int8(kExpected), true, false, ""},
+ equalsTestCase{int16(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint8(kExpected), true, false, ""},
+ equalsTestCase{uint16(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{uintptr(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{kExpected + 1, false, false, ""},
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int8(kExpected + 1), false, false, ""},
+ equalsTestCase{int16(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint8(kExpected + 1), false, false, ""},
+ equalsTestCase{uint16(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{uintptr(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeUintptr() {
+ const kExpected = (1 << 32) + 17
+ matcher := Equals(uintptr(kExpected))
+ ExpectEq("4294967313", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{4294967313.0, true, false, ""},
+ equalsTestCase{4294967313 + 0i, true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{uintptr(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric types.
+ equalsTestCase{int(17), false, false, ""},
+ equalsTestCase{int32(17), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(17), false, false, ""},
+ equalsTestCase{uint32(17), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{uintptr(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected + 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 1), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// float32
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegralFloat32() {
+ matcher := Equals(float32(-32769))
+ ExpectEq("-32769", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -32769.
+ equalsTestCase{-32769.0, true, false, ""},
+ equalsTestCase{-32769 + 0i, true, false, ""},
+ equalsTestCase{int32(-32769), true, false, ""},
+ equalsTestCase{int64(-32769), true, false, ""},
+ equalsTestCase{float32(-32769), true, false, ""},
+ equalsTestCase{float64(-32769), true, false, ""},
+ equalsTestCase{complex64(-32769), true, false, ""},
+ equalsTestCase{complex128(-32769), true, false, ""},
+ equalsTestCase{interface{}(float32(-32769)), true, false, ""},
+ equalsTestCase{interface{}(int64(-32769)), true, false, ""},
+
+ // Values that would be -32769 in two's complement.
+ equalsTestCase{uint64((1 << 64) - 32769), false, false, ""},
+ equalsTestCase{uintptr((1 << 64) - 32769), false, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(-32770), false, false, ""},
+ equalsTestCase{float32(-32769.1), false, false, ""},
+ equalsTestCase{float32(-32768.9), false, false, ""},
+ equalsTestCase{float64(-32769.1), false, false, ""},
+ equalsTestCase{float64(-32768.9), false, false, ""},
+ equalsTestCase{complex128(-32768), false, false, ""},
+ equalsTestCase{complex128(-32769 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NegativeNonIntegralFloat32() {
+ matcher := Equals(float32(-32769.1))
+ ExpectEq("-32769.1", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of -32769.1.
+ equalsTestCase{-32769.1, true, false, ""},
+ equalsTestCase{-32769.1 + 0i, true, false, ""},
+ equalsTestCase{float32(-32769.1), true, false, ""},
+ equalsTestCase{float64(-32769.1), true, false, ""},
+ equalsTestCase{complex64(-32769.1), true, false, ""},
+ equalsTestCase{complex128(-32769.1), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int32(-32769), false, false, ""},
+ equalsTestCase{int32(-32770), false, false, ""},
+ equalsTestCase{int64(-32769), false, false, ""},
+ equalsTestCase{int64(-32770), false, false, ""},
+ equalsTestCase{float32(-32769.2), false, false, ""},
+ equalsTestCase{float32(-32769.0), false, false, ""},
+ equalsTestCase{float64(-32769.2), false, false, ""},
+ equalsTestCase{complex128(-32769.1 + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
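+
+// narrowingSketch is a hypothetical sketch added in review, not part of the
+// upstream suite. The expectations above imply that a float64 candidate is
+// compared against a float32 expectation at float32 precision: the two
+// renderings of -32769.1 differ bit-for-bit as float64, yet agree once
+// narrowed to float32.
+func narrowingSketch() (differAsFloat64, equalAfterNarrowing bool) {
+ const c = -32769.1
+ differAsFloat64 = float64(float32(c)) != float64(c)     // true: float32 rounds more coarsely
+ equalAfterNarrowing = float32(float64(c)) == float32(c) // true: both round to the same float32
+ return differAsFloat64, equalAfterNarrowing
+}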
+
+func (t *EqualsTest) LargeNegativeFloat32() {
+ const kExpected = -1 * (1 << 65)
+ matcher := Equals(float32(kExpected))
+ ExpectEq("-3.689349e+19", matcher.Description())
+
+ floatExpected := float32(kExpected)
+ castedInt := int64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroFloat32() {
+ matcher := Equals(float32(0))
+ ExpectEq("0", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of zero.
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+ equalsTestCase{uintptr(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(float32(0)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{int64(-1), false, false, ""},
+ equalsTestCase{float32(1), false, false, ""},
+ equalsTestCase{float32(-1), false, false, ""},
+ equalsTestCase{complex128(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralFloat32() {
+ matcher := Equals(float32(32769))
+ ExpectEq("32769", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 32769.
+ equalsTestCase{32769.0, true, false, ""},
+ equalsTestCase{32769 + 0i, true, false, ""},
+ equalsTestCase{int(32769), true, false, ""},
+ equalsTestCase{int32(32769), true, false, ""},
+ equalsTestCase{int64(32769), true, false, ""},
+ equalsTestCase{uint(32769), true, false, ""},
+ equalsTestCase{uint32(32769), true, false, ""},
+ equalsTestCase{uint64(32769), true, false, ""},
+ equalsTestCase{uintptr(32769), true, false, ""},
+ equalsTestCase{float32(32769), true, false, ""},
+ equalsTestCase{float64(32769), true, false, ""},
+ equalsTestCase{complex64(32769), true, false, ""},
+ equalsTestCase{complex128(32769), true, false, ""},
+ equalsTestCase{interface{}(float32(32769)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(32770), false, false, ""},
+ equalsTestCase{uint64(32770), false, false, ""},
+ equalsTestCase{float32(32769.1), false, false, ""},
+ equalsTestCase{float32(32768.9), false, false, ""},
+ equalsTestCase{float64(32769.1), false, false, ""},
+ equalsTestCase{float64(32768.9), false, false, ""},
+ equalsTestCase{complex128(32768), false, false, ""},
+ equalsTestCase{complex128(32769 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveNonIntegralFloat32() {
+ matcher := Equals(float32(32769.1))
+ ExpectEq("32769.1", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 32769.1.
+ equalsTestCase{32769.1, true, false, ""},
+ equalsTestCase{32769.1 + 0i, true, false, ""},
+ equalsTestCase{float32(32769.1), true, false, ""},
+ equalsTestCase{float64(32769.1), true, false, ""},
+ equalsTestCase{complex64(32769.1), true, false, ""},
+ equalsTestCase{complex128(32769.1), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int32(32769), false, false, ""},
+ equalsTestCase{int32(32770), false, false, ""},
+ equalsTestCase{uint64(32769), false, false, ""},
+ equalsTestCase{uint64(32770), false, false, ""},
+ equalsTestCase{float32(32769.2), false, false, ""},
+ equalsTestCase{float32(32769.0), false, false, ""},
+ equalsTestCase{float64(32769.2), false, false, ""},
+ equalsTestCase{complex128(32769.1 + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargePositiveFloat32() {
+ const kExpected = 1 << 65
+ matcher := Equals(float32(kExpected))
+ ExpectEq("3.689349e+19", matcher.Description())
+
+ floatExpected := float32(kExpected)
+ castedInt := uint64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{uint64(0), false, false, ""},
+ equalsTestCase{uint64(math.MaxUint64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Float32AboveExactIntegerRange() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix. (A
+ // standalone sketch of this collapse follows the function.)
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(float32(kTwoTo25 + 1))
+ ExpectEq("3.3554432e+07", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 3), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
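+
+// Editorial sketch, using only the standard library: a float32 carries a
+// 24-bit significand, so above 2^24 it cannot represent every integer and
+// neighboring integers collapse onto the same value. That collapse is what
+// the cases above encode around 2^25 + 1.
+func ExampleFloat32IntegerCollapse() {
+ const kTwoTo25 = 1 << 25
+ fmt.Println(float32(kTwoTo25+1) == float32(kTwoTo25))
+ fmt.Println(float32(kTwoTo25+3) == float32(kTwoTo25))
+ // Output:
+ // true
+ // false
+}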
+
+////////////////////////////////////////////////////////////////////////
+// float64
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegralFloat64() {
+ const kExpected = -(1 << 50)
+ matcher := Equals(float64(kExpected))
+ ExpectEq("-1.125899906842624e+15", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{-1125899906842624.0, true, false, ""},
+ equalsTestCase{-1125899906842624.0 + 0i, true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+ // Values that would be kExpected in two's complement.
+ equalsTestCase{uint64((1 << 64) + kExpected), false, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NegativeNonIntegralFloat64() {
+ const kTwoTo50 = 1 << 50
+ const kExpected = -kTwoTo50 - 0.25
+
+ matcher := Equals(float64(kExpected))
+ ExpectEq("-1.1258999068426242e+15", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(-kTwoTo50), false, false, ""},
+ equalsTestCase{int64(-kTwoTo50 - 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeNegativeFloat64() {
+ const kExpected = -1 * (1 << 65)
+ matcher := Equals(float64(kExpected))
+ ExpectEq("-3.6893488147419103e+19", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := int64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroFloat64() {
+ matcher := Equals(float64(0))
+ ExpectEq("0", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of zero.
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+ equalsTestCase{uintptr(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(float32(0)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{int64(-1), false, false, ""},
+ equalsTestCase{float32(1), false, false, ""},
+ equalsTestCase{float32(-1), false, false, ""},
+ equalsTestCase{complex128(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralFloat64() {
+ const kExpected = 1 << 50
+ matcher := Equals(float64(kExpected))
+ ExpectEq("1.125899906842624e+15", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{1125899906842624.0, true, false, ""},
+ equalsTestCase{1125899906842624.0 + 0i, true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{uintptr(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{uintptr(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveNonIntegralFloat64() {
+ const kTwoTo50 = 1 << 50
+ const kExpected = kTwoTo50 + 0.25
+ matcher := Equals(float64(kExpected))
+ ExpectEq("1.1258999068426242e+15", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kTwoTo50), false, false, ""},
+ equalsTestCase{int64(kTwoTo50 - 1), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargePositiveFloat64() {
+ const kExpected = 1 << 65
+ matcher := Equals(float64(kExpected))
+ ExpectEq("3.6893488147419103e+19", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := uint64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{uint64(0), false, false, ""},
+ equalsTestCase{uint64(math.MaxUint64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Float64AboveExactIntegerRange() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix. (A
+ // double-precision sketch follows the function.)
+ const kTwoTo54 = 1 << 54
+ matcher := Equals(float64(kTwoTo54 + 1))
+ ExpectEq("1.8014398509481984e+16", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
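+
+// Editorial sketch: the double-precision analogue of the collapse above. A
+// float64 carries a 53-bit significand, so integers above 2^53 begin to
+// collide; 2^54 + 1 rounds to exactly 2^54.
+func ExampleFloat64IntegerCollapse() {
+ const kTwoTo54 = 1 << 54
+ fmt.Println(float64(kTwoTo54+1) == float64(kTwoTo54))
+ // Output:
+ // true
+}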
+
+////////////////////////////////////////////////////////////////////////
+// complex64
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegralComplex64() {
+ const kExpected = -32769
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(-32769+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{-32769.0, true, false, ""},
+ equalsTestCase{-32769.0 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+ // Values that would be kExpected in two's complement.
+ equalsTestCase{uint32((1 << 32) + kExpected), false, false, ""},
+ equalsTestCase{uint64((1 << 64) + kExpected), false, false, ""},
+ equalsTestCase{uintptr((1 << 64) + kExpected), false, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NegativeNonIntegralComplex64() {
+ const kTwoTo20 = 1 << 20
+ const kExpected = -kTwoTo20 - 0.25
+
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(-1.0485762e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(-kTwoTo20), false, false, ""},
+ equalsTestCase{int(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{int32(-kTwoTo20), false, false, ""},
+ equalsTestCase{int32(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{int64(-kTwoTo20), false, false, ""},
+ equalsTestCase{int64(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex64(kExpected - 0.75), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 0.75), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeNegativeComplex64() {
+ const kExpected = -1 * (1 << 65)
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(-3.689349e+19+0i)", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := int64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroComplex64() {
+ matcher := Equals(complex64(0))
+ ExpectEq("(0+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of zero.
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+ equalsTestCase{uintptr(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(float32(0)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{int64(-1), false, false, ""},
+ equalsTestCase{float32(1), false, false, ""},
+ equalsTestCase{float32(-1), false, false, ""},
+ equalsTestCase{float64(1), false, false, ""},
+ equalsTestCase{float64(-1), false, false, ""},
+ equalsTestCase{complex64(0 + 2i), false, false, ""},
+ equalsTestCase{complex128(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralComplex64() {
+ const kExpected = 1 << 20
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(1.048576e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{1048576.0, true, false, ""},
+ equalsTestCase{1048576.0 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{uintptr(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{uintptr(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveNonIntegralComplex64() {
+ const kTwoTo20 = 1 << 20
+ const kExpected = kTwoTo20 + 0.25
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(1.0485762e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kTwoTo20), false, false, ""},
+ equalsTestCase{int64(kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{uint64(kTwoTo20), false, false, ""},
+ equalsTestCase{uint64(kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{float32(kExpected - 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargePositiveComplex64() {
+ const kExpected = 1 << 65
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(3.689349e+19+0i)", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := uint64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{uint64(0), false, false, ""},
+ equalsTestCase{uint64(math.MaxUint64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Complex64AboveExactIntegerRange() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := Equals(complex64(kTwoTo25 + 1))
+ ExpectEq("(3.3554432e+07+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{int64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{int64(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{uint64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{uint64(kTwoTo25 + 3), false, false, ""},
+
+ // Single-precision floating point.
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo25 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo25 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo25 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Complex64WithNonZeroImaginaryPart() {
+ const kRealPart = 17
+ const kImagPart = 0.25i
+ const kExpected = kRealPart + kImagPart
+ matcher := Equals(complex64(kExpected))
+ ExpectEq("(17+0.25i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kRealPart + kImagPart, true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(kRealPart), false, false, ""},
+ equalsTestCase{int8(kRealPart), false, false, ""},
+ equalsTestCase{int16(kRealPart), false, false, ""},
+ equalsTestCase{int32(kRealPart), false, false, ""},
+ equalsTestCase{int64(kRealPart), false, false, ""},
+ equalsTestCase{uint(kRealPart), false, false, ""},
+ equalsTestCase{uint8(kRealPart), false, false, ""},
+ equalsTestCase{uint16(kRealPart), false, false, ""},
+ equalsTestCase{uint32(kRealPart), false, false, ""},
+ equalsTestCase{uint64(kRealPart), false, false, ""},
+ equalsTestCase{float32(kRealPart), false, false, ""},
+ equalsTestCase{float64(kRealPart), false, false, ""},
+ equalsTestCase{complex64(kRealPart), false, false, ""},
+ equalsTestCase{complex64(kRealPart + kImagPart + 0.5), false, false, ""},
+ equalsTestCase{complex64(kRealPart + kImagPart + 0.5i), false, false, ""},
+ equalsTestCase{complex128(kRealPart), false, false, ""},
+ equalsTestCase{complex128(kRealPart + kImagPart + 0.5), false, false, ""},
+ equalsTestCase{complex128(kRealPart + kImagPart + 0.5i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
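+
+// Editorial sketch: complex equality requires the real and imaginary parts
+// to match independently, so no purely real candidate can equal an
+// expectation with a nonzero imaginary part.
+func ExampleImaginaryPartMustMatch() {
+ fmt.Println(complex(17, 0.25) == complex(17, 0))
+ fmt.Println(real(complex(17, 0.25)) == 17.0)
+ // Output:
+ // false
+ // true
+}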
+
+////////////////////////////////////////////////////////////////////////
+// complex128
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NegativeIntegralComplex128() {
+ const kExpected = -32769
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(-32769+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{-32769.0, true, false, ""},
+ equalsTestCase{-32769.0 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+ // Values that would be kExpected in two's complement.
+ equalsTestCase{uint32((1 << 32) + kExpected), false, false, ""},
+ equalsTestCase{uint64((1 << 64) + kExpected), false, false, ""},
+ equalsTestCase{uintptr((1 << 64) + kExpected), false, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NegativeNonIntegralComplex128() {
+ const kTwoTo20 = 1 << 20
+ const kExpected = -kTwoTo20 - 0.25
+
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(-1.04857625e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(-kTwoTo20), false, false, ""},
+ equalsTestCase{int(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{int32(-kTwoTo20), false, false, ""},
+ equalsTestCase{int32(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{int64(-kTwoTo20), false, false, ""},
+ equalsTestCase{int64(-kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex64(kExpected - 0.75), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 0.75), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargeNegativeComplex128() {
+ const kExpected = -1 * (1 << 65)
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(-3.6893488147419103e+19+0i)", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := int64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroComplex128() {
+ matcher := Equals(complex128(0))
+ ExpectEq("(0+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of zero.
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+ equalsTestCase{uintptr(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(float32(0)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{int64(-1), false, false, ""},
+ equalsTestCase{float32(1), false, false, ""},
+ equalsTestCase{float32(-1), false, false, ""},
+ equalsTestCase{float64(1), false, false, ""},
+ equalsTestCase{float64(-1), false, false, ""},
+ equalsTestCase{complex64(0 + 2i), false, false, ""},
+ equalsTestCase{complex128(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralComplex128() {
+ const kExpected = 1 << 20
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(1.048576e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{1048576.0, true, false, ""},
+ equalsTestCase{1048576.0 + 0i, true, false, ""},
+ equalsTestCase{int(kExpected), true, false, ""},
+ equalsTestCase{int32(kExpected), true, false, ""},
+ equalsTestCase{int64(kExpected), true, false, ""},
+ equalsTestCase{uint(kExpected), true, false, ""},
+ equalsTestCase{uint32(kExpected), true, false, ""},
+ equalsTestCase{uint64(kExpected), true, false, ""},
+ equalsTestCase{uintptr(kExpected), true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+ equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(kExpected + 1), false, false, ""},
+ equalsTestCase{int32(kExpected + 1), false, false, ""},
+ equalsTestCase{int64(kExpected + 1), false, false, ""},
+ equalsTestCase{uint(kExpected + 1), false, false, ""},
+ equalsTestCase{uint32(kExpected + 1), false, false, ""},
+ equalsTestCase{uint64(kExpected + 1), false, false, ""},
+ equalsTestCase{uintptr(kExpected + 1), false, false, ""},
+ equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+ equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveNonIntegralComplex128() {
+ const kTwoTo20 = 1 << 20
+ const kExpected = kTwoTo20 + 0.25
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(1.04857625e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(kTwoTo20), false, false, ""},
+ equalsTestCase{int64(kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{uint64(kTwoTo20), false, false, ""},
+ equalsTestCase{uint64(kTwoTo20 - 1), false, false, ""},
+ equalsTestCase{float32(kExpected - 1), false, false, ""},
+ equalsTestCase{float32(kExpected + 1), false, false, ""},
+ equalsTestCase{float64(kExpected - 0.25), false, false, ""},
+ equalsTestCase{float64(kExpected + 0.25), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1), false, false, ""},
+ equalsTestCase{complex64(kExpected - 1i), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1), false, false, ""},
+ equalsTestCase{complex128(kExpected - 1i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) LargePositiveComplex128() {
+ const kExpected = 1 << 65
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(3.6893488147419103e+19+0i)", matcher.Description())
+
+ floatExpected := float64(kExpected)
+ castedInt := uint64(floatExpected)
+
+ cases := []equalsTestCase{
+ // Equal values of numeric type.
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{uint64(0), false, false, ""},
+ equalsTestCase{uint64(math.MaxUint64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Complex128AboveExactIntegerRange() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := Equals(complex128(kTwoTo54 + 1))
+ ExpectEq("(1.8014398509481984e+16+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Integers.
+ equalsTestCase{int64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{int64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{uint64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{uint64(kTwoTo54 + 3), false, false, ""},
+
+ // Double-precision floating point.
+ equalsTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{float64(kTwoTo54 + 3), false, false, ""},
+
+ equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""},
+ equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""},
+ equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) Complex128WithNonZeroImaginaryPart() {
+ const kRealPart = 17
+ const kImagPart = 0.25i
+ const kExpected = kRealPart + kImagPart
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(17+0.25i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of the expected value.
+ equalsTestCase{kExpected, true, false, ""},
+ equalsTestCase{kRealPart + kImagPart, true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(kRealPart), false, false, ""},
+ equalsTestCase{int8(kRealPart), false, false, ""},
+ equalsTestCase{int16(kRealPart), false, false, ""},
+ equalsTestCase{int32(kRealPart), false, false, ""},
+ equalsTestCase{int64(kRealPart), false, false, ""},
+ equalsTestCase{uint(kRealPart), false, false, ""},
+ equalsTestCase{uint8(kRealPart), false, false, ""},
+ equalsTestCase{uint16(kRealPart), false, false, ""},
+ equalsTestCase{uint32(kRealPart), false, false, ""},
+ equalsTestCase{uint64(kRealPart), false, false, ""},
+ equalsTestCase{float32(kRealPart), false, false, ""},
+ equalsTestCase{float64(kRealPart), false, false, ""},
+ equalsTestCase{complex64(kRealPart), false, false, ""},
+ equalsTestCase{complex64(kRealPart + kImagPart + 0.5), false, false, ""},
+ equalsTestCase{complex64(kRealPart + kImagPart + 0.5i), false, false, ""},
+ equalsTestCase{complex128(kRealPart), false, false, ""},
+ equalsTestCase{complex128(kRealPart + kImagPart + 0.5), false, false, ""},
+ equalsTestCase{complex128(kRealPart + kImagPart + 0.5i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Arrays
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) ArrayOfComparableType() {
+ expected := [3]uint{17, 19, 23}
+
+ matcher := Equals(expected)
+ ExpectEq("[17 19 23]", matcher.Description())
+
+ // Construct the array at run time, to defeat constant de-duplication by the compiler.
+ makeArray := func(i, j, k uint) [3]uint { return [3]uint{i, j, k} }
+
+ type arrayAlias [3]uint
+ type uintAlias uint
+
+ cases := []equalsTestCase{
+ // Correct types, equal.
+ equalsTestCase{expected, true, false, ""},
+ equalsTestCase{[3]uint{17, 19, 23}, true, false, ""},
+ equalsTestCase{makeArray(17, 19, 23), true, false, ""},
+
+ // Correct types, not equal.
+ equalsTestCase{[3]uint{0, 0, 0}, false, false, ""},
+ equalsTestCase{[3]uint{18, 19, 23}, false, false, ""},
+ equalsTestCase{[3]uint{17, 20, 23}, false, false, ""},
+ equalsTestCase{[3]uint{17, 19, 22}, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not [3]uint"},
+ equalsTestCase{bool(false), false, true, "which is not [3]uint"},
+ equalsTestCase{int(0), false, true, "which is not [3]uint"},
+ equalsTestCase{int8(0), false, true, "which is not [3]uint"},
+ equalsTestCase{int16(0), false, true, "which is not [3]uint"},
+ equalsTestCase{int32(0), false, true, "which is not [3]uint"},
+ equalsTestCase{int64(0), false, true, "which is not [3]uint"},
+ equalsTestCase{uint(0), false, true, "which is not [3]uint"},
+ equalsTestCase{uint8(0), false, true, "which is not [3]uint"},
+ equalsTestCase{uint16(0), false, true, "which is not [3]uint"},
+ equalsTestCase{uint32(0), false, true, "which is not [3]uint"},
+ equalsTestCase{uint64(0), false, true, "which is not [3]uint"},
+ equalsTestCase{true, false, true, "which is not [3]uint"},
+ equalsTestCase{[...]int{}, false, true, "which is not [3]uint"},
+ equalsTestCase{func() {}, false, true, "which is not [3]uint"},
+ equalsTestCase{map[int]int{}, false, true, "which is not [3]uint"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not [3]uint"},
+ equalsTestCase{[2]uint{17, 19}, false, true, "which is not [3]uint"},
+ equalsTestCase{[4]uint{17, 19, 23, 0}, false, true, "which is not [3]uint"},
+ equalsTestCase{arrayAlias{17, 19, 23}, false, true, "which is not [3]uint"},
+ equalsTestCase{[3]uintAlias{17, 19, 23}, false, true, "which is not [3]uint"},
+ equalsTestCase{[3]int32{17, 19, 23}, false, true, "which is not [3]uint"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ArrayOfNonComparableType() {
+ type nonComparableArray [2]map[string]string
+ f := func() {
+ ExpectEq(nonComparableArray{}, nonComparableArray{})
+ }
+
+ ExpectThat(f, Panics(MatchesRegexp("uncomparable.*nonComparableArray")))
+}
+
+////////////////////////////////////////////////////////////////////////
+// chan
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NilChan() {
+ var nilChan1 chan int
+ var nilChan2 chan int
+ var nilChan3 chan uint
+ var nonNilChan1 chan int = make(chan int)
+ var nonNilChan2 chan uint = make(chan uint)
+
+ matcher := Equals(nilChan1)
+ ExpectEq("<nil>", matcher.Description())
+
+ cases := []equalsTestCase{
+ // int channels
+ equalsTestCase{nilChan1, true, false, ""},
+ equalsTestCase{nilChan2, true, false, ""},
+ equalsTestCase{nonNilChan1, false, false, ""},
+
+ // uint channels
+ equalsTestCase{nilChan3, false, true, "which is not a chan int"},
+ equalsTestCase{nonNilChan2, false, true, "which is not a chan int"},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a chan int"},
+ equalsTestCase{bool(false), false, true, "which is not a chan int"},
+ equalsTestCase{int(0), false, true, "which is not a chan int"},
+ equalsTestCase{int8(0), false, true, "which is not a chan int"},
+ equalsTestCase{int16(0), false, true, "which is not a chan int"},
+ equalsTestCase{int32(0), false, true, "which is not a chan int"},
+ equalsTestCase{int64(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint8(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint16(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint32(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint64(0), false, true, "which is not a chan int"},
+ equalsTestCase{true, false, true, "which is not a chan int"},
+ equalsTestCase{[...]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{func() {}, false, true, "which is not a chan int"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{&someInt, false, true, "which is not a chan int"},
+ equalsTestCase{[]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{"taco", false, true, "which is not a chan int"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a chan int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonNilChan() {
+ var nilChan1 chan int
+ var nilChan2 chan uint
+ var nonNilChan1 chan int = make(chan int)
+ var nonNilChan2 chan int = make(chan int)
+ var nonNilChan3 chan uint = make(chan uint)
+
+ matcher := Equals(nonNilChan1)
+ ExpectEq(fmt.Sprintf("%v", nonNilChan1), matcher.Description())
+
+ cases := []equalsTestCase{
+ // int channels
+ equalsTestCase{nonNilChan1, true, false, ""},
+ equalsTestCase{nonNilChan2, false, false, ""},
+ equalsTestCase{nilChan1, false, false, ""},
+
+ // uint channels
+ equalsTestCase{nilChan2, false, true, "which is not a chan int"},
+ equalsTestCase{nonNilChan3, false, true, "which is not a chan int"},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a chan int"},
+ equalsTestCase{bool(false), false, true, "which is not a chan int"},
+ equalsTestCase{int(0), false, true, "which is not a chan int"},
+ equalsTestCase{int8(0), false, true, "which is not a chan int"},
+ equalsTestCase{int16(0), false, true, "which is not a chan int"},
+ equalsTestCase{int32(0), false, true, "which is not a chan int"},
+ equalsTestCase{int64(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint8(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint16(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint32(0), false, true, "which is not a chan int"},
+ equalsTestCase{uint64(0), false, true, "which is not a chan int"},
+ equalsTestCase{true, false, true, "which is not a chan int"},
+ equalsTestCase{[...]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{func() {}, false, true, "which is not a chan int"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{&someInt, false, true, "which is not a chan int"},
+ equalsTestCase{[]int{}, false, true, "which is not a chan int"},
+ equalsTestCase{"taco", false, true, "which is not a chan int"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a chan int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ChanDirection() {
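+	// chan<- int, <-chan int, and chan int are three distinct types, so
+	// only a candidate with the identical direction can match.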
+ var chan1 chan<- int
+ var chan2 <-chan int
+ var chan3 chan int
+
+ matcher := Equals(chan1)
+ ExpectEq(fmt.Sprintf("%v", chan1), matcher.Description())
+
+ cases := []equalsTestCase{
+ equalsTestCase{chan1, true, false, ""},
+ equalsTestCase{chan2, false, true, "which is not a chan<- int"},
+ equalsTestCase{chan3, false, true, "which is not a chan<- int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// func
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) Functions() {
+ func1 := func() {}
+ func2 := func() {}
+ func3 := func(x int) {}
+
+ matcher := Equals(func1)
+ ExpectEq(fmt.Sprintf("%v", func1), matcher.Description())
+
+ cases := []equalsTestCase{
+ // Functions.
+ equalsTestCase{func1, true, false, ""},
+ equalsTestCase{func2, false, false, ""},
+ equalsTestCase{func3, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a function"},
+ equalsTestCase{bool(false), false, true, "which is not a function"},
+ equalsTestCase{int(0), false, true, "which is not a function"},
+ equalsTestCase{int8(0), false, true, "which is not a function"},
+ equalsTestCase{int16(0), false, true, "which is not a function"},
+ equalsTestCase{int32(0), false, true, "which is not a function"},
+ equalsTestCase{int64(0), false, true, "which is not a function"},
+ equalsTestCase{uint(0), false, true, "which is not a function"},
+ equalsTestCase{uint8(0), false, true, "which is not a function"},
+ equalsTestCase{uint16(0), false, true, "which is not a function"},
+ equalsTestCase{uint32(0), false, true, "which is not a function"},
+ equalsTestCase{uint64(0), false, true, "which is not a function"},
+ equalsTestCase{true, false, true, "which is not a function"},
+ equalsTestCase{[...]int{}, false, true, "which is not a function"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a function"},
+ equalsTestCase{&someInt, false, true, "which is not a function"},
+ equalsTestCase{[]int{}, false, true, "which is not a function"},
+ equalsTestCase{"taco", false, true, "which is not a function"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a function"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// map
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NilMap() {
+ var nilMap1 map[int]int
+ var nilMap2 map[int]int
+ var nilMap3 map[int]uint
+ var nonNilMap1 map[int]int = make(map[int]int)
+ var nonNilMap2 map[int]uint = make(map[int]uint)
+
+ matcher := Equals(nilMap1)
+ ExpectEq("map[]", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nilMap1, true, false, ""},
+ equalsTestCase{nilMap2, true, false, ""},
+ equalsTestCase{nilMap3, true, false, ""},
+ equalsTestCase{nonNilMap1, false, false, ""},
+ equalsTestCase{nonNilMap2, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a map"},
+ equalsTestCase{bool(false), false, true, "which is not a map"},
+ equalsTestCase{int(0), false, true, "which is not a map"},
+ equalsTestCase{int8(0), false, true, "which is not a map"},
+ equalsTestCase{int16(0), false, true, "which is not a map"},
+ equalsTestCase{int32(0), false, true, "which is not a map"},
+ equalsTestCase{int64(0), false, true, "which is not a map"},
+ equalsTestCase{uint(0), false, true, "which is not a map"},
+ equalsTestCase{uint8(0), false, true, "which is not a map"},
+ equalsTestCase{uint16(0), false, true, "which is not a map"},
+ equalsTestCase{uint32(0), false, true, "which is not a map"},
+ equalsTestCase{uint64(0), false, true, "which is not a map"},
+ equalsTestCase{true, false, true, "which is not a map"},
+ equalsTestCase{[...]int{}, false, true, "which is not a map"},
+ equalsTestCase{func() {}, false, true, "which is not a map"},
+ equalsTestCase{&someInt, false, true, "which is not a map"},
+ equalsTestCase{[]int{}, false, true, "which is not a map"},
+ equalsTestCase{"taco", false, true, "which is not a map"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a map"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonNilMap() {
+ var nilMap1 map[int]int
+ var nilMap2 map[int]uint
+ var nonNilMap1 map[int]int = make(map[int]int)
+ var nonNilMap2 map[int]int = make(map[int]int)
+ var nonNilMap3 map[int]uint = make(map[int]uint)
+
+ matcher := Equals(nonNilMap1)
+ ExpectEq("map[]", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nonNilMap1, true, false, ""},
+ equalsTestCase{nonNilMap2, false, false, ""},
+ equalsTestCase{nonNilMap3, false, false, ""},
+ equalsTestCase{nilMap1, false, false, ""},
+ equalsTestCase{nilMap2, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a map"},
+ equalsTestCase{bool(false), false, true, "which is not a map"},
+ equalsTestCase{int(0), false, true, "which is not a map"},
+ equalsTestCase{int8(0), false, true, "which is not a map"},
+ equalsTestCase{int16(0), false, true, "which is not a map"},
+ equalsTestCase{int32(0), false, true, "which is not a map"},
+ equalsTestCase{int64(0), false, true, "which is not a map"},
+ equalsTestCase{uint(0), false, true, "which is not a map"},
+ equalsTestCase{uint8(0), false, true, "which is not a map"},
+ equalsTestCase{uint16(0), false, true, "which is not a map"},
+ equalsTestCase{uint32(0), false, true, "which is not a map"},
+ equalsTestCase{uint64(0), false, true, "which is not a map"},
+ equalsTestCase{true, false, true, "which is not a map"},
+ equalsTestCase{[...]int{}, false, true, "which is not a map"},
+ equalsTestCase{func() {}, false, true, "which is not a map"},
+ equalsTestCase{&someInt, false, true, "which is not a map"},
+ equalsTestCase{[]int{}, false, true, "which is not a map"},
+ equalsTestCase{"taco", false, true, "which is not a map"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a map"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Pointers
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NilPointer() {
+ var someInt int = 17
+ var someUint uint = 17
+
+ var nilInt1 *int
+ var nilInt2 *int
+ var nilUint *uint
+ var nonNilInt *int = &someInt
+ var nonNilUint *uint = &someUint
+
+ matcher := Equals(nilInt1)
+ ExpectEq("<nil>", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nilInt1, true, false, ""},
+ equalsTestCase{nilInt2, true, false, ""},
+ equalsTestCase{nonNilInt, false, false, ""},
+
+ // Incorrect type.
+ equalsTestCase{nilUint, false, true, "which is not a *int"},
+ equalsTestCase{nonNilUint, false, true, "which is not a *int"},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a *int"},
+ equalsTestCase{bool(false), false, true, "which is not a *int"},
+ equalsTestCase{int(0), false, true, "which is not a *int"},
+ equalsTestCase{int8(0), false, true, "which is not a *int"},
+ equalsTestCase{int16(0), false, true, "which is not a *int"},
+ equalsTestCase{int32(0), false, true, "which is not a *int"},
+ equalsTestCase{int64(0), false, true, "which is not a *int"},
+ equalsTestCase{uint(0), false, true, "which is not a *int"},
+ equalsTestCase{uint8(0), false, true, "which is not a *int"},
+ equalsTestCase{uint16(0), false, true, "which is not a *int"},
+ equalsTestCase{uint32(0), false, true, "which is not a *int"},
+ equalsTestCase{uint64(0), false, true, "which is not a *int"},
+ equalsTestCase{true, false, true, "which is not a *int"},
+ equalsTestCase{[...]int{}, false, true, "which is not a *int"},
+ equalsTestCase{func() {}, false, true, "which is not a *int"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a *int"},
+ equalsTestCase{[]int{}, false, true, "which is not a *int"},
+ equalsTestCase{"taco", false, true, "which is not a *int"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a *int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonNilPointer() {
+ var someInt int = 17
+ var someOtherInt int = 17
+ var someUint uint = 17
+
+ var nilInt *int
+ var nilUint *uint
+ var nonNilInt1 *int = &someInt
+ var nonNilInt2 *int = &someOtherInt
+ var nonNilUint *uint = &someUint
+
+ matcher := Equals(nonNilInt1)
+ ExpectEq(fmt.Sprintf("%v", nonNilInt1), matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nonNilInt1, true, false, ""},
+ equalsTestCase{nonNilInt2, false, false, ""},
+ equalsTestCase{nilInt, false, false, ""},
+
+ // Incorrect type.
+ equalsTestCase{nilUint, false, true, "which is not a *int"},
+ equalsTestCase{nonNilUint, false, true, "which is not a *int"},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a *int"},
+ equalsTestCase{bool(false), false, true, "which is not a *int"},
+ equalsTestCase{int(0), false, true, "which is not a *int"},
+ equalsTestCase{int8(0), false, true, "which is not a *int"},
+ equalsTestCase{int16(0), false, true, "which is not a *int"},
+ equalsTestCase{int32(0), false, true, "which is not a *int"},
+ equalsTestCase{int64(0), false, true, "which is not a *int"},
+ equalsTestCase{uint(0), false, true, "which is not a *int"},
+ equalsTestCase{uint8(0), false, true, "which is not a *int"},
+ equalsTestCase{uint16(0), false, true, "which is not a *int"},
+ equalsTestCase{uint32(0), false, true, "which is not a *int"},
+ equalsTestCase{uint64(0), false, true, "which is not a *int"},
+ equalsTestCase{true, false, true, "which is not a *int"},
+ equalsTestCase{[...]int{}, false, true, "which is not a *int"},
+ equalsTestCase{func() {}, false, true, "which is not a *int"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a *int"},
+ equalsTestCase{[]int{}, false, true, "which is not a *int"},
+ equalsTestCase{"taco", false, true, "which is not a *int"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a *int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Slices
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NilSlice() {
+ var nilInt1 []int
+ var nilInt2 []int
+ var nilUint []uint
+
+ var nonNilInt []int = make([]int, 0)
+ var nonNilUint []uint = make([]uint, 0)
+
+ matcher := Equals(nilInt1)
+ ExpectEq("[]", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nilInt1, true, false, ""},
+ equalsTestCase{nilInt2, true, false, ""},
+ equalsTestCase{nonNilInt, false, false, ""},
+
+ // Incorrect type.
+ equalsTestCase{nilUint, false, true, "which is not a []int"},
+ equalsTestCase{nonNilUint, false, true, "which is not a []int"},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a []int"},
+ equalsTestCase{bool(false), false, true, "which is not a []int"},
+ equalsTestCase{int(0), false, true, "which is not a []int"},
+ equalsTestCase{int8(0), false, true, "which is not a []int"},
+ equalsTestCase{int16(0), false, true, "which is not a []int"},
+ equalsTestCase{int32(0), false, true, "which is not a []int"},
+ equalsTestCase{int64(0), false, true, "which is not a []int"},
+ equalsTestCase{uint(0), false, true, "which is not a []int"},
+ equalsTestCase{uint8(0), false, true, "which is not a []int"},
+ equalsTestCase{uint16(0), false, true, "which is not a []int"},
+ equalsTestCase{uint32(0), false, true, "which is not a []int"},
+ equalsTestCase{uint64(0), false, true, "which is not a []int"},
+ equalsTestCase{true, false, true, "which is not a []int"},
+ equalsTestCase{[...]int{}, false, true, "which is not a []int"},
+ equalsTestCase{func() {}, false, true, "which is not a []int"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a []int"},
+ equalsTestCase{"taco", false, true, "which is not a []int"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a []int"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonNilSlice() {
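+	// In Go, slice values are comparable only to nil, so Equals accepts
+	// only a nil slice as its argument.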
+ nonNil := make([]int, 0)
+ f := func() { Equals(nonNil) }
+ ExpectThat(f, Panics(HasSubstr("non-nil slice")))
+}
+
+////////////////////////////////////////////////////////////////////////
+// string
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) String() {
+ partial := "taco"
+ expected := fmt.Sprintf("%s%d", partial, 1)
+
+ matcher := Equals(expected)
+ ExpectEq("taco1", matcher.Description())
+
+ type stringAlias string
+
+ cases := []equalsTestCase{
+ // Correct types.
+ equalsTestCase{"taco1", true, false, ""},
+ equalsTestCase{"taco" + "1", true, false, ""},
+ equalsTestCase{expected, true, false, ""},
+ equalsTestCase{stringAlias("taco1"), true, false, ""},
+
+ equalsTestCase{"", false, false, ""},
+ equalsTestCase{"taco", false, false, ""},
+ equalsTestCase{"taco1\x00", false, false, ""},
+ equalsTestCase{"taco2", false, false, ""},
+ equalsTestCase{stringAlias("taco2"), false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a string"},
+ equalsTestCase{bool(false), false, true, "which is not a string"},
+ equalsTestCase{int(0), false, true, "which is not a string"},
+ equalsTestCase{int8(0), false, true, "which is not a string"},
+ equalsTestCase{int16(0), false, true, "which is not a string"},
+ equalsTestCase{int32(0), false, true, "which is not a string"},
+ equalsTestCase{int64(0), false, true, "which is not a string"},
+ equalsTestCase{uint(0), false, true, "which is not a string"},
+ equalsTestCase{uint8(0), false, true, "which is not a string"},
+ equalsTestCase{uint16(0), false, true, "which is not a string"},
+ equalsTestCase{uint32(0), false, true, "which is not a string"},
+ equalsTestCase{uint64(0), false, true, "which is not a string"},
+ equalsTestCase{true, false, true, "which is not a string"},
+ equalsTestCase{[...]int{}, false, true, "which is not a string"},
+ equalsTestCase{func() {}, false, true, "which is not a string"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a string"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a string"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) StringAlias() {
+ type stringAlias string
+
+ matcher := Equals(stringAlias("taco"))
+ ExpectEq("taco", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct types.
+ equalsTestCase{stringAlias("taco"), true, false, ""},
+ equalsTestCase{"taco", true, false, ""},
+
+ equalsTestCase{"burrito", false, false, ""},
+ equalsTestCase{stringAlias("burrito"), false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a string"},
+ equalsTestCase{bool(false), false, true, "which is not a string"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// struct
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) Struct() {
+ type someStruct struct{ foo uint }
+ f := func() { Equals(someStruct{17}) }
+ ExpectThat(f, Panics(HasSubstr("unsupported kind struct")))
+}
+
+////////////////////////////////////////////////////////////////////////
+// unsafe.Pointer
+////////////////////////////////////////////////////////////////////////
+
+func (t *EqualsTest) NilUnsafePointer() {
+ someInt := int(17)
+
+ var nilPtr1 unsafe.Pointer
+ var nilPtr2 unsafe.Pointer
+ var nonNilPtr unsafe.Pointer = unsafe.Pointer(&someInt)
+
+ matcher := Equals(nilPtr1)
+ ExpectEq("<nil>", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nilPtr1, true, false, ""},
+ equalsTestCase{nilPtr2, true, false, ""},
+ equalsTestCase{nonNilPtr, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{bool(false), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int8(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int16(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int32(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int64(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint8(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint16(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint32(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint64(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{true, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{[...]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{make(chan int), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{func() {}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{&someInt, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{[]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{"taco", false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a unsafe.Pointer"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonNilUnsafePointer() {
+ someInt := int(17)
+ someOtherInt := int(17)
+
+ var nilPtr unsafe.Pointer
+ var nonNilPtr1 unsafe.Pointer = unsafe.Pointer(&someInt)
+ var nonNilPtr2 unsafe.Pointer = unsafe.Pointer(&someOtherInt)
+
+ matcher := Equals(nonNilPtr1)
+ ExpectEq(fmt.Sprintf("%v", nonNilPtr1), matcher.Description())
+
+ cases := []equalsTestCase{
+ // Correct type.
+ equalsTestCase{nonNilPtr1, true, false, ""},
+ equalsTestCase{nonNilPtr2, false, false, ""},
+ equalsTestCase{nilPtr, false, false, ""},
+
+ // Other types.
+ equalsTestCase{0, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{bool(false), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int8(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int16(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int32(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{int64(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint8(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint16(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint32(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{uint64(0), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{true, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{[...]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{make(chan int), false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{func() {}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{map[int]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{&someInt, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{[]int{}, false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{"taco", false, true, "which is not a unsafe.Pointer"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not a unsafe.Pointer"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/error.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/error.go
new file mode 100644
index 00000000000..8a078e36d86
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/error.go
@@ -0,0 +1,51 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+// Error returns a matcher that matches non-nil values implementing the
+// built-in error interface for which the return value of Error() matches the
+// supplied matcher.
+//
+// For example:
+//
+// err := errors.New("taco burrito")
+//
+// Error(Equals("taco burrito")) // matches err
+// Error(HasSubstr("taco")) // matches err
+// Error(HasSubstr("enchilada")) // doesn't match err
+//
+func Error(m Matcher) Matcher {
+ return &errorMatcher{m}
+}
+
+type errorMatcher struct {
+ wrappedMatcher Matcher
+}
+
+func (m *errorMatcher) Description() string {
+ return "error " + m.wrappedMatcher.Description()
+}
+
+func (m *errorMatcher) Matches(c interface{}) error {
+ // Make sure that c is an error.
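+	// A nil candidate fails the assertion below as well, so nil is
+	// rejected as "not an error".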
+ e, ok := c.(error)
+ if !ok {
+ return NewFatalError("which is not an error")
+ }
+
+ // Pass on the error text to the wrapped matcher.
+ return m.wrappedMatcher.Matches(e.Error())
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/error_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/error_test.go
new file mode 100644
index 00000000000..f92167cad1d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/error_test.go
@@ -0,0 +1,92 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "errors"
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type ErrorTest struct {
+ matcherCalled bool
+ suppliedCandidate interface{}
+ wrappedError error
+
+ matcher Matcher
+}
+
+func init() { RegisterTestSuite(&ErrorTest{}) }
+
+func (t *ErrorTest) SetUp(i *TestInfo) {
+ wrapped := &fakeMatcher{
+ func(c interface{}) error {
+ t.matcherCalled = true
+ t.suppliedCandidate = c
+ return t.wrappedError
+ },
+ "is foo",
+ }
+
+ t.matcher = Error(wrapped)
+}
+
+func isFatal(err error) bool {
+ _, isFatal := err.(*FatalError)
+ return isFatal
+}
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *ErrorTest) Description() {
+ ExpectThat(t.matcher.Description(), Equals("error is foo"))
+}
+
+func (t *ErrorTest) CandidateIsNil() {
+ err := t.matcher.Matches(nil)
+
+ ExpectThat(t.matcherCalled, Equals(false))
+ ExpectThat(err.Error(), Equals("which is not an error"))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *ErrorTest) CandidateIsString() {
+ err := t.matcher.Matches("taco")
+
+ ExpectThat(t.matcherCalled, Equals(false))
+ ExpectThat(err.Error(), Equals("which is not an error"))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *ErrorTest) CallsWrappedMatcher() {
+ candidate := errors.New("taco")
+ t.matcher.Matches(candidate)
+
+ ExpectThat(t.matcherCalled, Equals(true))
+ ExpectThat(t.suppliedCandidate, Equals("taco"))
+}
+
+func (t *ErrorTest) ReturnsWrappedMatcherResult() {
+ t.wrappedError = errors.New("burrito")
+ err := t.matcher.Matches(errors.New(""))
+ ExpectThat(err, Equals(t.wrappedError))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go
new file mode 100644
index 00000000000..4b9d103a381
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal.go
@@ -0,0 +1,39 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// GreaterOrEqual returns a matcher that matches integer, floating point, or
+// string values v such that v >= x. Comparison is not defined between numeric
+// and string types, but is defined between all integer and floating point
+// types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// GreaterOrEqual will panic.
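+//
+// For example (illustrative, mirroring the cases in
+// greater_or_equal_test.go):
+//
+//     GreaterOrEqual(150)   // matches 150 and 151, but not 149
+//     GreaterOrEqual("17")  // matches "17"; numeric candidates are rejected
+//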
+func GreaterOrEqual(x interface{}) Matcher {
+ desc := fmt.Sprintf("greater than or equal to %v", x)
+
+ // Special case: make it clear that strings are strings.
+ if reflect.TypeOf(x).Kind() == reflect.String {
+ desc = fmt.Sprintf("greater than or equal to \"%s\"", x)
+ }
+
+ return transformDescription(Not(LessThan(x)), desc)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal_test.go
new file mode 100644
index 00000000000..f5e29d1ce59
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal_test.go
@@ -0,0 +1,1101 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "math"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type GreaterOrEqualTest struct {
+}
+
+func init() { RegisterTestSuite(&GreaterOrEqualTest{}) }
+
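+// Cases below are written positionally:
+// {candidate, expectedResult, shouldBeFatal, expectedError}.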
+type geTestCase struct {
+ candidate interface{}
+ expectedResult bool
+ shouldBeFatal bool
+ expectedError string
+}
+
+func (t *GreaterOrEqualTest) checkTestCases(matcher Matcher, cases []geTestCase) {
+ for i, c := range cases {
+ err := matcher.Matches(c.candidate)
+
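+		// A nil error from Matches means the candidate matched.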
+ ExpectThat(
+ (err == nil),
+ Equals(c.expectedResult),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ if err == nil {
+ continue
+ }
+
+ _, isFatal := err.(*FatalError)
+ ExpectEq(
+ c.shouldBeFatal,
+ isFatal,
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ ExpectThat(
+ err,
+ Error(Equals(c.expectedError)),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+// Bad types
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterOrEqualTest) IntegerCandidateBadTypes() {
+ matcher := GreaterOrEqual(int(-150))
+
+ cases := []geTestCase{
+ geTestCase{true, false, true, "which is not comparable"},
+ geTestCase{complex64(-151), false, true, "which is not comparable"},
+ geTestCase{complex128(-151), false, true, "which is not comparable"},
+ geTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ geTestCase{make(chan int), false, true, "which is not comparable"},
+ geTestCase{func() {}, false, true, "which is not comparable"},
+ geTestCase{map[int]int{}, false, true, "which is not comparable"},
+ geTestCase{&geTestCase{}, false, true, "which is not comparable"},
+ geTestCase{make([]int, 0), false, true, "which is not comparable"},
+ geTestCase{"-151", false, true, "which is not comparable"},
+ geTestCase{geTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) FloatCandidateBadTypes() {
+ matcher := GreaterOrEqual(float32(-150))
+
+ cases := []geTestCase{
+ geTestCase{true, false, true, "which is not comparable"},
+ geTestCase{complex64(-151), false, true, "which is not comparable"},
+ geTestCase{complex128(-151), false, true, "which is not comparable"},
+ geTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ geTestCase{make(chan int), false, true, "which is not comparable"},
+ geTestCase{func() {}, false, true, "which is not comparable"},
+ geTestCase{map[int]int{}, false, true, "which is not comparable"},
+ geTestCase{&geTestCase{}, false, true, "which is not comparable"},
+ geTestCase{make([]int, 0), false, true, "which is not comparable"},
+ geTestCase{"-151", false, true, "which is not comparable"},
+ geTestCase{geTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) StringCandidateBadTypes() {
+ matcher := GreaterOrEqual("17")
+
+ cases := []geTestCase{
+ geTestCase{true, false, true, "which is not comparable"},
+ geTestCase{int(0), false, true, "which is not comparable"},
+ geTestCase{int8(0), false, true, "which is not comparable"},
+ geTestCase{int16(0), false, true, "which is not comparable"},
+ geTestCase{int32(0), false, true, "which is not comparable"},
+ geTestCase{int64(0), false, true, "which is not comparable"},
+ geTestCase{uint(0), false, true, "which is not comparable"},
+ geTestCase{uint8(0), false, true, "which is not comparable"},
+ geTestCase{uint16(0), false, true, "which is not comparable"},
+ geTestCase{uint32(0), false, true, "which is not comparable"},
+ geTestCase{uint64(0), false, true, "which is not comparable"},
+ geTestCase{float32(0), false, true, "which is not comparable"},
+ geTestCase{float64(0), false, true, "which is not comparable"},
+ geTestCase{complex64(-151), false, true, "which is not comparable"},
+ geTestCase{complex128(-151), false, true, "which is not comparable"},
+ geTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ geTestCase{make(chan int), false, true, "which is not comparable"},
+ geTestCase{func() {}, false, true, "which is not comparable"},
+ geTestCase{map[int]int{}, false, true, "which is not comparable"},
+ geTestCase{&geTestCase{}, false, true, "which is not comparable"},
+ geTestCase{make([]int, 0), false, true, "which is not comparable"},
+ geTestCase{geTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) BadArgument() {
+ panicked := false
+
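+	// Deferred functions run LIFO: the second defer (which recovers the
+	// panic) fires before the first (which checks the flag).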
+ defer func() {
+ ExpectThat(panicked, Equals(true))
+ }()
+
+ defer func() {
+ if r := recover(); r != nil {
+ panicked = true
+ }
+ }()
+
+ GreaterOrEqual(complex128(0))
+}
+
+////////////////////////////////////////////////////////////////////////
+// Integer literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterOrEqualTest) NegativeIntegerLiteral() {
+ matcher := GreaterOrEqual(-150)
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to -150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-(1 << 30), false, false, ""},
+ geTestCase{-151, false, false, ""},
+ geTestCase{-150, true, false, ""},
+ geTestCase{0, true, false, ""},
+ geTestCase{17, true, false, ""},
+
+ geTestCase{int(-(1 << 30)), false, false, ""},
+ geTestCase{int(-151), false, false, ""},
+ geTestCase{int(-150), true, false, ""},
+ geTestCase{int(0), true, false, ""},
+ geTestCase{int(17), true, false, ""},
+
+ geTestCase{int8(-127), true, false, ""},
+ geTestCase{int8(0), true, false, ""},
+ geTestCase{int8(17), true, false, ""},
+
+ geTestCase{int16(-(1 << 14)), false, false, ""},
+ geTestCase{int16(-151), false, false, ""},
+ geTestCase{int16(-150), true, false, ""},
+ geTestCase{int16(0), true, false, ""},
+ geTestCase{int16(17), true, false, ""},
+
+ geTestCase{int32(-(1 << 30)), false, false, ""},
+ geTestCase{int32(-151), false, false, ""},
+ geTestCase{int32(-150), true, false, ""},
+ geTestCase{int32(0), true, false, ""},
+ geTestCase{int32(17), true, false, ""},
+
+ geTestCase{int64(-(1 << 30)), false, false, ""},
+ geTestCase{int64(-151), false, false, ""},
+ geTestCase{int64(-150), true, false, ""},
+ geTestCase{int64(0), true, false, ""},
+ geTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint((1 << 32) - 151), true, false, ""},
+ geTestCase{uint(0), true, false, ""},
+ geTestCase{uint(17), true, false, ""},
+
+ geTestCase{uint8(0), true, false, ""},
+ geTestCase{uint8(17), true, false, ""},
+ geTestCase{uint8(253), true, false, ""},
+
+ geTestCase{uint16((1 << 16) - 151), true, false, ""},
+ geTestCase{uint16(0), true, false, ""},
+ geTestCase{uint16(17), true, false, ""},
+
+ geTestCase{uint32((1 << 32) - 151), true, false, ""},
+ geTestCase{uint32(0), true, false, ""},
+ geTestCase{uint32(17), true, false, ""},
+
+ geTestCase{uint64((1 << 64) - 151), true, false, ""},
+ geTestCase{uint64(0), true, false, ""},
+ geTestCase{uint64(17), true, false, ""},
+
+ geTestCase{uintptr((1 << 64) - 151), true, false, ""},
+ geTestCase{uintptr(0), true, false, ""},
+ geTestCase{uintptr(17), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-(1 << 30)), false, false, ""},
+ geTestCase{float32(-151), false, false, ""},
+ geTestCase{float32(-150.1), false, false, ""},
+ geTestCase{float32(-150), true, false, ""},
+ geTestCase{float32(-149.9), true, false, ""},
+ geTestCase{float32(0), true, false, ""},
+ geTestCase{float32(17), true, false, ""},
+ geTestCase{float32(160), true, false, ""},
+
+ geTestCase{float64(-(1 << 30)), false, false, ""},
+ geTestCase{float64(-151), false, false, ""},
+ geTestCase{float64(-150.1), false, false, ""},
+ geTestCase{float64(-150), true, false, ""},
+ geTestCase{float64(-149.9), true, false, ""},
+ geTestCase{float64(0), true, false, ""},
+ geTestCase{float64(17), true, false, ""},
+ geTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) ZeroIntegerLiteral() {
+ matcher := GreaterOrEqual(0)
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 0"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-(1 << 30), false, false, ""},
+ geTestCase{-1, false, false, ""},
+ geTestCase{0, true, false, ""},
+ geTestCase{1, true, false, ""},
+ geTestCase{17, true, false, ""},
+ geTestCase{(1 << 30), true, false, ""},
+
+ geTestCase{int(-(1 << 30)), false, false, ""},
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(0), true, false, ""},
+ geTestCase{int(1), true, false, ""},
+ geTestCase{int(17), true, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(0), true, false, ""},
+ geTestCase{int8(1), true, false, ""},
+
+ geTestCase{int16(-(1 << 14)), false, false, ""},
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(0), true, false, ""},
+ geTestCase{int16(1), true, false, ""},
+ geTestCase{int16(17), true, false, ""},
+
+ geTestCase{int32(-(1 << 30)), false, false, ""},
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(0), true, false, ""},
+ geTestCase{int32(1), true, false, ""},
+ geTestCase{int32(17), true, false, ""},
+
+ geTestCase{int64(-(1 << 30)), false, false, ""},
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(0), true, false, ""},
+ geTestCase{int64(1), true, false, ""},
+ geTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint((1 << 32) - 1), true, false, ""},
+ geTestCase{uint(0), true, false, ""},
+ geTestCase{uint(17), true, false, ""},
+
+ geTestCase{uint8(0), true, false, ""},
+ geTestCase{uint8(17), true, false, ""},
+ geTestCase{uint8(253), true, false, ""},
+
+ geTestCase{uint16((1 << 16) - 1), true, false, ""},
+ geTestCase{uint16(0), true, false, ""},
+ geTestCase{uint16(17), true, false, ""},
+
+ geTestCase{uint32((1 << 32) - 1), true, false, ""},
+ geTestCase{uint32(0), true, false, ""},
+ geTestCase{uint32(17), true, false, ""},
+
+ geTestCase{uint64((1 << 64) - 1), true, false, ""},
+ geTestCase{uint64(0), true, false, ""},
+ geTestCase{uint64(17), true, false, ""},
+
+ geTestCase{uintptr((1 << 64) - 1), true, false, ""},
+ geTestCase{uintptr(0), true, false, ""},
+ geTestCase{uintptr(17), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-(1 << 30)), false, false, ""},
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(-0.1), false, false, ""},
+ geTestCase{float32(-0.0), true, false, ""},
+ geTestCase{float32(0), true, false, ""},
+ geTestCase{float32(0.1), true, false, ""},
+ geTestCase{float32(17), true, false, ""},
+ geTestCase{float32(160), true, false, ""},
+
+ geTestCase{float64(-(1 << 30)), false, false, ""},
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(-0.1), false, false, ""},
+ geTestCase{float64(-0), true, false, ""},
+ geTestCase{float64(0), true, false, ""},
+ geTestCase{float64(17), true, false, ""},
+ geTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) PositiveIntegerLiteral() {
+ matcher := GreaterOrEqual(150)
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{149, false, false, ""},
+ geTestCase{150, true, false, ""},
+ geTestCase{151, true, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(149), false, false, ""},
+ geTestCase{int(150), true, false, ""},
+ geTestCase{int(151), true, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(0), false, false, ""},
+ geTestCase{int8(17), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(149), false, false, ""},
+ geTestCase{int16(150), true, false, ""},
+ geTestCase{int16(151), true, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(149), false, false, ""},
+ geTestCase{int32(150), true, false, ""},
+ geTestCase{int32(151), true, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(149), false, false, ""},
+ geTestCase{int64(150), true, false, ""},
+ geTestCase{int64(151), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(149), false, false, ""},
+ geTestCase{uint(150), true, false, ""},
+ geTestCase{uint(151), true, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(127), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(149), false, false, ""},
+ geTestCase{uint16(150), true, false, ""},
+ geTestCase{uint16(151), true, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(149), false, false, ""},
+ geTestCase{uint32(150), true, false, ""},
+ geTestCase{uint32(151), true, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(149), false, false, ""},
+ geTestCase{uint64(150), true, false, ""},
+ geTestCase{uint64(151), true, false, ""},
+
+ geTestCase{uintptr(0), false, false, ""},
+ geTestCase{uintptr(149), false, false, ""},
+ geTestCase{uintptr(150), true, false, ""},
+ geTestCase{uintptr(151), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(149), false, false, ""},
+ geTestCase{float32(149.9), false, false, ""},
+ geTestCase{float32(150), true, false, ""},
+ geTestCase{float32(150.1), true, false, ""},
+ geTestCase{float32(151), true, false, ""},
+
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(149), false, false, ""},
+ geTestCase{float64(149.9), false, false, ""},
+ geTestCase{float64(150), true, false, ""},
+ geTestCase{float64(150.1), true, false, ""},
+ geTestCase{float64(151), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Float literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterOrEqualTest) NegativeFloatLiteral() {
+ matcher := GreaterOrEqual(-150.1)
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to -150.1"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-(1 << 30), false, false, ""},
+ geTestCase{-151, false, false, ""},
+ geTestCase{-150, true, false, ""},
+ geTestCase{0, true, false, ""},
+ geTestCase{17, true, false, ""},
+
+ geTestCase{int(-(1 << 30)), false, false, ""},
+ geTestCase{int(-151), false, false, ""},
+ geTestCase{int(-150), true, false, ""},
+ geTestCase{int(0), true, false, ""},
+ geTestCase{int(17), true, false, ""},
+
+ geTestCase{int8(-127), true, false, ""},
+ geTestCase{int8(0), true, false, ""},
+ geTestCase{int8(17), true, false, ""},
+
+ geTestCase{int16(-(1 << 14)), false, false, ""},
+ geTestCase{int16(-151), false, false, ""},
+ geTestCase{int16(-150), true, false, ""},
+ geTestCase{int16(0), true, false, ""},
+ geTestCase{int16(17), true, false, ""},
+
+ geTestCase{int32(-(1 << 30)), false, false, ""},
+ geTestCase{int32(-151), false, false, ""},
+ geTestCase{int32(-150), true, false, ""},
+ geTestCase{int32(0), true, false, ""},
+ geTestCase{int32(17), true, false, ""},
+
+ geTestCase{int64(-(1 << 30)), false, false, ""},
+ geTestCase{int64(-151), false, false, ""},
+ geTestCase{int64(-150), true, false, ""},
+ geTestCase{int64(0), true, false, ""},
+ geTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint((1 << 32) - 151), true, false, ""},
+ geTestCase{uint(0), true, false, ""},
+ geTestCase{uint(17), true, false, ""},
+
+ geTestCase{uint8(0), true, false, ""},
+ geTestCase{uint8(17), true, false, ""},
+ geTestCase{uint8(253), true, false, ""},
+
+ geTestCase{uint16((1 << 16) - 151), true, false, ""},
+ geTestCase{uint16(0), true, false, ""},
+ geTestCase{uint16(17), true, false, ""},
+
+ geTestCase{uint32((1 << 32) - 151), true, false, ""},
+ geTestCase{uint32(0), true, false, ""},
+ geTestCase{uint32(17), true, false, ""},
+
+ geTestCase{uint64((1 << 64) - 151), true, false, ""},
+ geTestCase{uint64(0), true, false, ""},
+ geTestCase{uint64(17), true, false, ""},
+
+ geTestCase{uintptr((1 << 64) - 151), true, false, ""},
+ geTestCase{uintptr(0), true, false, ""},
+ geTestCase{uintptr(17), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-(1 << 30)), false, false, ""},
+ geTestCase{float32(-151), false, false, ""},
+ geTestCase{float32(-150.2), false, false, ""},
+ geTestCase{float32(-150.1), true, false, ""},
+ geTestCase{float32(-150), true, false, ""},
+ geTestCase{float32(0), true, false, ""},
+ geTestCase{float32(17), true, false, ""},
+ geTestCase{float32(160), true, false, ""},
+
+ geTestCase{float64(-(1 << 30)), false, false, ""},
+ geTestCase{float64(-151), false, false, ""},
+ geTestCase{float64(-150.2), false, false, ""},
+ geTestCase{float64(-150.1), true, false, ""},
+ geTestCase{float64(-150), true, false, ""},
+ geTestCase{float64(0), true, false, ""},
+ geTestCase{float64(17), true, false, ""},
+ geTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) PositiveFloatLiteral() {
+ matcher := GreaterOrEqual(149.9)
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 149.9"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{149, false, false, ""},
+ geTestCase{150, true, false, ""},
+ geTestCase{151, true, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(149), false, false, ""},
+ geTestCase{int(150), true, false, ""},
+ geTestCase{int(151), true, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(0), false, false, ""},
+ geTestCase{int8(17), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(149), false, false, ""},
+ geTestCase{int16(150), true, false, ""},
+ geTestCase{int16(151), true, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(149), false, false, ""},
+ geTestCase{int32(150), true, false, ""},
+ geTestCase{int32(151), true, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(149), false, false, ""},
+ geTestCase{int64(150), true, false, ""},
+ geTestCase{int64(151), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(149), false, false, ""},
+ geTestCase{uint(150), true, false, ""},
+ geTestCase{uint(151), true, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(127), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(149), false, false, ""},
+ geTestCase{uint16(150), true, false, ""},
+ geTestCase{uint16(151), true, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(149), false, false, ""},
+ geTestCase{uint32(150), true, false, ""},
+ geTestCase{uint32(151), true, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(149), false, false, ""},
+ geTestCase{uint64(150), true, false, ""},
+ geTestCase{uint64(151), true, false, ""},
+
+ geTestCase{uintptr(0), false, false, ""},
+ geTestCase{uintptr(149), false, false, ""},
+ geTestCase{uintptr(150), true, false, ""},
+ geTestCase{uintptr(151), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(149), false, false, ""},
+ geTestCase{float32(149.8), false, false, ""},
+ geTestCase{float32(149.9), true, false, ""},
+ geTestCase{float32(150), true, false, ""},
+ geTestCase{float32(151), true, false, ""},
+
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(149), false, false, ""},
+ geTestCase{float64(149.8), false, false, ""},
+ geTestCase{float64(149.9), true, false, ""},
+ geTestCase{float64(150), true, false, ""},
+ geTestCase{float64(151), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Subtle cases
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterOrEqualTest) Int64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterOrEqual(int64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{kTwoTo25 + 0, false, false, ""},
+ geTestCase{kTwoTo25 + 1, true, false, ""},
+ geTestCase{kTwoTo25 + 2, true, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(0), false, false, ""},
+ geTestCase{int16(32767), false, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int32(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 2), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(255), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(65535), false, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint32(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{uintptr(0), false, false, ""},
+ geTestCase{uintptr(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uintptr(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uintptr(kTwoTo25 + 2), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ geTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) Int64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
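+	// For example, float64 collapses kTwoTo54-1 through kTwoTo54+2 to exactly
+	// 2^54 (18014398509481984) under round-to-nearest-even.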
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterOrEqual(int64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{1 << 30, false, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(math.MaxInt32), false, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(0), false, false, ""},
+ geTestCase{int16(32767), false, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(math.MaxInt32), false, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ geTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ geTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 2), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(math.MaxUint32), false, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(255), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(65535), false, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(math.MaxUint32), false, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ geTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ geTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+
+ geTestCase{uintptr(0), false, false, ""},
+ geTestCase{uintptr(kTwoTo54 - 1), false, false, ""},
+ geTestCase{uintptr(kTwoTo54 + 0), false, false, ""},
+ geTestCase{uintptr(kTwoTo54 + 1), true, false, ""},
+ geTestCase{uintptr(kTwoTo54 + 2), true, false, ""},
+
+ // Floating point.
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) Uint64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterOrEqual(uint64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{kTwoTo25 + 0, false, false, ""},
+ geTestCase{kTwoTo25 + 1, true, false, ""},
+ geTestCase{kTwoTo25 + 2, true, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(0), false, false, ""},
+ geTestCase{int16(32767), false, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int32(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 2), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(255), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(65535), false, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint32(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+
+ geTestCase{uintptr(0), false, false, ""},
+ geTestCase{uintptr(kTwoTo25 + 0), false, false, ""},
+ geTestCase{uintptr(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uintptr(kTwoTo25 + 2), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ geTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ geTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) Uint64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterOrEqual(uint64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{-1, false, false, ""},
+ geTestCase{1 << 30, false, false, ""},
+
+ geTestCase{int(-1), false, false, ""},
+ geTestCase{int(math.MaxInt32), false, false, ""},
+
+ geTestCase{int8(-1), false, false, ""},
+ geTestCase{int8(127), false, false, ""},
+
+ geTestCase{int16(-1), false, false, ""},
+ geTestCase{int16(0), false, false, ""},
+ geTestCase{int16(32767), false, false, ""},
+
+ geTestCase{int32(-1), false, false, ""},
+ geTestCase{int32(math.MaxInt32), false, false, ""},
+
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ geTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ geTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 2), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint(0), false, false, ""},
+ geTestCase{uint(math.MaxUint32), false, false, ""},
+
+ geTestCase{uint8(0), false, false, ""},
+ geTestCase{uint8(255), false, false, ""},
+
+ geTestCase{uint16(0), false, false, ""},
+ geTestCase{uint16(65535), false, false, ""},
+
+ geTestCase{uint32(0), false, false, ""},
+ geTestCase{uint32(math.MaxUint32), false, false, ""},
+
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ geTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ geTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+
+ geTestCase{uintptr(0), false, false, ""},
+ geTestCase{uintptr(kTwoTo54 - 1), false, false, ""},
+ geTestCase{uintptr(kTwoTo54 + 0), false, false, ""},
+ geTestCase{uintptr(kTwoTo54 + 1), true, false, ""},
+ geTestCase{uintptr(kTwoTo54 + 2), true, false, ""},
+
+ // Floating point.
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) Float32AboveExactIntegerRange() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterOrEqual(float32(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 3.3554432e+07"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo25 - 2), false, false, ""},
+ geTestCase{int64(kTwoTo25 - 1), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 2), true, false, ""},
+ geTestCase{int64(kTwoTo25 + 3), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo25 - 2), false, false, ""},
+ geTestCase{uint64(kTwoTo25 - 1), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+ geTestCase{uint64(kTwoTo25 + 3), true, false, ""},
+
+ // Floating point.
+ geTestCase{float32(-1), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) Float64AboveExactIntegerRange() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterOrEqual(float64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to 1.8014398509481984e+16"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ // Signed integers.
+ geTestCase{int64(-1), false, false, ""},
+ geTestCase{int64(kTwoTo54 - 2), false, false, ""},
+ geTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 2), true, false, ""},
+ geTestCase{int64(kTwoTo54 + 3), true, false, ""},
+
+ // Unsigned integers.
+ geTestCase{uint64(0), false, false, ""},
+ geTestCase{uint64(kTwoTo54 - 2), false, false, ""},
+ geTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+ geTestCase{uint64(kTwoTo54 + 3), true, false, ""},
+
+ // Floating point.
+ geTestCase{float64(-1), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ geTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ geTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// String literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterOrEqualTest) EmptyString() {
+ matcher := GreaterOrEqual("")
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to \"\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ geTestCase{"", true, false, ""},
+ geTestCase{"\x00", true, false, ""},
+ geTestCase{"a", true, false, ""},
+ geTestCase{"foo", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) SingleNullByte() {
+ matcher := GreaterOrEqual("\x00")
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to \"\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ geTestCase{"", false, false, ""},
+ geTestCase{"\x00", true, false, ""},
+ geTestCase{"a", true, false, ""},
+ geTestCase{"foo", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterOrEqualTest) LongerString() {
+ matcher := GreaterOrEqual("foo\x00")
+ desc := matcher.Description()
+ expectedDesc := "greater than or equal to \"foo\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []geTestCase{
+ geTestCase{"", false, false, ""},
+ geTestCase{"\x00", false, false, ""},
+ geTestCase{"bar", false, false, ""},
+ geTestCase{"foo", false, false, ""},
+ geTestCase{"foo\x00", true, false, ""},
+ geTestCase{"fooa", true, false, ""},
+ geTestCase{"qux", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go
new file mode 100644
index 00000000000..3eef32178f8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_than.go
@@ -0,0 +1,39 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// GreaterThan returns a matcher that matches integer, floating point, or
+// string values v such that v > x. Comparison is not defined between numeric
+// and string types, but is defined between all integer and floating point
+// types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// GreaterThan will panic.
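+//
+// Illustrative usage, mirroring the accompanying tests:
+//
+//	GreaterThan(150).Matches(151)     // nil error: 151 > 150
+//	GreaterThan("foo").Matches("qux") // nil error: "qux" sorts after "foo"
+//	GreaterThan(150).Matches(150)     // non-nil error: not strictly greater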
+func GreaterThan(x interface{}) Matcher {
+ desc := fmt.Sprintf("greater than %v", x)
+
+ // Special case: make it clear that strings are strings.
+ if reflect.TypeOf(x).Kind() == reflect.String {
+ desc = fmt.Sprintf("greater than \"%s\"", x)
+ }
+
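+	// Implemented by negating LessOrEqual and swapping in the description
+	// above, so the cross-type comparison logic lives in a single matcher.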
+ return transformDescription(Not(LessOrEqual(x)), desc)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_than_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_than_test.go
new file mode 100644
index 00000000000..bf70fe56633
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/greater_than_test.go
@@ -0,0 +1,1077 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "math"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type GreaterThanTest struct{}
+
+func init() { RegisterTestSuite(&GreaterThanTest{}) }
+
+type gtTestCase struct {
+	candidate      interface{}
+	expectedResult bool   // whether the matcher should match the candidate
+	shouldBeFatal  bool   // on mismatch, whether the error should be a *FatalError
+	expectedError  string // on mismatch, the exact expected error message
+}
+
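+// checkTestCases runs the matcher against each candidate, verifying the
+// match result and, on mismatch, the fatality and exact text of the error.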
+func (t *GreaterThanTest) checkTestCases(matcher Matcher, cases []gtTestCase) {
+ for i, c := range cases {
+ err := matcher.Matches(c.candidate)
+
+ ExpectThat(
+ (err == nil),
+ Equals(c.expectedResult),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ if err == nil {
+ continue
+ }
+
+ _, isFatal := err.(*FatalError)
+ ExpectEq(
+ c.shouldBeFatal,
+ isFatal,
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ ExpectThat(
+ err,
+ Error(Equals(c.expectedError)),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+// Bad types and arguments
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterThanTest) IntegerCandidateBadTypes() {
+ matcher := GreaterThan(int(-150))
+
+ cases := []gtTestCase{
+ gtTestCase{true, false, true, "which is not comparable"},
+ gtTestCase{complex64(-151), false, true, "which is not comparable"},
+ gtTestCase{complex128(-151), false, true, "which is not comparable"},
+ gtTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ gtTestCase{make(chan int), false, true, "which is not comparable"},
+ gtTestCase{func() {}, false, true, "which is not comparable"},
+ gtTestCase{map[int]int{}, false, true, "which is not comparable"},
+ gtTestCase{&gtTestCase{}, false, true, "which is not comparable"},
+ gtTestCase{make([]int, 0), false, true, "which is not comparable"},
+ gtTestCase{"-151", false, true, "which is not comparable"},
+ gtTestCase{gtTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) FloatCandidateBadTypes() {
+ matcher := GreaterThan(float32(-150))
+
+ cases := []gtTestCase{
+ gtTestCase{true, false, true, "which is not comparable"},
+ gtTestCase{complex64(-151), false, true, "which is not comparable"},
+ gtTestCase{complex128(-151), false, true, "which is not comparable"},
+ gtTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ gtTestCase{make(chan int), false, true, "which is not comparable"},
+ gtTestCase{func() {}, false, true, "which is not comparable"},
+ gtTestCase{map[int]int{}, false, true, "which is not comparable"},
+ gtTestCase{&gtTestCase{}, false, true, "which is not comparable"},
+ gtTestCase{make([]int, 0), false, true, "which is not comparable"},
+ gtTestCase{"-151", false, true, "which is not comparable"},
+ gtTestCase{gtTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) StringCandidateBadTypes() {
+ matcher := GreaterThan("17")
+
+ cases := []gtTestCase{
+ gtTestCase{true, false, true, "which is not comparable"},
+ gtTestCase{int(0), false, true, "which is not comparable"},
+ gtTestCase{int8(0), false, true, "which is not comparable"},
+ gtTestCase{int16(0), false, true, "which is not comparable"},
+ gtTestCase{int32(0), false, true, "which is not comparable"},
+ gtTestCase{int64(0), false, true, "which is not comparable"},
+ gtTestCase{uint(0), false, true, "which is not comparable"},
+ gtTestCase{uint8(0), false, true, "which is not comparable"},
+ gtTestCase{uint16(0), false, true, "which is not comparable"},
+ gtTestCase{uint32(0), false, true, "which is not comparable"},
+ gtTestCase{uint64(0), false, true, "which is not comparable"},
+ gtTestCase{float32(0), false, true, "which is not comparable"},
+ gtTestCase{float64(0), false, true, "which is not comparable"},
+ gtTestCase{complex64(-151), false, true, "which is not comparable"},
+ gtTestCase{complex128(-151), false, true, "which is not comparable"},
+ gtTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ gtTestCase{make(chan int), false, true, "which is not comparable"},
+ gtTestCase{func() {}, false, true, "which is not comparable"},
+ gtTestCase{map[int]int{}, false, true, "which is not comparable"},
+ gtTestCase{&gtTestCase{}, false, true, "which is not comparable"},
+ gtTestCase{make([]int, 0), false, true, "which is not comparable"},
+ gtTestCase{gtTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) BadArgument() {
+ panicked := false
+
+ defer func() {
+ ExpectThat(panicked, Equals(true))
+ }()
+
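+	// Deferred functions run last-in, first-out, so this recover fires before
+	// the expectation above is evaluated.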
+ defer func() {
+ if r := recover(); r != nil {
+ panicked = true
+ }
+ }()
+
+ GreaterThan(complex128(0))
+}
+
+////////////////////////////////////////////////////////////////////////
+// Integer literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterThanTest) NegativeIntegerLiteral() {
+ matcher := GreaterThan(-150)
+ desc := matcher.Description()
+ expectedDesc := "greater than -150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-(1 << 30), false, false, ""},
+ gtTestCase{-151, false, false, ""},
+ gtTestCase{-150, false, false, ""},
+ gtTestCase{-149, true, false, ""},
+ gtTestCase{0, true, false, ""},
+ gtTestCase{17, true, false, ""},
+
+ gtTestCase{int(-(1 << 30)), false, false, ""},
+ gtTestCase{int(-151), false, false, ""},
+ gtTestCase{int(-150), false, false, ""},
+ gtTestCase{int(-149), true, false, ""},
+ gtTestCase{int(0), true, false, ""},
+ gtTestCase{int(17), true, false, ""},
+
+ gtTestCase{int8(-127), true, false, ""},
+ gtTestCase{int8(0), true, false, ""},
+ gtTestCase{int8(17), true, false, ""},
+
+ gtTestCase{int16(-(1 << 14)), false, false, ""},
+ gtTestCase{int16(-151), false, false, ""},
+ gtTestCase{int16(-150), false, false, ""},
+ gtTestCase{int16(-149), true, false, ""},
+ gtTestCase{int16(0), true, false, ""},
+ gtTestCase{int16(17), true, false, ""},
+
+ gtTestCase{int32(-(1 << 30)), false, false, ""},
+ gtTestCase{int32(-151), false, false, ""},
+ gtTestCase{int32(-150), false, false, ""},
+ gtTestCase{int32(-149), true, false, ""},
+ gtTestCase{int32(0), true, false, ""},
+ gtTestCase{int32(17), true, false, ""},
+
+ gtTestCase{int64(-(1 << 30)), false, false, ""},
+ gtTestCase{int64(-151), false, false, ""},
+ gtTestCase{int64(-150), false, false, ""},
+ gtTestCase{int64(-149), true, false, ""},
+ gtTestCase{int64(0), true, false, ""},
+ gtTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint((1 << 32) - 151), true, false, ""},
+ gtTestCase{uint(0), true, false, ""},
+ gtTestCase{uint(17), true, false, ""},
+
+ gtTestCase{uint8(0), true, false, ""},
+ gtTestCase{uint8(17), true, false, ""},
+ gtTestCase{uint8(253), true, false, ""},
+
+ gtTestCase{uint16((1 << 16) - 151), true, false, ""},
+ gtTestCase{uint16(0), true, false, ""},
+ gtTestCase{uint16(17), true, false, ""},
+
+ gtTestCase{uint32((1 << 32) - 151), true, false, ""},
+ gtTestCase{uint32(0), true, false, ""},
+ gtTestCase{uint32(17), true, false, ""},
+
+ gtTestCase{uint64((1 << 64) - 151), true, false, ""},
+ gtTestCase{uint64(0), true, false, ""},
+ gtTestCase{uint64(17), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-(1 << 30)), false, false, ""},
+ gtTestCase{float32(-151), false, false, ""},
+ gtTestCase{float32(-150.1), false, false, ""},
+ gtTestCase{float32(-150), false, false, ""},
+ gtTestCase{float32(-149.9), true, false, ""},
+ gtTestCase{float32(0), true, false, ""},
+ gtTestCase{float32(17), true, false, ""},
+ gtTestCase{float32(160), true, false, ""},
+
+ gtTestCase{float64(-(1 << 30)), false, false, ""},
+ gtTestCase{float64(-151), false, false, ""},
+ gtTestCase{float64(-150.1), false, false, ""},
+ gtTestCase{float64(-150), false, false, ""},
+ gtTestCase{float64(-149.9), true, false, ""},
+ gtTestCase{float64(0), true, false, ""},
+ gtTestCase{float64(17), true, false, ""},
+ gtTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) ZeroIntegerLiteral() {
+ matcher := GreaterThan(0)
+ desc := matcher.Description()
+ expectedDesc := "greater than 0"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-(1 << 30), false, false, ""},
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{0, false, false, ""},
+ gtTestCase{1, true, false, ""},
+ gtTestCase{17, true, false, ""},
+ gtTestCase{(1 << 30), true, false, ""},
+
+ gtTestCase{int(-(1 << 30)), false, false, ""},
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(0), false, false, ""},
+ gtTestCase{int(1), true, false, ""},
+ gtTestCase{int(17), true, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(0), false, false, ""},
+ gtTestCase{int8(1), true, false, ""},
+
+ gtTestCase{int16(-(1 << 14)), false, false, ""},
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(0), false, false, ""},
+ gtTestCase{int16(1), true, false, ""},
+ gtTestCase{int16(17), true, false, ""},
+
+ gtTestCase{int32(-(1 << 30)), false, false, ""},
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(0), false, false, ""},
+ gtTestCase{int32(1), true, false, ""},
+ gtTestCase{int32(17), true, false, ""},
+
+ gtTestCase{int64(-(1 << 30)), false, false, ""},
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(0), false, false, ""},
+ gtTestCase{int64(1), true, false, ""},
+ gtTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint((1 << 32) - 1), true, false, ""},
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(1), true, false, ""},
+ gtTestCase{uint(17), true, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(1), true, false, ""},
+ gtTestCase{uint8(17), true, false, ""},
+ gtTestCase{uint8(253), true, false, ""},
+
+ gtTestCase{uint16((1 << 16) - 1), true, false, ""},
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(1), true, false, ""},
+ gtTestCase{uint16(17), true, false, ""},
+
+ gtTestCase{uint32((1 << 32) - 1), true, false, ""},
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(1), true, false, ""},
+ gtTestCase{uint32(17), true, false, ""},
+
+ gtTestCase{uint64((1 << 64) - 1), true, false, ""},
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(1), true, false, ""},
+ gtTestCase{uint64(17), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-(1 << 30)), false, false, ""},
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(-0.1), false, false, ""},
+ gtTestCase{float32(-0.0), false, false, ""},
+ gtTestCase{float32(0), false, false, ""},
+ gtTestCase{float32(0.1), true, false, ""},
+ gtTestCase{float32(17), true, false, ""},
+ gtTestCase{float32(160), true, false, ""},
+
+ gtTestCase{float64(-(1 << 30)), false, false, ""},
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(-0.1), false, false, ""},
+ gtTestCase{float64(-0), false, false, ""},
+ gtTestCase{float64(0), false, false, ""},
+ gtTestCase{float64(0.1), true, false, ""},
+ gtTestCase{float64(17), true, false, ""},
+ gtTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) PositiveIntegerLiteral() {
+ matcher := GreaterThan(150)
+ desc := matcher.Description()
+ expectedDesc := "greater than 150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{149, false, false, ""},
+ gtTestCase{150, false, false, ""},
+ gtTestCase{151, true, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(149), false, false, ""},
+ gtTestCase{int(150), false, false, ""},
+ gtTestCase{int(151), true, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(0), false, false, ""},
+ gtTestCase{int8(17), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(149), false, false, ""},
+ gtTestCase{int16(150), false, false, ""},
+ gtTestCase{int16(151), true, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(149), false, false, ""},
+ gtTestCase{int32(150), false, false, ""},
+ gtTestCase{int32(151), true, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(149), false, false, ""},
+ gtTestCase{int64(150), false, false, ""},
+ gtTestCase{int64(151), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(149), false, false, ""},
+ gtTestCase{uint(150), false, false, ""},
+ gtTestCase{uint(151), true, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(127), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(149), false, false, ""},
+ gtTestCase{uint16(150), false, false, ""},
+ gtTestCase{uint16(151), true, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(149), false, false, ""},
+ gtTestCase{uint32(150), false, false, ""},
+ gtTestCase{uint32(151), true, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(149), false, false, ""},
+ gtTestCase{uint64(150), false, false, ""},
+ gtTestCase{uint64(151), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(149), false, false, ""},
+ gtTestCase{float32(149.9), false, false, ""},
+ gtTestCase{float32(150), false, false, ""},
+ gtTestCase{float32(150.1), true, false, ""},
+ gtTestCase{float32(151), true, false, ""},
+
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(149), false, false, ""},
+ gtTestCase{float64(149.9), false, false, ""},
+ gtTestCase{float64(150), false, false, ""},
+ gtTestCase{float64(150.1), true, false, ""},
+ gtTestCase{float64(151), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Float literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterThanTest) NegativeFloatLiteral() {
+ matcher := GreaterThan(-150.1)
+ desc := matcher.Description()
+ expectedDesc := "greater than -150.1"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-(1 << 30), false, false, ""},
+ gtTestCase{-151, false, false, ""},
+ gtTestCase{-150.1, false, false, ""},
+ gtTestCase{-150, true, false, ""},
+ gtTestCase{-149, true, false, ""},
+ gtTestCase{0, true, false, ""},
+ gtTestCase{17, true, false, ""},
+
+ gtTestCase{int(-(1 << 30)), false, false, ""},
+ gtTestCase{int(-151), false, false, ""},
+ gtTestCase{int(-150), true, false, ""},
+ gtTestCase{int(-149), true, false, ""},
+ gtTestCase{int(0), true, false, ""},
+ gtTestCase{int(17), true, false, ""},
+
+ gtTestCase{int8(-127), true, false, ""},
+ gtTestCase{int8(0), true, false, ""},
+ gtTestCase{int8(17), true, false, ""},
+
+ gtTestCase{int16(-(1 << 14)), false, false, ""},
+ gtTestCase{int16(-151), false, false, ""},
+ gtTestCase{int16(-150), true, false, ""},
+ gtTestCase{int16(-149), true, false, ""},
+ gtTestCase{int16(0), true, false, ""},
+ gtTestCase{int16(17), true, false, ""},
+
+ gtTestCase{int32(-(1 << 30)), false, false, ""},
+ gtTestCase{int32(-151), false, false, ""},
+ gtTestCase{int32(-150), true, false, ""},
+ gtTestCase{int32(-149), true, false, ""},
+ gtTestCase{int32(0), true, false, ""},
+ gtTestCase{int32(17), true, false, ""},
+
+ gtTestCase{int64(-(1 << 30)), false, false, ""},
+ gtTestCase{int64(-151), false, false, ""},
+ gtTestCase{int64(-150), true, false, ""},
+ gtTestCase{int64(-149), true, false, ""},
+ gtTestCase{int64(0), true, false, ""},
+ gtTestCase{int64(17), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint((1 << 32) - 151), true, false, ""},
+ gtTestCase{uint(0), true, false, ""},
+ gtTestCase{uint(17), true, false, ""},
+
+ gtTestCase{uint8(0), true, false, ""},
+ gtTestCase{uint8(17), true, false, ""},
+ gtTestCase{uint8(253), true, false, ""},
+
+ gtTestCase{uint16((1 << 16) - 151), true, false, ""},
+ gtTestCase{uint16(0), true, false, ""},
+ gtTestCase{uint16(17), true, false, ""},
+
+ gtTestCase{uint32((1 << 32) - 151), true, false, ""},
+ gtTestCase{uint32(0), true, false, ""},
+ gtTestCase{uint32(17), true, false, ""},
+
+ gtTestCase{uint64((1 << 64) - 151), true, false, ""},
+ gtTestCase{uint64(0), true, false, ""},
+ gtTestCase{uint64(17), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-(1 << 30)), false, false, ""},
+ gtTestCase{float32(-151), false, false, ""},
+ gtTestCase{float32(-150.2), false, false, ""},
+ gtTestCase{float32(-150.1), false, false, ""},
+ gtTestCase{float32(-150), true, false, ""},
+ gtTestCase{float32(0), true, false, ""},
+ gtTestCase{float32(17), true, false, ""},
+ gtTestCase{float32(160), true, false, ""},
+
+ gtTestCase{float64(-(1 << 30)), false, false, ""},
+ gtTestCase{float64(-151), false, false, ""},
+ gtTestCase{float64(-150.2), false, false, ""},
+ gtTestCase{float64(-150.1), false, false, ""},
+ gtTestCase{float64(-150), true, false, ""},
+ gtTestCase{float64(0), true, false, ""},
+ gtTestCase{float64(17), true, false, ""},
+ gtTestCase{float64(160), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) PositiveFloatLiteral() {
+ matcher := GreaterThan(149.9)
+ desc := matcher.Description()
+ expectedDesc := "greater than 149.9"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{149, false, false, ""},
+ gtTestCase{149.9, false, false, ""},
+ gtTestCase{150, true, false, ""},
+ gtTestCase{151, true, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(149), false, false, ""},
+ gtTestCase{int(150), true, false, ""},
+ gtTestCase{int(151), true, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(0), false, false, ""},
+ gtTestCase{int8(17), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(149), false, false, ""},
+ gtTestCase{int16(150), true, false, ""},
+ gtTestCase{int16(151), true, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(149), false, false, ""},
+ gtTestCase{int32(150), true, false, ""},
+ gtTestCase{int32(151), true, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(149), false, false, ""},
+ gtTestCase{int64(150), true, false, ""},
+ gtTestCase{int64(151), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(149), false, false, ""},
+ gtTestCase{uint(150), true, false, ""},
+ gtTestCase{uint(151), true, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(127), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(149), false, false, ""},
+ gtTestCase{uint16(150), true, false, ""},
+ gtTestCase{uint16(151), true, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(149), false, false, ""},
+ gtTestCase{uint32(150), true, false, ""},
+ gtTestCase{uint32(151), true, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(149), false, false, ""},
+ gtTestCase{uint64(150), true, false, ""},
+ gtTestCase{uint64(151), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(149), false, false, ""},
+ gtTestCase{float32(149.8), false, false, ""},
+ gtTestCase{float32(149.9), false, false, ""},
+ gtTestCase{float32(150), true, false, ""},
+ gtTestCase{float32(151), true, false, ""},
+
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(149), false, false, ""},
+ gtTestCase{float64(149.8), false, false, ""},
+ gtTestCase{float64(149.9), false, false, ""},
+ gtTestCase{float64(150), true, false, ""},
+ gtTestCase{float64(151), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Subtle cases
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterThanTest) Int64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterThan(int64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{kTwoTo25 + 0, false, false, ""},
+ gtTestCase{kTwoTo25 + 1, false, false, ""},
+ gtTestCase{kTwoTo25 + 2, true, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(0), false, false, ""},
+ gtTestCase{int16(32767), false, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 2), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(255), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(65535), false, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ gtTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) Int64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterThan(int64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{1 << 30, false, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(math.MaxInt32), false, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(0), false, false, ""},
+ gtTestCase{int16(32767), false, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(math.MaxInt32), false, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 2), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(math.MaxUint32), false, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(255), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(65535), false, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(math.MaxUint32), false, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) Uint64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterThan(uint64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{kTwoTo25 + 0, false, false, ""},
+ gtTestCase{kTwoTo25 + 1, false, false, ""},
+ gtTestCase{kTwoTo25 + 2, true, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(0), false, false, ""},
+ gtTestCase{int16(32767), false, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int32(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 2), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(255), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(65535), false, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint32(kTwoTo25 + 2), true, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ gtTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) Uint64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterThan(uint64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{-1, false, false, ""},
+ gtTestCase{1 << 30, false, false, ""},
+
+ gtTestCase{int(-1), false, false, ""},
+ gtTestCase{int(math.MaxInt32), false, false, ""},
+
+ gtTestCase{int8(-1), false, false, ""},
+ gtTestCase{int8(127), false, false, ""},
+
+ gtTestCase{int16(-1), false, false, ""},
+ gtTestCase{int16(0), false, false, ""},
+ gtTestCase{int16(32767), false, false, ""},
+
+ gtTestCase{int32(-1), false, false, ""},
+ gtTestCase{int32(math.MaxInt32), false, false, ""},
+
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 2), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint(0), false, false, ""},
+ gtTestCase{uint(math.MaxUint32), false, false, ""},
+
+ gtTestCase{uint8(0), false, false, ""},
+ gtTestCase{uint8(255), false, false, ""},
+
+ gtTestCase{uint16(0), false, false, ""},
+ gtTestCase{uint16(65535), false, false, ""},
+
+ gtTestCase{uint32(0), false, false, ""},
+ gtTestCase{uint32(math.MaxUint32), false, false, ""},
+
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) Float32AboveExactIntegerRange() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := GreaterThan(float32(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 3.3554432e+07"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{int64(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{int64(kTwoTo25 + 3), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{uint64(kTwoTo25 + 3), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float32(-1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{float32(kTwoTo25 + 3), true, false, ""},
+
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ gtTestCase{float64(kTwoTo25 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) Float64AboveExactIntegerRange() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := GreaterThan(float64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "greater than 1.8014398509481984e+16"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ // Signed integers.
+ gtTestCase{int64(-1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 - 2), false, false, ""},
+ gtTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 2), false, false, ""},
+ gtTestCase{int64(kTwoTo54 + 3), true, false, ""},
+
+ // Unsigned integers.
+ gtTestCase{uint64(0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 - 2), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+ gtTestCase{uint64(kTwoTo54 + 3), true, false, ""},
+
+ // Floating point.
+ gtTestCase{float64(-1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ gtTestCase{float64(kTwoTo54 + 3), true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// String literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *GreaterThanTest) EmptyString() {
+ matcher := GreaterThan("")
+ desc := matcher.Description()
+ expectedDesc := "greater than \"\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ gtTestCase{"", false, false, ""},
+ gtTestCase{"\x00", true, false, ""},
+ gtTestCase{"a", true, false, ""},
+ gtTestCase{"foo", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) SingleNullByte() {
+ matcher := GreaterThan("\x00")
+ desc := matcher.Description()
+ expectedDesc := "greater than \"\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ gtTestCase{"", false, false, ""},
+ gtTestCase{"\x00", false, false, ""},
+ gtTestCase{"\x00\x00", true, false, ""},
+ gtTestCase{"a", true, false, ""},
+ gtTestCase{"foo", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *GreaterThanTest) LongerString() {
+ matcher := GreaterThan("foo\x00")
+ desc := matcher.Description()
+ expectedDesc := "greater than \"foo\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []gtTestCase{
+ gtTestCase{"", false, false, ""},
+ gtTestCase{"\x00", false, false, ""},
+ gtTestCase{"bar", false, false, ""},
+ gtTestCase{"foo", false, false, ""},
+ gtTestCase{"foo\x00", false, false, ""},
+ gtTestCase{"foo\x00\x00", true, false, ""},
+ gtTestCase{"fooa", true, false, ""},
+ gtTestCase{"qux", true, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as.go
new file mode 100644
index 00000000000..3b286f73218
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as.go
@@ -0,0 +1,37 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// HasSameTypeAs returns a matcher that matches values with exactly the same
+// type as the supplied prototype.
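+//
+// Illustrative usage, mirroring the accompanying tests:
+//
+//	HasSameTypeAs("").Matches("taco") // nil error: both are string
+//	HasSameTypeAs("").Matches(17)     // error "which has type int"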
+func HasSameTypeAs(p interface{}) Matcher {
+ expected := reflect.TypeOf(p)
+ pred := func(c interface{}) error {
+ actual := reflect.TypeOf(c)
+ if actual != expected {
+ return fmt.Errorf("which has type %v", actual)
+ }
+
+ return nil
+ }
+
+ return NewMatcher(pred, fmt.Sprintf("has type %v", expected))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as_test.go
new file mode 100644
index 00000000000..a4a3e308aa3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as_test.go
@@ -0,0 +1,181 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "io"
+ "testing"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+func TestHasSameTypeAs(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Boilerplate
+////////////////////////////////////////////////////////////////////////
+
+type HasSameTypeAsTest struct {
+}
+
+func init() { RegisterTestSuite(&HasSameTypeAsTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *HasSameTypeAsTest) CandidateIsLiteralNil() {
+ matcher := HasSameTypeAs(nil)
+ var err error
+
+ // Description
+ ExpectEq("has type <nil>", matcher.Description())
+
+ // Literal nil
+ err = matcher.Matches(nil)
+ ExpectEq(nil, err)
+
+ // nil in interface variable
+ var r io.Reader
+ err = matcher.Matches(r)
+ ExpectEq(nil, err)
+
+ // int
+ err = matcher.Matches(17)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type int")))
+
+ // string
+ err = matcher.Matches("")
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type string")))
+
+ // nil map
+ var m map[string]string
+ err = matcher.Matches(m)
+
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type map[string]string")))
+
+ // Non-nil map
+ m = make(map[string]string)
+ err = matcher.Matches(m)
+
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type map[string]string")))
+}
+
+func (t *HasSameTypeAsTest) CandidateIsNilMap() {
+ var m map[string]string
+ matcher := HasSameTypeAs(m)
+ var err error
+
+ // Description
+ ExpectEq("has type map[string]string", matcher.Description())
+
+ // nil map
+ m = nil
+ err = matcher.Matches(m)
+ ExpectEq(nil, err)
+
+ // Non-nil map
+ m = make(map[string]string)
+ err = matcher.Matches(m)
+ ExpectEq(nil, err)
+
+ // Literal nil
+ err = matcher.Matches(nil)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type <nil>")))
+
+ // int
+ err = matcher.Matches(17)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type int")))
+
+ // string
+ err = matcher.Matches("")
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type string")))
+}
+
+func (t *HasSameTypeAsTest) CandidateIsNilInInterfaceVariable() {
+ var r io.Reader
+ matcher := HasSameTypeAs(r)
+ var err error
+
+ // Description
+ ExpectEq("has type <nil>", matcher.Description())
+
+ // nil in interface variable
+ r = nil
+ err = matcher.Matches(r)
+ ExpectEq(nil, err)
+
+ // Literal nil
+ err = matcher.Matches(nil)
+ ExpectEq(nil, err)
+
+ // int
+ err = matcher.Matches(17)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type int")))
+}
+
+func (t *HasSameTypeAsTest) CandidateIsString() {
+ matcher := HasSameTypeAs("")
+ var err error
+
+ // Description
+ ExpectEq("has type string", matcher.Description())
+
+ // string
+ err = matcher.Matches("taco")
+ ExpectEq(nil, err)
+
+ // string alias
+ type Foo string
+ err = matcher.Matches(Foo("taco"))
+ ExpectThat(err, Error(MatchesRegexp("which has type .*Foo")))
+
+ // Literal nil
+ err = matcher.Matches(nil)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type <nil>")))
+
+ // int
+ err = matcher.Matches(17)
+ AssertNe(nil, err)
+ ExpectThat(err, Error(Equals("which has type int")))
+}
+
+func (t *HasSameTypeAsTest) CandidateIsStringAlias() {
+ type Foo string
+ matcher := HasSameTypeAs(Foo(""))
+ var err error
+
+ // Description
+ ExpectThat(matcher.Description(), MatchesRegexp("has type .*Foo"))
+
+ // string alias
+ err = matcher.Matches(Foo("taco"))
+ ExpectEq(nil, err)
+
+ // string
+ err = matcher.Matches("taco")
+ ExpectThat(err, Error(Equals("which has type string")))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_substr.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_substr.go
new file mode 100644
index 00000000000..bf5bd6ae6d3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_substr.go
@@ -0,0 +1,46 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// HasSubstr returns a matcher that matches strings containing s as a
+// substring.
+func HasSubstr(s string) Matcher {
+ return NewMatcher(
+ func(c interface{}) error { return hasSubstr(s, c) },
+ fmt.Sprintf("has substring \"%s\"", s))
+}
+
+func hasSubstr(needle string, c interface{}) error {
+ v := reflect.ValueOf(c)
+ if v.Kind() != reflect.String {
+ return NewFatalError("which is not a string")
+ }
+
+ // Perform the substring search.
+ haystack := v.String()
+ if strings.Contains(haystack, needle) {
+ return nil
+ }
+
+ return errors.New("")
+}
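
hasSubstr deliberately checks reflect.Kind rather than type-asserting to string, so values of defined types whose underlying type is string still qualify, while non-strings such as []byte draw the fatal error. A standalone sketch of the distinction (the Path type is illustrative only):

package main

import (
	"fmt"
	"reflect"
)

type Path string // defined type with string underlying type

func main() {
	v := reflect.ValueOf(Path("/etc/hosts"))
	fmt.Println(v.Kind() == reflect.String) // true: Kind sees the underlying string

	_, ok := interface{}(Path("/etc/hosts")).(string)
	fmt.Println(ok) // false: a type assertion demands exactly string
}
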
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_substr_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_substr_test.go
new file mode 100644
index 00000000000..6fc913a2490
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/has_substr_test.go
@@ -0,0 +1,93 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type HasSubstrTest struct {
+}
+
+func init() { RegisterTestSuite(&HasSubstrTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *HasSubstrTest) Description() {
+ matcher := HasSubstr("taco")
+ ExpectThat(matcher.Description(), Equals("has substring \"taco\""))
+}
+
+func (t *HasSubstrTest) CandidateIsNil() {
+ matcher := HasSubstr("")
+ err := matcher.Matches(nil)
+
+ ExpectThat(err, Error(Equals("which is not a string")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *HasSubstrTest) CandidateIsInteger() {
+ matcher := HasSubstr("")
+ err := matcher.Matches(17)
+
+ ExpectThat(err, Error(Equals("which is not a string")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *HasSubstrTest) CandidateIsByteSlice() {
+ matcher := HasSubstr("")
+ err := matcher.Matches([]byte{17})
+
+ ExpectThat(err, Error(Equals("which is not a string")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *HasSubstrTest) CandidateDoesntHaveSubstring() {
+ matcher := HasSubstr("taco")
+ err := matcher.Matches("tac")
+
+ ExpectThat(err, Error(Equals("")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *HasSubstrTest) CandidateEqualsArg() {
+ matcher := HasSubstr("taco")
+ err := matcher.Matches("taco")
+
+ ExpectThat(err, Equals(nil))
+}
+
+func (t *HasSubstrTest) CandidateHasProperSubstring() {
+ matcher := HasSubstr("taco")
+ err := matcher.Matches("burritos and tacos")
+
+ ExpectThat(err, Equals(nil))
+}
+
+func (t *HasSubstrTest) EmptyStringIsAlwaysSubString() {
+ matcher := HasSubstr("")
+ err := matcher.Matches("asdf")
+
+ ExpectThat(err, Equals(nil))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/identical_to.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/identical_to.go
new file mode 100644
index 00000000000..ae6460ed966
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/identical_to.go
@@ -0,0 +1,134 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// Is the type comparable according to the definition here?
+//
+// https://golang.org/ref/spec#Comparison_operators
+//
+func isComparable(t reflect.Type) bool {
+ switch t.Kind() {
+ case reflect.Array:
+ return isComparable(t.Elem())
+
+ case reflect.Struct:
+ for i := 0; i < t.NumField(); i++ {
+ if !isComparable(t.Field(i).Type) {
+ return false
+ }
+ }
+
+ return true
+
+ case reflect.Slice, reflect.Map, reflect.Func:
+ return false
+ }
+
+ return true
+}
+
+// Should the supplied type be allowed as an argument to IdenticalTo?
+func isLegalForIdenticalTo(t reflect.Type) (bool, error) {
+ // Allow the zero type.
+ if t == nil {
+ return true, nil
+ }
+
+ // Reference types are always okay; we compare pointers.
+ switch t.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Func, reflect.Chan:
+ return true, nil
+ }
+
+ // Reject other non-comparable types.
+ if !isComparable(t) {
+ return false, fmt.Errorf("%v is not comparable", t)
+ }
+
+ return true, nil
+}
+
+// IdenticalTo(x) returns a matcher that matches values v with type identical
+// to x such that:
+//
+// 1. If v and x are of a reference type (slice, map, function, channel), then
+// they are either both nil or are references to the same object.
+//
+// 2. Otherwise, if v and x are not of a reference type but have a valid type,
+// then v == x.
+//
+// If v and x are both the invalid type (which results from the predeclared nil
+// value, or from nil interface variables), then the matcher is satisfied.
+//
+// This function will panic if x is of a value type that is not comparable. For
+// example, x cannot be an array of functions.
+func IdenticalTo(x interface{}) Matcher {
+ t := reflect.TypeOf(x)
+
+ // Reject illegal arguments.
+ if ok, err := isLegalForIdenticalTo(t); !ok {
+ panic("IdenticalTo: " + err.Error())
+ }
+
+ return &identicalToMatcher{x}
+}
+
+type identicalToMatcher struct {
+ x interface{}
+}
+
+func (m *identicalToMatcher) Description() string {
+ t := reflect.TypeOf(m.x)
+ return fmt.Sprintf("identical to <%v> %v", t, m.x)
+}
+
+func (m *identicalToMatcher) Matches(c interface{}) error {
+ // Make sure the candidate's type is correct.
+ t := reflect.TypeOf(m.x)
+ if ct := reflect.TypeOf(c); t != ct {
+ return NewFatalError(fmt.Sprintf("which is of type %v", ct))
+ }
+
+ // Special case: two values of the invalid type are always identical.
+ if t == nil {
+ return nil
+ }
+
+ // Handle reference types.
+ switch t.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Func, reflect.Chan:
+ xv := reflect.ValueOf(m.x)
+ cv := reflect.ValueOf(c)
+ if xv.Pointer() == cv.Pointer() {
+ return nil
+ }
+
+ return errors.New("which is not an identical reference")
+ }
+
+ // Are the values equal?
+ if m.x == c {
+ return nil
+ }
+
+ return errors.New("")
+}
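
For the reference types handled above, identity means pointer equality, not content equality; that is the behavior the slice, map, function, and channel tests below pin down. A standalone sketch contrasting the two notions:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	a := []int{1, 2, 3}
	b := []int{1, 2, 3}
	c := a // shares a's backing array

	fmt.Println(reflect.DeepEqual(a, b))                                      // true: equal contents
	fmt.Println(reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer()) // false: distinct references
	fmt.Println(reflect.ValueOf(a).Pointer() == reflect.ValueOf(c).Pointer()) // true: identical reference
}
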
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/identical_to_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/identical_to_test.go
new file mode 100644
index 00000000000..cc03b214ad6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/identical_to_test.go
@@ -0,0 +1,849 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "fmt"
+ "io"
+ "unsafe"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type IdenticalToTest struct {
+}
+
+func init() { RegisterTestSuite(&IdenticalToTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *IdenticalToTest) TypesNotIdentical() {
+ var m Matcher
+ var err error
+
+ type intAlias int
+
+ // Type alias expected value
+ m = IdenticalTo(intAlias(17))
+ err = m.Matches(int(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int")))
+
+ // Type alias candidate
+ m = IdenticalTo(int(17))
+ err = m.Matches(intAlias(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.intAlias")))
+
+ // int and uint
+ m = IdenticalTo(int(17))
+ err = m.Matches(uint(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type uint")))
+}
+
+func (t *IdenticalToTest) PredeclaredNilIdentifier() {
+ var m Matcher
+ var err error
+
+ // Nil literal
+ m = IdenticalTo(nil)
+ err = m.Matches(nil)
+ ExpectEq(nil, err)
+
+ // Zero interface var (which is the same as above since IdenticalTo takes an
+ // interface{} as an arg)
+ var nilReader io.Reader
+ var nilWriter io.Writer
+
+ m = IdenticalTo(nilReader)
+ err = m.Matches(nilWriter)
+ ExpectEq(nil, err)
+
+ // Typed nil value.
+ m = IdenticalTo(nil)
+ err = m.Matches((chan int)(nil))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type chan int")))
+
+ // Non-nil value.
+ m = IdenticalTo(nil)
+ err = m.Matches("taco")
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type string")))
+}
+
+func (t *IdenticalToTest) Slices() {
+ var m Matcher
+ var err error
+
+ // Nil expected value
+ m = IdenticalTo(([]int)(nil))
+ ExpectEq("identical to <[]int> []", m.Description())
+
+ err = m.Matches(([]int)(nil))
+ ExpectEq(nil, err)
+
+ err = m.Matches([]int{})
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+
+ // Non-nil expected value
+ o1 := make([]int, 1)
+ o2 := make([]int, 1)
+ m = IdenticalTo(o1)
+ ExpectEq(fmt.Sprintf("identical to <[]int> %v", o1), m.Description())
+
+ err = m.Matches(o1)
+ ExpectEq(nil, err)
+
+ err = m.Matches(o2)
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+}
+
+func (t *IdenticalToTest) Maps() {
+ var m Matcher
+ var err error
+
+ // Nil expected value
+ m = IdenticalTo((map[int]int)(nil))
+ ExpectEq("identical to <map[int]int> map[]", m.Description())
+
+ err = m.Matches((map[int]int)(nil))
+ ExpectEq(nil, err)
+
+ err = m.Matches(map[int]int{})
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+
+ // Non-nil expected value
+ o1 := map[int]int{}
+ o2 := map[int]int{}
+ m = IdenticalTo(o1)
+ ExpectEq(fmt.Sprintf("identical to <map[int]int> %v", o1), m.Description())
+
+ err = m.Matches(o1)
+ ExpectEq(nil, err)
+
+ err = m.Matches(o2)
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+}
+
+func (t *IdenticalToTest) Functions() {
+ var m Matcher
+ var err error
+
+ // Nil expected value
+ m = IdenticalTo((func())(nil))
+ ExpectEq("identical to <func()> <nil>", m.Description())
+
+ err = m.Matches((func())(nil))
+ ExpectEq(nil, err)
+
+ err = m.Matches(func() {})
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+
+ // Non-nil expected value
+ o1 := func() {}
+ o2 := func() {}
+ m = IdenticalTo(o1)
+ ExpectEq(fmt.Sprintf("identical to <func()> %v", o1), m.Description())
+
+ err = m.Matches(o1)
+ ExpectEq(nil, err)
+
+ err = m.Matches(o2)
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+}
+
+func (t *IdenticalToTest) Channels() {
+ var m Matcher
+ var err error
+
+ // Nil expected value
+ m = IdenticalTo((chan int)(nil))
+ ExpectEq("identical to <chan int> <nil>", m.Description())
+
+ err = m.Matches((chan int)(nil))
+ ExpectEq(nil, err)
+
+ err = m.Matches(make(chan int))
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+
+ // Non-nil expected value
+ o1 := make(chan int)
+ o2 := make(chan int)
+ m = IdenticalTo(o1)
+ ExpectEq(fmt.Sprintf("identical to <chan int> %v", o1), m.Description())
+
+ err = m.Matches(o1)
+ ExpectEq(nil, err)
+
+ err = m.Matches(o2)
+ ExpectThat(err, Error(Equals("which is not an identical reference")))
+}
+
+func (t *IdenticalToTest) Bools() {
+ var m Matcher
+ var err error
+
+ // false
+ m = IdenticalTo(false)
+ ExpectEq("identical to <bool> false", m.Description())
+
+ err = m.Matches(false)
+ ExpectEq(nil, err)
+
+ err = m.Matches(true)
+ ExpectThat(err, Error(Equals("")))
+
+ // true
+ m = IdenticalTo(true)
+ ExpectEq("identical to <bool> true", m.Description())
+
+ err = m.Matches(false)
+ ExpectThat(err, Error(Equals("")))
+
+ err = m.Matches(true)
+ ExpectEq(nil, err)
+}
+
+func (t *IdenticalToTest) Ints() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(int(17))
+ ExpectEq("identical to <int> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(int(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType int
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Int8s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(int8(17))
+ ExpectEq("identical to <int8> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(int8(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType int8
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Int16s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(int16(17))
+ ExpectEq("identical to <int16> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(int16(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType int16
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Int32s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(int32(17))
+ ExpectEq("identical to <int32> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(int32(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType int32
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int16(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int16")))
+}
+
+func (t *IdenticalToTest) Int64s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(int64(17))
+ ExpectEq("identical to <int64> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(int64(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType int64
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uints() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uint(17))
+ ExpectEq("identical to <uint> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uint(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uint
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uint8s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uint8(17))
+ ExpectEq("identical to <uint8> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uint8(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uint8
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uint16s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uint16(17))
+ ExpectEq("identical to <uint16> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uint16(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uint16
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uint32s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uint32(17))
+ ExpectEq("identical to <uint32> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uint32(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uint32
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uint64s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uint64(17))
+ ExpectEq("identical to <uint64> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uint64(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uint64
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Uintptrs() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(uintptr(17))
+ ExpectEq("identical to <uintptr> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(uintptr(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType uintptr
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Float32s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(float32(17))
+ ExpectEq("identical to <float32> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(float32(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType float32
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Float64s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(float64(17))
+ ExpectEq("identical to <float64> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(float64(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType float64
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Complex64s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(complex64(17))
+ ExpectEq("identical to <complex64> (17+0i)", m.Description())
+
+ // Identical value
+ err = m.Matches(complex64(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType complex64
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) Complex128s() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo(complex128(17))
+ ExpectEq("identical to <complex128> (17+0i)", m.Description())
+
+ // Identical value
+ err = m.Matches(complex128(17))
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType complex128
+ err = m.Matches(myType(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) EmptyComparableArrays() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo([0]int{})
+ ExpectEq("identical to <[0]int> []", m.Description())
+
+ // Identical value
+ err = m.Matches([0]int{})
+ ExpectEq(nil, err)
+
+ // Length too long
+ err = m.Matches([1]int{17})
+ ExpectThat(err, Error(Equals("which is of type [1]int")))
+
+ // Element type alias
+ type myType int
+ err = m.Matches([0]myType{})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type [0]oglematchers_test.myType")))
+
+ // Completely wrong element type
+ err = m.Matches([0]int32{})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type [0]int32")))
+}
+
+func (t *IdenticalToTest) NonEmptyComparableArrays() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo([2]int{17, 19})
+ ExpectEq("identical to <[2]int> [17 19]", m.Description())
+
+ // Identical value
+ err = m.Matches([2]int{17, 19})
+ ExpectEq(nil, err)
+
+ // Length too short
+ err = m.Matches([1]int{17})
+ ExpectThat(err, Error(Equals("which is of type [1]int")))
+
+ // Length too long
+ err = m.Matches([3]int{17, 19, 23})
+ ExpectThat(err, Error(Equals("which is of type [3]int")))
+
+ // First element different
+ err = m.Matches([2]int{13, 19})
+ ExpectThat(err, Error(Equals("")))
+
+ // Second element different
+ err = m.Matches([2]int{17, 23})
+ ExpectThat(err, Error(Equals("")))
+
+ // Element type alias
+ type myType int
+ err = m.Matches([2]myType{17, 19})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type [2]oglematchers_test.myType")))
+
+ // Completely wrong element type
+ err = m.Matches([2]int32{17, 19})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type [2]int32")))
+}
+
+func (t *IdenticalToTest) NonEmptyArraysOfComparableArrays() {
+ var m Matcher
+ var err error
+
+ x := [2][2]int{
+ [2]int{17, 19},
+ [2]int{23, 29},
+ }
+ m = IdenticalTo(x)
+ ExpectEq("identical to <[2][2]int> [[17 19] [23 29]]", m.Description())
+
+ // Identical value
+ err = m.Matches([2][2]int{[2]int{17, 19}, [2]int{23, 29}})
+ ExpectEq(nil, err)
+
+ // Outer length too short
+ err = m.Matches([1][2]int{[2]int{17, 19}})
+ ExpectThat(err, Error(Equals("which is of type [1][2]int")))
+
+ // Inner length too short
+ err = m.Matches([2][1]int{[1]int{17}, [1]int{23}})
+ ExpectThat(err, Error(Equals("which is of type [2][1]int")))
+
+ // First element different
+ err = m.Matches([2][2]int{[2]int{13, 19}, [2]int{23, 29}})
+ ExpectThat(err, Error(Equals("")))
+
+ // Element type alias
+ type myType int
+ err = m.Matches([2][2]myType{[2]myType{17, 19}, [2]myType{23, 29}})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type [2][2]oglematchers_test.myType")))
+}
+
+func (t *IdenticalToTest) NonComparableArrays() {
+ x := [0]func(){}
+ f := func() { IdenticalTo(x) }
+ ExpectThat(f, Panics(HasSubstr("is not comparable")))
+}
+
+func (t *IdenticalToTest) ArraysOfNonComparableArrays() {
+ x := [0][0]func(){}
+ f := func() { IdenticalTo(x) }
+ ExpectThat(f, Panics(HasSubstr("is not comparable")))
+}
+
+func (t *IdenticalToTest) Strings() {
+ var m Matcher
+ var err error
+
+ m = IdenticalTo("taco")
+ ExpectEq("identical to <string> taco", m.Description())
+
+ // Identical value
+ err = m.Matches("ta" + "co")
+ ExpectEq(nil, err)
+
+ // Type alias
+ type myType string
+ err = m.Matches(myType("taco"))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) ComparableStructs() {
+ var m Matcher
+ var err error
+
+ type subStruct struct {
+ i int
+ }
+
+ type myStruct struct {
+ u uint
+ s subStruct
+ }
+
+ x := myStruct{17, subStruct{19}}
+ m = IdenticalTo(x)
+ ExpectEq("identical to <oglematchers_test.myStruct> {17 {19}}", m.Description())
+
+ // Identical value
+ err = m.Matches(myStruct{17, subStruct{19}})
+ ExpectEq(nil, err)
+
+ // Wrong outer field
+ err = m.Matches(myStruct{13, subStruct{19}})
+ ExpectThat(err, Error(Equals("")))
+
+ // Wrong inner field
+ err = m.Matches(myStruct{17, subStruct{23}})
+ ExpectThat(err, Error(Equals("")))
+
+ // Type alias
+ type myType myStruct
+ err = m.Matches(myType{17, subStruct{19}})
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) NonComparableStructs() {
+ type subStruct struct {
+ s []int
+ }
+
+ type myStruct struct {
+ u uint
+ s subStruct
+ }
+
+ x := myStruct{17, subStruct{[]int{19}}}
+ f := func() { IdenticalTo(x) }
+ ExpectThat(f, Panics(AllOf(HasSubstr("IdenticalTo"), HasSubstr("comparable"))))
+}
+
+func (t *IdenticalToTest) NilUnsafePointer() {
+ var m Matcher
+ var err error
+
+ x := unsafe.Pointer(nil)
+ m = IdenticalTo(x)
+ ExpectEq(fmt.Sprintf("identical to <unsafe.Pointer> %v", x), m.Description())
+
+ // Identical value
+ err = m.Matches(unsafe.Pointer(nil))
+ ExpectEq(nil, err)
+
+ // Wrong value
+ j := 17
+ err = m.Matches(unsafe.Pointer(&j))
+ ExpectThat(err, Error(Equals("")))
+
+ // Type alias
+ type myType unsafe.Pointer
+ err = m.Matches(myType(unsafe.Pointer(nil)))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) NonNilUnsafePointer() {
+ var m Matcher
+ var err error
+
+ i := 17
+ x := unsafe.Pointer(&i)
+ m = IdenticalTo(x)
+ ExpectEq(fmt.Sprintf("identical to <unsafe.Pointer> %v", x), m.Description())
+
+ // Identical value
+ err = m.Matches(unsafe.Pointer(&i))
+ ExpectEq(nil, err)
+
+ // Nil value
+ err = m.Matches(unsafe.Pointer(nil))
+ ExpectThat(err, Error(Equals("")))
+
+ // Wrong value
+ j := 17
+ err = m.Matches(unsafe.Pointer(&j))
+ ExpectThat(err, Error(Equals("")))
+
+ // Type alias
+ type myType unsafe.Pointer
+ err = m.Matches(myType(unsafe.Pointer(&i)))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
+
+func (t *IdenticalToTest) IntAlias() {
+ var m Matcher
+ var err error
+
+ type intAlias int
+
+ m = IdenticalTo(intAlias(17))
+ ExpectEq("identical to <oglematchers_test.intAlias> 17", m.Description())
+
+ // Identical value
+ err = m.Matches(intAlias(17))
+ ExpectEq(nil, err)
+
+ // Int
+ err = m.Matches(int(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int")))
+
+ // Completely wrong type
+ err = m.Matches(int32(17))
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("which is of type int32")))
+}
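
One terminology caveat about the tests above: declarations like "type intAlias int" create a defined type with its own identity (these files predate Go 1.9 alias declarations), which is exactly why the matcher reports a fatal type mismatch for them. A true alias, written with "=", is the same type. A standalone sketch, assuming a Go 1.9+ toolchain:

package main

import (
	"fmt"
	"reflect"
)

type defined int // distinct type: would fail IdenticalTo(int(17))
type alias = int // alias: just another name for int

func main() {
	fmt.Println(reflect.TypeOf(defined(17)) == reflect.TypeOf(int(17))) // false
	fmt.Println(reflect.TypeOf(alias(17)) == reflect.TypeOf(int(17)))   // true
}
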
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go
new file mode 100644
index 00000000000..8402cdeaf09
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// LessOrEqual returns a matcher that matches integer, floating point, or
+// string values v such that v <= x. Comparison is not defined between numeric
+// and string types, but is defined between all integer and floating point
+// types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// LessOrEqual will panic.
+func LessOrEqual(x interface{}) Matcher {
+ desc := fmt.Sprintf("less than or equal to %v", x)
+
+ // Special case: make it clear that strings are strings.
+ if reflect.TypeOf(x).Kind() == reflect.String {
+ desc = fmt.Sprintf("less than or equal to \"%s\"", x)
+ }
+
+ // Put LessThan last so that its error messages will be used in the event of
+ // failure.
+ return transformDescription(AnyOf(Equals(x), LessThan(x)), desc)
+}
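
Because LessOrEqual is just AnyOf(Equals(x), LessThan(x)) with a rewritten description, exercising it needs nothing beyond the exported API. A short usage sketch against the vendored import path:

package main

import (
	"fmt"

	"github.com/smartystreets/assertions/internal/oglematchers"
)

func main() {
	m := oglematchers.LessOrEqual(150)
	fmt.Println(m.Description())       // less than or equal to 150
	fmt.Println(m.Matches(150) == nil) // true: the Equals branch accepts
	fmt.Println(m.Matches(151) == nil) // false: both branches reject; LessThan's error is reported
}
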
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal_test.go
new file mode 100644
index 00000000000..a1a2ae7d60e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal_test.go
@@ -0,0 +1,1077 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "math"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type LessOrEqualTest struct {
+}
+
+func init() { RegisterTestSuite(&LessOrEqualTest{}) }
+
+type leTestCase struct {
+ candidate interface{}
+ expectedResult bool
+ shouldBeFatal bool
+ expectedError string
+}
+
+func (t *LessOrEqualTest) checkTestCases(matcher Matcher, cases []leTestCase) {
+ for i, c := range cases {
+ err := matcher.Matches(c.candidate)
+
+ ExpectThat(
+ (err == nil),
+ Equals(c.expectedResult),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ if err == nil {
+ continue
+ }
+
+ _, isFatal := err.(*FatalError)
+ ExpectEq(
+ c.shouldBeFatal,
+ isFatal,
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ ExpectThat(
+ err,
+ Error(Equals(c.expectedError)),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+// Integer literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessOrEqualTest) IntegerCandidateBadTypes() {
+ matcher := LessOrEqual(int(-150))
+
+ cases := []leTestCase{
+ leTestCase{true, false, true, "which is not comparable"},
+ leTestCase{complex64(-151), false, true, "which is not comparable"},
+ leTestCase{complex128(-151), false, true, "which is not comparable"},
+ leTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ leTestCase{make(chan int), false, true, "which is not comparable"},
+ leTestCase{func() {}, false, true, "which is not comparable"},
+ leTestCase{map[int]int{}, false, true, "which is not comparable"},
+ leTestCase{&leTestCase{}, false, true, "which is not comparable"},
+ leTestCase{make([]int, 0), false, true, "which is not comparable"},
+ leTestCase{"-151", false, true, "which is not comparable"},
+ leTestCase{leTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) FloatCandidateBadTypes() {
+ matcher := LessOrEqual(float32(-150))
+
+ cases := []leTestCase{
+ leTestCase{true, false, true, "which is not comparable"},
+ leTestCase{complex64(-151), false, true, "which is not comparable"},
+ leTestCase{complex128(-151), false, true, "which is not comparable"},
+ leTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ leTestCase{make(chan int), false, true, "which is not comparable"},
+ leTestCase{func() {}, false, true, "which is not comparable"},
+ leTestCase{map[int]int{}, false, true, "which is not comparable"},
+ leTestCase{&leTestCase{}, false, true, "which is not comparable"},
+ leTestCase{make([]int, 0), false, true, "which is not comparable"},
+ leTestCase{"-151", false, true, "which is not comparable"},
+ leTestCase{leTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) StringCandidateBadTypes() {
+ matcher := LessOrEqual("17")
+
+ cases := []leTestCase{
+ leTestCase{true, false, true, "which is not comparable"},
+ leTestCase{int(0), false, true, "which is not comparable"},
+ leTestCase{int8(0), false, true, "which is not comparable"},
+ leTestCase{int16(0), false, true, "which is not comparable"},
+ leTestCase{int32(0), false, true, "which is not comparable"},
+ leTestCase{int64(0), false, true, "which is not comparable"},
+ leTestCase{uint(0), false, true, "which is not comparable"},
+ leTestCase{uint8(0), false, true, "which is not comparable"},
+ leTestCase{uint16(0), false, true, "which is not comparable"},
+ leTestCase{uint32(0), false, true, "which is not comparable"},
+ leTestCase{uint64(0), false, true, "which is not comparable"},
+ leTestCase{float32(0), false, true, "which is not comparable"},
+ leTestCase{float64(0), false, true, "which is not comparable"},
+ leTestCase{complex64(-151), false, true, "which is not comparable"},
+ leTestCase{complex128(-151), false, true, "which is not comparable"},
+ leTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ leTestCase{make(chan int), false, true, "which is not comparable"},
+ leTestCase{func() {}, false, true, "which is not comparable"},
+ leTestCase{map[int]int{}, false, true, "which is not comparable"},
+ leTestCase{&leTestCase{}, false, true, "which is not comparable"},
+ leTestCase{make([]int, 0), false, true, "which is not comparable"},
+ leTestCase{leTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) BadArgument() {
+ panicked := false
+
+ defer func() {
+ ExpectThat(panicked, Equals(true))
+ }()
+
+ defer func() {
+ if r := recover(); r != nil {
+ panicked = true
+ }
+ }()
+
+ LessOrEqual(complex128(0))
+}
+
+////////////////////////////////////////////////////////////////////////
+// Integer literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessOrEqualTest) NegativeIntegerLiteral() {
+ matcher := LessOrEqual(-150)
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to -150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-(1 << 30), true, false, ""},
+ leTestCase{-151, true, false, ""},
+ leTestCase{-150, true, false, ""},
+ leTestCase{-149, false, false, ""},
+ leTestCase{0, false, false, ""},
+ leTestCase{17, false, false, ""},
+
+ leTestCase{int(-(1 << 30)), true, false, ""},
+ leTestCase{int(-151), true, false, ""},
+ leTestCase{int(-150), true, false, ""},
+ leTestCase{int(-149), false, false, ""},
+ leTestCase{int(0), false, false, ""},
+ leTestCase{int(17), false, false, ""},
+
+ leTestCase{int8(-127), false, false, ""},
+ leTestCase{int8(0), false, false, ""},
+ leTestCase{int8(17), false, false, ""},
+
+ leTestCase{int16(-(1 << 14)), true, false, ""},
+ leTestCase{int16(-151), true, false, ""},
+ leTestCase{int16(-150), true, false, ""},
+ leTestCase{int16(-149), false, false, ""},
+ leTestCase{int16(0), false, false, ""},
+ leTestCase{int16(17), false, false, ""},
+
+ leTestCase{int32(-(1 << 30)), true, false, ""},
+ leTestCase{int32(-151), true, false, ""},
+ leTestCase{int32(-150), true, false, ""},
+ leTestCase{int32(-149), false, false, ""},
+ leTestCase{int32(0), false, false, ""},
+ leTestCase{int32(17), false, false, ""},
+
+ leTestCase{int64(-(1 << 30)), true, false, ""},
+ leTestCase{int64(-151), true, false, ""},
+ leTestCase{int64(-150), true, false, ""},
+ leTestCase{int64(-149), false, false, ""},
+ leTestCase{int64(0), false, false, ""},
+ leTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint((1 << 32) - 151), false, false, ""},
+ leTestCase{uint(0), false, false, ""},
+ leTestCase{uint(17), false, false, ""},
+
+ leTestCase{uint8(0), false, false, ""},
+ leTestCase{uint8(17), false, false, ""},
+ leTestCase{uint8(253), false, false, ""},
+
+ leTestCase{uint16((1 << 16) - 151), false, false, ""},
+ leTestCase{uint16(0), false, false, ""},
+ leTestCase{uint16(17), false, false, ""},
+
+ leTestCase{uint32((1 << 32) - 151), false, false, ""},
+ leTestCase{uint32(0), false, false, ""},
+ leTestCase{uint32(17), false, false, ""},
+
+ leTestCase{uint64((1 << 64) - 151), false, false, ""},
+ leTestCase{uint64(0), false, false, ""},
+ leTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-(1 << 30)), true, false, ""},
+ leTestCase{float32(-151), true, false, ""},
+ leTestCase{float32(-150.1), true, false, ""},
+ leTestCase{float32(-150), true, false, ""},
+ leTestCase{float32(-149.9), false, false, ""},
+ leTestCase{float32(0), false, false, ""},
+ leTestCase{float32(17), false, false, ""},
+ leTestCase{float32(160), false, false, ""},
+
+ leTestCase{float64(-(1 << 30)), true, false, ""},
+ leTestCase{float64(-151), true, false, ""},
+ leTestCase{float64(-150.1), true, false, ""},
+ leTestCase{float64(-150), true, false, ""},
+ leTestCase{float64(-149.9), false, false, ""},
+ leTestCase{float64(0), false, false, ""},
+ leTestCase{float64(17), false, false, ""},
+ leTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) ZeroIntegerLiteral() {
+ matcher := LessOrEqual(0)
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 0"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-(1 << 30), true, false, ""},
+ leTestCase{-1, true, false, ""},
+ leTestCase{0, true, false, ""},
+ leTestCase{1, false, false, ""},
+ leTestCase{17, false, false, ""},
+ leTestCase{(1 << 30), false, false, ""},
+
+ leTestCase{int(-(1 << 30)), true, false, ""},
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(0), true, false, ""},
+ leTestCase{int(1), false, false, ""},
+ leTestCase{int(17), false, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(0), true, false, ""},
+ leTestCase{int8(1), false, false, ""},
+
+ leTestCase{int16(-(1 << 14)), true, false, ""},
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(0), true, false, ""},
+ leTestCase{int16(1), false, false, ""},
+ leTestCase{int16(17), false, false, ""},
+
+ leTestCase{int32(-(1 << 30)), true, false, ""},
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(0), true, false, ""},
+ leTestCase{int32(1), false, false, ""},
+ leTestCase{int32(17), false, false, ""},
+
+ leTestCase{int64(-(1 << 30)), true, false, ""},
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(0), true, false, ""},
+ leTestCase{int64(1), false, false, ""},
+ leTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint((1 << 32) - 1), false, false, ""},
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(1), false, false, ""},
+ leTestCase{uint(17), false, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(1), false, false, ""},
+ leTestCase{uint8(17), false, false, ""},
+ leTestCase{uint8(253), false, false, ""},
+
+ leTestCase{uint16((1 << 16) - 1), false, false, ""},
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(1), false, false, ""},
+ leTestCase{uint16(17), false, false, ""},
+
+ leTestCase{uint32((1 << 32) - 1), false, false, ""},
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(1), false, false, ""},
+ leTestCase{uint32(17), false, false, ""},
+
+ leTestCase{uint64((1 << 64) - 1), false, false, ""},
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(1), false, false, ""},
+ leTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-(1 << 30)), true, false, ""},
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(-0.1), true, false, ""},
+ leTestCase{float32(-0.0), true, false, ""},
+ leTestCase{float32(0), true, false, ""},
+ leTestCase{float32(0.1), false, false, ""},
+ leTestCase{float32(17), false, false, ""},
+ leTestCase{float32(160), false, false, ""},
+
+ leTestCase{float64(-(1 << 30)), true, false, ""},
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(-0.1), true, false, ""},
+ leTestCase{float64(-0), true, false, ""},
+ leTestCase{float64(0), true, false, ""},
+ leTestCase{float64(0.1), false, false, ""},
+ leTestCase{float64(17), false, false, ""},
+ leTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) PositiveIntegerLiteral() {
+ matcher := LessOrEqual(150)
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{149, true, false, ""},
+ leTestCase{150, true, false, ""},
+ leTestCase{151, false, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(149), true, false, ""},
+ leTestCase{int(150), true, false, ""},
+ leTestCase{int(151), false, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(0), true, false, ""},
+ leTestCase{int8(17), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(149), true, false, ""},
+ leTestCase{int16(150), true, false, ""},
+ leTestCase{int16(151), false, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(149), true, false, ""},
+ leTestCase{int32(150), true, false, ""},
+ leTestCase{int32(151), false, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(149), true, false, ""},
+ leTestCase{int64(150), true, false, ""},
+ leTestCase{int64(151), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(149), true, false, ""},
+ leTestCase{uint(150), true, false, ""},
+ leTestCase{uint(151), false, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(127), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(149), true, false, ""},
+ leTestCase{uint16(150), true, false, ""},
+ leTestCase{uint16(151), false, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(149), true, false, ""},
+ leTestCase{uint32(150), true, false, ""},
+ leTestCase{uint32(151), false, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(149), true, false, ""},
+ leTestCase{uint64(150), true, false, ""},
+ leTestCase{uint64(151), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(149), true, false, ""},
+ leTestCase{float32(149.9), true, false, ""},
+ leTestCase{float32(150), true, false, ""},
+ leTestCase{float32(150.1), false, false, ""},
+ leTestCase{float32(151), false, false, ""},
+
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(149), true, false, ""},
+ leTestCase{float64(149.9), true, false, ""},
+ leTestCase{float64(150), true, false, ""},
+ leTestCase{float64(150.1), false, false, ""},
+ leTestCase{float64(151), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Float literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessOrEqualTest) NegativeFloatLiteral() {
+ matcher := LessOrEqual(-150.1)
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to -150.1"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-(1 << 30), true, false, ""},
+ leTestCase{-151, true, false, ""},
+ leTestCase{-150.1, true, false, ""},
+ leTestCase{-150, false, false, ""},
+ leTestCase{-149, false, false, ""},
+ leTestCase{0, false, false, ""},
+ leTestCase{17, false, false, ""},
+
+ leTestCase{int(-(1 << 30)), true, false, ""},
+ leTestCase{int(-151), true, false, ""},
+ leTestCase{int(-150), false, false, ""},
+ leTestCase{int(-149), false, false, ""},
+ leTestCase{int(0), false, false, ""},
+ leTestCase{int(17), false, false, ""},
+
+ leTestCase{int8(-127), false, false, ""},
+ leTestCase{int8(0), false, false, ""},
+ leTestCase{int8(17), false, false, ""},
+
+ leTestCase{int16(-(1 << 14)), true, false, ""},
+ leTestCase{int16(-151), true, false, ""},
+ leTestCase{int16(-150), false, false, ""},
+ leTestCase{int16(-149), false, false, ""},
+ leTestCase{int16(0), false, false, ""},
+ leTestCase{int16(17), false, false, ""},
+
+ leTestCase{int32(-(1 << 30)), true, false, ""},
+ leTestCase{int32(-151), true, false, ""},
+ leTestCase{int32(-150), false, false, ""},
+ leTestCase{int32(-149), false, false, ""},
+ leTestCase{int32(0), false, false, ""},
+ leTestCase{int32(17), false, false, ""},
+
+ leTestCase{int64(-(1 << 30)), true, false, ""},
+ leTestCase{int64(-151), true, false, ""},
+ leTestCase{int64(-150), false, false, ""},
+ leTestCase{int64(-149), false, false, ""},
+ leTestCase{int64(0), false, false, ""},
+ leTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint((1 << 32) - 151), false, false, ""},
+ leTestCase{uint(0), false, false, ""},
+ leTestCase{uint(17), false, false, ""},
+
+ leTestCase{uint8(0), false, false, ""},
+ leTestCase{uint8(17), false, false, ""},
+ leTestCase{uint8(253), false, false, ""},
+
+ leTestCase{uint16((1 << 16) - 151), false, false, ""},
+ leTestCase{uint16(0), false, false, ""},
+ leTestCase{uint16(17), false, false, ""},
+
+ leTestCase{uint32((1 << 32) - 151), false, false, ""},
+ leTestCase{uint32(0), false, false, ""},
+ leTestCase{uint32(17), false, false, ""},
+
+ leTestCase{uint64((1 << 64) - 151), false, false, ""},
+ leTestCase{uint64(0), false, false, ""},
+ leTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-(1 << 30)), true, false, ""},
+ leTestCase{float32(-151), true, false, ""},
+ leTestCase{float32(-150.2), true, false, ""},
+ leTestCase{float32(-150.1), true, false, ""},
+ leTestCase{float32(-150), false, false, ""},
+ leTestCase{float32(0), false, false, ""},
+ leTestCase{float32(17), false, false, ""},
+ leTestCase{float32(160), false, false, ""},
+
+ leTestCase{float64(-(1 << 30)), true, false, ""},
+ leTestCase{float64(-151), true, false, ""},
+ leTestCase{float64(-150.2), true, false, ""},
+ leTestCase{float64(-150.1), true, false, ""},
+ leTestCase{float64(-150), false, false, ""},
+ leTestCase{float64(0), false, false, ""},
+ leTestCase{float64(17), false, false, ""},
+ leTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) PositiveFloatLiteral() {
+ matcher := LessOrEqual(149.9)
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 149.9"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{149, true, false, ""},
+ leTestCase{149.9, true, false, ""},
+ leTestCase{150, false, false, ""},
+ leTestCase{151, false, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(149), true, false, ""},
+ leTestCase{int(150), false, false, ""},
+ leTestCase{int(151), false, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(0), true, false, ""},
+ leTestCase{int8(17), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(149), true, false, ""},
+ leTestCase{int16(150), false, false, ""},
+ leTestCase{int16(151), false, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(149), true, false, ""},
+ leTestCase{int32(150), false, false, ""},
+ leTestCase{int32(151), false, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(149), true, false, ""},
+ leTestCase{int64(150), false, false, ""},
+ leTestCase{int64(151), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(149), true, false, ""},
+ leTestCase{uint(150), false, false, ""},
+ leTestCase{uint(151), false, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(127), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(149), true, false, ""},
+ leTestCase{uint16(150), false, false, ""},
+ leTestCase{uint16(151), false, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(149), true, false, ""},
+ leTestCase{uint32(150), false, false, ""},
+ leTestCase{uint32(151), false, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(149), true, false, ""},
+ leTestCase{uint64(150), false, false, ""},
+ leTestCase{uint64(151), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(149), true, false, ""},
+ leTestCase{float32(149.8), true, false, ""},
+ leTestCase{float32(149.9), true, false, ""},
+ leTestCase{float32(150), false, false, ""},
+ leTestCase{float32(151), false, false, ""},
+
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(149), true, false, ""},
+ leTestCase{float64(149.8), true, false, ""},
+ leTestCase{float64(149.9), true, false, ""},
+ leTestCase{float64(150), false, false, ""},
+ leTestCase{float64(151), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Subtle cases
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessOrEqualTest) Int64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := LessOrEqual(int64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{kTwoTo25 + 0, true, false, ""},
+ leTestCase{kTwoTo25 + 1, true, false, ""},
+ leTestCase{kTwoTo25 + 2, false, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(0), true, false, ""},
+ leTestCase{int16(32767), true, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(255), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(65535), true, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ leTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
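+
+// A minimal sketch of the rounding described above (plain Go constant
+// conversions, independent of the matcher):
+//
+//	_ = float32(1<<25-1) == float32(1<<25+2) // true: both round to 2^25
+//	_ = float64(1<<54-1) == float64(1<<54+2) // true: both round to 2^54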
+
+func (t *LessOrEqualTest) Int64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessOrEqual(int64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{1 << 30, true, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(math.MaxInt32), true, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(0), true, false, ""},
+ leTestCase{int16(32767), true, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(math.MaxInt32), true, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(math.MaxUint32), true, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(255), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(65535), true, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(math.MaxUint32), true, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Floating point.
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) Uint64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := LessOrEqual(uint64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{kTwoTo25 + 0, true, false, ""},
+ leTestCase{kTwoTo25 + 1, true, false, ""},
+ leTestCase{kTwoTo25 + 2, false, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(0), true, false, ""},
+ leTestCase{int16(32767), true, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int32(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(255), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(65535), true, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint32(kTwoTo25 + 2), false, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ leTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) Uint64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessOrEqual(uint64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{-1, true, false, ""},
+ leTestCase{1 << 30, true, false, ""},
+
+ leTestCase{int(-1), true, false, ""},
+ leTestCase{int(math.MaxInt32), true, false, ""},
+
+ leTestCase{int8(-1), true, false, ""},
+ leTestCase{int8(127), true, false, ""},
+
+ leTestCase{int16(-1), true, false, ""},
+ leTestCase{int16(0), true, false, ""},
+ leTestCase{int16(32767), true, false, ""},
+
+ leTestCase{int32(-1), true, false, ""},
+ leTestCase{int32(math.MaxInt32), true, false, ""},
+
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint(0), true, false, ""},
+ leTestCase{uint(math.MaxUint32), true, false, ""},
+
+ leTestCase{uint8(0), true, false, ""},
+ leTestCase{uint8(255), true, false, ""},
+
+ leTestCase{uint16(0), true, false, ""},
+ leTestCase{uint16(65535), true, false, ""},
+
+ leTestCase{uint32(0), true, false, ""},
+ leTestCase{uint32(math.MaxUint32), true, false, ""},
+
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Floating point.
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) Float32AboveExactIntegerRange() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := LessOrEqual(float32(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 3.3554432e+07"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo25 - 2), true, false, ""},
+ leTestCase{int64(kTwoTo25 - 1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 2), true, false, ""},
+ leTestCase{int64(kTwoTo25 + 3), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 - 2), true, false, ""},
+ leTestCase{uint64(kTwoTo25 - 1), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 2), true, false, ""},
+ leTestCase{uint64(kTwoTo25 + 3), false, false, ""},
+
+ // Floating point.
+ leTestCase{float32(-1), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 2), true, false, ""},
+ leTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 2), true, false, ""},
+ leTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) Float64AboveExactIntegerRange() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessOrEqual(float64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to 1.8014398509481984e+16"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ // Signed integers.
+ leTestCase{int64(-1), true, false, ""},
+ leTestCase{int64(kTwoTo54 - 2), true, false, ""},
+ leTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 2), true, false, ""},
+ leTestCase{int64(kTwoTo54 + 3), false, false, ""},
+
+ // Unsigned integers.
+ leTestCase{uint64(0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 - 2), true, false, ""},
+ leTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 2), true, false, ""},
+ leTestCase{uint64(kTwoTo54 + 3), false, false, ""},
+
+ // Floating point.
+ leTestCase{float64(-1), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 - 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 0), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 1), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 2), true, false, ""},
+ leTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// String literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessOrEqualTest) EmptyString() {
+ matcher := LessOrEqual("")
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to \"\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ leTestCase{"", true, false, ""},
+ leTestCase{"\x00", false, false, ""},
+ leTestCase{"a", false, false, ""},
+ leTestCase{"foo", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) SingleNullByte() {
+ matcher := LessOrEqual("\x00")
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to \"\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ leTestCase{"", true, false, ""},
+ leTestCase{"\x00", true, false, ""},
+ leTestCase{"\x00\x00", false, false, ""},
+ leTestCase{"a", false, false, ""},
+ leTestCase{"foo", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessOrEqualTest) LongerString() {
+ matcher := LessOrEqual("foo\x00")
+ desc := matcher.Description()
+ expectedDesc := "less than or equal to \"foo\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []leTestCase{
+ leTestCase{"", true, false, ""},
+ leTestCase{"\x00", true, false, ""},
+ leTestCase{"bar", true, false, ""},
+ leTestCase{"foo", true, false, ""},
+ leTestCase{"foo\x00", true, false, ""},
+ leTestCase{"foo\x00\x00", false, false, ""},
+ leTestCase{"fooa", false, false, ""},
+ leTestCase{"qux", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_than.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_than.go
new file mode 100644
index 00000000000..8258e45d99d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_than.go
@@ -0,0 +1,152 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+)
+
+// LessThan returns a matcher that matches integer, floating point, or string
+// values v such that v < x. Comparison is not defined between numeric and
+// string types, but is defined between all integer and floating point types.
+//
+// x must itself be an integer, floating point, or string type; otherwise,
+// LessThan will panic.
+func LessThan(x interface{}) Matcher {
+ v := reflect.ValueOf(x)
+ kind := v.Kind()
+
+ switch {
+ case isInteger(v):
+ case isFloat(v):
+ case kind == reflect.String:
+
+ default:
+ panic(fmt.Sprintf("LessThan: unexpected kind %v", kind))
+ }
+
+ return &lessThanMatcher{v}
+}
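+
+// Illustrative usage (a sketch; a nil error from Matches means a match):
+//
+//	m := LessThan(150)
+//	m.Matches(149)   // nil: 149 < 150
+//	m.Matches(150)   // non-nil error: 150 is not less than 150
+//	m.Matches("foo") // non-nil *FatalError: string vs. numeric limit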
+
+type lessThanMatcher struct {
+ limit reflect.Value
+}
+
+func (m *lessThanMatcher) Description() string {
+ // Special case: make it clear that strings are strings.
+ if m.limit.Kind() == reflect.String {
+ return fmt.Sprintf("less than \"%s\"", m.limit.String())
+ }
+
+ return fmt.Sprintf("less than %v", m.limit.Interface())
+}
+
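+// compareIntegers returns nil if v1 < v2 and a non-nil error otherwise. It
+// handles mixed signedness without conversion overflow: a negative signed
+// value is less than every unsigned value, and an unsigned value above
+// math.MaxInt64 is never less than any signed value. It panics if either
+// value is not an integer kind.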
+func compareIntegers(v1, v2 reflect.Value) (err error) {
+ err = errors.New("")
+
+ switch {
+ case isSignedInteger(v1) && isSignedInteger(v2):
+ if v1.Int() < v2.Int() {
+ err = nil
+ }
+ return
+
+ case isSignedInteger(v1) && isUnsignedInteger(v2):
+ if v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() {
+ err = nil
+ }
+ return
+
+ case isUnsignedInteger(v1) && isSignedInteger(v2):
+ if v1.Uint() <= math.MaxInt64 && int64(v1.Uint()) < v2.Int() {
+ err = nil
+ }
+ return
+
+ case isUnsignedInteger(v1) && isUnsignedInteger(v2):
+ if v1.Uint() < v2.Uint() {
+ err = nil
+ }
+ return
+ }
+
+ panic(fmt.Sprintf("compareIntegers: %v %v", v1, v2))
+}
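+
+// A sketch of why the math.MaxInt64 guard above matters: a plain conversion
+// would wrap large unsigned values to negative signed ones.
+//
+//	v1 := reflect.ValueOf(uint64(math.MaxInt64) + 1) // 2^63
+//	v2 := reflect.ValueOf(int64(-1))
+//	compareIntegers(v1, v2) // non-nil: 2^63 is not < -1; int64(2^63) would
+//	                        // wrap to math.MinInt64 and wrongly compare less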
+
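+// getFloat returns v's value as a float64, accepting signed integer, unsigned
+// integer, and floating point kinds; it panics for any other kind.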
+func getFloat(v reflect.Value) float64 {
+ switch {
+ case isSignedInteger(v):
+ return float64(v.Int())
+
+ case isUnsignedInteger(v):
+ return float64(v.Uint())
+
+ case isFloat(v):
+ return v.Float()
+ }
+
+ panic(fmt.Sprintf("getFloat: %v", v))
+}
+
+func (m *lessThanMatcher) Matches(c interface{}) (err error) {
+ v1 := reflect.ValueOf(c)
+ v2 := m.limit
+
+ err = errors.New("")
+
+ // Handle strings as a special case.
+ if v1.Kind() == reflect.String && v2.Kind() == reflect.String {
+ if v1.String() < v2.String() {
+ err = nil
+ }
+ return
+ }
+
+ // If we get here, we require that we are dealing with integers or floats.
+ v1Legal := isInteger(v1) || isFloat(v1)
+ v2Legal := isInteger(v2) || isFloat(v2)
+ if !v1Legal || !v2Legal {
+ err = NewFatalError("which is not comparable")
+ return
+ }
+
+ // Handle the various comparison cases.
+ switch {
+ // Both integers
+ case isInteger(v1) && isInteger(v2):
+ return compareIntegers(v1, v2)
+
+ // At least one float32
+ case v1.Kind() == reflect.Float32 || v2.Kind() == reflect.Float32:
+ if float32(getFloat(v1)) < float32(getFloat(v2)) {
+ err = nil
+ }
+ return
+
+ // At least one float64
+ case v1.Kind() == reflect.Float64 || v2.Kind() == reflect.Float64:
+ if getFloat(v1) < getFloat(v2) {
+ err = nil
+ }
+ return
+ }
+
+ // We shouldn't get here.
+ panic(fmt.Sprintf("lessThanMatcher.Matches: Shouldn't get here: %v %v", v1, v2))
+}
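+
+// A note on the case order above: integer/integer pairs are compared exactly;
+// otherwise, if either side is a float32, both sides are reduced to float32
+// before comparing, so a float32 limit coarsens an int64 candidate. A sketch:
+//
+//	m := LessThan(float32(1 << 25))
+//	m.Matches(int64(1<<25 - 1)) // non-nil: the candidate rounds up to 2^25
+//	m.Matches(int64(1<<25 - 2)) // nil: 2^25-2 is exactly representable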
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_than_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_than_test.go
new file mode 100644
index 00000000000..59f5b7f56bd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/less_than_test.go
@@ -0,0 +1,1057 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "math"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type LessThanTest struct {
+}
+
+func init() { RegisterTestSuite(&LessThanTest{}) }
+
+type ltTestCase struct {
+ candidate interface{}
+ expectedResult bool
+ shouldBeFatal bool
+ expectedError string
+}
+
+func (t *LessThanTest) checkTestCases(matcher Matcher, cases []ltTestCase) {
+ for i, c := range cases {
+ err := matcher.Matches(c.candidate)
+
+ ExpectThat(
+ (err == nil),
+ Equals(c.expectedResult),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ if err == nil {
+ continue
+ }
+
+ _, isFatal := err.(*FatalError)
+ ExpectEq(
+ c.shouldBeFatal,
+ isFatal,
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+
+ ExpectThat(
+ err,
+ Error(Equals(c.expectedError)),
+ "Case %d (candidate %v)",
+ i,
+ c.candidate)
+ }
+}
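+
+// The shouldBeFatal column tracks the error's concrete type; a brief sketch
+// of the distinction the loop above draws:
+//
+//	var err error = NewFatalError("which is not comparable")
+//	_, isFatal := err.(*FatalError) // true: the candidate's type is unusable
+//	err = errors.New("")            // an ordinary mismatch (assumes "errors")
+//	_, isFatal = err.(*FatalError)  // false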
+
+////////////////////////////////////////////////////////////////////////
+// Bad types
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessThanTest) IntegerCandidateBadTypes() {
+ matcher := LessThan(int(-150))
+
+ cases := []ltTestCase{
+ ltTestCase{true, false, true, "which is not comparable"},
+ ltTestCase{complex64(-151), false, true, "which is not comparable"},
+ ltTestCase{complex128(-151), false, true, "which is not comparable"},
+ ltTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ ltTestCase{make(chan int), false, true, "which is not comparable"},
+ ltTestCase{func() {}, false, true, "which is not comparable"},
+ ltTestCase{map[int]int{}, false, true, "which is not comparable"},
+ ltTestCase{&ltTestCase{}, false, true, "which is not comparable"},
+ ltTestCase{make([]int, 0), false, true, "which is not comparable"},
+ ltTestCase{"-151", false, true, "which is not comparable"},
+ ltTestCase{ltTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) FloatCandidateBadTypes() {
+ matcher := LessThan(float32(-150))
+
+ cases := []ltTestCase{
+ ltTestCase{true, false, true, "which is not comparable"},
+ ltTestCase{complex64(-151), false, true, "which is not comparable"},
+ ltTestCase{complex128(-151), false, true, "which is not comparable"},
+ ltTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ ltTestCase{make(chan int), false, true, "which is not comparable"},
+ ltTestCase{func() {}, false, true, "which is not comparable"},
+ ltTestCase{map[int]int{}, false, true, "which is not comparable"},
+ ltTestCase{&ltTestCase{}, false, true, "which is not comparable"},
+ ltTestCase{make([]int, 0), false, true, "which is not comparable"},
+ ltTestCase{"-151", false, true, "which is not comparable"},
+ ltTestCase{ltTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) StringCandidateBadTypes() {
+ matcher := LessThan("17")
+
+ cases := []ltTestCase{
+ ltTestCase{true, false, true, "which is not comparable"},
+ ltTestCase{int(0), false, true, "which is not comparable"},
+ ltTestCase{int8(0), false, true, "which is not comparable"},
+ ltTestCase{int16(0), false, true, "which is not comparable"},
+ ltTestCase{int32(0), false, true, "which is not comparable"},
+ ltTestCase{int64(0), false, true, "which is not comparable"},
+ ltTestCase{uint(0), false, true, "which is not comparable"},
+ ltTestCase{uint8(0), false, true, "which is not comparable"},
+ ltTestCase{uint16(0), false, true, "which is not comparable"},
+ ltTestCase{uint32(0), false, true, "which is not comparable"},
+ ltTestCase{uint64(0), false, true, "which is not comparable"},
+ ltTestCase{float32(0), false, true, "which is not comparable"},
+ ltTestCase{float64(0), false, true, "which is not comparable"},
+ ltTestCase{complex64(-151), false, true, "which is not comparable"},
+ ltTestCase{complex128(-151), false, true, "which is not comparable"},
+ ltTestCase{[...]int{-151}, false, true, "which is not comparable"},
+ ltTestCase{make(chan int), false, true, "which is not comparable"},
+ ltTestCase{func() {}, false, true, "which is not comparable"},
+ ltTestCase{map[int]int{}, false, true, "which is not comparable"},
+ ltTestCase{&ltTestCase{}, false, true, "which is not comparable"},
+ ltTestCase{make([]int, 0), false, true, "which is not comparable"},
+ ltTestCase{ltTestCase{}, false, true, "which is not comparable"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) BadArgument() {
+ panicked := false
+
+ defer func() {
+ ExpectThat(panicked, Equals(true))
+ }()
+
+ defer func() {
+ if r := recover(); r != nil {
+ panicked = true
+ }
+ }()
+
+ LessThan(complex128(0))
+}
+
+////////////////////////////////////////////////////////////////////////
+// Integer literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessThanTest) NegativeIntegerLiteral() {
+ matcher := LessThan(-150)
+ desc := matcher.Description()
+ expectedDesc := "less than -150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-(1 << 30), true, false, ""},
+ ltTestCase{-151, true, false, ""},
+ ltTestCase{-150, false, false, ""},
+ ltTestCase{0, false, false, ""},
+ ltTestCase{17, false, false, ""},
+
+ ltTestCase{int(-(1 << 30)), true, false, ""},
+ ltTestCase{int(-151), true, false, ""},
+ ltTestCase{int(-150), false, false, ""},
+ ltTestCase{int(0), false, false, ""},
+ ltTestCase{int(17), false, false, ""},
+
+ ltTestCase{int8(-127), false, false, ""},
+ ltTestCase{int8(0), false, false, ""},
+ ltTestCase{int8(17), false, false, ""},
+
+ ltTestCase{int16(-(1 << 14)), true, false, ""},
+ ltTestCase{int16(-151), true, false, ""},
+ ltTestCase{int16(-150), false, false, ""},
+ ltTestCase{int16(0), false, false, ""},
+ ltTestCase{int16(17), false, false, ""},
+
+ ltTestCase{int32(-(1 << 30)), true, false, ""},
+ ltTestCase{int32(-151), true, false, ""},
+ ltTestCase{int32(-150), false, false, ""},
+ ltTestCase{int32(0), false, false, ""},
+ ltTestCase{int32(17), false, false, ""},
+
+ ltTestCase{int64(-(1 << 30)), true, false, ""},
+ ltTestCase{int64(-151), true, false, ""},
+ ltTestCase{int64(-150), false, false, ""},
+ ltTestCase{int64(0), false, false, ""},
+ ltTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint((1 << 32) - 151), false, false, ""},
+ ltTestCase{uint(0), false, false, ""},
+ ltTestCase{uint(17), false, false, ""},
+
+ ltTestCase{uint8(0), false, false, ""},
+ ltTestCase{uint8(17), false, false, ""},
+ ltTestCase{uint8(253), false, false, ""},
+
+ ltTestCase{uint16((1 << 16) - 151), false, false, ""},
+ ltTestCase{uint16(0), false, false, ""},
+ ltTestCase{uint16(17), false, false, ""},
+
+ ltTestCase{uint32((1 << 32) - 151), false, false, ""},
+ ltTestCase{uint32(0), false, false, ""},
+ ltTestCase{uint32(17), false, false, ""},
+
+ ltTestCase{uint64((1 << 64) - 151), false, false, ""},
+ ltTestCase{uint64(0), false, false, ""},
+ ltTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-(1 << 30)), true, false, ""},
+ ltTestCase{float32(-151), true, false, ""},
+ ltTestCase{float32(-150.1), true, false, ""},
+ ltTestCase{float32(-150), false, false, ""},
+ ltTestCase{float32(-149.9), false, false, ""},
+ ltTestCase{float32(0), false, false, ""},
+ ltTestCase{float32(17), false, false, ""},
+ ltTestCase{float32(160), false, false, ""},
+
+ ltTestCase{float64(-(1 << 30)), true, false, ""},
+ ltTestCase{float64(-151), true, false, ""},
+ ltTestCase{float64(-150.1), true, false, ""},
+ ltTestCase{float64(-150), false, false, ""},
+ ltTestCase{float64(-149.9), false, false, ""},
+ ltTestCase{float64(0), false, false, ""},
+ ltTestCase{float64(17), false, false, ""},
+ ltTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) ZeroIntegerLiteral() {
+ matcher := LessThan(0)
+ desc := matcher.Description()
+ expectedDesc := "less than 0"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-(1 << 30), true, false, ""},
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{0, false, false, ""},
+ ltTestCase{1, false, false, ""},
+ ltTestCase{17, false, false, ""},
+ ltTestCase{(1 << 30), false, false, ""},
+
+ ltTestCase{int(-(1 << 30)), true, false, ""},
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(0), false, false, ""},
+ ltTestCase{int(1), false, false, ""},
+ ltTestCase{int(17), false, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(0), false, false, ""},
+ ltTestCase{int8(1), false, false, ""},
+
+ ltTestCase{int16(-(1 << 14)), true, false, ""},
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(0), false, false, ""},
+ ltTestCase{int16(1), false, false, ""},
+ ltTestCase{int16(17), false, false, ""},
+
+ ltTestCase{int32(-(1 << 30)), true, false, ""},
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(0), false, false, ""},
+ ltTestCase{int32(1), false, false, ""},
+ ltTestCase{int32(17), false, false, ""},
+
+ ltTestCase{int64(-(1 << 30)), true, false, ""},
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(0), false, false, ""},
+ ltTestCase{int64(1), false, false, ""},
+ ltTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint((1 << 32) - 1), false, false, ""},
+ ltTestCase{uint(0), false, false, ""},
+ ltTestCase{uint(17), false, false, ""},
+
+ ltTestCase{uint8(0), false, false, ""},
+ ltTestCase{uint8(17), false, false, ""},
+ ltTestCase{uint8(253), false, false, ""},
+
+ ltTestCase{uint16((1 << 16) - 1), false, false, ""},
+ ltTestCase{uint16(0), false, false, ""},
+ ltTestCase{uint16(17), false, false, ""},
+
+ ltTestCase{uint32((1 << 32) - 1), false, false, ""},
+ ltTestCase{uint32(0), false, false, ""},
+ ltTestCase{uint32(17), false, false, ""},
+
+ ltTestCase{uint64((1 << 64) - 1), false, false, ""},
+ ltTestCase{uint64(0), false, false, ""},
+ ltTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-(1 << 30)), true, false, ""},
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(-0.1), true, false, ""},
+ ltTestCase{float32(-0.0), false, false, ""},
+ ltTestCase{float32(0), false, false, ""},
+ ltTestCase{float32(0.1), false, false, ""},
+ ltTestCase{float32(17), false, false, ""},
+ ltTestCase{float32(160), false, false, ""},
+
+ ltTestCase{float64(-(1 << 30)), true, false, ""},
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(-0.1), true, false, ""},
+ ltTestCase{float64(-0), false, false, ""},
+ ltTestCase{float64(0), false, false, ""},
+ ltTestCase{float64(17), false, false, ""},
+ ltTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) PositiveIntegerLiteral() {
+ matcher := LessThan(150)
+ desc := matcher.Description()
+ expectedDesc := "less than 150"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{149, true, false, ""},
+ ltTestCase{150, false, false, ""},
+ ltTestCase{151, false, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(149), true, false, ""},
+ ltTestCase{int(150), false, false, ""},
+ ltTestCase{int(151), false, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(0), true, false, ""},
+ ltTestCase{int8(17), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(149), true, false, ""},
+ ltTestCase{int16(150), false, false, ""},
+ ltTestCase{int16(151), false, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(149), true, false, ""},
+ ltTestCase{int32(150), false, false, ""},
+ ltTestCase{int32(151), false, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(149), true, false, ""},
+ ltTestCase{int64(150), false, false, ""},
+ ltTestCase{int64(151), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(149), true, false, ""},
+ ltTestCase{uint(150), false, false, ""},
+ ltTestCase{uint(151), false, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(127), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(149), true, false, ""},
+ ltTestCase{uint16(150), false, false, ""},
+ ltTestCase{uint16(151), false, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(149), true, false, ""},
+ ltTestCase{uint32(150), false, false, ""},
+ ltTestCase{uint32(151), false, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(149), true, false, ""},
+ ltTestCase{uint64(150), false, false, ""},
+ ltTestCase{uint64(151), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(149), true, false, ""},
+ ltTestCase{float32(149.9), true, false, ""},
+ ltTestCase{float32(150), false, false, ""},
+ ltTestCase{float32(150.1), false, false, ""},
+ ltTestCase{float32(151), false, false, ""},
+
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(149), true, false, ""},
+ ltTestCase{float64(149.9), true, false, ""},
+ ltTestCase{float64(150), false, false, ""},
+ ltTestCase{float64(150.1), false, false, ""},
+ ltTestCase{float64(151), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Float literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessThanTest) NegativeFloatLiteral() {
+ matcher := LessThan(-150.1)
+ desc := matcher.Description()
+ expectedDesc := "less than -150.1"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-(1 << 30), true, false, ""},
+ ltTestCase{-151, true, false, ""},
+ ltTestCase{-150, false, false, ""},
+ ltTestCase{0, false, false, ""},
+ ltTestCase{17, false, false, ""},
+
+ ltTestCase{int(-(1 << 30)), true, false, ""},
+ ltTestCase{int(-151), true, false, ""},
+ ltTestCase{int(-150), false, false, ""},
+ ltTestCase{int(0), false, false, ""},
+ ltTestCase{int(17), false, false, ""},
+
+ ltTestCase{int8(-127), false, false, ""},
+ ltTestCase{int8(0), false, false, ""},
+ ltTestCase{int8(17), false, false, ""},
+
+ ltTestCase{int16(-(1 << 14)), true, false, ""},
+ ltTestCase{int16(-151), true, false, ""},
+ ltTestCase{int16(-150), false, false, ""},
+ ltTestCase{int16(0), false, false, ""},
+ ltTestCase{int16(17), false, false, ""},
+
+ ltTestCase{int32(-(1 << 30)), true, false, ""},
+ ltTestCase{int32(-151), true, false, ""},
+ ltTestCase{int32(-150), false, false, ""},
+ ltTestCase{int32(0), false, false, ""},
+ ltTestCase{int32(17), false, false, ""},
+
+ ltTestCase{int64(-(1 << 30)), true, false, ""},
+ ltTestCase{int64(-151), true, false, ""},
+ ltTestCase{int64(-150), false, false, ""},
+ ltTestCase{int64(0), false, false, ""},
+ ltTestCase{int64(17), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint((1 << 32) - 151), false, false, ""},
+ ltTestCase{uint(0), false, false, ""},
+ ltTestCase{uint(17), false, false, ""},
+
+ ltTestCase{uint8(0), false, false, ""},
+ ltTestCase{uint8(17), false, false, ""},
+ ltTestCase{uint8(253), false, false, ""},
+
+ ltTestCase{uint16((1 << 16) - 151), false, false, ""},
+ ltTestCase{uint16(0), false, false, ""},
+ ltTestCase{uint16(17), false, false, ""},
+
+ ltTestCase{uint32((1 << 32) - 151), false, false, ""},
+ ltTestCase{uint32(0), false, false, ""},
+ ltTestCase{uint32(17), false, false, ""},
+
+ ltTestCase{uint64((1 << 64) - 151), false, false, ""},
+ ltTestCase{uint64(0), false, false, ""},
+ ltTestCase{uint64(17), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-(1 << 30)), true, false, ""},
+ ltTestCase{float32(-151), true, false, ""},
+ ltTestCase{float32(-150.2), true, false, ""},
+ ltTestCase{float32(-150.1), false, false, ""},
+ ltTestCase{float32(-150), false, false, ""},
+ ltTestCase{float32(0), false, false, ""},
+ ltTestCase{float32(17), false, false, ""},
+ ltTestCase{float32(160), false, false, ""},
+
+ ltTestCase{float64(-(1 << 30)), true, false, ""},
+ ltTestCase{float64(-151), true, false, ""},
+ ltTestCase{float64(-150.2), true, false, ""},
+ ltTestCase{float64(-150.1), false, false, ""},
+ ltTestCase{float64(-150), false, false, ""},
+ ltTestCase{float64(0), false, false, ""},
+ ltTestCase{float64(17), false, false, ""},
+ ltTestCase{float64(160), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) PositiveFloatLiteral() {
+ matcher := LessThan(149.9)
+ desc := matcher.Description()
+ expectedDesc := "less than 149.9"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{149, true, false, ""},
+ ltTestCase{150, false, false, ""},
+ ltTestCase{151, false, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(149), true, false, ""},
+ ltTestCase{int(150), false, false, ""},
+ ltTestCase{int(151), false, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(0), true, false, ""},
+ ltTestCase{int8(17), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(149), true, false, ""},
+ ltTestCase{int16(150), false, false, ""},
+ ltTestCase{int16(151), false, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(149), true, false, ""},
+ ltTestCase{int32(150), false, false, ""},
+ ltTestCase{int32(151), false, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(149), true, false, ""},
+ ltTestCase{int64(150), false, false, ""},
+ ltTestCase{int64(151), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(149), true, false, ""},
+ ltTestCase{uint(150), false, false, ""},
+ ltTestCase{uint(151), false, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(127), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(149), true, false, ""},
+ ltTestCase{uint16(150), false, false, ""},
+ ltTestCase{uint16(151), false, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(149), true, false, ""},
+ ltTestCase{uint32(150), false, false, ""},
+ ltTestCase{uint32(151), false, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(149), true, false, ""},
+ ltTestCase{uint64(150), false, false, ""},
+ ltTestCase{uint64(151), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(149), true, false, ""},
+ ltTestCase{float32(149.8), true, false, ""},
+ ltTestCase{float32(149.9), false, false, ""},
+ ltTestCase{float32(150), false, false, ""},
+ ltTestCase{float32(151), false, false, ""},
+
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(149), true, false, ""},
+ ltTestCase{float64(149.8), true, false, ""},
+ ltTestCase{float64(149.9), false, false, ""},
+ ltTestCase{float64(150), false, false, ""},
+ ltTestCase{float64(151), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Subtle cases
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessThanTest) Int64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := LessThan(int64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{kTwoTo25 + 0, true, false, ""},
+ ltTestCase{kTwoTo25 + 1, false, false, ""},
+ ltTestCase{kTwoTo25 + 2, false, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(0), true, false, ""},
+ ltTestCase{int16(32767), true, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int32(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(255), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(65535), true, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ ltTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) Int64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessThan(int64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{1 << 30, true, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(math.MaxInt32), true, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(0), true, false, ""},
+ ltTestCase{int16(32767), true, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(math.MaxInt32), true, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ ltTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ ltTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(math.MaxUint32), true, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(255), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(65535), true, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(math.MaxUint32), true, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) Uint64NotExactlyRepresentableBySinglePrecision() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := LessThan(uint64(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 33554433"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{kTwoTo25 + 0, true, false, ""},
+ ltTestCase{kTwoTo25 + 1, false, false, ""},
+ ltTestCase{kTwoTo25 + 2, false, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(0), true, false, ""},
+ ltTestCase{int16(32767), true, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int32(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 2), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(255), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(65535), true, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint32(kTwoTo25 + 2), false, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 1), true, false, ""},
+ ltTestCase{float64(kTwoTo25 + 0), true, false, ""},
+ ltTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) Uint64NotExactlyRepresentableByDoublePrecision() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessThan(uint64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 18014398509481985"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{-1, true, false, ""},
+ ltTestCase{1 << 30, true, false, ""},
+
+ ltTestCase{int(-1), true, false, ""},
+ ltTestCase{int(math.MaxInt32), true, false, ""},
+
+ ltTestCase{int8(-1), true, false, ""},
+ ltTestCase{int8(127), true, false, ""},
+
+ ltTestCase{int16(-1), true, false, ""},
+ ltTestCase{int16(0), true, false, ""},
+ ltTestCase{int16(32767), true, false, ""},
+
+ ltTestCase{int32(-1), true, false, ""},
+ ltTestCase{int32(math.MaxInt32), true, false, ""},
+
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo54 - 1), true, false, ""},
+ ltTestCase{int64(kTwoTo54 + 0), true, false, ""},
+ ltTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 2), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint(0), true, false, ""},
+ ltTestCase{uint(math.MaxUint32), true, false, ""},
+
+ ltTestCase{uint8(0), true, false, ""},
+ ltTestCase{uint8(255), true, false, ""},
+
+ ltTestCase{uint16(0), true, false, ""},
+ ltTestCase{uint16(65535), true, false, ""},
+
+ ltTestCase{uint32(0), true, false, ""},
+ ltTestCase{uint32(math.MaxUint32), true, false, ""},
+
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 - 1), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 0), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) Float32AboveExactIntegerRange() {
+ // Single-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo25 = 1 << 25
+ matcher := LessThan(float32(kTwoTo25 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 3.3554432e+07"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{int64(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{int64(kTwoTo25 + 3), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{uint64(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{uint64(kTwoTo25 + 3), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float32(-1), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float32(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float32(kTwoTo25 + 3), false, false, ""},
+
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo25 - 1), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 0), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo25 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) Float64AboveExactIntegerRange() {
+ // Double-precision floats don't have enough bits to represent the integers
+ // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value
+ // and should be treated as equivalent when floats are in the mix.
+ const kTwoTo54 = 1 << 54
+ matcher := LessThan(float64(kTwoTo54 + 1))
+
+ desc := matcher.Description()
+ expectedDesc := "less than 1.8014398509481984e+16"
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ // Signed integers.
+ ltTestCase{int64(-1), true, false, ""},
+ ltTestCase{int64(kTwoTo54 - 2), true, false, ""},
+ ltTestCase{int64(kTwoTo54 - 1), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 0), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 2), false, false, ""},
+ ltTestCase{int64(kTwoTo54 + 3), false, false, ""},
+
+ // Unsigned integers.
+ ltTestCase{uint64(0), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 - 2), true, false, ""},
+ ltTestCase{uint64(kTwoTo54 - 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 0), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 2), false, false, ""},
+ ltTestCase{uint64(kTwoTo54 + 3), false, false, ""},
+
+ // Floating point.
+ ltTestCase{float64(-1), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 2), true, false, ""},
+ ltTestCase{float64(kTwoTo54 - 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 0), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 1), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 2), false, false, ""},
+ ltTestCase{float64(kTwoTo54 + 3), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+////////////////////////////////////////////////////////////////////////
+// String literals
+////////////////////////////////////////////////////////////////////////
+
+func (t *LessThanTest) EmptyString() {
+ matcher := LessThan("")
+ desc := matcher.Description()
+ expectedDesc := "less than \"\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ ltTestCase{"", false, false, ""},
+ ltTestCase{"\x00", false, false, ""},
+ ltTestCase{"a", false, false, ""},
+ ltTestCase{"foo", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) SingleNullByte() {
+ matcher := LessThan("\x00")
+ desc := matcher.Description()
+ expectedDesc := "less than \"\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ ltTestCase{"", true, false, ""},
+ ltTestCase{"\x00", false, false, ""},
+ ltTestCase{"a", false, false, ""},
+ ltTestCase{"foo", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *LessThanTest) LongerString() {
+ matcher := LessThan("foo\x00")
+ desc := matcher.Description()
+ expectedDesc := "less than \"foo\x00\""
+
+ ExpectThat(desc, Equals(expectedDesc))
+
+ cases := []ltTestCase{
+ ltTestCase{"", true, false, ""},
+ ltTestCase{"\x00", true, false, ""},
+ ltTestCase{"bar", true, false, ""},
+ ltTestCase{"foo", true, false, ""},
+ ltTestCase{"foo\x00", false, false, ""},
+ ltTestCase{"fooa", false, false, ""},
+ ltTestCase{"qux", false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matcher.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matcher.go
new file mode 100644
index 00000000000..78159a0727c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matcher.go
@@ -0,0 +1,86 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package oglematchers provides a set of matchers useful in a testing or
+// mocking framework. These matchers are inspired by and mostly compatible with
+// Google Test for C++ and Google JS Test.
+//
+// This package is used by github.com/smartystreets/assertions/internal/ogletest and
+// github.com/smartystreets/assertions/internal/oglemock, which may be more directly useful if you're not
+// writing your own testing package or defining your own matchers.
+package oglematchers
+
+// A Matcher is some predicate implicitly defining a set of values that it
+// matches. For example, GreaterThan(17) matches all numeric values greater
+// than 17, and HasSubstr("taco") matches all strings with the substring
+// "taco".
+//
+// Matchers are typically exposed to tests via constructor functions like
+// HasSubstr. In order to implement such a function you can either define your
+// own matcher type or use NewMatcher.
+type Matcher interface {
+ // Check whether the supplied value belongs to the set defined by the
+ // matcher. Return a non-nil error if and only if it does not.
+ //
+ // The error describes why the value doesn't match. The error text is a
+ // relative clause that is suitable for being placed after the value. For
+ // example, a predicate that matches strings with a particular substring may,
+ // when presented with a numerical value, return the following error text:
+ //
+ // "which is not a string"
+ //
+ // Then the failure message may look like:
+ //
+ // Expected: has substring "taco"
+ // Actual: 17, which is not a string
+ //
+ // If the error is self-apparent based on the description of the matcher, the
+ // error text may be empty (but the error still non-nil). For example:
+ //
+ // Expected: 17
+ // Actual: 19
+ //
+ // If you are implementing a new matcher, see also the documentation on
+ // FatalError.
+ Matches(candidate interface{}) error
+
+ // Description returns a string describing the property that values matching
+ // this matcher have, as a verb phrase where the subject is the value. For
+ // example, "is greater than 17" or "has substring "taco"".
+ Description() string
+}
+
+// FatalError is an implementation of the error interface that may be returned
+// from matchers, indicating the error should be propagated. Returning a
+// *FatalError indicates that the matcher doesn't process values of the
+// supplied type, or otherwise doesn't know how to handle the value.
+//
+// For example, if GreaterThan(17) returned false for the value "taco" without
+// a fatal error, then Not(GreaterThan(17)) would return true. This is
+// technically correct, but is surprising and may mask failures where the wrong
+// sort of matcher is accidentally used. Instead, GreaterThan(17) can return a
+// fatal error, which will be propagated by Not().
+type FatalError struct {
+ errorText string
+}
+
+// NewFatalError creates a FatalError struct with the supplied error text.
+func NewFatalError(s string) *FatalError {
+ return &FatalError{s}
+}
+
+func (e *FatalError) Error() string {
+ return e.errorText
+}
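The Matcher and FatalError contract above is small enough to implement by hand. The following is a minimal sketch of a custom matcher, assuming the vendored import path from this diff; the `evenMatcher` type, `IsEven` constructor, and error texts are illustrative, not part of the package. It follows the documented conventions: nil for a match, a non-fatal relative-clause error for a mismatch, and a *FatalError for candidates of the wrong type, so that wrappers like Not cannot silently invert a type error.

```go
package oglematchers_test

import (
	"errors"

	. "github.com/smartystreets/assertions/internal/oglematchers"
)

// evenMatcher matches int values that are even.
type evenMatcher struct{}

// IsEven is a hypothetical constructor in the style of GreaterThan et al.
func IsEven() Matcher { return &evenMatcher{} }

func (m *evenMatcher) Description() string { return "is an even integer" }

func (m *evenMatcher) Matches(c interface{}) error {
	i, ok := c.(int)
	if !ok {
		// Fatal: this matcher doesn't process values of other types.
		return NewFatalError("which is not an int")
	}
	if i%2 != 0 {
		// Non-fatal: a relative clause suitable for "Actual: 17, which is odd".
		return errors.New("which is odd")
	}
	return nil
}
```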
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp.go
new file mode 100644
index 00000000000..1ed63f30c4e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp.go
@@ -0,0 +1,69 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+)
+
+// MatchesRegexp returns a matcher that matches strings and byte slices whose
+// contents match the supplied regular expression. The semantics are those of
+// regexp.Match. In particular, that means the match is not implicitly anchored
+// to the ends of the string: MatchesRegexp("bar") will match "foo bar baz".
+func MatchesRegexp(pattern string) Matcher {
+ re, err := regexp.Compile(pattern)
+ if err != nil {
+ panic("MatchesRegexp: " + err.Error())
+ }
+
+ return &matchesRegexpMatcher{re}
+}
+
+type matchesRegexpMatcher struct {
+ re *regexp.Regexp
+}
+
+func (m *matchesRegexpMatcher) Description() string {
+ return fmt.Sprintf("matches regexp \"%s\"", m.re.String())
+}
+
+func (m *matchesRegexpMatcher) Matches(c interface{}) (err error) {
+ v := reflect.ValueOf(c)
+ isString := v.Kind() == reflect.String
+ isByteSlice := v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8
+
+ err = errors.New("")
+
+ switch {
+ case isString:
+ if m.re.MatchString(v.String()) {
+ err = nil
+ }
+
+ case isByteSlice:
+ if m.re.Match(v.Bytes()) {
+ err = nil
+ }
+
+ default:
+ err = NewFatalError("which is not a string or []byte")
+ }
+
+ return
+}
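A brief usage sketch for MatchesRegexp, again assuming the vendored import path; note that matching is unanchored and that byte slices are accepted alongside strings:

```go
package oglematchers_test

import (
	"fmt"

	oglematchers "github.com/smartystreets/assertions/internal/oglematchers"
)

func demoMatchesRegexp() {
	m := oglematchers.MatchesRegexp("fo[op]\\s+x")

	fmt.Println(m.Matches("blah foo  x blah")) // <nil>: unanchored match.
	fmt.Println(m.Matches([]byte("fop x")))    // <nil>: []byte works too.
	fmt.Println(m.Matches(17))                 // which is not a string or []byte
}
```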
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp_test.go
new file mode 100644
index 00000000000..031c6cb3eff
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp_test.go
@@ -0,0 +1,92 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type MatchesRegexpTest struct {
+}
+
+func init() { RegisterTestSuite(&MatchesRegexpTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *MatchesRegexpTest) Description() {
+ m := MatchesRegexp("foo.*bar")
+ ExpectEq("matches regexp \"foo.*bar\"", m.Description())
+}
+
+func (t *MatchesRegexpTest) InvalidRegexp() {
+ ExpectThat(
+ func() { MatchesRegexp("(foo") },
+ Panics(HasSubstr("missing closing )")))
+}
+
+func (t *MatchesRegexpTest) CandidateIsNil() {
+ m := MatchesRegexp("")
+ err := m.Matches(nil)
+
+ ExpectThat(err, Error(Equals("which is not a string or []byte")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *MatchesRegexpTest) CandidateIsInteger() {
+ m := MatchesRegexp("")
+ err := m.Matches(17)
+
+ ExpectThat(err, Error(Equals("which is not a string or []byte")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *MatchesRegexpTest) NonMatchingCandidates() {
+ m := MatchesRegexp("fo[op]\\s+x")
+ var err error
+
+ err = m.Matches("fon x")
+ ExpectThat(err, Error(Equals("")))
+ ExpectFalse(isFatal(err))
+
+ err = m.Matches("fopx")
+ ExpectThat(err, Error(Equals("")))
+ ExpectFalse(isFatal(err))
+
+ err = m.Matches("fop ")
+ ExpectThat(err, Error(Equals("")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *MatchesRegexpTest) MatchingCandidates() {
+ m := MatchesRegexp("fo[op]\\s+x")
+ var err error
+
+ err = m.Matches("foo x")
+ ExpectEq(nil, err)
+
+ err = m.Matches("fop x")
+ ExpectEq(nil, err)
+
+ err = m.Matches("blah blah foo x blah blah")
+ ExpectEq(nil, err)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/new_matcher.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/new_matcher.go
new file mode 100644
index 00000000000..c9d8398ee63
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/new_matcher.go
@@ -0,0 +1,43 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+// Create a matcher with the given description and predicate function, which
+// will be invoked to handle calls to Matches.
+//
+// Using this constructor may be a convenience over defining your own type that
+// implements Matcher if you do not need any logic in your Description method.
+func NewMatcher(
+ predicate func(interface{}) error,
+ description string) Matcher {
+ return &predicateMatcher{
+ predicate: predicate,
+ description: description,
+ }
+}
+
+type predicateMatcher struct {
+ predicate func(interface{}) error
+ description string
+}
+
+func (pm *predicateMatcher) Matches(c interface{}) error {
+ return pm.predicate(c)
+}
+
+func (pm *predicateMatcher) Description() string {
+ return pm.description
+}
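A sketch of NewMatcher in use; `isEmptyString` is hypothetical, not a matcher shipped by the package, but it obeys the same error conventions that matcher.go describes:

```go
package oglematchers_test

import (
	"errors"

	oglematchers "github.com/smartystreets/assertions/internal/oglematchers"
)

// isEmptyString is built with NewMatcher instead of a hand-written type,
// which suffices because its description is a fixed string.
var isEmptyString = oglematchers.NewMatcher(
	func(c interface{}) error {
		s, ok := c.(string)
		if !ok {
			return oglematchers.NewFatalError("which is not a string")
		}
		if s != "" {
			return errors.New("which is non-empty")
		}
		return nil
	},
	"is the empty string")
```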
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/not.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/not.go
new file mode 100644
index 00000000000..623789fe28a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/not.go
@@ -0,0 +1,53 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Not returns a matcher that inverts the set of values matched by the wrapped
+// matcher. It does not transform the result for values for which the wrapped
+// matcher returns a fatal error.
+func Not(m Matcher) Matcher {
+ return &notMatcher{m}
+}
+
+type notMatcher struct {
+ wrapped Matcher
+}
+
+func (m *notMatcher) Matches(c interface{}) (err error) {
+ err = m.wrapped.Matches(c)
+
+ // Did the wrapped matcher say yes?
+ if err == nil {
+ return errors.New("")
+ }
+
+ // Did the wrapped matcher return a fatal error?
+ if _, isFatal := err.(*FatalError); isFatal {
+ return err
+ }
+
+ // The wrapped matcher returned a non-fatal error.
+ return nil
+}
+
+func (m *notMatcher) Description() string {
+ return fmt.Sprintf("not(%s)", m.wrapped.Description())
+}
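A short sketch of the inversion and fatal-error pass-through described above, reusing the package's GreaterThan matcher; `demoNot` is illustrative only:

```go
package oglematchers_test

import (
	"fmt"

	oglematchers "github.com/smartystreets/assertions/internal/oglematchers"
)

func demoNot() {
	m := oglematchers.Not(oglematchers.GreaterThan(17))

	fmt.Println(m.Matches(16))     // <nil>: wrapped matcher said no, so Not matches.
	fmt.Println(m.Matches(19))     // non-nil: wrapped matcher matched.
	fmt.Println(m.Matches("taco")) // *FatalError propagated, not inverted.
}
```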
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/not_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/not_test.go
new file mode 100644
index 00000000000..9c65b85ef87
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/not_test.go
@@ -0,0 +1,108 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "errors"
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "testing"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type fakeMatcher struct {
+ matchFunc func(interface{}) error
+ description string
+}
+
+func (m *fakeMatcher) Matches(c interface{}) error {
+ return m.matchFunc(c)
+}
+
+func (m *fakeMatcher) Description() string {
+ return m.description
+}
+
+type NotTest struct{}
+
+func init() { RegisterTestSuite(&NotTest{}) }
+func TestOgletest(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *NotTest) CallsWrapped() {
+ var suppliedCandidate interface{}
+ matchFunc := func(c interface{}) error {
+ suppliedCandidate = c
+ return nil
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Not(wrapped)
+
+ matcher.Matches(17)
+ ExpectThat(suppliedCandidate, Equals(17))
+}
+
+func (t *NotTest) WrappedReturnsTrue() {
+ matchFunc := func(c interface{}) error {
+ return nil
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Not(wrapped)
+
+ err := matcher.Matches(0)
+ ExpectThat(err, Error(Equals("")))
+}
+
+func (t *NotTest) WrappedReturnsNonFatalError() {
+ matchFunc := func(c interface{}) error {
+ return errors.New("taco")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Not(wrapped)
+
+ err := matcher.Matches(0)
+ ExpectEq(nil, err)
+}
+
+func (t *NotTest) WrappedReturnsFatalError() {
+ matchFunc := func(c interface{}) error {
+ return NewFatalError("taco")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Not(wrapped)
+
+ err := matcher.Matches(0)
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *NotTest) Description() {
+ wrapped := &fakeMatcher{nil, "taco"}
+ matcher := Not(wrapped)
+
+ ExpectEq("not(taco)", matcher.Description())
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/panics.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/panics.go
new file mode 100644
index 00000000000..d2cfc97869b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/panics.go
@@ -0,0 +1,74 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// Panics matches zero-arg functions which, when invoked, panic with an error
+// that matches the supplied matcher.
+//
+// NOTE(jacobsa): This matcher cannot detect the case where the function panics
+// using panic(nil), by design of the language. See here for more info:
+//
+// http://goo.gl/9aIQL
+//
+func Panics(m Matcher) Matcher {
+ return &panicsMatcher{m}
+}
+
+type panicsMatcher struct {
+ wrappedMatcher Matcher
+}
+
+func (m *panicsMatcher) Description() string {
+ return "panics with: " + m.wrappedMatcher.Description()
+}
+
+func (m *panicsMatcher) Matches(c interface{}) (err error) {
+ // Make sure c is a zero-arg function.
+ v := reflect.ValueOf(c)
+ if v.Kind() != reflect.Func || v.Type().NumIn() != 0 {
+ err = NewFatalError("which is not a zero-arg function")
+ return
+ }
+
+ // Call the function and check its panic error.
+ defer func() {
+ if e := recover(); e != nil {
+ err = m.wrappedMatcher.Matches(e)
+
+ // Set a clearer error message if the matcher said no.
+ if err != nil {
+ wrappedClause := ""
+ if err.Error() != "" {
+ wrappedClause = ", " + err.Error()
+ }
+
+ err = fmt.Errorf("which panicked with: %v%s", e, wrappedClause)
+ }
+ }
+ }()
+
+ v.Call([]reflect.Value{})
+
+ // If we get here, the function didn't panic.
+ err = errors.New("which didn't panic")
+ return
+}
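A usage sketch for Panics; `demoPanics` and its candidate functions are illustrative:

```go
package oglematchers_test

import (
	"fmt"

	oglematchers "github.com/smartystreets/assertions/internal/oglematchers"
)

func demoPanics() {
	m := oglematchers.Panics(oglematchers.HasSubstr("taco"))

	fmt.Println(m.Matches(func() { panic("burnt taco") })) // <nil>
	fmt.Println(m.Matches(func() {}))                      // which didn't panic
	fmt.Println(m.Matches(42))                             // which is not a zero-arg function
}
```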
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/panics_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/panics_test.go
new file mode 100644
index 00000000000..fbb66bf31e2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/panics_test.go
@@ -0,0 +1,141 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "errors"
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type PanicsTest struct {
+ matcherCalled bool
+ suppliedCandidate interface{}
+ wrappedError error
+
+ matcher Matcher
+}
+
+func init() { RegisterTestSuite(&PanicsTest{}) }
+
+func (t *PanicsTest) SetUp(i *TestInfo) {
+ wrapped := &fakeMatcher{
+ func(c interface{}) error {
+ t.matcherCalled = true
+ t.suppliedCandidate = c
+ return t.wrappedError
+ },
+ "foo",
+ }
+
+ t.matcher = Panics(wrapped)
+}
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *PanicsTest) Description() {
+ ExpectThat(t.matcher.Description(), Equals("panics with: foo"))
+}
+
+func (t *PanicsTest) CandidateIsNil() {
+ err := t.matcher.Matches(nil)
+
+ ExpectThat(err, Error(Equals("which is not a zero-arg function")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PanicsTest) CandidateIsString() {
+ err := t.matcher.Matches("taco")
+
+ ExpectThat(err, Error(Equals("which is not a zero-arg function")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PanicsTest) CandidateTakesArgs() {
+ err := t.matcher.Matches(func(i int) string { return "" })
+
+ ExpectThat(err, Error(Equals("which is not a zero-arg function")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PanicsTest) CallsFunction() {
+ callCount := 0
+ t.matcher.Matches(func() string {
+ callCount++
+ return ""
+ })
+
+ ExpectThat(callCount, Equals(1))
+}
+
+func (t *PanicsTest) FunctionDoesntPanic() {
+ err := t.matcher.Matches(func() {})
+
+ ExpectThat(err, Error(Equals("which didn't panic")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *PanicsTest) CallsWrappedMatcher() {
+ expectedErr := 17
+ t.wrappedError = errors.New("")
+ t.matcher.Matches(func() { panic(expectedErr) })
+
+ ExpectThat(t.suppliedCandidate, Equals(expectedErr))
+}
+
+func (t *PanicsTest) WrappedReturnsTrue() {
+ err := t.matcher.Matches(func() { panic("") })
+
+ ExpectEq(nil, err)
+}
+
+func (t *PanicsTest) WrappedReturnsFatalErrorWithoutText() {
+ t.wrappedError = NewFatalError("")
+ err := t.matcher.Matches(func() { panic(17) })
+
+ ExpectThat(err, Error(Equals("which panicked with: 17")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *PanicsTest) WrappedReturnsFatalErrorWithText() {
+ t.wrappedError = NewFatalError("which blah")
+ err := t.matcher.Matches(func() { panic(17) })
+
+ ExpectThat(err, Error(Equals("which panicked with: 17, which blah")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *PanicsTest) WrappedReturnsNonFatalErrorWithoutText() {
+ t.wrappedError = errors.New("")
+ err := t.matcher.Matches(func() { panic(17) })
+
+ ExpectThat(err, Error(Equals("which panicked with: 17")))
+ ExpectFalse(isFatal(err))
+}
+
+func (t *PanicsTest) WrappedReturnsNonFatalErrorWithText() {
+ t.wrappedError = errors.New("which blah")
+ err := t.matcher.Matches(func() { panic(17) })
+
+ ExpectThat(err, Error(Equals("which panicked with: 17, which blah")))
+ ExpectFalse(isFatal(err))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/pointee.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/pointee.go
new file mode 100644
index 00000000000..c5383f2402f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/pointee.go
@@ -0,0 +1,65 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+)
+
+// Return a matcher that matches non-nil pointers whose pointee matches the
+// wrapped matcher.
+func Pointee(m Matcher) Matcher {
+ return &pointeeMatcher{m}
+}
+
+type pointeeMatcher struct {
+ wrapped Matcher
+}
+
+func (m *pointeeMatcher) Matches(c interface{}) (err error) {
+ // Make sure the candidate is of the appropriate type.
+ cv := reflect.ValueOf(c)
+ if !cv.IsValid() || cv.Kind() != reflect.Ptr {
+ return NewFatalError("which is not a pointer")
+ }
+
+ // Make sure the candidate is non-nil.
+ if cv.IsNil() {
+ return NewFatalError("")
+ }
+
+ // Defer to the wrapped matcher. Fix up empty errors so that failure messages
+ // are more helpful than just printing a pointer for "Actual".
+ pointee := cv.Elem().Interface()
+ err = m.wrapped.Matches(pointee)
+ if err != nil && err.Error() == "" {
+ s := fmt.Sprintf("whose pointee is %v", pointee)
+
+ if _, ok := err.(*FatalError); ok {
+ err = NewFatalError(s)
+ } else {
+ err = errors.New(s)
+ }
+ }
+
+ return err
+}
+
+func (m *pointeeMatcher) Description() string {
+ return fmt.Sprintf("pointee(%s)", m.wrapped.Description())
+}
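A usage sketch for Pointee, combining it with the package's Equals matcher; `demoPointee` is illustrative:

```go
package oglematchers_test

import (
	"fmt"

	oglematchers "github.com/smartystreets/assertions/internal/oglematchers"
)

func demoPointee() {
	m := oglematchers.Pointee(oglematchers.Equals(17))

	i := 17
	fmt.Println(m.Matches(&i))          // <nil>: the pointee matches.
	fmt.Println(m.Matches((*int)(nil))) // fatal: nil pointer.
	fmt.Println(m.Matches(17))          // which is not a pointer
}
```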
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/pointee_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/pointee_test.go
new file mode 100644
index 00000000000..3bb72a702be
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/pointee_test.go
@@ -0,0 +1,152 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "errors"
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "testing"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type PointeeTest struct{}
+func init() { RegisterTestSuite(&PointeeTest{}) }
+
+func TestPointee(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *PointeeTest) Description() {
+ wrapped := &fakeMatcher{nil, "taco"}
+ matcher := Pointee(wrapped)
+
+ ExpectEq("pointee(taco)", matcher.Description())
+}
+
+func (t *PointeeTest) CandidateIsNotAPointer() {
+ matcher := Pointee(HasSubstr(""))
+ err := matcher.Matches([]byte{})
+
+ ExpectThat(err, Error(Equals("which is not a pointer")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PointeeTest) CandidateIsANilLiteral() {
+ matcher := Pointee(HasSubstr(""))
+ err := matcher.Matches(nil)
+
+ ExpectThat(err, Error(Equals("which is not a pointer")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PointeeTest) CandidateIsANilPointer() {
+ matcher := Pointee(HasSubstr(""))
+ err := matcher.Matches((*int)(nil))
+
+ ExpectThat(err, Error(Equals("")))
+ ExpectTrue(isFatal(err))
+}
+
+func (t *PointeeTest) CallsWrapped() {
+ var suppliedCandidate interface{}
+ matchFunc := func(c interface{}) error {
+ suppliedCandidate = c
+ return nil
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ someSlice := []byte{}
+ matcher.Matches(&someSlice)
+ ExpectThat(suppliedCandidate, IdenticalTo(someSlice))
+}
+
+func (t *PointeeTest) WrappedReturnsOkay() {
+ matchFunc := func(c interface{}) error {
+ return nil
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ err := matcher.Matches(new(int))
+ ExpectEq(nil, err)
+}
+
+func (t *PointeeTest) WrappedReturnsNonFatalNonEmptyError() {
+ matchFunc := func(c interface{}) error {
+ return errors.New("taco")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ i := 17
+ err := matcher.Matches(&i)
+ ExpectFalse(isFatal(err))
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *PointeeTest) WrappedReturnsNonFatalEmptyError() {
+ matchFunc := func(c interface{}) error {
+ return errors.New("")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ i := 17
+ err := matcher.Matches(&i)
+ ExpectFalse(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("whose pointee")))
+ ExpectThat(err, Error(HasSubstr("17")))
+}
+
+func (t *PointeeTest) WrappedReturnsFatalNonEmptyError() {
+ matchFunc := func(c interface{}) error {
+ return NewFatalError("taco")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ i := 17
+ err := matcher.Matches(&i)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(Equals("taco")))
+}
+
+func (t *PointeeTest) WrappedReturnsFatalEmptyError() {
+ matchFunc := func(c interface{}) error {
+ return NewFatalError("")
+ }
+
+ wrapped := &fakeMatcher{matchFunc, ""}
+ matcher := Pointee(wrapped)
+
+ i := 17
+ err := matcher.Matches(&i)
+ ExpectTrue(isFatal(err))
+ ExpectThat(err, Error(HasSubstr("whose pointee")))
+ ExpectThat(err, Error(HasSubstr("17")))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go
new file mode 100644
index 00000000000..f79d0c03db1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglematchers/transform_description.go
@@ -0,0 +1,36 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers
+
+// transformDescription returns a matcher that is equivalent to the supplied
+// one, except that it has the supplied description instead of the one attached
+// to the existing matcher.
+func transformDescription(m Matcher, newDesc string) Matcher {
+ return &transformDescriptionMatcher{newDesc, m}
+}
+
+type transformDescriptionMatcher struct {
+ desc string
+ wrappedMatcher Matcher
+}
+
+func (m *transformDescriptionMatcher) Description() string {
+ return m.desc
+}
+
+func (m *transformDescriptionMatcher) Matches(c interface{}) error {
+ return m.wrappedMatcher.Matches(c)
+}
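Because transformDescription is unexported, it can only be called from inside the package. The hypothetical constructor below sketches its intended use: reuse an existing matcher's logic while presenting a clearer description in failure output.

```go
package oglematchers

// isNonNegative is a hypothetical constructor: it delegates matching to
// GreaterOrEqual(0) but replaces the description shown in failure messages.
func isNonNegative() Matcher {
	return transformDescription(GreaterOrEqual(0), "is non-negative")
}
```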
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/.gitignore b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/.gitignore
new file mode 100644
index 00000000000..dd8fc7468f4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/.gitignore
@@ -0,0 +1,5 @@
+*.6
+6.out
+_obj/
+_test/
+_testmain.go
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/.travis.yml b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/.travis.yml
new file mode 100644
index 00000000000..b97211926e8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/.travis.yml
@@ -0,0 +1,4 @@
+# Cf. http://docs.travis-ci.com/user/getting-started/
+# Cf. http://docs.travis-ci.com/user/languages/go/
+
+language: go
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/LICENSE b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/README.md b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/README.md
new file mode 100644
index 00000000000..c5cb5c06b33
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/README.md
@@ -0,0 +1,103 @@
+[![GoDoc](https://godoc.org/github.com/smartystreets/assertions/internal/oglemock?status.svg)](https://godoc.org/github.com/smartystreets/assertions/internal/oglemock)
+
+`oglemock` is a mocking framework for the Go programming language with the
+following features:
+
+ * An extensive and extensible set of matchers for expressing call
+ expectations (provided by the [oglematchers][] package).
+
+ * Clean, readable output that tells you exactly what you need to know.
+
+ * Style and semantics similar to [Google Mock][googlemock] and
+ [Google JS Test][google-js-test].
+
+ * Seamless integration with the [ogletest][] unit testing framework.
+
+It can be integrated into any testing framework (including Go's `testing`
+package), but out of the box support is built in to [ogletest][] and that is the
+easiest place to use it.
+
+
+Installation
+------------
+
+First, make sure you have installed Go 1.0.2 or newer. See
+[here][golang-install] for instructions.
+
+Use the following command to install `oglemock` and its dependencies, and to
+keep them up to date:
+
+ go get -u github.com/smartystreets/assertions/internal/oglemock
+ go get -u github.com/smartystreets/assertions/internal/oglemock/createmock
+
+Those commands will install the `oglemock` package itself, along with the
+`createmock` tool that is used to auto-generate mock types.
+
+
+Generating and using mock types
+-------------------------------
+
+Automatically generating a mock implementation of an interface is easy. If you
+want to mock interfaces `Bar` and `Baz` from package `foo`, simply run the
+following:
+
+ createmock foo Bar Baz
+
+That will print source code that can be saved to a file and used in your tests.
+For example, to create a `mock_io` package containing mock implementations of
+`io.Reader` and `io.Writer`:
+
+ mkdir mock_io
+ createmock io Reader Writer > mock_io/mock_io.go
+
+The new package will be named `mock_io`, and contain types called `MockReader`
+and `MockWriter`, which implement `io.Reader` and `io.Writer` respectively.
+
+For each generated mock type, there is a corresponding function for creating an
+instance of that type given a `Controller` object (see below). For example, to
+create a mock reader:
+
+```go
+someController := [...] // See next section.
+someReader := mock_io.NewMockReader(someController, "Mock file reader")
+```
+
+The snippet above creates a mock `io.Reader` that reports failures to
+`someController`. The reader can subsequently have expectations set up and be
+passed to your code under test that uses an `io.Reader`.
+
+
+Getting ahold of a controller
+-----------------------------
+
+[oglemock.Controller][controller-ref] is used to create mock objects, and to set
+up and verify expectations for them. You can create one by calling
+`NewController` with an `ErrorReporter`, which is the basic type used to
+interface between `oglemock` and the testing framework within which it is being
+used.
+
+If you are using [ogletest][] you don't need to worry about any of this, since
+the `TestInfo` struct provided to your test's `SetUp` function already contains
+a working `Controller` that you can use to create mock objects, and you can use
+the built-in `ExpectCall` function for setting expectations. (See the
+[ogletest documentation][ogletest-docs] for more info.) Otherwise, you will need
+to implement the simple [ErrorReporter interface][reporter-ref] for your test
+environment.
+
+
+Documentation
+-------------
+
+For thorough documentation, including information on how to set up expectations,
+see [here][oglemock-docs].
+
+
+[controller-ref]: http://godoc.org/github.com/smartystreets/assertions/internal/oglemock#Controller
+[reporter-ref]: http://godoc.org/github.com/smartystreets/assertions/internal/oglemock#ErrorReporter
+[golang-install]: http://golang.org/doc/install.html
+[google-js-test]: http://code.google.com/p/google-js-test/
+[googlemock]: http://code.google.com/p/googlemock/
+[oglematchers]: https://github.com/smartystreets/assertions/internal/oglematchers
+[oglemock-docs]: http://godoc.org/github.com/smartystreets/assertions/internal/oglemock
+[ogletest]: https://github.com/smartystreets/assertions/internal/ogletest
+[ogletest-docs]: http://godoc.org/github.com/smartystreets/assertions/internal/ogletest
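Outside ogletest, the missing piece is an ErrorReporter. The sketch below adapts a *testing.T; the `testingReporter` type and test are ours, but the two methods match the signatures the controller invokes (see ReportError and ReportFatalError in controller.go below).

```go
package mypkg_test

import (
	"testing"

	oglemock "github.com/smartystreets/assertions/internal/oglemock"
)

// testingReporter routes oglemock failures to the standard testing package.
type testingReporter struct{ t *testing.T }

func (r *testingReporter) ReportError(file string, line int, err error) {
	r.t.Errorf("%s:%d: %v", file, line, err)
}

func (r *testingReporter) ReportFatalError(file string, line int, err error) {
	r.t.Fatalf("%s:%d: %v", file, line, err)
}

func TestWithMocks(t *testing.T) {
	controller := oglemock.NewController(&testingReporter{t})
	defer controller.Finish() // Verify expectations at the end of the test.

	// Create mocks from controller here, e.g.:
	//   reader := mock_io.NewMockReader(controller, "reader")
}
```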
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/action.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/action.go
new file mode 100644
index 00000000000..9fd40d81fe8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/action.go
@@ -0,0 +1,36 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock
+
+import (
+ "reflect"
+)
+
+// Action represents an action to be taken in response to a call to a mock
+// method.
+type Action interface {
+ // Set the signature of the function with which this action is being used.
+ // This must be called before Invoke is called.
+ SetSignature(signature reflect.Type) error
+
+ // Invoke runs the specified action, given the arguments to the mock method.
+ // It returns zero or more values that may be treated as the return values of
+ // the method. If the action doesn't return any values, it may return the nil
+ // slice.
+ //
+ // You must call SetSignature before calling Invoke.
+ Invoke(methodArgs []interface{}) []interface{}
+}
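The Action contract can also be satisfied by hand when built-in actions such as Return don't fit. A minimal sketch (the `returnZero` type is illustrative): it records the method signature, then produces the zero value for each declared result.

```go
package oglemock_test

import (
	"reflect"

	oglemock "github.com/smartystreets/assertions/internal/oglemock"
)

// returnZero ignores the method's arguments and returns the zero value for
// each of its return types.
type returnZero struct {
	signature reflect.Type
}

var _ oglemock.Action = (*returnZero)(nil)

func (a *returnZero) SetSignature(signature reflect.Type) error {
	a.signature = signature
	return nil
}

func (a *returnZero) Invoke(methodArgs []interface{}) []interface{} {
	out := make([]interface{}, a.signature.NumOut())
	for i := range out {
		out[i] = reflect.Zero(a.signature.Out(i)).Interface()
	}
	return out
}
```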
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/controller.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/controller.go
new file mode 100644
index 00000000000..93a1d6239e1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/controller.go
@@ -0,0 +1,480 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "math"
+ "reflect"
+ "sync"
+)
+
+// PartialExpecation is a function that should be called exactly once with
+// expected arguments or matchers in order to set up an expected method call.
+// See Controller.ExpectMethodCall below. It returns an expectation that can be
+// further modified (e.g. by calling WillOnce).
+//
+// If the arguments are of the wrong type, the function reports a fatal error
+// and returns nil.
+type PartialExpecation func(...interface{}) Expectation
+
+// Controller represents an object that implements the central logic of
+// oglemock: recording and verifying expectations, responding to mock method
+// calls, and so on.
+type Controller interface {
+ // ExpectCall expresses an expectation that the method of the given name
+ // should be called on the supplied mock object. It returns a function that
+ // should be called with the expected arguments, matchers for the arguments,
+ // or a mix of both.
+ //
+ // fileName and lineNumber should indicate the line on which the expectation
+ // was made, if known.
+ //
+ // For example:
+ //
+ // mockWriter := [...]
+ // controller.ExpectCall(mockWriter, "Write", "foo.go", 17)(ElementsAre(0x1))
+ // .WillOnce(Return(1, nil))
+ //
+ // If the mock object doesn't have a method of the supplied name, the
+ // function reports a fatal error and returns nil.
+ ExpectCall(
+ o MockObject,
+ methodName string,
+ fileName string,
+ lineNumber int) PartialExpecation
+
+ // Finish causes the controller to check for any unsatisfied expectations,
+ // and report them as errors if they exist.
+ //
+ // The controller may panic if any of its methods (including this one) are
+ // called after Finish is called.
+ Finish()
+
+ // HandleMethodCall looks for a registered expectation matching the call of
+ // the given method on mock object o, invokes the appropriate action (if
+ // any), and returns the values returned by that action (if any).
+ //
+ // If the action returns nothing, the controller returns zero values. If
+ // there is no matching expectation, the controller reports an error and
+ // returns zero values.
+ //
+ // If the mock object doesn't have a method of the supplied name, the
+ // arguments are of the wrong type, or the action returns the wrong types,
+ // the function reports a fatal error.
+ //
+ // HandleMethodCall is exported for the sake of mock implementations, and
+ // should not be used directly.
+ HandleMethodCall(
+ o MockObject,
+ methodName string,
+ fileName string,
+ lineNumber int,
+ args []interface{}) []interface{}
+}
+
+// methodMap represents a map from method name to set of expectations for that
+// method.
+type methodMap map[string][]*InternalExpectation
+
+// objectMap represents a map from mock object ID to a methodMap for that object.
+type objectMap map[uintptr]methodMap
+
+// NewController sets up a fresh controller, without any expectations set, and
+// configures the controller to use the supplied error reporter.
+func NewController(reporter ErrorReporter) Controller {
+ return &controllerImpl{reporter, sync.RWMutex{}, objectMap{}}
+}
+
+type controllerImpl struct {
+ reporter ErrorReporter
+
+ mutex sync.RWMutex
+ expectationsByObject objectMap // Protected by mutex
+}
+
+// Return the list of registered expectations for the named method of the
+// supplied object, or an empty slice if none have been registered. When this
+// method returns, it is guaranteed that c.expectationsByObject has an entry
+// for the object.
+//
+// c.mutex must be held for reading.
+func (c *controllerImpl) getExpectationsLocked(
+ o MockObject,
+ methodName string) []*InternalExpectation {
+ id := o.Oglemock_Id()
+
+ // Look up the mock object.
+ expectationsByMethod, ok := c.expectationsByObject[id]
+ if !ok {
+ expectationsByMethod = methodMap{}
+ c.expectationsByObject[id] = expectationsByMethod
+ }
+
+ result, ok := expectationsByMethod[methodName]
+ if !ok {
+ return []*InternalExpectation{}
+ }
+
+ return result
+}
+
+// addExpectationLocked adds an expectation to the list registered for the
+// named method of the supplied mock object.
+//
+// c.mutex must be held for writing.
+func (c *controllerImpl) addExpectationLocked(
+ o MockObject,
+ methodName string,
+ exp *InternalExpectation) {
+ // Get the existing list.
+ existing := c.getExpectationsLocked(o, methodName)
+
+ // Store a modified list.
+ id := o.Oglemock_Id()
+ c.expectationsByObject[id][methodName] = append(existing, exp)
+}
+
+func (c *controllerImpl) ExpectCall(
+ o MockObject,
+ methodName string,
+ fileName string,
+ lineNumber int) PartialExpectation {
+ // Find the signature for the requested method.
+ ov := reflect.ValueOf(o)
+ method := ov.MethodByName(methodName)
+ if method.Kind() == reflect.Invalid {
+ c.reporter.ReportFatalError(
+ fileName,
+ lineNumber,
+ errors.New("Unknown method: "+methodName))
+ return nil
+ }
+
+ partialAlreadyCalled := false // Protected by c.mutex
+ return func(args ...interface{}) Expectation {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ // This function should only be called once.
+ if partialAlreadyCalled {
+ c.reporter.ReportFatalError(
+ fileName,
+ lineNumber,
+ errors.New("Partial expectation called more than once."))
+ return nil
+ }
+
+ partialAlreadyCalled = true
+
+ // Make sure that the number of args is legal. Note that the type of a
+ // method value obtained via MethodByName does not include the receiver.
+ if len(args) != method.Type().NumIn() {
+ c.reporter.ReportFatalError(
+ fileName,
+ lineNumber,
+ fmt.Errorf(
+ "Expectation for %s given wrong number of arguments: "+
+ "expected %d, got %d.",
+ methodName,
+ method.Type().NumIn(),
+ len(args)))
+ return nil
+ }
+
+ // Create an expectation and insert it into the controller's map.
+ exp := InternalNewExpectation(
+ c.reporter,
+ method.Type(),
+ args,
+ fileName,
+ lineNumber)
+
+ c.addExpectationLocked(o, methodName, exp)
+
+ // Return the expectation to the user.
+ return exp
+ }
+}
+
+func (c *controllerImpl) Finish() {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ // Check whether the minimum cardinality for each registered expectation has
+ // been satisfied.
+ for _, expectationsByMethod := range c.expectationsByObject {
+ for methodName, expectations := range expectationsByMethod {
+ for _, exp := range expectations {
+ exp.mutex.Lock()
+ defer exp.mutex.Unlock()
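+ // Note: this deferred unlock runs only when Finish returns, so each
+ // expectation's mutex stays held for the remainder of the loop.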
+
+ minCardinality, _ := computeCardinalityLocked(exp)
+ if exp.NumMatches < minCardinality {
+ c.reporter.ReportError(
+ exp.FileName,
+ exp.LineNumber,
+ fmt.Errorf(
+ "Unsatisfied expectation; expected %s to be called "+
+ "at least %d times; called %d times.",
+ methodName,
+ minCardinality,
+ exp.NumMatches))
+ }
+ }
+ }
+ }
+}
+
+// expectationMatches checks the matchers for the expectation against the
+// supplied arguments.
+func expectationMatches(exp *InternalExpectation, args []interface{}) bool {
+ matchers := exp.ArgMatchers
+ if len(args) != len(matchers) {
+ panic("expectationMatches: len(args)")
+ }
+
+ // Check each matcher.
+ for i, matcher := range matchers {
+ if err := matcher.Matches(args[i]); err != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// chooseExpectationLocked returns the expectation that matches the supplied
+// arguments. If there is more than one such expectation, the one furthest
+// along in the list for the method is returned. If there is no such
+// expectation, nil is returned.
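+// In other words, later expectations take precedence: an expectation
+// registered after a catch-all one wins for calls that match both.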
+//
+// c.mutex must be held for writing (see getExpectationsLocked).
+func (c *controllerImpl) chooseExpectationLocked(
+ o MockObject,
+ methodName string,
+ args []interface{}) *InternalExpectation {
+ // Do we have any expectations for this method?
+ expectations := c.getExpectationsLocked(o, methodName)
+ if len(expectations) == 0 {
+ return nil
+ }
+
+ for i := len(expectations) - 1; i >= 0; i-- {
+ if expectationMatches(expectations[i], args) {
+ return expectations[i]
+ }
+ }
+
+ return nil
+}
+
+// makeZeroReturnValues creates a []interface{} containing appropriate zero
+// values for returning from the supplied method type.
+func makeZeroReturnValues(signature reflect.Type) []interface{} {
+ result := make([]interface{}, signature.NumOut())
+
+ for i := range result {
+ outType := signature.Out(i)
+ zeroVal := reflect.Zero(outType)
+ result[i] = zeroVal.Interface()
+ }
+
+ return result
+}
+
+// computeCardinalityLocked decides on the [min, max] range of the number of
+// expected matches for the supplied expectation, according to the rules
+// documented in expectation.go.
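+//
+// For example (following those rules): Times(3) gives [3, 3]; two WillOnce
+// actions give [2, 2]; two WillOnce actions plus a WillRepeatedly fallback
+// give [2, math.MaxUint32]; a fallback alone gives [0, math.MaxUint32]; and
+// no configuration at all gives [1, 1].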
+//
+// exp.mutex must be held for reading.
+func computeCardinalityLocked(exp *InternalExpectation) (min, max uint) {
+ // Explicit cardinality.
+ if exp.ExpectedNumMatches >= 0 {
+ min = uint(exp.ExpectedNumMatches)
+ max = min
+ return
+ }
+
+ // Implicit count based on one-time actions.
+ if len(exp.OneTimeActions) != 0 {
+ min = uint(len(exp.OneTimeActions))
+ max = min
+
+ // If there is a fallback action, this is only a lower bound.
+ if exp.FallbackAction != nil {
+ max = math.MaxUint32
+ }
+
+ return
+ }
+
+ // Implicit lack of restriction based on a fallback action being configured.
+ if exp.FallbackAction != nil {
+ min = 0
+ max = math.MaxUint32
+ return
+ }
+
+ // Implicit cardinality of one.
+ min = 1
+ max = 1
+ return
+}
+
+// chooseActionLocked returns the action that should be invoked for the i'th
+// match to the supplied expectation (counting from zero). If the implicit
+// "return zero values" action should be used, it returns nil.
+//
+// exp.mutex must be held for reading.
+func chooseActionLocked(i uint, exp *InternalExpectation) Action {
+ // Exhaust one-time actions first.
+ if i < uint(len(exp.OneTimeActions)) {
+ return exp.OneTimeActions[i]
+ }
+
+ // Fallback action (or nil if none is configured).
+ return exp.FallbackAction
+}
+
+// chooseActionAndUpdateExpectations finds an action for the method call,
+// updating expectation match state in the process. It returns either an
+// action that should be invoked or a set of zero values to return
+// immediately.
+//
+// This is split out from HandleMethodCall in order to more easily avoid
+// invoking the action with locks held.
+func (c *controllerImpl) chooseActionAndUpdateExpectations(
+ o MockObject,
+ methodName string,
+ fileName string,
+ lineNumber int,
+ args []interface{},
+) (action Action, zeroVals []interface{}) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ // Find the signature for the requested method.
+ ov := reflect.ValueOf(o)
+ method := ov.MethodByName(methodName)
+ if method.Kind() == reflect.Invalid {
+ c.reporter.ReportFatalError(
+ fileName,
+ lineNumber,
+ errors.New("Unknown method: "+methodName),
+ )
+
+ // Should never get here in real code.
+ log.Println("ReportFatalError unexpectedly returned.")
+ return
+ }
+
+ // HACK(jacobsa): Make sure we got the correct number of arguments. This will
+ // need to be refined when issue #5 (variadic methods) is handled.
+ if len(args) != method.Type().NumIn() {
+ c.reporter.ReportFatalError(
+ fileName,
+ lineNumber,
+ fmt.Errorf(
+ "Wrong number of arguments: expected %d; got %d",
+ method.Type().NumIn(),
+ len(args),
+ ),
+ )
+
+ // Should never get here in real code.
+ log.Println("ReportFatalError unexpectedly returned.")
+ return
+ }
+
+ // Find an expectation matching this call.
+ expectation := c.chooseExpectationLocked(o, methodName, args)
+ if expectation == nil {
+ c.reporter.ReportError(
+ fileName,
+ lineNumber,
+ fmt.Errorf("Unexpected call to %s with args: %v", methodName, args),
+ )
+
+ zeroVals = makeZeroReturnValues(method.Type())
+ return
+ }
+
+ expectation.mutex.Lock()
+ defer expectation.mutex.Unlock()
+
+ // Increase the number of matches recorded, and check whether we're over the
+ // number expected.
+ expectation.NumMatches++
+ _, maxCardinality := computeCardinalityLocked(expectation)
+ if expectation.NumMatches > maxCardinality {
+ c.reporter.ReportError(
+ expectation.FileName,
+ expectation.LineNumber,
+ fmt.Errorf(
+ "Unexpected call to %s: "+
+ "expected to be called at most %d times; called %d times.",
+ methodName,
+ maxCardinality,
+ expectation.NumMatches,
+ ),
+ )
+
+ zeroVals = makeZeroReturnValues(method.Type())
+ return
+ }
+
+ // Choose an action to invoke. If there is none, just return zero values.
+ action = chooseActionLocked(expectation.NumMatches-1, expectation)
+ if action == nil {
+ zeroVals = makeZeroReturnValues(method.Type())
+ return
+ }
+
+ // Let the action take over.
+ return
+}
+
+func (c *controllerImpl) HandleMethodCall(
+ o MockObject,
+ methodName string,
+ fileName string,
+ lineNumber int,
+ args []interface{},
+) []interface{} {
+ // Figure out whether to invoke an action or return zero values.
+ action, zeroVals := c.chooseActionAndUpdateExpectations(
+ o,
+ methodName,
+ fileName,
+ lineNumber,
+ args,
+ )
+
+ if action != nil {
+ return action.Invoke(args)
+ }
+
+ return zeroVals
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/controller_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/controller_test.go
new file mode 100644
index 00000000000..0ff5e5c41bb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/controller_test.go
@@ -0,0 +1,1249 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/oglemock"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "reflect"
+)
+
+////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////
+
+type errorReport struct {
+ fileName string
+ lineNumber int
+ err error
+}
+
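+// fakeErrorReporter is an ErrorReporter that simply records the reports it
+// receives so that tests can inspect them.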
+type fakeErrorReporter struct {
+ errors []errorReport
+ fatalErrors []errorReport
+}
+
+func (r *fakeErrorReporter) ReportError(fileName string, lineNumber int, err error) {
+ report := errorReport{fileName, lineNumber, err}
+ r.errors = append(r.errors, report)
+}
+
+func (r *fakeErrorReporter) ReportFatalError(fileName string, lineNumber int, err error) {
+ report := errorReport{fileName, lineNumber, err}
+ r.fatalErrors = append(r.fatalErrors, report)
+}
+
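+// trivialMockObject is a minimal MockObject implementation offering a couple
+// of methods to be mocked.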
+type trivialMockObject struct {
+ id uintptr
+ desc string
+}
+
+func (o *trivialMockObject) Oglemock_Id() uintptr {
+ return o.id
+}
+
+func (o *trivialMockObject) Oglemock_Description() string {
+ return o.desc
+}
+
+// Method being mocked
+func (o *trivialMockObject) StringToInt(s string) int {
+ return 0
+}
+
+// Method being mocked
+func (o *trivialMockObject) TwoIntsToString(i, j int) string {
+ return ""
+}
+
+type ControllerTest struct {
+ reporter fakeErrorReporter
+ controller Controller
+
+ mock1 MockObject
+ mock2 MockObject
+}
+
+func (t *ControllerTest) SetUp(c *TestInfo) {
+ t.reporter.errors = make([]errorReport, 0)
+ t.reporter.fatalErrors = make([]errorReport, 0)
+ t.controller = NewController(&t.reporter)
+
+ t.mock1 = &trivialMockObject{17, "taco"}
+ t.mock2 = &trivialMockObject{19, "burrito"}
+}
+
+func init() { RegisterTestSuite(&ControllerTest{}) }
+
+////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////
+
+func (t *ControllerTest) FinishWithoutAnyEvents() {
+ t.controller.Finish()
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) HandleCallForUnknownObject() {
+ p := []byte{255}
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "taco.go",
+ 112,
+ []interface{}{p})
+
+ // The error should be reported immediately.
+ AssertEq(1, len(t.reporter.errors))
+ AssertEq(0, len(t.reporter.fatalErrors))
+
+ ExpectEq("taco.go", t.reporter.errors[0].fileName)
+ ExpectEq(112, t.reporter.errors[0].lineNumber)
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("Unexpected")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("StringToInt")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("[255]")))
+
+ // Finish should change nothing.
+ t.controller.Finish()
+
+ ExpectEq(1, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) ExpectCallForUnknownMethod() {
+ ExpectEq(
+ nil,
+ t.controller.ExpectCall(t.mock1, "Frobnicate", "burrito.go", 117))
+
+ // A fatal error should be reported immediately.
+ AssertEq(0, len(t.reporter.errors))
+ AssertEq(1, len(t.reporter.fatalErrors))
+
+ report := t.reporter.fatalErrors[0]
+ ExpectEq("burrito.go", report.fileName)
+ ExpectEq(117, report.lineNumber)
+ ExpectThat(report.err, Error(HasSubstr("Unknown method")))
+ ExpectThat(report.err, Error(HasSubstr("Frobnicate")))
+}
+
+func (t *ControllerTest) PartialExpectationGivenWrongNumberOfArgs() {
+ ExpectEq(
+ nil,
+ t.controller.ExpectCall(t.mock1, "TwoIntsToString", "burrito.go", 117)(
+ 17, 19, 23))
+
+ // A fatal error should be reported immediately.
+ AssertEq(0, len(t.reporter.errors))
+ AssertEq(1, len(t.reporter.fatalErrors))
+
+ report := t.reporter.fatalErrors[0]
+ ExpectEq("burrito.go", report.fileName)
+ ExpectEq(117, report.lineNumber)
+ ExpectThat(report.err, Error(HasSubstr("TwoIntsToString")))
+ ExpectThat(report.err, Error(HasSubstr("arguments")))
+ ExpectThat(report.err, Error(HasSubstr("expected 2")))
+ ExpectThat(report.err, Error(HasSubstr("got 3")))
+}
+
+func (t *ControllerTest) PartialExpectationCalledTwice() {
+ partial := t.controller.ExpectCall(t.mock1, "StringToInt", "burrito.go", 117)
+ AssertNe(nil, partial("taco"))
+ ExpectEq(nil, partial("taco"))
+
+ // A fatal error should be reported immediately.
+ AssertEq(0, len(t.reporter.errors))
+ AssertEq(1, len(t.reporter.fatalErrors))
+
+ report := t.reporter.fatalErrors[0]
+ ExpectEq("burrito.go", report.fileName)
+ ExpectEq(117, report.lineNumber)
+ ExpectThat(report.err, Error(HasSubstr("called more than once")))
+}
+
+func (t *ControllerTest) HandleMethodCallForUnknownMethod() {
+ ExpectEq(
+ nil,
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "Frobnicate",
+ "burrito.go",
+ 117,
+ []interface{}{}))
+
+ // A fatal error should be reported immediately.
+ AssertEq(0, len(t.reporter.errors))
+ AssertEq(1, len(t.reporter.fatalErrors))
+
+ report := t.reporter.fatalErrors[0]
+ ExpectEq("burrito.go", report.fileName)
+ ExpectEq(117, report.lineNumber)
+ ExpectThat(report.err, Error(HasSubstr("Unknown method")))
+ ExpectThat(report.err, Error(HasSubstr("Frobnicate")))
+}
+
+func (t *ControllerTest) HandleMethodCallGivenWrongNumberOfArgs() {
+ t.controller.ExpectCall(t.mock1, "TwoIntsToString", "", 0)(17, 19)
+
+ ExpectEq(
+ nil,
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "TwoIntsToString",
+ "burrito.go",
+ 117,
+ []interface{}{17, 19, 23}))
+
+ // A fatal error should be reported immediately.
+ AssertEq(0, len(t.reporter.errors))
+ AssertEq(1, len(t.reporter.fatalErrors))
+
+ report := t.reporter.fatalErrors[0]
+ ExpectEq("burrito.go", report.fileName)
+ ExpectEq(117, report.lineNumber)
+ ExpectThat(report.err, Error(HasSubstr("arguments")))
+ ExpectThat(report.err, Error(HasSubstr("expected 2")))
+ ExpectThat(report.err, Error(HasSubstr("got 3")))
+}
+
+func (t *ControllerTest) ExpectThenNonMatchingCall() {
+ // Expectation -- set up a fallback action to make it optional.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "TwoIntsToString",
+ "burrito.go",
+ 117)
+
+ exp := partial(LessThan(10), Equals(2))
+ exp.WillRepeatedly(Return(""))
+
+ // Call
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "TwoIntsToString",
+ "taco.go",
+ 112,
+ []interface{}{8, 1})
+
+ // The error should be reported immediately.
+ AssertEq(1, len(t.reporter.errors))
+ AssertEq(0, len(t.reporter.fatalErrors))
+
+ ExpectEq("taco.go", t.reporter.errors[0].fileName)
+ ExpectEq(112, t.reporter.errors[0].lineNumber)
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("Unexpected")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("TwoIntsToString")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("[8 1]")))
+
+ // Finish should change nothing.
+ t.controller.Finish()
+
+ ExpectEq(1, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) ExplicitCardinalityNotSatisfied() {
+ // Expectation -- set up an explicit cardinality of three.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.Times(3)
+
+ // Call twice.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // The error should not yet be reported.
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+
+ // Finish should cause the error to be reported.
+ t.controller.Finish()
+
+ AssertEq(1, len(t.reporter.errors))
+ AssertEq(0, len(t.reporter.fatalErrors))
+
+ ExpectEq("burrito.go", t.reporter.errors[0].fileName)
+ ExpectEq(117, t.reporter.errors[0].lineNumber)
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("Unsatisfied")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("StringToInt")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("at least 3 times")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("called 2 times")))
+}
+
+func (t *ControllerTest) ImplicitOneTimeActionCountNotSatisfied() {
+ // Expectation -- add three one-time actions.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillOnce(Return(0))
+ exp.WillOnce(Return(1))
+ exp.WillOnce(Return(2))
+
+ // Call twice.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // The error should not yet be reported.
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+
+ // Finish should cause the error to be reported.
+ t.controller.Finish()
+
+ AssertEq(1, len(t.reporter.errors))
+ AssertEq(0, len(t.reporter.fatalErrors))
+
+ ExpectEq("burrito.go", t.reporter.errors[0].fileName)
+ ExpectEq(117, t.reporter.errors[0].lineNumber)
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("Unsatisfied")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("StringToInt")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("at least 3 times")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("called 2 times")))
+}
+
+func (t *ControllerTest) ImplicitOneTimeActionLowerBoundNotSatisfied() {
+ // Expectation -- add three one-time actions and a fallback.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillOnce(Return(0))
+ exp.WillOnce(Return(1))
+ exp.WillOnce(Return(2))
+ exp.WillRepeatedly(Return(3))
+
+ // Call twice.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // The error should not yet be reported.
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+
+ // Finish should cause the error to be reported.
+ t.controller.Finish()
+
+ AssertEq(1, len(t.reporter.errors))
+ AssertEq(0, len(t.reporter.fatalErrors))
+
+ ExpectEq("burrito.go", t.reporter.errors[0].fileName)
+ ExpectEq(117, t.reporter.errors[0].lineNumber)
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("Unsatisfied")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("StringToInt")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("at least 3 times")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("called 2 times")))
+}
+
+func (t *ControllerTest) ImplicitCardinalityOfOneNotSatisfied() {
+ // Expectation -- add no actions.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ partial(HasSubstr(""))
+
+ // Don't call.
+
+ // The error should not yet be reported.
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+
+ // Finish should cause the error to be reported.
+ t.controller.Finish()
+
+ AssertEq(1, len(t.reporter.errors))
+ AssertEq(0, len(t.reporter.fatalErrors))
+
+ ExpectEq("burrito.go", t.reporter.errors[0].fileName)
+ ExpectEq(117, t.reporter.errors[0].lineNumber)
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("Unsatisfied")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("StringToInt")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("at least 1 time")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("called 0 times")))
+}
+
+func (t *ControllerTest) ExplicitCardinalityOverrun() {
+ // Expectation -- call times(2).
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.Times(2)
+
+ // Call three times.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // The error should be reported immediately.
+ AssertEq(1, len(t.reporter.errors))
+ AssertEq(0, len(t.reporter.fatalErrors))
+
+ ExpectEq("burrito.go", t.reporter.errors[0].fileName)
+ ExpectEq(117, t.reporter.errors[0].lineNumber)
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("Unexpected")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("StringToInt")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("at most 2 times")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("called 3 times")))
+
+ // Finish should change nothing.
+ t.controller.Finish()
+
+ ExpectEq(1, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) ImplicitOneTimeActionCountOverrun() {
+ // Expectation -- add a one-time action.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillOnce(Return(0))
+
+ // Call twice.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // The error should be reported immediately.
+ AssertEq(1, len(t.reporter.errors))
+ AssertEq(0, len(t.reporter.fatalErrors))
+
+ ExpectEq("burrito.go", t.reporter.errors[0].fileName)
+ ExpectEq(117, t.reporter.errors[0].lineNumber)
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("Unexpected")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("StringToInt")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("at most 1 time")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("called 2 times")))
+
+ // Finish should change nothing.
+ t.controller.Finish()
+
+ ExpectEq(1, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) ImplicitCardinalityOfOneOverrun() {
+ // Expectation -- don't add any actions.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ partial(HasSubstr(""))
+
+ // Call twice.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // The error should be reported immediately.
+ AssertEq(1, len(t.reporter.errors))
+ AssertEq(0, len(t.reporter.fatalErrors))
+
+ ExpectEq("burrito.go", t.reporter.errors[0].fileName)
+ ExpectEq(117, t.reporter.errors[0].lineNumber)
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("Unexpected")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("StringToInt")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("at most 1 time")))
+ ExpectThat(t.reporter.errors[0].err, Error(HasSubstr("called 2 times")))
+
+ // Finish should change nothing.
+ t.controller.Finish()
+
+ ExpectEq(1, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) ExplicitCardinalitySatisfied() {
+ // Expectation -- set up an explicit cardinality of two.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.Times(2)
+
+ // Call twice.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // There should be no errors.
+ t.controller.Finish()
+
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) ImplicitOneTimeActionCountSatisfied() {
+ // Expectation -- set up two one-time actions.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillOnce(Return(0))
+ exp.WillOnce(Return(1))
+
+ // Call twice.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // There should be no errors.
+ t.controller.Finish()
+
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) ImplicitOneTimeActionLowerBoundJustSatisfied() {
+ // Expectation -- set up two one-time actions and a fallback.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillOnce(Return(0))
+ exp.WillOnce(Return(1))
+ exp.WillRepeatedly(Return(2))
+
+ // Call twice.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // There should be no errors.
+ t.controller.Finish()
+
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) ImplicitOneTimeActionLowerBoundMoreThanSatisfied() {
+ // Expectation -- set up two one-time actions and a fallback.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillOnce(Return(0))
+ exp.WillOnce(Return(1))
+ exp.WillRepeatedly(Return(2))
+
+ // Call four times.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // There should be no errors.
+ t.controller.Finish()
+
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) FallbackActionConfiguredWithZeroCalls() {
+ // Expectation -- set up a fallback action.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillRepeatedly(Return(0))
+
+ // Don't call.
+
+ // There should be no errors.
+ t.controller.Finish()
+
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) FallbackActionConfiguredWithMultipleCalls() {
+ // Expectation -- set up a fallback action.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillRepeatedly(Return(0))
+
+ // Call twice.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // There should be no errors.
+ t.controller.Finish()
+
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) ImplicitCardinalityOfOneSatisfied() {
+ // Expectation -- don't add actions.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ partial(HasSubstr(""))
+
+ // Call once.
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ // There should be no errors.
+ t.controller.Finish()
+
+ ExpectEq(0, len(t.reporter.errors))
+ ExpectEq(0, len(t.reporter.fatalErrors))
+}
+
+func (t *ControllerTest) InvokesOneTimeActions() {
+ var res []interface{}
+
+ // Expectation -- set up two one-time actions.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ suppliedArg := ""
+ expectedReturn := 17
+
+ f := func(s string) int {
+ suppliedArg = s
+ return expectedReturn
+ }
+
+ exp := partial(HasSubstr(""))
+ exp.WillOnce(Invoke(f))
+ exp.WillOnce(Return(1))
+
+ AssertThat(t.reporter.fatalErrors, ElementsAre())
+
+ // Call 0
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{"taco"})
+
+ ExpectEq("taco", suppliedArg)
+ ExpectThat(res, ElementsAre(IdenticalTo(expectedReturn)))
+
+ // Call 1
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(1))
+}
+
+func (t *ControllerTest) InvokesFallbackActionAfterOneTimeActions() {
+ var res []interface{}
+
+ // Expectation -- set up two one-time actions and a fallback.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillOnce(Return(0))
+ exp.WillOnce(Return(1))
+ exp.WillRepeatedly(Return(2))
+
+ // Call 0
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(0))
+
+ // Call 1
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(1))
+
+ // Call 2
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(2))
+
+ // Call 3
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(2))
+}
+
+func (t *ControllerTest) InvokesFallbackActionWithoutOneTimeActions() {
+ var res []interface{}
+
+ // Expectation -- set up only a fallback action.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillRepeatedly(Return(2))
+
+ // Call 0
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(2))
+
+ // Call 1
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(2))
+
+ // Call 2
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(2))
+}
+
+func (t *ControllerTest) ImplicitActionReturnsZeroInts() {
+ var res []interface{}
+
+ // Expectation -- set up a cardinality of two.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.Times(2)
+
+ // Call 0
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(reflect.TypeOf(res[0]), Equals(reflect.TypeOf(int(0))))
+ ExpectThat(res[0], Equals(0))
+
+ // Call 1
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(reflect.TypeOf(res[0]), Equals(reflect.TypeOf(int(0))))
+ ExpectThat(res[0], Equals(0))
+}
+
+func (t *ControllerTest) ImplicitActionReturnsEmptyStrings() {
+ var res []interface{}
+
+ // Expectation -- set up a cardinality of two.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "TwoIntsToString",
+ "burrito.go",
+ 117)
+
+ exp := partial(LessThan(100), LessThan(100))
+ exp.Times(2)
+
+ // Call 0
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "TwoIntsToString",
+ "",
+ 0,
+ []interface{}{0, 0})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(""))
+
+ // Call 1
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "TwoIntsToString",
+ "",
+ 0,
+ []interface{}{0, 0})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(""))
+}
+
+func (t *ControllerTest) ExpectationsAreMatchedLastToFirst() {
+ var res []interface{}
+
+ // General expectation
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillRepeatedly(Return(17))
+
+ // More specific expectation
+ partial = t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp = partial(Equals("taco"))
+ exp.WillRepeatedly(Return(19))
+
+ // Call -- the second expectation should match.
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{"taco"})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(19))
+
+ // Call -- the first expectation should match because the second doesn't.
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{"burrito"})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(17))
+}
+
+func (t *ControllerTest) ExpectationsAreSegregatedByMockObject() {
+ var res []interface{}
+
+ // Expectation for mock1 -- return 17.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillRepeatedly(Return(17))
+
+ // Expectation for mock2 -- return 19.
+ partial = t.controller.ExpectCall(
+ t.mock2,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp = partial(HasSubstr(""))
+ exp.WillRepeatedly(Return(19))
+
+ // Call mock1.
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(17))
+
+ // Call mock2.
+ res = t.controller.HandleMethodCall(
+ t.mock2,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(19))
+}
+
+func (t *ControllerTest) ExpectationsAreSegregatedByMethodName() {
+ var res []interface{}
+
+ // Expectation for StringToInt
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillRepeatedly(Return(17))
+
+ // Expectation for TwoIntsToString
+ partial = t.controller.ExpectCall(
+ t.mock1,
+ "TwoIntsToString",
+ "burrito.go",
+ 117)
+
+ exp = partial(1, 2)
+ exp.WillRepeatedly(Return("taco"))
+
+ // Call StringToInt.
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals(17))
+
+ // Call TwoIntsToString.
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "TwoIntsToString",
+ "",
+ 0,
+ []interface{}{1, 2})
+
+ ExpectThat(len(res), Equals(1))
+ ExpectThat(res[0], Equals("taco"))
+}
+
+func (t *ControllerTest) ActionCallsAgainMatchingDifferentExpectation() {
+ var res []interface{}
+
+ // Expectation for StringToInt
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.WillOnce(Return(17))
+
+ // Expectation for TwoIntsToString -- pretend we call StringToInt.
+ partial = t.controller.ExpectCall(
+ t.mock1,
+ "TwoIntsToString",
+ "burrito.go",
+ 117)
+
+ exp = partial(1, 2)
+ exp.WillOnce(Invoke(func(int, int) string {
+ t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "taco.go",
+ 112,
+ []interface{}{""})
+
+ return "queso"
+ }))
+
+ // Call TwoIntsToString.
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "TwoIntsToString",
+ "",
+ 0,
+ []interface{}{1, 2})
+
+ AssertThat(res, ElementsAre("queso"))
+
+ // Finish. Everything should be satisfied.
+ t.controller.Finish()
+
+ ExpectThat(t.reporter.errors, ElementsAre())
+ ExpectThat(t.reporter.fatalErrors, ElementsAre())
+}
+
+func (t *ControllerTest) ActionCallsAgainMatchingSameExpectation() {
+ var res []interface{}
+
+ // Expectation for StringToInt -- should be called twice. The first time it
+ // should call itself.
+ partial := t.controller.ExpectCall(
+ t.mock1,
+ "StringToInt",
+ "burrito.go",
+ 117)
+
+ exp := partial(HasSubstr(""))
+ exp.Times(2)
+ exp.WillOnce(Invoke(func(string) int {
+ subCallRes := t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "taco.go",
+ 112,
+ []interface{}{""})
+
+ return subCallRes[0].(int) + 19
+ }))
+
+ exp.WillOnce(Return(17))
+
+ // Call.
+ res = t.controller.HandleMethodCall(
+ t.mock1,
+ "StringToInt",
+ "",
+ 0,
+ []interface{}{""})
+
+ AssertThat(res, ElementsAre(17+19))
+
+ // Finish. Everything should be satisfied.
+ t.controller.Finish()
+
+ ExpectThat(t.reporter.errors, ElementsAre())
+ ExpectThat(t.reporter.fatalErrors, ElementsAre())
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/createmock.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/createmock.go
new file mode 100644
index 00000000000..c5427dc8ba9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/createmock.go
@@ -0,0 +1,245 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// createmock is used to generate source code for mock versions of interfaces
+// from installed packages.
+package main
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "go/build"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "regexp"
+ "text/template"
+
+ // Ensure that the generate package, which is used by the generated code, is
+ // installed by goinstall.
+ _ "github.com/smartystreets/assertions/internal/oglemock/generate"
+)
+
+var fSamePackage = flag.Bool(
+ "same_package",
+ false,
+ "Generate output appropriate for including in the same package as the "+
+ "mocked interfaces.")
+
+// tmplStr is a template for the source of a small generated program that
+// prints the mock code for the requested interfaces.
+const tmplStr = `
+{{$interfacePkgPath := .InterfacePkgPath}}
+
+package main
+
+import (
+ {{range $identifier, $import := .Imports}}
+ {{$identifier}} "{{$import}}"
+ {{end}}
+)
+
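+// getTypeForPtr returns the reflect.Type of the type pointed to by ptr.
+// Taking a pointer to an interface and calling Elem is the usual trick for
+// obtaining the reflect.Type of the interface type itself.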
+func getTypeForPtr(ptr interface{}) reflect.Type {
+ return reflect.TypeOf(ptr).Elem()
+}
+
+func main() {
+ // Reduce noise in logging output.
+ log.SetFlags(0)
+
+ interfaces := []reflect.Type{
+ {{range $typeName := .TypeNames}}
+ getTypeForPtr((*{{pathBase $interfacePkgPath}}.{{$typeName}})(nil)),
+ {{end}}
+ }
+
+ err := generate.GenerateMockSource(
+ os.Stdout,
+ "{{.OutputPkgPath}}",
+ interfaces)
+
+ if err != nil {
+ log.Fatalf("Error generating mock source: %v", err)
+ }
+}
+`
+
+// importMap maps each import identifier to the package path it refers to,
+// with one element for each import needed by the generated code.
+type importMap map[string]string
+
+type tmplArg struct {
+ // The full path of the package from which the interfaces come.
+ InterfacePkgPath string
+
+ // The package path to assume for the generated code.
+ OutputPkgPath string
+
+ // Imports needed by the generated code.
+ Imports importMap
+
+ // Names of the types to be mocked, without their package qualifier.
+ TypeNames []string
+}
+
+var unknownPackageRegexp = regexp.MustCompile(
+ `tool\.go:\d+:\d+: cannot find package "([^"]+)"`)
+
+var undefinedInterfaceRegexp = regexp.MustCompile(`tool\.go:\d+: undefined: [\pL_0-9]+\.([\pL_0-9]+)`)
+
+// findUnknownPackage returns the name of the missing package if the
+// 'go build' output indicates that a package wasn't found, or nil otherwise.
+func findUnknownPackage(output []byte) *string {
+ if match := unknownPackageRegexp.FindSubmatch(output); match != nil {
+ res := string(match[1])
+ return &res
+ }
+
+ return nil
+}
+
+// findUndefinedInterface returns the name of the undefined interface if the
+// 'go build' output indicates one, or nil otherwise.
+func findUndefinedInterface(output []byte) *string {
+ if match := undefinedInterfaceRegexp.FindSubmatch(output); match != nil {
+ res := string(match[1])
+ return &res
+ }
+
+ return nil
+}
+
+// run is split out from main so that deferred calls are executed even in the
+// event of an error.
+func run() error {
+ // Reduce noise in logging output.
+ log.SetFlags(0)
+
+ // Check the command-line arguments.
+ flag.Parse()
+
+ cmdLineArgs := flag.Args()
+ if len(cmdLineArgs) < 2 {
+ return errors.New("Usage: createmock [package] [interface ...]")
+ }
+
+ // Create a temporary directory inside of $GOPATH to hold generated code.
+ buildPkg, err := build.Import("github.com/smartystreets/assertions/internal/oglemock", "", build.FindOnly)
+ if err != nil {
+ return errors.New(fmt.Sprintf("Couldn't find oglemock in $GOPATH: %v", err))
+ }
+
+ tmpDir, err := ioutil.TempDir(buildPkg.SrcRoot, "tmp-createmock-")
+ if err != nil {
+ return errors.New(fmt.Sprintf("Creating temp dir: %v", err))
+ }
+
+ defer os.RemoveAll(tmpDir)
+
+ // Create a file to hold generated code.
+ codeFile, err := os.Create(path.Join(tmpDir, "tool.go"))
+ if err != nil {
+ return errors.New(fmt.Sprintf("Couldn't create a file to hold code: %v", err))
+ }
+
+ // Create an appropriate path for the built binary.
+ binaryPath := path.Join(tmpDir, "tool")
+
+ // Create an appropriate template argument.
+ arg := tmplArg{
+ InterfacePkgPath: cmdLineArgs[0],
+ TypeNames: cmdLineArgs[1:],
+ }
+
+ if *fSamePackage {
+ arg.OutputPkgPath = arg.InterfacePkgPath
+ } else {
+ arg.OutputPkgPath = "mock_" + path.Base(arg.InterfacePkgPath)
+ }
+
+ arg.Imports = make(importMap)
+ arg.Imports[path.Base(arg.InterfacePkgPath)] = arg.InterfacePkgPath
+ arg.Imports["generate"] = "github.com/smartystreets/assertions/internal/oglemock/generate"
+ arg.Imports["log"] = "log"
+ arg.Imports["os"] = "os"
+ arg.Imports["reflect"] = "reflect"
+
+ // Execute the template to generate code that will itself generate the mock
+ // code. Write the code to the temp file.
+ tmpl := template.Must(
+ template.New("code").Funcs(
+ template.FuncMap{
+ "pathBase": path.Base,
+ }).Parse(tmplStr))
+ if err := tmpl.Execute(codeFile, arg); err != nil {
+ return errors.New(fmt.Sprintf("Error executing template: %v", err))
+ }
+
+ codeFile.Close()
+
+ // Attempt to build the code.
+ cmd := exec.Command("go", "build", "-o", binaryPath)
+ cmd.Dir = tmpDir
+ buildOutput, err := cmd.CombinedOutput()
+
+ if err != nil {
+ // Did the compilation fail due to the user-specified package not being found?
+ pkg := findUnknownPackage(buildOutput)
+ if pkg != nil && *pkg == arg.InterfacePkgPath {
+ return errors.New(fmt.Sprintf("Unknown package: %s", *pkg))
+ }
+
+ // Did the compilation fail due to an unknown interface?
+ if in := findUndefinedInterface(buildOutput); in != nil {
+ return errors.New(fmt.Sprintf("Unknown interface: %s", *in))
+ }
+
+ // Otherwise return a generic error.
+ return fmt.Errorf(
+ "%s\n\nError building generated code:\n\n"+
+ " %v\n\nPlease report this oglemock bug.",
+ buildOutput,
+ err)
+ }
+
+ // Run the binary.
+ cmd = exec.Command(binaryPath)
+ binaryOutput, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return fmt.Errorf(
+ "%s\n\nError running generated code:\n\n"+
+ " %v\n\nPlease report this oglemock bug.",
+ binaryOutput,
+ err)
+ }
+
+ // Copy its output.
+ _, err = os.Stdout.Write(binaryOutput)
+ if err != nil {
+ return errors.New(fmt.Sprintf("Error copying binary output: %v", err))
+ }
+
+ return nil
+}
+
+func main() {
+ if err := run(); err != nil {
+ fmt.Fprintf(os.Stderr, "%s\n", err.Error())
+ os.Exit(1)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/createmock_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/createmock_test.go
new file mode 100644
index 00000000000..ddfc07a3e8d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/createmock_test.go
@@ -0,0 +1,233 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/build"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "syscall"
+ "testing"
+
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+var dumpNew = flag.Bool("dump_new", false, "Dump new golden files.")
+
+////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////
+
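+// tempDir holds a temporary directory containing the built createmock binary
+// at createmockPath; both are set in SetUpTestSuite and cleared in
+// TearDownTestSuite.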
+var tempDir string
+var createmockPath string
+
+type CreateMockTest struct {
+}
+
+func TestOgletest(t *testing.T) { RunTests(t) }
+func init() { RegisterTestSuite(&CreateMockTest{}) }
+
+func (t *CreateMockTest) SetUpTestSuite() {
+ // Create a temporary directory to hold the built createmock binary. Assign
+ // to the package-level tempDir (rather than declaring a new local with :=)
+ // so that TearDownTestSuite can remove it.
+ var err error
+ tempDir, err = ioutil.TempDir("", "createmock-")
+ if err != nil {
+ panic("Creating temporary directory: " + err.Error())
+ }
+
+ createmockPath = path.Join(tempDir, "createmock")
+
+ // Build the createmock tool so that it can be used in the tests below.
+ cmd := exec.Command("go", "build", "-o", createmockPath, "github.com/smartystreets/assertions/internal/oglemock/createmock")
+ if output, err := cmd.CombinedOutput(); err != nil {
+ panic(fmt.Sprintf("Error building createmock: %v\n\n%s", err, output))
+ }
+}
+
+func (t *CreateMockTest) TearDownTestSuite() {
+ // Delete the createmock binary we built above.
+ os.RemoveAll(tempDir)
+ tempDir = ""
+ createmockPath = ""
+}
+
+func (t *CreateMockTest) runGoldenTest(
+ caseName string,
+ expectedReturnCode int,
+ createmockArgs ...string) {
+ // Run createmock.
+ cmd := exec.Command(createmockPath, createmockArgs...)
+ output, err := cmd.CombinedOutput()
+
+ // Make sure the process actually exited.
+ exitError, ok := err.(*exec.ExitError)
+ if err != nil && (!ok || !exitError.Exited()) {
+ panic("exec.Command.CombinedOutput: " + err.Error())
+ }
+
+ // Extract a return code.
+ var actualReturnCode int
+ if exitError != nil {
+ actualReturnCode = exitError.Sys().(syscall.WaitStatus).ExitStatus()
+ }
+
+ // Make sure the return code is correct.
+ ExpectEq(expectedReturnCode, actualReturnCode)
+
+ // Read the golden file.
+ goldenPath := path.Join("testdata", "golden."+caseName)
+ goldenData := readFileOrDie(goldenPath)
+
+ // Compare the two.
+ identical := (string(output) == string(goldenData))
+ ExpectTrue(identical, "Output doesn't match for case '%s'.", caseName)
+
+ // Write out a new golden file if requested.
+ if !identical && *dumpNew {
+ writeContentsToFileOrDie(output, goldenPath)
+ }
+}
+
+// runCompilationTest ensures that when createmock is run with the supplied
+// args, it produces output that can be compiled.
+func (t *CreateMockTest) runCompilationTest(createmockArgs ...string) {
+ // Create a temporary directory inside of $GOPATH to hold generated code.
+ buildPkg, err := build.Import("github.com/smartystreets/assertions/internal/oglemock", "", build.FindOnly)
+ AssertEq(nil, err)
+
+ tmpDir, err := ioutil.TempDir(buildPkg.SrcRoot, "tmp-createmock_test-")
+ AssertEq(nil, err)
+ defer os.RemoveAll(tmpDir)
+
+ // Create a file to hold the mock code.
+ codeFile, err := os.Create(path.Join(tmpDir, "mock.go"))
+ AssertEq(nil, err)
+
+ // Run createmock and save its output to the file created above.
+ stdErrBuf := new(bytes.Buffer)
+
+ cmd := exec.Command(createmockPath, createmockArgs...)
+ cmd.Stdout = codeFile
+ cmd.Stderr = stdErrBuf
+
+ err = cmd.Run()
+ AssertEq(nil, err, "createmock stderr output:\n\n%s", stdErrBuf.String())
+ codeFile.Close()
+
+ // Run 'go build' in the directory and make sure it exits with return code
+ // zero.
+ cmd = exec.Command("go", "build")
+ cmd.Dir = tmpDir
+ output, err := cmd.CombinedOutput()
+
+ ExpectEq(nil, err, "go build output:\n\n%s", output)
+}
+
+func writeContentsToFileOrDie(contents []byte, path string) {
+ if err := ioutil.WriteFile(path, contents, 0600); err != nil {
+ panic("ioutil.WriteFile: " + err.Error())
+ }
+}
+
+func readFileOrDie(path string) []byte {
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ panic("ioutil.ReadFile: " + err.Error())
+ }
+
+ return contents
+}
+
+////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////
+
+func (t *CreateMockTest) NoPackage() {
+ t.runGoldenTest(
+ "no_package",
+ 1)
+}
+
+func (t *CreateMockTest) NoInterfaces() {
+ t.runGoldenTest(
+ "no_interfaces",
+ 1,
+ "io")
+}
+
+func (t *CreateMockTest) UnknownPackage() {
+ t.runGoldenTest(
+ "unknown_package",
+ 1,
+ "foo/bar",
+ "Reader")
+}
+
+func (t *CreateMockTest) UnknownInterface() {
+ t.runGoldenTest(
+ "unknown_interface",
+ 1,
+ "io",
+ "Frobnicator")
+}
+
+func (t *CreateMockTest) GCSBucket() {
+ t.runGoldenTest(
+ "gcs_bucket",
+ 0,
+ "github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/gcs",
+ "Bucket")
+}
+
+func (t *CreateMockTest) GCSBucket_SamePackage() {
+ t.runGoldenTest(
+ "gcs_bucket_same_package",
+ 0,
+ "--same_package",
+ "github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/gcs",
+ "Bucket")
+}
+
+func (t *CreateMockTest) IoReaderAndWriter() {
+ t.runCompilationTest(
+ "io",
+ "Reader",
+ "Writer")
+}
+
+func (t *CreateMockTest) OsFileInfo() {
+ // Note that os is also used by the code that createmock generates; there
+ // should be no conflict.
+ t.runCompilationTest(
+ "os",
+ "FileInfo")
+}
+
+func (t *CreateMockTest) ComplicatedSamplePackage() {
+ t.runCompilationTest(
+ "github.com/smartystreets/assertions/internal/oglemock/generate/testdata/complicated_pkg",
+ "ComplicatedThing")
+}
+
+func (t *CreateMockTest) RenamedSamplePackage() {
+ t.runCompilationTest(
+ "github.com/smartystreets/assertions/internal/oglemock/generate/testdata/renamed_pkg",
+ "SomeInterface")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/gcs/bucket.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/gcs/bucket.go
new file mode 100644
index 00000000000..da714f305c5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/gcs/bucket.go
@@ -0,0 +1,23 @@
+package gcs
+
+import "golang.org/x/net/context"
+
+type Bucket interface {
+ Name() string
+ CreateObject(context.Context, *CreateObjectRequest) (*Object, error)
+ CopyObject(ctx context.Context, req *CopyObjectRequest) (o *Object, err error)
+}
+
+type Object struct {
+}
+
+type CreateObjectRequest struct {
+}
+
+type CopyObjectRequest struct {
+}
+
+type Int int
+type Array []int
+type Chan <-chan int
+type Ptr *int
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.gcs_bucket b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.gcs_bucket
new file mode 100644
index 00000000000..05a5114546c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.gcs_bucket
@@ -0,0 +1,125 @@
+// This file was auto-generated using createmock. See the following page for
+// more information:
+//
+// https://github.com/smartystreets/assertions/internal/oglemock
+//
+
+package mock_gcs
+
+import (
+ fmt "fmt"
+ oglemock "github.com/smartystreets/assertions/internal/oglemock"
+ gcs "github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/gcs"
+ context "golang.org/x/net/context"
+ runtime "runtime"
+ unsafe "unsafe"
+)
+
+type MockBucket interface {
+ gcs.Bucket
+ oglemock.MockObject
+}
+
+type mockBucket struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockBucket(
+ c oglemock.Controller,
+ desc string) MockBucket {
+ return &mockBucket{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockBucket) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockBucket) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockBucket) CopyObject(p0 context.Context, p1 *gcs.CopyObjectRequest) (o0 *gcs.Object, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "CopyObject",
+ file,
+ line,
+ []interface{}{p0, p1})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockBucket.CopyObject: invalid return values: %v", retVals))
+ }
+
+ // o0 *gcs.Object
+ if retVals[0] != nil {
+ o0 = retVals[0].(*gcs.Object)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+func (m *mockBucket) CreateObject(p0 context.Context, p1 *gcs.CreateObjectRequest) (o0 *gcs.Object, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "CreateObject",
+ file,
+ line,
+ []interface{}{p0, p1})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockBucket.CreateObject: invalid return values: %v", retVals))
+ }
+
+ // o0 *gcs.Object
+ if retVals[0] != nil {
+ o0 = retVals[0].(*gcs.Object)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+func (m *mockBucket) Name() (o0 string) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Name",
+ file,
+ line,
+ []interface{}{})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockBucket.Name: invalid return values: %v", retVals))
+ }
+
+ // o0 string
+ if retVals[0] != nil {
+ o0 = retVals[0].(string)
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.gcs_bucket_same_package b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.gcs_bucket_same_package
new file mode 100644
index 00000000000..d78819076f5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.gcs_bucket_same_package
@@ -0,0 +1,124 @@
+// This file was auto-generated using createmock. See the following page for
+// more information:
+//
+// https://github.com/smartystreets/assertions/internal/oglemock
+//
+
+package gcs
+
+import (
+ fmt "fmt"
+ oglemock "github.com/smartystreets/assertions/internal/oglemock"
+ context "golang.org/x/net/context"
+ runtime "runtime"
+ unsafe "unsafe"
+)
+
+type MockBucket interface {
+ Bucket
+ oglemock.MockObject
+}
+
+type mockBucket struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockBucket(
+ c oglemock.Controller,
+ desc string) MockBucket {
+ return &mockBucket{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockBucket) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockBucket) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockBucket) CopyObject(p0 context.Context, p1 *CopyObjectRequest) (o0 *Object, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "CopyObject",
+ file,
+ line,
+ []interface{}{p0, p1})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockBucket.CopyObject: invalid return values: %v", retVals))
+ }
+
+ // o0 *Object
+ if retVals[0] != nil {
+ o0 = retVals[0].(*Object)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+func (m *mockBucket) CreateObject(p0 context.Context, p1 *CreateObjectRequest) (o0 *Object, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "CreateObject",
+ file,
+ line,
+ []interface{}{p0, p1})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockBucket.CreateObject: invalid return values: %v", retVals))
+ }
+
+ // o0 *Object
+ if retVals[0] != nil {
+ o0 = retVals[0].(*Object)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+func (m *mockBucket) Name() (o0 string) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Name",
+ file,
+ line,
+ []interface{}{})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockBucket.Name: invalid return values: %v", retVals))
+ }
+
+ // o0 string
+ if retVals[0] != nil {
+ o0 = retVals[0].(string)
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.no_interfaces b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.no_interfaces
new file mode 100644
index 00000000000..b70535fae6b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.no_interfaces
@@ -0,0 +1 @@
+Usage: createmock [package] [interface ...]
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.no_package b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.no_package
new file mode 100644
index 00000000000..b70535fae6b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.no_package
@@ -0,0 +1 @@
+Usage: createmock [package] [interface ...]
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.unknown_interface b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.unknown_interface
new file mode 100644
index 00000000000..c32950a1790
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.unknown_interface
@@ -0,0 +1 @@
+Unknown interface: Frobnicator
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.unknown_package b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.unknown_package
new file mode 100644
index 00000000000..d07e915d2cf
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/golden.unknown_package
@@ -0,0 +1 @@
+Unknown package: foo/bar
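
Taken together, the four one-line golden files above pin down createmock's CLI surface: judging from the usage string, a well-formed invocation names an import path followed by one or more interface names (for example, createmock io Reader Writer), while a bad interface or package name yields the corresponding "Unknown ..." line on the way out.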
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/do_all.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/do_all.go
new file mode 100644
index 00000000000..c0cd3ffbd69
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/do_all.go
@@ -0,0 +1,53 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Create an Action that invokes the supplied actions one after another. The
+// return values from the final action are used; others are ignored.
+func DoAll(first Action, others ...Action) Action {
+ return &doAll{
+ wrapped: append([]Action{first}, others...),
+ }
+}
+
+type doAll struct {
+ wrapped []Action
+}
+
+func (a *doAll) SetSignature(signature reflect.Type) (err error) {
+ for i, w := range a.wrapped {
+ err = w.SetSignature(signature)
+ if err != nil {
+ err = fmt.Errorf("Action %v: %v", i, err)
+ return
+ }
+ }
+
+ return
+}
+
+func (a *doAll) Invoke(methodArgs []interface{}) (rets []interface{}) {
+ for _, w := range a.wrapped {
+ rets = w.Invoke(methodArgs)
+ }
+
+ return
+}
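
The test file that follows exercises DoAll through SetSignature and Invoke directly. As a compact variant, here is a sketch composing Invoke (for a side effect) with Return (for the final values); both constructors' signatures are assumed from their use in that test:

    package oglemock_test

    import (
        "fmt"
        "reflect"

        "github.com/smartystreets/assertions/internal/oglemock"
    )

    func doAllSketch() {
        // The signature the composed action must satisfy.
        f := func(a int) string { return "" }

        action := oglemock.DoAll(
            // First: observe the argument; its return value is discarded.
            oglemock.Invoke(func(a int) string {
                fmt.Println("called with", a)
                return "ignored"
            }),
            // Last: supply the values the mock actually returns.
            oglemock.Return("taco"))

        if err := action.SetSignature(reflect.TypeOf(f)); err != nil {
            panic(err)
        }

        rets := action.Invoke([]interface{}{17}) // prints "called with 17"
        fmt.Println(rets[0])                     // taco
    }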
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/do_all_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/do_all_test.go
new file mode 100644
index 00000000000..f835b66c7c5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/do_all_test.go
@@ -0,0 +1,90 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock_test
+
+import (
+ "reflect"
+ "testing"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ "github.com/smartystreets/assertions/internal/oglemock"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+func TestDoAll(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////
+// Boilerplate
+////////////////////////////////////////////////////////////
+
+type DoAllTest struct {
+}
+
+func init() { RegisterTestSuite(&DoAllTest{}) }
+
+////////////////////////////////////////////////////////////
+// Test functions
+////////////////////////////////////////////////////////////
+
+func (t *DoAllTest) FirstActionDoesntLikeSignature() {
+ f := func(a int, b string) {}
+
+ a0 := oglemock.Invoke(func() {})
+ a1 := oglemock.Invoke(f)
+ a2 := oglemock.Return()
+
+ err := oglemock.DoAll(a0, a1, a2).SetSignature(reflect.TypeOf(f))
+ ExpectThat(err, Error(HasSubstr("Action 0")))
+ ExpectThat(err, Error(HasSubstr("func()")))
+}
+
+func (t *DoAllTest) LastActionDoesntLikeSignature() {
+ f := func(a int, b string) {}
+
+ a0 := oglemock.Invoke(f)
+ a1 := oglemock.Invoke(f)
+ a2 := oglemock.Return(17)
+
+ err := oglemock.DoAll(a0, a1, a2).SetSignature(reflect.TypeOf(f))
+ ExpectThat(err, Error(HasSubstr("Action 2")))
+ ExpectThat(err, Error(HasSubstr("1 vals; expected 0")))
+}
+
+func (t *DoAllTest) SingleAction() {
+ f := func(a int) string { return "" }
+ a0 := oglemock.Return("taco")
+
+ action := oglemock.DoAll(a0)
+ AssertEq(nil, action.SetSignature(reflect.TypeOf(f)))
+
+ rets := action.Invoke([]interface{}{17})
+ ExpectThat(rets, ElementsAre("taco"))
+}
+
+func (t *DoAllTest) MultipleActions() {
+ f := func(a int) string { return "" }
+
+ var saved int
+ a0 := oglemock.SaveArg(0, &saved)
+ a1 := oglemock.Return("taco")
+
+ action := oglemock.DoAll(a0, a1)
+ AssertEq(nil, action.SetSignature(reflect.TypeOf(f)))
+
+ rets := action.Invoke([]interface{}{17})
+ ExpectEq(17, saved)
+ ExpectThat(rets, ElementsAre("taco"))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/doc.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/doc.go
new file mode 100644
index 00000000000..d397f652033
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/doc.go
@@ -0,0 +1,28 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package oglemock provides a mocking framework for unit tests.
+//
+// Among its features are the following:
+//
+// * An extensive and extensible set of matchers for expressing call
+// expectations (provided by the oglematchers package).
+//
+// * Style and semantics similar to Google Mock and Google JS Test.
+//
+// * Easy integration with the ogletest unit testing framework.
+//
+// See https://github.com/smartystreets/assertions/internal/oglemock for more information.
+package oglemock
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/error_reporter.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/error_reporter.go
new file mode 100644
index 00000000000..0c3a65ee187
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/error_reporter.go
@@ -0,0 +1,29 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock
+
+// ErrorReporter is an interface that wraps methods for reporting errors that
+// should cause test failures.
+type ErrorReporter interface {
+ // Report that some failure (e.g. an unsatisfied expectation) occurred. If
+ // known, fileName and lineNumber should contain information about where it
+ // occurred. The test may continue if the test framework supports it.
+ ReportError(fileName string, lineNumber int, err error)
+
+ // Like ReportError, but the test should be halted immediately. It is assumed
+ // that this method does not return.
+ ReportFatalError(fileName string, lineNumber int, err error)
+}
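
A minimal sketch of a concrete reporter, assuming only the standard testing package (upstream wires reporting through ogletest instead, so this adapter is purely illustrative):

    package oglemock_test

    import "testing"

    // testingReporter adapts *testing.T to ErrorReporter. Fatalf stops the
    // test goroutine via runtime.Goexit, satisfying the "does not return"
    // assumption on ReportFatalError.
    type testingReporter struct {
        t *testing.T
    }

    func (r testingReporter) ReportError(file string, line int, err error) {
        r.t.Errorf("%s:%d: %v", file, line, err)
    }

    func (r testingReporter) ReportFatalError(file string, line int, err error) {
        r.t.Fatalf("%s:%d: %v", file, line, err)
    }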
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/expectation.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/expectation.go
new file mode 100644
index 00000000000..d18bfb8bce9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/expectation.go
@@ -0,0 +1,59 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock
+
+// Expectation is an expectation for zero or more calls to a mock method with
+// particular arguments or sets of arguments.
+type Expectation interface {
+ // Times expresses that a matching method call should happen exactly N times.
+ // Times must not be called more than once, and must not be called after
+ // WillOnce or WillRepeatedly.
+ //
+ // The full rules for the cardinality of an expectation are as follows:
+ //
+ // 1. If an explicit cardinality is set with Times(N), then anything other
+ // than exactly N matching calls will cause a test failure.
+ //
+ // 2. Otherwise, if there are any one-time actions set up, then it is
+ // expected there will be at least that many matching calls. If there is
+ // not also a fallback action, then it is expected that there will be
+ // exactly that many.
+ //
+ // 3. Otherwise, if there is a fallback action configured, any number of
+ // matching calls (including zero) is allowed.
+ //
+ // 4. Otherwise, the implicit cardinality is one.
+ //
+ Times(n uint) Expectation
+
+ // WillOnce configures a "one-time action". WillOnce can be called zero or
+ // more times, but must be called after any call to Times and before any call
+ // to WillRepeatedly.
+ //
+ // When matching method calls are made on the mock object, one-time actions
+ // are invoked one per matching call in the order that they were set up until
+ // they are exhausted. Afterward the fallback action, if any, will be used.
+ WillOnce(a Action) Expectation
+
+	// WillRepeatedly configures a "fallback action". WillRepeatedly can be
+	// called at most once, and must not be called before Times or WillOnce.
+ //
+ // Once all one-time actions are exhausted (see above), the fallback action
+ // will be invoked for any further method calls. If WillRepeatedly is not
+ // called, the fallback action is implicitly an action that returns zero
+ // values for the method's return values.
+ WillRepeatedly(a Action) Expectation
+}
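
To make the four cardinality rules concrete, a sketch of the common shapes; ExpectCall's partial-expectation call style is taken from upstream oglemock, and the mock and file/line arguments are placeholders:

    package oglemock_test

    import "github.com/smartystreets/assertions/internal/oglemock"

    func cardinalitySketch(c oglemock.Controller, mock oglemock.MockObject) {
        // Rule 1: exactly two matching calls are required.
        c.ExpectCall(mock, "Name", "caller.go", 1)().Times(2)

        // Rule 2: one one-time action, no fallback: exactly one call.
        c.ExpectCall(mock, "Name", "caller.go", 2)().
            WillOnce(oglemock.Return("first"))

        // Rule 3: a fallback allows any number of further calls.
        c.ExpectCall(mock, "Name", "caller.go", 3)().
            WillOnce(oglemock.Return("first")).
            WillRepeatedly(oglemock.Return("rest"))

        // Rule 4: with none of the above, exactly one call is expected.
        c.ExpectCall(mock, "Name", "caller.go", 4)()
    }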
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/generate.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/generate.go
new file mode 100644
index 00000000000..aca3de5541b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/generate.go
@@ -0,0 +1,369 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package generate implements code generation for mock classes. This is an
+// implementation detail of the createmock command, which you probably want to
+// use directly instead.
+package generate
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io"
+ "path"
+ "reflect"
+ "regexp"
+ "text/template"
+)
+
+const gTmplStr = `
+// This file was auto-generated using createmock. See the following page for
+// more information:
+//
+// https://github.com/smartystreets/assertions/internal/oglemock
+//
+
+package {{pathBase .OutputPkgPath}}
+
+import (
+ {{range $identifier, $import := .Imports}}{{$identifier}} "{{$import}}"
+ {{end}}
+)
+
+{{range .Interfaces}}
+ {{$interfaceName := printf "Mock%s" .Name}}
+ {{$structName := printf "mock%s" .Name}}
+
+ type {{$interfaceName}} interface {
+ {{getTypeString .}}
+ oglemock.MockObject
+ }
+
+ type {{$structName}} struct {
+ controller oglemock.Controller
+ description string
+ }
+
+ func New{{printf "Mock%s" .Name}}(
+ c oglemock.Controller,
+ desc string) {{$interfaceName}} {
+ return &{{$structName}}{
+ controller: c,
+ description: desc,
+ }
+ }
+
+ func (m *{{$structName}}) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+ }
+
+ func (m *{{$structName}}) Oglemock_Description() string {
+ return m.description
+ }
+
+ {{range getMethods .}}
+ {{$funcType := .Type}}
+ {{$inputTypes := getInputs $funcType}}
+ {{$outputTypes := getOutputs $funcType}}
+
+ func (m *{{$structName}}) {{.Name}}({{range $i, $type := $inputTypes}}p{{$i}} {{getInputTypeString $i $funcType}}, {{end}}) ({{range $i, $type := $outputTypes}}o{{$i}} {{getTypeString $type}}, {{end}}) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "{{.Name}}",
+ file,
+ line,
+ []interface{}{ {{range $i, $type := $inputTypes}}p{{$i}}, {{end}} })
+
+ if len(retVals) != {{len $outputTypes}} {
+ panic(fmt.Sprintf("{{$structName}}.{{.Name}}: invalid return values: %v", retVals))
+ }
+
+ {{range $i, $type := $outputTypes}}
+ // o{{$i}} {{getTypeString $type}}
+ if retVals[{{$i}}] != nil {
+ o{{$i}} = retVals[{{$i}}].({{getTypeString $type}})
+ }
+ {{end}}
+
+ return
+ }
+ {{end}}
+{{end}}
+`
+
+type tmplArg struct {
+ // The set of interfaces to mock, and the full name of the package from which
+ // they all come.
+ Interfaces []reflect.Type
+ InterfacePkgPath string
+
+	// The package path for the generated code.
+ OutputPkgPath string
+
+ // Imports needed by the interfaces.
+ Imports importMap
+}
+
+func (a *tmplArg) getInputTypeString(i int, ft reflect.Type) string {
+ numInputs := ft.NumIn()
+ if i == numInputs-1 && ft.IsVariadic() {
+ return "..." + a.getTypeString(ft.In(i).Elem())
+ }
+
+ return a.getTypeString(ft.In(i))
+}
+
+func (a *tmplArg) getTypeString(t reflect.Type) string {
+ return typeString(t, a.OutputPkgPath)
+}
+
+func getMethods(it reflect.Type) []reflect.Method {
+ numMethods := it.NumMethod()
+ methods := make([]reflect.Method, numMethods)
+
+ for i := 0; i < numMethods; i++ {
+ methods[i] = it.Method(i)
+ }
+
+ return methods
+}
+
+func getInputs(ft reflect.Type) []reflect.Type {
+ numIn := ft.NumIn()
+ inputs := make([]reflect.Type, numIn)
+
+ for i := 0; i < numIn; i++ {
+ inputs[i] = ft.In(i)
+ }
+
+ return inputs
+}
+
+func getOutputs(ft reflect.Type) []reflect.Type {
+ numOut := ft.NumOut()
+ outputs := make([]reflect.Type, numOut)
+
+ for i := 0; i < numOut; i++ {
+ outputs[i] = ft.Out(i)
+ }
+
+ return outputs
+}
+
+// A map from import identifier to the package that identifier stands for,
+// with an element for each import needed by a set of mocked interfaces.
+type importMap map[string]string
+
+var typePackageIdentifierRegexp = regexp.MustCompile(`^([\pL_0-9]+)\.[\pL_0-9]+$`)
+
+// Add an import for the supplied type, without recursing.
+func addImportForType(imports importMap, t reflect.Type) {
+ // If there is no package path, this is a built-in type and we don't need an
+ // import.
+ pkgPath := t.PkgPath()
+ if pkgPath == "" {
+ return
+ }
+
+ // Work around a bug in Go:
+ //
+ // http://code.google.com/p/go/issues/detail?id=2660
+ //
+ var errorPtr *error
+ if t == reflect.TypeOf(errorPtr).Elem() {
+ return
+ }
+
+ // Use the identifier that's part of the type's string representation as the
+ // import identifier. This means that we'll do the right thing for package
+ // "foo/bar" with declaration "package baz".
+ match := typePackageIdentifierRegexp.FindStringSubmatch(t.String())
+ if match == nil {
+ return
+ }
+
+ imports[match[1]] = pkgPath
+}
+
+// Add all necessary imports for the type, recursing as appropriate.
+func addImportsForType(imports importMap, t reflect.Type) {
+ // Add any import needed for the type itself.
+ addImportForType(imports, t)
+
+ // Handle special cases where recursion is needed.
+ switch t.Kind() {
+ case reflect.Array, reflect.Chan, reflect.Ptr, reflect.Slice:
+ addImportsForType(imports, t.Elem())
+
+ case reflect.Func:
+ // Input parameters.
+ for i := 0; i < t.NumIn(); i++ {
+ addImportsForType(imports, t.In(i))
+ }
+
+ // Return values.
+ for i := 0; i < t.NumOut(); i++ {
+ addImportsForType(imports, t.Out(i))
+ }
+
+ case reflect.Map:
+ addImportsForType(imports, t.Key())
+ addImportsForType(imports, t.Elem())
+ }
+}
+
+// Add imports for each of the methods of the interface, but not the interface
+// itself.
+func addImportsForInterfaceMethods(imports importMap, it reflect.Type) {
+ // Handle each method.
+ for i := 0; i < it.NumMethod(); i++ {
+ m := it.Method(i)
+ addImportsForType(imports, m.Type)
+ }
+}
+
+// Given a set of interfaces, return a map from import identifier to the
+// package that identifier stands for, with an element for each import needed
+// by the mock versions of those interfaces in a package with the given path.
+func getImports(
+ interfaces []reflect.Type,
+ pkgPath string) importMap {
+ imports := make(importMap)
+ for _, it := range interfaces {
+ addImportForType(imports, it)
+ addImportsForInterfaceMethods(imports, it)
+ }
+
+ // Make sure there are imports for other types used by the generated code
+ // itself.
+ imports["fmt"] = "fmt"
+ imports["oglemock"] = "github.com/smartystreets/assertions/internal/oglemock"
+ imports["runtime"] = "runtime"
+ imports["unsafe"] = "unsafe"
+
+ // Remove any self-imports generated above.
+ for k, v := range imports {
+ if v == pkgPath {
+ delete(imports, k)
+ }
+ }
+
+ return imports
+}
+
+// Given a set of interfaces to mock, write out source code for a package
+// with the supplied full package path, containing mock implementations of
+// those interfaces.
+func GenerateMockSource(
+ w io.Writer,
+ outputPkgPath string,
+ interfaces []reflect.Type) (err error) {
+ // Sanity-check arguments.
+ if outputPkgPath == "" {
+ return errors.New("Package path must be non-empty.")
+ }
+
+ if len(interfaces) == 0 {
+ return errors.New("List of interfaces must be non-empty.")
+ }
+
+ // Make sure each type is indeed an interface.
+ for _, it := range interfaces {
+ if it.Kind() != reflect.Interface {
+ return errors.New("Invalid type: " + it.String())
+ }
+ }
+
+ // Make sure each interface is from the same package.
+ interfacePkgPath := interfaces[0].PkgPath()
+ for _, t := range interfaces {
+ if t.PkgPath() != interfacePkgPath {
+ err = fmt.Errorf(
+ "Package path mismatch: %q vs. %q",
+ interfacePkgPath,
+ t.PkgPath())
+
+ return
+ }
+ }
+
+ // Set up an appropriate template arg.
+ arg := tmplArg{
+ Interfaces: interfaces,
+ InterfacePkgPath: interfacePkgPath,
+ OutputPkgPath: outputPkgPath,
+ Imports: getImports(interfaces, outputPkgPath),
+ }
+
+ // Configure and parse the template.
+ tmpl := template.New("code")
+ tmpl.Funcs(template.FuncMap{
+ "pathBase": path.Base,
+ "getMethods": getMethods,
+ "getInputs": getInputs,
+ "getOutputs": getOutputs,
+ "getInputTypeString": arg.getInputTypeString,
+ "getTypeString": arg.getTypeString,
+ })
+
+ _, err = tmpl.Parse(gTmplStr)
+ if err != nil {
+ err = fmt.Errorf("Parse: %v", err)
+ return
+ }
+
+ // Execute the template, collecting the raw output into a buffer.
+ buf := new(bytes.Buffer)
+ if err := tmpl.Execute(buf, arg); err != nil {
+ return err
+ }
+
+ // Parse the output.
+ fset := token.NewFileSet()
+ astFile, err := parser.ParseFile(
+ fset,
+ path.Base(outputPkgPath+".go"),
+ buf,
+ parser.ParseComments)
+
+ if err != nil {
+ err = fmt.Errorf("parser.ParseFile: %v", err)
+ return
+ }
+
+ // Sort the import lines in the AST in the same way that gofmt does.
+ ast.SortImports(fset, astFile)
+
+ // Pretty-print the AST, using the same options that gofmt does by default.
+ cfg := &printer.Config{
+ Mode: printer.UseSpaces | printer.TabIndent,
+ Tabwidth: 8,
+ }
+
+ if err = cfg.Fprint(w, fset, astFile); err != nil {
+ return errors.New("Error pretty printing: " + err.Error())
+ }
+
+ return nil
+}
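
Everything needed to drive the generator from code is visible above; a minimal sketch (the target interface and output package path are arbitrary choices):

    package main

    import (
        "io"
        "log"
        "os"
        "reflect"

        "github.com/smartystreets/assertions/internal/oglemock/generate"
    )

    func main() {
        // Generate a mock for io.Reader into a package named mock_io,
        // writing the gofmt-formatted source to stdout.
        interfaces := []reflect.Type{
            reflect.TypeOf((*io.Reader)(nil)).Elem(),
        }

        if err := generate.GenerateMockSource(os.Stdout, "some/path/mock_io", interfaces); err != nil {
            log.Fatalf("GenerateMockSource: %v", err)
        }
    }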
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/generate_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/generate_test.go
new file mode 100644
index 00000000000..8347e4d030b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/generate_test.go
@@ -0,0 +1,168 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package generate_test
+
+import (
+ "bytes"
+ "flag"
+ "image"
+ "io"
+ "io/ioutil"
+ "path"
+ "reflect"
+ "testing"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ "github.com/smartystreets/assertions/internal/oglemock/generate"
+ "github.com/smartystreets/assertions/internal/oglemock/generate/testdata/complicated_pkg"
+ "github.com/smartystreets/assertions/internal/oglemock/generate/testdata/renamed_pkg"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+var dumpNew = flag.Bool("dump_new", false, "Dump new golden files.")
+
+////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////
+
+type GenerateTest struct {
+}
+
+func TestOgletest(t *testing.T) { RunTests(t) }
+func init() { RegisterTestSuite(&GenerateTest{}) }
+
+func (t *GenerateTest) runGoldenTest(
+ caseName string,
+ outputPkgPath string,
+ nilPtrs ...interface{}) {
+ // Make a slice of interface types to give to GenerateMockSource.
+ interfaces := make([]reflect.Type, len(nilPtrs))
+ for i, ptr := range nilPtrs {
+ interfaces[i] = reflect.TypeOf(ptr).Elem()
+ }
+
+ // Create the mock source.
+ buf := new(bytes.Buffer)
+ err := generate.GenerateMockSource(buf, outputPkgPath, interfaces)
+ AssertEq(nil, err, "Error from GenerateMockSource: %v", err)
+
+ // Read the golden file.
+ goldenPath := path.Join("testdata", "golden."+caseName+".go")
+ goldenData := readFileOrDie(goldenPath)
+
+ // Compare the two.
+ identical := (buf.String() == string(goldenData))
+ ExpectTrue(identical, "Output doesn't match for case '%s'.", caseName)
+
+ // Write out a new golden file if requested.
+ if !identical && *dumpNew {
+ writeContentsToFileOrDie(buf.Bytes(), goldenPath)
+ }
+}
+
+func writeContentsToFileOrDie(contents []byte, path string) {
+ if err := ioutil.WriteFile(path, contents, 0600); err != nil {
+ panic("ioutil.WriteFile: " + err.Error())
+ }
+}
+
+func readFileOrDie(path string) []byte {
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ panic("ioutil.ReadFile: " + err.Error())
+ }
+
+ return contents
+}
+
+////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////
+
+func (t *GenerateTest) EmptyOutputPackagePath() {
+ err := generate.GenerateMockSource(
+ new(bytes.Buffer),
+ "",
+ []reflect.Type{
+ reflect.TypeOf((*io.Reader)(nil)).Elem(),
+ })
+
+ ExpectThat(err, Error(HasSubstr("Package path")))
+ ExpectThat(err, Error(HasSubstr("non-empty")))
+}
+
+func (t *GenerateTest) EmptySetOfInterfaces() {
+ err := generate.GenerateMockSource(
+ new(bytes.Buffer),
+ "foo",
+ []reflect.Type{})
+
+ ExpectThat(err, Error(HasSubstr("interfaces")))
+ ExpectThat(err, Error(HasSubstr("non-empty")))
+}
+
+func (t *GenerateTest) NonInterfaceType() {
+ err := generate.GenerateMockSource(
+ new(bytes.Buffer),
+ "foo",
+ []reflect.Type{
+ reflect.TypeOf((*io.Reader)(nil)).Elem(),
+ reflect.TypeOf(17),
+ reflect.TypeOf((*io.Writer)(nil)).Elem(),
+ })
+
+ ExpectThat(err, Error(HasSubstr("Invalid type")))
+}
+
+func (t *GenerateTest) IoReaderAndWriter() {
+ // Mock io.Reader and io.Writer.
+ t.runGoldenTest(
+ "io_reader_writer",
+ "some/pkg",
+ (*io.Reader)(nil),
+ (*io.Writer)(nil))
+}
+
+func (t *GenerateTest) IoReaderAndWriter_SamePackage() {
+ // Mock io.Reader and io.Writer.
+ t.runGoldenTest(
+ "io_reader_writer_same_package",
+ "io",
+ (*io.Reader)(nil),
+ (*io.Writer)(nil))
+}
+
+func (t *GenerateTest) Image() {
+ t.runGoldenTest(
+ "image",
+ "some/pkg",
+ (*image.Image)(nil),
+ (*image.PalettedImage)(nil))
+}
+
+func (t *GenerateTest) ComplicatedPackage() {
+ t.runGoldenTest(
+ "complicated_pkg",
+ "some/pkg",
+ (*complicated_pkg.ComplicatedThing)(nil))
+}
+
+func (t *GenerateTest) RenamedPackage() {
+ t.runGoldenTest(
+ "renamed_pkg",
+ "some/pkg",
+ (*tony.SomeInterface)(nil))
+}
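
One operational note on runGoldenTest above: when output diverges from a golden file, passing the dump_new flag through to the test binary (for example, go test . -args -dump_new) makes the helper rewrite the golden file in place via writeContentsToFileOrDie, so intentional generator changes can be re-baselined rather than hand-edited.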
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/complicated_pkg/complicated_pkg.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/complicated_pkg/complicated_pkg.go
new file mode 100644
index 00000000000..acc054370d5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/complicated_pkg/complicated_pkg.go
@@ -0,0 +1,41 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package complicated_pkg contains an interface with lots of interesting
+// cases, for use in integration testing.
+package complicated_pkg
+
+import (
+ "image"
+ "io"
+ "net"
+
+ "github.com/smartystreets/assertions/internal/oglemock/generate/testdata/renamed_pkg"
+)
+
+type Byte uint8
+
+type ComplicatedThing interface {
+ Channels(a chan chan<- <-chan net.Conn) chan int
+ Pointers(a *int, b *net.Conn, c **io.Reader) (*int, error)
+ Functions(a func(int, image.Image) int) func(string, int) net.Conn
+ Maps(a map[string]*int) (map[int]*string, error)
+ Arrays(a [3]string) ([3]int, error)
+ Slices(a []string) ([]int, error)
+ NamedScalarType(a Byte) ([]Byte, error)
+ EmptyInterface(a interface{}) (interface{}, error)
+ RenamedPackage(a tony.SomeUint8Alias)
+ Variadic(a int, b ...net.Conn) int
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.complicated_pkg.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.complicated_pkg.go
new file mode 100644
index 00000000000..6bcf1979837
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.complicated_pkg.go
@@ -0,0 +1,311 @@
+// This file was auto-generated using createmock. See the following page for
+// more information:
+//
+// https://github.com/smartystreets/assertions/internal/oglemock
+//
+
+package pkg
+
+import (
+ fmt "fmt"
+ oglemock "github.com/smartystreets/assertions/internal/oglemock"
+ complicated_pkg "github.com/smartystreets/assertions/internal/oglemock/generate/testdata/complicated_pkg"
+ tony "github.com/smartystreets/assertions/internal/oglemock/generate/testdata/renamed_pkg"
+ image "image"
+ io "io"
+ net "net"
+ runtime "runtime"
+ unsafe "unsafe"
+)
+
+type MockComplicatedThing interface {
+ complicated_pkg.ComplicatedThing
+ oglemock.MockObject
+}
+
+type mockComplicatedThing struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockComplicatedThing(
+ c oglemock.Controller,
+ desc string) MockComplicatedThing {
+ return &mockComplicatedThing{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockComplicatedThing) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockComplicatedThing) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockComplicatedThing) Arrays(p0 [3]string) (o0 [3]int, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Arrays",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockComplicatedThing.Arrays: invalid return values: %v", retVals))
+ }
+
+ // o0 [3]int
+ if retVals[0] != nil {
+ o0 = retVals[0].([3]int)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+func (m *mockComplicatedThing) Channels(p0 chan chan<- <-chan net.Conn) (o0 chan int) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Channels",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockComplicatedThing.Channels: invalid return values: %v", retVals))
+ }
+
+ // o0 chan int
+ if retVals[0] != nil {
+ o0 = retVals[0].(chan int)
+ }
+
+ return
+}
+
+func (m *mockComplicatedThing) EmptyInterface(p0 interface{}) (o0 interface{}, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "EmptyInterface",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockComplicatedThing.EmptyInterface: invalid return values: %v", retVals))
+ }
+
+ // o0 interface { }
+ if retVals[0] != nil {
+ o0 = retVals[0].(interface{})
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+func (m *mockComplicatedThing) Functions(p0 func(int, image.Image) int) (o0 func(string, int) net.Conn) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Functions",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockComplicatedThing.Functions: invalid return values: %v", retVals))
+ }
+
+ // o0 func(string, int) (net.Conn)
+ if retVals[0] != nil {
+ o0 = retVals[0].(func(string, int) net.Conn)
+ }
+
+ return
+}
+
+func (m *mockComplicatedThing) Maps(p0 map[string]*int) (o0 map[int]*string, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Maps",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockComplicatedThing.Maps: invalid return values: %v", retVals))
+ }
+
+ // o0 map[int]*string
+ if retVals[0] != nil {
+ o0 = retVals[0].(map[int]*string)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+func (m *mockComplicatedThing) NamedScalarType(p0 complicated_pkg.Byte) (o0 []complicated_pkg.Byte, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "NamedScalarType",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockComplicatedThing.NamedScalarType: invalid return values: %v", retVals))
+ }
+
+ // o0 []complicated_pkg.Byte
+ if retVals[0] != nil {
+ o0 = retVals[0].([]complicated_pkg.Byte)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+func (m *mockComplicatedThing) Pointers(p0 *int, p1 *net.Conn, p2 **io.Reader) (o0 *int, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Pointers",
+ file,
+ line,
+ []interface{}{p0, p1, p2})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockComplicatedThing.Pointers: invalid return values: %v", retVals))
+ }
+
+ // o0 *int
+ if retVals[0] != nil {
+ o0 = retVals[0].(*int)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+func (m *mockComplicatedThing) RenamedPackage(p0 tony.SomeUint8Alias) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "RenamedPackage",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 0 {
+ panic(fmt.Sprintf("mockComplicatedThing.RenamedPackage: invalid return values: %v", retVals))
+ }
+
+ return
+}
+
+func (m *mockComplicatedThing) Slices(p0 []string) (o0 []int, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Slices",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockComplicatedThing.Slices: invalid return values: %v", retVals))
+ }
+
+ // o0 []int
+ if retVals[0] != nil {
+ o0 = retVals[0].([]int)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+func (m *mockComplicatedThing) Variadic(p0 int, p1 ...net.Conn) (o0 int) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Variadic",
+ file,
+ line,
+ []interface{}{p0, p1})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockComplicatedThing.Variadic: invalid return values: %v", retVals))
+ }
+
+ // o0 int
+ if retVals[0] != nil {
+ o0 = retVals[0].(int)
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.image.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.image.go
new file mode 100644
index 00000000000..dd083e2930e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.image.go
@@ -0,0 +1,238 @@
+// This file was auto-generated using createmock. See the following page for
+// more information:
+//
+// https://github.com/smartystreets/assertions/internal/oglemock
+//
+
+package pkg
+
+import (
+ fmt "fmt"
+ oglemock "github.com/smartystreets/assertions/internal/oglemock"
+ image "image"
+ color "image/color"
+ runtime "runtime"
+ unsafe "unsafe"
+)
+
+type MockImage interface {
+ image.Image
+ oglemock.MockObject
+}
+
+type mockImage struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockImage(
+ c oglemock.Controller,
+ desc string) MockImage {
+ return &mockImage{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockImage) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockImage) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockImage) At(p0 int, p1 int) (o0 color.Color) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "At",
+ file,
+ line,
+ []interface{}{p0, p1})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockImage.At: invalid return values: %v", retVals))
+ }
+
+ // o0 color.Color
+ if retVals[0] != nil {
+ o0 = retVals[0].(color.Color)
+ }
+
+ return
+}
+
+func (m *mockImage) Bounds() (o0 image.Rectangle) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Bounds",
+ file,
+ line,
+ []interface{}{})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockImage.Bounds: invalid return values: %v", retVals))
+ }
+
+ // o0 image.Rectangle
+ if retVals[0] != nil {
+ o0 = retVals[0].(image.Rectangle)
+ }
+
+ return
+}
+
+func (m *mockImage) ColorModel() (o0 color.Model) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "ColorModel",
+ file,
+ line,
+ []interface{}{})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockImage.ColorModel: invalid return values: %v", retVals))
+ }
+
+ // o0 color.Model
+ if retVals[0] != nil {
+ o0 = retVals[0].(color.Model)
+ }
+
+ return
+}
+
+type MockPalettedImage interface {
+ image.PalettedImage
+ oglemock.MockObject
+}
+
+type mockPalettedImage struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockPalettedImage(
+ c oglemock.Controller,
+ desc string) MockPalettedImage {
+ return &mockPalettedImage{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockPalettedImage) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockPalettedImage) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockPalettedImage) At(p0 int, p1 int) (o0 color.Color) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "At",
+ file,
+ line,
+ []interface{}{p0, p1})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockPalettedImage.At: invalid return values: %v", retVals))
+ }
+
+ // o0 color.Color
+ if retVals[0] != nil {
+ o0 = retVals[0].(color.Color)
+ }
+
+ return
+}
+
+func (m *mockPalettedImage) Bounds() (o0 image.Rectangle) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Bounds",
+ file,
+ line,
+ []interface{}{})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockPalettedImage.Bounds: invalid return values: %v", retVals))
+ }
+
+ // o0 image.Rectangle
+ if retVals[0] != nil {
+ o0 = retVals[0].(image.Rectangle)
+ }
+
+ return
+}
+
+func (m *mockPalettedImage) ColorIndexAt(p0 int, p1 int) (o0 uint8) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "ColorIndexAt",
+ file,
+ line,
+ []interface{}{p0, p1})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockPalettedImage.ColorIndexAt: invalid return values: %v", retVals))
+ }
+
+ // o0 uint8
+ if retVals[0] != nil {
+ o0 = retVals[0].(uint8)
+ }
+
+ return
+}
+
+func (m *mockPalettedImage) ColorModel() (o0 color.Model) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "ColorModel",
+ file,
+ line,
+ []interface{}{})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockPalettedImage.ColorModel: invalid return values: %v", retVals))
+ }
+
+ // o0 color.Model
+ if retVals[0] != nil {
+ o0 = retVals[0].(color.Model)
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.io_reader_writer.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.io_reader_writer.go
new file mode 100644
index 00000000000..2d1c7df0490
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.io_reader_writer.go
@@ -0,0 +1,127 @@
+// This file was auto-generated using createmock. See the following page for
+// more information:
+//
+// https://github.com/smartystreets/assertions/internal/oglemock
+//
+
+package pkg
+
+import (
+ fmt "fmt"
+ oglemock "github.com/smartystreets/assertions/internal/oglemock"
+ io "io"
+ runtime "runtime"
+ unsafe "unsafe"
+)
+
+type MockReader interface {
+ io.Reader
+ oglemock.MockObject
+}
+
+type mockReader struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockReader(
+ c oglemock.Controller,
+ desc string) MockReader {
+ return &mockReader{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockReader) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockReader) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockReader) Read(p0 []uint8) (o0 int, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Read",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockReader.Read: invalid return values: %v", retVals))
+ }
+
+ // o0 int
+ if retVals[0] != nil {
+ o0 = retVals[0].(int)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+type MockWriter interface {
+ io.Writer
+ oglemock.MockObject
+}
+
+type mockWriter struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockWriter(
+ c oglemock.Controller,
+ desc string) MockWriter {
+ return &mockWriter{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockWriter) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockWriter) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockWriter) Write(p0 []uint8) (o0 int, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Write",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockWriter.Write: invalid return values: %v", retVals))
+ }
+
+ // o0 int
+ if retVals[0] != nil {
+ o0 = retVals[0].(int)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.io_reader_writer_same_package.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.io_reader_writer_same_package.go
new file mode 100644
index 00000000000..86c4b0391e6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.io_reader_writer_same_package.go
@@ -0,0 +1,126 @@
+// This file was auto-generated using createmock. See the following page for
+// more information:
+//
+// https://github.com/smartystreets/assertions/internal/oglemock
+//
+
+package io
+
+import (
+ fmt "fmt"
+ oglemock "github.com/smartystreets/assertions/internal/oglemock"
+ runtime "runtime"
+ unsafe "unsafe"
+)
+
+type MockReader interface {
+ Reader
+ oglemock.MockObject
+}
+
+type mockReader struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockReader(
+ c oglemock.Controller,
+ desc string) MockReader {
+ return &mockReader{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockReader) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockReader) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockReader) Read(p0 []uint8) (o0 int, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Read",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockReader.Read: invalid return values: %v", retVals))
+ }
+
+ // o0 int
+ if retVals[0] != nil {
+ o0 = retVals[0].(int)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
+
+type MockWriter interface {
+ Writer
+ oglemock.MockObject
+}
+
+type mockWriter struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockWriter(
+ c oglemock.Controller,
+ desc string) MockWriter {
+ return &mockWriter{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockWriter) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockWriter) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockWriter) Write(p0 []uint8) (o0 int, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Write",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockWriter.Write: invalid return values: %v", retVals))
+ }
+
+ // o0 int
+ if retVals[0] != nil {
+ o0 = retVals[0].(int)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.renamed_pkg.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.renamed_pkg.go
new file mode 100644
index 00000000000..fe3d313007a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/golden.renamed_pkg.go
@@ -0,0 +1,66 @@
+// This file was auto-generated using createmock. See the following page for
+// more information:
+//
+// https://github.com/smartystreets/assertions/internal/oglemock
+//
+
+package pkg
+
+import (
+ fmt "fmt"
+ oglemock "github.com/smartystreets/assertions/internal/oglemock"
+ tony "github.com/smartystreets/assertions/internal/oglemock/generate/testdata/renamed_pkg"
+ runtime "runtime"
+ unsafe "unsafe"
+)
+
+type MockSomeInterface interface {
+ tony.SomeInterface
+ oglemock.MockObject
+}
+
+type mockSomeInterface struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockSomeInterface(
+ c oglemock.Controller,
+ desc string) MockSomeInterface {
+ return &mockSomeInterface{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockSomeInterface) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockSomeInterface) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockSomeInterface) DoFoo(p0 int) (o0 int) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "DoFoo",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockSomeInterface.DoFoo: invalid return values: %v", retVals))
+ }
+
+ // o0 int
+ if retVals[0] != nil {
+ o0 = retVals[0].(int)
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/renamed_pkg/renamed_pkg.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/renamed_pkg/renamed_pkg.go
new file mode 100644
index 00000000000..1461cd6960d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/testdata/renamed_pkg/renamed_pkg.go
@@ -0,0 +1,24 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A package that calls itself something different from what its package path
+// would have you believe.
+package tony
+
+type SomeUint8Alias uint8
+
+type SomeInterface interface {
+ DoFoo(a int) int
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/type_string.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/type_string.go
new file mode 100644
index 00000000000..c4d46e718d9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/type_string.go
@@ -0,0 +1,147 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package generate
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Return the string that should be used to refer to the supplied type within
+// the given package. The output is not guaranteed to be pretty, and should be
+// run through a tool like gofmt afterward.
+//
+// For example, a pointer to an io.Reader may be rendered as "*Reader" or
+// "*io.Reader" depending on whether the package path is "io" or not.
+func typeString(
+ t reflect.Type,
+ pkgPath string) (s string) {
+ // Is this type named? If so, we use its name, possibly with a package prefix.
+ //
+ // Examples:
+ //
+ // int
+ // string
+ // error
+ // gcs.Bucket
+ //
+ if t.Name() != "" {
+ if t.PkgPath() == pkgPath {
+ s = t.Name()
+ } else {
+ s = t.String()
+ }
+
+ return
+ }
+
+ // This type is unnamed. Recurse.
+ switch t.Kind() {
+ case reflect.Array:
+ s = fmt.Sprintf("[%d]%s", t.Len(), typeString(t.Elem(), pkgPath))
+
+ case reflect.Chan:
+ s = fmt.Sprintf("%s %s", t.ChanDir(), typeString(t.Elem(), pkgPath))
+
+ case reflect.Func:
+ s = typeString_Func(t, pkgPath)
+
+ case reflect.Interface:
+ s = typeString_Interface(t, pkgPath)
+
+ case reflect.Map:
+ s = fmt.Sprintf(
+ "map[%s]%s",
+ typeString(t.Key(), pkgPath),
+ typeString(t.Elem(), pkgPath))
+
+ case reflect.Ptr:
+ s = fmt.Sprintf("*%s", typeString(t.Elem(), pkgPath))
+
+ case reflect.Slice:
+ s = fmt.Sprintf("[]%s", typeString(t.Elem(), pkgPath))
+
+ case reflect.Struct:
+ s = typeString_Struct(t, pkgPath)
+
+ default:
+ log.Panicf("Unhandled kind %v for type: %v", t.Kind(), t)
+ }
+
+ return
+}
+
+func typeString_FuncOrMethod(
+ name string,
+ t reflect.Type,
+ pkgPath string) (s string) {
+ // Deal with input types.
+ var in []string
+ for i := 0; i < t.NumIn(); i++ {
+ in = append(in, typeString(t.In(i), pkgPath))
+ }
+
+ // And output types.
+ var out []string
+ for i := 0; i < t.NumOut(); i++ {
+ out = append(out, typeString(t.Out(i), pkgPath))
+ }
+
+ // Put it all together.
+ s = fmt.Sprintf(
+ "%s(%s) (%s)",
+ name,
+ strings.Join(in, ", "),
+ strings.Join(out, ", "))
+
+ return
+}
+
+func typeString_Func(
+ t reflect.Type,
+ pkgPath string) (s string) {
+ return typeString_FuncOrMethod("func", t, pkgPath)
+}
+
+func typeString_Struct(
+ t reflect.Type,
+ pkgPath string) (s string) {
+ var fields []string
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ fString := fmt.Sprintf("%s %s", f.Name, typeString(f.Type, pkgPath))
+ fields = append(fields, fString)
+ }
+
+ s = fmt.Sprintf("struct { %s }", strings.Join(fields, "; "))
+ return
+}
+
+func typeString_Interface(
+ t reflect.Type,
+ pkgPath string) (s string) {
+ var methods []string
+ for i := 0; i < t.NumMethod(); i++ {
+ m := t.Method(i)
+ mString := typeString_FuncOrMethod(m.Name, m.Type, pkgPath)
+ methods = append(methods, mString)
+ }
+
+ s = fmt.Sprintf("interface { %s }", strings.Join(methods, "; "))
+ return
+}
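The package-path comparison at the top of typeString is what decides between "Object" and "gcs.Object" in the tests that follow. A minimal, self-contained sketch of that same rule using only the standard reflect package (render and Object are illustrative names, not part of oglemock):

    package main

    import (
        "fmt"
        "reflect"
    )

    type Object struct{}

    // render mimics typeString's named-type rule: emit the bare name when
    // the type lives in pkgPath, else reflect's package-qualified form.
    func render(t reflect.Type, pkgPath string) string {
        if t.Name() != "" && t.PkgPath() == pkgPath {
            return t.Name()
        }
        return t.String()
    }

    func main() {
        t := reflect.TypeOf(Object{})
        fmt.Println(render(t, "main"))     // Object
        fmt.Println(render(t, "some/pkg")) // main.Object
    }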
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/type_string_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/type_string_test.go
new file mode 100644
index 00000000000..7d13c4e177e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/generate/type_string_test.go
@@ -0,0 +1,220 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package generate
+
+import (
+ "io"
+ "reflect"
+ "testing"
+ "unsafe"
+
+ "github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/gcs"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+func TestTypeString(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Boilerplate
+////////////////////////////////////////////////////////////////////////
+
+type TypeStringTest struct {
+}
+
+func init() { RegisterTestSuite(&TypeStringTest{}) }
+
+////////////////////////////////////////////////////////////////////////
+// Test functions
+////////////////////////////////////////////////////////////////////////
+
+func (t *TypeStringTest) TestCases() {
+ const gcsPkgPath = "github.com/smartystreets/assertions/internal/oglemock/createmock/testdata/gcs"
+ to := reflect.TypeOf
+
+ testCases := []struct {
+ t reflect.Type
+ pkgPath string
+ expected string
+ }{
+ /////////////////////////
+ // Scalar types
+ /////////////////////////
+
+ 0: {to(true), "", "bool"},
+ 1: {to(true), "some/pkg", "bool"},
+ 2: {to(int(17)), "some/pkg", "int"},
+ 3: {to(int32(17)), "some/pkg", "int32"},
+ 4: {to(uint(17)), "some/pkg", "uint"},
+ 5: {to(uint32(17)), "some/pkg", "uint32"},
+ 6: {to(uintptr(17)), "some/pkg", "uintptr"},
+ 7: {to(float32(17)), "some/pkg", "float32"},
+ 8: {to(complex64(17)), "some/pkg", "complex64"},
+
+ /////////////////////////
+ // Structs
+ /////////////////////////
+
+ 9: {to(gcs.Object{}), "some/pkg", "gcs.Object"},
+ 10: {to(gcs.Object{}), gcsPkgPath, "Object"},
+
+ 11: {
+ to(struct {
+ a int
+ b gcs.Object
+ }{}),
+ "some/pkg",
+ "struct { a int; b gcs.Object }",
+ },
+
+ 12: {
+ to(struct {
+ a int
+ b gcs.Object
+ }{}),
+ gcsPkgPath,
+ "struct { a int; b Object }",
+ },
+
+ /////////////////////////
+ // Pointers
+ /////////////////////////
+
+ 13: {to((*int)(nil)), gcsPkgPath, "*int"},
+ 14: {to((*gcs.Object)(nil)), "some/pkg", "*gcs.Object"},
+ 15: {to((*gcs.Object)(nil)), gcsPkgPath, "*Object"},
+
+ /////////////////////////
+ // Arrays
+ /////////////////////////
+
+ 16: {to([3]int{}), "some/pkg", "[3]int"},
+ 17: {to([3]gcs.Object{}), gcsPkgPath, "[3]Object"},
+
+ /////////////////////////
+ // Channels
+ /////////////////////////
+
+ 18: {to((chan int)(nil)), "some/pkg", "chan int"},
+ 19: {to((<-chan int)(nil)), "some/pkg", "<-chan int"},
+ 20: {to((chan<- int)(nil)), "some/pkg", "chan<- int"},
+ 21: {to((<-chan gcs.Object)(nil)), gcsPkgPath, "<-chan Object"},
+
+ /////////////////////////
+ // Functions
+ /////////////////////////
+
+ 22: {
+ to(func(int, gcs.Object) {}),
+ gcsPkgPath,
+ "func(int, Object) ()",
+ },
+
+ 23: {
+ to(func() (*gcs.Object, error) { return nil, nil }),
+ gcsPkgPath,
+ "func() (*Object, error)",
+ },
+
+ 24: {
+ to(func(int, gcs.Object) (*gcs.Object, error) { return nil, nil }),
+ gcsPkgPath,
+ "func(int, Object) (*Object, error)",
+ },
+
+ /////////////////////////
+ // Interfaces
+ /////////////////////////
+
+ 25: {to((*error)(nil)).Elem(), "some/pkg", "error"},
+ 26: {to((*io.Reader)(nil)).Elem(), "some/pkg", "io.Reader"},
+ 27: {to((*io.Reader)(nil)).Elem(), "io", "Reader"},
+
+ 28: {
+ to((*interface{})(nil)).Elem(),
+ "some/pkg",
+ "interface { }",
+ },
+
+ 29: {
+ to((*interface {
+ Foo(int)
+ Bar(gcs.Object)
+ })(nil)).Elem(),
+ "some/pkg",
+ "interface { Bar(gcs.Object) (); Foo(int) () }",
+ },
+
+ 30: {
+ to((*interface {
+ Foo(int)
+ Bar(gcs.Object)
+ })(nil)).Elem(),
+ gcsPkgPath,
+ "interface { Bar(Object) (); Foo(int) () }",
+ },
+
+ /////////////////////////
+ // Maps
+ /////////////////////////
+
+ 31: {to(map[*gcs.Object]gcs.Object{}), gcsPkgPath, "map[*Object]Object"},
+
+ /////////////////////////
+ // Slices
+ /////////////////////////
+
+ 32: {to([]int{}), "some/pkg", "[]int"},
+ 33: {to([]gcs.Object{}), gcsPkgPath, "[]Object"},
+
+ /////////////////////////
+ // Strings
+ /////////////////////////
+
+ 34: {to(""), gcsPkgPath, "string"},
+
+ /////////////////////////
+ // Unsafe pointer
+ /////////////////////////
+
+ 35: {to(unsafe.Pointer(nil)), gcsPkgPath, "unsafe.Pointer"},
+
+ /////////////////////////
+ // Other named types
+ /////////////////////////
+
+ 36: {to(gcs.Int(17)), "some/pkg", "gcs.Int"},
+ 37: {to(gcs.Int(17)), gcsPkgPath, "Int"},
+
+ 38: {to(gcs.Array{}), "some/pkg", "gcs.Array"},
+ 39: {to(gcs.Array{}), gcsPkgPath, "Array"},
+
+ 40: {to(gcs.Chan(nil)), "some/pkg", "gcs.Chan"},
+ 41: {to(gcs.Chan(nil)), gcsPkgPath, "Chan"},
+
+ 42: {to(gcs.Ptr(nil)), "some/pkg", "gcs.Ptr"},
+ 43: {to(gcs.Ptr(nil)), gcsPkgPath, "Ptr"},
+
+ 44: {to((*gcs.Int)(nil)), "some/pkg", "*gcs.Int"},
+ 45: {to((*gcs.Int)(nil)), gcsPkgPath, "*Int"},
+ }
+
+ for i, tc := range testCases {
+ ExpectEq(
+ tc.expected,
+ typeString(tc.t, tc.pkgPath),
+ "Case %d: %v, %q", i, tc.t, tc.pkgPath)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/integration_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/integration_test.go
new file mode 100644
index 00000000000..e72f0cbb13b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/integration_test.go
@@ -0,0 +1,129 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock_test
+
+import (
+ "errors"
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ "github.com/smartystreets/assertions/internal/oglemock"
+ "github.com/smartystreets/assertions/internal/oglemock/sample/mock_io"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "path"
+ "runtime"
+)
+
+////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////
+
+func getLineNumber() int {
+ _, _, line, _ := runtime.Caller(1)
+ return line
+}
+
+type IntegrationTest struct {
+ reporter fakeErrorReporter
+ controller oglemock.Controller
+
+ reader mock_io.MockReader
+}
+
+func init() { RegisterTestSuite(&IntegrationTest{}) }
+
+func (t *IntegrationTest) SetUp(c *TestInfo) {
+ t.reporter.errors = make([]errorReport, 0)
+ t.reporter.fatalErrors = make([]errorReport, 0)
+ t.controller = oglemock.NewController(&t.reporter)
+
+ t.reader = mock_io.NewMockReader(t.controller, "")
+}
+
+////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////
+
+func (t *IntegrationTest) UnexpectedCall() {
+ t.reader.Read([]uint8{1, 2, 3})
+ expectedLine := getLineNumber() - 1
+
+ // An error should have been reported.
+ AssertEq(1, len(t.reporter.errors), "%v", t.reporter.errors)
+ AssertEq(0, len(t.reporter.fatalErrors), "%v", t.reporter.fatalErrors)
+
+ r := t.reporter.errors[0]
+ ExpectEq("integration_test.go", path.Base(r.fileName))
+ ExpectEq(expectedLine, r.lineNumber)
+ ExpectThat(r.err, Error(HasSubstr("Unexpected")))
+ ExpectThat(r.err, Error(HasSubstr("Read")))
+ ExpectThat(r.err, Error(HasSubstr("[1 2 3]")))
+}
+
+func (t *IntegrationTest) ZeroValues() {
+ // Make an unexpected call.
+ n, err := t.reader.Read([]uint8{})
+
+ // Check the return values.
+ ExpectEq(0, n)
+ ExpectEq(nil, err)
+}
+
+func (t *IntegrationTest) ExpectedCalls() {
+ // Expectations
+ t.controller.ExpectCall(t.reader, "Read", "", 112)(nil).
+ WillOnce(oglemock.Return(17, nil)).
+ WillOnce(oglemock.Return(19, nil))
+
+ t.controller.ExpectCall(t.reader, "Read", "", 112)(Not(Equals(nil))).
+ WillOnce(oglemock.Return(23, errors.New("taco")))
+
+ // Calls
+ var n int
+ var err error
+
+ n, err = t.reader.Read(nil)
+ ExpectEq(17, n)
+ ExpectEq(nil, err)
+
+ n, err = t.reader.Read([]byte{})
+ ExpectEq(23, n)
+ ExpectThat(err, Error(Equals("taco")))
+
+ n, err = t.reader.Read(nil)
+ ExpectEq(19, n)
+ ExpectEq(nil, err)
+
+ // Errors
+ AssertEq(0, len(t.reporter.errors), "%v", t.reporter.errors)
+ AssertEq(0, len(t.reporter.fatalErrors), "%v", t.reporter.fatalErrors)
+}
+
+func (t *IntegrationTest) WrongTypeForReturn() {
+ t.controller.ExpectCall(t.reader, "Read", "foo.go", 112)(nil).
+ WillOnce(oglemock.Return(0, errors.New(""))).
+ WillOnce(oglemock.Return("taco", errors.New("")))
+
+ // Errors
+ AssertEq(0, len(t.reporter.errors), "%v", t.reporter.errors)
+ AssertEq(1, len(t.reporter.fatalErrors), "%v", t.reporter.fatalErrors)
+
+ r := t.reporter.fatalErrors[0]
+ ExpectEq("foo.go", r.fileName)
+ ExpectEq(112, r.lineNumber)
+ ExpectThat(r.err, Error(HasSubstr("Return")))
+ ExpectThat(r.err, Error(HasSubstr("arg 0")))
+ ExpectThat(r.err, Error(HasSubstr("int")))
+ ExpectThat(r.err, Error(HasSubstr("string")))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/internal_expectation.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/internal_expectation.go
new file mode 100644
index 00000000000..8fa8aeafa42
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/internal_expectation.go
@@ -0,0 +1,180 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock
+
+import (
+ "errors"
+ "fmt"
+ "github.com/smartystreets/assertions/internal/oglematchers"
+ "reflect"
+ "sync"
+)
+
+// InternalExpectation is exported for purposes of testing only. You should not
+// touch it.
+//
+// InternalExpectation represents an expectation for zero or more calls to a
+// mock method, and a set of actions to be taken when those calls are received.
+type InternalExpectation struct {
+ // The signature of the method to which this expectation is bound, for
+ // checking action types.
+ methodSignature reflect.Type
+
+ // An error reporter to use for reporting errors in the way that expectations
+ // are set.
+ errorReporter ErrorReporter
+
+ // A mutex protecting mutable fields of the struct.
+ mutex sync.Mutex
+
+ // Matchers that the arguments to the mock method must satisfy in order to
+ // match this expectation.
+ ArgMatchers []oglematchers.Matcher
+
+ // The name of the file in which this expectation was expressed.
+ FileName string
+
+ // The line number at which this expectation was expressed.
+ LineNumber int
+
+ // The number of times this expectation should be matched, as explicitly
+ // listed by the user. If there was no explicit number expressed, this is -1.
+ ExpectedNumMatches int
+
+ // Actions to be taken for the first N calls, one per call in order, where N
+ // is the length of this slice.
+ OneTimeActions []Action
+
+ // An action to be taken when the one-time actions have expired, or nil if
+ // there is no such action.
+ FallbackAction Action
+
+ // The number of times this expectation has been matched so far.
+ NumMatches uint
+}
+
+// InternalNewExpectation is exported for purposes of testing only. You should
+// not touch it.
+func InternalNewExpectation(
+ reporter ErrorReporter,
+ methodSignature reflect.Type,
+ args []interface{},
+ fileName string,
+ lineNumber int) *InternalExpectation {
+ result := &InternalExpectation{}
+
+ // Store fields that can be stored directly.
+ result.methodSignature = methodSignature
+ result.errorReporter = reporter
+ result.FileName = fileName
+ result.LineNumber = lineNumber
+
+ // Set up defaults.
+ result.ExpectedNumMatches = -1
+ result.OneTimeActions = make([]Action, 0)
+
+ // Set up the ArgMatchers slice, using Equals(x) for each x that is not a
+ // matcher itself.
+ result.ArgMatchers = make([]oglematchers.Matcher, len(args))
+ for i, x := range args {
+ if matcher, ok := x.(oglematchers.Matcher); ok {
+ result.ArgMatchers[i] = matcher
+ } else {
+ result.ArgMatchers[i] = oglematchers.Equals(x)
+ }
+ }
+
+ return result
+}
+
+func (e *InternalExpectation) Times(n uint) Expectation {
+ e.mutex.Lock()
+ defer e.mutex.Unlock()
+
+ // It is illegal to call this more than once.
+ if e.ExpectedNumMatches != -1 {
+ e.reportFatalError("Times called more than once.")
+ return nil
+ }
+
+ // It is illegal to call this after any actions are configured.
+ if len(e.OneTimeActions) != 0 {
+ e.reportFatalError("Times called after WillOnce.")
+ return nil
+ }
+
+ if e.FallbackAction != nil {
+ e.reportFatalError("Times called after WillRepeatedly.")
+ return nil
+ }
+
+ // Make sure the number is reasonable (and will fit in an int).
+ if n > 1000 {
+ e.reportFatalError("Expectation.Times: N must be at most 1000")
+ return nil
+ }
+
+ e.ExpectedNumMatches = int(n)
+ return e
+}
+
+func (e *InternalExpectation) WillOnce(a Action) Expectation {
+ e.mutex.Lock()
+ defer e.mutex.Unlock()
+
+ // It is illegal to call this after WillRepeatedly.
+ if e.FallbackAction != nil {
+ e.reportFatalError("WillOnce called after WillRepeatedly.")
+ return nil
+ }
+
+ // Tell the action about the method's signature.
+ if err := a.SetSignature(e.methodSignature); err != nil {
+ e.reportFatalError(fmt.Sprintf("WillOnce given invalid action: %v", err))
+ return nil
+ }
+
+ // Store the action.
+ e.OneTimeActions = append(e.OneTimeActions, a)
+
+ return e
+}
+
+func (e *InternalExpectation) WillRepeatedly(a Action) Expectation {
+ e.mutex.Lock()
+ defer e.mutex.Unlock()
+
+ // It is illegal to call this twice.
+ if e.FallbackAction != nil {
+ e.reportFatalError("WillRepeatedly called more than once.")
+ return nil
+ }
+
+ // Tell the action about the method's signature.
+ if err := a.SetSignature(e.methodSignature); err != nil {
+ e.reportFatalError(fmt.Sprintf("WillRepeatedly given invalid action: %v", err))
+ return nil
+ }
+
+ // Store the action.
+ e.FallbackAction = a
+
+ return e
+}
+
+func (e *InternalExpectation) reportFatalError(errorText string) {
+ e.errorReporter.ReportFatalError(e.FileName, e.LineNumber, errors.New(errorText))
+}
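The ordering rules enforced above (Times before any action, WillOnce never after WillRepeatedly, at most one fallback) imply one canonical call shape. A hedged sketch using the same sample mock as the integration test above (rep may be any oglemock.ErrorReporter):

    package example_test

    import (
        "io"

        . "github.com/smartystreets/assertions/internal/oglematchers"
        "github.com/smartystreets/assertions/internal/oglemock"
        "github.com/smartystreets/assertions/internal/oglemock/sample/mock_io"
    )

    func buildExpectations(rep oglemock.ErrorReporter) {
        c := oglemock.NewController(rep)
        reader := mock_io.NewMockReader(c, "reader")

        // Legal ordering: matchers, then one-time actions, then the fallback.
        c.ExpectCall(reader, "Read", "file.go", 10)(Not(Equals(nil))).
            WillOnce(oglemock.Return(3, nil)).
            WillRepeatedly(oglemock.Return(0, io.EOF))
    }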
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/internal_expectation_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/internal_expectation_test.go
new file mode 100644
index 00000000000..977fe1ac3f7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/internal_expectation_test.go
@@ -0,0 +1,265 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/oglemock"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "reflect"
+)
+
+////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////
+
+var emptyReturnSig reflect.Type = reflect.TypeOf(func(i int) {})
+var float64ReturnSig reflect.Type = reflect.TypeOf(func(i int) float64 { return 17.0 })
+
+type InternalExpectationTest struct {
+ reporter fakeErrorReporter
+}
+
+func init() { RegisterTestSuite(&InternalExpectationTest{}) }
+
+func (t *InternalExpectationTest) SetUp(c *TestInfo) {
+ t.reporter.errors = make([]errorReport, 0)
+ t.reporter.fatalErrors = make([]errorReport, 0)
+}
+
+func (t *InternalExpectationTest) makeExpectation(
+ sig reflect.Type,
+ args []interface{},
+ fileName string,
+ lineNumber int) *InternalExpectation {
+ return InternalNewExpectation(&t.reporter, sig, args, fileName, lineNumber)
+}
+
+////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////
+
+func (t *InternalExpectationTest) StoresFileNameAndLineNumber() {
+ args := []interface{}{}
+ exp := t.makeExpectation(emptyReturnSig, args, "taco", 17)
+
+ ExpectThat(exp.FileName, Equals("taco"))
+ ExpectThat(exp.LineNumber, Equals(17))
+}
+
+func (t *InternalExpectationTest) NoArgs() {
+ args := []interface{}{}
+ exp := t.makeExpectation(emptyReturnSig, args, "", 0)
+
+ ExpectThat(len(exp.ArgMatchers), Equals(0))
+}
+
+func (t *InternalExpectationTest) MixOfMatchersAndNonMatchers() {
+ args := []interface{}{Equals(17), 19, Equals(23)}
+ exp := t.makeExpectation(emptyReturnSig, args, "", 0)
+
+ // Matcher args
+ ExpectThat(len(exp.ArgMatchers), Equals(3))
+ ExpectThat(exp.ArgMatchers[0], Equals(args[0]))
+ ExpectThat(exp.ArgMatchers[2], Equals(args[2]))
+
+ // Non-matcher arg
+ var err error
+ matcher1 := exp.ArgMatchers[1]
+
+ err = matcher1.Matches(17)
+ ExpectNe(nil, err)
+
+ err = matcher1.Matches(19)
+ ExpectEq(nil, err)
+
+ err = matcher1.Matches(23)
+ ExpectNe(nil, err)
+}
+
+func (t *InternalExpectationTest) NoTimes() {
+ exp := t.makeExpectation(emptyReturnSig, []interface{}{}, "", 0)
+
+ ExpectThat(exp.ExpectedNumMatches, Equals(-1))
+}
+
+func (t *InternalExpectationTest) TimesN() {
+ exp := t.makeExpectation(emptyReturnSig, []interface{}{}, "", 0)
+ exp.Times(17)
+
+ ExpectThat(exp.ExpectedNumMatches, Equals(17))
+}
+
+func (t *InternalExpectationTest) NoActions() {
+ exp := t.makeExpectation(emptyReturnSig, []interface{}{}, "", 0)
+
+ ExpectThat(len(exp.OneTimeActions), Equals(0))
+ ExpectThat(exp.FallbackAction, Equals(nil))
+}
+
+func (t *InternalExpectationTest) WillOnce() {
+ action0 := Return(17.0)
+ action1 := Return(19.0)
+
+ exp := t.makeExpectation(float64ReturnSig, []interface{}{}, "", 0)
+ exp.WillOnce(action0).WillOnce(action1)
+
+ ExpectThat(len(exp.OneTimeActions), Equals(2))
+ ExpectThat(exp.OneTimeActions[0], Equals(action0))
+ ExpectThat(exp.OneTimeActions[1], Equals(action1))
+}
+
+func (t *InternalExpectationTest) WillRepeatedly() {
+ action := Return(17.0)
+
+ exp := t.makeExpectation(float64ReturnSig, []interface{}{}, "", 0)
+ exp.WillRepeatedly(action)
+
+ ExpectThat(exp.FallbackAction, Equals(action))
+}
+
+func (t *InternalExpectationTest) BothKindsOfAction() {
+ action0 := Return(17.0)
+ action1 := Return(19.0)
+ action2 := Return(23.0)
+
+ exp := t.makeExpectation(float64ReturnSig, []interface{}{}, "", 0)
+ exp.WillOnce(action0).WillOnce(action1).WillRepeatedly(action2)
+
+ ExpectThat(len(exp.OneTimeActions), Equals(2))
+ ExpectThat(exp.OneTimeActions[0], Equals(action0))
+ ExpectThat(exp.OneTimeActions[1], Equals(action1))
+ ExpectThat(exp.FallbackAction, Equals(action2))
+}
+
+func (t *InternalExpectationTest) TimesCalledWithHugeNumber() {
+ exp := t.makeExpectation(emptyReturnSig, []interface{}{}, "taco.go", 112)
+ exp.Times(1 << 30)
+
+ AssertEq(1, len(t.reporter.fatalErrors))
+ AssertEq(0, len(t.reporter.errors))
+
+ r := t.reporter.fatalErrors[0]
+ ExpectEq("taco.go", r.fileName)
+ ExpectEq(112, r.lineNumber)
+ ExpectThat(r.err, Error(HasSubstr("Times")))
+ ExpectThat(r.err, Error(HasSubstr("N must be at most 1000")))
+}
+
+func (t *InternalExpectationTest) TimesCalledTwice() {
+ exp := t.makeExpectation(emptyReturnSig, []interface{}{}, "taco.go", 112)
+ exp.Times(17)
+ exp.Times(17)
+
+ AssertEq(1, len(t.reporter.fatalErrors))
+ AssertEq(0, len(t.reporter.errors))
+
+ r := t.reporter.fatalErrors[0]
+ ExpectEq("taco.go", r.fileName)
+ ExpectEq(112, r.lineNumber)
+ ExpectThat(r.err, Error(HasSubstr("Times")))
+ ExpectThat(r.err, Error(HasSubstr("more than once")))
+}
+
+func (t *InternalExpectationTest) TimesCalledAfterWillOnce() {
+ exp := t.makeExpectation(emptyReturnSig, []interface{}{}, "taco.go", 112)
+ exp.WillOnce(Return())
+ exp.Times(17)
+
+ AssertEq(1, len(t.reporter.fatalErrors))
+ AssertEq(0, len(t.reporter.errors))
+
+ r := t.reporter.fatalErrors[0]
+ ExpectEq("taco.go", r.fileName)
+ ExpectEq(112, r.lineNumber)
+ ExpectThat(r.err, Error(HasSubstr("Times")))
+ ExpectThat(r.err, Error(HasSubstr("after WillOnce")))
+}
+
+func (t *InternalExpectationTest) TimesCalledAfterWillRepeatedly() {
+ exp := t.makeExpectation(emptyReturnSig, []interface{}{}, "taco.go", 112)
+ exp.WillRepeatedly(Return())
+ exp.Times(17)
+
+ AssertEq(1, len(t.reporter.fatalErrors))
+ AssertEq(0, len(t.reporter.errors))
+
+ r := t.reporter.fatalErrors[0]
+ ExpectEq("taco.go", r.fileName)
+ ExpectEq(112, r.lineNumber)
+ ExpectThat(r.err, Error(HasSubstr("Times")))
+ ExpectThat(r.err, Error(HasSubstr("after WillRepeatedly")))
+}
+
+func (t *InternalExpectationTest) WillOnceCalledAfterWillRepeatedly() {
+ exp := t.makeExpectation(emptyReturnSig, []interface{}{}, "taco.go", 112)
+ exp.WillRepeatedly(Return())
+ exp.WillOnce(Return())
+
+ AssertEq(1, len(t.reporter.fatalErrors))
+ AssertEq(0, len(t.reporter.errors))
+
+ r := t.reporter.fatalErrors[0]
+ ExpectEq("taco.go", r.fileName)
+ ExpectEq(112, r.lineNumber)
+ ExpectThat(r.err, Error(HasSubstr("WillOnce")))
+ ExpectThat(r.err, Error(HasSubstr("after WillRepeatedly")))
+}
+
+func (t *InternalExpectationTest) OneTimeActionRejectsSignature() {
+ exp := t.makeExpectation(float64ReturnSig, []interface{}{}, "taco.go", 112)
+ exp.WillOnce(Return("taco"))
+
+ AssertEq(1, len(t.reporter.fatalErrors))
+ AssertEq(0, len(t.reporter.errors))
+
+ r := t.reporter.fatalErrors[0]
+ ExpectEq("taco.go", r.fileName)
+ ExpectEq(112, r.lineNumber)
+ ExpectThat(r.err, Error(HasSubstr("arg 0")))
+ ExpectThat(r.err, Error(HasSubstr("expected float64")))
+ ExpectThat(r.err, Error(HasSubstr("given string")))
+}
+
+func (t *InternalExpectationTest) WillRepeatedlyCalledTwice() {
+ exp := t.makeExpectation(emptyReturnSig, []interface{}{}, "taco.go", 112)
+ exp.WillRepeatedly(Return())
+ exp.WillRepeatedly(Return())
+
+ AssertEq(1, len(t.reporter.fatalErrors))
+ AssertEq(0, len(t.reporter.errors))
+
+ r := t.reporter.fatalErrors[0]
+ ExpectEq("taco.go", r.fileName)
+ ExpectEq(112, r.lineNumber)
+ ExpectThat(r.err, Error(HasSubstr("WillRepeatedly")))
+ ExpectThat(r.err, Error(HasSubstr("once")))
+}
+
+func (t *InternalExpectationTest) FallbackActionRejectsSignature() {
+ exp := t.makeExpectation(float64ReturnSig, []interface{}{}, "taco.go", 112)
+ exp.WillRepeatedly(Return("taco"))
+
+ AssertEq(1, len(t.reporter.fatalErrors))
+ AssertEq(0, len(t.reporter.errors))
+
+ r := t.reporter.fatalErrors[0]
+ ExpectEq("taco.go", r.fileName)
+ ExpectEq(112, r.lineNumber)
+ ExpectThat(r.err, Error(HasSubstr("arg 0")))
+ ExpectThat(r.err, Error(HasSubstr("expected float64")))
+ ExpectThat(r.err, Error(HasSubstr("given string")))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/invoke.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/invoke.go
new file mode 100644
index 00000000000..07630cbbb7e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/invoke.go
@@ -0,0 +1,73 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Create an Action that invokes the supplied function, returning whatever it
+// returns. The signature of the function must match that of the mocked method
+// exactly.
+func Invoke(f interface{}) Action {
+ // Make sure f is a function.
+ fv := reflect.ValueOf(f)
+ fk := fv.Kind()
+
+ if fk != reflect.Func {
+ desc := "<nil>"
+ if fk != reflect.Invalid {
+ desc = fv.Type().String()
+ }
+
+ panic(fmt.Sprintf("Invoke: expected function, got %s", desc))
+ }
+
+ return &invokeAction{fv}
+}
+
+type invokeAction struct {
+ f reflect.Value
+}
+
+func (a *invokeAction) SetSignature(signature reflect.Type) error {
+ // The signature must match exactly.
+ ft := a.f.Type()
+ if ft != signature {
+ return errors.New(fmt.Sprintf("Invoke: expected %v, got %v", signature, ft))
+ }
+
+ return nil
+}
+
+func (a *invokeAction) Invoke(vals []interface{}) []interface{} {
+ // Create a slice of args for the function.
+ in := make([]reflect.Value, len(vals))
+ for i, x := range vals {
+ in[i] = reflect.ValueOf(x)
+ }
+
+ // Call the function and return its return values.
+ out := a.f.Call(in)
+ result := make([]interface{}, len(out))
+ for i, v := range out {
+ result[i] = v.Interface()
+ }
+
+ return result
+}
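Invoke's contract is strict: the supplied function's type must equal the mocked method's signature exactly, and its results become the call's results. A self-contained sketch (names are illustrative):

    package example_test

    import (
        "reflect"

        "github.com/smartystreets/assertions/internal/oglemock"
    )

    func invokeSketch() []interface{} {
        double := func(n int) int { return 2 * n }

        a := oglemock.Invoke(double)
        // In real use the controller calls SetSignature; a mismatch is
        // reported through the ErrorReporter rather than panicking here.
        if err := a.SetSignature(reflect.TypeOf(double)); err != nil {
            panic(err)
        }

        return a.Invoke([]interface{}{17}) // []interface{}{34}
    }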
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/invoke_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/invoke_test.go
new file mode 100644
index 00000000000..9e1478ba8cc
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/invoke_test.go
@@ -0,0 +1,110 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ "github.com/smartystreets/assertions/internal/oglemock"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "reflect"
+)
+
+////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////
+
+type InvokeTest struct {
+}
+
+func init() { RegisterTestSuite(&InvokeTest{}) }
+
+////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////
+
+func (t *InvokeTest) ArgumentIsNil() {
+ f := func() { oglemock.Invoke(nil) }
+ ExpectThat(f, Panics(MatchesRegexp("Invoke.*function.*<nil>")))
+}
+
+func (t *InvokeTest) ArgumentIsInt() {
+ f := func() { oglemock.Invoke(17) }
+ ExpectThat(f, Panics(MatchesRegexp("Invoke.*function.*int")))
+}
+
+func (t *InvokeTest) FunctionHasOneWrongInputType() {
+ f := func(a int, b int32, c string) {}
+ g := func(a int, b int, c string) {}
+
+ err := oglemock.Invoke(f).SetSignature(reflect.TypeOf(g))
+ ExpectThat(err, Error(HasSubstr("func(int, int32, string)")))
+ ExpectThat(err, Error(HasSubstr("func(int, int, string)")))
+}
+
+func (t *InvokeTest) FunctionHasOneWrongOutputType() {
+ f := func() (int32, string) { return 0, "" }
+ g := func() (int, string) { return 0, "" }
+
+ err := oglemock.Invoke(f).SetSignature(reflect.TypeOf(g))
+ ExpectThat(err, Error(HasSubstr("func() (int32, string)")))
+ ExpectThat(err, Error(HasSubstr("func() (int, string)")))
+}
+
+func (t *InvokeTest) CallsFunction() {
+ var actualArg0, actualArg1 interface{}
+
+ f := func(a uintptr, b int8) {
+ actualArg0 = a
+ actualArg1 = b
+ }
+
+ a := oglemock.Invoke(f)
+
+ // Set signature.
+ AssertEq(nil, a.SetSignature(reflect.TypeOf(f)))
+
+ // Call the action.
+ expectedArg0 := uintptr(17)
+ expectedArg1 := int8(-7)
+
+ a.Invoke([]interface{}{expectedArg0, expectedArg1})
+
+ ExpectThat(actualArg0, IdenticalTo(expectedArg0))
+ ExpectThat(actualArg1, IdenticalTo(expectedArg1))
+}
+
+func (t *InvokeTest) ReturnsFunctionResult() {
+ expectedReturn0 := int16(3)
+ expectedReturn1 := "taco"
+
+ f := func() (int16, string) {
+ return expectedReturn0, expectedReturn1
+ }
+
+ a := oglemock.Invoke(f)
+
+ // Set signature.
+ AssertEq(nil, a.SetSignature(reflect.TypeOf(f)))
+
+ // Call the action.
+ res := a.Invoke([]interface{}{})
+
+ ExpectThat(
+ res,
+ ElementsAre(
+ IdenticalTo(expectedReturn0),
+ IdenticalTo(expectedReturn1)))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/mock_object.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/mock_object.go
new file mode 100644
index 00000000000..de995efc667
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/mock_object.go
@@ -0,0 +1,30 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock
+
+// MockObject is an interface that mock object implementations must conform to
+// in order to register expectations with and hand off calls to a
+// Controller. Users should not interact with this interface directly.
+type MockObject interface {
+ // Oglemock_Id returns an identifier for the mock object that is guaranteed
+ // to be unique within the process at least until the mock object is garbage
+ // collected.
+ Oglemock_Id() uintptr
+
+ // Oglemock_Description returns a description of the mock object that may be
+ // helpful in test failure messages.
+ Oglemock_Description() string
+}
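Generated mocks satisfy this interface mechanically, using the receiver's own address as the id, exactly as in the generated DoFoo mock earlier in this diff. A minimal hand-written sketch:

    package example

    import "unsafe"

    type myMock struct {
        description string
    }

    // The mock's address is unique for as long as the object is live, which
    // is all that Oglemock_Id promises.
    func (m *myMock) Oglemock_Id() uintptr {
        return uintptr(unsafe.Pointer(m))
    }

    func (m *myMock) Oglemock_Description() string {
        return m.description
    }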
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/return.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/return.go
new file mode 100644
index 00000000000..c66d248f44a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/return.go
@@ -0,0 +1,251 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+)
+
+var intType = reflect.TypeOf(int(0))
+var float64Type = reflect.TypeOf(float64(0))
+var complex128Type = reflect.TypeOf(complex128(0))
+
+// Return creates an Action that returns the values passed to Return as
+// arguments, after suitable legal type conversions. The following rules apply.
+// Given an argument x to Return and a corresponding type T in the method's
+// signature, at least one of the following must hold:
+//
+// * x is assignable to T. (See "Assignability" in the language spec.) Note
+// that this in particular implies that x may be of a type that implements
+// the interface T. It also implies that the nil literal can be used if T is
+// a pointer, function, interface, slice, channel, or map type.
+//
+// * T is any numeric type, and x is an int that is in range for that type.
+// This facilitates using raw integer constants: Return(17).
+//
+// * T is a floating-point or complex number type, and x is a float64. This
+// facilitates using raw floating-point constants: Return(17.5).
+//
+// * T is a complex number type, and x is a complex128. This facilitates
+// using raw complex constants: Return(17+2i).
+//
+func Return(vals ...interface{}) Action {
+ return &returnAction{vals, nil}
+}
+
+type returnAction struct {
+ returnVals []interface{}
+ signature reflect.Type
+}
+
+func (a *returnAction) Invoke(vals []interface{}) []interface{} {
+ if a.signature == nil {
+ panic("You must first call SetSignature with a valid signature.")
+ }
+
+ res, err := a.buildInvokeResult(a.signature)
+ if err != nil {
+ panic(err)
+ }
+
+ return res
+}
+
+func (a *returnAction) SetSignature(signature reflect.Type) error {
+ if _, err := a.buildInvokeResult(signature); err != nil {
+ return err
+ }
+
+ a.signature = signature
+ return nil
+}
+
+// Builds the slice of return values with error checking; used by both of the
+// public methods Invoke and SetSignature.
+func (a *returnAction) buildInvokeResult(
+ sig reflect.Type) (res []interface{}, err error) {
+ // Check the length of the return value.
+ numOut := sig.NumOut()
+ numVals := len(a.returnVals)
+
+ if numOut != numVals {
+ err = fmt.Errorf("Return given %d vals; expected %d.", numVals, numOut)
+ return
+ }
+
+ // Attempt to coerce each return value.
+ res = make([]interface{}, numOut)
+
+ for i, val := range a.returnVals {
+ resType := sig.Out(i)
+ res[i], err = a.coerce(val, resType)
+
+ if err != nil {
+ res = nil
+ err = errors.New(fmt.Sprintf("Return: arg %d: %v", i, err))
+ return
+ }
+ }
+
+ return
+}
+
+func (a *returnAction) coerce(x interface{}, t reflect.Type) (interface{}, error) {
+ xv := reflect.ValueOf(x)
+ rv := reflect.New(t).Elem()
+
+ // Special case: the language spec says that the predeclared identifier nil
+ // is assignable to pointer, function, interface, slice, channel, and map
+ // types. However, reflect.ValueOf(nil) returns an invalid value that will
+ // not cooperate below. So handle invalid values here, assuming that they
+ // resulted from Return(nil).
+ if !xv.IsValid() {
+ switch t.Kind() {
+ case reflect.Ptr, reflect.Func, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.UnsafePointer:
+ return rv.Interface(), nil
+ }
+
+ return nil, errors.New(fmt.Sprintf("expected %v, given <nil>", t))
+ }
+
+ // If x is assignable to type t, let the reflect package do the heavy
+ // lifting.
+ if reflect.TypeOf(x).AssignableTo(t) {
+ rv.Set(xv)
+ return rv.Interface(), nil
+ }
+
+ // Handle numeric types as described in the documentation on Return.
+ switch {
+ case xv.Type() == intType && a.isNumeric(t):
+ return a.coerceInt(xv.Int(), t)
+
+ case xv.Type() == float64Type && (a.isFloatingPoint(t) || a.isComplex(t)):
+ return a.coerceFloat(xv.Float(), t)
+
+ case xv.Type() == complex128Type && a.isComplex(t):
+ return a.coerceComplex(xv.Complex(), t)
+ }
+
+ // The value wasn't of a legal type.
+ return nil, errors.New(fmt.Sprintf("expected %v, given %v", t, xv.Type()))
+}
+
+func (a *returnAction) isNumeric(t reflect.Type) bool {
+ return (t.Kind() >= reflect.Int && t.Kind() <= reflect.Uint64) ||
+ a.isFloatingPoint(t) ||
+ a.isComplex(t)
+}
+
+func (a *returnAction) isFloatingPoint(t reflect.Type) bool {
+ return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64
+}
+
+func (a *returnAction) isComplex(t reflect.Type) bool {
+ return t.Kind() == reflect.Complex64 || t.Kind() == reflect.Complex128
+}
+
+func (a *returnAction) coerceInt(x int64, t reflect.Type) (interface{}, error) {
+ k := t.Kind()
+
+ // Floating point and complex numbers: promote appropriately.
+ if a.isFloatingPoint(t) || a.isComplex(t) {
+ return a.coerceFloat(float64(x), t)
+ }
+
+ // Integers: range check.
+ var min, max int64
+ unsigned := false
+
+ switch k {
+ case reflect.Int8:
+ min = math.MinInt8
+ max = math.MaxInt8
+
+ case reflect.Int16:
+ min = math.MinInt16
+ max = math.MaxInt16
+
+ case reflect.Int32:
+ min = math.MinInt32
+ max = math.MaxInt32
+
+ case reflect.Int64:
+ min = math.MinInt64
+ max = math.MaxInt64
+
+ case reflect.Uint:
+ unsigned = true
+ min = 0
+ // uint may be only 32 bits wide, so conservatively use the 32-bit range.
+ max = math.MaxUint32
+
+ case reflect.Uint8:
+ unsigned = true
+ min = 0
+ max = math.MaxUint8
+
+ case reflect.Uint16:
+ unsigned = true
+ min = 0
+ max = math.MaxUint16
+
+ case reflect.Uint32:
+ unsigned = true
+ min = 0
+ max = math.MaxUint32
+
+ case reflect.Uint64:
+ unsigned = true
+ min = 0
+ // x is an int64, so it can never exceed MaxInt64 anyway.
+ max = math.MaxInt64
+
+ default:
+ panic(fmt.Sprintf("Unexpected type: %v", t))
+ }
+
+ if x < min || x > max {
+ return nil, errors.New("int value out of range")
+ }
+
+ rv := reflect.New(t).Elem()
+ if unsigned {
+ rv.SetUint(uint64(x))
+ } else {
+ rv.SetInt(x)
+ }
+
+ return rv.Interface(), nil
+}
+
+func (a *returnAction) coerceFloat(x float64, t reflect.Type) (interface{}, error) {
+ // Promote complex numbers.
+ if a.isComplex(t) {
+ return a.coerceComplex(complex(x, 0), t)
+ }
+
+ rv := reflect.New(t).Elem()
+ rv.SetFloat(x)
+ return rv.Interface(), nil
+}
+
+func (a *returnAction) coerceComplex(x complex128, t reflect.Type) (interface{}, error) {
+ rv := reflect.New(t).Elem()
+ rv.SetComplex(x)
+ return rv.Interface(), nil
+}
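The int-to-numeric promotion described in the doc comment can be seen end to end with a float64-returning signature. A self-contained sketch (not part of the package):

    package example_test

    import (
        "reflect"

        "github.com/smartystreets/assertions/internal/oglemock"
    )

    func returnSketch() []interface{} {
        sig := reflect.TypeOf(func() float64 { return 0 })

        // Return(17) is legal for a float64 result: the int is promoted.
        a := oglemock.Return(17)
        if err := a.SetSignature(sig); err != nil {
            panic(err)
        }

        return a.Invoke(nil) // []interface{}{float64(17)}
    }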
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/return_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/return_test.go
new file mode 100644
index 00000000000..f1794bd764a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/return_test.go
@@ -0,0 +1,978 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock_test
+
+import (
+ "bytes"
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ "github.com/smartystreets/assertions/internal/oglemock"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "io"
+ "math"
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////
+
+var someInt int = 17
+
+type ReturnTest struct {
+}
+
+func init() { RegisterTestSuite(&ReturnTest{}) }
+func TestOgletest(t *testing.T) { RunTests(t) }
+
+type returnTestCase struct {
+ suppliedVal interface{}
+ expectedVal interface{}
+ expectedSetSignatureErrorSubstring string
+}
+
+func (t *ReturnTest) runTestCases(signature reflect.Type, cases []returnTestCase) {
+ for i, c := range cases {
+ a := oglemock.Return(c.suppliedVal)
+
+ // SetSignature
+ err := a.SetSignature(signature)
+ if c.expectedSetSignatureErrorSubstring == "" {
+ ExpectEq(nil, err, "Test case %d: %v", i, c)
+
+ if err != nil {
+ continue
+ }
+ } else {
+ ExpectThat(err, Error(HasSubstr(c.expectedSetSignatureErrorSubstring)),
+ "Test case %d: %v", i, c)
+ continue
+ }
+
+ // Invoke
+ res := a.Invoke([]interface{}{})
+ AssertThat(res, ElementsAre(Any()))
+ ExpectThat(res[0], IdenticalTo(c.expectedVal), "Test case %d: %v", i, c)
+ }
+}
+
+////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////
+
+func (t *ReturnTest) SetSignatureNotCalled() {
+ a := oglemock.Return()
+ f := func() { a.Invoke([]interface{}{}) }
+ ExpectThat(f, Panics(MatchesRegexp("first call SetSignature")))
+}
+
+func (t *ReturnTest) NoReturnValues() {
+ sig := reflect.TypeOf(func() {})
+ var a oglemock.Action
+ var err error
+ var vals []interface{}
+
+ // No values.
+ a = oglemock.Return()
+ err = a.SetSignature(sig)
+ AssertEq(nil, err)
+
+ vals = a.Invoke([]interface{}{})
+ ExpectThat(vals, ElementsAre())
+
+ // One value.
+ a = oglemock.Return(17)
+ err = a.SetSignature(sig)
+ ExpectThat(err, Error(HasSubstr("given 1 val")))
+ ExpectThat(err, Error(HasSubstr("expected 0")))
+
+ // Two values.
+ a = oglemock.Return(17, 19)
+ err = a.SetSignature(sig)
+ ExpectThat(err, Error(HasSubstr("given 2 vals")))
+ ExpectThat(err, Error(HasSubstr("expected 0")))
+}
+
+func (t *ReturnTest) MultipleReturnValues() {
+ sig := reflect.TypeOf(func() (int, string) { return 0, "" })
+ var a oglemock.Action
+ var err error
+ var vals []interface{}
+
+ // No values.
+ a = oglemock.Return()
+ err = a.SetSignature(sig)
+ ExpectThat(err, Error(HasSubstr("given 0 vals")))
+ ExpectThat(err, Error(HasSubstr("expected 2")))
+
+ // One value.
+ a = oglemock.Return(17)
+ err = a.SetSignature(sig)
+ ExpectThat(err, Error(HasSubstr("given 1 val")))
+ ExpectThat(err, Error(HasSubstr("expected 2")))
+
+ // Two values.
+ a = oglemock.Return(17, "taco")
+ err = a.SetSignature(sig)
+ AssertEq(nil, err)
+
+ vals = a.Invoke([]interface{}{})
+ ExpectThat(vals, ElementsAre(IdenticalTo(int(17)), "taco"))
+}
+
+func (t *ReturnTest) Bool() {
+ sig := reflect.TypeOf(func() bool { return false })
+ cases := []returnTestCase{
+ // Identical types.
+ {bool(true), bool(true), ""},
+ {bool(false), bool(false), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Int() {
+ sig := reflect.TypeOf(func() int { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {int(math.MinInt32), int(math.MinInt32), ""},
+ {int(math.MaxInt32), int(math.MaxInt32), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Int8() {
+ sig := reflect.TypeOf(func() int8 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {int8(math.MinInt8), int8(math.MinInt8), ""},
+ {int8(math.MaxInt8), int8(math.MaxInt8), ""},
+
+ // In-range ints.
+ {int(math.MinInt8), int8(math.MinInt8), ""},
+ {int(math.MaxInt8), int8(math.MaxInt8), ""},
+
+ // Out of range ints.
+ {int(math.MinInt8 - 1), nil, "out of range"},
+ {int(math.MaxInt8 + 1), nil, "out of range"},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Int16() {
+ sig := reflect.TypeOf(func() int16 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {int16(math.MinInt16), int16(math.MinInt16), ""},
+ {int16(math.MaxInt16), int16(math.MaxInt16), ""},
+
+ // In-range ints.
+ {int(math.MinInt16), int16(math.MinInt16), ""},
+ {int(math.MaxInt16), int16(math.MaxInt16), ""},
+
+ // Out of range ints.
+ {int(math.MinInt16 - 1), nil, "out of range"},
+ {int(math.MaxInt16 + 1), nil, "out of range"},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int8(1), nil, "given int8"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Int32() {
+ sig := reflect.TypeOf(func() int32 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {int32(math.MinInt32), int32(math.MinInt32), ""},
+ {int32(math.MaxInt32), int32(math.MaxInt32), ""},
+
+ // Aliased version of type.
+ {rune(17), int32(17), ""},
+
+ // In-range ints.
+ {int(math.MinInt32), int32(math.MinInt32), ""},
+ {int(math.MaxInt32), int32(math.MaxInt32), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Rune() {
+ sig := reflect.TypeOf(func() rune { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {rune(math.MinInt32), rune(math.MinInt32), ""},
+ {rune(math.MaxInt32), rune(math.MaxInt32), ""},
+
+ // Aliased version of type.
+ {int32(17), rune(17), ""},
+
+ // In-range ints.
+ {int(math.MinInt32), rune(math.MinInt32), ""},
+ {int(math.MaxInt32), rune(math.MaxInt32), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Int64() {
+ sig := reflect.TypeOf(func() int64 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {int64(math.MinInt64), int64(math.MinInt64), ""},
+ {int64(math.MaxInt64), int64(math.MaxInt64), ""},
+
+ // In-range ints.
+ {int(math.MinInt32), int64(math.MinInt32), ""},
+ {int(math.MaxInt32), int64(math.MaxInt32), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Uint() {
+ sig := reflect.TypeOf(func() uint { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {uint(0), uint(0), ""},
+ {uint(math.MaxUint32), uint(math.MaxUint32), ""},
+
+ // In-range ints.
+ {int(0), uint(0), ""},
+ {int(math.MaxInt32), uint(math.MaxInt32), ""},
+
+ // Out of range ints.
+ {int(-1), nil, "out of range"},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Uint8() {
+ sig := reflect.TypeOf(func() uint8 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {uint8(0), uint8(0), ""},
+ {uint8(math.MaxUint8), uint8(math.MaxUint8), ""},
+
+ // Aliased version of type.
+ {byte(17), uint8(17), ""},
+
+ // In-range ints.
+ {int(0), uint8(0), ""},
+ {int(math.MaxUint8), uint8(math.MaxUint8), ""},
+
+ // Out of range ints.
+ {int(-1), nil, "out of range"},
+ {int(math.MaxUint8 + 1), nil, "out of range"},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Byte() {
+ sig := reflect.TypeOf(func() byte { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {byte(0), byte(0), ""},
+ {byte(math.MaxUint8), byte(math.MaxUint8), ""},
+
+ // Aliased version of type.
+ {uint8(17), byte(17), ""},
+
+ // In-range ints.
+ {int(0), byte(0), ""},
+ {int(math.MaxUint8), byte(math.MaxUint8), ""},
+
+ // Out of range ints.
+ {int(-1), nil, "out of range"},
+ {int(math.MaxUint8 + 1), nil, "out of range"},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Uint16() {
+ sig := reflect.TypeOf(func() uint16 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {uint16(0), uint16(0), ""},
+ {uint16(math.MaxUint16), uint16(math.MaxUint16), ""},
+
+ // In-range ints.
+ {int(0), uint16(0), ""},
+ {int(math.MaxUint16), uint16(math.MaxUint16), ""},
+
+ // Out of range ints.
+ {int(-1), nil, "out of range"},
+ {int(math.MaxUint16 + 1), nil, "out of range"},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Uint32() {
+ sig := reflect.TypeOf(func() uint32 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {uint32(0), uint32(0), ""},
+ {uint32(math.MaxUint32), uint32(math.MaxUint32), ""},
+
+ // In-range ints.
+ {int(0), uint32(0), ""},
+ {int(math.MaxInt32), uint32(math.MaxInt32), ""},
+
+ // Out of range ints.
+ {int(-1), nil, "out of range"},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Uint64() {
+ sig := reflect.TypeOf(func() uint64 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {uint64(0), uint64(0), ""},
+ {uint64(math.MaxUint64), uint64(math.MaxUint64), ""},
+
+ // In-range ints.
+ {int(0), uint64(0), ""},
+ {int(math.MaxInt32), uint64(math.MaxInt32), ""},
+
+ // Out of range ints.
+ {int(-1), nil, "out of range"},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Uintptr() {
+ sig := reflect.TypeOf(func() uintptr { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {uintptr(17), uintptr(17), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Float32() {
+ sig := reflect.TypeOf(func() float32 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {float32(-17.5), float32(-17.5), ""},
+ {float32(17.5), float32(17.5), ""},
+
+ // In-range ints.
+ {int(-17), float32(-17), ""},
+ {int(17), float32(17), ""},
+
+ // Float64s
+ {float64(-17.5), float32(-17.5), ""},
+ {float64(17.5), float32(17.5), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Float64() {
+ sig := reflect.TypeOf(func() float64 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {float64(-17.5), float64(-17.5), ""},
+ {float64(17.5), float64(17.5), ""},
+
+ // In-range ints.
+ {int(-17), float64(-17), ""},
+ {int(17), float64(17), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float32(1), nil, "given float32"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Complex64() {
+ sig := reflect.TypeOf(func() complex64 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {complex64(-17.5 - 1i), complex64(-17.5 - 1i), ""},
+ {complex64(17.5 + 1i), complex64(17.5 + 1i), ""},
+
+ // In-range ints.
+ {int(-17), complex64(-17), ""},
+ {int(17), complex64(17), ""},
+
+		// Float64s.
+ {float64(-17.5), complex64(-17.5), ""},
+ {float64(17.5), complex64(17.5), ""},
+
+		// Complex128s.
+ {complex128(-17.5 - 1i), complex64(-17.5 - 1i), ""},
+ {complex128(17.5 + 1i), complex64(17.5 + 1i), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float32(1), nil, "given float32"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Complex128() {
+ sig := reflect.TypeOf(func() complex128 { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {complex128(-17.5 - 1i), complex128(-17.5 - 1i), ""},
+ {complex128(17.5 + 1i), complex128(17.5 + 1i), ""},
+
+ // In-range ints.
+ {int(-17), complex128(-17), ""},
+ {int(17), complex128(17), ""},
+
+		// Float64s.
+ {float64(-17.5), complex128(-17.5), ""},
+ {float64(17.5), complex128(17.5), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float32(1), nil, "given float32"},
+ {complex64(1), nil, "given complex64"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) ArrayOfInt() {
+ type namedElemType int
+
+ sig := reflect.TypeOf(func() [2]int { return [2]int{0, 0} })
+ cases := []returnTestCase{
+ // Identical types.
+ {[2]int{19, 23}, [2]int{19, 23}, ""},
+
+ // Wrong length.
+ {[1]int{17}, nil, "given [1]int"},
+
+ // Wrong element types.
+ {[2]namedElemType{19, 23}, nil, "given [2]oglemock_test.namedElemType"},
+ {[2]string{"", ""}, nil, "given [2]string"},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) ChanOfInt() {
+ type namedElemType int
+ someChan := make(chan int)
+
+ sig := reflect.TypeOf(func() chan int { return nil })
+ cases := []returnTestCase{
+ // Identical types.
+ {someChan, someChan, ""},
+
+ // Nil values.
+ {(interface{})(nil), (chan int)(nil), ""},
+ {(chan int)(nil), (chan int)(nil), ""},
+
+ // Wrong element types.
+ {make(chan string), nil, "given chan string"},
+ {make(chan namedElemType), nil, "given chan oglemock_test.namedElemType"},
+
+		// Wrong direction.
+ {(<-chan int)(someChan), nil, "given <-chan int"},
+ {(chan<- int)(someChan), nil, "given chan<- int"},
+
+ // Wrong types.
+ {(func())(nil), nil, "given func()"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) SendChanOfInt() {
+ type namedElemType int
+
+ someChan := make(chan<- int)
+ someBidirectionalChannel := make(chan int)
+
+ sig := reflect.TypeOf(func() chan<- int { return nil })
+ cases := []returnTestCase{
+ // Identical types.
+ {someChan, someChan, ""},
+
+ // Nil values.
+ {(interface{})(nil), (chan<- int)(nil), ""},
+ {(chan int)(nil), (chan<- int)(nil), ""},
+
+		// Bidirectional channel.
+ {someBidirectionalChannel, (chan<- int)(someBidirectionalChannel), ""},
+
+		// Wrong direction.
+ {(<-chan int)(someBidirectionalChannel), nil, "given <-chan int"},
+
+ // Wrong element types.
+ {make(chan string), nil, "given chan string"},
+ {make(chan namedElemType), nil, "given chan oglemock_test.namedElemType"},
+
+ // Wrong types.
+ {(func())(nil), nil, "given func()"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) RecvChanOfInt() {
+ type namedElemType int
+
+ someChan := make(<-chan int)
+ someBidirectionalChannel := make(chan int)
+
+ sig := reflect.TypeOf(func() <-chan int { return nil })
+ cases := []returnTestCase{
+ // Identical types.
+ {someChan, someChan, ""},
+
+ // Nil values.
+ {(interface{})(nil), (<-chan int)(nil), ""},
+ {(chan int)(nil), (<-chan int)(nil), ""},
+
+		// Bidirectional channel.
+ {someBidirectionalChannel, (<-chan int)(someBidirectionalChannel), ""},
+
+		// Wrong direction.
+ {(chan<- int)(someBidirectionalChannel), nil, "given chan<- int"},
+
+ // Wrong element types.
+ {make(chan string), nil, "given chan string"},
+ {make(chan namedElemType), nil, "given chan oglemock_test.namedElemType"},
+
+ // Wrong types.
+ {(func())(nil), nil, "given func()"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Func() {
+ someFunc := func(string) int { return 0 }
+
+ sig := reflect.TypeOf(func() func(string) int { return nil })
+ cases := []returnTestCase{
+ // Identical types.
+ {someFunc, someFunc, ""},
+
+ // Nil values.
+ {(interface{})(nil), (func(string) int)(nil), ""},
+ {(func(string) int)(nil), (func(string) int)(nil), ""},
+
+ // Wrong parameter and return types.
+ {func(int) int { return 0 }, nil, "given func(int) int"},
+ {func(string) string { return "" }, nil, "given func(string) string"},
+
+ // Wrong types.
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {(chan int)(nil), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Interface() {
+ sig := reflect.TypeOf(func() io.Reader { return nil })
+
+ someBuffer := new(bytes.Buffer)
+
+ cases := []returnTestCase{
+ // Type that implements interface.
+ {someBuffer, someBuffer, ""},
+
+ // Nil value.
+ {(interface{})(nil), (interface{})(nil), ""},
+
+ // Non-implementing types.
+ {(chan int)(nil), nil, "given chan int"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) MapFromStringToInt() {
+ type namedElemType string
+
+ someMap := make(map[string]int)
+
+ sig := reflect.TypeOf(func() map[string]int { return nil })
+ cases := []returnTestCase{
+ // Identical types.
+ {someMap, someMap, ""},
+
+ // Nil values.
+ {(interface{})(nil), (map[string]int)(nil), ""},
+ {(map[string]int)(nil), (map[string]int)(nil), ""},
+
+ // Wrong element types.
+ {make(map[int]int), nil, "given map[int]int"},
+ {make(map[namedElemType]int), nil, "given map[oglemock_test.namedElemType]int"},
+ {make(map[string]string), nil, "given map[string]string"},
+
+ // Wrong types.
+ {(func())(nil), nil, "given func()"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) PointerToString() {
+ type namedElemType string
+
+ someStr := ""
+
+ sig := reflect.TypeOf(func() *string { return nil })
+ cases := []returnTestCase{
+ // Identical types.
+ {(*string)(&someStr), (*string)(&someStr), ""},
+
+ // Nil values.
+ {(interface{})(nil), (*string)(nil), ""},
+ {(*string)(nil), (*string)(nil), ""},
+
+ // Wrong element types.
+ {&someInt, nil, "given *int"},
+
+ // Wrong types.
+ {(func())(nil), nil, "given func()"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {unsafe.Pointer(&someStr), nil, "given unsafe.Pointer"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) SliceOfInts() {
+ type namedElemType int
+
+ someSlice := make([]int, 1)
+
+ sig := reflect.TypeOf(func() []int { return nil })
+ cases := []returnTestCase{
+ // Identical types.
+ {someSlice, someSlice, ""},
+
+ // Nil values.
+ {(interface{})(nil), ([]int)(nil), ""},
+ {([]int)(nil), ([]int)(nil), ""},
+
+ // Wrong element types.
+ {make([]string, 1), nil, "given []string"},
+ {make([]namedElemType, 1), nil, "given []oglemock_test.namedElemType"},
+
+ // Wrong types.
+ {(func())(nil), nil, "given func()"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) String() {
+ sig := reflect.TypeOf(func() string { return "" })
+ cases := []returnTestCase{
+ // Identical types.
+ {string(""), string(""), ""},
+ {string("taco"), string("taco"), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) Struct() {
+ type myStruct struct {
+ a int
+ }
+
+ type otherStruct struct{}
+
+ sig := reflect.TypeOf(func() myStruct { return myStruct{0} })
+ cases := []returnTestCase{
+ // Identical types.
+ {myStruct{17}, myStruct{17}, ""},
+
+ // Wrong field types.
+ {otherStruct{}, nil, "given oglemock_test.otherStruct"},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) UnsafePointer() {
+ someStr := ""
+
+ sig := reflect.TypeOf(func() unsafe.Pointer { return nil })
+ cases := []returnTestCase{
+ // Identical types.
+ {unsafe.Pointer(&someStr), unsafe.Pointer(&someStr), ""},
+
+ // Nil values.
+ {(interface{})(nil), unsafe.Pointer(nil), ""},
+ {unsafe.Pointer(nil), unsafe.Pointer(nil), ""},
+
+ // Wrong types.
+ {(func())(nil), nil, "given func()"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {(*string)(&someStr), nil, "given *string"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) UserDefinedNumericType() {
+ type myType int16
+
+ sig := reflect.TypeOf(func() myType { return 0 })
+ cases := []returnTestCase{
+ // Identical types.
+ {myType(math.MinInt16), myType(math.MinInt16), ""},
+ {myType(math.MaxInt16), myType(math.MaxInt16), ""},
+
+ // In-range ints.
+ {int(math.MinInt16), myType(math.MinInt16), ""},
+ {int(math.MaxInt16), myType(math.MaxInt16), ""},
+
+ // Out of range ints.
+ {int(math.MinInt16 - 1), nil, "out of range"},
+ {int(math.MaxInt16 + 1), nil, "out of range"},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int16(1), nil, "given int16"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
+
+func (t *ReturnTest) UserDefinedNonNumericType() {
+ type myType string
+
+ sig := reflect.TypeOf(func() myType { return "" })
+ cases := []returnTestCase{
+ // Identical types.
+ {myType("taco"), myType("taco"), ""},
+
+ // Wrong types.
+ {nil, nil, "given <nil>"},
+ {int(1), nil, "given int"},
+ {float64(1), nil, "given float64"},
+ {complex128(1), nil, "given complex128"},
+ {string(""), nil, "given string"},
+ {&someInt, nil, "given *int"},
+ {make(chan int), nil, "given chan int"},
+ }
+
+ t.runTestCases(sig, cases)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/sample/README.markdown b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/sample/README.markdown
new file mode 100644
index 00000000000..60d5d2cb1ab
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/sample/README.markdown
@@ -0,0 +1,6 @@
+This directory contains sample code generated with the `createmock` command. For
+example, the file `mock_io.go` can be regenerated with:
+
+ createmock io Reader > sample/mock_io/mock_io.go
+
+The files are also used by `integration_test.go`.
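+
+For example, the generated `MockReader` can be used in a test roughly as
+follows (a sketch, assuming an ogletest-based test in which an
+`oglemock.Controller` named `controller` is available and `oglematchers` is
+dot-imported):
+
+    reader := mock_io.NewMockReader(controller, "reader")
+    ogletest.ExpectCall(reader, "Read")(Any()).
+        WillOnce(oglemock.Return(0, errors.New("taco")))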
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/sample/mock_io/mock_io.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/sample/mock_io/mock_io.go
new file mode 100644
index 00000000000..76e8f00056d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/sample/mock_io/mock_io.go
@@ -0,0 +1,71 @@
+// This file was auto-generated using createmock. See the following page for
+// more information:
+//
+// https://github.com/smartystreets/assertions/internal/oglemock
+//
+
+package mock_io
+
+import (
+ fmt "fmt"
+ oglemock "github.com/smartystreets/assertions/internal/oglemock"
+ io "io"
+ runtime "runtime"
+ unsafe "unsafe"
+)
+
+type MockReader interface {
+ io.Reader
+ oglemock.MockObject
+}
+
+type mockReader struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockReader(
+ c oglemock.Controller,
+ desc string) MockReader {
+ return &mockReader{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockReader) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockReader) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockReader) Read(p0 []uint8) (o0 int, o1 error) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Read",
+ file,
+ line,
+ []interface{}{p0})
+
+ if len(retVals) != 2 {
+ panic(fmt.Sprintf("mockReader.Read: invalid return values: %v", retVals))
+ }
+
+ // o0 int
+ if retVals[0] != nil {
+ o0 = retVals[0].(int)
+ }
+
+ // o1 error
+ if retVals[1] != nil {
+ o1 = retVals[1].(error)
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/save_arg.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/save_arg.go
new file mode 100644
index 00000000000..27cfcf6193b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/save_arg.go
@@ -0,0 +1,83 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// Create an Action that saves the argument at the given zero-based index to
+// the supplied destination, which must be a pointer to a type that is
+// assignable from the argument type.
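+//
+// For example (a sketch, assuming a hypothetical mock object with a method
+// Process(s string), set up with ogletest's ExpectCall helper):
+//
+//     var got string
+//     ExpectCall(mock, "Process")(Any()).WillOnce(oglemock.SaveArg(0, &got))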
+func SaveArg(index int, dst interface{}) Action {
+ return &saveArg{
+ index: index,
+ dstPointer: dst,
+ }
+}
+
+type saveArg struct {
+ index int
+ dstPointer interface{}
+
+ // Set by SetSignature.
+ dstValue reflect.Value
+}
+
+func (a *saveArg) SetSignature(signature reflect.Type) (err error) {
+ // Extract the source type.
+ if a.index >= signature.NumIn() {
+ err = fmt.Errorf(
+ "Out of range argument index %v for function type %v",
+ a.index,
+ signature)
+ return
+ }
+
+ srcType := signature.In(a.index)
+
+ // The destination must be a pointer.
+ v := reflect.ValueOf(a.dstPointer)
+ if v.Kind() != reflect.Ptr {
+ err = fmt.Errorf("Destination is %v, not a pointer", v.Kind())
+ return
+ }
+
+ // Dereference the pointer.
+ if v.IsNil() {
+ err = fmt.Errorf("Destination pointer must be non-nil")
+ return
+ }
+
+ a.dstValue = v.Elem()
+
+ // The destination must be assignable from the source.
+ if !srcType.AssignableTo(a.dstValue.Type()) {
+ err = fmt.Errorf(
+ "%v is not assignable to %v",
+ srcType,
+ a.dstValue.Type())
+ return
+ }
+
+ return
+}
+
+func (a *saveArg) Invoke(methodArgs []interface{}) (rets []interface{}) {
+ a.dstValue.Set(reflect.ValueOf(methodArgs[a.index]))
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/save_arg_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/save_arg_test.go
new file mode 100644
index 00000000000..4051907e0dd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/oglemock/save_arg_test.go
@@ -0,0 +1,132 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglemock_test
+
+import (
+ "io"
+ "os"
+ "reflect"
+ "testing"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ "github.com/smartystreets/assertions/internal/oglemock"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+func TestSaveArg(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////
+// Boilerplate
+////////////////////////////////////////////////////////////
+
+type SaveArgTest struct {
+}
+
+func init() { RegisterTestSuite(&SaveArgTest{}) }
+
+////////////////////////////////////////////////////////////
+// Test functions
+////////////////////////////////////////////////////////////
+
+func (t *SaveArgTest) FunctionHasNoArguments() {
+ const index = 0
+ var dst int
+ f := func() (int, string) { return 0, "" }
+
+ err := oglemock.SaveArg(index, &dst).SetSignature(reflect.TypeOf(f))
+ ExpectThat(err, Error(HasSubstr("index 0")))
+ ExpectThat(err, Error(HasSubstr("Out of range")))
+ ExpectThat(err, Error(HasSubstr("func() (int, string)")))
+}
+
+func (t *SaveArgTest) ArgumentIndexOutOfRange() {
+ const index = 2
+ var dst int
+ f := func(a int, b int) {}
+
+ err := oglemock.SaveArg(index, &dst).SetSignature(reflect.TypeOf(f))
+ ExpectThat(err, Error(HasSubstr("index 2")))
+ ExpectThat(err, Error(HasSubstr("Out of range")))
+ ExpectThat(err, Error(HasSubstr("func(int, int)")))
+}
+
+func (t *SaveArgTest) DestinationIsLiteralNil() {
+ const index = 0
+ f := func(a int, b int) {}
+
+ err := oglemock.SaveArg(index, nil).SetSignature(reflect.TypeOf(f))
+ ExpectThat(err, Error(HasSubstr("not a pointer")))
+}
+
+func (t *SaveArgTest) DestinationIsNotAPointer() {
+ const index = 0
+ f := func(a int, b int) {}
+
+ err := oglemock.SaveArg(index, uint(17)).SetSignature(reflect.TypeOf(f))
+ ExpectThat(err, Error(HasSubstr("pointer")))
+ ExpectThat(err, Error(HasSubstr("uint")))
+}
+
+func (t *SaveArgTest) DestinationIsNilPointer() {
+ const index = 1
+ var dst *int
+ f := func(a int, b int) {}
+
+ err := oglemock.SaveArg(index, dst).SetSignature(reflect.TypeOf(f))
+ ExpectThat(err, Error(HasSubstr("pointer")))
+ ExpectThat(err, Error(HasSubstr("non-nil")))
+}
+
+func (t *SaveArgTest) DestinationNotAssignableFromSource() {
+ const index = 1
+ var dst int
+ f := func(a int, b string) {}
+
+ err := oglemock.SaveArg(index, &dst).SetSignature(reflect.TypeOf(f))
+ ExpectThat(err, Error(HasSubstr("int")))
+ ExpectThat(err, Error(HasSubstr("assignable")))
+ ExpectThat(err, Error(HasSubstr("string")))
+}
+
+func (t *SaveArgTest) ExactTypeMatch() {
+ const index = 1
+ var dst int
+ f := func(a int, b int) {}
+
+ action := oglemock.SaveArg(index, &dst)
+ AssertEq(nil, action.SetSignature(reflect.TypeOf(f)))
+
+ var a int = 17
+ var b int = 19
+ _ = action.Invoke([]interface{}{a, b})
+
+ ExpectEq(19, dst)
+}
+
+func (t *SaveArgTest) AssignableTypeMatch() {
+ const index = 1
+ var dst io.Reader
+ f := func(a int, b *os.File) {}
+
+ action := oglemock.SaveArg(index, &dst)
+ AssertEq(nil, action.SetSignature(reflect.TypeOf(f)))
+
+ var a int = 17
+ var b *os.File = os.Stdout
+ _ = action.Invoke([]interface{}{a, b})
+
+ ExpectEq(os.Stdout, dst)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/.gitignore b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/.gitignore
new file mode 100644
index 00000000000..dd8fc7468f4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/.gitignore
@@ -0,0 +1,5 @@
+*.6
+6.out
+_obj/
+_test/
+_testmain.go
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/.travis.yml b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/.travis.yml
new file mode 100644
index 00000000000..b97211926e8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/.travis.yml
@@ -0,0 +1,4 @@
+# Cf. http://docs.travis-ci.com/user/getting-started/
+# Cf. http://docs.travis-ci.com/user/languages/go/
+
+language: go
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/LICENSE b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/README.md b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/README.md
new file mode 100644
index 00000000000..8e54862082b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/README.md
@@ -0,0 +1,151 @@
+[![GoDoc](https://godoc.org/github.com/smartystreets/assertions/internal/ogletest?status.svg)](https://godoc.org/github.com/smartystreets/assertions/internal/ogletest)
+
+`ogletest` is a unit testing framework for Go with the following features:
+
+ * An extensive and extensible set of matchers for expressing expectations.
+ * Automatic failure messages; no need to say `t.Errorf("Expected %v, got
+ %v"...)`.
+ * Clean, readable output that tells you exactly what you need to know.
+ * Built-in support for mocking through the [oglemock][] package.
+ * Style and semantics similar to [Google Test][googletest] and
+ [Google JS Test][google-js-test].
+
+It integrates with Go's built-in `testing` package, so it works with the
+`go test` command, and even with other types of tests within your package.
+Unlike the `testing` package, which offers only basic capabilities for
+signalling failures, it offers ways to express expectations and get nice
+failure messages automatically.
+
+
+Installation
+------------
+
+First, make sure you have installed Go 1.0.2 or newer. See
+[here][golang-install] for instructions.
+
+Use the following command to install `ogletest` and its dependencies, and to
+keep them up to date:
+
+ go get -u github.com/smartystreets/assertions/internal/ogletest
+
+
+Documentation
+-------------
+
+See [here][reference] for package documentation containing an exhaustive list of
+exported symbols. Alternatively, you can install the package and then use
+`godoc`:
+
+ godoc github.com/smartystreets/assertions/internal/ogletest
+
+An important part of `ogletest` is its use of matchers provided by the
+[oglematchers][matcher-reference] package. See that package's documentation
+for information on the built-in matchers available, and check out the
+`oglematchers.Matcher` interface if you want to define your own.
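+
+For instance, a custom matcher just implements that two-method
+`oglematchers.Matcher` interface. Here is a minimal sketch (not part of the
+library; it assumes `errors` is imported):
+
+```go
+// hasEvenLength matches strings whose length is even.
+type hasEvenLength struct{}
+
+// Matches returns nil on success; a non-nil error's text is appended to the
+// "Actual" line of the failure message (e.g. "which has odd length").
+func (m *hasEvenLength) Matches(candidate interface{}) error {
+	s, ok := candidate.(string)
+	if !ok {
+		return errors.New("which is not a string")
+	}
+	if len(s)%2 != 0 {
+		return errors.New("which has odd length")
+	}
+	return nil
+}
+
+// Description appears after "Expected:" in failure output.
+func (m *hasEvenLength) Description() string {
+	return "has even length"
+}
+```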
+
+
+Example
+-------
+
+Let's say you have a function in your package `people` with the following
+signature:
+
+```go
+// GetRandomPerson returns the name and phone number of Tony, Dennis, or Scott.
+func GetRandomPerson() (name, phone string) {
+ [...]
+}
+```
+
+A silly function, but it will do for an example. You can write a couple of tests
+for it as follows:
+
+```go
+package people
+
+import (
+ "github.com/smartystreets/assertions/internal/oglematchers"
+ "github.com/smartystreets/assertions/internal/ogletest"
+ "testing"
+)
+
+// Give ogletest a chance to run your tests when invoked by 'go test'.
+func TestOgletest(t *testing.T) { ogletest.RunTests(t) }
+
+// Create a test suite, which groups together logically related test methods
+// (defined below). You can share common setup and teardown code here; see the
+// package docs for more info.
+type PeopleTest struct {}
+func init() { ogletest.RegisterTestSuite(&PeopleTest{}) }
+
+func (t *PeopleTest) ReturnsCorrectNames() {
+ // Call the function a few times, and make sure it never strays from the set
+ // of expected names.
+ for i := 0; i < 25; i++ {
+ name, _ := GetRandomPerson()
+ ogletest.ExpectThat(name, oglematchers.AnyOf("Tony", "Dennis", "Scott"))
+ }
+}
+
+func (t *PeopleTest) FormatsPhoneNumbersCorrectly() {
+ // Call the function a few times, and make sure it returns phone numbers in a
+ // standard US format.
+ for i := 0; i < 25; i++ {
+ _, phone := GetRandomPerson()
+		ogletest.ExpectThat(phone, oglematchers.MatchesRegexp(`^\(\d{3}\) \d{3}-\d{4}$`))
+	}
+}
+```
+
+Note that test control functions (`RunTests`, `ExpectThat`, and so on) are part
+of the `ogletest` package, whereas built-in matchers (`AnyOf`, `MatchesRegexp`,
+and more) are part of the [oglematchers][matcher-reference] library. You can of
+course use dot imports so that you don't need to prefix each function with its
+package name:
+
+```go
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+```
+
+If you save the test in a file whose name ends in `_test.go`, you can run your
+tests by simply invoking the following in your package directory:
+
+ go test
+
+Here's what the failure output of ogletest looks like if your function's
+implementation is bad:
+
+ [----------] Running tests from PeopleTest
+ [ RUN ] PeopleTest.FormatsPhoneNumbersCorrectly
+ people_test.go:32:
+ Expected: matches regexp "^\(\d{3}\) \d{3}-\d{4}$"
+ Actual: +1 800 555 5555
+
+ [ FAILED ] PeopleTest.FormatsPhoneNumbersCorrectly
+ [ RUN ] PeopleTest.ReturnsCorrectNames
+ people_test.go:23:
+ Expected: or(Tony, Dennis, Scott)
+ Actual: Bart
+
+ [ FAILED ] PeopleTest.ReturnsCorrectNames
+ [----------] Finished with tests from PeopleTest
+
+And if the test passes:
+
+ [----------] Running tests from PeopleTest
+ [ RUN ] PeopleTest.FormatsPhoneNumbersCorrectly
+ [ OK ] PeopleTest.FormatsPhoneNumbersCorrectly
+ [ RUN ] PeopleTest.ReturnsCorrectNames
+ [ OK ] PeopleTest.ReturnsCorrectNames
+ [----------] Finished with tests from PeopleTest
+
+
+[reference]: http://godoc.org/github.com/smartystreets/assertions/internal/ogletest
+[matcher-reference]: http://godoc.org/github.com/smartystreets/assertions/internal/oglematchers
+[golang-install]: http://golang.org/doc/install.html
+[googletest]: http://code.google.com/p/googletest/
+[google-js-test]: http://code.google.com/p/google-js-test/
+[howtowrite]: http://golang.org/doc/code.html
+[oglemock]: https://github.com/smartystreets/assertions/internal/oglemock
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/assert_aliases.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/assert_aliases.go
new file mode 100644
index 00000000000..70fa25c327a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/assert_aliases.go
@@ -0,0 +1,70 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest
+
+import (
+ "github.com/smartystreets/assertions/internal/oglematchers"
+)
+
+// AssertEq(e, a) is equivalent to AssertThat(a, oglematchers.Equals(e)).
+func AssertEq(expected, actual interface{}, errorParts ...interface{}) {
+ assertThat(
+ actual,
+ oglematchers.Equals(expected),
+ 1,
+ errorParts)
+}
+
+// AssertNe(e, a) is equivalent to
+// AssertThat(a, oglematchers.Not(oglematchers.Equals(e))).
+func AssertNe(expected, actual interface{}, errorParts ...interface{}) {
+ assertThat(
+ actual,
+ oglematchers.Not(oglematchers.Equals(expected)),
+ 1,
+ errorParts)
+}
+
+// AssertLt(x, y) is equivalent to AssertThat(x, oglematchers.LessThan(y)).
+func AssertLt(x, y interface{}, errorParts ...interface{}) {
+ assertThat(x, oglematchers.LessThan(y), 1, errorParts)
+}
+
+// AssertLe(x, y) is equivalent to AssertThat(x, oglematchers.LessOrEqual(y)).
+func AssertLe(x, y interface{}, errorParts ...interface{}) {
+ assertThat(x, oglematchers.LessOrEqual(y), 1, errorParts)
+}
+
+// AssertGt(x, y) is equivalent to AssertThat(x, oglematchers.GreaterThan(y)).
+func AssertGt(x, y interface{}, errorParts ...interface{}) {
+ assertThat(x, oglematchers.GreaterThan(y), 1, errorParts)
+}
+
+// AssertGe(x, y) is equivalent to
+// AssertThat(x, oglematchers.GreaterOrEqual(y)).
+func AssertGe(x, y interface{}, errorParts ...interface{}) {
+ assertThat(x, oglematchers.GreaterOrEqual(y), 1, errorParts)
+}
+
+// AssertTrue(b) is equivalent to AssertThat(b, oglematchers.Equals(true)).
+func AssertTrue(b interface{}, errorParts ...interface{}) {
+ assertThat(b, oglematchers.Equals(true), 1, errorParts)
+}
+
+// AssertFalse(b) is equivalent to AssertThat(b, oglematchers.Equals(false)).
+func AssertFalse(b interface{}, errorParts ...interface{}) {
+ assertThat(b, oglematchers.Equals(false), 1, errorParts)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/assert_that.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/assert_that.go
new file mode 100644
index 00000000000..65c8fbce052
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/assert_that.go
@@ -0,0 +1,46 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest
+
+import (
+ "github.com/smartystreets/assertions/internal/oglematchers"
+)
+
+func assertThat(
+ x interface{},
+ m oglematchers.Matcher,
+ depth int,
+ errorParts []interface{}) {
+ passed := expectThat(x, m, depth+1, errorParts)
+ if !passed {
+ AbortTest()
+ }
+}
+
+// AssertThat is identical to ExpectThat, except that in the event of failure
+// it halts the currently running test immediately. It is thus useful for
+// things like bounds checking:
+//
+// someSlice := [...]
+// AssertEq(1, len(someSlice)) // Protects next line from panicking.
+// ExpectEq("taco", someSlice[0])
+//
+func AssertThat(
+ x interface{},
+ m oglematchers.Matcher,
+ errorParts ...interface{}) {
+ assertThat(x, m, 1, errorParts)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/doc.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/doc.go
new file mode 100644
index 00000000000..bf6507fae4d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/doc.go
@@ -0,0 +1,51 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ogletest provides a framework for writing expressive unit tests. It
+// integrates with the built-in testing package, so it works with the 'go test'
+// command. Unlike the testing package, which offers only basic capabilities for
+// signalling failures, it offers ways to express expectations and get nice
+// failure messages automatically.
+//
+// For example:
+//
+// ////////////////////////////////////////////////////////////////////////
+// // testing package test
+// ////////////////////////////////////////////////////////////////////////
+//
+// someStr, err := ComputeSomeString()
+// if err != nil {
+// t.Errorf("ComputeSomeString: expected nil error, got %v", err)
+// }
+//
+//     if !strings.Contains(someStr, "foo") {
+// t.Errorf("ComputeSomeString: expected substring foo, got %v", someStr)
+// }
+//
+// ////////////////////////////////////////////////////////////////////////
+// // ogletest test
+// ////////////////////////////////////////////////////////////////////////
+//
+// someStr, err := ComputeSomeString()
+// ExpectEq(nil, err)
+//     ExpectThat(someStr, HasSubstr("foo"))
+//
+// Failure messages require no work from the user, and look like the following:
+//
+// foo_test.go:103:
+// Expected: has substring "foo"
+// Actual: "bar baz"
+//
+package ogletest
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_aliases.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_aliases.go
new file mode 100644
index 00000000000..5bc1dc120a7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_aliases.go
@@ -0,0 +1,64 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest
+
+import "github.com/smartystreets/assertions/internal/oglematchers"
+
+// ExpectEq(e, a) is equivalent to ExpectThat(a, oglematchers.Equals(e)).
+func ExpectEq(expected, actual interface{}, errorParts ...interface{}) {
+ expectThat(actual, oglematchers.Equals(expected), 1, errorParts)
+}
+
+// ExpectNe(e, a) is equivalent to
+// ExpectThat(a, oglematchers.Not(oglematchers.Equals(e))).
+func ExpectNe(expected, actual interface{}, errorParts ...interface{}) {
+ expectThat(
+ actual,
+ oglematchers.Not(oglematchers.Equals(expected)),
+ 1,
+ errorParts)
+}
+
+// ExpectLt(x, y) is equivalent to ExpectThat(x, oglematchers.LessThan(y)).
+func ExpectLt(x, y interface{}, errorParts ...interface{}) {
+ expectThat(x, oglematchers.LessThan(y), 1, errorParts)
+}
+
+// ExpectLe(x, y) is equivalent to ExpectThat(x, oglematchers.LessOrEqual(y)).
+func ExpectLe(x, y interface{}, errorParts ...interface{}) {
+ expectThat(x, oglematchers.LessOrEqual(y), 1, errorParts)
+}
+
+// ExpectGt(x, y) is equivalent to ExpectThat(x, oglematchers.GreaterThan(y)).
+func ExpectGt(x, y interface{}, errorParts ...interface{}) {
+ expectThat(x, oglematchers.GreaterThan(y), 1, errorParts)
+}
+
+// ExpectGe(x, y) is equivalent to
+// ExpectThat(x, oglematchers.GreaterOrEqual(y)).
+func ExpectGe(x, y interface{}, errorParts ...interface{}) {
+ expectThat(x, oglematchers.GreaterOrEqual(y), 1, errorParts)
+}
+
+// ExpectTrue(b) is equivalent to ExpectThat(b, oglematchers.Equals(true)).
+func ExpectTrue(b interface{}, errorParts ...interface{}) {
+ expectThat(b, oglematchers.Equals(true), 1, errorParts)
+}
+
+// ExpectFalse(b) is equivalent to ExpectThat(b, oglematchers.Equals(false)).
+func ExpectFalse(b interface{}, errorParts ...interface{}) {
+ expectThat(b, oglematchers.Equals(false), 1, errorParts)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_call.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_call.go
new file mode 100644
index 00000000000..b8bf542a8bc
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_call.go
@@ -0,0 +1,59 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest
+
+import (
+ "github.com/smartystreets/assertions/internal/oglemock"
+ "runtime"
+)
+
+// ExpectCall expresses an expectation that the method of the given name
+// should be called on the supplied mock object. It returns a function that
+// should be called with the expected arguments, matchers for the arguments,
+// or a mix of both.
+//
+// For example:
+//
+// mockWriter := [...]
+//     ogletest.ExpectCall(mockWriter, "Write")(oglematchers.ElementsAre(0x1)).
+//         WillOnce(oglemock.Return(1, nil))
+//
+// This is a shortcut for calling i.MockController.ExpectCall, where i is the
+// TestInfo struct for the currently-running test. Unlike that direct approach,
+// this function automatically sets the correct file name and line number for
+// the expectation.
+func ExpectCall(o oglemock.MockObject, method string) oglemock.PartialExpecation {
+ // Get information about the call site.
+ _, file, lineNumber, ok := runtime.Caller(1)
+ if !ok {
+ panic("ExpectCall: runtime.Caller")
+ }
+
+ // Grab the current test info.
+ info := currentlyRunningTest
+ if info == nil {
+ panic("ExpectCall: no test info.")
+ }
+
+ // Grab the mock controller.
+ controller := currentlyRunningTest.MockController
+ if controller == nil {
+ panic("ExpectCall: no mock controller.")
+ }
+
+ // Report the expectation.
+ return controller.ExpectCall(o, method, file, lineNumber)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_that.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_that.go
new file mode 100644
index 00000000000..69fc669785d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_that.go
@@ -0,0 +1,100 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest
+
+import (
+ "fmt"
+ "path"
+ "reflect"
+ "runtime"
+
+ "github.com/smartystreets/assertions/internal/oglematchers"
+)
+
+// ExpectThat confirms that the supplied matcher matches the value x, adding a
+// failure record to the currently running test if it does not. If additional
+// parameters are supplied, the first will be used as a format string for the
+// later ones, and the user-supplied error message will be added to the test
+// output in the event of a failure.
+//
+// For example:
+//
+// ExpectThat(userName, Equals("jacobsa"))
+// ExpectThat(users[i], Equals("jacobsa"), "while processing user %d", i)
+//
+func ExpectThat(
+ x interface{},
+ m oglematchers.Matcher,
+ errorParts ...interface{}) {
+ expectThat(x, m, 1, errorParts)
+}
+
+// The generalized form of ExpectThat. depth is the distance on the stack
+// between the caller's frame and the user's frame. Returns passed iff the
+// match succeeded.
+func expectThat(
+ x interface{},
+ m oglematchers.Matcher,
+ depth int,
+ errorParts []interface{}) (passed bool) {
+ // Check whether the value matches. If it does, we are finished.
+ matcherErr := m.Matches(x)
+ if matcherErr == nil {
+ passed = true
+ return
+ }
+
+ var r FailureRecord
+
+ // Get information about the call site.
+ var ok bool
+ if _, r.FileName, r.LineNumber, ok = runtime.Caller(depth + 1); !ok {
+ panic("expectThat: runtime.Caller")
+ }
+
+ r.FileName = path.Base(r.FileName)
+
+ // Create an appropriate failure message. Make sure that the expected and
+ // actual values align properly.
+ relativeClause := ""
+ if matcherErr.Error() != "" {
+ relativeClause = fmt.Sprintf(", %s", matcherErr.Error())
+ }
+
+ r.Error = fmt.Sprintf(
+ "Expected: %s\nActual: %v%s",
+ m.Description(),
+ x,
+ relativeClause)
+
+ // Add the user error, if any.
+ if len(errorParts) != 0 {
+ v := reflect.ValueOf(errorParts[0])
+ if v.Kind() != reflect.String {
+ panic(fmt.Sprintf("ExpectThat: invalid format string type %v", v.Kind()))
+ }
+
+ r.Error = fmt.Sprintf(
+ "%s\n%s",
+ r.Error,
+ fmt.Sprintf(v.String(), errorParts[1:]...))
+ }
+
+ // Report the failure.
+ AddFailureRecord(r)
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_that_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_that_test.go
new file mode 100644
index 00000000000..e3e37236700
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/expect_that_test.go
@@ -0,0 +1,168 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest
+
+import (
+ "errors"
+ "testing"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+// Set up a new test state with empty fields.
+func setUpCurrentTest() {
+ currentlyRunningTest = newTestInfo()
+}
+
+type fakeExpectThatMatcher struct {
+ desc string
+ err error
+}
+
+func (m *fakeExpectThatMatcher) Matches(c interface{}) error {
+ return m.err
+}
+
+func (m *fakeExpectThatMatcher) Description() string {
+ return m.desc
+}
+
+func assertEqInt(t *testing.T, e, c int) {
+ if e != c {
+ t.Fatalf("Expected %d, got %d", e, c)
+ }
+}
+
+func expectEqInt(t *testing.T, e, c int) {
+ if e != c {
+ t.Errorf("Expected %v, got %v", e, c)
+ }
+}
+
+func expectEqStr(t *testing.T, e, c string) {
+ if e != c {
+ t.Errorf("Expected %s, got %s", e, c)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func TestNoCurrentTest(t *testing.T) {
+ panicked := false
+
+ defer func() {
+ if !panicked {
+ t.Errorf("Expected panic; got none.")
+ }
+ }()
+
+ defer func() {
+ if r := recover(); r != nil {
+ panicked = true
+ }
+ }()
+
+ currentlyRunningTest = nil
+ ExpectThat(17, Equals(19))
+}
+
+func TestNoFailure(t *testing.T) {
+ setUpCurrentTest()
+ matcher := &fakeExpectThatMatcher{"", nil}
+ ExpectThat(17, matcher)
+
+ assertEqInt(t, 0, len(currentlyRunningTest.failureRecords))
+}
+
+func TestInvalidFormatString(t *testing.T) {
+ panicked := false
+
+ defer func() {
+ if !panicked {
+ t.Errorf("Expected panic; got none.")
+ }
+ }()
+
+ defer func() {
+ if r := recover(); r != nil {
+ panicked = true
+ }
+ }()
+
+ setUpCurrentTest()
+ matcher := &fakeExpectThatMatcher{"", errors.New("")}
+ ExpectThat(17, matcher, 19, "blah")
+}
+
+func TestNoMatchWithoutErrorText(t *testing.T) {
+ setUpCurrentTest()
+ matcher := &fakeExpectThatMatcher{"taco", errors.New("")}
+ ExpectThat(17, matcher)
+
+ assertEqInt(t, 1, len(currentlyRunningTest.failureRecords))
+
+ record := currentlyRunningTest.failureRecords[0]
+ expectEqStr(t, "expect_that_test.go", record.FileName)
+ expectEqInt(t, 119, record.LineNumber)
+ expectEqStr(t, "Expected: taco\nActual: 17", record.Error)
+}
+
+func TestNoMatchWithErrorText(t *testing.T) {
+ setUpCurrentTest()
+ matcher := &fakeExpectThatMatcher{"taco", errors.New("which is foo")}
+ ExpectThat(17, matcher)
+
+ assertEqInt(t, 1, len(currentlyRunningTest.failureRecords))
+ record := currentlyRunningTest.failureRecords[0]
+
+ expectEqStr(
+ t,
+ "Expected: taco\nActual: 17, which is foo",
+ record.Error)
+}
+
+func TestFailureWithUserMessage(t *testing.T) {
+ setUpCurrentTest()
+ matcher := &fakeExpectThatMatcher{"taco", errors.New("")}
+ ExpectThat(17, matcher, "Asd: %d %s", 19, "taco")
+
+ assertEqInt(t, 1, len(currentlyRunningTest.failureRecords))
+ record := currentlyRunningTest.failureRecords[0]
+
+ expectEqStr(t, "Expected: taco\nActual: 17\nAsd: 19 taco", record.Error)
+}
+
+func TestAdditionalFailure(t *testing.T) {
+ setUpCurrentTest()
+ matcher := &fakeExpectThatMatcher{"", errors.New("")}
+
+ // Fail twice.
+ ExpectThat(17, matcher, "taco")
+ ExpectThat(19, matcher, "burrito")
+
+ assertEqInt(t, 2, len(currentlyRunningTest.failureRecords))
+ record1 := currentlyRunningTest.failureRecords[0]
+ record2 := currentlyRunningTest.failureRecords[1]
+
+ expectEqStr(t, "Expected: \nActual: 17\ntaco", record1.Error)
+ expectEqStr(t, "Expected: \nActual: 19\nburrito", record2.Error)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/failure.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/failure.go
new file mode 100644
index 00000000000..95be2cf4502
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/failure.go
@@ -0,0 +1,90 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest
+
+import (
+ "fmt"
+ "path"
+ "runtime"
+)
+
+// FailureRecord represents a single failed expectation or assertion for a
+// test. Most users don't want to interact with these directly; they are
+// generated implicitly using ExpectThat, AssertThat, ExpectLt, etc.
+type FailureRecord struct {
+ // The file name within which the expectation failed, e.g. "foo_test.go".
+ FileName string
+
+ // The line number at which the expectation failed.
+ LineNumber int
+
+ // The error associated with the file:line pair above. For example, the
+ // following expectation:
+ //
+// ExpectEq(17, "taco")
+ //
+ // May cause this error:
+ //
+ // Expected: 17
+ // Actual: "taco", which is not numeric
+ //
+ Error string
+}
+
+// Record a failure for the currently running test (and continue running it).
+// Most users will want to use ExpectThat, ExpectEq, etc. instead of this
+// function. Those that do want to report arbitrary errors will probably be
+// satisfied with AddFailure, which is easier to use.
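+//
+// A minimal usage sketch (the field values here are illustrative, not taken
+// from any real test):
+//
+//   AddFailureRecord(FailureRecord{
+//     FileName:   "foo_test.go",
+//     LineNumber: 17,
+//     Error:      "Expected: taco\nActual: burrito",
+//   })
+//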
+func AddFailureRecord(r FailureRecord) {
+ currentlyRunningTest.mu.Lock()
+ defer currentlyRunningTest.mu.Unlock()
+
+ currentlyRunningTest.failureRecords = append(
+ currentlyRunningTest.failureRecords,
+ r)
+}
+
+// Call AddFailureRecord with a record whose file name and line number come
+// from the caller of this function, and whose error string is created by
+// calling fmt.Sprintf using the arguments to this function.
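+//
+// For example, the following sketch (count is an assumed local variable)
+// records a failure attributed to the caller's file and line:
+//
+//   AddFailure("expected %d records, got %d", 2, count)
+//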
+func AddFailure(format string, a ...interface{}) {
+ r := FailureRecord{
+ Error: fmt.Sprintf(format, a...),
+ }
+
+ // Get information about the call site.
+ var ok bool
+ if _, r.FileName, r.LineNumber, ok = runtime.Caller(1); !ok {
+ panic("Can't find caller")
+ }
+
+ r.FileName = path.Base(r.FileName)
+
+ AddFailureRecord(r)
+}
+
+// A sentinel type that is used in a conspiracy between AbortTest and runTests.
+// If runTests sees an abortError as the value given to a panic() call, it will
+// avoid printing the panic error.
+type abortError struct {
+}
+
+// Immediately stop executing the running test, causing it to fail with the
+// failures previously recorded. Behavior is undefined if no failures have been
+// recorded.
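+//
+// A typical sketch pairs it with AddFailure (err is an assumed variable):
+//
+//   AddFailure("unrecoverable error: %v", err)
+//   AbortTest()
+//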
+func AbortTest() {
+ panic(abortError{})
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/integration_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/integration_test.go
new file mode 100644
index 00000000000..ec45184e326
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/integration_test.go
@@ -0,0 +1,265 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest_test
+
+import (
+ "errors"
+ "flag"
+ "fmt"
+ "go/build"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path"
+ "regexp"
+ "strings"
+ "syscall"
+ "testing"
+)
+
+const ogletestPkg = "github.com/smartystreets/assertions/internal/ogletest"
+
+var dumpNew = flag.Bool("dump_new", false, "Dump new golden files.")
+var objDir string
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+// Install the possibly locally-modified copy of ogletest, so that these
+// integration tests run using the package currently being worked on by the
+// programmer. Also install other dependencies needed by the test cases, so
+// that complaints from `go test` about out-of-date packages don't make it
+// into the golden files.
+func installLocalPackages() error {
+ cmd := exec.Command(
+ "go",
+ "install",
+ ogletestPkg,
+ "github.com/smartystreets/assertions/internal/oglemock",
+ "github.com/smartystreets/assertions/internal/ogletest/test_cases/mock_image")
+
+ output, err := cmd.CombinedOutput()
+
+ if err != nil {
+ return fmt.Errorf("%v:\n%s", err, output)
+ }
+
+ return nil
+}
+
+// getCaseNames looks for integration test cases as files in the test_cases
+// directory.
+func getCaseNames() ([]string, error) {
+ // Open the test cases directory.
+ dir, err := os.Open("test_cases")
+ if err != nil {
+ return nil, errors.New(fmt.Sprintf("Opening dir: %v", err))
+ }
+
+ // Get a list of the names in the directory.
+ names, err := dir.Readdirnames(0)
+ if err != nil {
+ return nil, errors.New(fmt.Sprintf("Readdirnames: %v", err))
+ }
+
+ // Filter the names.
+ result := make([]string, len(names))
+ resultLen := 0
+ for _, name := range names {
+ // Skip golden files and hidden files.
+ if strings.HasPrefix(name, "golden.") || strings.HasPrefix(name, ".") {
+ continue
+ }
+
+ // Check for the right format.
+ if !strings.HasSuffix(name, ".test.go") {
+ continue
+ }
+
+ // Store the name minus the extension.
+ result[resultLen] = name[:len(name)-8]
+ resultLen++
+ }
+
+ return result[:resultLen], nil
+}
+
+func writeContentsToFileOrDie(contents []byte, path string) {
+ if err := ioutil.WriteFile(path, contents, 0600); err != nil {
+ panic("ioutil.WriteFile: " + err.Error())
+ }
+}
+
+func readFileOrDie(path string) []byte {
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ panic("ioutil.ReadFile: " + err.Error())
+ }
+
+ return contents
+}
+
+// cleanOutput transforms the supplied output so that it no longer contains
+// information that changes from run to run, making the golden tests less
+// flaky.
+func cleanOutput(o []byte, testPkg string) []byte {
+ // Replace references to the last component of the test package name, which
+ // contains a unique number.
+ o = []byte(strings.Replace(string(o), path.Base(testPkg), "somepkg", -1))
+
+ // Replace things that look like line numbers and process counters in stack
+ // traces.
+ stackFrameRe := regexp.MustCompile(`\t\S+\.(c|go):\d+`)
+ o = stackFrameRe.ReplaceAll(o, []byte("\tsome_file.txt:0"))
+
+ // Replace full paths in failure messages with fake paths.
+ pathRe := regexp.MustCompile(`/\S+/(\w+\.(?:go|s):\d+)`)
+ o = pathRe.ReplaceAll(o, []byte("/some/path/$1"))
+
+ // Replace unstable timings in gotest fail messages.
+ timingRe1 := regexp.MustCompile(`--- FAIL: .* \(\d\.\d{2}s\)`)
+ o = timingRe1.ReplaceAll(o, []byte("--- FAIL: TestSomething (1.23s)"))
+
+ timingRe2 := regexp.MustCompile(`FAIL.*somepkg\s*\d\.\d{2,}s`)
+ o = timingRe2.ReplaceAll(o, []byte("FAIL somepkg 1.234s"))
+
+ timingRe3 := regexp.MustCompile(`ok.*somepkg\s*\d\.\d{2,}s`)
+ o = timingRe3.ReplaceAll(o, []byte("ok somepkg 1.234s"))
+
+ timingRe4 := regexp.MustCompile(`SlowTest \([0-9.]+ms\)`)
+ o = timingRe4.ReplaceAll(o, []byte("SlowTest (1234ms)"))
+
+ return o
+}
+
+// Create a temporary package directory somewhere that 'go test' can find, and
+// return the directory and package name.
+func createTempPackageDir(caseName string) (dir, pkg string) {
+ // Figure out where the local source code for ogletest is.
+ buildPkg, err := build.Import(ogletestPkg, "", build.FindOnly)
+ if err != nil {
+ panic("Finding ogletest tree: " + err.Error())
+ }
+
+ // Create a temporary directory underneath this.
+ ogletestPkgDir := buildPkg.Dir
+ prefix := fmt.Sprintf("tmp-%s-", caseName)
+
+ dir, err = ioutil.TempDir(ogletestPkgDir, prefix)
+ if err != nil {
+ panic("ioutil.TempDir: " + err.Error())
+ }
+
+ pkg = path.Join("github.com/smartystreets/assertions/internal/ogletest", dir[len(ogletestPkgDir):])
+ return
+}
+
+// runTestCase runs the case with the supplied name (e.g. "passing"), and
+// returns its output and exit code.
+func runTestCase(name string) ([]byte, int, error) {
+ // Create a temporary directory for the test files.
+ testDir, testPkg := createTempPackageDir(name)
+ defer os.RemoveAll(testDir)
+
+ // Create the test source file.
+ sourceFile := name + ".test.go"
+ testContents := readFileOrDie(path.Join("test_cases", sourceFile))
+ writeContentsToFileOrDie(testContents, path.Join(testDir, name+"_test.go"))
+
+ // Invoke 'go test'. Use the package directory as working dir instead of
+ // giving the package name as an argument so that 'go test' prints passing
+ // test output. Special case: pass a test filter to the filtered case.
+ cmd := exec.Command("go", "test")
+ if name == "filtered" {
+ cmd.Args = append(cmd.Args, "--ogletest.run=Test(Bar|Baz)")
+ }
+
+ cmd.Dir = testDir
+ output, err := cmd.CombinedOutput()
+
+ // Clean up the process's output.
+ output = cleanOutput(output, testPkg)
+
+ // Did the process exit with a zero status code?
+ if err == nil {
+ return output, 0, nil
+ }
+
+ // Make sure the process actually exited.
+ exitError, ok := err.(*exec.ExitError)
+ if !ok || !exitError.Exited() {
+ return nil, 0, errors.New("exec.Command.Output: " + err.Error())
+ }
+
+ return output, exitError.Sys().(syscall.WaitStatus).ExitStatus(), nil
+}
+
+// checkAgainstGoldenFile checks the supplied actual output for the named test
+// case against the golden file for that case. If requested by the user (via
+// --dump_new), it rewrites the golden file on failure.
+func checkAgainstGoldenFile(caseName string, output []byte) bool {
+ goldenFile := path.Join("test_cases", "golden."+caseName+"_test")
+ goldenContents := readFileOrDie(goldenFile)
+
+ result := string(output) == string(goldenContents)
+ if !result && *dumpNew {
+ writeContentsToFileOrDie(output, goldenFile)
+ }
+
+ return result
+}
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func TestGoldenFiles(t *testing.T) {
+ // Ensure the local package is installed. This will prevent the test cases
+ // from using the installed version, which may be out of date.
+ err := installLocalPackages()
+ if err != nil {
+ t.Fatalf("Error installing local ogletest: %v", err)
+ }
+
+ // We expect there to be at least one case.
+ caseNames, err := getCaseNames()
+ if err != nil || len(caseNames) == 0 {
+ t.Fatalf("Error getting cases: %v", err)
+ }
+
+ // Run each test case.
+ for _, caseName := range caseNames {
+ // Run the test case.
+ output, exitCode, err := runTestCase(caseName)
+ if err != nil {
+ t.Fatalf("Running test case %s: %v", caseName, err)
+ }
+
+ // Check the status code. We assume all test cases fail except for the
+ // passing one.
+ shouldPass := (caseName == "passing" || caseName == "no_cases")
+ didPass := exitCode == 0
+ if shouldPass != didPass {
+ t.Errorf("Bad exit code for test case %s: %d", caseName, exitCode)
+ }
+
+ // Check the output against the golden file.
+ if !checkAgainstGoldenFile(caseName, output) {
+ t.Errorf("Output for test case %s doesn't match golden file.", caseName)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/register.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/register.go
new file mode 100644
index 00000000000..756f2aa9ad9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/register.go
@@ -0,0 +1,86 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest
+
+// The input to ogletest.Register. Most users will want to use
+// ogletest.RegisterTestSuite.
+//
+// A test suite is the basic unit of registration in ogletest. It consists of
+// zero or more named test functions which will be run in sequence, along with
+// optional setup and tear-down functions.
+type TestSuite struct {
+ // The name of the overall suite, e.g. "MyPackageTest".
+ Name string
+
+ // If non-nil, a function that will be run exactly once, before any of the
+ // test functions are run.
+ SetUp func()
+
+ // The test functions comprising this suite.
+ TestFunctions []TestFunction
+
+ // If non-nil, a function that will be run exactly once, after all of the
+ // test functions have run.
+ TearDown func()
+}
+
+type TestFunction struct {
+ // The name of this test function, relative to the suite in which it resides.
+ // If the name is "TweaksFrobnicator", then the function might be presented
+ // in the ogletest UI as "FooTest.TweaksFrobnicator".
+ Name string
+
+ // If non-nil, a function that is run before Run, passed a pointer to a
+ // struct containing information about the test run.
+ SetUp func(*TestInfo)
+
+ // The function to invoke for the test body. Must be non-nil. Will not be run
+ // if SetUp panics.
+ Run func()
+
+ // If non-nil, a function that is run after Run.
+ TearDown func()
+}
+
+// Register a test suite for execution by RunTests.
+//
+// This is the most general registration mechanism. Most users will want
+// RegisterTestSuite, which is a wrapper around this function that requires
+// less boilerplate.
+//
+// Panics on invalid input.
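+//
+// A minimal sketch of direct registration (the suite and function names are
+// illustrative):
+//
+//   Register(TestSuite{
+//     Name: "FooTest",
+//     TestFunctions: []TestFunction{
+//       {Name: "DoesSomething", Run: func() { /* test body */ }},
+//     },
+//   })
+//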
+func Register(suite TestSuite) {
+ // Make sure the suite is legal.
+ if suite.Name == "" {
+ panic("Test suites must have names.")
+ }
+
+ for _, tf := range suite.TestFunctions {
+ if tf.Name == "" {
+ panic("Test functions must have names.")
+ }
+
+ if tf.Run == nil {
+ panic("Test functions must have non-nil run fields.")
+ }
+ }
+
+ // Save the suite for later.
+ registeredSuites = append(registeredSuites, suite)
+}
+
+// The list of test suites previously registered.
+var registeredSuites []TestSuite
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/register_test_suite.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/register_test_suite.go
new file mode 100644
index 00000000000..7303dfa8a6e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/register_test_suite.go
@@ -0,0 +1,193 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/smartystreets/assertions/internal/ogletest/srcutil"
+)
+
+// Test suites that implement this interface have special meaning to
+// RegisterTestSuite.
+type SetUpTestSuiteInterface interface {
+ // This method will be called exactly once, before the first test method is
+ // run. The receiver of this method will be a zero value of the test suite
+ // type, and is not shared with any other methods. Use this method to set up
+ // any necessary global state shared by all of the test methods.
+ SetUpTestSuite()
+}
+
+// Test suites that implement this interface have special meaning to
+// RegisterTestSuite.
+type TearDownTestSuiteInterface interface {
+ // This method will be called exactly once, after the last test method is
+ // run. The receiver of this method will be a zero value of the test suite
+// type, and is not shared with any other methods. Use this method to clean
+// up any global state shared by all of the test methods.
+ TearDownTestSuite()
+}
+
+// Test suites that implement this interface have special meaning to
+// Register.
+type SetUpInterface interface {
+ // This method is called before each test method is invoked, with the same
+ // receiver as that test method. At the time this method is invoked, the
+ // receiver is a zero value for the test suite type. Use this method for
+ // common setup code that works on data not shared across tests.
+ SetUp(*TestInfo)
+}
+
+// Test suites that implement this interface have special meaning to
+// Register.
+type TearDownInterface interface {
+ // This method is called after each test method is invoked, with the same
+ // receiver as that test method. Use this method for common cleanup code that
+ // works on data not shared across tests.
+ TearDown()
+}
+
+// RegisterTestSuite tells ogletest about a test suite containing tests that it
+// should run. Any exported method on the type pointed to by the supplied
+// prototype value will be treated as a test method, with the exception of
+// methods defined by the following interfaces, which when present are treated
+// as described in the documentation for those interfaces:
+//
+// * SetUpTestSuiteInterface
+// * SetUpInterface
+// * TearDownInterface
+// * TearDownTestSuiteInterface
+//
+// Each test method is invoked on a different receiver, which is initially a
+// zero value of the test suite type.
+//
+// Example:
+//
+// // Some value that is needed by the tests but is expensive to compute.
+// var someExpensiveThing uint
+//
+// type FooTest struct {
+// // Path to a temporary file used by the tests. Each test gets a
+// // different temporary file.
+// tempFile string
+// }
+// func init() { ogletest.RegisterTestSuite(&FooTest{}) }
+//
+// func (t *FooTest) SetUpTestSuite() {
+// someExpensiveThing = ComputeSomeExpensiveThing()
+// }
+//
+// func (t *FooTest) SetUp(ti *ogletest.TestInfo) {
+// t.tempFile = CreateTempFile()
+// }
+//
+// func (t *FooTest) TearDown() {
+// DeleteTempFile(t.tempFile)
+// }
+//
+// func (t *FooTest) FrobinicatorIsSuccessfullyTweaked() {
+// res := DoSomethingWithExpensiveThing(someExpensiveThing, t.tempFile)
+// ExpectThat(res, Equals(true))
+// }
+//
+func RegisterTestSuite(p interface{}) {
+ if p == nil {
+ panic("RegisterTestSuite called with nil suite.")
+ }
+
+ val := reflect.ValueOf(p)
+ typ := val.Type()
+ var zeroInstance reflect.Value
+
+ // We will transform the suite prototype into a TestSuite struct.
+ suite := TestSuite{}
+ suite.Name = typ.Elem().Name()
+
+ zeroInstance = reflect.New(typ.Elem())
+ if i, ok := zeroInstance.Interface().(SetUpTestSuiteInterface); ok {
+ suite.SetUp = func() { i.SetUpTestSuite() }
+ }
+
+ zeroInstance = reflect.New(typ.Elem())
+ if i, ok := zeroInstance.Interface().(TearDownTestSuiteInterface); ok {
+ suite.TearDown = func() { i.TearDownTestSuite() }
+ }
+
+ // Build the suite's list of test functions from its methods, filtering out
+ // the ones that we need to skip.
+ for _, method := range filterMethods(suite.Name, srcutil.GetMethodsInSourceOrder(typ)) {
+ var tf TestFunction
+ tf.Name = method.Name
+
+ // Create an instance to be operated on by all of the TestFunction's
+ // internal functions.
+ instance := reflect.New(typ.Elem())
+
+ // Bind the functions to the instance.
+ if i, ok := instance.Interface().(SetUpInterface); ok {
+ tf.SetUp = func(ti *TestInfo) { i.SetUp(ti) }
+ }
+
+ methodCopy := method
+ tf.Run = func() { runTestMethod(instance, methodCopy) }
+
+ if i, ok := instance.Interface().(TearDownInterface); ok {
+ tf.TearDown = func() { i.TearDown() }
+ }
+
+ // Save the TestFunction.
+ suite.TestFunctions = append(suite.TestFunctions, tf)
+ }
+
+ // Register the suite.
+ Register(suite)
+}
+
+func runTestMethod(suite reflect.Value, method reflect.Method) {
+ if method.Func.Type().NumIn() != 1 {
+ panic(fmt.Sprintf(
+ "%s: expected 1 args, actually %d.",
+ method.Name,
+ method.Func.Type().NumIn()))
+ }
+
+ method.Func.Call([]reflect.Value{suite})
+}
+
+func filterMethods(suiteName string, in []reflect.Method) (out []reflect.Method) {
+ for _, m := range in {
+ // Skip set up, tear down, and unexported methods.
+ if isSpecialMethod(m.Name) || !isExportedMethod(m.Name) {
+ continue
+ }
+
+ out = append(out, m)
+ }
+
+ return
+}
+
+func isSpecialMethod(name string) bool {
+ return (name == "SetUpTestSuite") ||
+ (name == "TearDownTestSuite") ||
+ (name == "SetUp") ||
+ (name == "TearDown")
+}
+
+func isExportedMethod(name string) bool {
+ return len(name) > 0 && name[0] >= 'A' && name[0] <= 'Z'
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/run_tests.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/run_tests.go
new file mode 100644
index 00000000000..003aeb019de
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/run_tests.go
@@ -0,0 +1,354 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "os"
+ "path"
+ "regexp"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/smartystreets/assertions/internal/reqtrace"
+)
+
+var fTestFilter = flag.String(
+ "ogletest.run",
+ "",
+ "Regexp for matching tests to run.")
+
+var fStopEarly = flag.Bool(
+ "ogletest.stop_early",
+ false,
+ "If true, stop after the first failure.")
+
+// runTestsOnce protects RunTests from executing multiple times.
+var runTestsOnce sync.Once
+
+func isAbortError(x interface{}) bool {
+ _, ok := x.(abortError)
+ return ok
+}
+
+// Run a single test function, returning a slice of failure records.
+func runTestFunction(tf TestFunction) (failures []FailureRecord) {
+ // Set up a clean slate for this test. Make sure to reset it after everything
+ // below is finished, so we don't accidentally use it elsewhere.
+ currentlyRunningTest = newTestInfo()
+ defer func() {
+ currentlyRunningTest = nil
+ }()
+
+ ti := currentlyRunningTest
+
+ // Start a trace.
+ var reportOutcome reqtrace.ReportFunc
+ ti.Ctx, reportOutcome = reqtrace.Trace(ti.Ctx, tf.Name)
+
+ // Run the SetUp function, if any, paying attention to whether it panics.
+ setUpPanicked := false
+ if tf.SetUp != nil {
+ setUpPanicked = runWithProtection(func() { tf.SetUp(ti) })
+ }
+
+ // Run the test function itself, but only if the SetUp function didn't panic.
+ // (This includes AssertThat errors.)
+ if !setUpPanicked {
+ runWithProtection(tf.Run)
+ }
+
+ // Run the TearDown function, if any.
+ if tf.TearDown != nil {
+ runWithProtection(tf.TearDown)
+ }
+
+ // Tell the mock controller for the tests to report any errors it's sitting
+ // on.
+ ti.MockController.Finish()
+
+ // Report the outcome to reqtrace.
+ if len(ti.failureRecords) == 0 {
+ reportOutcome(nil)
+ } else {
+ reportOutcome(fmt.Errorf("%v failure records", len(ti.failureRecords)))
+ }
+
+ return ti.failureRecords
+}
+
+// Run everything registered with Register (including via the wrapper
+// RegisterTestSuite).
+//
+// Failures are communicated to the supplied testing.T object. This is the
+// bridge between ogletest and the testing package (and `go test`); you should
+// ensure that it's called at least once by creating a test function compatible
+// with `go test` and calling it there.
+//
+// For example:
+//
+// import (
+// "github.com/smartystreets/assertions/internal/ogletest"
+// "testing"
+// )
+//
+// func TestOgletest(t *testing.T) {
+// ogletest.RunTests(t)
+// }
+//
+func RunTests(t *testing.T) {
+ runTestsOnce.Do(func() { runTestsInternal(t) })
+}
+
+// Signalling between RunTests and StopRunningTests.
+var gStopRunning uint64
+
+// Request that RunTests stop what it's doing. After the currently running test
+// is finished, including tear-down, the program will exit with an error code.
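+//
+// One possible use is to wire it to SIGINT so the test binary tears down
+// cleanly before exiting (a sketch; the signal handling shown is not part of
+// this package):
+//
+//   c := make(chan os.Signal, 1)
+//   signal.Notify(c, os.Interrupt)
+//   go func() {
+//     <-c
+//     StopRunningTests()
+//   }()
+//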
+func StopRunningTests() {
+ atomic.StoreUint64(&gStopRunning, 1)
+}
+
+// runTestsInternal does the real work of RunTests, which simply wraps it in a
+// sync.Once.
+func runTestsInternal(t *testing.T) {
+ // Process each registered suite.
+ for _, suite := range registeredSuites {
+ // Stop now if we've already seen a failure and we've been told to stop
+ // early.
+ if t.Failed() && *fStopEarly {
+ break
+ }
+
+ // Print a banner.
+ fmt.Printf("[----------] Running tests from %s\n", suite.Name)
+
+ // Run the SetUp function, if any.
+ if suite.SetUp != nil {
+ suite.SetUp()
+ }
+
+ // Run each test function that the user has not told us to skip.
+ stoppedEarly := false
+ for _, tf := range filterTestFunctions(suite) {
+ // Did the user request that we stop running tests? If so, skip the rest
+ // of this suite (and exit after tearing it down).
+ if atomic.LoadUint64(&gStopRunning) != 0 {
+ stoppedEarly = true
+ break
+ }
+
+ // Print a banner for the start of this test function.
+ fmt.Printf("[ RUN ] %s.%s\n", suite.Name, tf.Name)
+
+ // Run the test function.
+ startTime := time.Now()
+ failures := runTestFunction(tf)
+ runDuration := time.Since(startTime)
+
+ // Print any failures, and mark the test as having failed if there are any.
+ for _, record := range failures {
+ t.Fail()
+ fmt.Printf(
+ "%s:%d:\n%s\n\n",
+ record.FileName,
+ record.LineNumber,
+ record.Error)
+ }
+
+ // Print a banner for the end of the test.
+ bannerMessage := "[ OK ]"
+ if len(failures) != 0 {
+ bannerMessage = "[ FAILED ]"
+ }
+
+ // Print a summary of the time taken, if long enough.
+ var timeMessage string
+ if runDuration >= 25*time.Millisecond {
+ timeMessage = fmt.Sprintf(" (%s)", runDuration.String())
+ }
+
+ fmt.Printf(
+ "%s %s.%s%s\n",
+ bannerMessage,
+ suite.Name,
+ tf.Name,
+ timeMessage)
+
+ // Stop running tests from this suite if we've been told to stop early
+ // and this test failed.
+ if t.Failed() && *fStopEarly {
+ break
+ }
+ }
+
+ // Run the suite's TearDown function, if any.
+ if suite.TearDown != nil {
+ suite.TearDown()
+ }
+
+ // Were we told to exit early?
+ if stoppedEarly {
+ fmt.Println("Exiting early due to user request.")
+ os.Exit(1)
+ }
+
+ fmt.Printf("[----------] Finished with tests from %s\n", suite.Name)
+ }
+}
+
+// Return true iff the supplied program counter appears to lie within panic().
+func isPanic(pc uintptr) bool {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return false
+ }
+
+ return f.Name() == "runtime.gopanic" || f.Name() == "runtime.sigpanic"
+}
+
+// Find the deepest stack frame containing something that appears to be a
+// panic. Return the 'skip' value that a caller to this function would need
+// to supply to runtime.Caller for that frame, or a negative number if not found.
+func findPanic() int {
+ localSkip := -1
+ for i := 0; ; i++ {
+ // Stop if we've passed the base of the stack.
+ pc, _, _, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+
+ // Is this a panic?
+ if isPanic(pc) {
+ localSkip = i
+ }
+ }
+
+ return localSkip - 1
+}
+
+// Attempt to find the file base name and line number for the ultimate source
+// of a panic, on the panicking stack. Return a human-readable sentinel if
+// unsuccessful.
+func findPanicFileLine() (string, int) {
+ panicSkip := findPanic()
+ if panicSkip < 0 {
+ return "(unknown)", 0
+ }
+
+ // Find the trigger of the panic.
+ _, file, line, ok := runtime.Caller(panicSkip + 1)
+ if !ok {
+ return "(unknown)", 0
+ }
+
+ return path.Base(file), line
+}
+
+// Run the supplied function, catching panics (including AssertThat errors) and
+// reporting them to the currently-running test as appropriate. Return true iff
+// the function panicked.
+func runWithProtection(f func()) (panicked bool) {
+ defer func() {
+ // If the test didn't panic, we're done.
+ r := recover()
+ if r == nil {
+ return
+ }
+
+ panicked = true
+
+ // We modify the currently running test below.
+ currentlyRunningTest.mu.Lock()
+ defer currentlyRunningTest.mu.Unlock()
+
+ // If the function panicked (and the panic was not due to an AssertThat
+ // failure), add a failure for the panic.
+ if !isAbortError(r) {
+ var panicRecord FailureRecord
+ panicRecord.FileName, panicRecord.LineNumber = findPanicFileLine()
+ panicRecord.Error = fmt.Sprintf(
+ "panic: %v\n\n%s", r, formatPanicStack())
+
+ currentlyRunningTest.failureRecords = append(
+ currentlyRunningTest.failureRecords,
+ panicRecord)
+ }
+ }()
+
+ f()
+ return
+}
+
+func formatPanicStack() string {
+ buf := new(bytes.Buffer)
+
+ // Find the panic. If successful, we'll skip to below it. Otherwise, we'll
+ // format everything.
+ var initialSkip int
+ if panicSkip := findPanic(); panicSkip >= 0 {
+ initialSkip = panicSkip + 1
+ }
+
+ for i := initialSkip; ; i++ {
+ pc, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+
+ // Choose a function name to display.
+ funcName := "(unknown)"
+ if f := runtime.FuncForPC(pc); f != nil {
+ funcName = f.Name()
+ }
+
+ // Stop if we've gotten as far as the test runner code.
+ if funcName == "github.com/smartystreets/assertions/internal/ogletest.runTestMethod" ||
+ funcName == "github.com/smartystreets/assertions/internal/ogletest.runWithProtection" {
+ break
+ }
+
+ // Add an entry for this frame.
+ fmt.Fprintf(buf, "%s\n\t%s:%d\n", funcName, file, line)
+ }
+
+ return buf.String()
+}
+
+// Filter test functions according to the user-supplied filter flag.
+func filterTestFunctions(suite TestSuite) (out []TestFunction) {
+ re, err := regexp.Compile(*fTestFilter)
+ if err != nil {
+ panic("Invalid value for --ogletest.run: " + err.Error())
+ }
+
+ for _, tf := range suite.TestFunctions {
+ fullName := fmt.Sprintf("%s.%s", suite.Name, tf.Name)
+ if !re.MatchString(fullName) {
+ continue
+ }
+
+ out = append(out, tf)
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/docs.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/docs.go
new file mode 100644
index 00000000000..d9b9bc8e5fe
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/docs.go
@@ -0,0 +1,5 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+
+// Functions for working with source code.
+package srcutil
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/methods.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/methods.go
new file mode 100644
index 00000000000..a8c5828ea3a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/methods.go
@@ -0,0 +1,65 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package srcutil
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "sort"
+)
+
+func getLine(m reflect.Method) int {
+ pc := m.Func.Pointer()
+
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ panic(fmt.Sprintf("Couldn't get runtime func for method (pc=%d): %v", pc, m))
+ }
+
+ _, line := f.FileLine(pc)
+ return line
+}
+
+type sortableMethodSet []reflect.Method
+
+func (s sortableMethodSet) Len() int {
+ return len(s)
+}
+
+func (s sortableMethodSet) Less(i, j int) bool {
+ return getLine(s[i]) < getLine(s[j])
+}
+
+func (s sortableMethodSet) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Given a type t, return all of the methods of t sorted such that source file
+// order is preserved. Order across files is undefined. Order among methods
+// defined on the same line is undefined.
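+//
+// For example (a sketch; MultipleMethodsType is assumed to declare Foo, Bar,
+// and Baz in that order in one source file):
+//
+//   methods := GetMethodsInSourceOrder(reflect.TypeOf(MultipleMethodsType(0)))
+//   // methods[0].Name == "Foo", methods[1].Name == "Bar", ...
+//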
+func GetMethodsInSourceOrder(t reflect.Type) []reflect.Method {
+ // Build the list of methods.
+ methods := sortableMethodSet{}
+ for i := 0; i < t.NumMethod(); i++ {
+ methods = append(methods, t.Method(i))
+ }
+
+ // Sort it.
+ sort.Sort(methods)
+
+ return methods
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/methods_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/methods_test.go
new file mode 100644
index 00000000000..95c07fd4697
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/srcutil/methods_test.go
@@ -0,0 +1,107 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package srcutil_test
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "github.com/smartystreets/assertions/internal/ogletest/srcutil"
+)
+
+func TestRegisterMethodsTest(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type MethodsTest struct {
+}
+
+func init() { RegisterTestSuite(&MethodsTest{}) }
+
+type OneMethodType int
+
+func (x OneMethodType) Foo() {}
+
+type MultipleMethodsType int
+
+func (x MultipleMethodsType) Foo() {}
+func (x MultipleMethodsType) Bar() {}
+func (x MultipleMethodsType) Baz() {}
+
+type methodNameMatcher struct {
+ expected string
+}
+
+func (m *methodNameMatcher) Description() string {
+ return fmt.Sprintf("method named %s", m.expected)
+}
+
+func (m *methodNameMatcher) Matches(x interface{}) error {
+ method, ok := x.(reflect.Method)
+ if !ok {
+ panic("Invalid argument.")
+ }
+
+ if method.Name != m.expected {
+ return fmt.Errorf("whose name is %s", method.Name)
+ }
+
+ return nil
+}
+
+func NameIs(name string) Matcher {
+ return &methodNameMatcher{name}
+}
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *MethodsTest) NoMethods() {
+ type foo int
+
+ methods := srcutil.GetMethodsInSourceOrder(reflect.TypeOf(foo(17)))
+ ExpectThat(methods, ElementsAre())
+}
+
+func (t *MethodsTest) OneMethod() {
+ methods := srcutil.GetMethodsInSourceOrder(reflect.TypeOf(OneMethodType(17)))
+ ExpectThat(
+ methods,
+ ElementsAre(
+ NameIs("Foo"),
+ ))
+}
+
+func (t *MethodsTest) MultipleMethods() {
+ methods := srcutil.GetMethodsInSourceOrder(reflect.TypeOf(MultipleMethodsType(17)))
+ ExpectThat(
+ methods,
+ ElementsAre(
+ NameIs("Foo"),
+ NameIs("Bar"),
+ NameIs("Baz"),
+ ))
+
+ ExpectEq("Foo", methods[0].Name)
+ ExpectEq("Bar", methods[1].Name)
+ ExpectEq("Baz", methods[2].Name)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/failing.test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/failing.test.go
new file mode 100644
index 00000000000..17c50e19487
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/failing.test.go
@@ -0,0 +1,252 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "fmt"
+ "testing"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+func TestFailingTest(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Usual failures
+////////////////////////////////////////////////////////////////////////
+
+type FailingTest struct {
+}
+
+var _ TearDownInterface = &FailingTest{}
+var _ TearDownTestSuiteInterface = &FailingTest{}
+
+func init() { RegisterTestSuite(&FailingTest{}) }
+
+func (t *FailingTest) TearDown() {
+ fmt.Println("TearDown running.")
+}
+
+func (t *FailingTest) TearDownTestSuite() {
+ fmt.Println("TearDownTestSuite running.")
+}
+
+func (t *FailingTest) PassingMethod() {
+}
+
+func (t *FailingTest) Equals() {
+ ExpectThat(17, Equals(17.5))
+ ExpectThat(17, Equals("taco"))
+}
+
+func (t *FailingTest) LessThan() {
+ ExpectThat(18, LessThan(17))
+ ExpectThat(18, LessThan("taco"))
+}
+
+func (t *FailingTest) HasSubstr() {
+ ExpectThat("taco", HasSubstr("ac"))
+ ExpectThat(17, HasSubstr("ac"))
+}
+
+func (t *FailingTest) ExpectWithUserErrorMessages() {
+ ExpectThat(17, Equals(19), "foo bar: %d", 112)
+ ExpectEq(17, 17.5, "foo bar: %d", 112)
+ ExpectLe(17, 16.9, "foo bar: %d", 112)
+ ExpectLt(17, 16.9, "foo bar: %d", 112)
+ ExpectGe(17, 17.1, "foo bar: %d", 112)
+ ExpectGt(17, "taco", "foo bar: %d", 112)
+ ExpectNe(17, 17.0, "foo bar: %d", 112)
+ ExpectFalse(true, "foo bar: %d", 112)
+ ExpectTrue(false, "foo bar: %d", 112)
+}
+
+func (t *FailingTest) AssertWithUserErrorMessages() {
+ AssertThat(17, Equals(19), "foo bar: %d", 112)
+}
+
+func (t *FailingTest) ExpectationAliases() {
+ ExpectEq(17, 17.5)
+ ExpectEq("taco", 17.5)
+
+ ExpectLe(17, 16.9)
+ ExpectLt(17, 16.9)
+ ExpectLt(17, "taco")
+
+ ExpectGe(17, 17.1)
+ ExpectGt(17, 17.1)
+ ExpectGt(17, "taco")
+
+ ExpectNe(17, 17.0)
+ ExpectNe(17, "taco")
+
+ ExpectFalse(true)
+ ExpectFalse("taco")
+
+ ExpectTrue(false)
+ ExpectTrue("taco")
+}
+
+func (t *FailingTest) AssertThatFailure() {
+ AssertThat(17, Equals(19))
+ panic("Shouldn't get here.")
+}
+
+func (t *FailingTest) AssertEqFailure() {
+ AssertEq(19, 17)
+ panic("Shouldn't get here.")
+}
+
+func (t *FailingTest) AssertNeFailure() {
+ AssertNe(19, 19)
+ panic("Shouldn't get here.")
+}
+
+func (t *FailingTest) AssertLeFailure() {
+ AssertLe(19, 17)
+ panic("Shouldn't get here.")
+}
+
+func (t *FailingTest) AssertLtFailure() {
+ AssertLt(19, 17)
+ panic("Shouldn't get here.")
+}
+
+func (t *FailingTest) AssertGeFailure() {
+ AssertGe(17, 19)
+ panic("Shouldn't get here.")
+}
+
+func (t *FailingTest) AssertGtFailure() {
+ AssertGt(17, 19)
+ panic("Shouldn't get here.")
+}
+
+func (t *FailingTest) AssertTrueFailure() {
+ AssertTrue("taco")
+ panic("Shouldn't get here.")
+}
+
+func (t *FailingTest) AssertFalseFailure() {
+ AssertFalse("taco")
+ panic("Shouldn't get here.")
+}
+
+func (t *FailingTest) AddFailureRecord() {
+ r := FailureRecord{
+ FileName: "foo.go",
+ LineNumber: 17,
+ Error: "taco\nburrito",
+ }
+
+ AddFailureRecord(r)
+}
+
+func (t *FailingTest) AddFailure() {
+ AddFailure("taco")
+ AddFailure("burrito: %d", 17)
+}
+
+func (t *FailingTest) AddFailureThenAbortTest() {
+ AddFailure("enchilada")
+ AbortTest()
+ fmt.Println("Shouldn't get here.")
+}
+
+////////////////////////////////////////////////////////////////////////
+// Expectation failure during SetUp
+////////////////////////////////////////////////////////////////////////
+
+type ExpectFailDuringSetUpTest struct {
+}
+
+func init() { RegisterTestSuite(&ExpectFailDuringSetUpTest{}) }
+
+func (t *ExpectFailDuringSetUpTest) SetUp(i *TestInfo) {
+ ExpectFalse(true)
+}
+
+func (t *ExpectFailDuringSetUpTest) TearDown() {
+ fmt.Println("TearDown running.")
+}
+
+func (t *ExpectFailDuringSetUpTest) PassingMethod() {
+ fmt.Println("Method running.")
+}
+
+////////////////////////////////////////////////////////////////////////
+// Assertion failure during SetUp
+////////////////////////////////////////////////////////////////////////
+
+type AssertFailDuringSetUpTest struct {
+}
+
+func init() { RegisterTestSuite(&AssertFailDuringSetUpTest{}) }
+
+func (t *AssertFailDuringSetUpTest) SetUp(i *TestInfo) {
+ AssertFalse(true)
+}
+
+func (t *AssertFailDuringSetUpTest) TearDown() {
+ fmt.Println("TearDown running.")
+}
+
+func (t *AssertFailDuringSetUpTest) PassingMethod() {
+ fmt.Println("Method running.")
+}
+
+////////////////////////////////////////////////////////////////////////
+// Expectation failure during TearDown
+////////////////////////////////////////////////////////////////////////
+
+type ExpectFailDuringTearDownTest struct {
+}
+
+func init() { RegisterTestSuite(&ExpectFailDuringTearDownTest{}) }
+
+func (t *ExpectFailDuringTearDownTest) SetUp(i *TestInfo) {
+ fmt.Println("SetUp running.")
+}
+
+func (t *ExpectFailDuringTearDownTest) TearDown() {
+ ExpectFalse(true)
+}
+
+func (t *ExpectFailDuringTearDownTest) PassingMethod() {
+ fmt.Println("Method running.")
+}
+
+////////////////////////////////////////////////////////////////////////
+// Assertion failure during TearDown
+////////////////////////////////////////////////////////////////////////
+
+type AssertFailDuringTearDownTest struct {
+}
+
+func init() { RegisterTestSuite(&AssertFailDuringTearDownTest{}) }
+
+func (t *AssertFailDuringTearDownTest) SetUp(i *TestInfo) {
+ fmt.Println("SetUp running.")
+}
+
+func (t *AssertFailDuringTearDownTest) TearDown() {
+ AssertFalse(true)
+}
+
+func (t *AssertFailDuringTearDownTest) PassingMethod() {
+ fmt.Println("Method running.")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/filtered.test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/filtered.test.go
new file mode 100644
index 00000000000..e559c5f926c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/filtered.test.go
@@ -0,0 +1,79 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "fmt"
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "testing"
+)
+
+func TestFiltered(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Partially filtered out
+////////////////////////////////////////////////////////////////////////
+
+type PartiallyFilteredTest struct {
+}
+
+func init() { RegisterTestSuite(&PartiallyFilteredTest{}) }
+
+func (t *PartiallyFilteredTest) PassingTestFoo() {
+ ExpectThat(19, Equals(19))
+}
+
+func (t *PartiallyFilteredTest) PassingTestBar() {
+ ExpectThat(17, Equals(17))
+}
+
+func (t *PartiallyFilteredTest) PartiallyFilteredTestFoo() {
+ ExpectThat(18, LessThan(17))
+}
+
+func (t *PartiallyFilteredTest) PartiallyFilteredTestBar() {
+ ExpectThat("taco", HasSubstr("blah"))
+}
+
+func (t *PartiallyFilteredTest) PartiallyFilteredTestBaz() {
+ ExpectThat(18, LessThan(17))
+}
+
+////////////////////////////////////////////////////////////////////////
+// Completely filtered out
+////////////////////////////////////////////////////////////////////////
+
+type CompletelyFilteredTest struct {
+}
+
+func init() { RegisterTestSuite(&CompletelyFilteredTest{}) }
+
+func (t *CompletelyFilteredTest) SetUpTestSuite() {
+ fmt.Println("SetUpTestSuite run!")
+}
+
+func (t *CompletelyFilteredTest) TearDownTestSuite() {
+ fmt.Println("TearDownTestSuite run!")
+}
+
+func (t *CompletelyFilteredTest) SomePassingTest() {
+ ExpectThat(19, Equals(19))
+}
+
+func (t *CompletelyFilteredTest) SomeFailingTest() {
+ ExpectThat(19, Equals(17))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.failing_test b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.failing_test
new file mode 100644
index 00000000000..f0cd76af500
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.failing_test
@@ -0,0 +1,278 @@
+[----------] Running tests from FailingTest
+[ RUN ] FailingTest.PassingMethod
+TearDown running.
+[ OK ] FailingTest.PassingMethod
+[ RUN ] FailingTest.Equals
+TearDown running.
+failing_test.go:52:
+Expected: 17.5
+Actual: 17
+
+failing_test.go:53:
+Expected: taco
+Actual: 17, which is not a string
+
+[ FAILED ] FailingTest.Equals
+[ RUN ] FailingTest.LessThan
+TearDown running.
+failing_test.go:57:
+Expected: less than 17
+Actual: 18
+
+failing_test.go:58:
+Expected: less than "taco"
+Actual: 18, which is not comparable
+
+[ FAILED ] FailingTest.LessThan
+[ RUN ] FailingTest.HasSubstr
+TearDown running.
+failing_test.go:63:
+Expected: has substring "ac"
+Actual: 17, which is not a string
+
+[ FAILED ] FailingTest.HasSubstr
+[ RUN ] FailingTest.ExpectWithUserErrorMessages
+TearDown running.
+failing_test.go:67:
+Expected: 19
+Actual: 17
+foo bar: 112
+
+failing_test.go:68:
+Expected: 17
+Actual: 17.5
+foo bar: 112
+
+failing_test.go:69:
+Expected: less than or equal to 16.9
+Actual: 17
+foo bar: 112
+
+failing_test.go:70:
+Expected: less than 16.9
+Actual: 17
+foo bar: 112
+
+failing_test.go:71:
+Expected: greater than or equal to 17.1
+Actual: 17
+foo bar: 112
+
+failing_test.go:72:
+Expected: greater than "taco"
+Actual: 17, which is not comparable
+foo bar: 112
+
+failing_test.go:73:
+Expected: not(17)
+Actual: 17
+foo bar: 112
+
+failing_test.go:74:
+Expected: false
+Actual: true
+foo bar: 112
+
+failing_test.go:75:
+Expected: true
+Actual: false
+foo bar: 112
+
+[ FAILED ] FailingTest.ExpectWithUserErrorMessages
+[ RUN ] FailingTest.AssertWithUserErrorMessages
+TearDown running.
+failing_test.go:79:
+Expected: 19
+Actual: 17
+foo bar: 112
+
+[ FAILED ] FailingTest.AssertWithUserErrorMessages
+[ RUN ] FailingTest.ExpectationAliases
+TearDown running.
+failing_test.go:83:
+Expected: 17
+Actual: 17.5
+
+failing_test.go:84:
+Expected: taco
+Actual: 17.5, which is not a string
+
+failing_test.go:86:
+Expected: less than or equal to 16.9
+Actual: 17
+
+failing_test.go:87:
+Expected: less than 16.9
+Actual: 17
+
+failing_test.go:88:
+Expected: less than "taco"
+Actual: 17, which is not comparable
+
+failing_test.go:90:
+Expected: greater than or equal to 17.1
+Actual: 17
+
+failing_test.go:91:
+Expected: greater than 17.1
+Actual: 17
+
+failing_test.go:92:
+Expected: greater than "taco"
+Actual: 17, which is not comparable
+
+failing_test.go:94:
+Expected: not(17)
+Actual: 17
+
+failing_test.go:95:
+Expected: not(17)
+Actual: taco, which is not numeric
+
+failing_test.go:97:
+Expected: false
+Actual: true
+
+failing_test.go:98:
+Expected: false
+Actual: taco, which is not a bool
+
+failing_test.go:100:
+Expected: true
+Actual: false
+
+failing_test.go:101:
+Expected: true
+Actual: taco, which is not a bool
+
+[ FAILED ] FailingTest.ExpectationAliases
+[ RUN ] FailingTest.AssertThatFailure
+TearDown running.
+failing_test.go:105:
+Expected: 19
+Actual: 17
+
+[ FAILED ] FailingTest.AssertThatFailure
+[ RUN ] FailingTest.AssertEqFailure
+TearDown running.
+failing_test.go:110:
+Expected: 19
+Actual: 17
+
+[ FAILED ] FailingTest.AssertEqFailure
+[ RUN ] FailingTest.AssertNeFailure
+TearDown running.
+failing_test.go:115:
+Expected: not(19)
+Actual: 19
+
+[ FAILED ] FailingTest.AssertNeFailure
+[ RUN ] FailingTest.AssertLeFailure
+TearDown running.
+failing_test.go:120:
+Expected: less than or equal to 17
+Actual: 19
+
+[ FAILED ] FailingTest.AssertLeFailure
+[ RUN ] FailingTest.AssertLtFailure
+TearDown running.
+failing_test.go:125:
+Expected: less than 17
+Actual: 19
+
+[ FAILED ] FailingTest.AssertLtFailure
+[ RUN ] FailingTest.AssertGeFailure
+TearDown running.
+failing_test.go:130:
+Expected: greater than or equal to 19
+Actual: 17
+
+[ FAILED ] FailingTest.AssertGeFailure
+[ RUN ] FailingTest.AssertGtFailure
+TearDown running.
+failing_test.go:135:
+Expected: greater than 19
+Actual: 17
+
+[ FAILED ] FailingTest.AssertGtFailure
+[ RUN ] FailingTest.AssertTrueFailure
+TearDown running.
+failing_test.go:140:
+Expected: true
+Actual: taco, which is not a bool
+
+[ FAILED ] FailingTest.AssertTrueFailure
+[ RUN ] FailingTest.AssertFalseFailure
+TearDown running.
+failing_test.go:145:
+Expected: false
+Actual: taco, which is not a bool
+
+[ FAILED ] FailingTest.AssertFalseFailure
+[ RUN ] FailingTest.AddFailureRecord
+TearDown running.
+foo.go:17:
+taco
+burrito
+
+[ FAILED ] FailingTest.AddFailureRecord
+[ RUN ] FailingTest.AddFailure
+TearDown running.
+failing_test.go:160:
+taco
+
+failing_test.go:161:
+burrito: 17
+
+[ FAILED ] FailingTest.AddFailure
+[ RUN ] FailingTest.AddFailureThenAbortTest
+TearDown running.
+failing_test.go:165:
+enchilada
+
+[ FAILED ] FailingTest.AddFailureThenAbortTest
+TearDownTestSuite running.
+[----------] Finished with tests from FailingTest
+[----------] Running tests from ExpectFailDuringSetUpTest
+[ RUN ] ExpectFailDuringSetUpTest.PassingMethod
+Method running.
+TearDown running.
+failing_test.go:180:
+Expected: false
+Actual: true
+
+[ FAILED ] ExpectFailDuringSetUpTest.PassingMethod
+[----------] Finished with tests from ExpectFailDuringSetUpTest
+[----------] Running tests from AssertFailDuringSetUpTest
+[ RUN ] AssertFailDuringSetUpTest.PassingMethod
+TearDown running.
+failing_test.go:201:
+Expected: false
+Actual: true
+
+[ FAILED ] AssertFailDuringSetUpTest.PassingMethod
+[----------] Finished with tests from AssertFailDuringSetUpTest
+[----------] Running tests from ExpectFailDuringTearDownTest
+[ RUN ] ExpectFailDuringTearDownTest.PassingMethod
+SetUp running.
+Method running.
+failing_test.go:226:
+Expected: false
+Actual: true
+
+[ FAILED ] ExpectFailDuringTearDownTest.PassingMethod
+[----------] Finished with tests from ExpectFailDuringTearDownTest
+[----------] Running tests from AssertFailDuringTearDownTest
+[ RUN ] AssertFailDuringTearDownTest.PassingMethod
+SetUp running.
+Method running.
+failing_test.go:247:
+Expected: false
+Actual: true
+
+[ FAILED ] AssertFailDuringTearDownTest.PassingMethod
+[----------] Finished with tests from AssertFailDuringTearDownTest
+--- FAIL: TestSomething (1.23s)
+FAIL
+exit status 1
+FAIL somepkg 1.234s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.filtered_test b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.filtered_test
new file mode 100644
index 00000000000..39fa697140c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.filtered_test
@@ -0,0 +1,24 @@
+[----------] Running tests from PartiallyFilteredTest
+[ RUN ] PartiallyFilteredTest.PassingTestBar
+[ OK ] PartiallyFilteredTest.PassingTestBar
+[ RUN ] PartiallyFilteredTest.PartiallyFilteredTestBar
+filtered_test.go:49:
+Expected: has substring "blah"
+Actual: taco
+
+[ FAILED ] PartiallyFilteredTest.PartiallyFilteredTestBar
+[ RUN ] PartiallyFilteredTest.PartiallyFilteredTestBaz
+filtered_test.go:53:
+Expected: less than 17
+Actual: 18
+
+[ FAILED ] PartiallyFilteredTest.PartiallyFilteredTestBaz
+[----------] Finished with tests from PartiallyFilteredTest
+[----------] Running tests from CompletelyFilteredTest
+SetUpTestSuite run!
+TearDownTestSuite run!
+[----------] Finished with tests from CompletelyFilteredTest
+--- FAIL: TestSomething (1.23s)
+FAIL
+exit status 1
+FAIL somepkg 1.234s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.mock_test b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.mock_test
new file mode 100644
index 00000000000..4ca29791de7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.mock_test
@@ -0,0 +1,25 @@
+[----------] Running tests from MockTest
+[ RUN ] MockTest.ExpectationSatisfied
+[ OK ] MockTest.ExpectationSatisfied
+[ RUN ] MockTest.MockExpectationNotSatisfied
+/some/path/mock_test.go:56:
+Unsatisfied expectation; expected At to be called at least 1 times; called 0 times.
+
+[ FAILED ] MockTest.MockExpectationNotSatisfied
+[ RUN ] MockTest.ExpectCallForUnknownMethod
+/some/path/mock_test.go:61:
+Unknown method: FooBar
+
+[ FAILED ] MockTest.ExpectCallForUnknownMethod
+[ RUN ] MockTest.UnexpectedCall
+/some/path/mock_test.go:65:
+Unexpected call to At with args: [11 23]
+
+[ FAILED ] MockTest.UnexpectedCall
+[ RUN ] MockTest.InvokeFunction
+[ OK ] MockTest.InvokeFunction
+[----------] Finished with tests from MockTest
+--- FAIL: TestSomething (1.23s)
+FAIL
+exit status 1
+FAIL somepkg 1.234s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.no_cases_test b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.no_cases_test
new file mode 100644
index 00000000000..8631385581d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.no_cases_test
@@ -0,0 +1,6 @@
+[----------] Running tests from NoCasesTest
+SetUpTestSuite run!
+TearDownTestSuite run!
+[----------] Finished with tests from NoCasesTest
+PASS
+ok somepkg 1.234s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.panicking_test b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.panicking_test
new file mode 100644
index 00000000000..32eac65f7ba
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.panicking_test
@@ -0,0 +1,90 @@
+[----------] Running tests from PanickingTest
+[ RUN ] PanickingTest.ExplicitPanic
+TearDown running.
+panicking_test.go:47:
+panic: Panic in ExplicitPanic
+
+github.com/smartystreets/assertions/internal/ogletest/somepkg_test.(*PanickingTest).ExplicitPanic
+ some_file.txt:0
+runtime.call16
+ /some/path/asm_amd64.s:401
+reflect.Value.call
+ some_file.txt:0
+reflect.Value.Call
+ some_file.txt:0
+
+
+[ FAILED ] PanickingTest.ExplicitPanic
+[ RUN ] PanickingTest.ExplicitPanicInHelperFunction
+TearDown running.
+panicking_test.go:34:
+panic: Panic in someFuncThatPanics
+
+github.com/smartystreets/assertions/internal/ogletest/somepkg_test.someFuncThatPanics
+ some_file.txt:0
+github.com/smartystreets/assertions/internal/ogletest/somepkg_test.(*PanickingTest).ExplicitPanicInHelperFunction
+ some_file.txt:0
+runtime.call16
+ /some/path/asm_amd64.s:401
+reflect.Value.call
+ some_file.txt:0
+reflect.Value.Call
+ some_file.txt:0
+
+
+[ FAILED ] PanickingTest.ExplicitPanicInHelperFunction
+[ RUN ] PanickingTest.NilPointerDerefence
+TearDown running.
+panicking_test.go:56:
+panic: runtime error: invalid memory address or nil pointer dereference
+
+github.com/smartystreets/assertions/internal/ogletest/somepkg_test.(*PanickingTest).NilPointerDerefence
+ some_file.txt:0
+runtime.call16
+ /some/path/asm_amd64.s:401
+reflect.Value.call
+ some_file.txt:0
+reflect.Value.Call
+ some_file.txt:0
+
+
+[ FAILED ] PanickingTest.NilPointerDerefence
+[ RUN ] PanickingTest.ZzzSomeOtherTest
+TearDown running.
+[ OK ] PanickingTest.ZzzSomeOtherTest
+[----------] Finished with tests from PanickingTest
+[----------] Running tests from SetUpPanicTest
+[ RUN ] SetUpPanicTest.SomeTestCase
+SetUp about to panic.
+TearDown running.
+panicking_test.go:74:
+panic: Panic in SetUp
+
+github.com/smartystreets/assertions/internal/ogletest/somepkg_test.(*SetUpPanicTest).SetUp
+ some_file.txt:0
+github.com/smartystreets/assertions/internal/ogletest.func·003
+ some_file.txt:0
+github.com/smartystreets/assertions/internal/ogletest.func·007
+ some_file.txt:0
+
+
+[ FAILED ] SetUpPanicTest.SomeTestCase
+[----------] Finished with tests from SetUpPanicTest
+[----------] Running tests from TearDownPanicTest
+[ RUN ] TearDownPanicTest.SomeTestCase
+TearDown about to panic.
+panicking_test.go:95:
+panic: Panic in TearDown
+
+github.com/smartystreets/assertions/internal/ogletest/somepkg_test.(*TearDownPanicTest).TearDown
+ some_file.txt:0
+github.com/smartystreets/assertions/internal/ogletest.func·005
+ some_file.txt:0
+
+
+[ FAILED ] TearDownPanicTest.SomeTestCase
+[----------] Finished with tests from TearDownPanicTest
+--- FAIL: TestSomething (1.23s)
+FAIL
+exit status 1
+FAIL somepkg 1.234s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.passing_test b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.passing_test
new file mode 100644
index 00000000000..031128842ac
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.passing_test
@@ -0,0 +1,22 @@
+[----------] Running tests from PassingTest
+[ RUN ] PassingTest.EmptyTestMethod
+[ OK ] PassingTest.EmptyTestMethod
+[ RUN ] PassingTest.SuccessfullMatches
+[ OK ] PassingTest.SuccessfullMatches
+[ RUN ] PassingTest.ExpectAliases
+[ OK ] PassingTest.ExpectAliases
+[ RUN ] PassingTest.AssertAliases
+[ OK ] PassingTest.AssertAliases
+[ RUN ] PassingTest.SlowTest
+[ OK ] PassingTest.SlowTest (1234ms)
+[----------] Finished with tests from PassingTest
+[----------] Running tests from PassingTestWithHelpers
+SetUpTestSuite ran.
+[ RUN ] PassingTestWithHelpers.EmptyTestMethod
+SetUp ran.
+TearDown ran.
+[ OK ] PassingTestWithHelpers.EmptyTestMethod
+TearDownTestSuite ran.
+[----------] Finished with tests from PassingTestWithHelpers
+PASS
+ok somepkg 1.234s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.run_twice_test b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.run_twice_test
new file mode 100644
index 00000000000..0749f916454
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.run_twice_test
@@ -0,0 +1,14 @@
+[----------] Running tests from RunTwiceTest
+[ RUN ] RunTwiceTest.PassingMethod
+[ OK ] RunTwiceTest.PassingMethod
+[ RUN ] RunTwiceTest.FailingMethod
+run_twice_test.go:46:
+Expected: 17.5
+Actual: 17
+
+[ FAILED ] RunTwiceTest.FailingMethod
+[----------] Finished with tests from RunTwiceTest
+--- FAIL: TestSomething (1.23s)
+FAIL
+exit status 1
+FAIL somepkg 1.234s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.stop_test b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.stop_test
new file mode 100644
index 00000000000..e7d42c79337
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.stop_test
@@ -0,0 +1,13 @@
+[----------] Running tests from StopTest
+[ RUN ] StopTest.First
+TearDown running.
+[ OK ] StopTest.First
+[ RUN ] StopTest.Second
+About to call StopRunningTests.
+Called StopRunningTests.
+TearDown running.
+[ OK ] StopTest.Second
+TearDownTestSuite running.
+Exiting early due to user request.
+exit status 1
+FAIL somepkg 1.234s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.unexported_test b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.unexported_test
new file mode 100644
index 00000000000..6221e65ea9a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/golden.unexported_test
@@ -0,0 +1,12 @@
+[----------] Running tests from UnexportedTest
+[ RUN ] UnexportedTest.SomeTest
+unexported_test.go:42:
+Expected: 4
+Actual: 3
+
+[ FAILED ] UnexportedTest.SomeTest
+[----------] Finished with tests from UnexportedTest
+--- FAIL: TestSomething (1.23s)
+FAIL
+exit status 1
+FAIL somepkg 1.234s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/mock.test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/mock.test.go
new file mode 100644
index 00000000000..8e0fca9cc01
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/mock.test.go
@@ -0,0 +1,82 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ "github.com/smartystreets/assertions/internal/oglemock"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "github.com/smartystreets/assertions/internal/ogletest/test_cases/mock_image"
+ "image/color"
+ "testing"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type MockTest struct {
+ controller oglemock.Controller
+ image mock_image.MockImage
+}
+
+func init() { RegisterTestSuite(&MockTest{}) }
+func TestMockTest(t *testing.T) { RunTests(t) }
+
+func (t *MockTest) SetUp(i *TestInfo) {
+ t.controller = i.MockController
+ t.image = mock_image.NewMockImage(t.controller, "some mock image")
+}
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *MockTest) ExpectationSatisfied() {
+ ExpectCall(t.image, "At")(11, GreaterThan(19)).
+ WillOnce(oglemock.Return(color.Gray{0}))
+
+ ExpectThat(t.image.At(11, 23), IdenticalTo(color.Gray{0}))
+}
+
+func (t *MockTest) MockExpectationNotSatisfied() {
+ ExpectCall(t.image, "At")(11, GreaterThan(19)).
+ WillOnce(oglemock.Return(color.Gray{0}))
+}
+
+func (t *MockTest) ExpectCallForUnknownMethod() {
+ ExpectCall(t.image, "FooBar")(11)
+}
+
+func (t *MockTest) UnexpectedCall() {
+ t.image.At(11, 23)
+}
+
+func (t *MockTest) InvokeFunction() {
+ var suppliedX, suppliedY int
+ f := func(x, y int) color.Color {
+ suppliedX = x
+ suppliedY = y
+ return color.Gray{17}
+ }
+
+ ExpectCall(t.image, "At")(Any(), Any()).
+ WillOnce(oglemock.Invoke(f))
+
+ ExpectThat(t.image.At(-1, 12), IdenticalTo(color.Gray{17}))
+ ExpectEq(-1, suppliedX)
+ ExpectEq(12, suppliedY)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/mock_image/mock_image.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/mock_image/mock_image.go
new file mode 100644
index 00000000000..a8d55bc4808
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/mock_image/mock_image.go
@@ -0,0 +1,115 @@
+// This file was auto-generated using createmock. See the following page for
+// more information:
+//
+// https://github.com/smartystreets/assertions/internal/oglemock
+//
+
+package mock_image
+
+import (
+ fmt "fmt"
+ oglemock "github.com/smartystreets/assertions/internal/oglemock"
+ image "image"
+ color "image/color"
+ runtime "runtime"
+ unsafe "unsafe"
+)
+
+type MockImage interface {
+ image.Image
+ oglemock.MockObject
+}
+
+type mockImage struct {
+ controller oglemock.Controller
+ description string
+}
+
+func NewMockImage(
+ c oglemock.Controller,
+ desc string) MockImage {
+ return &mockImage{
+ controller: c,
+ description: desc,
+ }
+}
+
+func (m *mockImage) Oglemock_Id() uintptr {
+ return uintptr(unsafe.Pointer(m))
+}
+
+func (m *mockImage) Oglemock_Description() string {
+ return m.description
+}
+
+func (m *mockImage) At(p0 int, p1 int) (o0 color.Color) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "At",
+ file,
+ line,
+ []interface{}{p0, p1})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockImage.At: invalid return values: %v", retVals))
+ }
+
+ // o0 color.Color
+ if retVals[0] != nil {
+ o0 = retVals[0].(color.Color)
+ }
+
+ return
+}
+
+func (m *mockImage) Bounds() (o0 image.Rectangle) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "Bounds",
+ file,
+ line,
+ []interface{}{})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockImage.Bounds: invalid return values: %v", retVals))
+ }
+
+ // o0 image.Rectangle
+ if retVals[0] != nil {
+ o0 = retVals[0].(image.Rectangle)
+ }
+
+ return
+}
+
+func (m *mockImage) ColorModel() (o0 color.Model) {
+ // Get a file name and line number for the caller.
+ _, file, line, _ := runtime.Caller(1)
+
+ // Hand the call off to the controller, which does most of the work.
+ retVals := m.controller.HandleMethodCall(
+ m,
+ "ColorModel",
+ file,
+ line,
+ []interface{}{})
+
+ if len(retVals) != 1 {
+ panic(fmt.Sprintf("mockImage.ColorModel: invalid return values: %v", retVals))
+ }
+
+ // o0 color.Model
+ if retVals[0] != nil {
+ o0 = retVals[0].(color.Model)
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/no_cases.test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/no_cases.test.go
new file mode 100644
index 00000000000..ad204e053e5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/no_cases.test.go
@@ -0,0 +1,41 @@
+// Copyright 2012 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "fmt"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "testing"
+)
+
+func TestNoCases(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type NoCasesTest struct {
+}
+
+func init() { RegisterTestSuite(&NoCasesTest{}) }
+
+func (t *NoCasesTest) SetUpTestSuite() {
+ fmt.Println("SetUpTestSuite run!")
+}
+
+func (t *NoCasesTest) TearDownTestSuite() {
+ fmt.Println("TearDownTestSuite run!")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/panicking.test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/panicking.test.go
new file mode 100644
index 00000000000..59d1fe3bae0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/panicking.test.go
@@ -0,0 +1,99 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "fmt"
+ "log"
+ "testing"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+func TestPanickingTest(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// PanickingTest
+////////////////////////////////////////////////////////////////////////
+
+func someFuncThatPanics() {
+ panic("Panic in someFuncThatPanics")
+}
+
+type PanickingTest struct {
+}
+
+func init() { RegisterTestSuite(&PanickingTest{}) }
+
+func (t *PanickingTest) TearDown() {
+ fmt.Println("TearDown running.")
+}
+
+func (t *PanickingTest) ExplicitPanic() {
+ panic("Panic in ExplicitPanic")
+}
+
+func (t *PanickingTest) ExplicitPanicInHelperFunction() {
+ someFuncThatPanics()
+}
+
+func (t *PanickingTest) NilPointerDerefence() {
+ var p *int
+ log.Println(*p)
+}
+
+func (t *PanickingTest) ZzzSomeOtherTest() {
+ ExpectThat(17, Equals(17.0))
+}
+
+////////////////////////////////////////////////////////////////////////
+// SetUpPanicTest
+////////////////////////////////////////////////////////////////////////
+
+type SetUpPanicTest struct {
+}
+
+func init() { RegisterTestSuite(&SetUpPanicTest{}) }
+
+func (t *SetUpPanicTest) SetUp(ti *TestInfo) {
+ fmt.Println("SetUp about to panic.")
+ panic("Panic in SetUp")
+}
+
+func (t *SetUpPanicTest) TearDown() {
+ fmt.Println("TearDown running.")
+}
+
+func (t *SetUpPanicTest) SomeTestCase() {
+}
+
+////////////////////////////////////////////////////////////////////////
+// TearDownPanicTest
+////////////////////////////////////////////////////////////////////////
+
+type TearDownPanicTest struct {
+}
+
+func init() { RegisterTestSuite(&TearDownPanicTest{}) }
+
+func (t *TearDownPanicTest) TearDown() {
+ fmt.Println("TearDown about to panic.")
+ panic("Panic in TearDown")
+}
+
+func (t *TearDownPanicTest) SomeTestCase() {
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/passing.test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/passing.test.go
new file mode 100644
index 00000000000..01d8e63446e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/passing.test.go
@@ -0,0 +1,120 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+func TestPassingTest(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// PassingTest
+////////////////////////////////////////////////////////////////////////
+
+type PassingTest struct {
+}
+
+func init() { RegisterTestSuite(&PassingTest{}) }
+
+func (t *PassingTest) EmptyTestMethod() {
+}
+
+func (t *PassingTest) SuccessfullMatches() {
+ ExpectThat(17, Equals(17.0))
+ ExpectThat(16.9, LessThan(17))
+ ExpectThat("taco", HasSubstr("ac"))
+
+ AssertThat(17, Equals(17.0))
+ AssertThat(16.9, LessThan(17))
+ AssertThat("taco", HasSubstr("ac"))
+}
+
+func (t *PassingTest) ExpectAliases() {
+ ExpectEq(17, 17.0)
+
+ ExpectLe(17, 17.0)
+ ExpectLe(17, 18.0)
+ ExpectLt(17, 18.0)
+
+ ExpectGe(17, 17.0)
+ ExpectGe(17, 16.0)
+ ExpectGt(17, 16.0)
+
+ ExpectNe(17, 18.0)
+
+ ExpectTrue(true)
+ ExpectFalse(false)
+}
+
+func (t *PassingTest) AssertAliases() {
+ AssertEq(17, 17.0)
+
+ AssertLe(17, 17.0)
+ AssertLe(17, 18.0)
+ AssertLt(17, 18.0)
+
+ AssertGe(17, 17.0)
+ AssertGe(17, 16.0)
+ AssertGt(17, 16.0)
+
+ AssertNe(17, 18.0)
+
+ AssertTrue(true)
+ AssertFalse(false)
+}
+
+func (t *PassingTest) SlowTest() {
+ time.Sleep(37 * time.Millisecond)
+}
+
+////////////////////////////////////////////////////////////////////////
+// PassingTestWithHelpers
+////////////////////////////////////////////////////////////////////////
+
+type PassingTestWithHelpers struct {
+}
+
+var _ SetUpTestSuiteInterface = &PassingTestWithHelpers{}
+var _ SetUpInterface = &PassingTestWithHelpers{}
+var _ TearDownInterface = &PassingTestWithHelpers{}
+var _ TearDownTestSuiteInterface = &PassingTestWithHelpers{}
+
+func init() { RegisterTestSuite(&PassingTestWithHelpers{}) }
+
+func (t *PassingTestWithHelpers) SetUpTestSuite() {
+ fmt.Println("SetUpTestSuite ran.")
+}
+
+func (t *PassingTestWithHelpers) SetUp(ti *TestInfo) {
+ fmt.Println("SetUp ran.")
+}
+
+func (t *PassingTestWithHelpers) TearDown() {
+ fmt.Println("TearDown ran.")
+}
+
+func (t *PassingTestWithHelpers) TearDownTestSuite() {
+ fmt.Println("TearDownTestSuite ran.")
+}
+
+func (t *PassingTestWithHelpers) EmptyTestMethod() {
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/run_twice.test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/run_twice.test.go
new file mode 100644
index 00000000000..a3a36c17525
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/run_twice.test.go
@@ -0,0 +1,47 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "testing"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type RunTwiceTest struct {
+}
+
+func init() { RegisterTestSuite(&RunTwiceTest{}) }
+
+// Set up two helpers that call RunTests. The test should still only be run
+// once.
+func TestOgletest(t *testing.T) { RunTests(t) }
+func TestOgletest2(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *RunTwiceTest) PassingMethod() {
+}
+
+func (t *RunTwiceTest) FailingMethod() {
+ ExpectThat(17, Equals(17.5))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/stop.test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/stop.test.go
new file mode 100644
index 00000000000..a008c081e91
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/stop.test.go
@@ -0,0 +1,61 @@
+// Copyright 2015 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ "fmt"
+ "testing"
+
+ . "github.com/smartystreets/assertions/internal/ogletest"
+)
+
+func TestStop(t *testing.T) { RunTests(t) }
+
+////////////////////////////////////////////////////////////////////////
+// Boilerplate
+////////////////////////////////////////////////////////////////////////
+
+type StopTest struct {
+}
+
+var _ TearDownInterface = &StopTest{}
+var _ TearDownTestSuiteInterface = &StopTest{}
+
+func init() { RegisterTestSuite(&StopTest{}) }
+
+func (t *StopTest) TearDown() {
+ fmt.Println("TearDown running.")
+}
+
+func (t *StopTest) TearDownTestSuite() {
+ fmt.Println("TearDownTestSuite running.")
+}
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *StopTest) First() {
+}
+
+func (t *StopTest) Second() {
+ fmt.Println("About to call StopRunningTests.")
+ StopRunningTests()
+ fmt.Println("Called StopRunningTests.")
+}
+
+func (t *StopTest) Third() {
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/unexported.test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/unexported.test.go
new file mode 100644
index 00000000000..a425e78d404
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_cases/unexported.test.go
@@ -0,0 +1,43 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package oglematchers_test
+
+import (
+ . "github.com/smartystreets/assertions/internal/oglematchers"
+ . "github.com/smartystreets/assertions/internal/ogletest"
+ "testing"
+)
+
+////////////////////////////////////////////////////////////////////////
+// Helpers
+////////////////////////////////////////////////////////////////////////
+
+type UnexportedTest struct {
+}
+
+func init() { RegisterTestSuite(&UnexportedTest{}) }
+func TestUnexportedTest(t *testing.T) { RunTests(t) }
+
+func (t *UnexportedTest) someUnexportedMethod() {
+}
+
+////////////////////////////////////////////////////////////////////////
+// Tests
+////////////////////////////////////////////////////////////////////////
+
+func (t *UnexportedTest) SomeTest() {
+ ExpectThat(3, Equals(4))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_info.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_info.go
new file mode 100644
index 00000000000..3ae12526bc2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/ogletest/test_info.go
@@ -0,0 +1,91 @@
+// Copyright 2011 Aaron Jacobs. All Rights Reserved.
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs)
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ogletest
+
+import (
+ "sync"
+
+ "golang.org/x/net/context"
+
+ "github.com/smartystreets/assertions/internal/oglemock"
+)
+
+// TestInfo represents information about a currently running or previously-run
+// test.
+type TestInfo struct {
+ // A mock controller that is set up to report errors to the ogletest test
+ // runner. This can be used for setting up mock expectations and handling
+ // mock calls. The Finish method should not be run by the user; ogletest will
+ // do that automatically after the test's TearDown method is run.
+ //
+ // Note that this feature is still experimental, and is subject to change.
+ MockController oglemock.Controller
+
+ // A context that can be used by tests for long-running operations. In
+ // particular, this enables conveniently tracing the execution of a test
+ // function with reqtrace.
+ Ctx context.Context
+
+ // A mutex protecting shared state.
+ mu sync.RWMutex
+
+ // A set of failure records that the test has produced.
+ //
+ // GUARDED_BY(mu)
+ failureRecords []FailureRecord
+}
+
+// currentlyRunningTest is the state for the currently running test, if any.
+var currentlyRunningTest *TestInfo
+
+// newTestInfo creates a valid but empty TestInfo struct.
+func newTestInfo() (info *TestInfo) {
+ info = &TestInfo{}
+ info.MockController = oglemock.NewController(&testInfoErrorReporter{info})
+ info.Ctx = context.Background()
+
+ return
+}
+
+// testInfoErrorReporter is an oglemock.ErrorReporter that writes failure
+// records into a test info struct.
+type testInfoErrorReporter struct {
+ testInfo *TestInfo
+}
+
+func (r *testInfoErrorReporter) ReportError(
+ fileName string,
+ lineNumber int,
+ err error) {
+ r.testInfo.mu.Lock()
+ defer r.testInfo.mu.Unlock()
+
+ record := FailureRecord{
+ FileName: fileName,
+ LineNumber: lineNumber,
+ Error: err.Error(),
+ }
+
+ r.testInfo.failureRecords = append(r.testInfo.failureRecords, record)
+}
+
+func (r *testInfoErrorReporter) ReportFatalError(
+ fileName string,
+ lineNumber int,
+ err error) {
+ r.ReportError(fileName, lineNumber, err)
+ AbortTest()
+}
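
TestInfo.Ctx above is what lets individual ogletest methods participate in reqtrace tracing (the reqtrace package itself is vendored below). A minimal sketch, assuming a hypothetical TracedTest suite that saves the *TestInfo handed to SetUp; only RegisterTestSuite, RunTests, TestInfo, and reqtrace.StartSpan come from the vendored packages:

```Go
package somepkg_test

import (
	"testing"

	. "github.com/smartystreets/assertions/internal/ogletest"
	"github.com/smartystreets/assertions/internal/reqtrace"
)

// TracedTest is a hypothetical suite used only to illustrate TestInfo.Ctx.
type TracedTest struct {
	info *TestInfo // saved by SetUp so test methods can reach Ctx
}

func init() { RegisterTestSuite(&TracedTest{}) }
func TestTraced(t *testing.T) { RunTests(t) }

func (t *TracedTest) SetUp(i *TestInfo) {
	t.info = i
}

func (t *TracedTest) SomeTest() {
	// Open a reqtrace span under the test's context; report success on exit.
	_, report := reqtrace.StartSpan(t.info.Ctx, "SomeTest")
	defer report(nil)
}
```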
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/.gitignore b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/.gitignore
new file mode 100644
index 00000000000..daf913b1b34
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/LICENSE b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/LICENSE
new file mode 100644
index 00000000000..8f71f43fee3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/README.md b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/README.md
new file mode 100644
index 00000000000..4392452b5b1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/README.md
@@ -0,0 +1,53 @@
+[![GoDoc](https://godoc.org/github.com/smartystreets/assertions/internal/reqtrace?status.svg)](https://godoc.org/github.com/smartystreets/assertions/internal/reqtrace)
+
+reqtrace is a package for simple request tracing. It requires nothing of its
+user except:
+
+ * They must use [golang.org/x/net/context][context].
+ * They must add a single line to each function they want to be visible in
+ traces.
+
+[context]: http://godoc.org/golang.org/x/net/context
+
+In particular, reqtrace is console-based and doesn't require an HTTP server.
+
+**Warning**: This package is still barebones and in its early days. I reserve
+the right to make backwards-incompatible changes to its API. But if it's useful
+to you in its current form, have at it.
+
+## Use
+
+Call reqtrace.Trace anywhere you want to start a new root trace. (This is
+probably where you create your root context.) This returns a new context that
+you should pass to child operations, and a reporting function that you must use
+to inform reqtrace when the trace is complete.
+
+For example:
+
+```Go
+func HandleRequest(r *someRequest) (err error) {
+ ctx, report := reqtrace.Trace(context.Background(), "HandleRequest")
+ defer func() { report(err) }()
+
+ // Do two things for this request.
+ DoSomething(ctx, r)
+ DoSomethingElse(ctx, r)
+}
+```
+
+Within other functions that you want to show up in the trace, call
+reqtrace.StartSpan (or its more convenient sibling reqtrace.StartSpanWithError):
+
+```Go
+func DoSomething(ctx context.Context, r *someRequest) (err error) {
+ defer reqtrace.StartSpanWithError(&ctx, &err, "DoSomething")()
+
+ // Process the request somehow using ctx. If downstream code also annotates
+ // using reqtrace, reqtrace will know that its spans are descendants of
+ // this one.
+ CallAnotherLibrary(ctx, r.Param)
+}
+```
+
+When `--reqtrace.enable` is set, the completion of a trace will cause helpful
+ASCII art to be spit out.
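
The two README snippets above compose into a complete program. A minimal runnable sketch, assuming hypothetical main and doWork functions (only the reqtrace calls and the --reqtrace.enable flag come from the package itself):

```Go
package main

import (
	"errors"
	"flag"

	"golang.org/x/net/context"

	"github.com/smartystreets/assertions/internal/reqtrace"
)

// doWork is a hypothetical operation; its span reports err when it returns.
func doWork(ctx context.Context) (err error) {
	defer reqtrace.StartSpanWithError(&ctx, &err, "doWork")()
	return errors.New("example failure recorded against the span")
}

func main() {
	flag.Parse() // reqtrace registers --reqtrace.enable with the flag package

	ctx, report := reqtrace.Trace(context.Background(), "main")
	report(doWork(ctx)) // completing the root span logs the trace when enabled
}
```

Running this with `go run main.go --reqtrace.enable` prints the trace banner and per-span ASCII art described above; without the flag, the same calls are no-ops.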
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/reqtrace.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/reqtrace.go
new file mode 100644
index 00000000000..853c024244f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/reqtrace.go
@@ -0,0 +1,132 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package reqtrace contains a very simple request tracing framework.
+package reqtrace
+
+import (
+ "flag"
+
+ "golang.org/x/net/context"
+)
+
+type contextKey int
+
+var fEnabled = flag.Bool("reqtrace.enable", false, "Collect and print traces.")
+
+// The key used to associate a *traceState with a context.
+const traceStateKey contextKey = 0
+
+// A function that must be called exactly once to report the outcome of an
+// operation represented by a span.
+type ReportFunc func(error)
+
+// Return false only if traces are disabled, i.e. Trace will never cause a
+// trace to be initiated.
+//
+// REQUIRES: flag.Parsed()
+func Enabled() (enabled bool) {
+ enabled = *fEnabled
+ return
+}
+
+// Begin a span within the current trace. Return a new context that should be
+// used for operations that logically occur within the span, and a report
+// function that must be called with the outcome of the logical operation
+// represented by the span.
+//
+// If no trace is active, no span will be created but ctx and report will still
+// be valid.
+func StartSpan(
+ parent context.Context,
+ desc string) (ctx context.Context, report ReportFunc) {
+ // Look for the trace state.
+ val := parent.Value(traceStateKey)
+ if val == nil {
+ // Nothing to do.
+ ctx = parent
+ report = func(err error) {}
+ return
+ }
+
+ ts := val.(*traceState)
+
+ // Set up the report function.
+ report = ts.CreateSpan(desc)
+
+ // For now we don't do anything interesting with the context. In the future,
+ // we may use it to record span hierarchy.
+ ctx = parent
+
+ return
+}
+
+// A wrapper around StartSpan that can be more convenient to use when the
+// lifetime of a span matches the lifetime of a function. Intended to be used
+// in a defer statement within a function using a named error return parameter.
+//
+// Equivalent to calling StartSpan with *ctx, replacing *ctx with the resulting
+// new context, then setting f to a function that will invoke the report
+// function with the contents of *error at the time that it is called.
+//
+// Example:
+//
+// func DoSomething(ctx context.Context) (err error) {
+// defer reqtrace.StartSpanWithError(&ctx, &err, "DoSomething")()
+// [...]
+// }
+//
+func StartSpanWithError(
+ ctx *context.Context,
+ err *error,
+ desc string) (f func()) {
+ var report ReportFunc
+ *ctx, report = StartSpan(*ctx, desc)
+ f = func() { report(*err) }
+ return
+}
+
+// Like StartSpan, but begins a root span for a new trace if no trace is active
+// in the supplied context and tracing is enabled for the process.
+func Trace(
+ parent context.Context,
+ desc string) (ctx context.Context, report ReportFunc) {
+ // If tracing is disabled, this is a no-op.
+ if !*fEnabled {
+ ctx = parent
+ report = func(err error) {}
+ return
+ }
+
+ // Is this context already being traced? If so, simply add a span.
+ if parent.Value(traceStateKey) != nil {
+ ctx, report = StartSpan(parent, desc)
+ return
+ }
+
+ // Set up a new trace state.
+ ts := new(traceState)
+ baseReport := ts.CreateSpan(desc)
+
+ // Log when finished.
+ report = func(err error) {
+ baseReport(err)
+ ts.Log()
+ }
+
+ // Set up the context.
+ ctx = context.WithValue(parent, traceStateKey, ts)
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/trace_state.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/trace_state.go
new file mode 100644
index 00000000000..614ef90d892
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/internal/reqtrace/trace_state.go
@@ -0,0 +1,175 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package reqtrace
+
+import (
+ "log"
+ "math"
+ "os"
+ "strings"
+ "sync"
+ "time"
+)
+
+const logFlags = 0
+
+var gLogger = log.New(os.Stderr, "reqtrace: ", logFlags)
+
+type span struct {
+ // Fixed at creation.
+ desc string
+ start time.Time
+
+ // Updated by report functions.
+ finished bool
+ end time.Time
+ err error
+}
+
+// All of the state for a particular trace root. The zero value is usable.
+type traceState struct {
+ mu sync.Mutex
+
+ // The list of spans associated with this state. Append-only.
+ //
+ // GUARDED_BY(mu)
+ spans []*span
+}
+
+func (ts *traceState) report(spanIndex int, err error) {
+ ts.mu.Lock()
+ defer ts.mu.Unlock()
+
+ s := ts.spans[spanIndex]
+ s.finished = true
+ s.end = time.Now()
+ s.err = err
+}
+
+// Associate a new span with the trace. Return a function that will report its
+// completion.
+func (ts *traceState) CreateSpan(desc string) (report ReportFunc) {
+ ts.mu.Lock()
+ defer ts.mu.Unlock()
+
+ index := len(ts.spans)
+ ts.spans = append(ts.spans, &span{desc: desc, start: time.Now()})
+
+ report = func(err error) { ts.report(index, err) }
+ return
+}
+
+func round(x float64) float64 {
+ if x < 0 {
+ return math.Ceil(x - 0.5)
+ }
+
+ return math.Floor(x + 0.5)
+}
+
+// Log information about the spans in this trace.
+func (ts *traceState) Log() {
+ ts.mu.Lock()
+ defer ts.mu.Unlock()
+ gLogger.Println()
+
+ // Special case: we require at least one span.
+ if len(ts.spans) == 0 {
+ return
+ }
+
+ // Print a banner for this trace.
+ const bannerHalfLength = 45
+
+ gLogger.Println()
+ gLogger.Printf(
+ "%s %s %s",
+ strings.Repeat("=", bannerHalfLength),
+ ts.spans[0].desc,
+ strings.Repeat("=", bannerHalfLength))
+ gLogger.Printf("Start time: %v", ts.spans[0].start.Format(time.RFC3339Nano))
+ gLogger.Println()
+
+ // Find the minimum start time and maximum end time of all durations.
+ var minStart time.Time
+ var maxEnd time.Time
+ for _, s := range ts.spans {
+ if !s.finished {
+ continue
+ }
+
+ if minStart.IsZero() || s.start.Before(minStart) {
+ minStart = s.start
+ }
+
+ if maxEnd.Before(s.end) {
+ maxEnd = s.end
+ }
+ }
+
+ // Bail out if something weird happened.
+ //
+ // TODO(jacobsa): Be more graceful.
+ totalDuration := maxEnd.Sub(minStart)
+ if minStart.IsZero() || maxEnd.IsZero() || totalDuration <= 0 {
+ gLogger.Println("(Weird trace)")
+ return
+ }
+
+ // Calculate the number of nanoseconds elapsed, as a floating point number.
+ totalNs := float64(totalDuration / time.Nanosecond)
+
+ // Log each span with some ASCII art showing its length relative to the
+ // total.
+ const totalNumCols float64 = 120
+ for _, s := range ts.spans {
+ if !s.finished {
+ gLogger.Printf("(Unfinished: %s)", s.desc)
+ gLogger.Println()
+ continue
+ }
+
+ // Calculate the duration of the span, and its width relative to the
+ // longest span.
+ d := s.end.Sub(s.start)
+ if d <= 0 {
+ gLogger.Println("(Weird duration)")
+ gLogger.Println()
+ continue
+ }
+
+ durationRatio := float64(d/time.Nanosecond) / totalNs
+
+ // We will offset the label and banner proportional to the time since the
+ // start of the earliest span.
+ offsetRatio := float64(s.start.Sub(minStart)/time.Nanosecond) / totalNs
+ offsetChars := int(round(offsetRatio * totalNumCols))
+ offsetStr := strings.Repeat(" ", offsetChars)
+
+ // Print the description and duration.
+ gLogger.Printf("%s%v", offsetStr, s.desc)
+ gLogger.Printf("%s%v", offsetStr, d)
+
+ // Print a banner showing the duration graphically.
+ bannerChars := int(round(durationRatio * totalNumCols))
+ var dashes string
+ if bannerChars > 2 {
+ dashes = strings.Repeat("-", bannerChars-2)
+ }
+
+ gLogger.Printf("%s|%s|", offsetStr, dashes)
+ gLogger.Println()
+ }
+}
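
To make Log's banner geometry concrete, here is a standalone sketch of the offset/width arithmetic, reusing the round helper above with made-up sample ratios (0.5 and 0.25):

```Go
package main

import (
	"fmt"
	"math"
	"strings"
)

// round matches the helper in trace_state.go (round half away from zero).
func round(x float64) float64 {
	if x < 0 {
		return math.Ceil(x - 0.5)
	}
	return math.Floor(x + 0.5)
}

func main() {
	const totalNumCols = 120.0
	// Sample ratios: a span starting halfway through the trace (offset 0.5)
	// that covers a quarter of the total duration (0.25).
	offsetChars := int(round(0.5 * totalNumCols))  // 60 leading spaces
	bannerChars := int(round(0.25 * totalNumCols)) // 30-column banner
	fmt.Printf("%s|%s|\n",
		strings.Repeat(" ", offsetChars),
		strings.Repeat("-", bannerChars-2)) // "|" + 28 dashes + "|"
}
```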
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/messages.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/messages.go
new file mode 100644
index 00000000000..ae1a15116f5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/messages.go
@@ -0,0 +1,94 @@
+package assertions
+
+const ( // equality
+ shouldHaveBeenEqual = "Expected: '%v'\nActual: '%v'\n(Should be equal)"
+ shouldNotHaveBeenEqual = "Expected '%v'\nto NOT equal '%v'\n(but it did)!"
+ shouldHaveBeenEqualTypeMismatch = "Expected: '%v' (%T)\nActual: '%v' (%T)\n(Should be equal, type mismatch)"
+ shouldHaveBeenAlmostEqual = "Expected '%v' to almost equal '%v' (but it didn't)!"
+ shouldHaveNotBeenAlmostEqual = "Expected '%v' to NOT almost equal '%v' (but it did)!"
+ shouldHaveResembled = "Expected: '%#v'\nActual: '%#v'\n(Should resemble)!"
+ shouldHaveResembledTypeMismatch = "Expected: '%#v' (%T)\nActual: '%#v' (%T)\n(Should resemble, type mismatch)"
+ shouldNotHaveResembled = "Expected '%#v'\nto NOT resemble '%#v'\n(but it did)!"
+ shouldBePointers = "Both arguments should be pointers "
+ shouldHaveBeenNonNilPointer = shouldBePointers + "(the %s was %s)!"
+ shouldHavePointedTo = "Expected '%+v' (address: '%v') and '%+v' (address: '%v') to be the same address (but they weren't)!"
+ shouldNotHavePointedTo = "Expected '%+v' and '%+v' to be different references (but they matched: '%v')!"
+ shouldHaveBeenNil = "Expected: nil\nActual: '%v'"
+ shouldNotHaveBeenNil = "Expected '%+v' to NOT be nil (but it was)!"
+ shouldHaveBeenTrue = "Expected: true\nActual: %v"
+ shouldHaveBeenFalse = "Expected: false\nActual: %v"
+ shouldHaveBeenZeroValue = "'%+v' should have been the zero value" //"Expected: (zero value)\nActual: %v"
+)
+
+const ( // quantity comparisons
+ shouldHaveBeenGreater = "Expected '%v' to be greater than '%v' (but it wasn't)!"
+ shouldHaveBeenGreaterOrEqual = "Expected '%v' to be greater than or equal to '%v' (but it wasn't)!"
+ shouldHaveBeenLess = "Expected '%v' to be less than '%v' (but it wasn't)!"
+ shouldHaveBeenLessOrEqual = "Expected '%v' to be less than or equal to '%v' (but it wasn't)!"
+ shouldHaveBeenBetween = "Expected '%v' to be between '%v' and '%v' (but it wasn't)!"
+ shouldNotHaveBeenBetween = "Expected '%v' NOT to be between '%v' and '%v' (but it was)!"
+ shouldHaveDifferentUpperAndLower = "The lower and upper bounds must be different values (they were both '%v')."
+ shouldHaveBeenBetweenOrEqual = "Expected '%v' to be between '%v' and '%v' or equal to one of them (but it wasn't)!"
+ shouldNotHaveBeenBetweenOrEqual = "Expected '%v' NOT to be between '%v' and '%v' or equal to one of them (but it was)!"
+)
+
+const ( // collections
+ shouldHaveContained = "Expected the container (%v) to contain: '%v' (but it didn't)!"
+ shouldNotHaveContained = "Expected the container (%v) NOT to contain: '%v' (but it did)!"
+ shouldHaveContainedKey = "Expected the %v to contain the key: %v (but it didn't)!"
+ shouldNotHaveContainedKey = "Expected the %v NOT to contain the key: %v (but it did)!"
+ shouldHaveBeenIn = "Expected '%v' to be in the container (%v), but it wasn't!"
+ shouldNotHaveBeenIn = "Expected '%v' NOT to be in the container (%v), but it was!"
+ shouldHaveBeenAValidCollection = "You must provide a valid container (was %v)!"
+ shouldHaveBeenAValidMap = "You must provide a valid map type (was %v)!"
+ shouldHaveBeenEmpty = "Expected %+v to be empty (but it wasn't)!"
+ shouldNotHaveBeenEmpty = "Expected %+v to NOT be empty (but it was)!"
+ shouldHaveBeenAValidInteger = "You must provide a valid integer (was %v)!"
+ shouldHaveBeenAValidLength = "You must provide a valid positive integer (was %v)!"
+ shouldHaveHadLength = "Expected %+v (length: %v) to have length equal to '%v', but it wasn't!"
+)
+
+const ( // strings
+ shouldHaveStartedWith = "Expected '%v'\nto start with '%v'\n(but it didn't)!"
+ shouldNotHaveStartedWith = "Expected '%v'\nNOT to start with '%v'\n(but it did)!"
+ shouldHaveEndedWith = "Expected '%v'\nto end with '%v'\n(but it didn't)!"
+ shouldNotHaveEndedWith = "Expected '%v'\nNOT to end with '%v'\n(but it did)!"
+ shouldAllBeStrings = "All arguments to this assertion must be strings (you provided: %v)."
+ shouldBothBeStrings = "Both arguments to this assertion must be strings (you provided %v and %v)."
+ shouldBeString = "The argument to this assertion must be a string (you provided %v)."
+ shouldHaveContainedSubstring = "Expected '%s' to contain substring '%s' (but it didn't)!"
+ shouldNotHaveContainedSubstring = "Expected '%s' NOT to contain substring '%s' (but it did)!"
+ shouldHaveBeenBlank = "Expected '%s' to be blank (but it wasn't)!"
+ shouldNotHaveBeenBlank = "Expected value to NOT be blank (but it was)!"
+)
+
+const ( // panics
+ shouldUseVoidNiladicFunction = "You must provide a void, niladic function as the first argument!"
+ shouldHavePanickedWith = "Expected func() to panic with '%v' (but it panicked with '%v')!"
+ shouldHavePanicked = "Expected func() to panic (but it didn't)!"
+ shouldNotHavePanicked = "Expected func() NOT to panic (error: '%+v')!"
+ shouldNotHavePanickedWith = "Expected func() NOT to panic with '%v' (but it did)!"
+)
+
+const ( // type checking
+ shouldHaveBeenA = "Expected '%v' to be: '%v' (but was: '%v')!"
+ shouldNotHaveBeenA = "Expected '%v' to NOT be: '%v' (but it was)!"
+
+ shouldHaveImplemented = "Expected: '%v interface support'\nActual: '%v' does not implement the interface!"
+ shouldNotHaveImplemented = "Expected '%v'\nto NOT implement '%v'\n(but it did)!"
+	shouldCompareWithInterfacePointer = "The expected value must be a pointer to an interface type (e.g. *fmt.Stringer)"
+ shouldNotBeNilActual = "The actual value was 'nil' and should be a value or a pointer to a value!"
+)
+
+const ( // time comparisons
+ shouldUseTimes = "You must provide time instances as arguments to this assertion."
+ shouldUseTimeSlice = "You must provide a slice of time instances as the first argument to this assertion."
+ shouldUseDurationAndTime = "You must provide a duration and a time as arguments to this assertion."
+ shouldHaveHappenedBefore = "Expected '%v' to happen before '%v' (it happened '%v' after)!"
+ shouldHaveHappenedAfter = "Expected '%v' to happen after '%v' (it happened '%v' before)!"
+ shouldHaveHappenedBetween = "Expected '%v' to happen between '%v' and '%v' (it happened '%v' outside threshold)!"
+ shouldNotHaveHappenedOnOrBetween = "Expected '%v' to NOT happen on or between '%v' and '%v' (but it did)!"
+
+ // format params: incorrect-index, previous-index, previous-time, incorrect-index, incorrect-time
+ shouldHaveBeenChronological = "The 'Time' at index [%d] should have happened after the previous one (but it didn't!):\n [%d]: %s\n [%d]: %s (see, it happened before!)"
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/panic.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/panic.go
new file mode 100644
index 00000000000..7e75db1784b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/panic.go
@@ -0,0 +1,115 @@
+package assertions
+
+import "fmt"
+
+// ShouldPanic receives a void, niladic function and expects to recover a panic.
+func ShouldPanic(actual interface{}, expected ...interface{}) (message string) {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+
+ action, _ := actual.(func())
+
+ if action == nil {
+ message = shouldUseVoidNiladicFunction
+ return
+ }
+
+ defer func() {
+ recovered := recover()
+ if recovered == nil {
+ message = shouldHavePanicked
+ } else {
+ message = success
+ }
+ }()
+ action()
+
+ return
+}
+
+// ShouldNotPanic receives a void, niladic function and expects to execute the function without any panic.
+func ShouldNotPanic(actual interface{}, expected ...interface{}) (message string) {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+
+ action, _ := actual.(func())
+
+ if action == nil {
+ message = shouldUseVoidNiladicFunction
+ return
+ }
+
+ defer func() {
+ recovered := recover()
+ if recovered != nil {
+ message = fmt.Sprintf(shouldNotHavePanicked, recovered)
+ } else {
+ message = success
+ }
+ }()
+ action()
+
+ return
+}
+
+// ShouldPanicWith receives a void, niladic function and expects to recover a panic with the second argument as the content.
+func ShouldPanicWith(actual interface{}, expected ...interface{}) (message string) {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ action, _ := actual.(func())
+
+ if action == nil {
+ message = shouldUseVoidNiladicFunction
+ return
+ }
+
+ defer func() {
+ recovered := recover()
+ if recovered == nil {
+ message = shouldHavePanicked
+ } else {
+ if equal := ShouldEqual(recovered, expected[0]); equal != success {
+ message = serializer.serialize(expected[0], recovered, fmt.Sprintf(shouldHavePanickedWith, expected[0], recovered))
+ } else {
+ message = success
+ }
+ }
+ }()
+ action()
+
+ return
+}
+
+// ShouldNotPanicWith receives a void, niladic function and expects to recover a panic whose content differs from the second argument.
+func ShouldNotPanicWith(actual interface{}, expected ...interface{}) (message string) {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ action, _ := actual.(func())
+
+ if action == nil {
+ message = shouldUseVoidNiladicFunction
+ return
+ }
+
+ defer func() {
+ recovered := recover()
+ if recovered == nil {
+ message = success
+ } else {
+ if equal := ShouldEqual(recovered, expected[0]); equal == success {
+ message = fmt.Sprintf(shouldNotHavePanickedWith, expected[0])
+ } else {
+ message = success
+ }
+ }
+ }()
+ action()
+
+ return
+}
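
As a rough usage sketch (call sites assumed for illustration, not part of this changeset): every assertion returns the empty string on success and a failure message otherwise, so the panic helpers can be exercised directly:

package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	// Passes: the function panics, so the empty success string is returned.
	fmt.Printf("%q\n", assertions.ShouldPanic(func() { panic("boom") })) // ""

	// Fails: a panic occurred where none was expected.
	fmt.Println(assertions.ShouldNotPanic(func() { panic("boom") }) == "") // false

	// Fails: the recovered value "boom" does not equal "bang".
	fmt.Println(assertions.ShouldPanicWith(func() { panic("boom") }, "bang") == "") // false
}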
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/panic_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/panic_test.go
new file mode 100644
index 00000000000..15eafac4fbb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/panic_test.go
@@ -0,0 +1,53 @@
+package assertions
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestShouldPanic(t *testing.T) {
+ fail(t, so(func() {}, ShouldPanic, 1), "This assertion requires exactly 0 comparison values (you provided 1).")
+ fail(t, so(func() {}, ShouldPanic, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).")
+
+ fail(t, so(1, ShouldPanic), shouldUseVoidNiladicFunction)
+ fail(t, so(func(i int) {}, ShouldPanic), shouldUseVoidNiladicFunction)
+ fail(t, so(func() int { panic("hi") }, ShouldPanic), shouldUseVoidNiladicFunction)
+
+ fail(t, so(func() {}, ShouldPanic), shouldHavePanicked)
+ pass(t, so(func() { panic("hi") }, ShouldPanic))
+}
+
+func TestShouldNotPanic(t *testing.T) {
+ fail(t, so(func() {}, ShouldNotPanic, 1), "This assertion requires exactly 0 comparison values (you provided 1).")
+ fail(t, so(func() {}, ShouldNotPanic, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).")
+
+ fail(t, so(1, ShouldNotPanic), shouldUseVoidNiladicFunction)
+ fail(t, so(func(i int) {}, ShouldNotPanic), shouldUseVoidNiladicFunction)
+
+ fail(t, so(func() { panic("hi") }, ShouldNotPanic), fmt.Sprintf(shouldNotHavePanicked, "hi"))
+ pass(t, so(func() {}, ShouldNotPanic))
+}
+
+func TestShouldPanicWith(t *testing.T) {
+ fail(t, so(func() {}, ShouldPanicWith), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(func() {}, ShouldPanicWith, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(1, ShouldPanicWith, 1), shouldUseVoidNiladicFunction)
+ fail(t, so(func(i int) {}, ShouldPanicWith, "hi"), shouldUseVoidNiladicFunction)
+ fail(t, so(func() {}, ShouldPanicWith, "bye"), shouldHavePanicked)
+ fail(t, so(func() { panic("hi") }, ShouldPanicWith, "bye"), "bye|hi|Expected func() to panic with 'bye' (but it panicked with 'hi')!")
+
+ pass(t, so(func() { panic("hi") }, ShouldPanicWith, "hi"))
+}
+
+func TestShouldNotPanicWith(t *testing.T) {
+ fail(t, so(func() {}, ShouldNotPanicWith), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(func() {}, ShouldNotPanicWith, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(1, ShouldNotPanicWith, 1), shouldUseVoidNiladicFunction)
+ fail(t, so(func(i int) {}, ShouldNotPanicWith, "hi"), shouldUseVoidNiladicFunction)
+ fail(t, so(func() { panic("hi") }, ShouldNotPanicWith, "hi"), "Expected func() NOT to panic with 'hi' (but it did)!")
+
+ pass(t, so(func() {}, ShouldNotPanicWith, "bye"))
+ pass(t, so(func() { panic("hi") }, ShouldNotPanicWith, "bye"))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/quantity.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/quantity.go
new file mode 100644
index 00000000000..80789f0cc71
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/quantity.go
@@ -0,0 +1,141 @@
+package assertions
+
+import (
+ "fmt"
+
+ "github.com/smartystreets/assertions/internal/oglematchers"
+)
+
+// ShouldBeGreaterThan receives exactly two parameters and ensures that the first is greater than the second.
+func ShouldBeGreaterThan(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ if matchError := oglematchers.GreaterThan(expected[0]).Matches(actual); matchError != nil {
+ return fmt.Sprintf(shouldHaveBeenGreater, actual, expected[0])
+ }
+ return success
+}
+
+// ShouldBeGreaterThanOrEqualTo receives exactly two parameters and ensures that the first is greater than or equal to the second.
+func ShouldBeGreaterThanOrEqualTo(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ } else if matchError := oglematchers.GreaterOrEqual(expected[0]).Matches(actual); matchError != nil {
+ return fmt.Sprintf(shouldHaveBeenGreaterOrEqual, actual, expected[0])
+ }
+ return success
+}
+
+// ShouldBeLessThan receives exactly two parameters and ensures that the first is less than the second.
+func ShouldBeLessThan(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ } else if matchError := oglematchers.LessThan(expected[0]).Matches(actual); matchError != nil {
+ return fmt.Sprintf(shouldHaveBeenLess, actual, expected[0])
+ }
+ return success
+}
+
+// ShouldBeLessThanOrEqualTo receives exactly two parameters and ensures that the first is less than or equal to the second.
+func ShouldBeLessThanOrEqualTo(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ } else if matchError := oglematchers.LessOrEqual(expected[0]).Matches(actual); matchError != nil {
+		return fmt.Sprintf(shouldHaveBeenLessOrEqual, actual, expected[0])
+ }
+ return success
+}
+
+// ShouldBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound.
+// It ensures that the actual value is between both bounds (but not equal to either of them).
+func ShouldBeBetween(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ lower, upper, fail := deriveBounds(expected)
+
+ if fail != success {
+ return fail
+ } else if !isBetween(actual, lower, upper) {
+ return fmt.Sprintf(shouldHaveBeenBetween, actual, lower, upper)
+ }
+ return success
+}
+
+// ShouldNotBeBetween receives exactly three parameters: an actual value, a lower bound, and an upper bound.
+// It ensures that the actual value is NOT between both bounds.
+func ShouldNotBeBetween(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ lower, upper, fail := deriveBounds(expected)
+
+ if fail != success {
+ return fail
+ } else if isBetween(actual, lower, upper) {
+ return fmt.Sprintf(shouldNotHaveBeenBetween, actual, lower, upper)
+ }
+ return success
+}
+func deriveBounds(values []interface{}) (lower interface{}, upper interface{}, fail string) {
+ lower = values[0]
+ upper = values[1]
+
+ if ShouldNotEqual(lower, upper) != success {
+ return nil, nil, fmt.Sprintf(shouldHaveDifferentUpperAndLower, lower)
+ } else if ShouldBeLessThan(lower, upper) != success {
+ lower, upper = upper, lower
+ }
+ return lower, upper, success
+}
+func isBetween(value, lower, upper interface{}) bool {
+ if ShouldBeGreaterThan(value, lower) != success {
+ return false
+ } else if ShouldBeLessThan(value, upper) != success {
+ return false
+ }
+ return true
+}
+
+// ShouldBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound.
+// It ensures that the actual value is between both bounds or equal to one of them.
+func ShouldBeBetweenOrEqual(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ lower, upper, fail := deriveBounds(expected)
+
+ if fail != success {
+ return fail
+ } else if !isBetweenOrEqual(actual, lower, upper) {
+ return fmt.Sprintf(shouldHaveBeenBetweenOrEqual, actual, lower, upper)
+ }
+ return success
+}
+
+// ShouldNotBeBetweenOrEqual receives exactly three parameters: an actual value, a lower bound, and an upper bound.
+// It ensures that the actual value is neither between the bounds nor equal to either of them.
+func ShouldNotBeBetweenOrEqual(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ lower, upper, fail := deriveBounds(expected)
+
+ if fail != success {
+ return fail
+ } else if isBetweenOrEqual(actual, lower, upper) {
+ return fmt.Sprintf(shouldNotHaveBeenBetweenOrEqual, actual, lower, upper)
+ }
+ return success
+}
+
+func isBetweenOrEqual(value, lower, upper interface{}) bool {
+ if ShouldBeGreaterThanOrEqualTo(value, lower) != success {
+ return false
+ } else if ShouldBeLessThanOrEqualTo(value, upper) != success {
+ return false
+ }
+ return true
+}
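
One detail worth noting above: deriveBounds normalizes reversed bounds, so callers may pass the lower and upper bound in either order. A small sketch under that assumption:

package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	// The bounds (2, 0) are normalized to (0, 2) before comparing.
	fmt.Printf("%q\n", assertions.ShouldBeBetween(1, 2, 0)) // "" (pass)

	// The *OrEqual variants also accept values landing exactly on a bound.
	fmt.Printf("%q\n", assertions.ShouldBeBetweenOrEqual(2, 0, 2)) // "" (pass)

	// Identical bounds are rejected outright.
	fmt.Println(assertions.ShouldBeBetween(4, 1, 1)) // bounds must be different values
}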
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/quantity_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/quantity_test.go
new file mode 100644
index 00000000000..7546e7250a8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/quantity_test.go
@@ -0,0 +1,145 @@
+package assertions
+
+import "testing"
+
+func TestShouldBeGreaterThan(t *testing.T) {
+ fail(t, so(1, ShouldBeGreaterThan), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(1, ShouldBeGreaterThan, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ pass(t, so(1, ShouldBeGreaterThan, 0))
+ pass(t, so(1.1, ShouldBeGreaterThan, 1))
+ pass(t, so(1, ShouldBeGreaterThan, uint(0)))
+ pass(t, so("b", ShouldBeGreaterThan, "a"))
+
+ fail(t, so(0, ShouldBeGreaterThan, 1), "Expected '0' to be greater than '1' (but it wasn't)!")
+ fail(t, so(1, ShouldBeGreaterThan, 1.1), "Expected '1' to be greater than '1.1' (but it wasn't)!")
+ fail(t, so(uint(0), ShouldBeGreaterThan, 1.1), "Expected '0' to be greater than '1.1' (but it wasn't)!")
+ fail(t, so("a", ShouldBeGreaterThan, "b"), "Expected 'a' to be greater than 'b' (but it wasn't)!")
+}
+
+func TestShouldBeGreaterThanOrEqual(t *testing.T) {
+ fail(t, so(1, ShouldBeGreaterThanOrEqualTo), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(1, ShouldBeGreaterThanOrEqualTo, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ pass(t, so(1, ShouldBeGreaterThanOrEqualTo, 1))
+ pass(t, so(1.1, ShouldBeGreaterThanOrEqualTo, 1.1))
+ pass(t, so(1, ShouldBeGreaterThanOrEqualTo, uint(1)))
+ pass(t, so("b", ShouldBeGreaterThanOrEqualTo, "b"))
+
+ pass(t, so(1, ShouldBeGreaterThanOrEqualTo, 0))
+ pass(t, so(1.1, ShouldBeGreaterThanOrEqualTo, 1))
+ pass(t, so(1, ShouldBeGreaterThanOrEqualTo, uint(0)))
+ pass(t, so("b", ShouldBeGreaterThanOrEqualTo, "a"))
+
+ fail(t, so(0, ShouldBeGreaterThanOrEqualTo, 1), "Expected '0' to be greater than or equal to '1' (but it wasn't)!")
+ fail(t, so(1, ShouldBeGreaterThanOrEqualTo, 1.1), "Expected '1' to be greater than or equal to '1.1' (but it wasn't)!")
+ fail(t, so(uint(0), ShouldBeGreaterThanOrEqualTo, 1.1), "Expected '0' to be greater than or equal to '1.1' (but it wasn't)!")
+ fail(t, so("a", ShouldBeGreaterThanOrEqualTo, "b"), "Expected 'a' to be greater than or equal to 'b' (but it wasn't)!")
+}
+
+func TestShouldBeLessThan(t *testing.T) {
+ fail(t, so(1, ShouldBeLessThan), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(1, ShouldBeLessThan, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ pass(t, so(0, ShouldBeLessThan, 1))
+ pass(t, so(1, ShouldBeLessThan, 1.1))
+ pass(t, so(uint(0), ShouldBeLessThan, 1))
+ pass(t, so("a", ShouldBeLessThan, "b"))
+
+ fail(t, so(1, ShouldBeLessThan, 0), "Expected '1' to be less than '0' (but it wasn't)!")
+ fail(t, so(1.1, ShouldBeLessThan, 1), "Expected '1.1' to be less than '1' (but it wasn't)!")
+ fail(t, so(1.1, ShouldBeLessThan, uint(0)), "Expected '1.1' to be less than '0' (but it wasn't)!")
+ fail(t, so("b", ShouldBeLessThan, "a"), "Expected 'b' to be less than 'a' (but it wasn't)!")
+}
+
+func TestShouldBeLessThanOrEqualTo(t *testing.T) {
+ fail(t, so(1, ShouldBeLessThanOrEqualTo), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(1, ShouldBeLessThanOrEqualTo, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ pass(t, so(1, ShouldBeLessThanOrEqualTo, 1))
+ pass(t, so(1.1, ShouldBeLessThanOrEqualTo, 1.1))
+ pass(t, so(uint(1), ShouldBeLessThanOrEqualTo, 1))
+ pass(t, so("b", ShouldBeLessThanOrEqualTo, "b"))
+
+ pass(t, so(0, ShouldBeLessThanOrEqualTo, 1))
+ pass(t, so(1, ShouldBeLessThanOrEqualTo, 1.1))
+ pass(t, so(uint(0), ShouldBeLessThanOrEqualTo, 1))
+ pass(t, so("a", ShouldBeLessThanOrEqualTo, "b"))
+
+	fail(t, so(1, ShouldBeLessThanOrEqualTo, 0), "Expected '1' to be less than or equal to '0' (but it wasn't)!")
+	fail(t, so(1.1, ShouldBeLessThanOrEqualTo, 1), "Expected '1.1' to be less than or equal to '1' (but it wasn't)!")
+	fail(t, so(1.1, ShouldBeLessThanOrEqualTo, uint(0)), "Expected '1.1' to be less than or equal to '0' (but it wasn't)!")
+	fail(t, so("b", ShouldBeLessThanOrEqualTo, "a"), "Expected 'b' to be less than or equal to 'a' (but it wasn't)!")
+}
+
+func TestShouldBeBetween(t *testing.T) {
+ fail(t, so(1, ShouldBeBetween), "This assertion requires exactly 2 comparison values (you provided 0).")
+ fail(t, so(1, ShouldBeBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).")
+
+ fail(t, so(4, ShouldBeBetween, 1, 1), "The lower and upper bounds must be different values (they were both '1').")
+
+ fail(t, so(7, ShouldBeBetween, 8, 12), "Expected '7' to be between '8' and '12' (but it wasn't)!")
+ fail(t, so(8, ShouldBeBetween, 8, 12), "Expected '8' to be between '8' and '12' (but it wasn't)!")
+ pass(t, so(9, ShouldBeBetween, 8, 12))
+ pass(t, so(10, ShouldBeBetween, 8, 12))
+ pass(t, so(11, ShouldBeBetween, 8, 12))
+ fail(t, so(12, ShouldBeBetween, 8, 12), "Expected '12' to be between '8' and '12' (but it wasn't)!")
+ fail(t, so(13, ShouldBeBetween, 8, 12), "Expected '13' to be between '8' and '12' (but it wasn't)!")
+
+ pass(t, so(1, ShouldBeBetween, 2, 0))
+ fail(t, so(-1, ShouldBeBetween, 2, 0), "Expected '-1' to be between '0' and '2' (but it wasn't)!")
+}
+
+func TestShouldNotBeBetween(t *testing.T) {
+ fail(t, so(1, ShouldNotBeBetween), "This assertion requires exactly 2 comparison values (you provided 0).")
+ fail(t, so(1, ShouldNotBeBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).")
+
+ fail(t, so(4, ShouldNotBeBetween, 1, 1), "The lower and upper bounds must be different values (they were both '1').")
+
+ pass(t, so(7, ShouldNotBeBetween, 8, 12))
+ pass(t, so(8, ShouldNotBeBetween, 8, 12))
+ fail(t, so(9, ShouldNotBeBetween, 8, 12), "Expected '9' NOT to be between '8' and '12' (but it was)!")
+ fail(t, so(10, ShouldNotBeBetween, 8, 12), "Expected '10' NOT to be between '8' and '12' (but it was)!")
+ fail(t, so(11, ShouldNotBeBetween, 8, 12), "Expected '11' NOT to be between '8' and '12' (but it was)!")
+ pass(t, so(12, ShouldNotBeBetween, 8, 12))
+ pass(t, so(13, ShouldNotBeBetween, 8, 12))
+
+ pass(t, so(-1, ShouldNotBeBetween, 2, 0))
+ fail(t, so(1, ShouldNotBeBetween, 2, 0), "Expected '1' NOT to be between '0' and '2' (but it was)!")
+}
+
+func TestShouldBeBetweenOrEqual(t *testing.T) {
+ fail(t, so(1, ShouldBeBetweenOrEqual), "This assertion requires exactly 2 comparison values (you provided 0).")
+ fail(t, so(1, ShouldBeBetweenOrEqual, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).")
+
+ fail(t, so(4, ShouldBeBetweenOrEqual, 1, 1), "The lower and upper bounds must be different values (they were both '1').")
+
+ fail(t, so(7, ShouldBeBetweenOrEqual, 8, 12), "Expected '7' to be between '8' and '12' or equal to one of them (but it wasn't)!")
+ pass(t, so(8, ShouldBeBetweenOrEqual, 8, 12))
+ pass(t, so(9, ShouldBeBetweenOrEqual, 8, 12))
+ pass(t, so(10, ShouldBeBetweenOrEqual, 8, 12))
+ pass(t, so(11, ShouldBeBetweenOrEqual, 8, 12))
+ pass(t, so(12, ShouldBeBetweenOrEqual, 8, 12))
+ fail(t, so(13, ShouldBeBetweenOrEqual, 8, 12), "Expected '13' to be between '8' and '12' or equal to one of them (but it wasn't)!")
+
+ pass(t, so(1, ShouldBeBetweenOrEqual, 2, 0))
+ fail(t, so(-1, ShouldBeBetweenOrEqual, 2, 0), "Expected '-1' to be between '0' and '2' or equal to one of them (but it wasn't)!")
+}
+
+func TestShouldNotBeBetweenOrEqual(t *testing.T) {
+ fail(t, so(1, ShouldNotBeBetweenOrEqual), "This assertion requires exactly 2 comparison values (you provided 0).")
+ fail(t, so(1, ShouldNotBeBetweenOrEqual, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).")
+
+ fail(t, so(4, ShouldNotBeBetweenOrEqual, 1, 1), "The lower and upper bounds must be different values (they were both '1').")
+
+ pass(t, so(7, ShouldNotBeBetweenOrEqual, 8, 12))
+ fail(t, so(8, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '8' NOT to be between '8' and '12' or equal to one of them (but it was)!")
+ fail(t, so(9, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '9' NOT to be between '8' and '12' or equal to one of them (but it was)!")
+ fail(t, so(10, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '10' NOT to be between '8' and '12' or equal to one of them (but it was)!")
+ fail(t, so(11, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '11' NOT to be between '8' and '12' or equal to one of them (but it was)!")
+ fail(t, so(12, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '12' NOT to be between '8' and '12' or equal to one of them (but it was)!")
+ pass(t, so(13, ShouldNotBeBetweenOrEqual, 8, 12))
+
+ pass(t, so(-1, ShouldNotBeBetweenOrEqual, 2, 0))
+ fail(t, so(1, ShouldNotBeBetweenOrEqual, 2, 0), "Expected '1' NOT to be between '0' and '2' or equal to one of them (but it was)!")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/serializer.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/serializer.go
new file mode 100644
index 00000000000..90ae3e3b692
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/serializer.go
@@ -0,0 +1,69 @@
+package assertions
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/smartystreets/assertions/internal/go-render/render"
+)
+
+type Serializer interface {
+ serialize(expected, actual interface{}, message string) string
+ serializeDetailed(expected, actual interface{}, message string) string
+}
+
+type failureSerializer struct{}
+
+func (self *failureSerializer) serializeDetailed(expected, actual interface{}, message string) string {
+ view := FailureView{
+ Message: message,
+ Expected: render.Render(expected),
+ Actual: render.Render(actual),
+ }
+ serialized, err := json.Marshal(view)
+ if err != nil {
+ return message
+ }
+ return string(serialized)
+}
+
+func (self *failureSerializer) serialize(expected, actual interface{}, message string) string {
+ view := FailureView{
+ Message: message,
+ Expected: fmt.Sprintf("%+v", expected),
+ Actual: fmt.Sprintf("%+v", actual),
+ }
+ serialized, err := json.Marshal(view)
+ if err != nil {
+ return message
+ }
+ return string(serialized)
+}
+
+func newSerializer() *failureSerializer {
+ return &failureSerializer{}
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// This struct is also declared in github.com/smartystreets/goconvey/convey/reporting.
+// The json struct tags should be equal in both declarations.
+type FailureView struct {
+ Message string `json:"Message"`
+ Expected string `json:"Expected"`
+ Actual string `json:"Actual"`
+}
+
+///////////////////////////////////////////////////////
+
+// noopSerializer just gives back the original message. This is useful when the
+// assertions are used from a context other than the web UI, which is what
+// requires the JSON structure provided by the failureSerializer.
+type noopSerializer struct{}
+
+func (self *noopSerializer) serialize(expected, actual interface{}, message string) string {
+ return message
+}
+func (self *noopSerializer) serializeDetailed(expected, actual interface{}, message string) string {
+ return message
+}
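
The JSON shape emitted on failure is just the exported FailureView; a minimal illustration of the payload (the message text here is illustrative only):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	view := assertions.FailureView{
		Message:  "Expected: '1' Actual: '2' (Should be equal)",
		Expected: "1",
		Actual:   "2",
	}
	out, _ := json.Marshal(view)
	fmt.Println(string(out))
	// {"Message":"Expected: '1' Actual: '2' (Should be equal)","Expected":"1","Actual":"2"}
}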
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/serializer_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/serializer_test.go
new file mode 100644
index 00000000000..597b40ac183
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/serializer_test.go
@@ -0,0 +1,36 @@
+package assertions
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+)
+
+func TestSerializerCreatesSerializedVersionOfAssertionResult(t *testing.T) {
+ thing1 := Thing1{"Hi"}
+ thing2 := Thing2{"Bye"}
+ message := "Super-hip failure message."
+ serializer := newSerializer()
+
+ actualResult := serializer.serialize(thing1, thing2, message)
+
+ expectedResult, _ := json.Marshal(FailureView{
+ Message: message,
+ Expected: fmt.Sprintf("%+v", thing1),
+ Actual: fmt.Sprintf("%+v", thing2),
+ })
+
+ if actualResult != string(expectedResult) {
+ t.Errorf("\nExpected: %s\nActual: %s", string(expectedResult), actualResult)
+ }
+
+ actualResult = serializer.serializeDetailed(thing1, thing2, message)
+ expectedResult, _ = json.Marshal(FailureView{
+ Message: message,
+ Expected: fmt.Sprintf("%#v", thing1),
+ Actual: fmt.Sprintf("%#v", thing2),
+ })
+ if actualResult != string(expectedResult) {
+ t.Errorf("\nExpected: %s\nActual: %s", string(expectedResult), actualResult)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/should/should.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/should/should.go
new file mode 100644
index 00000000000..596e43b8f8b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/should/should.go
@@ -0,0 +1,73 @@
+// Package should is simply a rewording of the assertion
+// functions in the assertions package.
+package should
+
+import "github.com/smartystreets/assertions"
+
+var (
+ Equal = assertions.ShouldEqual
+ NotEqual = assertions.ShouldNotEqual
+ AlmostEqual = assertions.ShouldAlmostEqual
+ NotAlmostEqual = assertions.ShouldNotAlmostEqual
+ Resemble = assertions.ShouldResemble
+ NotResemble = assertions.ShouldNotResemble
+ PointTo = assertions.ShouldPointTo
+ NotPointTo = assertions.ShouldNotPointTo
+ BeNil = assertions.ShouldBeNil
+ NotBeNil = assertions.ShouldNotBeNil
+ BeTrue = assertions.ShouldBeTrue
+ BeFalse = assertions.ShouldBeFalse
+ BeZeroValue = assertions.ShouldBeZeroValue
+
+ BeGreaterThan = assertions.ShouldBeGreaterThan
+ BeGreaterThanOrEqualTo = assertions.ShouldBeGreaterThanOrEqualTo
+ BeLessThan = assertions.ShouldBeLessThan
+ BeLessThanOrEqualTo = assertions.ShouldBeLessThanOrEqualTo
+ BeBetween = assertions.ShouldBeBetween
+ NotBeBetween = assertions.ShouldNotBeBetween
+ BeBetweenOrEqual = assertions.ShouldBeBetweenOrEqual
+ NotBeBetweenOrEqual = assertions.ShouldNotBeBetweenOrEqual
+
+ Contain = assertions.ShouldContain
+ NotContain = assertions.ShouldNotContain
+ ContainKey = assertions.ShouldContainKey
+ NotContainKey = assertions.ShouldNotContainKey
+ BeIn = assertions.ShouldBeIn
+ NotBeIn = assertions.ShouldNotBeIn
+ BeEmpty = assertions.ShouldBeEmpty
+ NotBeEmpty = assertions.ShouldNotBeEmpty
+ HaveLength = assertions.ShouldHaveLength
+
+ StartWith = assertions.ShouldStartWith
+ NotStartWith = assertions.ShouldNotStartWith
+ EndWith = assertions.ShouldEndWith
+ NotEndWith = assertions.ShouldNotEndWith
+ BeBlank = assertions.ShouldBeBlank
+ NotBeBlank = assertions.ShouldNotBeBlank
+ ContainSubstring = assertions.ShouldContainSubstring
+ NotContainSubstring = assertions.ShouldNotContainSubstring
+
+ EqualWithout = assertions.ShouldEqualWithout
+ EqualTrimSpace = assertions.ShouldEqualTrimSpace
+
+ Panic = assertions.ShouldPanic
+ NotPanic = assertions.ShouldNotPanic
+ PanicWith = assertions.ShouldPanicWith
+ NotPanicWith = assertions.ShouldNotPanicWith
+
+ HaveSameTypeAs = assertions.ShouldHaveSameTypeAs
+ NotHaveSameTypeAs = assertions.ShouldNotHaveSameTypeAs
+ Implement = assertions.ShouldImplement
+ NotImplement = assertions.ShouldNotImplement
+
+ HappenBefore = assertions.ShouldHappenBefore
+ HappenOnOrBefore = assertions.ShouldHappenOnOrBefore
+ HappenAfter = assertions.ShouldHappenAfter
+ HappenOnOrAfter = assertions.ShouldHappenOnOrAfter
+ HappenBetween = assertions.ShouldHappenBetween
+ HappenOnOrBetween = assertions.ShouldHappenOnOrBetween
+ NotHappenOnOrBetween = assertions.ShouldNotHappenOnOrBetween
+ HappenWithin = assertions.ShouldHappenWithin
+ NotHappenWithin = assertions.ShouldNotHappenWithin
+ BeChronological = assertions.ShouldBeChronological
+)
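
The aliases exist purely for readability at call sites; since each one is the underlying assertion function, they can also be invoked directly (a hypothetical sketch, not from this changeset):

package main

import (
	"fmt"

	"github.com/smartystreets/assertions/should"
)

func main() {
	// "" is the success value shared by all assertions.
	fmt.Printf("%q\n", should.Equal(1, 1))             // ""
	fmt.Printf("%q\n", should.BeGreaterThan(2, 1))     // ""
	fmt.Printf("%q\n", should.Contain([]int{1, 2}, 2)) // ""
}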
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/strings.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/strings.go
new file mode 100644
index 00000000000..dbc3f04790e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/strings.go
@@ -0,0 +1,227 @@
+package assertions
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// ShouldStartWith receives exactly 2 string parameters and ensures that the first starts with the second.
+func ShouldStartWith(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ value, valueIsString := actual.(string)
+ prefix, prefixIsString := expected[0].(string)
+
+ if !valueIsString || !prefixIsString {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ return shouldStartWith(value, prefix)
+}
+func shouldStartWith(value, prefix string) string {
+ if !strings.HasPrefix(value, prefix) {
+ shortval := value
+ if len(shortval) > len(prefix) {
+ shortval = shortval[:len(prefix)] + "..."
+ }
+ return serializer.serialize(prefix, shortval, fmt.Sprintf(shouldHaveStartedWith, value, prefix))
+ }
+ return success
+}
+
+// ShouldNotStartWith receives exactly 2 string parameters and ensures that the first does not start with the second.
+func ShouldNotStartWith(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ value, valueIsString := actual.(string)
+ prefix, prefixIsString := expected[0].(string)
+
+ if !valueIsString || !prefixIsString {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ return shouldNotStartWith(value, prefix)
+}
+func shouldNotStartWith(value, prefix string) string {
+ if strings.HasPrefix(value, prefix) {
+ if value == "" {
+ value = "<empty>"
+ }
+ if prefix == "" {
+ prefix = "<empty>"
+ }
+ return fmt.Sprintf(shouldNotHaveStartedWith, value, prefix)
+ }
+ return success
+}
+
+// ShouldEndWith receives exactly 2 string parameters and ensures that the first ends with the second.
+func ShouldEndWith(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ value, valueIsString := actual.(string)
+ suffix, suffixIsString := expected[0].(string)
+
+ if !valueIsString || !suffixIsString {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ return shouldEndWith(value, suffix)
+}
+func shouldEndWith(value, suffix string) string {
+ if !strings.HasSuffix(value, suffix) {
+ shortval := value
+ if len(shortval) > len(suffix) {
+ shortval = "..." + shortval[len(shortval)-len(suffix):]
+ }
+ return serializer.serialize(suffix, shortval, fmt.Sprintf(shouldHaveEndedWith, value, suffix))
+ }
+ return success
+}
+
+// ShouldNotEndWith receives exactly 2 string parameters and ensures that the first does not end with the second.
+func ShouldNotEndWith(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ value, valueIsString := actual.(string)
+ suffix, suffixIsString := expected[0].(string)
+
+ if !valueIsString || !suffixIsString {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ return shouldNotEndWith(value, suffix)
+}
+func shouldNotEndWith(value, suffix string) string {
+ if strings.HasSuffix(value, suffix) {
+ if value == "" {
+ value = "<empty>"
+ }
+ if suffix == "" {
+ suffix = "<empty>"
+ }
+ return fmt.Sprintf(shouldNotHaveEndedWith, value, suffix)
+ }
+ return success
+}
+
+// ShouldContainSubstring receives exactly 2 string parameters and ensures that the first contains the second as a substring.
+func ShouldContainSubstring(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ long, longOk := actual.(string)
+ short, shortOk := expected[0].(string)
+
+ if !longOk || !shortOk {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ if !strings.Contains(long, short) {
+ return serializer.serialize(expected[0], actual, fmt.Sprintf(shouldHaveContainedSubstring, long, short))
+ }
+ return success
+}
+
+// ShouldNotContainSubstring receives exactly 2 string parameters and ensures that the first does NOT contain the second as a substring.
+func ShouldNotContainSubstring(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ long, longOk := actual.(string)
+ short, shortOk := expected[0].(string)
+
+ if !longOk || !shortOk {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ if strings.Contains(long, short) {
+ return fmt.Sprintf(shouldNotHaveContainedSubstring, long, short)
+ }
+ return success
+}
+
+// ShouldBeBlank receives exactly 1 string parameter and ensures that it is equal to "".
+func ShouldBeBlank(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+ value, ok := actual.(string)
+ if !ok {
+ return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual))
+ }
+ if value != "" {
+ return serializer.serialize("", value, fmt.Sprintf(shouldHaveBeenBlank, value))
+ }
+ return success
+}
+
+// ShouldNotBeBlank receives exactly 1 string parameter and ensures that it is NOT equal to "".
+func ShouldNotBeBlank(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+ value, ok := actual.(string)
+ if !ok {
+ return fmt.Sprintf(shouldBeString, reflect.TypeOf(actual))
+ }
+ if value == "" {
+ return shouldNotHaveBeenBlank
+ }
+ return success
+}
+
+// ShouldEqualWithout receives exactly 3 string parameters and ensures that the first is equal to the second
+// after removing all instances of the third from the first using strings.Replace(first, third, "", -1).
+func ShouldEqualWithout(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualString, ok1 := actual.(string)
+ expectedString, ok2 := expected[0].(string)
+ replace, ok3 := expected[1].(string)
+
+ if !ok1 || !ok2 || !ok3 {
+ return fmt.Sprintf(shouldAllBeStrings, []reflect.Type{
+ reflect.TypeOf(actual),
+ reflect.TypeOf(expected[0]),
+ reflect.TypeOf(expected[1]),
+ })
+ }
+
+ replaced := strings.Replace(actualString, replace, "", -1)
+ if replaced == expectedString {
+ return ""
+ }
+
+ return fmt.Sprintf("Expected '%s' to equal '%s' but without any '%s' (but it didn't).", actualString, expectedString, replace)
+}
+
+// ShouldEqualTrimSpace receives exactly 2 string parameters and ensures that the first is equal to the second
+// after removing all leading and trailing whitespace using strings.TrimSpace(first).
+func ShouldEqualTrimSpace(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ actualString, valueIsString := actual.(string)
+ _, value2IsString := expected[0].(string)
+
+ if !valueIsString || !value2IsString {
+ return fmt.Sprintf(shouldBothBeStrings, reflect.TypeOf(actual), reflect.TypeOf(expected[0]))
+ }
+
+ actualString = strings.TrimSpace(actualString)
+ return ShouldEqual(actualString, expected[0])
+}
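
As a quick sketch of the two less obvious assertions in this file (hypothetical inputs): ShouldEqualWithout strips every occurrence of the third argument from the actual value before comparing, and ShouldEqualTrimSpace trims whitespace from the actual value only:

package main

import (
	"fmt"

	"github.com/smartystreets/assertions"
)

func main() {
	// "a-s-d-f" minus every "-" is "asdf", so this passes.
	fmt.Printf("%q\n", assertions.ShouldEqualWithout("a-s-d-f", "asdf", "-")) // ""

	// Only the actual value is trimmed before the comparison.
	fmt.Printf("%q\n", assertions.ShouldEqualTrimSpace("  asdf\n", "asdf")) // ""
}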
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/strings_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/strings_test.go
new file mode 100644
index 00000000000..ad8d0c88585
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/strings_test.go
@@ -0,0 +1,118 @@
+package assertions
+
+import "testing"
+
+func TestShouldStartWith(t *testing.T) {
+ serializer = newFakeSerializer()
+
+ fail(t, so("", ShouldStartWith), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so("", ShouldStartWith, "asdf", "asdf"), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ pass(t, so("", ShouldStartWith, ""))
+ fail(t, so("", ShouldStartWith, "x"), "x||Expected '' to start with 'x' (but it didn't)!")
+ pass(t, so("abc", ShouldStartWith, "abc"))
+ fail(t, so("abc", ShouldStartWith, "abcd"), "abcd|abc|Expected 'abc' to start with 'abcd' (but it didn't)!")
+
+ pass(t, so("superman", ShouldStartWith, "super"))
+ fail(t, so("superman", ShouldStartWith, "bat"), "bat|sup...|Expected 'superman' to start with 'bat' (but it didn't)!")
+ fail(t, so("superman", ShouldStartWith, "man"), "man|sup...|Expected 'superman' to start with 'man' (but it didn't)!")
+
+ fail(t, so(1, ShouldStartWith, 2), "Both arguments to this assertion must be strings (you provided int and int).")
+}
+
+func TestShouldNotStartWith(t *testing.T) {
+ fail(t, so("", ShouldNotStartWith), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so("", ShouldNotStartWith, "asdf", "asdf"), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ fail(t, so("", ShouldNotStartWith, ""), "Expected '<empty>' NOT to start with '<empty>' (but it did)!")
+ fail(t, so("superman", ShouldNotStartWith, "super"), "Expected 'superman' NOT to start with 'super' (but it did)!")
+ pass(t, so("superman", ShouldNotStartWith, "bat"))
+ pass(t, so("superman", ShouldNotStartWith, "man"))
+
+ fail(t, so(1, ShouldNotStartWith, 2), "Both arguments to this assertion must be strings (you provided int and int).")
+}
+
+func TestShouldEndWith(t *testing.T) {
+ serializer = newFakeSerializer()
+
+ fail(t, so("", ShouldEndWith), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so("", ShouldEndWith, "", ""), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ pass(t, so("", ShouldEndWith, ""))
+ fail(t, so("", ShouldEndWith, "z"), "z||Expected '' to end with 'z' (but it didn't)!")
+ pass(t, so("xyz", ShouldEndWith, "xyz"))
+ fail(t, so("xyz", ShouldEndWith, "wxyz"), "wxyz|xyz|Expected 'xyz' to end with 'wxyz' (but it didn't)!")
+
+ pass(t, so("superman", ShouldEndWith, "man"))
+ fail(t, so("superman", ShouldEndWith, "super"), "super|...erman|Expected 'superman' to end with 'super' (but it didn't)!")
+ fail(t, so("superman", ShouldEndWith, "blah"), "blah|...rman|Expected 'superman' to end with 'blah' (but it didn't)!")
+
+ fail(t, so(1, ShouldEndWith, 2), "Both arguments to this assertion must be strings (you provided int and int).")
+}
+
+func TestShouldNotEndWith(t *testing.T) {
+ fail(t, so("", ShouldNotEndWith), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so("", ShouldNotEndWith, "", ""), "This assertion requires exactly 1 comparison values (you provided 2).")
+
+ fail(t, so("", ShouldNotEndWith, ""), "Expected '<empty>' NOT to end with '<empty>' (but it did)!")
+ fail(t, so("superman", ShouldNotEndWith, "man"), "Expected 'superman' NOT to end with 'man' (but it did)!")
+ pass(t, so("superman", ShouldNotEndWith, "super"))
+
+ fail(t, so(1, ShouldNotEndWith, 2), "Both arguments to this assertion must be strings (you provided int and int).")
+}
+
+func TestShouldContainSubstring(t *testing.T) {
+ serializer = newFakeSerializer()
+
+ fail(t, so("asdf", ShouldContainSubstring), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so("asdf", ShouldContainSubstring, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(123, ShouldContainSubstring, 23), "Both arguments to this assertion must be strings (you provided int and int).")
+
+ pass(t, so("asdf", ShouldContainSubstring, "sd"))
+ fail(t, so("qwer", ShouldContainSubstring, "sd"), "sd|qwer|Expected 'qwer' to contain substring 'sd' (but it didn't)!")
+}
+
+func TestShouldNotContainSubstring(t *testing.T) {
+ fail(t, so("asdf", ShouldNotContainSubstring), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so("asdf", ShouldNotContainSubstring, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(123, ShouldNotContainSubstring, 23), "Both arguments to this assertion must be strings (you provided int and int).")
+
+ pass(t, so("qwer", ShouldNotContainSubstring, "sd"))
+ fail(t, so("asdf", ShouldNotContainSubstring, "sd"), "Expected 'asdf' NOT to contain substring 'sd' (but it did)!")
+}
+
+func TestShouldBeBlank(t *testing.T) {
+ serializer = newFakeSerializer()
+
+ fail(t, so("", ShouldBeBlank, "adsf"), "This assertion requires exactly 0 comparison values (you provided 1).")
+ fail(t, so(1, ShouldBeBlank), "The argument to this assertion must be a string (you provided int).")
+
+ fail(t, so("asdf", ShouldBeBlank), "|asdf|Expected 'asdf' to be blank (but it wasn't)!")
+ pass(t, so("", ShouldBeBlank))
+}
+
+func TestShouldNotBeBlank(t *testing.T) {
+ fail(t, so("", ShouldNotBeBlank, "adsf"), "This assertion requires exactly 0 comparison values (you provided 1).")
+ fail(t, so(1, ShouldNotBeBlank), "The argument to this assertion must be a string (you provided int).")
+
+ fail(t, so("", ShouldNotBeBlank), "Expected value to NOT be blank (but it was)!")
+ pass(t, so("asdf", ShouldNotBeBlank))
+}
+
+func TestShouldEqualWithout(t *testing.T) {
+ fail(t, so("", ShouldEqualWithout, ""), "This assertion requires exactly 2 comparison values (you provided 1).")
+ fail(t, so(1, ShouldEqualWithout, 2, 3), "All arguments to this assertion must be strings (you provided: [int int int]).")
+
+ fail(t, so("asdf", ShouldEqualWithout, "qwer", "q"), "Expected 'asdf' to equal 'qwer' but without any 'q' (but it didn't).")
+ pass(t, so("asdf", ShouldEqualWithout, "df", "as"))
+}
+
+func TestShouldEqualTrimSpace(t *testing.T) {
+ fail(t, so(" asdf ", ShouldEqualTrimSpace), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(1, ShouldEqualTrimSpace, 2), "Both arguments to this assertion must be strings (you provided int and int).")
+
+ fail(t, so("asdf", ShouldEqualTrimSpace, "qwer"), "qwer|asdf|Expected: 'qwer' Actual: 'asdf' (Should be equal)")
+ pass(t, so(" asdf\t\n", ShouldEqualTrimSpace, "asdf"))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/time.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/time.go
new file mode 100644
index 00000000000..7e05026143f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/time.go
@@ -0,0 +1,202 @@
+package assertions
+
+import (
+ "fmt"
+ "time"
+)
+
+// ShouldHappenBefore receives exactly 2 time.Time arguments and asserts that the first happens before the second.
+func ShouldHappenBefore(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ expectedTime, secondOk := expected[0].(time.Time)
+
+ if !firstOk || !secondOk {
+ return shouldUseTimes
+ }
+
+ if !actualTime.Before(expectedTime) {
+ return fmt.Sprintf(shouldHaveHappenedBefore, actualTime, expectedTime, actualTime.Sub(expectedTime))
+ }
+
+ return success
+}
+
+// ShouldHappenOnOrBefore receives exactly 2 time.Time arguments and asserts that the first happens on or before the second.
+func ShouldHappenOnOrBefore(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ expectedTime, secondOk := expected[0].(time.Time)
+
+ if !firstOk || !secondOk {
+ return shouldUseTimes
+ }
+
+ if actualTime.Equal(expectedTime) {
+ return success
+ }
+ return ShouldHappenBefore(actualTime, expectedTime)
+}
+
+// ShouldHappenAfter receives exactly 2 time.Time arguments and asserts that the first happens after the second.
+func ShouldHappenAfter(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ expectedTime, secondOk := expected[0].(time.Time)
+
+ if !firstOk || !secondOk {
+ return shouldUseTimes
+ }
+ if !actualTime.After(expectedTime) {
+ return fmt.Sprintf(shouldHaveHappenedAfter, actualTime, expectedTime, expectedTime.Sub(actualTime))
+ }
+ return success
+}
+
+// ShouldHappenOnOrAfter receives exactly 2 time.Time arguments and asserts that the first happens on or after the second.
+func ShouldHappenOnOrAfter(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ expectedTime, secondOk := expected[0].(time.Time)
+
+ if !firstOk || !secondOk {
+ return shouldUseTimes
+ }
+ if actualTime.Equal(expectedTime) {
+ return success
+ }
+ return ShouldHappenAfter(actualTime, expectedTime)
+}
+
+// ShouldHappenBetween receives exactly 3 time.Time arguments and asserts that the first happens between (not on) the second and third.
+func ShouldHappenBetween(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ min, secondOk := expected[0].(time.Time)
+ max, thirdOk := expected[1].(time.Time)
+
+ if !firstOk || !secondOk || !thirdOk {
+ return shouldUseTimes
+ }
+
+ if !actualTime.After(min) {
+ return fmt.Sprintf(shouldHaveHappenedBetween, actualTime, min, max, min.Sub(actualTime))
+ }
+ if !actualTime.Before(max) {
+ return fmt.Sprintf(shouldHaveHappenedBetween, actualTime, min, max, actualTime.Sub(max))
+ }
+ return success
+}
+
+// ShouldHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that the first happens between or on the second and third.
+func ShouldHappenOnOrBetween(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ min, secondOk := expected[0].(time.Time)
+ max, thirdOk := expected[1].(time.Time)
+
+ if !firstOk || !secondOk || !thirdOk {
+ return shouldUseTimes
+ }
+ if actualTime.Equal(min) || actualTime.Equal(max) {
+ return success
+ }
+ return ShouldHappenBetween(actualTime, min, max)
+}
+
+// ShouldNotHappenOnOrBetween receives exactly 3 time.Time arguments and asserts that the first
+// does NOT happen on or between the second and third.
+func ShouldNotHappenOnOrBetween(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ min, secondOk := expected[0].(time.Time)
+ max, thirdOk := expected[1].(time.Time)
+
+ if !firstOk || !secondOk || !thirdOk {
+ return shouldUseTimes
+ }
+ if actualTime.Equal(min) || actualTime.Equal(max) {
+ return fmt.Sprintf(shouldNotHaveHappenedOnOrBetween, actualTime, min, max)
+ }
+ if actualTime.After(min) && actualTime.Before(max) {
+ return fmt.Sprintf(shouldNotHaveHappenedOnOrBetween, actualTime, min, max)
+ }
+ return success
+}
+
+// ShouldHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 arguments)
+// and asserts that the first time.Time happens within or on the duration specified relative to
+// the other time.Time.
+func ShouldHappenWithin(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ tolerance, secondOk := expected[0].(time.Duration)
+ threshold, thirdOk := expected[1].(time.Time)
+
+ if !firstOk || !secondOk || !thirdOk {
+ return shouldUseDurationAndTime
+ }
+
+ min := threshold.Add(-tolerance)
+ max := threshold.Add(tolerance)
+ return ShouldHappenOnOrBetween(actualTime, min, max)
+}
+
+// ShouldNotHappenWithin receives a time.Time, a time.Duration, and a time.Time (3 arguments)
+// and asserts that the first time.Time does NOT happen within or on the duration specified relative to
+// the other time.Time.
+func ShouldNotHappenWithin(actual interface{}, expected ...interface{}) string {
+ if fail := need(2, expected); fail != success {
+ return fail
+ }
+ actualTime, firstOk := actual.(time.Time)
+ tolerance, secondOk := expected[0].(time.Duration)
+ threshold, thirdOk := expected[1].(time.Time)
+
+ if !firstOk || !secondOk || !thirdOk {
+ return shouldUseDurationAndTime
+ }
+
+ min := threshold.Add(-tolerance)
+ max := threshold.Add(tolerance)
+ return ShouldNotHappenOnOrBetween(actualTime, min, max)
+}
+
+// ShouldBeChronological receives a []time.Time slice and asserts that they are
+// in chronological order, starting with the first time.Time as the earliest.
+func ShouldBeChronological(actual interface{}, expected ...interface{}) string {
+ if fail := need(0, expected); fail != success {
+ return fail
+ }
+
+ times, ok := actual.([]time.Time)
+ if !ok {
+ return shouldUseTimeSlice
+ }
+
+ var previous time.Time
+ for i, current := range times {
+ if i > 0 && current.Before(previous) {
+ return fmt.Sprintf(shouldHaveBeenChronological,
+ i, i-1, previous.String(), i, current.String())
+ }
+ previous = current
+ }
+ return ""
+}
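
ShouldHappenWithin and ShouldNotHappenWithin just translate the tolerance into the closed window [threshold-tolerance, threshold+tolerance] and delegate to the OnOrBetween assertions, so the window boundary itself counts as inside. A small sketch with hypothetical times:

package main

import (
	"fmt"
	"time"

	"github.com/smartystreets/assertions"
)

func main() {
	base := time.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC)

	// Exactly on the +5m boundary: still within, because the window is inclusive.
	fmt.Printf("%q\n", assertions.ShouldHappenWithin(base.Add(5*time.Minute), 5*time.Minute, base)) // ""

	// One minute past the boundary: no longer within.
	fmt.Println(assertions.ShouldHappenWithin(base.Add(6*time.Minute), 5*time.Minute, base) == "") // false
}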
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/time_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/time_test.go
new file mode 100644
index 00000000000..f9dda8f8f34
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/time_test.go
@@ -0,0 +1,159 @@
+package assertions
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+func TestShouldHappenBefore(t *testing.T) {
+ fail(t, so(0, ShouldHappenBefore), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(0, ShouldHappenBefore, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(0, ShouldHappenBefore, 1), shouldUseTimes)
+ fail(t, so(0, ShouldHappenBefore, time.Now()), shouldUseTimes)
+ fail(t, so(time.Now(), ShouldHappenBefore, 0), shouldUseTimes)
+
+ fail(t, so(january3, ShouldHappenBefore, january1), fmt.Sprintf("Expected '%s' to happen before '%s' (it happened '48h0m0s' after)!", pretty(january3), pretty(january1)))
+ fail(t, so(january3, ShouldHappenBefore, january3), fmt.Sprintf("Expected '%s' to happen before '%s' (it happened '0' after)!", pretty(january3), pretty(january3)))
+ pass(t, so(january1, ShouldHappenBefore, january3))
+}
+
+func TestShouldHappenOnOrBefore(t *testing.T) {
+ fail(t, so(0, ShouldHappenOnOrBefore), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(0, ShouldHappenOnOrBefore, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(0, ShouldHappenOnOrBefore, 1), shouldUseTimes)
+ fail(t, so(0, ShouldHappenOnOrBefore, time.Now()), shouldUseTimes)
+ fail(t, so(time.Now(), ShouldHappenOnOrBefore, 0), shouldUseTimes)
+
+ fail(t, so(january3, ShouldHappenOnOrBefore, january1), fmt.Sprintf("Expected '%s' to happen before '%s' (it happened '48h0m0s' after)!", pretty(january3), pretty(january1)))
+ pass(t, so(january3, ShouldHappenOnOrBefore, january3))
+ pass(t, so(january1, ShouldHappenOnOrBefore, january3))
+}
+
+func TestShouldHappenAfter(t *testing.T) {
+ fail(t, so(0, ShouldHappenAfter), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(0, ShouldHappenAfter, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(0, ShouldHappenAfter, 1), shouldUseTimes)
+ fail(t, so(0, ShouldHappenAfter, time.Now()), shouldUseTimes)
+ fail(t, so(time.Now(), ShouldHappenAfter, 0), shouldUseTimes)
+
+ fail(t, so(january1, ShouldHappenAfter, january2), fmt.Sprintf("Expected '%s' to happen after '%s' (it happened '24h0m0s' before)!", pretty(january1), pretty(january2)))
+ fail(t, so(january1, ShouldHappenAfter, january1), fmt.Sprintf("Expected '%s' to happen after '%s' (it happened '0' before)!", pretty(january1), pretty(january1)))
+ pass(t, so(january3, ShouldHappenAfter, january1))
+}
+
+func TestShouldHappenOnOrAfter(t *testing.T) {
+ fail(t, so(0, ShouldHappenOnOrAfter), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(0, ShouldHappenOnOrAfter, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(0, ShouldHappenOnOrAfter, 1), shouldUseTimes)
+ fail(t, so(0, ShouldHappenOnOrAfter, time.Now()), shouldUseTimes)
+ fail(t, so(time.Now(), ShouldHappenOnOrAfter, 0), shouldUseTimes)
+
+ fail(t, so(january1, ShouldHappenOnOrAfter, january2), fmt.Sprintf("Expected '%s' to happen after '%s' (it happened '24h0m0s' before)!", pretty(january1), pretty(january2)))
+ pass(t, so(january1, ShouldHappenOnOrAfter, january1))
+ pass(t, so(january3, ShouldHappenOnOrAfter, january1))
+}
+
+func TestShouldHappenBetween(t *testing.T) {
+ fail(t, so(0, ShouldHappenBetween), "This assertion requires exactly 2 comparison values (you provided 0).")
+ fail(t, so(0, ShouldHappenBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).")
+
+ fail(t, so(0, ShouldHappenBetween, 1, 2), shouldUseTimes)
+ fail(t, so(0, ShouldHappenBetween, time.Now(), time.Now()), shouldUseTimes)
+ fail(t, so(time.Now(), ShouldHappenBetween, 0, time.Now()), shouldUseTimes)
+ fail(t, so(time.Now(), ShouldHappenBetween, time.Now(), 9), shouldUseTimes)
+
+ fail(t, so(january1, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january1), pretty(january2), pretty(january4)))
+ fail(t, so(january2, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '0' outside threshold)!", pretty(january2), pretty(january2), pretty(january4)))
+ pass(t, so(january3, ShouldHappenBetween, january2, january4))
+ fail(t, so(january4, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '0' outside threshold)!", pretty(january4), pretty(january2), pretty(january4)))
+ fail(t, so(january5, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january5), pretty(january2), pretty(january4)))
+}
+
+func TestShouldHappenOnOrBetween(t *testing.T) {
+ fail(t, so(0, ShouldHappenOnOrBetween), "This assertion requires exactly 2 comparison values (you provided 0).")
+ fail(t, so(0, ShouldHappenOnOrBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).")
+
+ fail(t, so(0, ShouldHappenOnOrBetween, 1, time.Now()), shouldUseTimes)
+ fail(t, so(0, ShouldHappenOnOrBetween, time.Now(), 1), shouldUseTimes)
+ fail(t, so(time.Now(), ShouldHappenOnOrBetween, 0, 1), shouldUseTimes)
+
+ fail(t, so(january1, ShouldHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january1), pretty(january2), pretty(january4)))
+ pass(t, so(january2, ShouldHappenOnOrBetween, january2, january4))
+ pass(t, so(january3, ShouldHappenOnOrBetween, january2, january4))
+ pass(t, so(january4, ShouldHappenOnOrBetween, january2, january4))
+ fail(t, so(january5, ShouldHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january5), pretty(january2), pretty(january4)))
+}
+
+func TestShouldNotHappenOnOrBetween(t *testing.T) {
+ fail(t, so(0, ShouldNotHappenOnOrBetween), "This assertion requires exactly 2 comparison values (you provided 0).")
+ fail(t, so(0, ShouldNotHappenOnOrBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).")
+
+ fail(t, so(0, ShouldNotHappenOnOrBetween, 1, time.Now()), shouldUseTimes)
+ fail(t, so(0, ShouldNotHappenOnOrBetween, time.Now(), 1), shouldUseTimes)
+ fail(t, so(time.Now(), ShouldNotHappenOnOrBetween, 0, 1), shouldUseTimes)
+
+ pass(t, so(january1, ShouldNotHappenOnOrBetween, january2, january4))
+ fail(t, so(january2, ShouldNotHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january2), pretty(january2), pretty(january4)))
+ fail(t, so(january3, ShouldNotHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january3), pretty(january2), pretty(january4)))
+ fail(t, so(january4, ShouldNotHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january4), pretty(january2), pretty(january4)))
+ pass(t, so(january5, ShouldNotHappenOnOrBetween, january2, january4))
+}
+
+func TestShouldHappenWithin(t *testing.T) {
+ fail(t, so(0, ShouldHappenWithin), "This assertion requires exactly 2 comparison values (you provided 0).")
+ fail(t, so(0, ShouldHappenWithin, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).")
+
+ fail(t, so(0, ShouldHappenWithin, 1, 2), shouldUseDurationAndTime)
+ fail(t, so(0, ShouldHappenWithin, oneDay, time.Now()), shouldUseDurationAndTime)
+ fail(t, so(time.Now(), ShouldHappenWithin, 0, time.Now()), shouldUseDurationAndTime)
+
+ fail(t, so(january1, ShouldHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january1), pretty(january2), pretty(january4)))
+ pass(t, so(january2, ShouldHappenWithin, oneDay, january3))
+ pass(t, so(january3, ShouldHappenWithin, oneDay, january3))
+ pass(t, so(january4, ShouldHappenWithin, oneDay, january3))
+ fail(t, so(january5, ShouldHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january5), pretty(january2), pretty(january4)))
+}
+
+func TestShouldNotHappenWithin(t *testing.T) {
+ fail(t, so(0, ShouldNotHappenWithin), "This assertion requires exactly 2 comparison values (you provided 0).")
+ fail(t, so(0, ShouldNotHappenWithin, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).")
+
+ fail(t, so(0, ShouldNotHappenWithin, 1, 2), shouldUseDurationAndTime)
+ fail(t, so(0, ShouldNotHappenWithin, oneDay, time.Now()), shouldUseDurationAndTime)
+ fail(t, so(time.Now(), ShouldNotHappenWithin, 0, time.Now()), shouldUseDurationAndTime)
+
+ pass(t, so(january1, ShouldNotHappenWithin, oneDay, january3))
+ fail(t, so(january2, ShouldNotHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january2), pretty(january2), pretty(january4)))
+ fail(t, so(january3, ShouldNotHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january3), pretty(january2), pretty(january4)))
+ fail(t, so(january4, ShouldNotHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january4), pretty(january2), pretty(january4)))
+ pass(t, so(january5, ShouldNotHappenWithin, oneDay, january3))
+}
+
+func TestShouldBeChronological(t *testing.T) {
+ fail(t, so(0, ShouldBeChronological, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).")
+ fail(t, so(0, ShouldBeChronological), shouldUseTimeSlice)
+ fail(t, so([]time.Time{january5, january1}, ShouldBeChronological),
+ "The 'Time' at index [1] should have happened after the previous one (but it didn't!):\n [0]: 2013-01-05 00:00:00 +0000 UTC\n [1]: 2013-01-01 00:00:00 +0000 UTC (see, it happened before!)")
+
+ pass(t, so([]time.Time{january1, january2, january3, january4, january5}, ShouldBeChronological))
+}
+
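+// Test fixtures: january1 through january5 are five consecutive days in
+// January 2013; they produce the '24h0m0s' and '48h0m0s' offsets asserted
+// in the failure messages above.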
+const layout = "2006-01-02 15:04"
+
+var january1, _ = time.Parse(layout, "2013-01-01 00:00")
+var january2, _ = time.Parse(layout, "2013-01-02 00:00")
+var january3, _ = time.Parse(layout, "2013-01-03 00:00")
+var january4, _ = time.Parse(layout, "2013-01-04 00:00")
+var january5, _ = time.Parse(layout, "2013-01-05 00:00")
+
+var oneDay, _ = time.ParseDuration("24h0m0s")
+var twoDays, _ = time.ParseDuration("48h0m0s")
+
+func pretty(t time.Time) string {
+ return fmt.Sprintf("%v", t)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/type.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/type.go
new file mode 100644
index 00000000000..3fc00f68cd0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/type.go
@@ -0,0 +1,118 @@
+package assertions
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// ShouldHaveSameTypeAs receives exactly two parameters and compares their underlying types for equality.
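+// For example, So(1, ShouldHaveSameTypeAs, 0) passes, while
+// So(1, ShouldHaveSameTypeAs, "asdf") fails.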
+func ShouldHaveSameTypeAs(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ first := reflect.TypeOf(actual)
+ second := reflect.TypeOf(expected[0])
+
+ if equal := ShouldEqual(first, second); equal != success {
+ return serializer.serialize(second, first, fmt.Sprintf(shouldHaveBeenA, actual, second, first))
+ }
+ return success
+}
+
+// ShouldNotHaveSameTypeAs receives exactly two parameters and compares their underlying types for inequality.
+func ShouldNotHaveSameTypeAs(actual interface{}, expected ...interface{}) string {
+ if fail := need(1, expected); fail != success {
+ return fail
+ }
+
+ first := reflect.TypeOf(actual)
+ second := reflect.TypeOf(expected[0])
+
+ if equal := ShouldEqual(first, second); equal == success {
+ return fmt.Sprintf(shouldNotHaveBeenA, actual, second)
+ }
+ return success
+}
+
+// ShouldImplement receives exactly two parameters and ensures
+// that the first implements the interface type of the second.
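+// Compare against a nil pointer to the interface type, for example:
+// So(bytes.NewBufferString(""), ShouldImplement, (*io.Reader)(nil)).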
+func ShouldImplement(actual interface{}, expectedList ...interface{}) string {
+ if fail := need(1, expectedList); fail != success {
+ return fail
+ }
+
+ expected := expectedList[0]
+ if fail := ShouldBeNil(expected); fail != success {
+ return shouldCompareWithInterfacePointer
+ }
+
+ if fail := ShouldNotBeNil(actual); fail != success {
+ return shouldNotBeNilActual
+ }
+
+ var actualType reflect.Type
+ if reflect.TypeOf(actual).Kind() != reflect.Ptr {
+ actualType = reflect.PtrTo(reflect.TypeOf(actual))
+ } else {
+ actualType = reflect.TypeOf(actual)
+ }
+
+ expectedType := reflect.TypeOf(expected)
+ if fail := ShouldNotBeNil(expectedType); fail != success {
+ return shouldCompareWithInterfacePointer
+ }
+
+ expectedInterface := expectedType.Elem()
+
+ if actualType == nil {
+ return fmt.Sprintf(shouldHaveImplemented, expectedInterface, actual)
+ }
+
+ if !actualType.Implements(expectedInterface) {
+ return fmt.Sprintf(shouldHaveImplemented, expectedInterface, actualType)
+ }
+ return success
+}
+
+// ShouldNotImplement receives exactly two parameters and ensures
+// that the first does NOT implement the interface type of the second.
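+// As with ShouldImplement, compare against a nil interface pointer, e.g.
+// So(http.Response{}, ShouldNotImplement, (*io.Reader)(nil)).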
+func ShouldNotImplement(actual interface{}, expectedList ...interface{}) string {
+ if fail := need(1, expectedList); fail != success {
+ return fail
+ }
+
+ expected := expectedList[0]
+ if fail := ShouldBeNil(expected); fail != success {
+ return shouldCompareWithInterfacePointer
+ }
+
+ if fail := ShouldNotBeNil(actual); fail != success {
+ return shouldNotBeNilActual
+ }
+
+ var actualType reflect.Type
+ if reflect.TypeOf(actual).Kind() != reflect.Ptr {
+ actualType = reflect.PtrTo(reflect.TypeOf(actual))
+ } else {
+ actualType = reflect.TypeOf(actual)
+ }
+
+ expectedType := reflect.TypeOf(expected)
+ if fail := ShouldNotBeNil(expectedType); fail != success {
+ return shouldCompareWithInterfacePointer
+ }
+
+ expectedInterface := expectedType.Elem()
+
+ if actualType.Implements(expectedInterface) {
+ return fmt.Sprintf(shouldNotHaveImplemented, actualType, expectedInterface)
+ }
+ return success
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/type_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/type_test.go
new file mode 100644
index 00000000000..4b8d1984670
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/type_test.go
@@ -0,0 +1,76 @@
+package assertions
+
+import (
+ "bytes"
+ "io"
+ "net/http"
+ "testing"
+)
+
+func TestShouldHaveSameTypeAs(t *testing.T) {
+ serializer = newFakeSerializer()
+
+ fail(t, so(1, ShouldHaveSameTypeAs), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(1, ShouldHaveSameTypeAs, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(nil, ShouldHaveSameTypeAs, 0), "int|<nil>|Expected '<nil>' to be: 'int' (but was: '<nil>')!")
+ fail(t, so(1, ShouldHaveSameTypeAs, "asdf"), "string|int|Expected '1' to be: 'string' (but was: 'int')!")
+
+ pass(t, so(1, ShouldHaveSameTypeAs, 0))
+ pass(t, so(nil, ShouldHaveSameTypeAs, nil))
+}
+
+func TestShouldNotHaveSameTypeAs(t *testing.T) {
+ fail(t, so(1, ShouldNotHaveSameTypeAs), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(1, ShouldNotHaveSameTypeAs, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(1, ShouldNotHaveSameTypeAs, 0), "Expected '1' to NOT be: 'int' (but it was)!")
+ fail(t, so(nil, ShouldNotHaveSameTypeAs, nil), "Expected '<nil>' to NOT be: '<nil>' (but it was)!")
+
+ pass(t, so(nil, ShouldNotHaveSameTypeAs, 0))
+ pass(t, so(1, ShouldNotHaveSameTypeAs, "asdf"))
+}
+
+func TestShouldImplement(t *testing.T) {
+ var ioReader *io.Reader = nil
+ var response http.Response = http.Response{}
+ var responsePtr *http.Response = new(http.Response)
+ var reader = bytes.NewBufferString("")
+
+ fail(t, so(reader, ShouldImplement), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(reader, ShouldImplement, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 2).")
+ fail(t, so(reader, ShouldImplement, ioReader, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(reader, ShouldImplement, "foo"), shouldCompareWithInterfacePointer)
+ fail(t, so(reader, ShouldImplement, 1), shouldCompareWithInterfacePointer)
+ fail(t, so(reader, ShouldImplement, nil), shouldCompareWithInterfacePointer)
+
+ fail(t, so(nil, ShouldImplement, ioReader), shouldNotBeNilActual)
+ fail(t, so(1, ShouldImplement, ioReader), "Expected: 'io.Reader interface support'\nActual: '*int' does not implement the interface!")
+
+ fail(t, so(response, ShouldImplement, ioReader), "Expected: 'io.Reader interface support'\nActual: '*http.Response' does not implement the interface!")
+ fail(t, so(responsePtr, ShouldImplement, ioReader), "Expected: 'io.Reader interface support'\nActual: '*http.Response' does not implement the interface!")
+ pass(t, so(reader, ShouldImplement, ioReader))
+ pass(t, so(reader, ShouldImplement, (*io.Reader)(nil)))
+}
+
+func TestShouldNotImplement(t *testing.T) {
+ var ioReader *io.Reader = nil
+ var response http.Response = http.Response{}
+ var responsePtr *http.Response = new(http.Response)
+ var reader io.Reader = bytes.NewBufferString("")
+
+ fail(t, so(reader, ShouldNotImplement), "This assertion requires exactly 1 comparison values (you provided 0).")
+ fail(t, so(reader, ShouldNotImplement, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 2).")
+ fail(t, so(reader, ShouldNotImplement, ioReader, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 3).")
+
+ fail(t, so(reader, ShouldNotImplement, "foo"), shouldCompareWithInterfacePointer)
+ fail(t, so(reader, ShouldNotImplement, 1), shouldCompareWithInterfacePointer)
+ fail(t, so(reader, ShouldNotImplement, nil), shouldCompareWithInterfacePointer)
+
+ fail(t, so(reader, ShouldNotImplement, ioReader), "Expected '*bytes.Buffer'\nto NOT implement 'io.Reader' (but it did)!")
+ fail(t, so(nil, ShouldNotImplement, ioReader), shouldNotBeNilActual)
+ pass(t, so(1, ShouldNotImplement, ioReader))
+ pass(t, so(response, ShouldNotImplement, ioReader))
+ pass(t, so(responsePtr, ShouldNotImplement, ioReader))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/utilities_for_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/utilities_for_test.go
new file mode 100644
index 00000000000..7243ebcb937
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/assertions/utilities_for_test.go
@@ -0,0 +1,79 @@
+package assertions
+
+import (
+ "fmt"
+ "path"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func pass(t *testing.T, result string) {
+ if result != success {
+ _, file, line, _ := runtime.Caller(1)
+ base := path.Base(file)
+ t.Errorf("Expectation should have passed but failed (see %s: line %d): '%s'", base, line, result)
+ }
+}
+
+func fail(t *testing.T, actual string, expected string) {
+ actual = format(actual)
+ expected = format(expected)
+
+ if actual != expected {
+ if actual == "" {
+ actual = "(empty)"
+ }
+ _, file, line, _ := runtime.Caller(1)
+ base := path.Base(file)
+ t.Errorf("Expectation should have failed but passed (see %s: line %d). \nExpected: %s\nActual: %s\n",
+ base, line, expected, actual)
+ }
+}
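+
+// format normalizes a failure message for comparison: newlines become
+// spaces and runs of spaces collapse to one, so expected and actual
+// messages compare equal regardless of layout.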
+func format(message string) string {
+ message = strings.Replace(message, "\n", " ", -1)
+ for strings.Contains(message, "  ") {
+ message = strings.Replace(message, "  ", " ", -1)
+ }
+ return message
+}
+
+type Thing1 struct {
+ a string
+}
+type Thing2 struct {
+ a string
+}
+
+type Thinger interface {
+ Hi()
+}
+
+type Thing struct{}
+
+func (self *Thing) Hi() {}
+
+type IntAlias int
+type StringAlias string
+type StringSliceAlias []string
+type StringStringMapAlias map[string]string
+
+/******** FakeSerializer ********/
+
+type fakeSerializer struct{}
+
+func (self *fakeSerializer) serialize(expected, actual interface{}, message string) string {
+ return fmt.Sprintf("%v|%v|%s", expected, actual, message)
+}
+
+func (self *fakeSerializer) serializeDetailed(expected, actual interface{}, message string) string {
+ return fmt.Sprintf("%v|%v|%s", expected, actual, message)
+}
+
+func newFakeSerializer() *fakeSerializer {
+ return new(fakeSerializer)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/.gitignore b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/.gitignore
new file mode 100644
index 00000000000..c9205c5335a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/.gitignore
@@ -0,0 +1,5 @@
+.DS_Store
+Thumbs.db
+examples/output.json
+web/client/reports/
+/.idea
\ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/.travis.yml b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/.travis.yml
new file mode 100644
index 00000000000..a5124b0491b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+
+install:
+ - go get -t ./...
+
+script: go test -short -v ./...
+
+sudo: false
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/CONTRIBUTING.md b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/CONTRIBUTING.md
new file mode 100644
index 00000000000..9c9053b83ad
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/CONTRIBUTING.md
@@ -0,0 +1,22 @@
+# Subject: GoConvey maintainers wanted
+
+We'd like to open the project up to additional maintainers who want to move the project forward in a meaningful way.
+
+We've spent significant time at SmartyStreets building GoConvey and it has perfectly met (and exceeded) all of our initial design specifications. We've used it to great effect. Being so well-matched to our development workflows at SmartyStreets, we haven't had a need to hack on it lately. This has been frustrating to many in the community who have ideas for the project and would like to see new features released (and some old bugs fixed). The release of Go 1.5 and the new vendoring experiment has been a source of confusion and hassle for those who have already upgraded and find that GoConvey needs to be brought up to speed.
+
+Comment below if you're interested. Preference will be given to those who have already contributed to the project. Check out the issues listing if you need some ideas for contributing.
+
+GoConvey is a popular 2-pronged, open-source GitHub project (1,600+ stargazers, 100+ forks):
+
+- A package you import in your test code that allows you to write BDD-style tests.
+- An executable that runs a local web server which displays auto-updating test results in a web browser.
+
+----
+
+- http://goconvey.co/
+- https://github.com/smartystreets/goconvey
+- https://github.com/smartystreets/goconvey/wiki
+
+_I should mention that the [assertions package](https://github.com/smartystreets/assertions) imported by the convey package is used by other projects at SmartyStreets and so we will be continuing to maintain that project internally._
+
+We hope to hear from you soon. Thanks!
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/LICENSE.md b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/LICENSE.md
new file mode 100644
index 00000000000..5bc993c93c9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/LICENSE.md
@@ -0,0 +1,23 @@
+Copyright (c) 2014 SmartyStreets, LLC
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+NOTE: Various optional and subordinate components carry their own licensing
+requirements and restrictions. Use of those components is subject to the terms
+and conditions outlined in the respective license of each component.
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/README.md b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/README.md
new file mode 100644
index 00000000000..a07ce5a6eb2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/README.md
@@ -0,0 +1,124 @@
+GoConvey is awesome Go testing
+==============================
+
+[![Build Status](https://travis-ci.org/smartystreets/goconvey.png)](https://travis-ci.org/smartystreets/goconvey)
+[![GoDoc](https://godoc.org/github.com/smartystreets/goconvey?status.svg)](http://godoc.org/github.com/smartystreets/goconvey)
+
+
+Welcome to GoConvey, a yummy Go testing tool for gophers. Works with `go test`. Use it in the terminal or browser according to your viewing pleasure. **[View full feature tour.](http://goconvey.co)**
+
+**Features:**
+
+- Directly integrates with `go test`
+- Fully-automatic web UI (works with native Go tests, too)
+- Huge suite of regression tests
+- Shows test coverage (Go 1.2+)
+- Readable, colorized console output (understandable by any manager, IT or not)
+- Test code generator
+- Desktop notifications (optional)
+- Immediately open problem lines in [Sublime Text](http://www.sublimetext.com) ([some assembly required](https://github.com/asuth/subl-handler))
+
+
+You can ask questions about how to use GoConvey on [StackOverflow](http://stackoverflow.com/questions/ask?tags=goconvey,go&title=GoConvey%3A%20). Use the tags `go` and `goconvey`.
+
+**Menu:**
+
+- [Installation](#installation)
+- [Quick start](#quick-start)
+- [Documentation](#documentation)
+- [Screenshots](#screenshots)
+- [Contributors](#contributors-thanks)
+
+
+
+
+Installation
+------------
+
+ $ go get github.com/smartystreets/goconvey
+
+[Quick start](https://github.com/smartystreets/goconvey/wiki#get-going-in-25-seconds)
+-----------
+
+Make a test, for example:
+
+```go
+package package_name
+
+import (
+ "testing"
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+func TestSpec(t *testing.T) {
+
+ // Only pass t into top-level Convey calls
+ Convey("Given some integer with a starting value", t, func() {
+ x := 1
+
+ Convey("When the integer is incremented", func() {
+ x++
+
+ Convey("The value should be greater by one", func() {
+ So(x, ShouldEqual, 2)
+ })
+ })
+ })
+}
+```
+
+
+#### [In the browser](https://github.com/smartystreets/goconvey/wiki/Web-UI)
+
+Start up the GoConvey web server at your project's path:
+
+ $ $GOPATH/bin/goconvey
+
+Then watch the test results display in your browser at:
+
+ http://localhost:8080
+
+
+If the browser doesn't open automatically, please click [http://localhost:8080](http://localhost:8080) to open manually.
+
+There you have it.
+![](http://d79i1fxsrar4t.cloudfront.net/goconvey.co/gc-1-dark.png)
+As long as GoConvey is running, test results will automatically update in your browser window.
+
+![](http://d79i1fxsrar4t.cloudfront.net/goconvey.co/gc-5-dark.png)
+The design is responsive, so you can squish the browser real tight if you need to put it beside your code.
+
+
+The [web UI](https://github.com/smartystreets/goconvey/wiki/Web-UI) supports traditional Go tests, so use it even if you're not using GoConvey tests.
+
+
+
+#### [In the terminal](https://github.com/smartystreets/goconvey/wiki/Execution)
+
+Just do what you do best:
+
+ $ go test
+
+Or if you want the output to include the story:
+
+ $ go test -v
+
+
+[Documentation](https://github.com/smartystreets/goconvey/wiki)
+-----------
+
+Check out the
+
+- [GoConvey wiki](https://github.com/smartystreets/goconvey/wiki),
+- [![GoDoc](https://godoc.org/github.com/smartystreets/goconvey?status.png)](http://godoc.org/github.com/smartystreets/goconvey)
+- and the *_test.go files scattered throughout this project.
+
+[Screenshots](http://goconvey.co)
+-----------
+
+For web UI and terminal screenshots, check out [the full feature tour](http://goconvey.co).
+
+
+----------------------
+
+GoConvey is brought to you by [SmartyStreets](https://github.com/smartystreets) and [several contributors](https://github.com/smartystreets/goconvey/graphs/contributors) (Thanks!).
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/assertions.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/assertions.go
new file mode 100644
index 00000000000..1e87b826dff
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/assertions.go
@@ -0,0 +1,68 @@
+package convey
+
+import "github.com/smartystreets/assertions"
+
+var (
+ ShouldEqual = assertions.ShouldEqual
+ ShouldNotEqual = assertions.ShouldNotEqual
+ ShouldAlmostEqual = assertions.ShouldAlmostEqual
+ ShouldNotAlmostEqual = assertions.ShouldNotAlmostEqual
+ ShouldResemble = assertions.ShouldResemble
+ ShouldNotResemble = assertions.ShouldNotResemble
+ ShouldPointTo = assertions.ShouldPointTo
+ ShouldNotPointTo = assertions.ShouldNotPointTo
+ ShouldBeNil = assertions.ShouldBeNil
+ ShouldNotBeNil = assertions.ShouldNotBeNil
+ ShouldBeTrue = assertions.ShouldBeTrue
+ ShouldBeFalse = assertions.ShouldBeFalse
+ ShouldBeZeroValue = assertions.ShouldBeZeroValue
+
+ ShouldBeGreaterThan = assertions.ShouldBeGreaterThan
+ ShouldBeGreaterThanOrEqualTo = assertions.ShouldBeGreaterThanOrEqualTo
+ ShouldBeLessThan = assertions.ShouldBeLessThan
+ ShouldBeLessThanOrEqualTo = assertions.ShouldBeLessThanOrEqualTo
+ ShouldBeBetween = assertions.ShouldBeBetween
+ ShouldNotBeBetween = assertions.ShouldNotBeBetween
+ ShouldBeBetweenOrEqual = assertions.ShouldBeBetweenOrEqual
+ ShouldNotBeBetweenOrEqual = assertions.ShouldNotBeBetweenOrEqual
+
+ ShouldContain = assertions.ShouldContain
+ ShouldNotContain = assertions.ShouldNotContain
+ ShouldContainKey = assertions.ShouldContainKey
+ ShouldNotContainKey = assertions.ShouldNotContainKey
+ ShouldBeIn = assertions.ShouldBeIn
+ ShouldNotBeIn = assertions.ShouldNotBeIn
+ ShouldBeEmpty = assertions.ShouldBeEmpty
+ ShouldNotBeEmpty = assertions.ShouldNotBeEmpty
+ ShouldHaveLength = assertions.ShouldHaveLength
+
+ ShouldStartWith = assertions.ShouldStartWith
+ ShouldNotStartWith = assertions.ShouldNotStartWith
+ ShouldEndWith = assertions.ShouldEndWith
+ ShouldNotEndWith = assertions.ShouldNotEndWith
+ ShouldBeBlank = assertions.ShouldBeBlank
+ ShouldNotBeBlank = assertions.ShouldNotBeBlank
+ ShouldContainSubstring = assertions.ShouldContainSubstring
+ ShouldNotContainSubstring = assertions.ShouldNotContainSubstring
+
+ ShouldPanic = assertions.ShouldPanic
+ ShouldNotPanic = assertions.ShouldNotPanic
+ ShouldPanicWith = assertions.ShouldPanicWith
+ ShouldNotPanicWith = assertions.ShouldNotPanicWith
+
+ ShouldHaveSameTypeAs = assertions.ShouldHaveSameTypeAs
+ ShouldNotHaveSameTypeAs = assertions.ShouldNotHaveSameTypeAs
+ ShouldImplement = assertions.ShouldImplement
+ ShouldNotImplement = assertions.ShouldNotImplement
+
+ ShouldHappenBefore = assertions.ShouldHappenBefore
+ ShouldHappenOnOrBefore = assertions.ShouldHappenOnOrBefore
+ ShouldHappenAfter = assertions.ShouldHappenAfter
+ ShouldHappenOnOrAfter = assertions.ShouldHappenOnOrAfter
+ ShouldHappenBetween = assertions.ShouldHappenBetween
+ ShouldHappenOnOrBetween = assertions.ShouldHappenOnOrBetween
+ ShouldNotHappenOnOrBetween = assertions.ShouldNotHappenOnOrBetween
+ ShouldHappenWithin = assertions.ShouldHappenWithin
+ ShouldNotHappenWithin = assertions.ShouldNotHappenWithin
+ ShouldBeChronological = assertions.ShouldBeChronological
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/context.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/context.go
new file mode 100644
index 00000000000..2c75c2d7b1b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/context.go
@@ -0,0 +1,275 @@
+package convey
+
+import (
+ "fmt"
+
+ "github.com/jtolds/gls"
+ "github.com/smartystreets/goconvey/convey/reporting"
+)
+
+type conveyErr struct {
+ fmt string
+ params []interface{}
+}
+
+func (e *conveyErr) Error() string {
+ return fmt.Sprintf(e.fmt, e.params...)
+}
+
+func conveyPanic(fmt string, params ...interface{}) {
+ panic(&conveyErr{fmt, params})
+}
+
+const (
+ missingGoTest = `Top-level calls to Convey(...) need a reference to the *testing.T.
+ Hint: Convey("description here", t, func() { /* notice that the second argument was the *testing.T (t)! */ }) `
+ extraGoTest = `Only the top-level call to Convey(...) needs a reference to the *testing.T.`
+ noStackContext = "Convey operation made without context on goroutine stack.\n" +
+ "Hint: Perhaps you meant to use `Convey(..., func(c C){...})` ?"
+ differentConveySituations = "Different set of Convey statements on subsequent pass!\nDid not expect %#v."
+ multipleIdenticalConvey = "Multiple convey suites with identical names: %#v"
+)
+
+const (
+ failureHalt = "___FAILURE_HALT___"
+
+ nodeKey = "node"
+)
+
+///////////////////////////////// Stack Context /////////////////////////////////
+
+func getCurrentContext() *context {
+ ctx, ok := ctxMgr.GetValue(nodeKey)
+ if ok {
+ return ctx.(*context)
+ }
+ return nil
+}
+
+func mustGetCurrentContext() *context {
+ ctx := getCurrentContext()
+ if ctx == nil {
+ conveyPanic(noStackContext)
+ }
+ return ctx
+}
+
+//////////////////////////////////// Context ////////////////////////////////////
+
+// context magically handles all coordination of Convey blocks and So assertions.
+//
+// It is tracked on the stack as goroutine-local-storage with the gls package,
+// or explicitly if the user decides to call convey like:
+//
+// Convey(..., func(c C) {
+// c.So(...)
+// })
+//
+// This implements the `C` interface.
+type context struct {
+ reporter reporting.Reporter
+
+ children map[string]*context
+
+ resets []func()
+
+ executedOnce bool
+ expectChildRun *bool
+ complete bool
+
+ focus bool
+ failureMode FailureMode
+}
+
+// rootConvey is the main entry point to a test suite. This is called when
+// there's no context in the stack already, and items must contain a `t` object,
+// or this panics.
+func rootConvey(items ...interface{}) {
+ entry := discover(items)
+
+ if entry.Test == nil {
+ conveyPanic(missingGoTest)
+ }
+
+ expectChildRun := true
+ ctx := &context{
+ reporter: buildReporter(),
+
+ children: make(map[string]*context),
+
+ expectChildRun: &expectChildRun,
+
+ focus: entry.Focus,
+ failureMode: defaultFailureMode.combine(entry.FailMode),
+ }
+ ctxMgr.SetValues(gls.Values{nodeKey: ctx}, func() {
+ ctx.reporter.BeginStory(reporting.NewStoryReport(entry.Test))
+ defer ctx.reporter.EndStory()
+
+ for ctx.shouldVisit() {
+ ctx.conveyInner(entry.Situation, entry.Func)
+ expectChildRun = true
+ }
+ })
+}
+
+//////////////////////////////////// Methods ////////////////////////////////////
+
+func (ctx *context) SkipConvey(items ...interface{}) {
+ ctx.Convey(items, skipConvey)
+}
+
+func (ctx *context) FocusConvey(items ...interface{}) {
+ ctx.Convey(items, focusConvey)
+}
+
+func (ctx *context) Convey(items ...interface{}) {
+ entry := discover(items)
+
+ // we're a branch, or leaf (on the wind)
+ if entry.Test != nil {
+ conveyPanic(extraGoTest)
+ }
+ if ctx.focus && !entry.Focus {
+ return
+ }
+
+ var inner_ctx *context
+ if ctx.executedOnce {
+ var ok bool
+ inner_ctx, ok = ctx.children[entry.Situation]
+ if !ok {
+ conveyPanic(differentConveySituations, entry.Situation)
+ }
+ } else {
+ if _, ok := ctx.children[entry.Situation]; ok {
+ conveyPanic(multipleIdenticalConvey, entry.Situation)
+ }
+ inner_ctx = &context{
+ reporter: ctx.reporter,
+
+ children: make(map[string]*context),
+
+ expectChildRun: ctx.expectChildRun,
+
+ focus: entry.Focus,
+ failureMode: ctx.failureMode.combine(entry.FailMode),
+ }
+ ctx.children[entry.Situation] = inner_ctx
+ }
+
+ if inner_ctx.shouldVisit() {
+ ctxMgr.SetValues(gls.Values{nodeKey: inner_ctx}, func() {
+ inner_ctx.conveyInner(entry.Situation, entry.Func)
+ })
+ }
+}
+
+func (ctx *context) SkipSo(stuff ...interface{}) {
+ ctx.assertionReport(reporting.NewSkipReport())
+}
+
+func (ctx *context) So(actual interface{}, assert assertion, expected ...interface{}) {
+ if result := assert(actual, expected...); result == assertionSuccess {
+ ctx.assertionReport(reporting.NewSuccessReport())
+ } else {
+ ctx.assertionReport(reporting.NewFailureReport(result))
+ }
+}
+
+func (ctx *context) Reset(action func()) {
+ /* TODO: Failure mode configuration */
+ ctx.resets = append(ctx.resets, action)
+}
+
+func (ctx *context) Print(items ...interface{}) (int, error) {
+ fmt.Fprint(ctx.reporter, items...)
+ return fmt.Print(items...)
+}
+
+func (ctx *context) Println(items ...interface{}) (int, error) {
+ fmt.Fprintln(ctx.reporter, items...)
+ return fmt.Println(items...)
+}
+
+func (ctx *context) Printf(format string, items ...interface{}) (int, error) {
+ fmt.Fprintf(ctx.reporter, format, items...)
+ return fmt.Printf(format, items...)
+}
+
+//////////////////////////////////// Private ////////////////////////////////////
+
+// shouldVisit returns true iff we should traverse down into a Convey. Note
+// that just because we don't traverse a Convey this time, doesn't mean that
+// we may not traverse it on a subsequent pass.
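+//
+// Each pass through the suite executes down to a single not-yet-complete
+// leaf, which is how sibling Convey scopes run isolated from one another.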
+func (c *context) shouldVisit() bool {
+ return !c.complete && *c.expectChildRun
+}
+
+// conveyInner is the function which actually executes the user's anonymous test
+// function body. At this point, Convey or RootConvey has decided that this
+// function should actually run.
+func (ctx *context) conveyInner(situation string, f func(C)) {
+ // Record/Reset state for next time.
+ defer func() {
+ ctx.executedOnce = true
+
+ // This is only needed at the leaves, but there's no harm in also setting it
+ // when returning from branch Conveys.
+ *ctx.expectChildRun = false
+ }()
+
+ // Set up+tear down our scope for the reporter
+ ctx.reporter.Enter(reporting.NewScopeReport(situation))
+ defer ctx.reporter.Exit()
+
+ // Recover from any panics in f, and assign the `complete` status for this
+ // node of the tree.
+ defer func() {
+ ctx.complete = true
+ if problem := recover(); problem != nil {
+ if problem, ok := problem.(*conveyErr); ok {
+ panic(problem)
+ }
+ if problem != failureHalt {
+ ctx.reporter.Report(reporting.NewErrorReport(problem))
+ }
+ } else {
+ for _, child := range ctx.children {
+ if !child.complete {
+ ctx.complete = false
+ return
+ }
+ }
+ }
+ }()
+
+ // Resets are registered as the `f` function executes, so nil them here.
+ // All resets are run in registration order (FIFO).
+ ctx.resets = []func(){}
+ defer func() {
+ for _, r := range ctx.resets {
+ // panics handled by the previous defer
+ r()
+ }
+ }()
+
+ if f == nil {
+ // if f is nil, this was either a Convey(..., nil), or a SkipConvey
+ ctx.reporter.Report(reporting.NewSkipReport())
+ } else {
+ f(ctx)
+ }
+}
+
+// assertionReport is a helper for So and SkipSo which makes the report and
+// then possibly panics, depending on the current context's failureMode.
+func (ctx *context) assertionReport(r *reporting.AssertionResult) {
+ ctx.reporter.Report(r)
+ if r.Failure != "" && ctx.failureMode == FailureHalts {
+ panic(failureHalt)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/convey.goconvey b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/convey.goconvey
new file mode 100644
index 00000000000..a2d9327dc91
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/convey.goconvey
@@ -0,0 +1,4 @@
+#ignore
+-timeout=1s
+#-covermode=count
+#-coverpkg=github.com/smartystreets/goconvey/convey,github.com/smartystreets/goconvey/convey/gotest,github.com/smartystreets/goconvey/convey/reporting
\ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/discovery.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/discovery.go
new file mode 100644
index 00000000000..eb8d4cb2cee
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/discovery.go
@@ -0,0 +1,107 @@
+package convey
+
+type actionSpecifier uint8
+
+const (
+ noSpecifier actionSpecifier = iota
+ skipConvey
+ focusConvey
+)
+
+type suite struct {
+ Situation string
+ Test t
+ Focus bool
+ Func func(C) // nil means skipped
+ FailMode FailureMode
+}
+
+func newSuite(situation string, failureMode FailureMode, f func(C), test t, specifier actionSpecifier) *suite {
+ ret := &suite{
+ Situation: situation,
+ Test: test,
+ Func: f,
+ FailMode: failureMode,
+ }
+ switch specifier {
+ case skipConvey:
+ ret.Func = nil
+ case focusConvey:
+ ret.Focus = true
+ }
+ return ret
+}
+
+func discover(items []interface{}) *suite {
+ name, items := parseName(items)
+ test, items := parseGoTest(items)
+ failure, items := parseFailureMode(items)
+ action, items := parseAction(items)
+ specifier, items := parseSpecifier(items)
+
+ if len(items) != 0 {
+ conveyPanic(parseError)
+ }
+
+ return newSuite(name, failure, action, test, specifier)
+}
+func item(items []interface{}) interface{} {
+ if len(items) == 0 {
+ conveyPanic(parseError)
+ }
+ return items[0]
+}
+func parseName(items []interface{}) (string, []interface{}) {
+ if name, parsed := item(items).(string); parsed {
+ return name, items[1:]
+ }
+ conveyPanic(parseError)
+ panic("never get here")
+}
+func parseGoTest(items []interface{}) (t, []interface{}) {
+ if test, parsed := item(items).(t); parsed {
+ return test, items[1:]
+ }
+ return nil, items
+}
+func parseFailureMode(items []interface{}) (FailureMode, []interface{}) {
+ if mode, parsed := item(items).(FailureMode); parsed {
+ return mode, items[1:]
+ }
+ return FailureInherits, items
+}
+func parseAction(items []interface{}) (func(C), []interface{}) {
+ switch x := item(items).(type) {
+ case nil:
+ return nil, items[1:]
+ case func(C):
+ return x, items[1:]
+ case func():
+ return func(C) { x() }, items[1:]
+ }
+ conveyPanic(parseError)
+ panic("never get here")
+}
+func parseSpecifier(items []interface{}) (actionSpecifier, []interface{}) {
+ if len(items) == 0 {
+ return noSpecifier, items
+ }
+ if spec, ok := items[0].(actionSpecifier); ok {
+ return spec, items[1:]
+ }
+ conveyPanic(parseError)
+ panic("never get here")
+}
+
+// This interface allows us to pass the *testing.T struct
+// throughout the internals of this package without ever
+// having to import the "testing" package.
+type t interface {
+ Fail()
+}
+
+const parseError = "You must provide a name (string), then a *testing.T (if in outermost scope), an optional FailureMode, and then an action (func())."
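+
+// A valid outermost call therefore looks like
+// Convey("description", t, FailureContinues, func() { ... }),
+// where the FailureMode is optional and nested calls omit the *testing.T.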
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/doc.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/doc.go
new file mode 100644
index 00000000000..2562ce4c284
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/doc.go
@@ -0,0 +1,218 @@
+// Package convey contains all of the public-facing entry points to this project.
+// This means that it should never be required of the user to import any other
+// packages from this project as they serve internal purposes.
+package convey
+
+import "github.com/smartystreets/goconvey/convey/reporting"
+
+////////////////////////////////// suite //////////////////////////////////
+
+// C is the Convey context which you can optionally obtain in your action
+// by calling Convey like:
+//
+// Convey(..., func(c C) {
+// ...
+// })
+//
+// See the documentation on Convey for more details.
+//
+// All methods in this context behave identically to the global functions of the
+// same name in this package.
+type C interface {
+ Convey(items ...interface{})
+ SkipConvey(items ...interface{})
+ FocusConvey(items ...interface{})
+
+ So(actual interface{}, assert assertion, expected ...interface{})
+ SkipSo(stuff ...interface{})
+
+ Reset(action func())
+
+ Println(items ...interface{}) (int, error)
+ Print(items ...interface{}) (int, error)
+ Printf(format string, items ...interface{}) (int, error)
+}
+
+// Convey is the method intended for use when declaring the scopes of
+// a specification. Each scope has a description and a func() which may contain
+// other calls to Convey(), Reset() or Should-style assertions. Convey calls can
+// be nested as far as you see fit.
+//
+// IMPORTANT NOTE: The top-level Convey() within a Test method
+// must conform to the following signature:
+//
+// Convey(description string, t *testing.T, action func())
+//
+// All other calls should look like this (no need to pass in *testing.T):
+//
+// Convey(description string, action func())
+//
+// Don't worry, goconvey will panic if you get it wrong so you can fix it.
+//
+// Additionally, you may explicitly obtain access to the Convey context by doing:
+//
+// Convey(description string, action func(c C))
+//
+// You may need to do this if you want to pass the context through to a
+// goroutine, or to close over the context in a handler to a library which
+// calls your handler in a goroutine (httptest comes to mind).
+//
+// All Convey()-blocks also accept an optional parameter of FailureMode which sets
+// how goconvey should treat failures for So()-assertions in the block and
+// nested blocks. See the constants in this file for the available options.
+//
+// By default it will inherit from its parent block and the top-level blocks
+// default to the FailureHalts setting.
+//
+// This parameter is inserted before the block itself:
+//
+// Convey(description string, t *testing.T, mode FailureMode, action func())
+// Convey(description string, mode FailureMode, action func())
+//
+// See the examples package for, well, examples.
+func Convey(items ...interface{}) {
+ if ctx := getCurrentContext(); ctx == nil {
+ rootConvey(items...)
+ } else {
+ ctx.Convey(items...)
+ }
+}
+
+// SkipConvey is analogous to Convey except that the scope is not executed
+// (which means that child scopes defined within this scope are not run either).
+// The reporter will be notified that this step was skipped.
+func SkipConvey(items ...interface{}) {
+ Convey(append(items, skipConvey)...)
+}
+
+// FocusConvey has the inverse effect of SkipConvey. If the top-level
+// Convey is changed to `FocusConvey`, only nested scopes that are defined
+// with FocusConvey will be run. The rest will be ignored completely. This
+// is handy when debugging a large suite that runs a misbehaving function
+// repeatedly, as you can disable all but one arm of that suite without
+// swaths of `SkipConvey` calls, using just a targeted chain of calls to
+// FocusConvey.
+func FocusConvey(items ...interface{}) {
+ Convey(append(items, focusConvey)...)
+}
+
+// Reset registers a cleanup function to be run after each Convey()
+// in the same scope. See the examples package for a simple use case.
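+//
+// A sketch of the common pattern:
+//
+//   Convey("parent", t, func() {
+//       Reset(func() { /* cleanup */ })
+//
+//       Convey("child", func() { ... })  // the Reset runs after each child
+//   })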
+func Reset(action func()) {
+ mustGetCurrentContext().Reset(action)
+}
+
+/////////////////////////////////// Assertions ///////////////////////////////////
+
+// assertion is an alias for a function with a signature that the convey.So()
+// method can handle. Any future or custom assertions should conform to this
+// method signature. The return value should be an empty string if the assertion
+// passes and a well-formed failure message if not.
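+//
+// For instance, a custom assertion (an illustrative sketch, not part of
+// this package) might look like:
+//
+//   func ShouldBeEven(actual interface{}, expected ...interface{}) string {
+//       if n, ok := actual.(int); ok && n%2 == 0 {
+//           return "" // empty string signals success
+//       }
+//       return "expected an even int"
+//   }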
+type assertion func(actual interface{}, expected ...interface{}) string
+
+const assertionSuccess = ""
+
+// So is the means by which assertions are made against the system under test.
+// The majority of exported names in the assertions package begin with the word
+// 'Should' and describe how the first argument (actual) should compare with any
+// of the final (expected) arguments. How many final arguments are accepted
+// depends on the particular assertion that is passed in as the assert argument.
+// See the examples package for use cases and the assertions package for
+// documentation on specific assertion methods. A failing assertion will
+// cause t.Fail() to be invoked--you should never call this method (or other
+// failure-inducing methods) in your test code. Leave that to GoConvey.
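+//
+// For example:
+//
+//   So(x, ShouldEqual, 2)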
+func So(actual interface{}, assert assertion, expected ...interface{}) {
+ mustGetCurrentContext().So(actual, assert, expected...)
+}
+
+// SkipSo is analogous to So except that the assertion that would have been passed
+// to So is not executed and the reporter is notified that the assertion was skipped.
+func SkipSo(stuff ...interface{}) {
+ mustGetCurrentContext().SkipSo()
+}
+
+// FailureMode is a type which determines how the So() blocks should fail
+// if their assertion fails. See constants further down for acceptable values.
+type FailureMode string
+
+const (
+
+ // FailureContinues is a failure mode which prevents failing
+ // So()-assertions from halting Convey-block execution, instead
+ // allowing the test to continue past failing So()-assertions.
+ FailureContinues FailureMode = "continue"
+
+ // FailureHalts is the default setting for a top-level Convey()-block
+ // and will cause all failing So()-assertions to halt further execution
+ // in that test-arm and continue on to the next arm.
+ FailureHalts FailureMode = "halt"
+
+ // FailureInherits is the default setting for failure-mode; it will
+ // default to the failure-mode of the parent block. You should never
+ // need to specify this mode in your tests.
+ FailureInherits FailureMode = "inherits"
+)
+
+func (f FailureMode) combine(other FailureMode) FailureMode {
+ if other == FailureInherits {
+ return f
+ }
+ return other
+}
+
+var defaultFailureMode FailureMode = FailureHalts
+
+// SetDefaultFailureMode allows you to specify the default failure mode
+// for all Convey blocks. It is meant to be used in an init function to
+// allow the default mode to be changed across all tests for an entire
+// package, but it can be used anywhere.
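+//
+//   func init() { SetDefaultFailureMode(FailureContinues) }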
+func SetDefaultFailureMode(mode FailureMode) {
+ if mode == FailureContinues || mode == FailureHalts {
+ defaultFailureMode = mode
+ } else {
+ panic("You may only use the constants named 'FailureContinues' and 'FailureHalts' as default failure modes.")
+ }
+}
+
+//////////////////////////////////// Print functions ////////////////////////////////////
+
+// Print is analogous to fmt.Print (and it even calls fmt.Print). It ensures that
+// output is aligned with the corresponding scopes in the web UI.
+func Print(items ...interface{}) (written int, err error) {
+ return mustGetCurrentContext().Print(items...)
+}
+
+// Println is analogous to fmt.Println (and it even calls fmt.Println). It ensures that
+// output is aligned with the corresponding scopes in the web UI.
+func Println(items ...interface{}) (written int, err error) {
+ return mustGetCurrentContext().Println(items...)
+}
+
+// Printf is analogous to fmt.Printf (and it even calls fmt.Printf). It ensures that
+// output is aligned with the corresponding scopes in the web UI.
+func Printf(format string, items ...interface{}) (written int, err error) {
+ return mustGetCurrentContext().Printf(format, items...)
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// SuppressConsoleStatistics prevents automatic printing of console statistics.
+// Calling PrintConsoleStatistics explicitly will force printing of statistics.
+func SuppressConsoleStatistics() {
+ reporting.SuppressConsoleStatistics()
+}
+
+// PrintConsoleStatistics may be called at any time to print assertion statistics.
+// Generally, the best place to do this would be in a TestMain function,
+// after all tests have been run. Something like this:
+//
+// func TestMain(m *testing.M) {
+// convey.SuppressConsoleStatistics()
+// result := m.Run()
+// convey.PrintConsoleStatistics()
+// os.Exit(result)
+// }
+//
+func PrintConsoleStatistics() {
+ reporting.PrintConsoleStatistics()
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/focused_execution_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/focused_execution_test.go
new file mode 100644
index 00000000000..294e32fa17e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/focused_execution_test.go
@@ -0,0 +1,72 @@
+package convey
+
+import "testing"
+
+func TestFocusOnlyAtTopLevel(t *testing.T) {
+ output := prepare()
+
+ FocusConvey("hi", t, func() {
+ output += "done"
+ })
+
+ expectEqual(t, "done", output)
+}
+
+func TestFocus(t *testing.T) {
+ output := prepare()
+
+ FocusConvey("hi", t, func() {
+ output += "1"
+
+ Convey("bye", func() {
+ output += "2"
+ })
+ })
+
+ expectEqual(t, "1", output)
+}
+
+func TestNestedFocus(t *testing.T) {
+ output := prepare()
+
+ FocusConvey("hi", t, func() {
+ output += "1"
+
+ Convey("This shouldn't run", func() {
+ output += "boink!"
+ })
+
+ FocusConvey("This should run", func() {
+ output += "2"
+
+ FocusConvey("The should run too", func() {
+ output += "3"
+
+ })
+
+ Convey("The should NOT run", func() {
+ output += "blah blah blah!"
+ })
+ })
+ })
+
+ expectEqual(t, "123", output)
+}
+
+func TestForgotTopLevelFocus(t *testing.T) {
+ output := prepare()
+
+ Convey("1", t, func() {
+ output += "1"
+
+ FocusConvey("This will be run because the top-level lacks Focus", func() {
+ output += "2"
+ })
+
+ Convey("3", func() {
+ output += "3"
+ })
+ })
+
+ expectEqual(t, "1213", output)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/gotest/doc_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/gotest/doc_test.go
new file mode 100644
index 00000000000..1b6406be99b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/gotest/doc_test.go
@@ -0,0 +1 @@
+package gotest
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/gotest/utils.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/gotest/utils.go
new file mode 100644
index 00000000000..3a5c848a445
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/gotest/utils.go
@@ -0,0 +1,28 @@
+// Package gotest contains internal functionality. Although this package
+// contains one or more exported names it is not intended for public
+// consumption. See the examples package for how to use this project.
+package gotest
+
+import (
+ "runtime"
+ "strings"
+)
+
+func ResolveExternalCaller() (file string, line int, name string) {
+ var caller_id uintptr
+ callers := runtime.Callers(0, callStack)
+
+ for x := 0; x < callers; x++ {
+ caller_id, file, line, _ = runtime.Caller(x)
+ if strings.HasSuffix(file, "_test.go") || strings.HasSuffix(file, "_tests.go") {
+ name = runtime.FuncForPC(caller_id).Name()
+ return
+ }
+ }
+ file, line, name = "<unkown file>", -1, "<unknown name>"
+ return // panic?
+}
+
+const maxStackDepth = 100 // This had better be enough...
+
+var callStack []uintptr = make([]uintptr, maxStackDepth, maxStackDepth)
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/init.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/init.go
new file mode 100644
index 00000000000..732b72142e0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/init.go
@@ -0,0 +1,81 @@
+package convey
+
+import (
+ "flag"
+ "os"
+
+ "github.com/jtolds/gls"
+ "github.com/smartystreets/assertions"
+ "github.com/smartystreets/goconvey/convey/reporting"
+)
+
+func init() {
+ assertions.GoConveyMode(true)
+
+ declareFlags()
+
+ ctxMgr = gls.NewContextManager()
+}
+
+func declareFlags() {
+ flag.BoolVar(&json, "json", false, "When true, emits results in JSON blocks. Default: 'false'")
+ flag.BoolVar(&silent, "silent", false, "When true, all output from GoConvey is suppressed.")
+ flag.BoolVar(&story, "story", false, "When true, emits story output, otherwise emits dot output. When not provided, this flag mirrors the value of the '-test.v' flag")
+
+ if noStoryFlagProvided() {
+ story = verboseEnabled
+ }
+
+ // FYI: flag.Parse() is called from the testing package.
+}
+
+func noStoryFlagProvided() bool {
+ return !story && !storyDisabled
+}
+
+func buildReporter() reporting.Reporter {
+ selectReporter := os.Getenv("GOCONVEY_REPORTER")
+
+ switch {
+ case testReporter != nil:
+ return testReporter
+ case json || selectReporter == "json":
+ return reporting.BuildJsonReporter()
+ case silent || selectReporter == "silent":
+ return reporting.BuildSilentReporter()
+ case selectReporter == "dot":
+ // Story is turned on when verbose is set, so we need to check for dot reporter first.
+ return reporting.BuildDotReporter()
+ case story || selectReporter == "story":
+ return reporting.BuildStoryReporter()
+ default:
+ return reporting.BuildDotReporter()
+ }
+}
+
+var (
+ ctxMgr *gls.ContextManager
+
+ // only set by internal tests
+ testReporter reporting.Reporter
+)
+
+var (
+ json bool
+ silent bool
+ story bool
+
+ verboseEnabled = flagFound("-test.v=true")
+ storyDisabled = flagFound("-story=false")
+)
+
+// flagFound parses the command line args manually for flags defined in other
+// packages. Like the '-v' flag from the "testing" package, for instance.
+func flagFound(flagValue string) bool {
+ for _, arg := range os.Args {
+ if arg == flagValue {
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/isolated_execution_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/isolated_execution_test.go
new file mode 100644
index 00000000000..7e22b3caa53
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/isolated_execution_test.go
@@ -0,0 +1,774 @@
+package convey
+
+import (
+ "strconv"
+ "testing"
+ "time"
+)
+
+func TestSingleScope(t *testing.T) {
+ output := prepare()
+
+ Convey("hi", t, func() {
+ output += "done"
+ })
+
+ expectEqual(t, "done", output)
+}
+
+func TestSingleScopeWithMultipleConveys(t *testing.T) {
+ output := prepare()
+
+ Convey("1", t, func() {
+ output += "1"
+ })
+
+ Convey("2", t, func() {
+ output += "2"
+ })
+
+ expectEqual(t, "12", output)
+}
+
+func TestNestedScopes(t *testing.T) {
+ output := prepare()
+
+ Convey("a", t, func() {
+ output += "a "
+
+ Convey("bb", func() {
+ output += "bb "
+
+ Convey("ccc", func() {
+ output += "ccc | "
+ })
+ })
+ })
+
+ expectEqual(t, "a bb ccc | ", output)
+}
+
+func TestNestedScopesWithIsolatedExecution(t *testing.T) {
+ output := prepare()
+
+ Convey("a", t, func() {
+ output += "a "
+
+ Convey("aa", func() {
+ output += "aa "
+
+ Convey("aaa", func() {
+ output += "aaa | "
+ })
+
+ Convey("aaa1", func() {
+ output += "aaa1 | "
+ })
+ })
+
+ Convey("ab", func() {
+ output += "ab "
+
+ Convey("abb", func() {
+ output += "abb | "
+ })
+ })
+ })
+
+ expectEqual(t, "a aa aaa | a aa aaa1 | a ab abb | ", output)
+}
+
+func TestSingleScopeWithConveyAndNestedReset(t *testing.T) {
+ output := prepare()
+
+ Convey("1", t, func() {
+ output += "1"
+
+ Reset(func() {
+ output += "a"
+ })
+ })
+
+ expectEqual(t, "1a", output)
+}
+
+func TestPanicingReset(t *testing.T) {
+ output := prepare()
+
+ Convey("1", t, func() {
+ output += "1"
+
+ Reset(func() {
+ panic("nooo")
+ })
+
+ Convey("runs since the reset hasn't yet", func() {
+ output += "a"
+ })
+
+		Convey("but this doesn't", func() {
+ output += "nope"
+ })
+ })
+
+ expectEqual(t, "1a", output)
+}
+
+func TestSingleScopeWithMultipleRegistrationsAndReset(t *testing.T) {
+ output := prepare()
+
+ Convey("reset after each nested convey", t, func() {
+ Convey("first output", func() {
+ output += "1"
+ })
+
+ Convey("second output", func() {
+ output += "2"
+ })
+
+ Reset(func() {
+ output += "a"
+ })
+ })
+
+ expectEqual(t, "1a2a", output)
+}
+
+func TestSingleScopeWithMultipleRegistrationsAndMultipleResets(t *testing.T) {
+ output := prepare()
+
+ Convey("each reset is run at end of each nested convey", t, func() {
+ Convey("1", func() {
+ output += "1"
+ })
+
+ Convey("2", func() {
+ output += "2"
+ })
+
+ Reset(func() {
+ output += "a"
+ })
+
+ Reset(func() {
+ output += "b"
+ })
+ })
+
+ expectEqual(t, "1ab2ab", output)
+}
+
+func Test_Failure_AtHigherLevelScopePreventsChildScopesFromRunning(t *testing.T) {
+ output := prepare()
+
+ Convey("This step fails", t, func() {
+ So(1, ShouldEqual, 2)
+
+ Convey("this should NOT be executed", func() {
+ output += "a"
+ })
+ })
+
+ expectEqual(t, "", output)
+}
+
+func Test_Panic_AtHigherLevelScopePreventsChildScopesFromRunning(t *testing.T) {
+ output := prepare()
+
+ Convey("This step panics", t, func() {
+ Convey("this happens, because the panic didn't happen yet", func() {
+ output += "1"
+ })
+
+ output += "a"
+
+ Convey("this should NOT be executed", func() {
+ output += "2"
+ })
+
+ output += "b"
+
+ panic("Hi")
+
+ output += "nope"
+ })
+
+ expectEqual(t, "1ab", output)
+}
+
+func Test_Panic_InChildScopeDoes_NOT_PreventExecutionOfSiblingScopes(t *testing.T) {
+ output := prepare()
+
+ Convey("This is the parent", t, func() {
+ Convey("This step panics", func() {
+ panic("Hi")
+ output += "1"
+ })
+
+ Convey("This sibling should execute", func() {
+ output += "2"
+ })
+ })
+
+ expectEqual(t, "2", output)
+}
+
+func Test_Failure_InChildScopeDoes_NOT_PreventExecutionOfSiblingScopes(t *testing.T) {
+ output := prepare()
+
+ Convey("This is the parent", t, func() {
+ Convey("This step fails", func() {
+ So(1, ShouldEqual, 2)
+ output += "1"
+ })
+
+ Convey("This sibling should execute", func() {
+ output += "2"
+ })
+ })
+
+ expectEqual(t, "2", output)
+}
+
+func TestResetsAreAlwaysExecutedAfterScope_Panics(t *testing.T) {
+ output := prepare()
+
+ Convey("This is the parent", t, func() {
+ Convey("This step panics", func() {
+ panic("Hi")
+ output += "1"
+ })
+
+ Convey("This sibling step does not panic", func() {
+ output += "a"
+
+ Reset(func() {
+ output += "b"
+ })
+ })
+
+ Reset(func() {
+ output += "2"
+ })
+ })
+
+ expectEqual(t, "2ab2", output)
+}
+
+func TestResetsAreAlwaysExecutedAfterScope_Failures(t *testing.T) {
+ output := prepare()
+
+ Convey("This is the parent", t, func() {
+ Convey("This step fails", func() {
+ So(1, ShouldEqual, 2)
+ output += "1"
+ })
+
+ Convey("This sibling step does not fail", func() {
+ output += "a"
+
+ Reset(func() {
+ output += "b"
+ })
+ })
+
+ Reset(func() {
+ output += "2"
+ })
+ })
+
+ expectEqual(t, "2ab2", output)
+}
+
+func TestSkipTopLevel(t *testing.T) {
+ output := prepare()
+
+ SkipConvey("hi", t, func() {
+ output += "This shouldn't be executed!"
+ })
+
+ expectEqual(t, "", output)
+}
+
+func TestSkipNestedLevel(t *testing.T) {
+ output := prepare()
+
+ Convey("hi", t, func() {
+ output += "yes"
+
+ SkipConvey("bye", func() {
+ output += "no"
+ })
+ })
+
+ expectEqual(t, "yes", output)
+}
+
+func TestSkipNestedLevelSkipsAllChildLevels(t *testing.T) {
+ output := prepare()
+
+ Convey("hi", t, func() {
+ output += "yes"
+
+ SkipConvey("bye", func() {
+ output += "no"
+
+ Convey("byebye", func() {
+ output += "no-no"
+ })
+ })
+ })
+
+ expectEqual(t, "yes", output)
+}
+
+func TestIterativeConveys(t *testing.T) {
+ output := prepare()
+
+ Convey("Test", t, func() {
+ for x := 0; x < 10; x++ {
+ y := strconv.Itoa(x)
+
+ Convey(y, func() {
+ output += y
+ })
+ }
+ })
+
+ expectEqual(t, "0123456789", output)
+}
+
+func TestClosureVariables(t *testing.T) {
+ output := prepare()
+
+ i := 0
+
+ Convey("A", t, func() {
+ i = i + 1
+ j := i
+
+ output += "A" + strconv.Itoa(i) + " "
+
+ Convey("B", func() {
+ k := j
+ j = j + 1
+
+ output += "B" + strconv.Itoa(k) + " "
+
+ Convey("C", func() {
+ output += "C" + strconv.Itoa(k) + strconv.Itoa(j) + " "
+ })
+
+ Convey("D", func() {
+ output += "D" + strconv.Itoa(k) + strconv.Itoa(j) + " "
+ })
+ })
+
+ Convey("C", func() {
+ output += "C" + strconv.Itoa(j) + " "
+ })
+ })
+
+ output += "D" + strconv.Itoa(i) + " "
+
+ expectEqual(t, "A1 B1 C12 A2 B2 D23 A3 C3 D3 ", output)
+}
+
+func TestClosureVariablesWithReset(t *testing.T) {
+ output := prepare()
+
+ i := 0
+
+ Convey("A", t, func() {
+ i = i + 1
+ j := i
+
+ output += "A" + strconv.Itoa(i) + " "
+
+ Reset(func() {
+ output += "R" + strconv.Itoa(i) + strconv.Itoa(j) + " "
+ })
+
+ Convey("B", func() {
+ output += "B" + strconv.Itoa(j) + " "
+ })
+
+ Convey("C", func() {
+ output += "C" + strconv.Itoa(j) + " "
+ })
+ })
+
+ output += "D" + strconv.Itoa(i) + " "
+
+ expectEqual(t, "A1 B1 R11 A2 C2 R22 D2 ", output)
+}
+
+func TestWrappedSimple(t *testing.T) {
+ prepare()
+ output := resetTestString{""}
+
+ Convey("A", t, func() {
+ func() {
+ output.output += "A "
+
+ Convey("B", func() {
+ output.output += "B "
+
+ Convey("C", func() {
+ output.output += "C "
+ })
+
+ })
+
+ Convey("D", func() {
+ output.output += "D "
+ })
+ }()
+ })
+
+ expectEqual(t, "A B C A D ", output.output)
+}
+
+type resetTestString struct {
+ output string
+}
+
+func addReset(o *resetTestString, f func()) func() {
+ return func() {
+ Reset(func() {
+ o.output += "R "
+ })
+
+ f()
+ }
+}
+
+func TestWrappedReset(t *testing.T) {
+ prepare()
+ output := resetTestString{""}
+
+ Convey("A", t, addReset(&output, func() {
+ output.output += "A "
+
+ Convey("B", func() {
+ output.output += "B "
+ })
+
+ Convey("C", func() {
+ output.output += "C "
+ })
+ }))
+
+ expectEqual(t, "A B R A C R ", output.output)
+}
+
+func TestWrappedReset2(t *testing.T) {
+ prepare()
+ output := resetTestString{""}
+
+ Convey("A", t, func() {
+ Reset(func() {
+ output.output += "R "
+ })
+
+ func() {
+ output.output += "A "
+
+ Convey("B", func() {
+ output.output += "B "
+
+ Convey("C", func() {
+ output.output += "C "
+ })
+ })
+
+ Convey("D", func() {
+ output.output += "D "
+ })
+ }()
+ })
+
+ expectEqual(t, "A B C R A D R ", output.output)
+}
+
+func TestInfiniteLoopWithTrailingFail(t *testing.T) {
+ done := make(chan int)
+
+ go func() {
+ Convey("This fails", t, func() {
+ Convey("and this is run", func() {
+ So(true, ShouldEqual, true)
+ })
+
+			/* And this prevents the whole block from being marked as run */
+ So(false, ShouldEqual, true)
+ })
+
+ done <- 1
+ }()
+
+ select {
+ case <-done:
+ return
+ case <-time.After(1 * time.Millisecond):
+ t.Fail()
+ }
+}
+
+func TestOutermostResetInvokedForGrandchildren(t *testing.T) {
+ output := prepare()
+
+ Convey("A", t, func() {
+ output += "A "
+
+ Reset(func() {
+ output += "rA "
+ })
+
+ Convey("B", func() {
+ output += "B "
+
+ Reset(func() {
+ output += "rB "
+ })
+
+ Convey("C", func() {
+ output += "C "
+
+ Reset(func() {
+ output += "rC "
+ })
+ })
+
+ Convey("D", func() {
+ output += "D "
+
+ Reset(func() {
+ output += "rD "
+ })
+ })
+ })
+ })
+
+ expectEqual(t, "A B C rC rB rA A B D rD rB rA ", output)
+}
+
+func TestFailureOption(t *testing.T) {
+ output := prepare()
+
+ Convey("A", t, FailureHalts, func() {
+ output += "A "
+ So(true, ShouldEqual, true)
+ output += "B "
+ So(false, ShouldEqual, true)
+ output += "C "
+ })
+
+ expectEqual(t, "A B ", output)
+}
+
+func TestFailureOption2(t *testing.T) {
+ output := prepare()
+
+ Convey("A", t, func() {
+ output += "A "
+ So(true, ShouldEqual, true)
+ output += "B "
+ So(false, ShouldEqual, true)
+ output += "C "
+ })
+
+ expectEqual(t, "A B ", output)
+}
+
+func TestFailureOption3(t *testing.T) {
+ output := prepare()
+
+ Convey("A", t, FailureContinues, func() {
+ output += "A "
+ So(true, ShouldEqual, true)
+ output += "B "
+ So(false, ShouldEqual, true)
+ output += "C "
+ })
+
+ expectEqual(t, "A B C ", output)
+}
+
+func TestFailureOptionInherit(t *testing.T) {
+ output := prepare()
+
+ Convey("A", t, FailureContinues, func() {
+ output += "A1 "
+ So(false, ShouldEqual, true)
+ output += "A2 "
+
+ Convey("B", func() {
+ output += "B1 "
+ So(true, ShouldEqual, true)
+ output += "B2 "
+ So(false, ShouldEqual, true)
+ output += "B3 "
+ })
+ })
+
+ expectEqual(t, "A1 A2 B1 B2 B3 ", output)
+}
+
+func TestFailureOptionInherit2(t *testing.T) {
+ output := prepare()
+
+ Convey("A", t, FailureHalts, func() {
+ output += "A1 "
+ So(false, ShouldEqual, true)
+ output += "A2 "
+
+ Convey("B", func() {
+ output += "A1 "
+ So(true, ShouldEqual, true)
+ output += "A2 "
+ So(false, ShouldEqual, true)
+ output += "A3 "
+ })
+ })
+
+ expectEqual(t, "A1 ", output)
+}
+
+func TestFailureOptionInherit3(t *testing.T) {
+ output := prepare()
+
+ Convey("A", t, FailureHalts, func() {
+ output += "A1 "
+ So(true, ShouldEqual, true)
+ output += "A2 "
+
+ Convey("B", func() {
+ output += "B1 "
+ So(true, ShouldEqual, true)
+ output += "B2 "
+ So(false, ShouldEqual, true)
+ output += "B3 "
+ })
+ })
+
+ expectEqual(t, "A1 A2 B1 B2 ", output)
+}
+
+func TestFailureOptionNestedOverride(t *testing.T) {
+ output := prepare()
+
+ Convey("A", t, FailureContinues, func() {
+ output += "A "
+ So(false, ShouldEqual, true)
+ output += "B "
+
+ Convey("C", FailureHalts, func() {
+ output += "C "
+ So(true, ShouldEqual, true)
+ output += "D "
+ So(false, ShouldEqual, true)
+ output += "E "
+ })
+ })
+
+ expectEqual(t, "A B C D ", output)
+}
+
+func TestFailureOptionNestedOverride2(t *testing.T) {
+ output := prepare()
+
+ Convey("A", t, FailureHalts, func() {
+ output += "A "
+ So(true, ShouldEqual, true)
+ output += "B "
+
+ Convey("C", FailureContinues, func() {
+ output += "C "
+ So(true, ShouldEqual, true)
+ output += "D "
+ So(false, ShouldEqual, true)
+ output += "E "
+ })
+ })
+
+ expectEqual(t, "A B C D E ", output)
+}
+
+func TestMultipleInvocationInheritance(t *testing.T) {
+ output := prepare()
+
+ Convey("A", t, FailureHalts, func() {
+ output += "A1 "
+ So(true, ShouldEqual, true)
+ output += "A2 "
+
+ Convey("B", FailureContinues, func() {
+ output += "B1 "
+ So(true, ShouldEqual, true)
+ output += "B2 "
+ So(false, ShouldEqual, true)
+ output += "B3 "
+ })
+
+ Convey("C", func() {
+ output += "C1 "
+ So(true, ShouldEqual, true)
+ output += "C2 "
+ So(false, ShouldEqual, true)
+ output += "C3 "
+ })
+ })
+
+ expectEqual(t, "A1 A2 B1 B2 B3 A1 A2 C1 C2 ", output)
+}
+
+func TestMultipleInvocationInheritance2(t *testing.T) {
+ output := prepare()
+
+ Convey("A", t, FailureContinues, func() {
+ output += "A1 "
+ So(true, ShouldEqual, true)
+ output += "A2 "
+ So(false, ShouldEqual, true)
+ output += "A3 "
+
+ Convey("B", FailureHalts, func() {
+ output += "B1 "
+ So(true, ShouldEqual, true)
+ output += "B2 "
+ So(false, ShouldEqual, true)
+ output += "B3 "
+ })
+
+ Convey("C", func() {
+ output += "C1 "
+ So(true, ShouldEqual, true)
+ output += "C2 "
+ So(false, ShouldEqual, true)
+ output += "C3 "
+ })
+ })
+
+ expectEqual(t, "A1 A2 A3 B1 B2 A1 A2 A3 C1 C2 C3 ", output)
+}
+
+func TestSetDefaultFailureMode(t *testing.T) {
+ output := prepare()
+
+ SetDefaultFailureMode(FailureContinues) // the default is normally FailureHalts
+ defer SetDefaultFailureMode(FailureHalts)
+
+ Convey("A", t, func() {
+ output += "A1 "
+ So(true, ShouldBeFalse)
+ output += "A2 "
+ })
+
+ expectEqual(t, "A1 A2 ", output)
+}
+
+func prepare() string {
+ testReporter = newNilReporter()
+ return ""
+}
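
Annotation: the suite above relies on goconvey's isolated-execution model, in which the outermost scope body is re-run once per leaf, so sibling leaves never share a single pass. A minimal sketch of that contract, assuming the usual goconvey import path resolves in this vendor tree:

	package convey_example

	import (
		"testing"

		. "github.com/smartystreets/goconvey/convey"
	)

	// TestIsolation records execution order: the parent body runs once
	// per leaf scope, descending into exactly one leaf each pass.
	func TestIsolation(t *testing.T) {
		trace := ""
		Convey("parent", t, func() {
			trace += "P"
			Convey("left", func() { trace += "L" })
			Convey("right", func() { trace += "R" })
		})
		if trace != "PLPR" {
			t.Fatalf("unexpected trace: %q", trace)
		}
	}
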
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/nilReporter.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/nilReporter.go
new file mode 100644
index 00000000000..777b2a51228
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/nilReporter.go
@@ -0,0 +1,15 @@
+package convey
+
+import (
+ "github.com/smartystreets/goconvey/convey/reporting"
+)
+
+type nilReporter struct{}
+
+func (self *nilReporter) BeginStory(story *reporting.StoryReport) {}
+func (self *nilReporter) Enter(scope *reporting.ScopeReport) {}
+func (self *nilReporter) Report(report *reporting.AssertionResult) {}
+func (self *nilReporter) Exit() {}
+func (self *nilReporter) EndStory() {}
+func (self *nilReporter) Write(p []byte) (int, error) { return len(p), nil }
+func newNilReporter() *nilReporter { return &nilReporter{} }
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/console.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/console.go
new file mode 100644
index 00000000000..7bf67dbb2b1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/console.go
@@ -0,0 +1,16 @@
+package reporting
+
+import (
+ "fmt"
+ "io"
+)
+
+type console struct{}
+
+func (self *console) Write(p []byte) (n int, err error) {
+ return fmt.Print(string(p))
+}
+
+func NewConsole() io.Writer {
+ return new(console)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/doc.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/doc.go
new file mode 100644
index 00000000000..a37d0019466
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/doc.go
@@ -0,0 +1,5 @@
+// Package reporting contains internal functionality related
+// to console reporting and output. Although this package has
+// exported names, it is not intended for public consumption. See the
+// examples package for how to use this project.
+package reporting
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/dot.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/dot.go
new file mode 100644
index 00000000000..47d57c6b0d9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/dot.go
@@ -0,0 +1,40 @@
+package reporting
+
+import "fmt"
+
+type dot struct{ out *Printer }
+
+func (self *dot) BeginStory(story *StoryReport) {}
+
+func (self *dot) Enter(scope *ScopeReport) {}
+
+func (self *dot) Report(report *AssertionResult) {
+ if report.Error != nil {
+ fmt.Print(redColor)
+ self.out.Insert(dotError)
+ } else if report.Failure != "" {
+ fmt.Print(yellowColor)
+ self.out.Insert(dotFailure)
+ } else if report.Skipped {
+ fmt.Print(yellowColor)
+ self.out.Insert(dotSkip)
+ } else {
+ fmt.Print(greenColor)
+ self.out.Insert(dotSuccess)
+ }
+ fmt.Print(resetColor)
+}
+
+func (self *dot) Exit() {}
+
+func (self *dot) EndStory() {}
+
+func (self *dot) Write(content []byte) (written int, err error) {
+ return len(content), nil // no-op
+}
+
+func NewDotReporter(out *Printer) *dot {
+ self := new(dot)
+ self.out = out
+ return self
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/dot_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/dot_test.go
new file mode 100644
index 00000000000..a8d20d46f08
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/dot_test.go
@@ -0,0 +1,40 @@
+package reporting
+
+import (
+ "errors"
+ "testing"
+)
+
+func TestDotReporterAssertionPrinting(t *testing.T) {
+ monochrome()
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ reporter := NewDotReporter(printer)
+
+ reporter.Report(NewSuccessReport())
+ reporter.Report(NewFailureReport("failed"))
+ reporter.Report(NewErrorReport(errors.New("error")))
+ reporter.Report(NewSkipReport())
+
+ expected := dotSuccess + dotFailure + dotError + dotSkip
+
+ if file.buffer != expected {
+ t.Errorf("\nExpected: '%s'\nActual: '%s'", expected, file.buffer)
+ }
+}
+
+func TestDotReporterOnlyReportsAssertions(t *testing.T) {
+ monochrome()
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ reporter := NewDotReporter(printer)
+
+ reporter.BeginStory(nil)
+ reporter.Enter(nil)
+ reporter.Exit()
+ reporter.EndStory()
+
+ if file.buffer != "" {
+ t.Errorf("\nExpected: '(blank)'\nActual: '%s'", file.buffer)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/gotest.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/gotest.go
new file mode 100644
index 00000000000..c396e16b17a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/gotest.go
@@ -0,0 +1,33 @@
+package reporting
+
+type gotestReporter struct{ test T }
+
+func (self *gotestReporter) BeginStory(story *StoryReport) {
+ self.test = story.Test
+}
+
+func (self *gotestReporter) Enter(scope *ScopeReport) {}
+
+func (self *gotestReporter) Report(r *AssertionResult) {
+ if !passed(r) {
+ self.test.Fail()
+ }
+}
+
+func (self *gotestReporter) Exit() {}
+
+func (self *gotestReporter) EndStory() {
+ self.test = nil
+}
+
+func (self *gotestReporter) Write(content []byte) (written int, err error) {
+ return len(content), nil // no-op
+}
+
+func NewGoTestReporter() *gotestReporter {
+ return new(gotestReporter)
+}
+
+func passed(r *AssertionResult) bool {
+ return r.Error == nil && r.Failure == ""
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/gotest_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/gotest_test.go
new file mode 100644
index 00000000000..fda189458e5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/gotest_test.go
@@ -0,0 +1,66 @@
+package reporting
+
+import "testing"
+
+func TestReporterReceivesSuccessfulReport(t *testing.T) {
+ reporter := NewGoTestReporter()
+ test := new(fakeTest)
+ reporter.BeginStory(NewStoryReport(test))
+ reporter.Report(NewSuccessReport())
+
+ if test.failed {
+		t.Errorf("Should not have marked test as failed--the report reflected success.")
+ }
+}
+
+func TestReporterReceivesFailureReport(t *testing.T) {
+ reporter := NewGoTestReporter()
+ test := new(fakeTest)
+ reporter.BeginStory(NewStoryReport(test))
+ reporter.Report(NewFailureReport("This is a failure."))
+
+ if !test.failed {
+ t.Errorf("Test should have been marked as failed (but it wasn't).")
+ }
+}
+
+func TestReporterReceivesErrorReport(t *testing.T) {
+ reporter := NewGoTestReporter()
+ test := new(fakeTest)
+ reporter.BeginStory(NewStoryReport(test))
+ reporter.Report(NewErrorReport("This is an error."))
+
+ if !test.failed {
+ t.Errorf("Test should have been marked as failed (but it wasn't).")
+ }
+}
+
+func TestReporterIsResetAtTheEndOfTheStory(t *testing.T) {
+ defer catch(t)
+ reporter := NewGoTestReporter()
+ test := new(fakeTest)
+ reporter.BeginStory(NewStoryReport(test))
+ reporter.EndStory()
+
+ reporter.Report(NewSuccessReport())
+}
+
+func TestReporterNoopMethods(t *testing.T) {
+ reporter := NewGoTestReporter()
+ reporter.Enter(NewScopeReport("title"))
+ reporter.Exit()
+}
+
+func catch(t *testing.T) {
+ if r := recover(); r != nil {
+ t.Log("Getting to this point means we've passed (because we caught a panic appropriately).")
+ }
+}
+
+type fakeTest struct {
+ failed bool
+}
+
+func (self *fakeTest) Fail() {
+ self.failed = true
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/init.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/init.go
new file mode 100644
index 00000000000..44d080e90e6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/init.go
@@ -0,0 +1,94 @@
+package reporting
+
+import (
+ "os"
+ "runtime"
+ "strings"
+)
+
+func init() {
+ if !isColorableTerminal() {
+ monochrome()
+ }
+
+ if runtime.GOOS == "windows" {
+ success, failure, error_ = dotSuccess, dotFailure, dotError
+ }
+}
+
+func BuildJsonReporter() Reporter {
+ out := NewPrinter(NewConsole())
+ return NewReporters(
+ NewGoTestReporter(),
+ NewJsonReporter(out))
+}
+func BuildDotReporter() Reporter {
+ out := NewPrinter(NewConsole())
+ return NewReporters(
+ NewGoTestReporter(),
+ NewDotReporter(out),
+ NewProblemReporter(out),
+ consoleStatistics)
+}
+func BuildStoryReporter() Reporter {
+ out := NewPrinter(NewConsole())
+ return NewReporters(
+ NewGoTestReporter(),
+ NewStoryReporter(out),
+ NewProblemReporter(out),
+ consoleStatistics)
+}
+func BuildSilentReporter() Reporter {
+ out := NewPrinter(NewConsole())
+ return NewReporters(
+ NewGoTestReporter(),
+ NewSilentProblemReporter(out))
+}
+
+var (
+ newline = "\n"
+ success = "✔"
+ failure = "✘"
+ error_ = "🔥"
+ skip = "⚠"
+ dotSuccess = "."
+ dotFailure = "x"
+ dotError = "E"
+ dotSkip = "S"
+ errorTemplate = "* %s \nLine %d: - %v \n%s\n"
+ failureTemplate = "* %s \nLine %d:\n%s\n"
+)
+
+var (
+ greenColor = "\033[32m"
+ yellowColor = "\033[33m"
+ redColor = "\033[31m"
+ resetColor = "\033[0m"
+)
+
+var consoleStatistics = NewStatisticsReporter(NewPrinter(NewConsole()))
+
+func SuppressConsoleStatistics() { consoleStatistics.Suppress() }
+func PrintConsoleStatistics() { consoleStatistics.PrintSummary() }
+
+// QuietMode disables all console output symbols. This is only meant to be used
+// for tests that are internal to goconvey where the output is distracting or
+// otherwise not needed in the test output.
+func QuietMode() {
+ success, failure, error_, skip, dotSuccess, dotFailure, dotError, dotSkip = "", "", "", "", "", "", "", ""
+}
+
+func monochrome() {
+ greenColor, yellowColor, redColor, resetColor = "", "", "", ""
+}
+
+func isColorableTerminal() bool {
+ return strings.Contains(os.Getenv("TERM"), "color")
+}
+
+// This interface allows us to pass the *testing.T struct
+// throughout the internals of this tool without ever
+// having to import the "testing" package.
+type T interface {
+ Fail()
+}
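
Annotation: isColorableTerminal above is a pure substring check on $TERM, so "xterm-256color" enables ANSI colors while "screen" or an empty TERM falls back to monochrome. A standalone illustration of the same heuristic:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// Same test as isColorableTerminal: colors are enabled only when
		// $TERM contains the literal substring "color".
		for _, term := range []string{"xterm-256color", "screen", ""} {
			fmt.Printf("TERM=%q colorable=%v\n", term, strings.Contains(term, "color"))
		}
	}
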
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/json.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/json.go
new file mode 100644
index 00000000000..f8526979f85
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/json.go
@@ -0,0 +1,88 @@
+// TODO: under unit test
+
+package reporting
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+type JsonReporter struct {
+ out *Printer
+ currentKey []string
+ current *ScopeResult
+ index map[string]*ScopeResult
+ scopes []*ScopeResult
+}
+
+func (self *JsonReporter) depth() int { return len(self.currentKey) }
+
+func (self *JsonReporter) BeginStory(story *StoryReport) {}
+
+func (self *JsonReporter) Enter(scope *ScopeReport) {
+ self.currentKey = append(self.currentKey, scope.Title)
+ ID := strings.Join(self.currentKey, "|")
+ if _, found := self.index[ID]; !found {
+ next := newScopeResult(scope.Title, self.depth(), scope.File, scope.Line)
+ self.scopes = append(self.scopes, next)
+ self.index[ID] = next
+ }
+ self.current = self.index[ID]
+}
+
+func (self *JsonReporter) Report(report *AssertionResult) {
+ self.current.Assertions = append(self.current.Assertions, report)
+}
+
+func (self *JsonReporter) Exit() {
+ self.currentKey = self.currentKey[:len(self.currentKey)-1]
+}
+
+func (self *JsonReporter) EndStory() {
+ self.report()
+ self.reset()
+}
+func (self *JsonReporter) report() {
+ scopes := []string{}
+ for _, scope := range self.scopes {
+ serialized, err := json.Marshal(scope)
+ if err != nil {
+ self.out.Println(jsonMarshalFailure)
+ panic(err)
+ }
+ var buffer bytes.Buffer
+ json.Indent(&buffer, serialized, "", " ")
+ scopes = append(scopes, buffer.String())
+ }
+ self.out.Print(fmt.Sprintf("%s\n%s,\n%s\n", OpenJson, strings.Join(scopes, ","), CloseJson))
+}
+func (self *JsonReporter) reset() {
+ self.scopes = []*ScopeResult{}
+ self.index = map[string]*ScopeResult{}
+ self.currentKey = nil
+}
+
+func (self *JsonReporter) Write(content []byte) (written int, err error) {
+ self.current.Output += string(content)
+ return len(content), nil
+}
+
+func NewJsonReporter(out *Printer) *JsonReporter {
+ self := new(JsonReporter)
+ self.out = out
+ self.reset()
+ return self
+}
+
+const OpenJson = ">->->OPEN-JSON->->->" // "⌦"
+const CloseJson = "<-<-<-CLOSE-JSON<-<-<" // "⌫"
+const jsonMarshalFailure = `
+
+GOCONVEY_JSON_MARSHALL_FAILURE: There was an error when attempting to convert test results to JSON.
+Please file a bug report and reference the code that caused this failure if possible.
+
+Here's the panic:
+
+`
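
Annotation: the JSON reporter frames each story's scope results between the two sentinel strings so a consumer can pick the payload out of interleaved test output. The fragment below reproduces the exact format string from report(); note the trailing comma it emits after the joined scopes, which consumers must tolerate.

	package main

	import (
		"fmt"
		"strings"
	)

	const (
		OpenJson  = ">->->OPEN-JSON->->->"
		CloseJson = "<-<-<-CLOSE-JSON<-<-<"
	)

	func main() {
		scopes := []string{`{"Title":"A"}`, `{"Title":"B"}`}
		// Identical to JsonReporter.report's final Print call.
		fmt.Printf("%s\n%s,\n%s\n", OpenJson, strings.Join(scopes, ","), CloseJson)
	}
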
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/printer.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/printer.go
new file mode 100644
index 00000000000..6d4a879c40d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/printer.go
@@ -0,0 +1,57 @@
+package reporting
+
+import (
+ "fmt"
+ "io"
+ "strings"
+)
+
+type Printer struct {
+ out io.Writer
+ prefix string
+}
+
+func (self *Printer) Println(message string, values ...interface{}) {
+ formatted := self.format(message, values...) + newline
+ self.out.Write([]byte(formatted))
+}
+
+func (self *Printer) Print(message string, values ...interface{}) {
+ formatted := self.format(message, values...)
+ self.out.Write([]byte(formatted))
+}
+
+func (self *Printer) Insert(text string) {
+ self.out.Write([]byte(text))
+}
+
+func (self *Printer) format(message string, values ...interface{}) string {
+ var formatted string
+ if len(values) == 0 {
+ formatted = self.prefix + message
+ } else {
+ formatted = self.prefix + fmt.Sprintf(message, values...)
+ }
+ indented := strings.Replace(formatted, newline, newline+self.prefix, -1)
+ return strings.TrimRight(indented, space)
+}
+
+func (self *Printer) Indent() {
+ self.prefix += pad
+}
+
+func (self *Printer) Dedent() {
+ if len(self.prefix) >= padLength {
+ self.prefix = self.prefix[:len(self.prefix)-padLength]
+ }
+}
+
+func NewPrinter(out io.Writer) *Printer {
+ self := new(Printer)
+ self.out = out
+ return self
+}
+
+const space = " "
+const pad = space + space
+const padLength = len(pad)
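
Annotation: a usage sketch for the Printer above, assuming the vendored import path. Indent prefixes each subsequent line with two spaces, and format verbs are interpreted only when arguments are supplied, which is why pre-encoded strings such as "%3D" survive Print unchanged (see printer_test.go below).

	package main

	import (
		"os"

		"github.com/smartystreets/goconvey/convey/reporting"
	)

	func main() {
		p := reporting.NewPrinter(os.Stdout)
		p.Println("story:")
		p.Indent()
		p.Println("assertion %d: %s", 1, "ok") // formatted: arguments supplied
		p.Println("raw %3D kept as-is")        // no arguments: no Sprintf pass
		p.Dedent()
	}
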
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/printer_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/printer_test.go
new file mode 100644
index 00000000000..94202d5ac97
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/printer_test.go
@@ -0,0 +1,181 @@
+package reporting
+
+import "testing"
+
+func TestPrint(t *testing.T) {
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ const expected = "Hello, World!"
+
+ printer.Print(expected)
+
+ if file.buffer != expected {
+ t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer)
+ }
+}
+
+func TestPrintFormat(t *testing.T) {
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ template := "Hi, %s"
+ name := "Ralph"
+ expected := "Hi, Ralph"
+
+ printer.Print(template, name)
+
+ if file.buffer != expected {
+ t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer)
+ }
+}
+
+func TestPrintPreservesEncodedStrings(t *testing.T) {
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ const expected = "= -> %3D"
+ printer.Print(expected)
+
+ if file.buffer != expected {
+ t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer)
+ }
+}
+
+func TestPrintln(t *testing.T) {
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ const expected = "Hello, World!"
+
+ printer.Println(expected)
+
+ if file.buffer != expected+"\n" {
+ t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer)
+ }
+}
+
+func TestPrintlnFormat(t *testing.T) {
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ template := "Hi, %s"
+ name := "Ralph"
+ expected := "Hi, Ralph\n"
+
+ printer.Println(template, name)
+
+ if file.buffer != expected {
+ t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer)
+ }
+}
+
+func TestPrintlnPreservesEncodedStrings(t *testing.T) {
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ const expected = "= -> %3D"
+ printer.Println(expected)
+
+ if file.buffer != expected+"\n" {
+ t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer)
+ }
+}
+
+func TestPrintIndented(t *testing.T) {
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ const message = "Hello, World!\nGoodbye, World!"
+ const expected = " Hello, World!\n Goodbye, World!"
+
+ printer.Indent()
+ printer.Print(message)
+
+ if file.buffer != expected {
+ t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer)
+ }
+}
+
+func TestPrintDedented(t *testing.T) {
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ const expected = "Hello, World!\nGoodbye, World!"
+
+ printer.Indent()
+ printer.Dedent()
+ printer.Print(expected)
+
+ if file.buffer != expected {
+ t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer)
+ }
+}
+
+func TestPrintlnIndented(t *testing.T) {
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ const message = "Hello, World!\nGoodbye, World!"
+ const expected = " Hello, World!\n Goodbye, World!\n"
+
+ printer.Indent()
+ printer.Println(message)
+
+ if file.buffer != expected {
+ t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer)
+ }
+}
+
+func TestPrintlnDedented(t *testing.T) {
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+ const expected = "Hello, World!\nGoodbye, World!"
+
+ printer.Indent()
+ printer.Dedent()
+ printer.Println(expected)
+
+ if file.buffer != expected+"\n" {
+ t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer)
+ }
+}
+
+func TestDedentTooFarShouldNotPanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Error("Should not have panicked!")
+ }
+ }()
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+
+ printer.Dedent()
+
+ t.Log("Getting to this point without panicking means we passed.")
+}
+
+func TestInsert(t *testing.T) {
+ file := newMemoryFile()
+ printer := NewPrinter(file)
+
+ printer.Indent()
+ printer.Print("Hi")
+ printer.Insert(" there")
+ printer.Dedent()
+
+ expected := " Hi there"
+ if file.buffer != expected {
+ t.Errorf("Should have written '%s' but instead wrote '%s'.", expected, file.buffer)
+ }
+}
+
+////////////////// memoryFile ////////////////////
+
+type memoryFile struct {
+ buffer string
+}
+
+func (self *memoryFile) Write(p []byte) (n int, err error) {
+ self.buffer += string(p)
+ return len(p), nil
+}
+
+func (self *memoryFile) String() string {
+ return self.buffer
+}
+
+func newMemoryFile() *memoryFile {
+ return new(memoryFile)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/problems.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/problems.go
new file mode 100644
index 00000000000..9ae493ac3b7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/problems.go
@@ -0,0 +1,80 @@
+package reporting
+
+import "fmt"
+
+type problem struct {
+ silent bool
+ out *Printer
+ errors []*AssertionResult
+ failures []*AssertionResult
+}
+
+func (self *problem) BeginStory(story *StoryReport) {}
+
+func (self *problem) Enter(scope *ScopeReport) {}
+
+func (self *problem) Report(report *AssertionResult) {
+ if report.Error != nil {
+ self.errors = append(self.errors, report)
+ } else if report.Failure != "" {
+ self.failures = append(self.failures, report)
+ }
+}
+
+func (self *problem) Exit() {}
+
+func (self *problem) EndStory() {
+ self.show(self.showErrors, redColor)
+ self.show(self.showFailures, yellowColor)
+ self.prepareForNextStory()
+}
+func (self *problem) show(display func(), color string) {
+ if !self.silent {
+ fmt.Print(color)
+ }
+ display()
+ if !self.silent {
+ fmt.Print(resetColor)
+ }
+ self.out.Dedent()
+}
+func (self *problem) showErrors() {
+ for i, e := range self.errors {
+ if i == 0 {
+ self.out.Println("\nErrors:\n")
+ self.out.Indent()
+ }
+ self.out.Println(errorTemplate, e.File, e.Line, e.Error, e.StackTrace)
+ }
+}
+func (self *problem) showFailures() {
+ for i, f := range self.failures {
+ if i == 0 {
+ self.out.Println("\nFailures:\n")
+ self.out.Indent()
+ }
+ self.out.Println(failureTemplate, f.File, f.Line, f.Failure)
+ }
+}
+
+func (self *problem) Write(content []byte) (written int, err error) {
+ return len(content), nil // no-op
+}
+
+func NewProblemReporter(out *Printer) *problem {
+ self := new(problem)
+ self.out = out
+ self.prepareForNextStory()
+ return self
+}
+
+func NewSilentProblemReporter(out *Printer) *problem {
+ self := NewProblemReporter(out)
+ self.silent = true
+ return self
+}
+
+func (self *problem) prepareForNextStory() {
+ self.errors = []*AssertionResult{}
+ self.failures = []*AssertionResult{}
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/problems_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/problems_test.go
new file mode 100644
index 00000000000..92f0ca35cca
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/problems_test.go
@@ -0,0 +1,51 @@
+package reporting
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestNoopProblemReporterActions(t *testing.T) {
+ file, reporter := setup()
+ reporter.BeginStory(nil)
+ reporter.Enter(nil)
+ reporter.Exit()
+ expected := ""
+ actual := file.String()
+ if expected != actual {
+ t.Errorf("Expected: '(blank)'\nActual: '%s'", actual)
+ }
+}
+
+func TestReporterPrintsFailuresAndErrorsAtTheEndOfTheStory(t *testing.T) {
+ file, reporter := setup()
+ reporter.Report(NewFailureReport("failed"))
+ reporter.Report(NewErrorReport("error"))
+ reporter.Report(NewSuccessReport())
+ reporter.EndStory()
+
+ result := file.String()
+ if !strings.Contains(result, "Errors:\n") {
+ t.Errorf("Expected errors, found none.")
+ }
+ if !strings.Contains(result, "Failures:\n") {
+ t.Errorf("Expected failures, found none.")
+ }
+
+ // Each stack trace looks like: `* /path/to/file.go`, so look for `* `.
+ // With go 1.4+ there is a line in some stack traces that looks like this:
+ // `testing.(*M).Run(0x2082d60a0, 0x25b7c0)`
+ // So we can't just look for "*" anymore.
+ problemCount := strings.Count(result, "* ")
+ if problemCount != 2 {
+ t.Errorf("Expected one failure and one error (total of 2 '*' characters). Got %d", problemCount)
+ }
+}
+
+func setup() (file *memoryFile, reporter *problem) {
+ monochrome()
+ file = newMemoryFile()
+ printer := NewPrinter(file)
+ reporter = NewProblemReporter(printer)
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporter.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporter.go
new file mode 100644
index 00000000000..cce6c5e4388
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporter.go
@@ -0,0 +1,39 @@
+package reporting
+
+import "io"
+
+type Reporter interface {
+ BeginStory(story *StoryReport)
+ Enter(scope *ScopeReport)
+ Report(r *AssertionResult)
+ Exit()
+ EndStory()
+ io.Writer
+}
+
+type reporters struct{ collection []Reporter }
+
+func (self *reporters) BeginStory(s *StoryReport) { self.foreach(func(r Reporter) { r.BeginStory(s) }) }
+func (self *reporters) Enter(s *ScopeReport) { self.foreach(func(r Reporter) { r.Enter(s) }) }
+func (self *reporters) Report(a *AssertionResult) { self.foreach(func(r Reporter) { r.Report(a) }) }
+func (self *reporters) Exit() { self.foreach(func(r Reporter) { r.Exit() }) }
+func (self *reporters) EndStory() { self.foreach(func(r Reporter) { r.EndStory() }) }
+
+func (self *reporters) Write(contents []byte) (written int, err error) {
+ self.foreach(func(r Reporter) {
+ written, err = r.Write(contents)
+ })
+ return written, err
+}
+
+func (self *reporters) foreach(action func(Reporter)) {
+ for _, r := range self.collection {
+ action(r)
+ }
+}
+
+func NewReporters(collection ...Reporter) *reporters {
+ self := new(reporters)
+ self.collection = collection
+ return self
+}
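
Annotation: NewReporters composes reporters by plain fan-out, forwarding every lifecycle call to each wrapped reporter in registration order. A sketch, assuming the vendored import path:

	package main

	import (
		"os"

		"github.com/smartystreets/goconvey/convey/reporting"
	)

	func main() {
		out := reporting.NewPrinter(os.Stdout)
		combo := reporting.NewReporters(
			reporting.NewDotReporter(out),     // prints one symbol per assertion
			reporting.NewProblemReporter(out), // queues failures until EndStory
		)
		combo.Report(reporting.NewSuccessReport())       // dot: "."
		combo.Report(reporting.NewFailureReport("oops")) // dot: "x"; problem queues it
		combo.EndStory()                                 // problem prints the "Failures:" section
	}
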
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporter_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporter_test.go
new file mode 100644
index 00000000000..4e5caf63b2b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporter_test.go
@@ -0,0 +1,94 @@
+package reporting
+
+import (
+ "runtime"
+ "testing"
+)
+
+func TestEachNestedReporterReceivesTheCallFromTheContainingReporter(t *testing.T) {
+ fake1 := newFakeReporter()
+ fake2 := newFakeReporter()
+ reporter := NewReporters(fake1, fake2)
+
+ reporter.BeginStory(nil)
+ assertTrue(t, fake1.begun)
+ assertTrue(t, fake2.begun)
+
+ reporter.Enter(NewScopeReport("scope"))
+ assertTrue(t, fake1.entered)
+ assertTrue(t, fake2.entered)
+
+ reporter.Report(NewSuccessReport())
+ assertTrue(t, fake1.reported)
+ assertTrue(t, fake2.reported)
+
+ reporter.Exit()
+ assertTrue(t, fake1.exited)
+ assertTrue(t, fake2.exited)
+
+ reporter.EndStory()
+ assertTrue(t, fake1.ended)
+ assertTrue(t, fake2.ended)
+
+ content := []byte("hi")
+ written, err := reporter.Write(content)
+ assertTrue(t, fake1.written)
+ assertTrue(t, fake2.written)
+ assertEqual(t, written, len(content))
+ assertNil(t, err)
+
+}
+
+func assertTrue(t *testing.T, value bool) {
+ if !value {
+ _, _, line, _ := runtime.Caller(1)
+ t.Errorf("Value should have been true (but was false). See line %d", line)
+ }
+}
+
+func assertEqual(t *testing.T, expected, actual int) {
+ if actual != expected {
+ _, _, line, _ := runtime.Caller(1)
+ t.Errorf("Value should have been %d (but was %d). See line %d", expected, actual, line)
+ }
+}
+
+func assertNil(t *testing.T, err error) {
+ if err != nil {
+ _, _, line, _ := runtime.Caller(1)
+		t.Errorf("Error should have been <nil> (but was '%v'). See line %d", err, line)
+ }
+}
+
+type fakeReporter struct {
+ begun bool
+ entered bool
+ reported bool
+ exited bool
+ ended bool
+ written bool
+}
+
+func newFakeReporter() *fakeReporter {
+ return &fakeReporter{}
+}
+
+func (self *fakeReporter) BeginStory(story *StoryReport) {
+ self.begun = true
+}
+func (self *fakeReporter) Enter(scope *ScopeReport) {
+ self.entered = true
+}
+func (self *fakeReporter) Report(report *AssertionResult) {
+ self.reported = true
+}
+func (self *fakeReporter) Exit() {
+ self.exited = true
+}
+func (self *fakeReporter) EndStory() {
+ self.ended = true
+}
+func (self *fakeReporter) Write(content []byte) (int, error) {
+ self.written = true
+ return len(content), nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey
new file mode 100644
index 00000000000..79982854b53
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reporting.goconvey
@@ -0,0 +1,2 @@
+#ignore
+-timeout=1s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reports.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reports.go
new file mode 100644
index 00000000000..712e6ade625
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/reports.go
@@ -0,0 +1,179 @@
+package reporting
+
+import (
+ "encoding/json"
+ "fmt"
+ "runtime"
+ "strings"
+
+ "github.com/smartystreets/goconvey/convey/gotest"
+)
+
+////////////////// ScopeReport ////////////////////
+
+type ScopeReport struct {
+ Title string
+ File string
+ Line int
+}
+
+func NewScopeReport(title string) *ScopeReport {
+ file, line, _ := gotest.ResolveExternalCaller()
+ self := new(ScopeReport)
+ self.Title = title
+ self.File = file
+ self.Line = line
+ return self
+}
+
+////////////////// ScopeResult ////////////////////
+
+type ScopeResult struct {
+ Title string
+ File string
+ Line int
+ Depth int
+ Assertions []*AssertionResult
+ Output string
+}
+
+func newScopeResult(title string, depth int, file string, line int) *ScopeResult {
+ self := new(ScopeResult)
+ self.Title = title
+ self.Depth = depth
+ self.File = file
+ self.Line = line
+ self.Assertions = []*AssertionResult{}
+ return self
+}
+
+/////////////////// StoryReport /////////////////////
+
+type StoryReport struct {
+ Test T
+ Name string
+ File string
+ Line int
+}
+
+func NewStoryReport(test T) *StoryReport {
+ file, line, name := gotest.ResolveExternalCaller()
+ name = removePackagePath(name)
+ self := new(StoryReport)
+ self.Test = test
+ self.Name = name
+ self.File = file
+ self.Line = line
+ return self
+}
+
+// name comes in looking like "github.com/smartystreets/goconvey/examples.TestName".
+// We only want the stuff after the last '.', which is the name of the test function.
+func removePackagePath(name string) string {
+ parts := strings.Split(name, ".")
+ return parts[len(parts)-1]
+}
+
+/////////////////// FailureView ////////////////////////
+
+// This struct is also declared in github.com/smartystreets/assertions.
+// The json struct tags should be equal in both declarations.
+type FailureView struct {
+ Message string `json:"Message"`
+ Expected string `json:"Expected"`
+ Actual string `json:"Actual"`
+}
+
+////////////////// AssertionResult //////////////////////
+
+type AssertionResult struct {
+ File string
+ Line int
+ Expected string
+ Actual string
+ Failure string
+ Error interface{}
+ StackTrace string
+ Skipped bool
+}
+
+func NewFailureReport(failure string) *AssertionResult {
+ report := new(AssertionResult)
+ report.File, report.Line = caller()
+ report.StackTrace = stackTrace()
+ parseFailure(failure, report)
+ return report
+}
+func parseFailure(failure string, report *AssertionResult) {
+ view := new(FailureView)
+ err := json.Unmarshal([]byte(failure), view)
+ if err == nil {
+ report.Failure = view.Message
+ report.Expected = view.Expected
+ report.Actual = view.Actual
+ } else {
+ report.Failure = failure
+ }
+}
+func NewErrorReport(err interface{}) *AssertionResult {
+ report := new(AssertionResult)
+ report.File, report.Line = caller()
+ report.StackTrace = fullStackTrace()
+ report.Error = fmt.Sprintf("%v", err)
+ return report
+}
+func NewSuccessReport() *AssertionResult {
+ return new(AssertionResult)
+}
+func NewSkipReport() *AssertionResult {
+ report := new(AssertionResult)
+ report.File, report.Line = caller()
+ report.StackTrace = fullStackTrace()
+ report.Skipped = true
+ return report
+}
+
+func caller() (file string, line int) {
+ file, line, _ = gotest.ResolveExternalCaller()
+ return
+}
+
+func stackTrace() string {
+ buffer := make([]byte, 1024*64)
+ n := runtime.Stack(buffer, false)
+ return removeInternalEntries(string(buffer[:n]))
+}
+func fullStackTrace() string {
+ buffer := make([]byte, 1024*64)
+ n := runtime.Stack(buffer, true)
+ return removeInternalEntries(string(buffer[:n]))
+}
+func removeInternalEntries(stack string) string {
+ lines := strings.Split(stack, newline)
+ filtered := []string{}
+ for _, line := range lines {
+ if !isExternal(line) {
+ filtered = append(filtered, line)
+ }
+ }
+ return strings.Join(filtered, newline)
+}
+func isExternal(line string) bool {
+ for _, p := range internalPackages {
+ if strings.Contains(line, p) {
+ return true
+ }
+ }
+ return false
+}
+
+// NOTE: any new packages that host goconvey packages will need to be added here!
+// An alternative is to scan the goconvey directory and then exclude stuff like
+// the examples package but that's nasty too.
+var internalPackages = []string{
+ "goconvey/assertions",
+ "goconvey/convey",
+ "goconvey/execution",
+ "goconvey/gotest",
+ "goconvey/reporting",
+}
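
Annotation: the trimming rule removePackagePath documents above, runnable standalone; only the text after the final '.' survives, which is the bare test function name.

	package main

	import (
		"fmt"
		"strings"
	)

	// removePackagePath keeps only the bare function name, as in reports.go.
	func removePackagePath(name string) string {
		parts := strings.Split(name, ".")
		return parts[len(parts)-1]
	}

	func main() {
		fmt.Println(removePackagePath("github.com/smartystreets/goconvey/examples.TestName")) // TestName
	}
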
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/statistics.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/statistics.go
new file mode 100644
index 00000000000..28e1d2071d4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/statistics.go
@@ -0,0 +1,89 @@
+package reporting
+
+import "fmt"
+
+func (self *statistics) BeginStory(story *StoryReport) {}
+
+func (self *statistics) Enter(scope *ScopeReport) {}
+
+func (self *statistics) Report(report *AssertionResult) {
+ if !self.failing && report.Failure != "" {
+ self.failing = true
+ }
+ if !self.erroring && report.Error != nil {
+ self.erroring = true
+ }
+ if report.Skipped {
+ self.skipped += 1
+ } else {
+ self.total++
+ }
+}
+
+func (self *statistics) Exit() {}
+
+func (self *statistics) EndStory() {
+ if !self.suppressed {
+ self.PrintSummary()
+ }
+}
+
+func (self *statistics) Suppress() {
+ self.suppressed = true
+}
+
+func (self *statistics) PrintSummary() {
+ self.reportAssertions()
+ self.reportSkippedSections()
+ self.completeReport()
+}
+func (self *statistics) reportAssertions() {
+ self.decideColor()
+ self.out.Print("\n%d total %s", self.total, plural("assertion", self.total))
+}
+func (self *statistics) decideColor() {
+ if self.failing && !self.erroring {
+ fmt.Print(yellowColor)
+ } else if self.erroring {
+ fmt.Print(redColor)
+ } else {
+ fmt.Print(greenColor)
+ }
+}
+func (self *statistics) reportSkippedSections() {
+ if self.skipped > 0 {
+ fmt.Print(yellowColor)
+ self.out.Print(" (one or more sections skipped)")
+ }
+}
+func (self *statistics) completeReport() {
+ fmt.Print(resetColor)
+ self.out.Print("\n")
+ self.out.Print("\n")
+}
+
+func (self *statistics) Write(content []byte) (written int, err error) {
+ return len(content), nil // no-op
+}
+
+func NewStatisticsReporter(out *Printer) *statistics {
+ self := statistics{}
+ self.out = out
+ return &self
+}
+
+type statistics struct {
+ out *Printer
+ total int
+ failing bool
+ erroring bool
+ skipped int
+ suppressed bool
+}
+
+func plural(word string, count int) string {
+ if count == 1 {
+ return word
+ }
+ return word + "s"
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/story.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/story.go
new file mode 100644
index 00000000000..9e73c971f8f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting/story.go
@@ -0,0 +1,73 @@
+// TODO: in order for this reporter to be completely honest
+// we need to retrofit to be more like the json reporter such that:
+// 1. it maintains ScopeResult collections, which count assertions
+// 2. it reports only after EndStory(), so that all tick marks
+// are placed near the appropriate title.
+// 3. Under unit test
+
+package reporting
+
+import (
+ "fmt"
+ "strings"
+)
+
+type story struct {
+ out *Printer
+ titlesById map[string]string
+ currentKey []string
+}
+
+func (self *story) BeginStory(story *StoryReport) {}
+
+func (self *story) Enter(scope *ScopeReport) {
+ self.out.Indent()
+
+ self.currentKey = append(self.currentKey, scope.Title)
+ ID := strings.Join(self.currentKey, "|")
+
+ if _, found := self.titlesById[ID]; !found {
+ self.out.Println("")
+ self.out.Print(scope.Title)
+ self.out.Insert(" ")
+ self.titlesById[ID] = scope.Title
+ }
+}
+
+func (self *story) Report(report *AssertionResult) {
+ if report.Error != nil {
+ fmt.Print(redColor)
+ self.out.Insert(error_)
+ } else if report.Failure != "" {
+ fmt.Print(yellowColor)
+ self.out.Insert(failure)
+ } else if report.Skipped {
+ fmt.Print(yellowColor)
+ self.out.Insert(skip)
+ } else {
+ fmt.Print(greenColor)
+ self.out.Insert(success)
+ }
+ fmt.Print(resetColor)
+}
+
+func (self *story) Exit() {
+ self.out.Dedent()
+ self.currentKey = self.currentKey[:len(self.currentKey)-1]
+}
+
+func (self *story) EndStory() {
+ self.titlesById = make(map[string]string)
+ self.out.Println("\n")
+}
+
+func (self *story) Write(content []byte) (written int, err error) {
+ return len(content), nil // no-op
+}
+
+func NewStoryReporter(out *Printer) *story {
+ self := new(story)
+ self.out = out
+ self.titlesById = make(map[string]string)
+ return self
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting_hooks_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting_hooks_test.go
new file mode 100644
index 00000000000..69125c3cf44
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/reporting_hooks_test.go
@@ -0,0 +1,317 @@
+package convey
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "path"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/smartystreets/goconvey/convey/reporting"
+)
+
+func TestSingleScopeReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ So(1, ShouldEqual, 1)
+ })
+
+ expectEqual(t, "Begin|A|Success|Exit|End", myReporter.wholeStory())
+}
+
+func TestNestedScopeReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ Convey("B", func() {
+ So(1, ShouldEqual, 1)
+ })
+ })
+
+ expectEqual(t, "Begin|A|B|Success|Exit|Exit|End", myReporter.wholeStory())
+}
+
+func TestFailureReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ So(1, ShouldBeNil)
+ })
+
+ expectEqual(t, "Begin|A|Failure|Exit|End", myReporter.wholeStory())
+}
+
+func TestFirstFailureEndsScopeExecution(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ So(1, ShouldBeNil)
+ So(nil, ShouldBeNil)
+ })
+
+ expectEqual(t, "Begin|A|Failure|Exit|End", myReporter.wholeStory())
+}
+
+func TestComparisonFailureDeserializedAndReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ So("hi", ShouldEqual, "bye")
+ })
+
+ expectEqual(t, "Begin|A|Failure(bye/hi)|Exit|End", myReporter.wholeStory())
+}
+
+func TestNestedFailureReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ Convey("B", func() {
+ So(2, ShouldBeNil)
+ })
+ })
+
+ expectEqual(t, "Begin|A|B|Failure|Exit|Exit|End", myReporter.wholeStory())
+}
+
+func TestSuccessAndFailureReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ So(nil, ShouldBeNil)
+ So(1, ShouldBeNil)
+ })
+
+ expectEqual(t, "Begin|A|Success|Failure|Exit|End", myReporter.wholeStory())
+}
+
+func TestIncompleteActionReportedAsSkipped(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ Convey("B", nil)
+ })
+
+ expectEqual(t, "Begin|A|B|Skipped|Exit|Exit|End", myReporter.wholeStory())
+}
+
+func TestSkippedConveyReportedAsSkipped(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ SkipConvey("B", func() {
+ So(1, ShouldEqual, 1)
+ })
+ })
+
+ expectEqual(t, "Begin|A|B|Skipped|Exit|Exit|End", myReporter.wholeStory())
+}
+
+func TestMultipleSkipsAreReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ Convey("0", func() {
+ So(nil, ShouldBeNil)
+ })
+
+ SkipConvey("1", func() {})
+ SkipConvey("2", func() {})
+
+ Convey("3", nil)
+ Convey("4", nil)
+
+ Convey("5", func() {
+ So(nil, ShouldBeNil)
+ })
+ })
+
+ expected := "Begin" +
+ "|A|0|Success|Exit|Exit" +
+ "|A|1|Skipped|Exit|Exit" +
+ "|A|2|Skipped|Exit|Exit" +
+ "|A|3|Skipped|Exit|Exit" +
+ "|A|4|Skipped|Exit|Exit" +
+ "|A|5|Success|Exit|Exit" +
+ "|End"
+
+ expectEqual(t, expected, myReporter.wholeStory())
+}
+
+func TestSkippedAssertionIsNotReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ SkipSo(1, ShouldEqual, 1)
+ })
+
+ expectEqual(t, "Begin|A|Skipped|Exit|End", myReporter.wholeStory())
+}
+
+func TestMultipleSkippedAssertionsAreNotReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ SkipSo(1, ShouldEqual, 1)
+ So(1, ShouldEqual, 1)
+ SkipSo(1, ShouldEqual, 1)
+ })
+
+ expectEqual(t, "Begin|A|Skipped|Success|Skipped|Exit|End", myReporter.wholeStory())
+}
+
+func TestErrorByManualPanicReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ panic("Gopher alert!")
+ })
+
+ expectEqual(t, "Begin|A|Error|Exit|End", myReporter.wholeStory())
+}
+
+func TestIterativeConveysReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ for x := 0; x < 3; x++ {
+ Convey(strconv.Itoa(x), func() {
+ So(x, ShouldEqual, x)
+ })
+ }
+ })
+
+ expectEqual(t, "Begin|A|0|Success|Exit|Exit|A|1|Success|Exit|Exit|A|2|Success|Exit|Exit|End", myReporter.wholeStory())
+}
+
+func TestNestedIterativeConveysReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func() {
+ for x := 0; x < 3; x++ {
+ Convey(strconv.Itoa(x), func() {
+ for y := 0; y < 3; y++ {
+ Convey("< "+strconv.Itoa(y), func() {
+ So(x, ShouldBeLessThan, y)
+ })
+ }
+ })
+ }
+ })
+
+ expectEqual(t, ("Begin|" +
+ "A|0|< 0|Failure|Exit|Exit|Exit|" +
+ "A|0|< 1|Success|Exit|Exit|Exit|" +
+ "A|0|< 2|Success|Exit|Exit|Exit|" +
+ "A|1|< 0|Failure|Exit|Exit|Exit|" +
+ "A|1|< 1|Failure|Exit|Exit|Exit|" +
+ "A|1|< 2|Success|Exit|Exit|Exit|" +
+ "A|2|< 0|Failure|Exit|Exit|Exit|" +
+ "A|2|< 1|Failure|Exit|Exit|Exit|" +
+ "A|2|< 2|Failure|Exit|Exit|Exit|" +
+ "End"), myReporter.wholeStory())
+}
+
+func TestEmbeddedAssertionReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ Convey("A", test, func(c C) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ c.So(r.FormValue("msg"), ShouldEqual, "ping")
+ }))
+ http.DefaultClient.Get(ts.URL + "?msg=ping")
+ })
+
+ expectEqual(t, "Begin|A|Success|Exit|End", myReporter.wholeStory())
+}
+
+func TestEmbeddedContextHelperReported(t *testing.T) {
+ myReporter, test := setupFakeReporter()
+
+ helper := func(c C) http.HandlerFunc {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ c.Convey("Embedded", func() {
+ So(r.FormValue("msg"), ShouldEqual, "ping")
+ })
+ })
+ }
+
+ Convey("A", test, func(c C) {
+ ts := httptest.NewServer(helper(c))
+ http.DefaultClient.Get(ts.URL + "?msg=ping")
+ })
+
+ expectEqual(t, "Begin|A|Embedded|Success|Exit|Exit|End", myReporter.wholeStory())
+}
+
+func expectEqual(t *testing.T, expected interface{}, actual interface{}) {
+ if expected != actual {
+ _, file, line, _ := runtime.Caller(1)
+ t.Errorf("Expected '%v' to be '%v' but it wasn't. See '%s' at line %d.",
+ actual, expected, path.Base(file), line)
+ }
+}
+
+func setupFakeReporter() (*fakeReporter, *fakeGoTest) {
+ myReporter := new(fakeReporter)
+ myReporter.calls = []string{}
+ testReporter = myReporter
+ return myReporter, new(fakeGoTest)
+}
+
+type fakeReporter struct {
+ calls []string
+}
+
+func (self *fakeReporter) BeginStory(story *reporting.StoryReport) {
+ self.calls = append(self.calls, "Begin")
+}
+
+func (self *fakeReporter) Enter(scope *reporting.ScopeReport) {
+ self.calls = append(self.calls, scope.Title)
+}
+
+func (self *fakeReporter) Report(report *reporting.AssertionResult) {
+ if report.Error != nil {
+ self.calls = append(self.calls, "Error")
+ } else if report.Failure != "" {
+ message := "Failure"
+ if report.Expected != "" || report.Actual != "" {
+ message += fmt.Sprintf("(%s/%s)", report.Expected, report.Actual)
+ }
+ self.calls = append(self.calls, message)
+ } else if report.Skipped {
+ self.calls = append(self.calls, "Skipped")
+ } else {
+ self.calls = append(self.calls, "Success")
+ }
+}
+
+func (self *fakeReporter) Exit() {
+ self.calls = append(self.calls, "Exit")
+}
+
+func (self *fakeReporter) EndStory() {
+ self.calls = append(self.calls, "End")
+}
+
+func (self *fakeReporter) Write(content []byte) (int, error) {
+ return len(content), nil // no-op
+}
+
+func (self *fakeReporter) wholeStory() string {
+ return strings.Join(self.calls, "|")
+}
+
+////////////////////////////////
+
+type fakeGoTest struct{}
+
+func (self *fakeGoTest) Fail() {}
+func (self *fakeGoTest) Fatalf(format string, args ...interface{}) {}
+
+var test t = new(fakeGoTest)
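
Annotation: the embedded-assertion tests above thread the scope's C value into handlers because a bare So on a foreign goroutine has no registered context (the noStackContext case in story_conventions_test.go, next file). A minimal sketch, assuming the usual goconvey import path:

	package convey_example

	import (
		"testing"

		. "github.com/smartystreets/goconvey/convey"
	)

	func TestBackgroundAssertion(t *testing.T) {
		Convey("parent", t, func(c C) {
			done := make(chan struct{})
			go func() {
				defer close(done)
				c.So(1+1, ShouldEqual, 2) // c.So binds the assertion to the parent scope
			}()
			<-done
		})
	}
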
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/story_conventions_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/story_conventions_test.go
new file mode 100644
index 00000000000..84832c78d5b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/convey/story_conventions_test.go
@@ -0,0 +1,175 @@
+package convey
+
+import (
+ "reflect"
+ "testing"
+)
+
+func expectPanic(t *testing.T, f string) interface{} {
+ r := recover()
+ if r != nil {
+ if cp, ok := r.(*conveyErr); ok {
+ if cp.fmt != f {
+ t.Error("Incorrect panic message.")
+ }
+ } else {
+ t.Errorf("Incorrect panic type. %s", reflect.TypeOf(r))
+ }
+ } else {
+ t.Error("Expected panic but none occurred")
+ }
+ return r
+}
+
+func TestMissingTopLevelGoTestReferenceCausesPanic(t *testing.T) {
+ output := map[string]bool{}
+
+ defer expectEqual(t, false, output["good"])
+ defer expectPanic(t, missingGoTest)
+
+ Convey("Hi", func() {
+ output["bad"] = true // this shouldn't happen
+ })
+}
+
+func TestMissingTopLevelGoTestReferenceAfterGoodExample(t *testing.T) {
+ output := map[string]bool{}
+
+ defer func() {
+ expectEqual(t, true, output["good"])
+ expectEqual(t, false, output["bad"])
+ }()
+ defer expectPanic(t, missingGoTest)
+
+ Convey("Good example", t, func() {
+ output["good"] = true
+ })
+
+ Convey("Bad example", func() {
+ output["bad"] = true // shouldn't happen
+ })
+}
+
+func TestExtraReferencePanics(t *testing.T) {
+ output := map[string]bool{}
+
+ defer expectEqual(t, false, output["bad"])
+ defer expectPanic(t, extraGoTest)
+
+ Convey("Good example", t, func() {
+ Convey("Bad example - passing in *testing.T a second time!", t, func() {
+ output["bad"] = true // shouldn't happen
+ })
+ })
+}
+
+func TestParseRegistrationMissingRequiredElements(t *testing.T) {
+ defer expectPanic(t, parseError)
+
+ Convey()
+}
+
+func TestParseRegistration_MissingNameString(t *testing.T) {
+ defer expectPanic(t, parseError)
+
+ Convey(func() {})
+}
+
+func TestParseRegistration_MissingActionFunc(t *testing.T) {
+ defer expectPanic(t, parseError)
+
+ Convey("Hi there", 12345)
+}
+
+func TestFailureModeNoContext(t *testing.T) {
+ Convey("Foo", t, func() {
+ done := make(chan int, 1)
+ go func() {
+ defer func() { done <- 1 }()
+ defer expectPanic(t, noStackContext)
+ So(len("I have no context"), ShouldBeGreaterThan, 0)
+ }()
+ <-done
+ })
+}
+
+func TestFailureModeDuplicateSuite(t *testing.T) {
+ Convey("cool", t, func() {
+ defer expectPanic(t, multipleIdenticalConvey)
+
+ Convey("dup", nil)
+ Convey("dup", nil)
+ })
+}
+
+func TestFailureModeIndeterminateSuiteNames(t *testing.T) {
+ defer expectPanic(t, differentConveySituations)
+
+ name := "bob"
+ Convey("cool", t, func() {
+ for i := 0; i < 3; i++ {
+ Convey(name, func() {})
+ name += "bob"
+ }
+ })
+}
+
+func TestFailureModeNestedIndeterminateSuiteNames(t *testing.T) {
+ defer expectPanic(t, differentConveySituations)
+
+ name := "bob"
+ Convey("cool", t, func() {
+ Convey("inner", func() {
+ for i := 0; i < 3; i++ {
+ Convey(name, func() {})
+ name += "bob"
+ }
+ })
+ })
+}
+
+func TestFailureModeParameterButMissing(t *testing.T) {
+ defer expectPanic(t, parseError)
+
+ prepare()
+
+ Convey("Foobar", t, FailureHalts)
+}
+
+func TestFailureModeParameterWithAction(t *testing.T) {
+ prepare()
+
+ Convey("Foobar", t, FailureHalts, func() {})
+}
+
+func TestExtraConveyParameters(t *testing.T) {
+ defer expectPanic(t, parseError)
+
+ prepare()
+
+ Convey("Foobar", t, FailureHalts, func() {}, "This is not supposed to be here")
+}
+
+func TestExtraConveyParameters2(t *testing.T) {
+ defer expectPanic(t, parseError)
+
+ prepare()
+
+ Convey("Foobar", t, func() {}, "This is not supposed to be here")
+}
+
+func TestExtraConveyParameters3(t *testing.T) {
+ defer expectPanic(t, parseError)
+
+ output := prepare()
+
+ Convey("A", t, func() {
+ output += "A "
+
+ Convey("B", func() {
+ output += "B "
+ }, "This is not supposed to be here")
+ })
+
+ expectEqual(t, "A ", output)
+}
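expectPanic above relies on the guarantee that deferred calls still run while a panic unwinds, and that recover() inside a deferred function returns the panic value and stops the unwinding. A minimal sketch of the same defer/recover assertion pattern on its own (names are illustrative):

    package main

    import "fmt"

    // mustPanic runs f and reports whether it panicked with exactly the wanted value.
    func mustPanic(f func(), want interface{}) (ok bool) {
    	defer func() {
    		// recover() is non-nil only while a panic is in flight.
    		ok = recover() == want
    	}()
    	f()
    	return false // reached only if f did not panic
    }

    func main() {
    	fmt.Println(mustPanic(func() { panic("boom") }, "boom")) // true
    	fmt.Println(mustPanic(func() {}, "boom"))                // false
    }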
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/dependencies.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/dependencies.go
new file mode 100644
index 00000000000..0839e27fdf2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/dependencies.go
@@ -0,0 +1,4 @@
+package main
+
+import _ "github.com/jtolds/gls"
+import _ "github.com/smartystreets/assertions"
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/doc_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/doc_test.go
new file mode 100644
index 00000000000..06ab7d0f9a3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/doc_test.go
@@ -0,0 +1 @@
+package main
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/assertion_examples_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/assertion_examples_test.go
new file mode 100644
index 00000000000..a933292a2e7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/assertion_examples_test.go
@@ -0,0 +1,125 @@
+package examples
+
+import (
+ "bytes"
+ "io"
+ "testing"
+ "time"
+
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+func TestAssertionsAreAvailableFromConveyPackage(t *testing.T) {
+ SetDefaultFailureMode(FailureContinues)
+ defer SetDefaultFailureMode(FailureHalts)
+
+ Convey("Equality assertions should be accessible", t, func() {
+ thing1a := thing{a: "asdf"}
+ thing1b := thing{a: "asdf"}
+ thing2 := thing{a: "qwer"}
+
+ So(1, ShouldEqual, 1)
+ So(1, ShouldNotEqual, 2)
+ So(1, ShouldAlmostEqual, 1.000000000000001)
+ So(1, ShouldNotAlmostEqual, 2, 0.5)
+ So(thing1a, ShouldResemble, thing1b)
+ So(thing1a, ShouldNotResemble, thing2)
+ So(&thing1a, ShouldPointTo, &thing1a)
+ So(&thing1a, ShouldNotPointTo, &thing1b)
+ So(nil, ShouldBeNil)
+ So(1, ShouldNotBeNil)
+ So(true, ShouldBeTrue)
+ So(false, ShouldBeFalse)
+ So(0, ShouldBeZeroValue)
+ })
+
+ Convey("Numeric comparison assertions should be accessible", t, func() {
+ So(1, ShouldBeGreaterThan, 0)
+ So(1, ShouldBeGreaterThanOrEqualTo, 1)
+ So(1, ShouldBeLessThan, 2)
+ So(1, ShouldBeLessThanOrEqualTo, 1)
+ So(1, ShouldBeBetween, 0, 2)
+ So(1, ShouldNotBeBetween, 2, 4)
+ So(1, ShouldBeBetweenOrEqual, 1, 2)
+ So(1, ShouldNotBeBetweenOrEqual, 2, 4)
+ })
+
+ Convey("Container assertions should be accessible", t, func() {
+ So([]int{1, 2, 3}, ShouldContain, 2)
+ So([]int{1, 2, 3}, ShouldNotContain, 4)
+ So(map[int]int{1: 1, 2: 2, 3: 3}, ShouldContainKey, 2)
+ So(map[int]int{1: 1, 2: 2, 3: 3}, ShouldNotContainKey, 4)
+ So(1, ShouldBeIn, []int{1, 2, 3})
+ So(4, ShouldNotBeIn, []int{1, 2, 3})
+ So([]int{}, ShouldBeEmpty)
+ So([]int{1}, ShouldNotBeEmpty)
+ So([]int{1, 2}, ShouldHaveLength, 2)
+ })
+
+ Convey("String assertions should be accessible", t, func() {
+ So("asdf", ShouldStartWith, "a")
+ So("asdf", ShouldNotStartWith, "z")
+ So("asdf", ShouldEndWith, "df")
+ So("asdf", ShouldNotEndWith, "as")
+ So("", ShouldBeBlank)
+ So("asdf", ShouldNotBeBlank)
+ So("asdf", ShouldContainSubstring, "sd")
+ So("asdf", ShouldNotContainSubstring, "af")
+ })
+
+ Convey("Panic recovery assertions should be accessible", t, func() {
+ So(panics, ShouldPanic)
+ So(func() {}, ShouldNotPanic)
+ So(panics, ShouldPanicWith, "Goofy Gophers!")
+ So(panics, ShouldNotPanicWith, "Guileless Gophers!")
+ })
+
+ Convey("Type-checking assertions should be accessible", t, func() {
+
+ // NOTE: Values or pointers may be checked. If a value is passed,
+ // it is converted to a pointer to that value, so that types whose
+ // methods take pointer receivers are still handled. Go allows a
+ // method with a value receiver to be called on a value or a pointer,
+ // but a method with a pointer receiver can only be called on a pointer.
+ // See:
+ // http://golang.org/doc/effective_go.html#pointers_vs_values
+ // http://golang.org/doc/effective_go.html#blank_implements
+ // http://blog.golang.org/laws-of-reflection
+
+ So(1, ShouldHaveSameTypeAs, 0)
+ So(1, ShouldNotHaveSameTypeAs, "1")
+
+ So(bytes.NewBufferString(""), ShouldImplement, (*io.Reader)(nil))
+ So("string", ShouldNotImplement, (*io.Reader)(nil))
+ })
+
+ Convey("Time assertions should be accessible", t, func() {
+ january1, _ := time.Parse(timeLayout, "2013-01-01 00:00")
+ january2, _ := time.Parse(timeLayout, "2013-01-02 00:00")
+ january3, _ := time.Parse(timeLayout, "2013-01-03 00:00")
+ january4, _ := time.Parse(timeLayout, "2013-01-04 00:00")
+ january5, _ := time.Parse(timeLayout, "2013-01-05 00:00")
+ oneDay, _ := time.ParseDuration("24h0m0s")
+
+ So(january1, ShouldHappenBefore, january4)
+ So(january1, ShouldHappenOnOrBefore, january1)
+ So(january2, ShouldHappenAfter, january1)
+ So(january2, ShouldHappenOnOrAfter, january2)
+ So(january3, ShouldHappenBetween, january2, january5)
+ So(january3, ShouldHappenOnOrBetween, january3, january5)
+ So(january1, ShouldNotHappenOnOrBetween, january2, january5)
+ So(january2, ShouldHappenWithin, oneDay, january3)
+ So(january5, ShouldNotHappenWithin, oneDay, january1)
+ So([]time.Time{january1, january2}, ShouldBeChronological)
+ })
+}
+
+type thing struct {
+ a string
+}
+
+func panics() {
+ panic("Goofy Gophers!")
+}
+
+const timeLayout = "2006-01-02 15:04"
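The (*io.Reader)(nil) arguments to ShouldImplement above use a standard Go trick: an interface type cannot be passed around as a value, so a typed nil pointer carries the type information instead, and reflection recovers it. A minimal sketch of that idiom (the general reflect approach, not necessarily the library's internals):

    package main

    import (
    	"bytes"
    	"fmt"
    	"io"
    	"reflect"
    )

    func main() {
    	// Recover the interface type from a typed nil pointer.
    	readerType := reflect.TypeOf((*io.Reader)(nil)).Elem()

    	// *bytes.Buffer has a Read method, so it implements io.Reader.
    	fmt.Println(reflect.TypeOf(bytes.NewBufferString("")).Implements(readerType)) // true

    	// A plain string does not.
    	fmt.Println(reflect.TypeOf("string").Implements(readerType)) // false
    }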
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/bowling_game.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/bowling_game.go
new file mode 100644
index 00000000000..547bf93d1c3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/bowling_game.go
@@ -0,0 +1,75 @@
+package examples
+
+// Game contains the state of a bowling game.
+type Game struct {
+ rolls []int
+ current int
+}
+
+// NewGame allocates and starts a new game of bowling.
+func NewGame() *Game {
+ game := new(Game)
+ game.rolls = make([]int, maxThrowsPerGame)
+ return game
+}
+
+// Roll rolls the ball and knocks down the number of pins specified by pins.
+func (self *Game) Roll(pins int) {
+ self.rolls[self.current] = pins
+ self.current++
+}
+
+// Score calculates and returns the player's current score.
+func (self *Game) Score() (sum int) {
+ for throw, frame := 0, 0; frame < framesPerGame; frame++ {
+ if self.isStrike(throw) {
+ sum += self.strikeBonusFor(throw)
+ throw += 1
+ } else if self.isSpare(throw) {
+ sum += self.spareBonusFor(throw)
+ throw += 2
+ } else {
+ sum += self.framePointsAt(throw)
+ throw += 2
+ }
+ }
+ return sum
+}
+
+// isStrike determines if a given throw is a strike or not. A strike is knocking
+// down all pins in one throw.
+func (self *Game) isStrike(throw int) bool {
+ return self.rolls[throw] == allPins
+}
+
+// strikeBonusFor calculates and returns the strike bonus for a throw.
+func (self *Game) strikeBonusFor(throw int) int {
+ return allPins + self.framePointsAt(throw+1)
+}
+
+// isSpare determines if a given frame is a spare or not. A spare is knocking
+// down all pins in one frame with two throws.
+func (self *Game) isSpare(throw int) bool {
+ return self.framePointsAt(throw) == allPins
+}
+
+// spareBonusFor calculates and returns the spare bonus for a throw.
+func (self *Game) spareBonusFor(throw int) int {
+ return allPins + self.rolls[throw+2]
+}
+
+// framePointsAt computes and returns the score in a frame specified by throw.
+func (self *Game) framePointsAt(throw int) int {
+ return self.rolls[throw] + self.rolls[throw+1]
+}
+
+const (
+ // allPins is the number of pins allocated per fresh throw.
+ allPins = 10
+
+ // framesPerGame is the number of frames per bowling game.
+ framesPerGame = 10
+
+ // maxThrowsPerGame is the maximum number of throws possible in a single game.
+ maxThrowsPerGame = 21
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/bowling_game_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/bowling_game_test.go
new file mode 100644
index 00000000000..18e997d44ae
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/bowling_game_test.go
@@ -0,0 +1,80 @@
+/*
+
+Reference: http://butunclebob.com/ArticleS.UncleBob.TheBowlingGameKata
+
+See the very first link (which happens to be the very first word of
+the first paragraph) on the page for a tutorial.
+
+*/
+
+package examples
+
+import (
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+func TestBowlingGameScoring(t *testing.T) {
+ Convey("Given a fresh score card", t, func() {
+ game := NewGame()
+
+ Convey("When all gutter balls are thrown", func() {
+ game.rollMany(20, 0)
+
+ Convey("The score should be zero", func() {
+ So(game.Score(), ShouldEqual, 0)
+ })
+ })
+
+ Convey("When all throws knock down only one pin", func() {
+ game.rollMany(20, 1)
+
+ Convey("The score should be 20", func() {
+ So(game.Score(), ShouldEqual, 20)
+ })
+ })
+
+ Convey("When a spare is thrown", func() {
+ game.rollSpare()
+ game.Roll(3)
+ game.rollMany(17, 0)
+
+ Convey("The score should include a spare bonus.", func() {
+ So(game.Score(), ShouldEqual, 16)
+ })
+ })
+
+ Convey("When a strike is thrown", func() {
+ game.rollStrike()
+ game.Roll(3)
+ game.Roll(4)
+ game.rollMany(16, 0)
+
+ Convey("The score should include a strike bonus.", func() {
+ So(game.Score(), ShouldEqual, 24)
+ })
+ })
+
+ Convey("When all strikes are thrown", func() {
+ game.rollMany(21, 10)
+
+ Convey("The score should be 300.", func() {
+ So(game.Score(), ShouldEqual, 300)
+ })
+ })
+ })
+}
+
+func (self *Game) rollMany(times, pins int) {
+ for x := 0; x < times; x++ {
+ self.Roll(pins)
+ }
+}
+func (self *Game) rollSpare() {
+ self.Roll(5)
+ self.Roll(5)
+}
+func (self *Game) rollStrike() {
+ self.Roll(10)
+}
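For reference, the expected totals follow directly from the scoring rules in bowling_game.go: the spare case scores frame one as 10 + 3 (the next roll) = 13 and frame two as 3 + 0 = 3, giving 16; the strike case scores frame one as 10 + 3 + 4 (the next two rolls) = 17 and frame two as 3 + 4 = 7, giving 24; and a perfect game is ten frames worth 10 + 10 + 10 = 30 each, giving 300.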
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/doc.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/doc.go
new file mode 100644
index 00000000000..dae661e18dc
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/doc.go
@@ -0,0 +1,5 @@
+// Package examples contains, well, examples of how to use goconvey to
+// specify behavior of a system under test. It contains a well-known example
+// by Robert C. Martin called "Bowling Game Kata" as well as another very
+// trivial example that demonstrates Reset() and some of the assertions.
+package examples
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/examples.goconvey b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/examples.goconvey
new file mode 100644
index 00000000000..b5c805fbf4e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/examples.goconvey
@@ -0,0 +1,12 @@
+// Uncomment the next line to disable the package when running the GoConvey UI:
+//IGNORE
+
+// Uncomment the next line to limit testing to the specified test function name pattern:
+//-run=TestAssertionsAreAvailableFromConveyPackage
+
+// Uncomment the next line to limit testing to those tests that don't bail when testing.Short() is true:
+//-short
+
+// include any additional `go test` flags or application-specific flags below:
+
+-timeout=1s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/simple_example_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/simple_example_test.go
new file mode 100644
index 00000000000..dadfd8136a3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/examples/simple_example_test.go
@@ -0,0 +1,36 @@
+package examples
+
+import (
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+func TestIntegerManipulation(t *testing.T) {
+ t.Parallel()
+
+ Convey("Given a starting integer value", t, func() {
+ x := 42
+
+ Convey("When incremented", func() {
+ x++
+
+ Convey("The value should be greater by one", func() {
+ So(x, ShouldEqual, 43)
+ })
+ Convey("The value should NOT be what it used to be", func() {
+ So(x, ShouldNotEqual, 42)
+ })
+ })
+ Convey("When decremented", func() {
+ x--
+
+ Convey("The value should be lesser by one", func() {
+ So(x, ShouldEqual, 41)
+ })
+ Convey("The value should NOT be what it used to be", func() {
+ So(x, ShouldNotEqual, 42)
+ })
+ })
+ })
+}
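doc.go above mentions Reset(), which none of these examples actually exercise. Reset registers a cleanup that runs after each sibling branch completes, which matters for state declared outside the re-executed scope closure. A minimal sketch under the same import, with an illustrative test name:

    package examples

    import (
    	"testing"

    	. "github.com/smartystreets/goconvey/convey"
    )

    func TestResetSketch(t *testing.T) {
    	x := 0 // lives outside the scope closure, so it persists between branches

    	Convey("Given a counter incremented in the scope", t, func() {
    		x++
    		Reset(func() {
    			x = 0 // runs after each branch below finishes
    		})

    		Convey("The first branch sees exactly one increment", func() {
    			So(x, ShouldEqual, 1)
    		})
    		Convey("So does the second, because Reset ran in between", func() {
    			So(x, ShouldEqual, 1)
    		})
    	})
    }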
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/goconvey.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/goconvey.go
new file mode 100644
index 00000000000..4d5fc0ef6d5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/goconvey.go
@@ -0,0 +1,280 @@
+// This executable provides an HTTP server that watches for file system changes
+// to .go files within the working directory (and all nested go packages).
+// Navigating to the configured host and port in a web browser will display the
+// latest results of running `go test` in each go package.
+package main
+
+import (
+ "flag"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "time"
+
+ "go/build"
+
+ "github.com/smartystreets/goconvey/web/server/api"
+ "github.com/smartystreets/goconvey/web/server/contract"
+ "github.com/smartystreets/goconvey/web/server/executor"
+ "github.com/smartystreets/goconvey/web/server/messaging"
+ "github.com/smartystreets/goconvey/web/server/parser"
+ "github.com/smartystreets/goconvey/web/server/system"
+ "github.com/smartystreets/goconvey/web/server/watch"
+)
+
+func init() {
+ flags()
+ folders()
+}
+func flags() {
+ flag.IntVar(&port, "port", 8080, "The port at which to serve http.")
+ flag.StringVar(&host, "host", "127.0.0.1", "The host at which to serve http.")
+ flag.DurationVar(&nap, "poll", quarterSecond, "The interval to wait between polling the file system for changes (default: 250ms).")
+ flag.IntVar(&packages, "packages", 10, "The number of packages to test in parallel. Higher == faster but more costly in terms of computing. (default: 10)")
+ flag.StringVar(&gobin, "gobin", "go", "The path to the 'go' binary (default: search on the PATH).")
+ flag.BoolVar(&cover, "cover", true, "Enable package-level coverage statistics. Requires Go 1.2+ and the go cover tool. (default: true)")
+ flag.IntVar(&depth, "depth", -1, "The directory scanning depth. If -1, scan infinitely deep directory structures. 0: scan working directory. 1+: Scan into nested directories, limited to value. (default: -1)")
+ flag.StringVar(&timeout, "timeout", "0", "The test execution timeout if none is specified in the *.goconvey file (default is '0', which is the same as not providing this option).")
+ flag.StringVar(&watchedSuffixes, "watchedSuffixes", ".go", "A comma-separated list of file suffixes to watch for modifications (default: .go).")
+ flag.StringVar(&excludedDirs, "excludedDirs", "vendor,node_modules", "A comma-separated list of directories to exclude from watching.")
+ flag.StringVar(&workDir, "workDir", "", "The goconvey working directory (default: current directory).")
+
+ log.SetOutput(os.Stdout)
+ log.SetFlags(log.LstdFlags | log.Lshortfile)
+}
+func folders() {
+ _, file, _, _ := runtime.Caller(0)
+ here := filepath.Dir(file)
+ static = filepath.Join(here, "/web/client")
+ reports = filepath.Join(static, "reports")
+}
+
+func main() {
+ flag.Parse()
+ log.Printf(initialConfiguration, host, port, nap, cover)
+
+ working := getWorkDir()
+ cover = coverageEnabled(cover, reports)
+ shell := system.NewShell(gobin, reports, cover, timeout)
+
+ watcherInput := make(chan messaging.WatcherCommand)
+ watcherOutput := make(chan messaging.Folders)
+ excludedDirItems := strings.Split(excludedDirs, `,`)
+ watcher := watch.NewWatcher(working, depth, nap, watcherInput, watcherOutput, watchedSuffixes, excludedDirItems)
+
+ parser := parser.NewParser(parser.ParsePackageResults)
+ tester := executor.NewConcurrentTester(shell)
+ tester.SetBatchSize(packages)
+
+ longpollChan := make(chan chan string)
+ executor := executor.NewExecutor(tester, parser, longpollChan)
+ server := api.NewHTTPServer(working, watcherInput, executor, longpollChan)
+ go runTestOnUpdates(watcherOutput, executor, server)
+ go watcher.Listen()
+ go launchBrowser(host, port)
+ serveHTTP(server)
+}
+
+func browserCmd() (string, bool) {
+ browser := map[string]string{
+ "darwin": "open",
+ "linux": "xdg-open",
+ "windows": "start",
+ }
+ cmd, ok := browser[runtime.GOOS]
+ return cmd, ok
+}
+
+func launchBrowser(host string, port int) {
+ browser, ok := browserCmd()
+ if !ok {
+ log.Printf("Skipped launching browser for this OS: %s", runtime.GOOS)
+ return
+ }
+
+ log.Printf("Launching browser on %s:%d", host, port)
+ url := fmt.Sprintf("http://%s:%d", host, port)
+ cmd := exec.Command(browser, url)
+
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ log.Println(err)
+ }
+ log.Println(string(output))
+}
+
+func runTestOnUpdates(queue chan messaging.Folders, executor contract.Executor, server contract.Server) {
+ for update := range queue {
+ log.Println("Received request from watcher to execute tests...")
+ packages := extractPackages(update)
+ output := executor.ExecuteTests(packages)
+ root := extractRoot(update, packages)
+ server.ReceiveUpdate(root, output)
+ }
+}
+
+func extractPackages(folderList messaging.Folders) []*contract.Package {
+ packageList := []*contract.Package{}
+ for _, folder := range folderList {
+ hasImportCycle := testFilesImportTheirOwnPackage(folder.Path)
+ packageList = append(packageList, contract.NewPackage(folder, hasImportCycle))
+ }
+ return packageList
+}
+
+func extractRoot(folderList messaging.Folders, packageList []*contract.Package) string {
+ path := packageList[0].Path
+ folder := folderList[path]
+ return folder.Root
+}
+
+// This function exists because of a bug in the go cover tool that
+// causes an infinite loop when you try to run `go test -cover`
+// on a package that has an import cycle defined in one of its
+// test files. Yuck.
+func testFilesImportTheirOwnPackage(packagePath string) bool {
+ meta, err := build.ImportDir(packagePath, build.AllowBinary)
+ if err != nil {
+ return false
+ }
+
+ for _, dependency := range meta.TestImports {
+ if dependency == meta.ImportPath {
+ return true
+ }
+ }
+ return false
+}
+
+func serveHTTP(server contract.Server) {
+ serveStaticResources()
+ serveAjaxMethods(server)
+ activateServer()
+}
+
+func serveStaticResources() {
+ http.Handle("/", http.FileServer(http.Dir(static)))
+}
+
+func serveAjaxMethods(server contract.Server) {
+ http.HandleFunc("/watch", server.Watch)
+ http.HandleFunc("/ignore", server.Ignore)
+ http.HandleFunc("/reinstate", server.Reinstate)
+ http.HandleFunc("/latest", server.Results)
+ http.HandleFunc("/execute", server.Execute)
+ http.HandleFunc("/status", server.Status)
+ http.HandleFunc("/status/poll", server.LongPollStatus)
+ http.HandleFunc("/pause", server.TogglePause)
+}
+
+func activateServer() {
+ log.Printf("Serving HTTP at: http://%s:%d\n", host, port)
+ err := http.ListenAndServe(fmt.Sprintf("%s:%d", host, port), nil)
+ if err != nil {
+ log.Println(err)
+ }
+}
+
+func coverageEnabled(cover bool, reports string) bool {
+ return (cover &&
+ goVersion_1_2_orGreater() &&
+ coverToolInstalled() &&
+ ensureReportDirectoryExists(reports))
+}
+func goVersion_1_2_orGreater() bool {
+ version := runtime.Version() // 'go1.2....'
+ major, minor := version[2], version[4]
+ version_1_2 := major >= byte('1') && minor >= byte('2')
+ if !version_1_2 {
+ log.Printf(pleaseUpgradeGoVersion, version)
+ return false
+ }
+ return true
+}
+func coverToolInstalled() bool {
+ working := getWorkDir()
+ command := system.NewCommand(working, "go", "tool", "cover").Execute()
+ installed := strings.Contains(command.Output, "Usage of 'go tool cover':")
+ if !installed {
+ log.Print(coverToolMissing)
+ return false
+ }
+ return true
+}
+func ensureReportDirectoryExists(reports string) bool {
+ result, err := exists(reports)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if result {
+ return true
+ }
+
+ if err := os.Mkdir(reports, 0755); err == nil {
+ return true
+ }
+
+ log.Printf(reportDirectoryUnavailable, reports)
+ return false
+}
+func exists(path string) (bool, error) {
+ _, err := os.Stat(path)
+ if err == nil {
+ return true, nil
+ }
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+}
+func getWorkDir() string {
+ working := ""
+ var err error
+ if workDir != "" {
+ working = workDir
+ } else {
+ working, err = os.Getwd()
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+ result, err := exists(working)
+ if err != nil {
+ log.Fatal(err)
+ }
+ if !result {
+ log.Fatalf("Path %s does not exist", working)
+ }
+ return working
+}
+
+var (
+ port int
+ host string
+ gobin string
+ nap time.Duration
+ packages int
+ cover bool
+ depth int
+ timeout string
+ watchedSuffixes string
+ excludedDirs string
+
+ static string
+ reports string
+
+ quarterSecond = time.Millisecond * 250
+ workDir string
+)
+
+const (
+ initialConfiguration = "Initial configuration: [host: %s] [port: %d] [poll: %v] [cover: %v]\n"
+ pleaseUpgradeGoVersion = "Go version is less than 1.2 (%s); please upgrade to the latest stable version to enable coverage reporting.\n"
+ coverToolMissing = "Go cover tool is not installed or not accessible: for Go < 1.5 run `go get golang.org/x/tools/cmd/cover`\nFor Go >= 1.5 run `go install $GOROOT/src/cmd/cover`\n"
+ reportDirectoryUnavailable = "Could not find or create the coverage report directory (at: '%s'). You probably won't see any coverage statistics...\n"
+)
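Because the server registers plain HTTP handlers (/status, /latest, and the rest above), its state can be inspected with any HTTP client. A minimal client sketch, assuming a goconvey server is already running on the default host and port shown above:

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    )

    func main() {
    	resp, err := http.Get("http://127.0.0.1:8080/status")
    	if err != nil {
    		fmt.Println("server not reachable:", err)
    		return
    	}
    	defer resp.Body.Close()

    	body, _ := io.ReadAll(resp.Body) // requires Go 1.16+; use ioutil.ReadAll on older toolchains
    	fmt.Println("current status:", string(body))
    }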
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/composer.html b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/composer.html
new file mode 100644
index 00000000000..e0d32409893
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/composer.html
@@ -0,0 +1,35 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta http-equiv="content-type" content="text/html; charset=UTF-8">
+ <title>GoConvey Composer</title>
+ <link rel="stylesheet" type="text/css" href="/resources/css/composer.css">
+ <script src="/resources/js/lib/markup.min.js"></script>
+ <script src="/resources/js/lib/taboverride.min.js"></script>
+ <script src="/resources/js/lib/jquery-2_1_0.min.js"></script>
+ <script src="/resources/js/composer.js"></script>
+ </head>
+ <body>
+ <header>
+ <h1>
+ <span class="logo">GoConvey</span>
+ <span class="afterlogo">Composer</span>
+ </h1>
+ </header>
+ <main>
+ <textarea id="input" placeholder="Type test cases here, one per line, with tab indentation"></textarea>
+ <div id="output"></div>
+ </main>
+
+<script id="tpl-convey" type="text/template">{{.}}{{if .|notTestFunc}}{{depth|indent}}Convey("{{title}}", {{if showT}}t, {{/if}}{{if stories|empty}}nil{{else}}func() {
+
+{{stories|recursivelyRender}}{{depth|indent}}}{{/if}})
+{{else}}func {{title|properCase|safeFunc}}(t *testing.T) {
+
+{{stories|recursivelyRender}}}
+{{/if}}
+{{/.}}</script>
+
+
+ </body>
+</html>
\ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/favicon.ico b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/favicon.ico
new file mode 100644
index 00000000000..bb3df78c2ab
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/favicon.ico
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/index.html b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/index.html
new file mode 100644
index 00000000000..6895c7c4936
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/index.html
@@ -0,0 +1,487 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <title>GoConvey</title>
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <link rel="stylesheet" href="/resources/css/font-awesome.min.css">
+ <link rel="stylesheet" href="/resources/css/tipsy.css">
+ <link rel="stylesheet" href="/resources/css/common.css">
+ <link rel="icon" class="favicon" href="/favicon.ico" type="image/vnd.microsoft.icon">
+ <link rel="shortcut icon" class="favicon" href="/favicon.ico" type="image/vnd.microsoft.icon">
+ <script src="/resources/js/lib/markup.min.js"></script>
+ <script src="/resources/js/lib/moment.min.js"></script>
+ <script src="/resources/js/lib/ansispan.js"></script>
+ <script src="/resources/js/lib/diff-match-patch.min.js"></script>
+ <script src="/resources/js/lib/jquery-2_1_0.min.js"></script>
+ <script src="/resources/js/lib/jquery-ui-1_10_3-custom.min.js"></script>
+ <script src="/resources/js/lib/jquery.pretty-text-diff.min.js"></script>
+ <script src="/resources/js/lib/jquery.tipsy.min.js"></script>
+
+ <!-- Script ordering is important -->
+ <script src="/resources/js/poller.js"></script>
+ <script src="/resources/js/convey.js"></script>
+ <script src="/resources/js/config.js"></script>
+ <script src="/resources/js/goconvey.js"></script>
+ </head>
+ <body>
+ <header>
+ <div class="overall ok">
+ <div class="status">PASS</div>
+ </div>
+
+ <div class="toggler narrow" data-toggle="controls">
+ Controls
+ </div>
+
+ <div id="controls" class="controls hide-narrow">
+ <div class="server-not-down">
+ <ul>
+ <li id="logo" title="Powered by GoConvey"><a href="http://goconvey.co" target="_blank">GoConvey</a></li>
+ </ul>
+
+ <div class="float-left" id="path-container">
+ <input type="text" id="path" placeholder="Watched directory" title="Change watched directory">
+ </div>
+
+ <ul class="float-right" id="control-buttons">
+ <li class="fa fa-pause" id="play-pause" title="Play/pause tests"></li>
+ <li class="fa fa-refresh" id="run-tests" title="Run tests"></li>
+ <li class="fa fa-history" id="show-history" title="Test history"></li>
+ <li class="fa fa-bell-o" id="toggle-notif" title="Toggle notifications"></li>
+ <li class="fa fa-cog" id="show-settings" title="Settings"></li>
+ <li class="fa fa-pencil-square-o" id="show-gen" title="Composer"></li>
+ </ul>
+ </div>
+ <div class="server-down">
+ <span class="flash">NOTICE:</span>
+ <span class="notice-message"><!-- Populated by Javascript --></span>
+ </div>
+
+ <hr class="clear">
+
+
+ <div class="expandable settings">
+ <div class="container">
+ <div class="setting">
+ <div class="setting-meta">
+ Theme
+ </div>
+ <div class="setting-val">
+ <ol class="enum" id="theme"><!-- Populated by Javascript --></ol>
+ <script id="tpl-theme-enum" type="text/template">
+ {{.}}<li data-theme="{{id}}">{{name}}</li>{{/.}}
+ </script>
+ </div>
+ </div>
+ <div class="setting">
+ <div class="setting-meta">
+ Default
+ </div>
+ <div class="setting-val">
+ <ol class="enum" id="pkg-expand-collapse">
+ <li data-pkg-expand-collapse="expanded">Expand All</li>
+ <li data-pkg-expand-collapse="collapsed">Collapse All</li>
+ </ol>
+ </div>
+ </div>
+ <div class="setting">
+ <div class="setting-meta">
+ Debug Output
+ </div>
+ <div class="setting-val">
+ <ol class="enum" id="show-debug-output">
+ <li data-show-debug-output="show">Show</li>
+ <li data-show-debug-output="hide">Hide</li>
+ </ol>
+ </div>
+ </div>
+ <div class="setting">
+ <div class="setting-meta">
+ Effects
+ </div>
+ <div class="setting-val">
+ <ol class="enum" id="ui-effects">
+ <li data-ui-effects="true">Cinematic</li>
+ <li data-ui-effects="false">Off</li>
+ </ol>
+ </div>
+ </div>
+ </div>
+ </div>
+
+ <div class="expandable history">
+ <div class="container"><!-- Populated by Javascript --></div>
+ <script id="tpl-history" type="text/template">
+ <div class="item {{overall.status.class}} frame-{{id}}" data-frameid="{{id}}">
+ <div class="status momentjs" id="frame-{{id}}"></div>
+ <div class="summary">{{overall.status.text|upcase}}<br>{{overall.passed}}/{{overall.assertions}} pass<br>{{overall.failures}} fail, {{overall.skipped}} skip</div>
+ </div>
+ </script>
+ </div>
+
+
+ </div>
+ </header>
+
+
+
+
+
+
+ <div class="frame cf">
+
+
+ <div class="col" id="col-1">
+ <div class="toggler" data-toggle="coverage">
+ Coverage
+ </div>
+ <div class="togglable" id="coverage"><!-- Populated by Javascript --></div>
+ <script id="tpl-coverage" type="text/template">
+ <div class="templated">
+ {{.}}
+ <div class="pkg-cover">
+ <div class="pkg-cover-bar" data-pkg="{{PackageName}}" data-width="{{Coverage|coveragePct}}"></div>
+ <div class="pkg-cover-name rtl pad-right{{if Coverage|coveragePct|equals>0}} no-coverage{{/if}}" title="{{Coverage|coveragePct}}% coverage">
+ <a class="fa fa-level-down" href="#pkg-{{_id}}" style="padding: 0 5px;"></a>{{if Coverage|more>0}}<a href="/reports/{{PackageName|coverageReportName}}.html" target="_blank">{{PackageName|boldPkgName}}</a>{{else}}{{PackageName|boldPkgName}}{{/if}}
+ </div>
+ </div>
+ {{/.}}
+ </div>
+ </script>
+
+
+
+ <div class="toggler" data-toggle="ignored">
+ Ignored
+ </div>
+ <div class="togglable" id="ignored"><!-- Populated by Javascript --></div>
+ <script id="tpl-ignored" type="text/template">
+ <div class="templated">
+ <div class="rtl pkg-list">
+ {{.}}
+ <div>
+ <a class="fa fa-level-down" href="#pkg-{{_id}}" style="padding: 0 5px;"></a>{{PackageName|boldPkgName}}
+ </div>
+ {{/.}}
+ </div>
+ </div>
+ </script>
+
+
+ <div class="toggler" data-toggle="notestfn">
+ No Test Functions
+ </div>
+ <div class="togglable" id="notestfn"><!-- Populated by Javascript --></div>
+ <script id="tpl-notestfn" type="text/template">
+ <div class="templated">
+ <div class="rtl pkg-list">
+ {{.}}
+ {{PackageName|boldPkgName}}<br>
+ {{/.}}
+ </div>
+ </div>
+ </script>
+
+
+ <div class="toggler" data-toggle="notestfiles">
+ No Test Files
+ </div>
+ <div class="togglable" id="notestfiles"><!-- Populated by Javascript --></div>
+ <script id="tpl-notestfiles" type="text/template">
+ <div class="templated">
+ <div class="rtl pkg-list">
+ {{.}}
+ {{PackageName|boldPkgName}}<br>
+ {{/.}}
+ </div>
+ </div>
+ </script>
+
+
+
+ <div class="toggler" data-toggle="nogofiles">
+ No Go Files
+ </div>
+ <div class="togglable" id="nogofiles"><!-- Populated by Javascript --></div>
+ <script id="tpl-nogofiles" type="text/template">
+ <div class="templated">
+ <div class="rtl pkg-list">
+ {{.}}
+ {{PackageName|boldPkgName}}<br>
+ {{/.}}
+ </div>
+ </div>
+ </script>
+ </div>
+
+
+
+
+
+
+
+
+
+
+
+ <div class="col" id="col-2">
+
+ <div class="toggler buildfailures" data-toggle="buildfailures">
+ Build Failures
+ </div>
+ <div class="togglable buildfailures" id="buildfailures"><!-- Populated by Javascript --></div>
+ <script id="tpl-buildfailures" type="text/template">
+ <div class="templated">
+ {{.}}
+ <div class="buildfail">
+ <div class="buildfail-pkg"><i class="fa fa-wrench"></i>&nbsp; {{PackageName|boldPkgName}}</div>
+ <div class="buildfail-output">{{BuildOutput|htmlSafe|ansiColours}}</div>
+ </div>
+ {{/.}}
+ </div>
+ </script>
+
+
+
+ <div class="toggler panics" data-toggle="panics">
+ Panics
+ </div>
+ <div class="togglable panics" id="panics"><!-- Populated by Javascript --></div>
+ <script id="tpl-panics" type="text/template">
+ <div class="templated">
+ {{.}}
+ <div class="panic">
+ <div class="panic-pkg">
+ <i class="fa fa-bomb"></i>&nbsp; {{_pkg|boldPkgName}}
+ </div>
+ <div class="panic-details">
+ <div class="panic-story">
+ <div class="panic-file">
+ {{if File|notempty}}
+ <a href="goconvey://open/?url=file://{{File|url}}&line={{Line}}">{{File|relativePath}}{{if Line|more>0}}:{{Line}}{{/if}}&nbsp; <i class="fa fa-external-link"></i></a>
+ {{else}}
+ <b>{{TestName}}</b>
+ {{/if}}
+ </div>
+ {{if _path|notempty}}
+ {{_path}}
+ <div class="story-links{{if Depth|more>-1}} depth-{{Depth}}{{/if}}"><a href="#test-{{_id}}">{{Title}}</a></div>
+ {{/_path}}
+ {{/if}}
+ {{if StackTrace|notempty}}<div class="depth-{{_maxDepth}} panic-summary">{{Error}}</div>{{/if}}
+ </div>
+ <div class="panic-output">{{if StackTrace|empty}}{{Error|htmlSafe|ansiColours}}{{else}}{{StackTrace|htmlSafe|ansiColours}}{{/if}}</div>
+ </div>
+ {{/.}}
+ </div>
+ </script>
+
+
+
+
+ <div class="toggler failures" data-toggle="failures">
+ Failures
+ </div>
+ <div class="togglable failures" id="failures"><!-- Populated by Javascript --></div>
+ <script id="tpl-failures" type="text/template">
+ <div class="templated">
+ {{.}}
+ <div class="failure">
+ <div class="failure-pkg"><i class="fa fa-file-code-o"></i>&nbsp; {{_pkg|boldPkgName}}</div>
+ <div class="failure-details">
+ <div class="failure-story">
+ <div class="failure-file">
+ {{if File|notempty}}
+ <a href="goconvey://open/?url=file://{{File|url}}&line={{Line}}">{{File|relativePath}}{{if Line|more>0}}:{{Line}}{{/if}}&nbsp; <i class="fa fa-external-link"></i></a>
+ {{else}}
+ <b class="test-name-link"><a href="#test-{{_id}}">{{TestName}}</a></b>
+ {{/if}}
+ </div>
+ {{if _path|notempty}}
+ {{_path}}
+ <div class="story-links{{if Depth|more>-1}} depth-{{Depth}}{{/if}}"><a href="#test-{{_id}}">{{Title}}</a></div>
+ {{/_path}}
+ {{/if}}
+ </div>
+ <div class="failure-output">{{if Failure|notempty}}{{Failure|htmlSafe|ansiColours}}{{else}}{{if Message|notempty}}{{Message|htmlSafe|ansiColours}}{{else}}{{StackTrace|htmlSafe|ansiColours}}{{/if}}{{/if}}</div>
+ {{if .|needsDiff}}
+ <table class="diffviewer">
+ <tr>
+ <td class="exp">Expected</td>
+ <td class="original">{{Expected|htmlSafe|ansiColours}}</td>
+ </tr>
+ <tr>
+ <td class="act">Actual</td>
+ <td class="changed">{{Actual|htmlSafe|ansiColours}}</td>
+ </tr>
+ <tr>
+ <td>Diff</td>
+ <td class="diff"></td>
+ </tr>
+ </table>
+ {{/if}}
+ </div>
+ </div>
+ {{/.}}
+ </div>
+ </script>
+
+
+
+
+ <div class="toggler stories" data-toggle="stories">
+ Stories
+ </div>
+ <div class="togglable stories" id="stories"><!-- Populated by Javascript --></div>
+
+ <script id="tpl-stories" type="text/template">
+ <table>
+ {{.}}
+ <tr class="story-pkg expanded pkg-{{_id}}" data-pkg="{{_id}}" data-pkg-name="{{PackageName}}" data-pkg-state="expanded" id="pkg-{{_id}}">
+ <td colspan="2">
+ <span class="pkg-toggle-container">
+ <a href="javascript:" class="fa fa-minus-square-o pkg-toggle"></a>
+ <span class="toggle-all-pkg">ALL</span>
+ </span>
+ </td>
+ <td class="story-pkg-name">
+ {{PackageName|boldPkgName}}
+ </td>
+ <td class="story-pkg-summary">
+ {{if _panicked|more>0}}<span class="story-pkg-panic-count"><span class="statusicon panic"><i class="fa fa-bolt"></i></span> {{_panicked}}
+ &nbsp;</span>{{/if}}
+ {{if _failed|more>0}}<span class="story-pkg-failure-count"><span class="statusicon fail">&#10007;</span> {{_failed}}
+ &nbsp;</span>{{/if}}
+ {{if _passed|more>0}}<span class="story-pkg-pass-count"><span class="statusicon ok">&#10003;</span> {{_passed}}</span>{{/if}}
+ {{if _skipped|more>0}}<span class="story-pkg-skip-count">&nbsp; <span class="statusicon skip"><b>S</b></span> {{_skipped}}</span>{{/if}}
+ </td>
+ <td class="story-pkg-watch-td">
+ {{if Outcome|equals>disabled}}
+ <span class="fa fa-lg fa-eye-slash disabled" title="Disabled"></span>
+ {{else}}
+ <a class="fa fa-lg ignore {{if Outcome|notequals>ignored}}fa-eye unwatch{{else}}fa-eye-slash watch clr-red{{/if}}" href="javascript:" title="Toggle ignore" data-pkg="{{PackageName}}"></a>
+ {{/if}}
+ </td>
+ </tr>
+ {{TestResults}}
+ <tr id="test-{{_id}}" class="story-line {{if _status.class}}{{_status.class}}{{else}}skip{{/if}} test-{{_id}} pkg-{{_pkgid}}">
+ <td class="story-line-status"></td>
+ <td class="story-line-summary-container">
+
+ {{if Stories|empty}} <!-- Not apparently a GoConvey test -->
+ {{if _passed}}<span class="statusicon ok">&#10003;</span><br>{{/if}}
+ {{if _failed}}<span class="statusicon fail">&#10007;</span><br>{{/if}}
+ {{if _panicked}}<span class="statusicon panic"><i class="fa fa-bolt"></i></span><br>{{/if}}
+ {{if _skipped}}<span class="statusicon skip"><b>S</b></span><br>{{/if}}
+ {{/if}}
+
+ </td>
+ <td colspan="3" class="depth-0 story-line-desc">
+ <b>{{TestName|htmlSafe|ansiColours}}</b>
+ {{if Message}}<div class="message">{{Message|htmlSafe|ansiColours}}</div>{{/if}}
+ </td>
+ </tr>
+
+
+
+ {{Stories}}
+ <tr class="story-line {{if _status.class}}{{_status.class}}{{else}}skip{{/if}} pkg-{{_pkgid}}" id="test-{{_id}}">
+ <td class="story-line-status"></td>
+ <td class="story-line-summary-container">
+
+ {{if _passed}}<span class="statusicon ok">&#10003; {{_passed}}</span><br>{{/if}}
+ {{if _failed}}<span class="statusicon fail">&#10007; {{_failed}}</span><br>{{/if}}
+ {{if _panicked}}<span class="statusicon panic"><i class="fa fa-bolt"></i> {{_panicked}}</span><br>{{/if}}
+ {{if _skipped}}<span class="statusicon skip"><b>S</b> {{_skipped}}</span><br>{{/if}}
+
+ </td>
+ <td colspan="3" class="depth-{{Depth}} story-line-desc">
+ {{Title|htmlSafe|ansiColours}}
+ {{if Output}}<div class="message">{{Output|htmlSafe|ansiColours}}</div>{{/if}}
+ {{if _failed}}
+ {{Assertions}}
+ {{if _failed}}
+ <div class="failure">
+ <div class="failure-details">
+ <div class="failure-output">{{if Failure|notempty}}{{Failure|htmlSafe|ansiColours}}{{else}}{{if Message|notempty}}{{Message|htmlSafe|ansiColours}}{{else}}
+ {{StackTrace|htmlSafe|ansiColours}}{{/if}}{{/if}}</div>
+ </div>
+ </div>
+ {{/if}}
+ {{/Assertions}}
+ {{/if}}
+ {{if _panicked}}
+ {{Assertions}}
+ {{if _panicked}}
+ <div class="panic">
+ <div class="panic-details">
+ <div class="panic-output">{{if Panic|notempty}}{{Panic|htmlSafe|ansiColours}}{{else}}{{if Message|notempty}}{{Message|htmlSafe|ansiColours}}{{else}}{{StackTrace|htmlSafe|ansiColours}}{{/if}}{{/if}}</div>
+ </div>
+ </div>
+ {{/if}}
+ {{/Assertions}}
+ {{/if}}
+ </td>
+ </tr>
+ {{/Stories}}
+
+
+ {{/TestResults}}
+ {{/.}}
+ </table>
+ </script>
+ </div>
+
+
+
+ <div class="col" id="col-3">
+ <div class="toggler" data-toggle="log">
+ LOG
+ </div>
+ <div class="togglable log" id="log"><!-- Populated by Javascript --></div>
+ <script id="tpl-log-line" type="text/template">
+ <div class="line"><span class="timestamp">[{{time}}]</span> {{msg|clean}}</div>
+ </script>
+ </div>
+
+ </div>
+
+ <footer>
+ <section>
+ <span id="summary">
+ <span class="info" id="time"><!-- Populated by Javascript --></span>
+ <span class="info" id="last-test-container">Last test <span id="last-test"><!-- Populated by Javascript --></span></span>
+ <span class="info" id="assert-count"><!-- Populated by Javascript --></span>
+ <span class="info fail-clr" id="fail-count"><!-- Populated by Javascript --></span>
+ <span class="info panic-clr" id="panic-count"><!-- Populated by Javascript --></span>
+ <span class="info skip-clr" id="skip-count"><!-- Populated by Javascript --></span>
+ <span class="momentjs" id="duration"><!-- Populated by Javascript --></span>
+ </span>
+ <span id="narrow-summary">
+ <span id="narrow-assert-count"><!-- Populated by Javascript --></span>:
+ <span class="fail-clr" id="narrow-fail-count"><!-- Populated by Javascript --></span> /
+ <span class="panic-clr" id="narrow-panic-count"><!-- Populated by Javascript --></span> /
+ <span class="skip-clr" id="narrow-skip-count"><!-- Populated by Javascript --></span>
+ </span>
+ </section>
+ <section>
+ <span class="server-not-down">
+ <span class="recording">
+ <i class="fa fa-circle"></i> LIVE
+ </span>
+ <span class="replay" title="Click for current test results">
+ <i class="fa fa-play"></i> REPLAY
+ </span>
+ <span class="paused">
+ <i class="fa fa-pause"></i> PAUSED
+ </span>
+ </span>
+ <span class="server-down">
+ <i class="fa fa-exclamation-triangle fa-lg flash"></i>
+ <span class="notice-message"><!-- Populated by Javascript --></span>
+ </span>
+ </section>
+ </footer>
+
+ </body>
+</html>
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/common.css b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/common.css
new file mode 100644
index 00000000000..7aa56d1f668
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/common.css
@@ -0,0 +1,962 @@
+/* Eric Meyer's Reset CSS v2.0 */
+html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td,article,aside,canvas,details,embed,figure,figcaption,footer,header,hgroup,menu,nav,output,ruby,section,summary,time,mark,audio,video{border:0;font-size:100%;font:inherit;vertical-align:baseline;margin:0;padding:0}article,aside,details,figcaption,figure,footer,header,hgroup,menu,nav,section{display:block}body{line-height:1}ol,ul{list-style:none}blockquote,q{quotes:none}blockquote:before,blockquote:after,q:before,q:after{content:none}table{border-collapse:collapse;border-spacing:0}
+
+@font-face {
+ font-family: 'Open Sans';
+ src: local("Open Sans"), url("../fonts/Open_Sans/OpenSans-Regular.ttf");
+}
+@font-face {
+ font-family: 'Orbitron';
+ src: local("Orbitron"), url("../fonts/Orbitron/Orbitron-Regular.ttf");
+}
+@font-face {
+ font-family: 'Oswald';
+ src: local("Oswald"), url("../fonts/Oswald/Oswald-Regular.ttf");
+}
+
+::selection {
+ background: #87AFBC;
+ color: #FFF;
+ text-shadow: none;
+}
+
+::-moz-selection {
+ background: #87AFBC;
+ color: #FFF;
+ text-shadow: none;
+}
+
+::-webkit-input-placeholder {
+ font-style: italic;
+}
+:-moz-placeholder {
+ font-style: italic;
+}
+::-moz-placeholder {
+ font-style: italic;
+}
+:-ms-input-placeholder {
+ font-style: italic;
+}
+
+
+
+html, body {
+ height: 100%;
+ min-height: 100%;
+}
+
+body {
+ -webkit-transform: translate3d(0, 0, 0); /* attempts to fix Chrome glitching on Mac */
+ background-position: fixed;
+ background-repeat: no-repeat;
+ font-family: Menlo, Monaco, 'Courier New', monospace;
+ line-height: 1.5em;
+ font-size: 14px;
+ overflow: hidden;
+ display: none;
+}
+
+a {
+ text-decoration: none;
+}
+
+a:hover {
+ text-decoration: underline;
+}
+
+a.fa {
+ text-decoration: none;
+}
+
+b {
+ font-weight: bold;
+}
+
+i {
+ font-style: italic;
+}
+
+hr {
+ border: 0;
+ background: 0;
+ height: 0;
+ margin: 0;
+ padding: 0;
+}
+
+input[type=text] {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+
+ background: none;
+ border: none;
+ border-bottom-width: 1px;
+ border-bottom-style: solid;
+ outline: none;
+ padding-bottom: .1em;
+ font: 300 18px/1.5em 'Open Sans', sans-serif;
+}
+
+.overall {
+ padding: 30px 0 15px;
+ position: relative;
+ z-index: 50;
+}
+
+.status {
+ line-height: 1em;
+ font-family: 'Orbitron', monospace;
+ text-align: center;
+}
+
+.overall .status {
+ font-size: 46px;
+ letter-spacing: 5px;
+ text-transform: uppercase;
+ white-space: nowrap;
+}
+
+.toggler {
+ font-size: 10px;
+ padding: 3px 5px;
+ text-decoration: none;
+ text-transform: uppercase;
+ cursor: pointer;
+ line-height: 1.5em;
+}
+
+.toggler.narrow {
+ display: none;
+}
+
+.togglable {
+ overflow-x: auto;
+}
+
+.controls {
+ font-size: 18px;
+ line-height: 1em;
+}
+
+.controls li {
+ text-decoration: none;
+ display: block;
+ float: left;
+ padding: .75em;
+ cursor: pointer;
+}
+
+.server-down {
+ display: none;
+ text-align: center;
+ padding: 10px 0;
+}
+
+footer .server-down {
+ padding: 8px 15px;
+ text-transform: uppercase;
+}
+
+#logo {
+ font-family: 'Oswald', 'Impact', 'Arial Black', sans-serif;
+}
+
+#path-container {
+ margin-top: .4em;
+}
+
+#path {
+ width: 100%;
+ text-align: center;
+ border-bottom-width: 0;
+}
+
+#path:hover,
+#path:focus {
+ border-bottom-width: 1px;
+}
+
+.expandable {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+
+ border-top-width: 1px;
+ border-top-style: solid;
+ overflow-y: hidden;
+ overflow-x: auto;
+ text-align: center;
+ white-space: nowrap;
+ display: none;
+}
+
+.settings {
+ white-space: normal;
+ overflow-x: auto;
+ white-space: nowrap;
+}
+
+.settings .setting-meta,
+.settings .setting-val {
+ display: inline-block;
+}
+
+.settings .container {
+ padding: 15px 0;
+}
+
+.settings .setting {
+ font-size: 13px;
+ display: inline-block;
+ margin-right: 5%;
+}
+
+.settings .setting:first-child {
+ margin-left: 5%;
+}
+
+.settings .setting .setting-meta {
+ text-align: right;
+ padding-right: 1em;
+ vertical-align: middle;
+ max-width: 150px;
+}
+
+.settings .setting .setting-meta small {
+ font-size: 8px;
+ text-transform: uppercase;
+ display: block;
+ line-height: 1.25em;
+}
+
+.history .container {
+ padding: 15px 0 15px 25%;
+}
+
+.history .item {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+
+ transition: all .1s linear;
+ -moz-transition: all .1s linear;
+ -webkit-transition: all .1s linear;
+ -o-transition: all .1s linear;
+
+ display: inline-block;
+ text-align: left;
+ margin: 0 20px;
+ padding: 20px;
+ height: 100%;
+ width: 175px;
+ opacity: .7;
+ cursor: pointer;
+}
+
+.history .item:hover {
+ opacity: 1;
+}
+
+.history .item:nth-child(odd):hover {
+ -webkit-transform: scale(1.1) rotate(5deg);
+ -moz-transform: scale(1.1) rotate(5deg);
+}
+
+.history .item:nth-child(even):hover {
+ -webkit-transform: scale(1.1) rotate(-5deg);
+ -moz-transform: scale(1.1) rotate(-5deg);
+}
+
+.history .item .summary {
+ font: 14px/1.5em 'Monaco', 'Menlo', 'Courier New', monospace;
+}
+
+.history .item.selected {
+ opacity: 1;
+}
+
+.history .status {
+ font-size: 13px;
+}
+
+
+
+
+
+
+.frame {
+ position: relative;
+ z-index: 0;
+ width: 100%;
+}
+
+.frame .col {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+
+ border-right-width: 1px;
+ border-right-style: solid;
+ float: left;
+ height: 100%;
+ overflow-y: auto;
+}
+
+.frame .col:first-child {
+ border-left: none;
+}
+
+.frame .col:last-child {
+ border-right: none;
+}
+
+
+#col-1 {
+ width: 15%;
+}
+
+#col-2 {
+ width: 60%;
+}
+
+#col-3 {
+ width: 25%;
+}
+
+#coverage {
+ font-size: 10px;
+ white-space: nowrap;
+}
+
+#coverage-color-template {
+ display: none;
+}
+
+.rtl {
+ direction: rtl;
+}
+
+.pkg-cover {
+ position: relative;
+}
+
+.pkg-cover a {
+ color: inherit !important;
+ text-decoration: none;
+}
+
+.pkg-cover-bar {
+ position: absolute;
+ top: 0;
+ left: 0;
+ height: 100%;
+ z-index: 1;
+}
+
+.pkg-cover-name {
+ position: relative;
+ z-index: 2;
+}
+
+.pkg-cover-name,
+.pkg-list {
+ font-family: 'Menlo', monospace;
+ font-size: 10px;
+ padding-right: 2%;
+ white-space: nowrap;
+}
+
+.buildfail-pkg,
+.panic-pkg,
+.failure-pkg {
+ padding: 5px 10px;
+ font: 14px 'Open Sans', sans-serif;
+}
+
+.buildfail-output,
+.panic-output,
+.failure-output {
+ padding: 10px;
+ font-size: 12px;
+ line-height: 1.25em;
+ overflow-y: auto;
+ white-space: pre-wrap;
+ font-family: 'Menlo', monospace;
+}
+
+.panic-story,
+.failure-story {
+ font-size: 10px;
+ line-height: 1.25em;
+ font-family: 'Open Sans', sans-serif;
+}
+
+.panic-summary {
+ font-size: 14px;
+ font-weight: bold;
+ line-height: 1.5em;
+}
+
+.panic-file,
+.failure-file {
+ font-size: 13px;
+ line-height: 1.5em;
+}
+
+.diffviewer {
+ border-collapse: collapse;
+ width: 100%;
+}
+
+.diffviewer td {
+ border-bottom-width: 1px;
+ border-bottom-style: solid;
+ padding: 2px 5px;
+ font-size: 14px;
+}
+
+.diffviewer .original,
+.diffviewer .changed,
+.diffviewer .diff {
+ white-space: pre-wrap;
+}
+
+.diffviewer tr:first-child td {
+ border-top-width: 1px;
+ border-top-style: solid;
+}
+
+.diffviewer td:first-child {
+ width: 65px;
+ font-size: 10px;
+ border-right-width: 1px;
+ border-right-style: solid;
+ text-transform: uppercase;
+}
+
+.diff ins {
+ text-decoration: none;
+}
+
+
+
+#stories table {
+ width: 100%;
+}
+
+
+.story-pkg {
+ cursor: pointer;
+}
+
+.story-pkg td {
+ font: 16px 'Open Sans', sans-serif;
+ white-space: nowrap;
+ padding: 10px;
+}
+
+.story-pkg td:first-child {
+ width: 1em;
+}
+
+.story-line {
+ font: 12px 'Open Sans', sans-serif;
+ cursor: default;
+}
+
+.story-line td {
+ padding-top: 7px;
+ padding-bottom: 7px;
+}
+
+.pkg-toggle-container {
+ position: relative;
+ display: inline-block;
+}
+
+.toggle-all-pkg {
+ font-size: 10px;
+ text-transform: uppercase;
+ position: absolute;
+ padding: 5px;
+ font-family: 'Menlo', 'Open Sans', sans-serif;
+ display: none;
+}
+
+.story-line-summary-container {
+ padding: 0 10px 0 10px;
+ white-space: nowrap;
+ width: 35px;
+ text-align: center;
+}
+
+.story-line-status {
+ width: 6px;
+ min-width: 6px;
+ height: 100%;
+}
+
+.story-line-desc {
+ padding: 5px;
+}
+
+.story-line-desc .message {
+ font-family: 'Menlo', monospace;
+ white-space: pre-wrap;
+}
+
+.statusicon {
+ font: 14px 'Open Sans', sans-serif;
+}
+
+.statusicon.skip {
+ font-size: 16px;
+}
+
+
+.depth-0 { padding-left: 1.5em !important; }
+.depth-1 { padding-left: 3em !important; }
+.depth-2 { padding-left: 4.5em !important; }
+.depth-3 { padding-left: 6em !important; }
+.depth-4 { padding-left: 7.5em !important; }
+.depth-5 { padding-left: 9em !important; }
+.depth-6 { padding-left: 10.5em !important; }
+.depth-7 { padding-left: 11em !important; }
+
+
+.log {
+ font-size: 11px;
+ line-height: 1.5em;
+ padding: 5px;
+ padding-bottom: .5em;
+}
+
+.log .line {
+ white-space: pre-wrap;
+ padding-left: 2em;
+ text-indent: -2em;
+}
+
+
+
+
+
+footer {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+
+ position: absolute;
+ bottom: 0;
+ left: 0;
+ padding: 5px 15px;
+ width: 100%;
+ border-top-width: 1px;
+ border-top-style: solid;
+ font-size: 12px;
+}
+
+footer section {
+ float: left;
+}
+
+footer section:first-child {
+ width: 80%;
+}
+
+footer section:last-child {
+ text-align: right;
+ width: 20%;
+}
+
+footer .info {
+ padding: 0 10px;
+}
+
+footer .info:first-child {
+ padding-left: 0;
+}
+
+#narrow-summary {
+ display: none;
+}
+
+footer .replay,
+footer .paused {
+ display: none;
+}
+
+footer .replay {
+ cursor: pointer;
+}
+
+footer .server-down .notice-message {
+ font-size: 10px;
+}
+
+
+
+
+.rel {
+ position: relative;
+}
+
+.text-right {
+ text-align: right;
+}
+
+.text-center {
+ text-align: center;
+}
+
+.text-left {
+ text-align: left;
+}
+
+.float-left {
+ float: left;
+}
+
+.float-right {
+ float: right;
+}
+
+.clear {
+ clear: both;
+}
+
+.nowrap {
+ white-space: nowrap;
+}
+
+.clr-blue {
+ color: #2B597F;
+}
+
+.show {
+ display: block;
+}
+
+.hide {
+ display: none;
+}
+
+.enum {
+ cursor: pointer;
+ display: inline-block;
+ font-size: 12px;
+ border-width: 1px;
+ border-style: solid;
+ border-radius: 9px;
+ vertical-align: middle;
+}
+
+.enum > li {
+ display: block;
+ float: left;
+ padding: 5px 12px;
+ border-left-width: 1px;
+ border-left-style: solid;
+}
+
+.enum > li:first-child {
+ border-left: 0px;
+ border-top-left-radius: 8px;
+ border-bottom-left-radius: 8px;
+}
+
+.enum > li:last-child {
+ border-top-right-radius: 8px;
+ border-bottom-right-radius: 8px;
+}
+
+
+
+
+
+
+
+
+.disabled {
+ cursor: default !important;
+ background: transparent !important;
+}
+
+.spin-once {
+ -webkit-animation: spin 0.5s 1 ease;
+ animation: spin 0.5s 1 ease;
+}
+
+.spin-slowly {
+ -webkit-animation: spin .75s infinite linear;
+ animation: spin .75s infinite linear;
+}
+
+.throb {
+ -webkit-animation: throb 2.5s ease-in-out infinite;
+ -moz-animation: throb 2.5s ease-in-out infinite;
+ -o-animation: throb 2.5s ease-in-out infinite;
+ animation: throb 2.5s ease-in-out infinite;
+}
+
+.flash {
+ -webkit-animation: flash 4s linear infinite;
+ -moz-animation: flash 4s linear infinite;
+ -o-animation: flash 4s linear infinite;
+ animation: flash 4s linear infinite;
+}
+
+
+
+
+
+/* Clearfix */
+.cf:before,
+.cf:after {
+ content: " ";
+ display: table;
+}
+.cf:after {
+ clear: both;
+}
+
+
+
+
+
+
+@media (max-width: 1099px) {
+ #col-1 {
+ width: 25%;
+ }
+
+ #col-2 {
+ width: 75%;
+ border-right: none;
+ }
+
+ #col-3 {
+ display: none;
+ }
+
+ footer #duration {
+ display: none;
+ }
+}
+
+@media (max-width: 900px) {
+ footer #last-test-container {
+ display: none;
+ }
+}
+
+@media (min-width: 850px) and (max-width: 1220px) {
+ #path {
+ font-size: 14px;
+ margin-top: 5px;
+ }
+}
+
+@media (min-width: 700px) and (max-width: 849px) {
+ #path {
+ font-size: 12px;
+ margin-top: 8px;
+ }
+}
+
+@media (max-width: 799px) {
+ #col-1 {
+ display: none;
+ }
+
+ #col-2 {
+ width: 100%;
+ }
+
+ #stories .story-pkg-name {
+ font-size: 14px;
+ }
+
+ #stories .story-pkg-watch-td {
+ display: none;
+ }
+}
+
+@media (max-width: 700px) {
+ #path-container {
+ display: none;
+ }
+
+ footer #time {
+ display: none;
+ }
+
+ footer .info {
+ padding: 0 5px;
+ }
+
+ footer .server-down .notice-message {
+ display: none;
+ }
+}
+
+@media (max-width: 499px) {
+ .toggler.narrow {
+ display: block;
+ }
+
+ #show-gen {
+ display: none;
+ }
+
+ .hide-narrow {
+ display: none;
+ }
+
+ .show-narrow {
+ display: block;
+ }
+
+ .overall .status {
+ font-size: 28px;
+ letter-spacing: 1px;
+ }
+
+ .toggler {
+ display: block;
+ }
+
+ .controls ul {
+ text-align: center;
+ float: none;
+ }
+
+ .controls li {
+ display: inline-block;
+ float: none;
+ }
+
+ .enum > li {
+ float: left;
+ display: block;
+ }
+
+ #logo {
+ display: none;
+ }
+
+ .history .item {
+ margin: 0 5px;
+ }
+
+ .history .item .summary {
+ display: none;
+ }
+
+ .server-down {
+ font-size: 14px;
+ }
+
+ #stories .story-pkg-name {
+ font-size: 16px;
+ }
+
+ #stories .not-pkg-name {
+ display: none;
+ }
+
+ footer #duration {
+ display: none;
+ }
+
+ footer #summary {
+ display: none;
+ }
+
+ footer #narrow-summary {
+ display: inline;
+ }
+}
+
+
+
+
+/**
+ Custom CSS Animations
+**/
+
+
+
+@-webkit-keyframes throb {
+ 0% { opacity: 1; }
+ 50% { opacity: .35; }
+ 100% { opacity: 1; }
+}
+@-moz-keyframes throb {
+ 0% { opacity: 1; }
+ 50% { opacity: .35; }
+ 100% { opacity: 1; }
+}
+@-o-keyframes throb {
+ 0% { opacity: 1; }
+ 50% { opacity: .35; }
+ 100% { opacity: 1; }
+}
+@keyframes throb {
+ 0% { opacity: 1; }
+ 50% { opacity: .35; }
+ 100% { opacity: 1; }
+}
+
+
+@-webkit-keyframes flash {
+ 70% { opacity: 1; }
+ 90% { opacity: 0; }
+ 98% { opacity: 0; }
+ 100% { opacity: 1; }
+}
+@-moz-keyframes flash {
+ 70% { opacity: 1; }
+ 90% { opacity: 0; }
+ 98% { opacity: 0; }
+ 100% { opacity: 1; }
+}
+@-o-keyframes flash {
+ 70% { opacity: 1; }
+ 90% { opacity: 0; }
+ 98% { opacity: 0; }
+ 100% { opacity: 1; }
+}
+@keyframes flash {
+ 70% { opacity: 1; }
+ 90% { opacity: 0; }
+ 98% { opacity: 0; }
+ 100% { opacity: 1; }
+}
+
+
+
+
+
+
+
+
+
+
+
+/*
+#coverage {
+ perspective: 1000;
+}
+
+#coverage .pkg-cover {
+ -webkit-transition: .7s;
+ transform-style: preserve-3d;
+ position: relative;
+}
+
+#coverage:hover .pkg-cover {
+ -webkit-transform: rotateX(180deg);
+}*/
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/composer.css b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/composer.css
new file mode 100644
index 00000000000..6dd344ba5c5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/composer.css
@@ -0,0 +1,65 @@
+/* Eric Meyer's Reset CSS v2.0 */
+html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td,article,aside,canvas,details,embed,figure,figcaption,footer,header,hgroup,menu,nav,output,ruby,section,summary,time,mark,audio,video{border:0;font-size:100%;font:inherit;vertical-align:baseline;margin:0;padding:0}article,aside,details,figcaption,figure,footer,header,hgroup,menu,nav,section{display:block}body{line-height:1}ol,ul{list-style:none}blockquote,q{quotes:none}blockquote:before,blockquote:after,q:before,q:after{content:none}table{border-collapse:collapse;border-spacing:0}
+
+@font-face {
+ font-family: 'Open Sans';
+ src: local("Open Sans"), url("../fonts/Open_Sans/OpenSans-Regular.ttf");
+}
+@font-face {
+ font-family: 'Oswald';
+ src: local("Oswald"), url("../fonts/Oswald/Oswald-Regular.ttf");
+}
+
+body {
+ font-family: 'Open Sans', 'Helvetica Neue', sans-serif;
+ font-size: 16px;
+}
+
+header {
+ background: #2C3F49;
+ padding: 10px;
+}
+
+.logo {
+ font-family: Oswald, sans-serif;
+ font-size: 24px;
+ margin-right: 5px;
+ color: #DDD;
+}
+
+.afterlogo {
+ font-size: 12px;
+ text-transform: uppercase;
+ position: relative;
+ top: -3px;
+ color: #999;
+}
+
+#input,
+#output {
+ -webkit-box-sizing: border-box;
+ -moz-box-sizing: border-box;
+ box-sizing: border-box;
+
+ padding: 15px;
+ height: 80%;
+ float: left;
+ overflow: auto;
+}
+
+#input {
+ border: 0;
+ font: 300 18px/1.5em 'Open Sans';
+ resize: none;
+ outline: none;
+ width: 50%;
+}
+
+#output {
+ width: 50%;
+ display: inline-block;
+ background: #F0F0F0;
+ font: 14px/1.25em 'Menlo', 'Monaco', 'Courier New', monospace;
+ border-left: 1px solid #CCC;
+ white-space: pre-wrap;
+}
\ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/font-awesome.min.css b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/font-awesome.min.css
new file mode 100644
index 00000000000..40403f53189
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/font-awesome.min.css
@@ -0,0 +1,5 @@
+/*!
+ * Font Awesome 4.1.0 by @davegandy - http://fontawesome.io - @fontawesome
+ * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
+ */
+ @font-face{font-family:'FontAwesome';src:url('../fonts/FontAwesome/fontawesome-webfont.eot?v=4.1.0');src:url('../fonts/FontAwesome/fontawesome-webfont.eot?#iefix&v=4.1.0') format('embedded-opentype'),url('../fonts/FontAwesome/fontawesome-webfont.woff?v=4.1.0') format('woff'),url('../fonts/FontAwesome/fontawesome-webfont.ttf?v=4.1.0') format('truetype'),url('../fonts/FontAwesome/fontawesome-webfont.svg?v=4.1.0#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:spin 2s infinite linear;-moz-animation:spin 2s infinite linear;-o-animation:spin 2s infinite linear;animation:spin 2s infinite linear}@-moz-keyframes spin{0%{-moz-transform:rotate(0deg)}100%{-moz-transform:rotate(359deg)}}@-webkit-keyframes spin{0%{-webkit-transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg)}}@-o-keyframes spin{0%{-o-transform:rotate(0deg)}100%{-o-transform:rotate(359deg)}}@keyframes spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1);-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2);-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=3);-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1);-webkit-transform:scale(-1, 1);-moz-transform:scale(-1, 1);-ms-transform:scale(-1, 1);-o-transform:scale(-1, 1);transform:scale(-1, 1)}.fa-flip-vertical{filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1);-webkit-transform:scale(1, -1);-moz-transform:scale(1, -1);-ms-transform:scale(1, -1);-o-transform:scale(1, -1);transform:scale(1, -1)}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{content:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:before{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-minus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arrow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper-square:before,.fa-pied-piper:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-circle-thin:before{content:"\f1db"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}
\ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark-bigtext.css b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark-bigtext.css
new file mode 100644
index 00000000000..38d71340208
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark-bigtext.css
@@ -0,0 +1,400 @@
+/* This is a fork of the dark.css theme. The only changes from dark.css are near the very end. */
+
+::-webkit-scrollbar {
+ width: 10px;
+ height: 10px;
+}
+
+::-webkit-scrollbar-corner {
+ background: transparent;
+}
+
+::-webkit-scrollbar-thumb {
+ background-color: rgba(255, 255, 255, .35);
+ border-radius: 10px;
+}
+
+body {
+ color: #D0D0D0;
+ background: fixed #040607;
+ background: fixed -moz-linear-gradient(top, hsl(200,27%,2%) 0%, hsl(203,29%,26%) 100%);
+ background: fixed -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(200,27%,2%)), color-stop(100%,hsl(203,29%,26%)));
+ background: fixed -webkit-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%);
+ background: fixed -o-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%);
+ background: fixed -ms-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%);
+ background: fixed linear-gradient(to bottom, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#040607', endColorstr='#2f4756',GradientType=0 );
+}
+
+a,
+.toggle-all-pkg {
+ color: #247D9E;
+}
+
+a:hover,
+.toggle-all-pkg:hover {
+ color: #33B5E5;
+}
+
+input[type=text] {
+ border-bottom-color: #33B5E5;
+ color: #BBB;
+}
+
+::-webkit-input-placeholder {
+ color: #555;
+}
+:-moz-placeholder {
+ color: #555;
+}
+::-moz-placeholder {
+ color: #555;
+}
+:-ms-input-placeholder {
+ color: #555;
+}
+
+.overall {
+ /*
+ Using box-shadow here is not very performant but allows us
+ to animate the change of the background color much more easily.
+	This box-shadow is an ALTERNATIVE, not a supplement, to using gradients
+ in this case.
+ */
+ box-shadow: inset 0 150px 100px -110px rgba(0, 0, 0, .5);
+}
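+
+/* A hypothetical companion rule (not in the upstream theme) showing why:
+   keeping the tint in an inset shadow leaves a flat background color
+   underneath, and flat colors can be transitioned while gradients cannot.
+   .overall { transition: background .4s ease; } */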
+
+.overall.ok {
+ background: #688E00;
+}
+
+.overall.fail {
+ background: #DB8700;
+}
+
+.overall.panic {
+ background: #A80000;
+}
+
+.overall.buildfail {
+ background: #A4A8AA;
+}
+
+.overall .status {
+ color: #EEE;
+}
+
+.server-down {
+ background: rgba(255, 45, 45, 0.55);
+ color: #FFF;
+}
+
+.toggler {
+ background: #132535;
+}
+
+.toggler:hover {
+ background: #1C374F;
+}
+
+.controls {
+ border-bottom: 1px solid #33B5E5;
+}
+
+.controls li {
+ color: #2A5A84;
+}
+
+.controls li:hover {
+ background: #132535;
+ color: #33B5E5;
+}
+
+.sel {
+ background: #33B5E5 !important;
+ color: #FFF !important;
+}
+
+.pkg-cover-name {
+ text-shadow: 1px 1px 0px #000;
+}
+
+.pkg-cover-name b,
+.story-pkg-name b {
+ color: #FFF;
+ font-weight: bold;
+}
+
+.pkg-cover:hover,
+.pkg-cover:hover b {
+ color: #FFF;
+}
+
+.expandable {
+ border-top-color: #33B5E5;
+}
+
+.expandable {
+ background: rgba(0, 0, 0, .2);
+}
+
+.history .item.ok {
+ background: #3f5400;
+ background: -moz-linear-gradient(top, hsl(75,100%,16%) 0%, hsl(76,100%,28%) 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(75,100%,16%)), color-stop(100%,hsl(76,100%,28%)));
+ background: -webkit-linear-gradient(top, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%);
+ background: -o-linear-gradient(top, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%);
+ background: -ms-linear-gradient(top, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%);
+ background: linear-gradient(to bottom, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#3f5400', endColorstr='#698f00',GradientType=0 );
+}
+
+.history .item.fail {
+ background: #7f4e00;
+ background: -moz-linear-gradient(top, hsl(37,100%,25%) 0%, hsl(37,100%,43%) 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(37,100%,25%)), color-stop(100%,hsl(37,100%,43%)));
+ background: -webkit-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%);
+ background: -o-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%);
+ background: -ms-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%);
+ background: linear-gradient(to bottom, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#7f4e00', endColorstr='#db8700',GradientType=0 );
+}
+
+.history .item.panic {
+ background: #660000;
+ background: -moz-linear-gradient(top, hsl(0,100%,20%) 0%, hsl(0,100%,33%) 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(0,100%,20%)), color-stop(100%,hsl(0,100%,33%)));
+ background: -webkit-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%);
+ background: -o-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%);
+ background: -ms-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%);
+ background: linear-gradient(to bottom, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#660000', endColorstr='#a80000',GradientType=0 );
+}
+
+.history .item.buildfail {
+ background: #282f33;
+ background: -moz-linear-gradient(top, hsl(202,12%,18%) 0%, hsl(208,5%,48%) 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(202,12%,18%)), color-stop(100%,hsl(208,5%,48%)));
+ background: -webkit-linear-gradient(top, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%);
+ background: -o-linear-gradient(top, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%);
+ background: -ms-linear-gradient(top, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%);
+ background: linear-gradient(to bottom, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#282f33', endColorstr='#757c82',GradientType=0 );
+}
+
+.enum {
+ border-color: #2B597F;
+}
+
+.enum > li {
+ border-left-color: #2B597F;
+}
+
+.enum > li:hover {
+ background: rgba(55, 114, 163, .25);
+}
+
+.group {
+ background: -moz-linear-gradient(top, rgba(16,59,71,0) 0%, rgba(16,59,71,1) 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,rgba(16,59,71,0)), color-stop(100%,rgba(16,59,71,1)));
+ background: -webkit-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%);
+ background: -o-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%);
+ background: -ms-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%);
+ background: linear-gradient(to top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#00103b47', endColorstr='#103b47',GradientType=0 );
+}
+
+.stats {
+ color: #FFF;
+}
+
+.error {
+ color: #F58888 !important;
+ background: rgba(255, 45, 45, 0.35) !important;
+}
+
+.spin-slowly,
+.spin-once {
+ color: #33B5E5 !important;
+}
+
+.frame .col,
+footer {
+ border-color: #33B5E5;
+}
+
+footer {
+ background: rgba(0, 0, 0, .5);
+}
+
+footer .recording .fa {
+ color: #CC0000;
+}
+
+footer .replay .fa {
+ color: #33B5E5;
+}
+
+footer .paused .fa {
+ color: #AAA;
+}
+
+footer .recording.replay .fa {
+ color: #33B5E5;
+}
+
+
+
+.buildfail-pkg {
+ background: rgba(255, 255, 255, .1);
+}
+.buildfail-output {
+ background: rgba(255, 255, 255, .2);
+}
+
+
+
+.panic-pkg {
+ background: rgba(255, 0, 0, .3);
+}
+.panic-story {
+ padding: 10px;
+ background: rgba(255, 0, 0, .1);
+}
+.panic-story a,
+.panic-summary {
+ color: #E94A4A;
+}
+.panic-output {
+ color: #FF8181;
+}
+
+
+
+.failure-pkg {
+ background: rgba(255, 153, 0, .42);
+}
+.failure-story {
+ padding: 10px;
+ background: rgba(255, 153, 0, .1);
+}
+.failure-story a {
+ color: #FFB518;
+}
+.failure-output {
+ color: #FFBD47;
+}
+.failure-file {
+ color: #FFF;
+}
+
+
+.diffviewer td {
+ border-color: rgba(0, 0, 0, .3);
+}
+
+/* prettyTextDiff expected/deleted colors */
+.diffviewer .exp,
+.diff del {
+ background: rgba(131, 252, 131, 0.22);
+}
+
+/* prettyTextDiff actual/inserted colors */
+.diffviewer .act,
+.diff ins {
+ background: rgba(255, 52, 52, 0.33);
+}
+
+
+
+.story-links a,
+.test-name-link a {
+ color: inherit;
+}
+
+
+
+.story-pkg {
+ background: rgba(0, 0, 0, .4);
+}
+
+.story-pkg:hover {
+ background: rgba(255, 255, 255, .05);
+}
+
+.story-line + .story-line {
+ border-top: 1px dashed rgba(255, 255, 255, .08);
+}
+
+.story-line-desc .message {
+ color: #999;
+}
+
+.story-line-summary-container {
+ border-right: 1px dashed #333;
+}
+
+.story-line.ok .story-line-status { background: #008000; }
+.story-line.ok:hover, .story-line.ok.story-line-sel { background: rgba(0, 128, 0, .1); }
+
+.story-line.fail .story-line-status { background: #EA9C4D; }
+.story-line.fail:hover, .story-line.fail.story-line-sel { background: rgba(234, 156, 77, .1); }
+
+.story-line.panic .story-line-status { background: #FF3232; }
+.story-line.panic:hover, .story-line.panic.story-line-sel { background: rgba(255, 50, 50, .1); }
+
+.story-line.skip .story-line-status { background: #AAA; }
+.story-line.skip:hover, .story-line.skip.story-line-sel { background: rgba(255, 255, 255, .1); }
+
+.statusicon.ok { color: #76C13C; }
+.statusicon.fail, .fail-clr { color: #EA9C4D; }
+.statusicon.panic, .statusicon.panic .fa, .panic-clr { color: #FF3232; }
+.statusicon.skip, .skip-clr { color: #888; }
+
+
+.log .timestamp {
+ color: #999;
+}
+
+
+.clr-red {
+ color: #FF2222;
+}
+
+
+.tipsy-inner {
+ background-color: #FAFAFA;
+ color: #222;
+}
+
+.tipsy-arrow {
+ border: 8px dashed #FAFAFA;
+}
+
+.tipsy-arrow-n,
+.tipsy-arrow-s,
+.tipsy-arrow-e,
+.tipsy-arrow-w
+{
+ border-color: #FAFAFA;
+}
+
+/***************************************************************/
+/*************************** Tweaks ****************************/
+/***************************************************************/
+
+
+/* More space for stories */
+div#col-3 { display: none; } /* hides the log */
+div#col-2 { width: 85%; } /* fill it in with stories */
+
+/* Bigger Text */
+.story-line { font-size: 16px; }
+.story-line b { font-size: 20px; }
+td.story-pkg-name { font-size: 24px; }
+
+/* Smaller Header */
+div.overall { padding: 10px 0 0px; }
+.overall .status { font-size: 36px; }
+
+/***************************************************************/
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark.css b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark.css
new file mode 100644
index 00000000000..132e19dbf1d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/dark.css
@@ -0,0 +1,380 @@
+::-webkit-scrollbar {
+ width: 10px;
+ height: 10px;
+}
+
+::-webkit-scrollbar-corner {
+ background: transparent;
+}
+
+::-webkit-scrollbar-thumb {
+ background-color: rgba(255, 255, 255, .35);
+ border-radius: 10px;
+}
+
+body {
+ color: #D0D0D0;
+ background: fixed #040607;
+ background: fixed -moz-linear-gradient(top, hsl(200,27%,2%) 0%, hsl(203,29%,26%) 100%);
+ background: fixed -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(200,27%,2%)), color-stop(100%,hsl(203,29%,26%)));
+ background: fixed -webkit-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%);
+ background: fixed -o-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%);
+ background: fixed -ms-linear-gradient(top, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%);
+ background: fixed linear-gradient(to bottom, hsl(200,27%,2%) 0%,hsl(203,29%,26%) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#040607', endColorstr='#2f4756',GradientType=0 );
+}
+
+a,
+.toggle-all-pkg {
+ color: #247D9E;
+}
+
+a:hover,
+.toggle-all-pkg:hover {
+ color: #33B5E5;
+}
+
+input[type=text] {
+ border-bottom-color: #33B5E5;
+ color: #BBB;
+}
+
+::-webkit-input-placeholder {
+ color: #555;
+}
+:-moz-placeholder {
+ color: #555;
+}
+::-moz-placeholder {
+ color: #555;
+}
+:-ms-input-placeholder {
+ color: #555;
+}
+
+.overall {
+ /*
+ Using box-shadow here is not very performant but allows us
+ to animate the change of the background color much more easily.
+	This box-shadow is an ALTERNATIVE, not a supplement, to using gradients
+ in this case.
+ */
+ box-shadow: inset 0 150px 100px -110px rgba(0, 0, 0, .5);
+}
+
+.overall.ok {
+ background: #688E00;
+}
+
+.overall.fail {
+ background: #DB8700;
+}
+
+.overall.panic {
+ background: #A80000;
+}
+
+.overall.buildfail {
+ background: #A4A8AA;
+}
+
+.overall .status {
+ color: #EEE;
+}
+
+.server-down {
+ background: rgba(255, 45, 45, 0.55);
+ color: #FFF;
+}
+
+.toggler {
+ background: #132535;
+}
+
+.toggler:hover {
+ background: #1C374F;
+}
+
+.controls {
+ border-bottom: 1px solid #33B5E5;
+}
+
+.controls li {
+ color: #2A5A84;
+}
+
+.controls li:hover {
+ background: #132535;
+ color: #33B5E5;
+}
+
+.sel {
+ background: #33B5E5 !important;
+ color: #FFF !important;
+}
+
+.pkg-cover-name {
+ text-shadow: 1px 1px 0px #000;
+}
+
+.pkg-cover-name b,
+.story-pkg-name b {
+ color: #FFF;
+ font-weight: bold;
+}
+
+.pkg-cover:hover,
+.pkg-cover:hover b {
+ color: #FFF;
+}
+
+.expandable {
+ border-top-color: #33B5E5;
+}
+
+.expandable {
+ background: rgba(0, 0, 0, .2);
+}
+
+.history .item.ok {
+ background: #3f5400;
+ background: -moz-linear-gradient(top, hsl(75,100%,16%) 0%, hsl(76,100%,28%) 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(75,100%,16%)), color-stop(100%,hsl(76,100%,28%)));
+ background: -webkit-linear-gradient(top, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%);
+ background: -o-linear-gradient(top, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%);
+ background: -ms-linear-gradient(top, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%);
+ background: linear-gradient(to bottom, hsl(75,100%,16%) 0%,hsl(76,100%,28%) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#3f5400', endColorstr='#698f00',GradientType=0 );
+}
+
+.history .item.fail {
+ background: #7f4e00;
+ background: -moz-linear-gradient(top, hsl(37,100%,25%) 0%, hsl(37,100%,43%) 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(37,100%,25%)), color-stop(100%,hsl(37,100%,43%)));
+ background: -webkit-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%);
+ background: -o-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%);
+ background: -ms-linear-gradient(top, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%);
+ background: linear-gradient(to bottom, hsl(37,100%,25%) 0%,hsl(37,100%,43%) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#7f4e00', endColorstr='#db8700',GradientType=0 );
+}
+
+.history .item.panic {
+ background: #660000;
+ background: -moz-linear-gradient(top, hsl(0,100%,20%) 0%, hsl(0,100%,33%) 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(0,100%,20%)), color-stop(100%,hsl(0,100%,33%)));
+ background: -webkit-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%);
+ background: -o-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%);
+ background: -ms-linear-gradient(top, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%);
+ background: linear-gradient(to bottom, hsl(0,100%,20%) 0%,hsl(0,100%,33%) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#660000', endColorstr='#a80000',GradientType=0 );
+}
+
+.history .item.buildfail {
+ background: #282f33;
+ background: -moz-linear-gradient(top, hsl(202,12%,18%) 0%, hsl(208,5%,48%) 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,hsl(202,12%,18%)), color-stop(100%,hsl(208,5%,48%)));
+ background: -webkit-linear-gradient(top, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%);
+ background: -o-linear-gradient(top, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%);
+ background: -ms-linear-gradient(top, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%);
+ background: linear-gradient(to bottom, hsl(202,12%,18%) 0%,hsl(208,5%,48%) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#282f33', endColorstr='#757c82',GradientType=0 );
+}
+
+.enum {
+ border-color: #2B597F;
+}
+
+.enum > li {
+ border-left-color: #2B597F;
+}
+
+.enum > li:hover {
+ background: rgba(55, 114, 163, .25);
+}
+
+.group {
+ background: -moz-linear-gradient(top, rgba(16,59,71,0) 0%, rgba(16,59,71,1) 100%);
+ background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,rgba(16,59,71,0)), color-stop(100%,rgba(16,59,71,1)));
+ background: -webkit-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%);
+ background: -o-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%);
+ background: -ms-linear-gradient(top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%);
+ background: linear-gradient(to top, rgba(16,59,71,0) 0%,rgba(16,59,71,1) 100%);
+ filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#00103b47', endColorstr='#103b47',GradientType=0 );
+}
+
+.stats {
+ color: #FFF;
+}
+
+.error {
+ color: #F58888 !important;
+ background: rgba(255, 45, 45, 0.35) !important;
+}
+
+.spin-slowly,
+.spin-once {
+ color: #33B5E5 !important;
+}
+
+.frame .col,
+footer {
+ border-color: #33B5E5;
+}
+
+footer {
+ background: rgba(0, 0, 0, .5);
+}
+
+footer .recording .fa {
+ color: #CC0000;
+}
+
+footer .replay .fa {
+ color: #33B5E5;
+}
+
+footer .paused .fa {
+ color: #AAA;
+}
+
+footer .recording.replay .fa {
+ color: #33B5E5;
+}
+
+
+
+.buildfail-pkg {
+ background: rgba(255, 255, 255, .1);
+}
+.buildfail-output {
+ background: rgba(255, 255, 255, .2);
+}
+
+
+
+.panic-pkg {
+ background: rgba(255, 0, 0, .3);
+}
+.panic-story {
+ padding: 10px;
+ background: rgba(255, 0, 0, .1);
+}
+.panic-story a,
+.panic-summary {
+ color: #E94A4A;
+}
+.panic-output {
+ color: #FF8181;
+}
+
+
+
+.failure-pkg {
+ background: rgba(255, 153, 0, .42);
+}
+.failure-story {
+ padding: 10px;
+ background: rgba(255, 153, 0, .1);
+}
+.failure-story a {
+ color: #FFB518;
+}
+.failure-output {
+ color: #FFBD47;
+}
+.failure-file {
+ color: #FFF;
+}
+
+
+.diffviewer td {
+ border-color: rgba(0, 0, 0, .3);
+}
+
+/* prettyTextDiff expected/deleted colors */
+.diffviewer .exp,
+.diff del {
+ background: rgba(131, 252, 131, 0.22);
+}
+
+/* prettyTextDiff actual/inserted colors */
+.diffviewer .act,
+.diff ins {
+ background: rgba(255, 52, 52, 0.33);
+}
+
+
+
+.story-links a,
+.test-name-link a {
+ color: inherit;
+}
+
+
+
+.story-pkg {
+ background: rgba(0, 0, 0, .4);
+}
+
+.story-pkg:hover {
+ background: rgba(255, 255, 255, .05);
+}
+
+.story-line + .story-line {
+ border-top: 1px dashed rgba(255, 255, 255, .08);
+}
+
+.story-line-desc .message {
+ color: #999;
+}
+
+.story-line-summary-container {
+ border-right: 1px dashed #333;
+}
+
+.story-line.ok .story-line-status { background: #008000; }
+.story-line.ok:hover, .story-line.ok.story-line-sel { background: rgba(0, 128, 0, .1); }
+
+.story-line.fail .story-line-status { background: #EA9C4D; }
+.story-line.fail:hover, .story-line.fail.story-line-sel { background: rgba(234, 156, 77, .1); }
+
+.story-line.panic .story-line-status { background: #FF3232; }
+.story-line.panic:hover, .story-line.panic.story-line-sel { background: rgba(255, 50, 50, .1); }
+
+.story-line.skip .story-line-status { background: #AAA; }
+.story-line.skip:hover, .story-line.skip.story-line-sel { background: rgba(255, 255, 255, .1); }
+
+.statusicon.ok { color: #76C13C; }
+.statusicon.fail, .fail-clr { color: #EA9C4D; }
+.statusicon.panic, .statusicon.panic .fa, .panic-clr { color: #FF3232; }
+.statusicon.skip, .skip-clr { color: #888; }
+
+.ansi-green { color: #76C13C; }
+.ansi-yellow { color: #EA9C4D; }
+
+.log .timestamp {
+ color: #999;
+}
+
+
+.clr-red {
+ color: #FF2222;
+}
+
+
+.tipsy-inner {
+ background-color: #FAFAFA;
+ color: #222;
+}
+
+.tipsy-arrow {
+ border: 8px dashed #FAFAFA;
+}
+
+.tipsy-arrow-n,
+.tipsy-arrow-s,
+.tipsy-arrow-e,
+.tipsy-arrow-w
+{
+ border-color: #FAFAFA;
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/light.css b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/light.css
new file mode 100644
index 00000000000..decfc7f4135
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/themes/light.css
@@ -0,0 +1,328 @@
+::-webkit-scrollbar-thumb {
+ background-color: rgba(0, 0, 0, .35);
+ border-radius: 10px;
+}
+
+::-webkit-input-placeholder {
+ color: #CCC;
+}
+:-moz-placeholder {
+ color: #CCC;
+}
+::-moz-placeholder {
+ color: #CCC;
+}
+:-ms-input-placeholder {
+ color: #CCC;
+}
+
+body {
+ color: #444;
+ background: #F4F4F4;
+}
+
+a {
+ color: #247D9E;
+}
+
+a:hover {
+ color: #33B5E5;
+}
+
+.overall.ok,
+.history .item.ok {
+ background: #8CB700; /* Can't decide: #5AA02C */
+}
+
+.overall.fail,
+.history .item.fail {
+ background: #E79C07;
+}
+
+.overall.panic,
+.history .item.panic {
+ background: #BB0000;
+}
+
+.overall.buildfail,
+.history .item.buildfail {
+ background: #828c95;
+}
+
+.overall .status {
+ color: #EEE;
+}
+
+.server-down {
+ background: #BB0000;
+ color: #FFF;
+}
+
+.toggler {
+ background: #6887A3;
+ color: #FFF;
+}
+
+.toggler:hover {
+ background: #465B6D;
+}
+
+.toggler .fa {
+ color: #FFF;
+}
+
+#logo {
+ color: #6887A3;
+}
+
+.controls {
+ border-bottom: 1px solid #33B5E5;
+}
+
+li.fa,
+a.fa,
+.toggle-all-pkg {
+ color: #6887A3;
+}
+
+li.fa:hover,
+a.fa:hover,
+.toggle-all-pkg:hover {
+ color: #465B6D;
+}
+
+li.fa:active,
+a.fa:active,
+.toggle-all-pkg:active {
+ color: #33B5E5;
+}
+
+.controls li,
+.enum > li {
+ border-left-color: #33B5E5;
+}
+
+.controls li:hover,
+.enum > li:hover {
+ background: #CFE6F9;
+}
+
+.enum {
+ border-color: #33B5E5;
+}
+
+.sel {
+ background: #33B5E5 !important;
+ color: #FFF !important;
+}
+
+.pkg-cover-name b,
+.story-pkg-name b {
+ color: #000;
+ font-weight: bold;
+}
+
+.expandable {
+ background: rgba(0, 0, 0, .1);
+ border-top-color: #33B5E5;
+}
+
+.history .item {
+ color: #FFF;
+}
+
+.spin-slowly,
+.spin-once {
+ color: #33B5E5 !important;
+}
+
+
+input[type=text] {
+ border-bottom-color: #33B5E5;
+ color: #333;
+}
+
+.error {
+ color: #CC0000 !important;
+ background: #FFD2D2 !important;
+}
+
+
+footer {
+ background: #F4F4F4;
+}
+
+.frame .col,
+footer {
+ border-color: #33B5E5;
+}
+
+footer .recording .fa {
+ color: #CC0000;
+}
+
+footer .replay .fa {
+ color: #33B5E5;
+}
+
+footer .paused .fa {
+ color: #333;
+}
+
+
+.buildfail-pkg {
+ background: #CCC;
+}
+.buildfail-output {
+ background: #EEE;
+}
+
+
+
+.panic-pkg {
+ background: #E94D4D;
+ color: #FFF;
+}
+.panics .panic-details {
+ border: 5px solid #E94D4D;
+ border-top: 0;
+ border-bottom: 0;
+}
+.panic-details {
+ color: #CC0000;
+}
+.panics .panic:last-child .panic-details {
+ border-bottom: 5px solid #E94D4D;
+}
+.panic-story {
+ padding: 10px;
+}
+.panics .panic-output {
+ background: #FFF;
+}
+
+
+
+
+.failure-pkg {
+ background: #FFA300;
+ color: #FFF;
+}
+.failures .failure-details {
+ border: 5px solid #FFA300;
+ border-top: 0;
+ border-bottom: 0;
+}
+.failures .failure:last-child .failure-details {
+ border-bottom: 5px solid #FFA300;
+}
+.failure-story {
+ padding: 10px;
+ color: #A87A00;
+}
+.stories .failure-output {
+ color: #EA9C4D;
+}
+.failures .failure-output {
+ background: #FFF;
+}
+.failure-file {
+ color: #000;
+}
+
+.diffviewer td {
+ border-color: #CCC;
+ background: #FFF;
+}
+
+/* prettyTextDiff expected/deleted colors */
+.diffviewer .exp,
+.diff del {
+ background: #ADFFAD;
+}
+
+/* prettyTextDiff actual/inserted colors */
+.diffviewer .act,
+.diff ins {
+ background: #FFC0C0;
+}
+
+
+
+.story-links a,
+.test-name-link a {
+ color: inherit;
+}
+
+
+
+.story-pkg {
+ background: #E8E8E8;
+}
+
+.story-pkg:hover {
+ background: #DFDFDF;
+}
+
+.story-line {
+ background: #FFF;
+}
+
+.story-line-desc .message {
+ color: #888;
+}
+
+.story-line + .story-line {
+ border-top: 1px dashed #DDD;
+}
+
+.story-line-summary-container {
+ border-right: 1px dashed #DDD;
+}
+
+.story-line.ok .story-line-status { background: #8CB700; }
+.story-line.ok:hover, .story-line.ok.story-line-sel { background: #F4FFD8; }
+
+.story-line.fail .story-line-status { background: #E79C07; }
+.story-line.fail:hover, .story-line.fail.story-line-sel { background: #FFF1DB; }
+
+.story-line.panic .story-line-status { background: #DD0606; }
+.story-line.panic:hover, .story-line.panic.story-line-sel { background: #FFE8E8; }
+
+.story-line.skip .story-line-status { background: #4E4E4E; }
+.story-line.skip:hover, .story-line.skip.story-line-sel { background: #F2F2F2; }
+
+.statusicon.ok { color: #76C13C; }
+.statusicon.fail, .fail-clr { color: #EA9C4D; }
+.statusicon.panic, .statusicon.panic .fa, .panic-clr { color: #FF3232; }
+.statusicon.skip, .skip-clr { color: #AAA; }
+
+.ansi-green { color: #76C13C; }
+.ansi-yellow { color: #EA9C4D; }
+
+.log .timestamp {
+ color: #999;
+}
+
+.clr-red,
+a.clr-red {
+ color: #CC0000;
+}
+
+
+.tipsy-inner {
+ background-color: #000;
+ color: #FFF;
+}
+
+.tipsy-arrow {
+ border: 8px dashed #000;
+}
+
+.tipsy-arrow-n,
+.tipsy-arrow-s,
+.tipsy-arrow-e,
+.tipsy-arrow-w
+{
+ border-color: #000;
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/tipsy.css b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/tipsy.css
new file mode 100644
index 00000000000..25d261a4ff5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/css/tipsy.css
@@ -0,0 +1,97 @@
+.tipsy {
+ font-size: 12px;
+ position: absolute;
+ padding: 8px;
+ z-index: 100000;
+ font-family: 'Open Sans';
+ line-height: 1.25em;
+}
+
+.tipsy-inner {
+ max-width: 200px;
+ padding: 5px 7px;
+ text-align: center;
+}
+
+/* Rounded corners */
+/*.tipsy-inner { border-radius: 3px; -moz-border-radius: 3px; -webkit-border-radius: 3px; }*/
+
+/* Shadow */
+/*.tipsy-inner { box-shadow: 0 0 5px #000000; -webkit-box-shadow: 0 0 5px #000000; -moz-box-shadow: 0 0 5px #000000; }*/
+
+.tipsy-arrow {
+ position: absolute;
+ width: 0;
+ height: 0;
+ line-height: 0;
+}
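+
+/* These arrows use the classic CSS border-triangle technique: a 0x0 box
+   whose single solid border side (set per placement below) forms the
+   pointer; the two adjacent borders are made transparent to shape it. */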
+
+.tipsy-n .tipsy-arrow,
+.tipsy-nw .tipsy-arrow,
+.tipsy-ne .tipsy-arrow {
+ border-bottom-style: solid;
+ border-top: none;
+ border-left-color: transparent;
+ border-right-color: transparent;
+}
+
+
+.tipsy-n .tipsy-arrow {
+ top: 0px;
+ left: 50%;
+ margin-left: -7px;
+}
+.tipsy-nw .tipsy-arrow {
+ top: 0;
+ left: 10px;
+}
+.tipsy-ne .tipsy-arrow {
+ top: 0;
+ right: 10px;
+}
+
+.tipsy-s .tipsy-arrow,
+.tipsy-sw .tipsy-arrow,
+.tipsy-se .tipsy-arrow {
+ border-top-style: solid;
+ border-bottom: none;
+ border-left-color: transparent;
+ border-right-color: transparent;
+}
+
+
+.tipsy-s .tipsy-arrow {
+ bottom: 0;
+ left: 50%;
+ margin-left: -7px;
+}
+
+.tipsy-sw .tipsy-arrow {
+ bottom: 0;
+ left: 10px;
+}
+
+.tipsy-se .tipsy-arrow {
+ bottom: 0;
+ right: 10px;
+}
+
+.tipsy-e .tipsy-arrow {
+ right: 0;
+ top: 50%;
+ margin-top: -7px;
+ border-left-style: solid;
+ border-right: none;
+ border-top-color: transparent;
+ border-bottom-color: transparent;
+}
+
+.tipsy-w .tipsy-arrow {
+ left: 0;
+ top: 50%;
+ margin-top: -7px;
+ border-right-style: solid;
+ border-left: none;
+ border-top-color: transparent;
+ border-bottom-color: transparent;
+}
\ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/FontAwesome.otf b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/FontAwesome.otf
new file mode 100755
index 00000000000..3461e3fce6a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/FontAwesome.otf
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.eot b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.eot
new file mode 100755
index 00000000000..6cfd5660956
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.eot
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.svg b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.svg
new file mode 100755
index 00000000000..a9f84695031
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.svg
@@ -0,0 +1,504 @@
+<?xml version="1.0" standalone="no"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd" >
+<svg xmlns="http://www.w3.org/2000/svg">
+<metadata></metadata>
+<defs>
+<font id="fontawesomeregular" horiz-adv-x="1536" >
+<font-face units-per-em="1792" ascent="1536" descent="-256" />
+<missing-glyph horiz-adv-x="448" />
+<glyph unicode=" " horiz-adv-x="448" />
+<glyph unicode="&#x09;" horiz-adv-x="448" />
+<glyph unicode="&#xa0;" horiz-adv-x="448" />
+<glyph unicode="&#xa8;" horiz-adv-x="1792" />
+<glyph unicode="&#xa9;" horiz-adv-x="1792" />
+<glyph unicode="&#xae;" horiz-adv-x="1792" />
+<glyph unicode="&#xb4;" horiz-adv-x="1792" />
+<glyph unicode="&#xc6;" horiz-adv-x="1792" />
+<glyph unicode="&#xd8;" horiz-adv-x="1792" />
+<glyph unicode="&#x2000;" horiz-adv-x="768" />
+<glyph unicode="&#x2001;" horiz-adv-x="1537" />
+<glyph unicode="&#x2002;" horiz-adv-x="768" />
+<glyph unicode="&#x2003;" horiz-adv-x="1537" />
+<glyph unicode="&#x2004;" horiz-adv-x="512" />
+<glyph unicode="&#x2005;" horiz-adv-x="384" />
+<glyph unicode="&#x2006;" horiz-adv-x="256" />
+<glyph unicode="&#x2007;" horiz-adv-x="256" />
+<glyph unicode="&#x2008;" horiz-adv-x="192" />
+<glyph unicode="&#x2009;" horiz-adv-x="307" />
+<glyph unicode="&#x200a;" horiz-adv-x="85" />
+<glyph unicode="&#x202f;" horiz-adv-x="307" />
+<glyph unicode="&#x205f;" horiz-adv-x="384" />
+<glyph unicode="&#x2122;" horiz-adv-x="1792" />
+<glyph unicode="&#x221e;" horiz-adv-x="1792" />
+<glyph unicode="&#x2260;" horiz-adv-x="1792" />
+<glyph unicode="&#x25fc;" horiz-adv-x="500" d="M0 0z" />
+<glyph unicode="&#xf000;" horiz-adv-x="1792" d="M93 1350q0 23 18 36.5t38 17.5t43 4h1408q23 0 43 -4t38 -17.5t18 -36.5q0 -35 -43 -78l-632 -632v-768h320q26 0 45 -19t19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45t19 45t45 19h320v768l-632 632q-43 43 -43 78z" />
+<glyph unicode="&#xf001;" d="M0 -64q0 50 34 89t86 60.5t103.5 32t96.5 10.5q105 0 192 -39v967q0 31 19 56.5t49 35.5l832 256q12 4 28 4q40 0 68 -28t28 -68v-1120q0 -50 -34 -89t-86 -60.5t-103.5 -32t-96.5 -10.5t-96.5 10.5t-103.5 32t-86 60.5t-34 89t34 89t86 60.5t103.5 32t96.5 10.5 q105 0 192 -39v537l-768 -237v-709q0 -50 -34 -89t-86 -60.5t-103.5 -32t-96.5 -10.5t-96.5 10.5t-103.5 32t-86 60.5t-34 89z" />
+<glyph unicode="&#xf002;" horiz-adv-x="1664" d="M0 704q0 143 55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90q0 -52 -38 -90t-90 -38q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5z M256 704q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5z" />
+<glyph unicode="&#xf003;" horiz-adv-x="1792" d="M0 32v1088q0 66 47 113t113 47h1472q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1472q13 0 22.5 9.5t9.5 22.5v768q-32 -36 -69 -66q-268 -206 -426 -338q-51 -43 -83 -67t-86.5 -48.5 t-102.5 -24.5h-1h-1q-48 0 -102.5 24.5t-86.5 48.5t-83 67q-158 132 -426 338q-37 30 -69 66v-768zM128 1120q0 -168 147 -284q193 -152 401 -317q6 -5 35 -29.5t46 -37.5t44.5 -31.5t50.5 -27.5t43 -9h1h1q20 0 43 9t50.5 27.5t44.5 31.5t46 37.5t35 29.5q208 165 401 317 q54 43 100.5 115.5t46.5 131.5v11v13.5t-0.5 13t-3 12.5t-5.5 9t-9 7.5t-14 2.5h-1472q-13 0 -22.5 -9.5t-9.5 -22.5z" />
+<glyph unicode="&#xf004;" horiz-adv-x="1792" d="M0 940q0 220 127 344t351 124q62 0 126.5 -21.5t120 -58t95.5 -68.5t76 -68q36 36 76 68t95.5 68.5t120 58t126.5 21.5q224 0 351 -124t127 -344q0 -221 -229 -450l-623 -600q-18 -18 -44 -18t-44 18l-624 602q-10 8 -27.5 26t-55.5 65.5t-68 97.5t-53.5 121t-23.5 138z " />
+<glyph unicode="&#xf005;" horiz-adv-x="1664" d="M0 889q0 37 56 46l502 73l225 455q19 41 49 41t49 -41l225 -455l502 -73q56 -9 56 -46q0 -22 -26 -48l-363 -354l86 -500q1 -7 1 -20q0 -21 -10.5 -35.5t-30.5 -14.5q-19 0 -40 12l-449 236l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500 l-364 354q-25 27 -25 48z" />
+<glyph unicode="&#xf006;" horiz-adv-x="1664" d="M0 889q0 37 56 46l502 73l225 455q19 41 49 41t49 -41l225 -455l502 -73q56 -9 56 -46q0 -22 -26 -48l-363 -354l86 -500q1 -7 1 -20q0 -50 -41 -50q-19 0 -40 12l-449 236l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500l-364 354 q-25 27 -25 48zM221 829l306 -297l-73 -421l378 199l377 -199l-72 421l306 297l-422 62l-189 382l-189 -382z" />
+<glyph unicode="&#xf007;" horiz-adv-x="1408" d="M0 131q0 53 3.5 103.5t14 109t26.5 108.5t43 97.5t62 81t85.5 53.5t111.5 20q9 0 42 -21.5t74.5 -48t108 -48t133.5 -21.5t133.5 21.5t108 48t74.5 48t42 21.5q61 0 111.5 -20t85.5 -53.5t62 -81t43 -97.5t26.5 -108.5t14 -109t3.5 -103.5q0 -120 -73 -189.5t-194 -69.5 h-874q-121 0 -194 69.5t-73 189.5zM320 1024q0 159 112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5z" />
+<glyph unicode="&#xf008;" horiz-adv-x="1920" d="M0 -96v1344q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1344q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 64v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45zM128 320q0 -26 19 -45t45 -19h128 q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM128 704q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM128 1088q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19 h-128q-26 0 -45 -19t-19 -45v-128zM512 -64q0 -26 19 -45t45 -19h768q26 0 45 19t19 45v512q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-512zM512 704q0 -26 19 -45t45 -19h768q26 0 45 19t19 45v512q0 26 -19 45t-45 19h-768q-26 0 -45 -19t-19 -45v-512zM1536 64 v-128q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45zM1536 320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM1536 704q0 -26 19 -45t45 -19h128q26 0 45 19t19 45 v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128zM1536 1088q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf009;" horiz-adv-x="1664" d="M0 128v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM0 896v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM896 128v384q0 52 38 90t90 38h512q52 0 90 -38 t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM896 896v384q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-384q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90z" />
+<glyph unicode="&#xf00a;" horiz-adv-x="1792" d="M0 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 608v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 1120v192q0 40 28 68t68 28h320q40 0 68 -28 t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 608v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320 q-40 0 -68 28t-28 68zM640 1120v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1280 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1280 608v192 q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1280 1120v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf00b;" horiz-adv-x="1792" d="M0 96v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 608v192q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM0 1120v192q0 40 28 68t68 28h320q40 0 68 -28 t28 -68v-192q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 96v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68zM640 608v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-960 q-40 0 -68 28t-28 68zM640 1120v192q0 40 28 68t68 28h960q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf00c;" horiz-adv-x="1792" d="M121 608q0 40 28 68l136 136q28 28 68 28t68 -28l294 -295l656 657q28 28 68 28t68 -28l136 -136q28 -28 28 -68t-28 -68l-724 -724l-136 -136q-28 -28 -68 -28t-68 28l-136 136l-362 362q-28 28 -28 68z" />
+<glyph unicode="&#xf00d;" horiz-adv-x="1408" d="M110 214q0 40 28 68l294 294l-294 294q-28 28 -28 68t28 68l136 136q28 28 68 28t68 -28l294 -294l294 294q28 28 68 28t68 -28l136 -136q28 -28 28 -68t-28 -68l-294 -294l294 -294q28 -28 28 -68t-28 -68l-136 -136q-28 -28 -68 -28t-68 28l-294 294l-294 -294 q-28 -28 -68 -28t-68 28l-136 136q-28 28 -28 68z" />
+<glyph unicode="&#xf00e;" horiz-adv-x="1664" d="M0 704q0 143 55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90t-37.5 -90.5t-90.5 -37.5q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5z M256 704q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5zM384 672v64q0 13 9.5 22.5t22.5 9.5h224v224q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-224h224q13 0 22.5 -9.5t9.5 -22.5v-64 q0 -13 -9.5 -22.5t-22.5 -9.5h-224v-224q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v224h-224q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf010;" horiz-adv-x="1664" d="M0 704q0 143 55.5 273.5t150 225t225 150t273.5 55.5t273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -220 -124 -399l343 -343q37 -37 37 -90t-37.5 -90.5t-90.5 -37.5q-54 0 -90 38l-343 342q-179 -124 -399 -124q-143 0 -273.5 55.5t-225 150t-150 225t-55.5 273.5z M256 704q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5zM384 672v64q0 13 9.5 22.5t22.5 9.5h576q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-576q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf011;" d="M0 640q0 182 80.5 343t226.5 270q43 32 95.5 25t83.5 -50q32 -42 24.5 -94.5t-49.5 -84.5q-98 -74 -151.5 -181t-53.5 -228q0 -104 40.5 -198.5t109.5 -163.5t163.5 -109.5t198.5 -40.5t198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5q0 121 -53.5 228t-151.5 181 q-42 32 -49.5 84.5t24.5 94.5q31 43 84 50t95 -25q146 -109 226.5 -270t80.5 -343q0 -156 -61 -298t-164 -245t-245 -164t-298 -61t-298 61t-245 164t-164 245t-61 298zM640 768v640q0 52 38 90t90 38t90 -38t38 -90v-640q0 -52 -38 -90t-90 -38t-90 38t-38 90z" />
+<glyph unicode="&#xf012;" horiz-adv-x="1792" d="M0 -96v192q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM384 -96v320q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-320q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM768 -96v576q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-576 q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM1152 -96v960q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-960q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM1536 -96v1472q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1472q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf013;" d="M0 531v222q0 12 8 23t19 13l186 28q14 46 39 92q-40 57 -107 138q-10 12 -10 24q0 10 9 23q26 36 98.5 107.5t94.5 71.5q13 0 26 -10l138 -107q44 23 91 38q16 136 29 186q7 28 36 28h222q14 0 24.5 -8.5t11.5 -21.5l28 -184q49 -16 90 -37l142 107q9 9 24 9q13 0 25 -10 q129 -119 165 -170q7 -8 7 -22q0 -12 -8 -23q-15 -21 -51 -66.5t-54 -70.5q26 -50 41 -98l183 -28q13 -2 21 -12.5t8 -23.5v-222q0 -12 -8 -23t-20 -13l-185 -28q-19 -54 -39 -91q35 -50 107 -138q10 -12 10 -25t-9 -23q-27 -37 -99 -108t-94 -71q-12 0 -26 9l-138 108 q-44 -23 -91 -38q-16 -136 -29 -186q-7 -28 -36 -28h-222q-14 0 -24.5 8.5t-11.5 21.5l-28 184q-49 16 -90 37l-141 -107q-10 -9 -25 -9q-14 0 -25 11q-126 114 -165 168q-7 10 -7 23q0 12 8 23q15 21 51 66.5t54 70.5q-27 50 -41 99l-183 27q-13 2 -21 12.5t-8 23.5z M512 640q0 -106 75 -181t181 -75t181 75t75 181t-75 181t-181 75t-181 -75t-75 -181z" />
+<glyph unicode="&#xf014;" horiz-adv-x="1408" d="M0 1056v64q0 14 9 23t23 9h309l70 167q15 37 54 63t79 26h320q40 0 79 -26t54 -63l70 -167h309q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-96v-948q0 -83 -47 -143.5t-113 -60.5h-832q-66 0 -113 58.5t-47 141.5v952h-96q-14 0 -23 9t-9 23zM256 76q0 -22 7 -40.5 t14.5 -27t10.5 -8.5h832q3 0 10.5 8.5t14.5 27t7 40.5v948h-896v-948zM384 224v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23zM480 1152h448l-48 117q-7 9 -17 11h-317q-10 -2 -17 -11zM640 224v576q0 14 9 23t23 9h64 q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23zM896 224v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf015;" horiz-adv-x="1664" d="M26 636.5q1 13.5 11 21.5l719 599q32 26 76 26t76 -26l244 -204v195q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-408l219 -182q10 -8 11 -21.5t-7 -23.5l-62 -74q-8 -9 -21 -11h-3q-13 0 -21 7l-692 577l-692 -577q-12 -8 -24 -7q-13 2 -21 11l-62 74q-8 10 -7 23.5zM256 64 v480q0 1 0.5 3t0.5 3l575 474l575 -474q1 -2 1 -6v-480q0 -26 -19 -45t-45 -19h-384v384h-256v-384h-384q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf016;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22 v-376z" />
+<glyph unicode="&#xf017;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 544v64q0 14 9 23t23 9h224v352q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-448q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf018;" horiz-adv-x="1920" d="M50 73q0 54 26 116l417 1044q8 19 26 33t38 14h339q-13 0 -23 -9.5t-11 -22.5l-15 -192q-1 -14 8 -23t22 -9h166q13 0 22 9t8 23l-15 192q-1 13 -11 22.5t-23 9.5h339q20 0 38 -14t26 -33l417 -1044q26 -62 26 -116q0 -73 -46 -73h-704q13 0 22 9.5t8 22.5l-20 256 q-1 13 -11 22.5t-23 9.5h-272q-13 0 -23 -9.5t-11 -22.5l-20 -256q-1 -13 8 -22.5t22 -9.5h-704q-46 0 -46 73zM809 540q-1 -12 8 -20t21 -8h244q12 0 21 8t8 20v4l-24 320q-1 13 -11 22.5t-23 9.5h-186q-13 0 -23 -9.5t-11 -22.5l-24 -320v-4z" />
+<glyph unicode="&#xf019;" horiz-adv-x="1664" d="M0 96v320q0 40 28 68t68 28h465l135 -136q58 -56 136 -56t136 56l136 136h464q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-1472q-40 0 -68 28t-28 68zM325 985q17 39 59 39h256v448q0 26 19 45t45 19h256q26 0 45 -19t19 -45v-448h256q42 0 59 -39q17 -41 -14 -70 l-448 -448q-18 -19 -45 -19t-45 19l-448 448q-31 29 -14 70zM1152 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM1408 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf01a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM418 620q8 20 30 20h192v352q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-352h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-11 -9 -23 -9t-23 9l-320 320q-15 16 -7 35z" />
+<glyph unicode="&#xf01b;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM416 672q0 12 10 24l319 319q11 9 23 9t23 -9l320 -320q15 -16 7 -35q-8 -20 -30 -20h-192v-352q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v352h-192q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf01c;" d="M0 64v482q0 62 25 123l238 552q10 25 36.5 42t52.5 17h832q26 0 52.5 -17t36.5 -42l238 -552q25 -61 25 -123v-482q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM197 576h316l95 -192h320l95 192h316q-1 3 -2.5 8t-2.5 8l-212 496h-708l-212 -496q-1 -2 -2.5 -8 t-2.5 -8z" />
+<glyph unicode="&#xf01d;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 320v640q0 37 32 56q33 18 64 -1l544 -320q32 -18 32 -55t-32 -55l-544 -320q-15 -9 -32 -9q-16 0 -32 8q-32 19 -32 56z" />
+<glyph unicode="&#xf01e;" d="M0 640q0 156 61 298t164 245t245 164t298 61q147 0 284.5 -55.5t244.5 -156.5l130 129q29 31 70 14q39 -17 39 -59v-448q0 -26 -19 -45t-45 -19h-448q-42 0 -59 40q-17 39 14 69l138 138q-148 137 -349 137q-104 0 -198.5 -40.5t-163.5 -109.5t-109.5 -163.5 t-40.5 -198.5t40.5 -198.5t109.5 -163.5t163.5 -109.5t198.5 -40.5q119 0 225 52t179 147q7 10 23 12q14 0 25 -9l137 -138q9 -8 9.5 -20.5t-7.5 -22.5q-109 -132 -264 -204.5t-327 -72.5q-156 0 -298 61t-245 164t-164 245t-61 298z" />
+<glyph unicode="&#xf021;" d="M0 0v448q0 26 19 45t45 19h448q26 0 45 -19t19 -45t-19 -45l-137 -137q71 -66 161 -102t187 -36q134 0 250 65t186 179q11 17 53 117q8 23 30 23h192q13 0 22.5 -9.5t9.5 -22.5q0 -5 -1 -7q-64 -268 -268 -434.5t-478 -166.5q-146 0 -282.5 55t-243.5 157l-129 -129 q-19 -19 -45 -19t-45 19t-19 45zM18 800v7q65 268 270 434.5t480 166.5q146 0 284 -55.5t245 -156.5l130 129q19 19 45 19t45 -19t19 -45v-448q0 -26 -19 -45t-45 -19h-448q-26 0 -45 19t-19 45t19 45l138 138q-148 137 -349 137q-134 0 -250 -65t-186 -179 q-11 -17 -53 -117q-8 -23 -30 -23h-199q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf022;" horiz-adv-x="1792" d="M0 160v1088q0 66 47 113t113 47h1472q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM128 160q0 -13 9.5 -22.5t22.5 -9.5h1472q13 0 22.5 9.5t9.5 22.5v832q0 13 -9.5 22.5t-22.5 9.5h-1472q-13 0 -22.5 -9.5t-9.5 -22.5v-832z M256 288v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 544v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5z M256 800v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 288v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5z M512 544v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5zM512 800v64q0 13 9.5 22.5t22.5 9.5h960q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-13 0 -22.5 9.5t-9.5 22.5z " />
+<glyph unicode="&#xf023;" horiz-adv-x="1152" d="M0 96v576q0 40 28 68t68 28h32v192q0 184 132 316t316 132t316 -132t132 -316v-192h32q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68zM320 768h512v192q0 106 -75 181t-181 75t-181 -75t-75 -181v-192z" />
+<glyph unicode="&#xf024;" horiz-adv-x="1792" d="M64 1280q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5q0 -72 -64 -110v-1266q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5v1266q-64 38 -64 110zM320 320v742q0 32 31 55q21 14 79 43q236 120 421 120q107 0 200 -29t219 -88q38 -19 88 -19 q54 0 117.5 21t110 47t88 47t54.5 21q26 0 45 -19t19 -45v-763q0 -25 -12.5 -38.5t-39.5 -27.5q-215 -116 -369 -116q-61 0 -123.5 22t-108.5 48t-115.5 48t-142.5 22q-192 0 -464 -146q-17 -9 -33 -9q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf025;" horiz-adv-x="1664" d="M0 650q0 151 67 291t179 242.5t266 163.5t320 61t320 -61t266 -163.5t179 -242.5t67 -291q0 -166 -60 -314l-20 -49l-185 -33q-22 -83 -90.5 -136.5t-156.5 -53.5v-32q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v576q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-32 q71 0 130 -35.5t93 -95.5l68 12q29 95 29 193q0 148 -88 279t-236.5 209t-315.5 78t-315.5 -78t-236.5 -209t-88 -279q0 -98 29 -193l68 -12q34 60 93 95.5t130 35.5v32q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-576q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v32 q-88 0 -156.5 53.5t-90.5 136.5l-185 33l-20 49q-60 148 -60 314z" />
+<glyph unicode="&#xf026;" horiz-adv-x="768" d="M0 448v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf027;" horiz-adv-x="1152" d="M0 448v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45zM908 464q0 21 12 35.5t29 25t34 23t29 35.5t12 57t-12 57t-29 35.5t-34 23t-29 25t-12 35.5q0 27 19 45.5t45 18.5 q15 0 25 -5q70 -27 112.5 -93t42.5 -142t-42.5 -141.5t-112.5 -93.5q-10 -5 -25 -5q-26 0 -45 18.5t-19 45.5z" />
+<glyph unicode="&#xf028;" horiz-adv-x="1664" d="M0 448v384q0 26 19 45t45 19h262l333 333q19 19 45 19t45 -19t19 -45v-1088q0 -26 -19 -45t-45 -19t-45 19l-333 333h-262q-26 0 -45 19t-19 45zM908 464q0 21 12 35.5t29 25t34 23t29 35.5t12 57t-12 57t-29 35.5t-34 23t-29 25t-12 35.5q0 27 19 45.5t45 18.5 q15 0 25 -5q70 -27 112.5 -93t42.5 -142t-42.5 -141.5t-112.5 -93.5q-10 -5 -25 -5q-26 0 -45 18.5t-19 45.5zM1008 228q0 39 39 59q56 29 76 44q74 54 115.5 135.5t41.5 173.5t-41.5 173.5t-115.5 135.5q-20 15 -76 44q-39 20 -39 59q0 26 19 45t45 19q13 0 26 -5 q140 -59 225 -188.5t85 -282.5t-85 -282.5t-225 -188.5q-13 -5 -25 -5q-27 0 -46 19t-19 45zM1109 -7q0 36 39 59q7 4 22.5 10.5t22.5 10.5q46 25 82 51q123 91 192 227t69 289t-69 289t-192 227q-36 26 -82 51q-7 4 -22.5 10.5t-22.5 10.5q-39 23 -39 59q0 26 19 45t45 19 q13 0 26 -5q211 -91 338 -283.5t127 -422.5t-127 -422.5t-338 -283.5q-13 -5 -26 -5q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf029;" horiz-adv-x="1408" d="M0 0v640h640v-640h-640zM0 768v640h640v-640h-640zM128 129h384v383h-384v-383zM128 896h384v384h-384v-384zM256 256v128h128v-128h-128zM256 1024v128h128v-128h-128zM768 0v640h384v-128h128v128h128v-384h-384v128h-128v-384h-128zM768 768v640h640v-640h-640z M896 896h384v384h-384v-384zM1024 0v128h128v-128h-128zM1024 1024v128h128v-128h-128zM1280 0v128h128v-128h-128z" />
+<glyph unicode="&#xf02a;" horiz-adv-x="1792" d="M0 0v1408h63v-1408h-63zM94 1v1407h32v-1407h-32zM189 1v1407h31v-1407h-31zM346 1v1407h31v-1407h-31zM472 1v1407h62v-1407h-62zM629 1v1407h31v-1407h-31zM692 1v1407h31v-1407h-31zM755 1v1407h31v-1407h-31zM880 1v1407h63v-1407h-63zM1037 1v1407h63v-1407h-63z M1163 1v1407h63v-1407h-63zM1289 1v1407h63v-1407h-63zM1383 1v1407h63v-1407h-63zM1541 1v1407h94v-1407h-94zM1666 1v1407h32v-1407h-32zM1729 0v1408h63v-1408h-63z" />
+<glyph unicode="&#xf02b;" d="M0 864v416q0 52 38 90t90 38h416q53 0 117 -26.5t102 -64.5l715 -714q37 -39 37 -91q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-53 0 -90 37l-715 716q-38 37 -64.5 101t-26.5 117zM192 1088q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5 t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf02c;" horiz-adv-x="1920" d="M0 864v416q0 52 38 90t90 38h416q53 0 117 -26.5t102 -64.5l715 -714q37 -39 37 -91q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-53 0 -90 37l-715 716q-38 37 -64.5 101t-26.5 117zM192 1088q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5 t-90.5 -37.5t-37.5 -90.5zM704 1408h224q53 0 117 -26.5t102 -64.5l715 -714q37 -39 37 -91q0 -53 -37 -90l-491 -492q-39 -37 -91 -37q-36 0 -59 14t-53 45l470 470q37 37 37 90q0 52 -37 91l-715 714q-38 38 -102 64.5t-117 26.5z" />
+<glyph unicode="&#xf02d;" horiz-adv-x="1664" d="M10 184q0 4 3 27t4 37q1 8 -3 21.5t-3 19.5q2 11 8 21t16.5 23.5t16.5 23.5q23 38 45 91.5t30 91.5q3 10 0.5 30t-0.5 28q3 11 17 28t17 23q21 36 42 92t25 90q1 9 -2.5 32t0.5 28q4 13 22 30.5t22 22.5q19 26 42.5 84.5t27.5 96.5q1 8 -3 25.5t-2 26.5q2 8 9 18t18 23 t17 21q8 12 16.5 30.5t15 35t16 36t19.5 32t26.5 23.5t36 11.5t47.5 -5.5l-1 -3q38 9 51 9h761q74 0 114 -56t18 -130l-274 -906q-36 -119 -71.5 -153.5t-128.5 -34.5h-869q-27 0 -38 -15q-11 -16 -1 -43q24 -70 144 -70h923q29 0 56 15.5t35 41.5l300 987q7 22 5 57 q38 -15 59 -43q40 -57 18 -129l-275 -906q-19 -64 -76.5 -107.5t-122.5 -43.5h-923q-77 0 -148.5 53.5t-99.5 131.5q-24 67 -2 127zM492 800q-4 -13 2 -22.5t20 -9.5h608q13 0 25.5 9.5t16.5 22.5l21 64q4 13 -2 22.5t-20 9.5h-608q-13 0 -25.5 -9.5t-16.5 -22.5zM575 1056 q-4 -13 2 -22.5t20 -9.5h608q13 0 25.5 9.5t16.5 22.5l21 64q4 13 -2 22.5t-20 9.5h-608q-13 0 -25.5 -9.5t-16.5 -22.5z" />
+<glyph unicode="&#xf02e;" horiz-adv-x="1280" d="M0 7v1289q0 34 19.5 62t52.5 41q21 9 44 9h1048q23 0 44 -9q33 -13 52.5 -41t19.5 -62v-1289q0 -34 -19.5 -62t-52.5 -41q-19 -8 -44 -8q-48 0 -83 32l-441 424l-441 -424q-36 -33 -83 -33q-23 0 -44 9q-33 13 -52.5 41t-19.5 62z" />
+<glyph unicode="&#xf02f;" horiz-adv-x="1664" d="M0 160v416q0 79 56.5 135.5t135.5 56.5h64v544q0 40 28 68t68 28h672q40 0 88 -20t76 -48l152 -152q28 -28 48 -76t20 -88v-256h64q79 0 135.5 -56.5t56.5 -135.5v-416q0 -13 -9.5 -22.5t-22.5 -9.5h-224v-160q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v160h-224 q-13 0 -22.5 9.5t-9.5 22.5zM384 0h896v256h-896v-256zM384 640h896v384h-160q-40 0 -68 28t-28 68v160h-640v-640zM1408 576q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf030;" horiz-adv-x="1920" d="M0 128v896q0 106 75 181t181 75h224l51 136q19 49 69.5 84.5t103.5 35.5h512q53 0 103.5 -35.5t69.5 -84.5l51 -136h224q106 0 181 -75t75 -181v-896q0 -106 -75 -181t-181 -75h-1408q-106 0 -181 75t-75 181zM512 576q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5 t131.5 316.5t-131.5 316.5t-316.5 131.5t-316.5 -131.5t-131.5 -316.5zM672 576q0 119 84.5 203.5t203.5 84.5t203.5 -84.5t84.5 -203.5t-84.5 -203.5t-203.5 -84.5t-203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf031;" horiz-adv-x="1664" d="M0 -128l2 79q23 7 56 12.5t57 10.5t49.5 14.5t44.5 29t31 50.5l237 616l280 724h75h53q8 -14 11 -21l205 -480q33 -78 106 -257.5t114 -274.5q15 -34 58 -144.5t72 -168.5q20 -45 35 -57q19 -15 88 -29.5t84 -20.5q6 -38 6 -57q0 -4 -0.5 -13t-0.5 -13q-63 0 -190 8 t-191 8q-76 0 -215 -7t-178 -8q0 43 4 78l131 28q1 0 12.5 2.5t15.5 3.5t14.5 4.5t15 6.5t11 8t9 11t2.5 14q0 16 -31 96.5t-72 177.5t-42 100l-450 2q-26 -58 -76.5 -195.5t-50.5 -162.5q0 -22 14 -37.5t43.5 -24.5t48.5 -13.5t57 -8.5t41 -4q1 -19 1 -58q0 -9 -2 -27 q-58 0 -174.5 10t-174.5 10q-8 0 -26.5 -4t-21.5 -4q-80 -14 -188 -14zM555 527q33 0 136.5 -2t160.5 -2q19 0 57 2q-87 253 -184 452z" />
+<glyph unicode="&#xf032;" horiz-adv-x="1408" d="M0 -128l2 94q15 4 85 16t106 27q7 12 12.5 27t8.5 33.5t5.5 32.5t3 37.5t0.5 34v35.5v30q0 982 -22 1025q-4 8 -22 14.5t-44.5 11t-49.5 7t-48.5 4.5t-30.5 3l-4 83q98 2 340 11.5t373 9.5q23 0 68.5 -0.5t67.5 -0.5q70 0 136.5 -13t128.5 -42t108 -71t74 -104.5 t28 -137.5q0 -52 -16.5 -95.5t-39 -72t-64.5 -57.5t-73 -45t-84 -40q154 -35 256.5 -134t102.5 -248q0 -100 -35 -179.5t-93.5 -130.5t-138 -85.5t-163.5 -48.5t-176 -14q-44 0 -132 3t-132 3q-106 0 -307 -11t-231 -12zM533 1292q0 -50 4 -151t4 -152q0 -27 -0.5 -80 t-0.5 -79q0 -46 1 -69q42 -7 109 -7q82 0 143 13t110 44.5t74.5 89.5t25.5 142q0 70 -29 122.5t-79 82t-108 43.5t-124 14q-50 0 -130 -13zM538.5 165q0.5 -37 4.5 -83.5t12 -66.5q74 -32 140 -32q376 0 376 335q0 114 -41 180q-27 44 -61.5 74t-67.5 46.5t-80.5 25 t-84 10.5t-94.5 2q-73 0 -101 -10q0 -53 -0.5 -159t-0.5 -158q0 -8 -1 -67.5t-0.5 -96.5z" />
+<glyph unicode="&#xf033;" horiz-adv-x="1024" d="M0 -126l17 85q6 2 81.5 21.5t111.5 37.5q28 35 41 101q1 7 62 289t114 543.5t52 296.5v25q-24 13 -54.5 18.5t-69.5 8t-58 5.5l19 103q33 -2 120 -6.5t149.5 -7t120.5 -2.5q48 0 98.5 2.5t121 7t98.5 6.5q-5 -39 -19 -89q-30 -10 -101.5 -28.5t-108.5 -33.5 q-8 -19 -14 -42.5t-9 -40t-7.5 -45.5t-6.5 -42q-27 -148 -87.5 -419.5t-77.5 -355.5q-2 -9 -13 -58t-20 -90t-16 -83.5t-6 -57.5l1 -18q17 -4 185 -31q-3 -44 -16 -99q-11 0 -32.5 -1.5t-32.5 -1.5q-29 0 -87 10t-86 10q-138 2 -206 2q-51 0 -143 -9t-121 -11z" />
+<glyph unicode="&#xf034;" horiz-adv-x="1792" d="M0 1023v383l81 1l54 -27q12 -5 211 -5q44 0 132 2t132 2q36 0 107.5 -0.5t107.5 -0.5h293q6 0 21 -0.5t20.5 0t16 3t17.5 9t15 17.5l42 1q4 0 14 -0.5t14 -0.5q2 -112 2 -336q0 -80 -5 -109q-39 -14 -68 -18q-25 44 -54 128q-3 9 -11 48t-14.5 73.5t-7.5 35.5 q-6 8 -12 12.5t-15.5 6t-13 2.5t-18 0.5t-16.5 -0.5q-17 0 -66.5 0.5t-74.5 0.5t-64 -2t-71 -6q-9 -81 -8 -136q0 -94 2 -388t2 -455q0 -16 -2.5 -71.5t0 -91.5t12.5 -69q40 -21 124 -42.5t120 -37.5q5 -40 5 -50q0 -14 -3 -29l-34 -1q-76 -2 -218 8t-207 10q-50 0 -151 -9 t-152 -9q-3 51 -3 52v9q17 27 61.5 43t98.5 29t78 27q19 42 19 383q0 101 -3 303t-3 303v117q0 2 0.5 15.5t0.5 25t-1 25.5t-3 24t-5 14q-11 12 -162 12q-33 0 -93 -12t-80 -26q-19 -13 -34 -72.5t-31.5 -111t-42.5 -53.5q-42 26 -56 44zM1414 109.5q9 18.5 42 18.5h80v1024 h-80q-33 0 -42 18.5t11 44.5l126 162q20 26 49 26t49 -26l126 -162q20 -26 11 -44.5t-42 -18.5h-80v-1024h80q33 0 42 -18.5t-11 -44.5l-126 -162q-20 -26 -49 -26t-49 26l-126 162q-20 26 -11 44.5z" />
+<glyph unicode="&#xf035;" d="M0 1023v383l81 1l54 -27q12 -5 211 -5q44 0 132 2t132 2q70 0 246.5 1t304.5 0.5t247 -4.5q33 -1 56 31l42 1q4 0 14 -0.5t14 -0.5q2 -112 2 -336q0 -80 -5 -109q-39 -14 -68 -18q-25 44 -54 128q-3 9 -11 47.5t-15 73.5t-7 36q-10 13 -27 19q-5 2 -66 2q-30 0 -93 1 t-103 1t-94 -2t-96 -7q-9 -81 -8 -136l1 -152v52q0 -55 1 -154t1.5 -180t0.5 -153q0 -16 -2.5 -71.5t0 -91.5t12.5 -69q40 -21 124 -42.5t120 -37.5q5 -40 5 -50q0 -14 -3 -29l-34 -1q-76 -2 -218 8t-207 10q-50 0 -151 -9t-152 -9q-3 51 -3 52v9q17 27 61.5 43t98.5 29 t78 27q7 16 11.5 74t6 145.5t1.5 155t-0.5 153.5t-0.5 89q0 7 -2.5 21.5t-2.5 22.5q0 7 0.5 44t1 73t0 76.5t-3 67.5t-6.5 32q-11 12 -162 12q-41 0 -163 -13.5t-138 -24.5q-19 -12 -34 -71.5t-31.5 -111.5t-42.5 -54q-42 26 -56 44zM5 -64q0 28 26 49q4 3 36 30t59.5 49 t57.5 41.5t42 19.5q13 0 20.5 -10.5t10 -28.5t2.5 -33.5t-1.5 -33t-1.5 -19.5h1024q0 2 -1.5 19.5t-1.5 33t2.5 33.5t10 28.5t20.5 10.5q12 0 42 -19.5t57.5 -41.5t59.5 -49t36 -30q26 -21 26 -49t-26 -49q-4 -3 -36 -30t-59.5 -49t-57.5 -41.5t-42 -19.5q-13 0 -20.5 10.5 t-10 28.5t-2.5 33.5t1.5 33t1.5 19.5h-1024q0 -2 1.5 -19.5t1.5 -33t-2.5 -33.5t-10 -28.5t-20.5 -10.5q-12 0 -42 19.5t-57.5 41.5t-59.5 49t-36 30q-26 21 -26 49z" />
+<glyph unicode="&#xf036;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 448v128q0 26 19 45t45 19h1280q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM0 832v128q0 26 19 45t45 19h1536 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1536q-26 0 -45 19t-19 45zM0 1216v128q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1152q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf037;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM128 832v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM384 448v128q0 26 19 45t45 19h896 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-896q-26 0 -45 19t-19 45zM512 1216v128q0 26 19 45t45 19h640q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-640q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf038;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM128 832v128q0 26 19 45t45 19h1536q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1536q-26 0 -45 19t-19 45zM384 448v128q0 26 19 45t45 19h1280 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM512 1216v128q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1152q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf039;" horiz-adv-x="1792" d="M0 64v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 448v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 832v128q0 26 19 45t45 19h1664 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 1216v128q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf03a;" horiz-adv-x="1792" d="M0 32v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5zM0 416v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5 t-9.5 22.5zM0 800v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192q-13 0 -22.5 9.5t-9.5 22.5zM0 1184v192q0 13 9.5 22.5t22.5 9.5h192q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-192 q-13 0 -22.5 9.5t-9.5 22.5zM384 32v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5zM384 416v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5 t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5zM384 800v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5zM384 1184v192q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-192 q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf03b;" horiz-adv-x="1792" d="M0 32v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5zM0 1184v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5 t-9.5 22.5zM32 704q0 14 9 23l288 288q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-576q0 -13 -9.5 -22.5t-22.5 -9.5q-14 0 -23 9l-288 288q-9 9 -9 23zM640 416v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088 q-13 0 -22.5 9.5t-9.5 22.5zM640 800v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf03c;" horiz-adv-x="1792" d="M0 32v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5zM0 416v576q0 13 9.5 22.5t22.5 9.5q14 0 23 -9l288 -288q9 -9 9 -23t-9 -23l-288 -288q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5z M0 1184v192q0 13 9.5 22.5t22.5 9.5h1728q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1728q-13 0 -22.5 9.5t-9.5 22.5zM640 416v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5 t-9.5 22.5zM640 800v192q0 13 9.5 22.5t22.5 9.5h1088q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1088q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf03d;" horiz-adv-x="1792" d="M0 288v704q0 119 84.5 203.5t203.5 84.5h704q119 0 203.5 -84.5t84.5 -203.5v-165l403 402q18 19 45 19q12 0 25 -5q39 -17 39 -59v-1088q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-403 403v-166q0 -119 -84.5 -203.5t-203.5 -84.5h-704q-119 0 -203.5 84.5 t-84.5 203.5z" />
+<glyph unicode="&#xf03e;" horiz-adv-x="1920" d="M0 32v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v1216q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-1216z M256 128v192l320 320l160 -160l512 512l416 -416v-448h-1408zM256 960q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136z" />
+<glyph unicode="&#xf040;" d="M0 -128v416l832 832l416 -416l-832 -832h-416zM128 128h128v-128h107l91 91l-235 235l-91 -91v-107zM298 384q0 -22 22 -22q10 0 17 7l542 542q7 7 7 17q0 22 -22 22q-10 0 -17 -7l-542 -542q-7 -7 -7 -17zM896 1184l166 165q36 38 90 38q53 0 91 -38l235 -234 q37 -39 37 -91q0 -53 -37 -90l-166 -166z" />
+<glyph unicode="&#xf041;" horiz-adv-x="1024" d="M0 896q0 212 150 362t362 150t362 -150t150 -362q0 -109 -33 -179l-364 -774q-16 -33 -47.5 -52t-67.5 -19t-67.5 19t-46.5 52l-365 774q-33 70 -33 179zM256 896q0 -106 75 -181t181 -75t181 75t75 181t-75 181t-181 75t-181 -75t-75 -181z" />
+<glyph unicode="&#xf042;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73v1088q-148 0 -273 -73t-198 -198t-73 -273z" />
+<glyph unicode="&#xf043;" horiz-adv-x="1024" d="M0 512q0 145 81 275q6 9 62.5 90.5t101 151t99.5 178t83 201.5q9 30 34 47t51 17t51.5 -17t33.5 -47q28 -93 83 -201.5t99.5 -178t101 -151t62.5 -90.5q81 -127 81 -275q0 -212 -150 -362t-362 -150t-362 150t-150 362zM256 384q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5 t37.5 90.5q0 36 -20 69q-1 1 -15.5 22.5t-25.5 38t-25 44t-21 50.5q-4 16 -21 16t-21 -16q-7 -23 -21 -50.5t-25 -44t-25.5 -38t-15.5 -22.5q-20 -33 -20 -69z" />
+<glyph unicode="&#xf044;" horiz-adv-x="1792" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q63 0 117 -25q15 -7 18 -23q3 -17 -9 -29l-49 -49q-14 -14 -32 -8q-23 6 -45 6h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v126q0 13 9 22l64 64q15 15 35 7t20 -29v-190 q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM640 256v288l672 672l288 -288l-672 -672h-288zM736 448h96v-96h56l116 116l-152 152l-116 -116v-56zM944 688q16 -16 33 1l350 350q17 17 1 33t-33 -1l-350 -350q-17 -17 -1 -33zM1376 1280l92 92 q28 28 68 28t68 -28l152 -152q28 -28 28 -68t-28 -68l-92 -92z" />
+<glyph unicode="&#xf045;" horiz-adv-x="1664" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h255q13 0 22.5 -9.5t9.5 -22.5q0 -27 -26 -32q-77 -26 -133 -60q-10 -4 -16 -4h-112q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v214q0 19 18 29q28 13 54 37q16 16 35 8q21 -9 21 -29v-259 q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM256 704q0 49 3.5 91t14 90t28 88t47 81.5t68.5 74t94.5 61.5t124.5 48.5t159.5 30.5t196.5 11h160v192q0 42 39 59q13 5 25 5q26 0 45 -19l384 -384q19 -19 19 -45t-19 -45l-384 -384 q-18 -19 -45 -19q-12 0 -25 5q-39 17 -39 59v192h-160q-323 0 -438 -131q-119 -137 -74 -473q3 -23 -20 -34q-8 -2 -12 -2q-16 0 -26 13q-10 14 -21 31t-39.5 68.5t-49.5 99.5t-38.5 114t-17.5 122z" />
+<glyph unicode="&#xf046;" horiz-adv-x="1664" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q63 0 117 -25q15 -7 18 -23q3 -17 -9 -29l-49 -49q-10 -10 -23 -10q-3 0 -9 2q-23 6 -45 6h-832q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v254q0 13 9 22l64 64q10 10 23 10q6 0 12 -3 q20 -8 20 -29v-318q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM257 768q0 33 24 57l110 110q24 24 57 24t57 -24l263 -263l647 647q24 24 57 24t57 -24l110 -110q24 -24 24 -57t-24 -57l-814 -814q-24 -24 -57 -24t-57 24l-430 430 q-24 24 -24 57z" />
+<glyph unicode="&#xf047;" horiz-adv-x="1792" d="M0 640q0 26 19 45l256 256q19 19 45 19t45 -19t19 -45v-128h384v384h-128q-26 0 -45 19t-19 45t19 45l256 256q19 19 45 19t45 -19l256 -256q19 -19 19 -45t-19 -45t-45 -19h-128v-384h384v128q0 26 19 45t45 19t45 -19l256 -256q19 -19 19 -45t-19 -45l-256 -256 q-19 -19 -45 -19t-45 19t-19 45v128h-384v-384h128q26 0 45 -19t19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19l-256 256q-19 19 -19 45t19 45t45 19h128v384h-384v-128q0 -26 -19 -45t-45 -19t-45 19l-256 256q-19 19 -19 45z" />
+<glyph unicode="&#xf048;" horiz-adv-x="1024" d="M0 -64v1408q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-678q4 11 13 19l710 710q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-678q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf049;" horiz-adv-x="1792" d="M0 -64v1408q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-678q4 11 13 19l710 710q19 19 32 13t13 -32v-710q4 11 13 19l710 710q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-710q0 -26 -13 -32t-32 13l-710 710q-9 9 -13 19v-678q0 -26 -19 -45 t-45 -19h-128q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf04a;" horiz-adv-x="1664" d="M122 640q0 26 19 45l710 710q19 19 32 13t13 -32v-710q5 11 13 19l710 710q19 19 32 13t13 -32v-1472q0 -26 -13 -32t-32 13l-710 710q-8 9 -13 19v-710q0 -26 -13 -32t-32 13l-710 710q-19 19 -19 45z" />
+<glyph unicode="&#xf04b;" horiz-adv-x="1408" d="M0 -96v1472q0 26 16.5 36t39.5 -3l1328 -738q23 -13 23 -31t-23 -31l-1328 -738q-23 -13 -39.5 -3t-16.5 36z" />
+<glyph unicode="&#xf04c;" d="M0 -64v1408q0 26 19 45t45 19h512q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45zM896 -64v1408q0 26 19 45t45 19h512q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf04d;" d="M0 -64v1408q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf04e;" horiz-adv-x="1664" d="M0 -96v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v710q0 26 13 32t32 -13l710 -710q19 -19 19 -45t-19 -45l-710 -710q-19 -19 -32 -13t-13 32v710q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32z" />
+<glyph unicode="&#xf050;" horiz-adv-x="1792" d="M0 -96v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v710q0 26 13 32t32 -13l710 -710q8 -8 13 -19v678q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v678q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32v710 q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32z" />
+<glyph unicode="&#xf051;" horiz-adv-x="1024" d="M0 -96v1472q0 26 13 32t32 -13l710 -710q8 -8 13 -19v678q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v678q-5 -10 -13 -19l-710 -710q-19 -19 -32 -13t-13 32z" />
+<glyph unicode="&#xf052;" horiz-adv-x="1538" d="M1 64v256q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM1 525q-6 13 13 32l710 710q19 19 45 19t45 -19l710 -710q19 -19 13 -32t-32 -13h-1472q-26 0 -32 13z" />
+<glyph unicode="&#xf053;" horiz-adv-x="1280" d="M154 704q0 26 19 45l742 742q19 19 45 19t45 -19l166 -166q19 -19 19 -45t-19 -45l-531 -531l531 -531q19 -19 19 -45t-19 -45l-166 -166q-19 -19 -45 -19t-45 19l-742 742q-19 19 -19 45z" />
+<glyph unicode="&#xf054;" horiz-adv-x="1280" d="M90 128q0 26 19 45l531 531l-531 531q-19 19 -19 45t19 45l166 166q19 19 45 19t45 -19l742 -742q19 -19 19 -45t-19 -45l-742 -742q-19 -19 -45 -19t-45 19l-166 166q-19 19 -19 45z" />
+<glyph unicode="&#xf055;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM320 576q0 -26 19 -45t45 -19h256v-256q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v256h256q26 0 45 19 t19 45v128q0 26 -19 45t-45 19h-256v256q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-256h-256q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf056;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM320 576q0 -26 19 -45t45 -19h768q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-768q-26 0 -45 -19 t-19 -45v-128z" />
+<glyph unicode="&#xf057;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM387 414q0 -27 19 -46l90 -90q19 -19 46 -19q26 0 45 19l181 181l181 -181q19 -19 45 -19q27 0 46 19 l90 90q19 19 19 46q0 26 -19 45l-181 181l181 181q19 19 19 45q0 27 -19 46l-90 90q-19 19 -46 19q-26 0 -45 -19l-181 -181l-181 181q-19 19 -45 19q-27 0 -46 -19l-90 -90q-19 -19 -19 -46q0 -26 19 -45l181 -181l-181 -181q-19 -19 -19 -45z" />
+<glyph unicode="&#xf058;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM252 621q0 -27 18 -45l362 -362q19 -19 45 -19q27 0 46 19l543 543q18 18 18 45q0 28 -18 46l-91 90 q-19 19 -45 19t-45 -19l-408 -407l-226 226q-19 19 -45 19t-45 -19l-91 -90q-18 -18 -18 -46z" />
+<glyph unicode="&#xf059;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM417 939q-15 -24 8 -42l132 -100q7 -6 19 -6q16 0 25 12q53 68 86 92q34 24 86 24q48 0 85.5 -26 t37.5 -59q0 -38 -20 -61t-68 -45q-63 -28 -115.5 -86.5t-52.5 -125.5v-36q0 -14 9 -23t23 -9h192q14 0 23 9t9 23q0 19 21.5 49.5t54.5 49.5q32 18 49 28.5t46 35t44.5 48t28 60.5t12.5 81q0 88 -55.5 163t-138.5 116t-170 41q-243 0 -371 -213zM640 160q0 -14 9 -23t23 -9 h192q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-192z" />
+<glyph unicode="&#xf05a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM512 160q0 -14 9 -23t23 -9h448q14 0 23 9t9 23v160q0 14 -9 23t-23 9h-96v512q0 14 -9 23t-23 9h-320 q-14 0 -23 -9t-9 -23v-160q0 -14 9 -23t23 -9h96v-320h-96q-14 0 -23 -9t-9 -23v-160zM640 1056q0 -14 9 -23t23 -9h192q14 0 23 9t9 23v160q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-160z" />
+<glyph unicode="&#xf05b;" d="M0 576v128q0 26 19 45t45 19h143q37 161 154.5 278.5t278.5 154.5v143q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-143q161 -37 278.5 -154.5t154.5 -278.5h143q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-143q-37 -161 -154.5 -278.5t-278.5 -154.5v-143 q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v143q-161 37 -278.5 154.5t-154.5 278.5h-143q-26 0 -45 19t-19 45zM339 512q32 -108 112.5 -188.5t188.5 -112.5v109q0 26 19 45t45 19h128q26 0 45 -19t19 -45v-109q108 32 188.5 112.5t112.5 188.5h-109q-26 0 -45 19 t-19 45v128q0 26 19 45t45 19h109q-32 108 -112.5 188.5t-188.5 112.5v-109q0 -26 -19 -45t-45 -19h-128q-26 0 -45 19t-19 45v109q-108 -32 -188.5 -112.5t-112.5 -188.5h109q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-109z" />
+<glyph unicode="&#xf05c;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM429 480q0 13 10 23l137 137l-137 137q-10 10 -10 23t10 23l146 146q10 10 23 10t23 -10l137 -137l137 137q10 10 23 10t23 -10l146 -146q10 -10 10 -23t-10 -23l-137 -137l137 -137q10 -10 10 -23t-10 -23l-146 -146q-10 -10 -23 -10t-23 10l-137 137 l-137 -137q-10 -10 -23 -10t-23 10l-146 146q-10 10 -10 23z" />
+<glyph unicode="&#xf05d;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM346 640q0 26 19 45l102 102q19 19 45 19t45 -19l147 -147l275 275q19 19 45 19t45 -19l102 -102q19 -19 19 -45t-19 -45l-422 -422q-19 -19 -45 -19t-45 19l-294 294q-19 19 -19 45z" />
+<glyph unicode="&#xf05e;" d="M0 643q0 157 61 299.5t163.5 245.5t245 164t298.5 61t298.5 -61t245 -164t163.5 -245.5t61 -299.5t-61 -300t-163.5 -246t-245 -164t-298.5 -61t-298.5 61t-245 164t-163.5 246t-61 300zM224 643q0 -162 89 -299l755 754q-135 91 -300 91q-148 0 -273 -73t-198 -199 t-73 -274zM471 185q137 -89 297 -89q111 0 211.5 43.5t173.5 116.5t116 174.5t43 212.5q0 161 -87 295z" />
+<glyph unicode="&#xf060;" d="M64 576q0 52 37 91l651 650q38 38 91 38q52 0 90 -38l75 -74q38 -38 38 -91t-38 -91l-293 -293h704q52 0 84.5 -37.5t32.5 -90.5v-128q0 -53 -32.5 -90.5t-84.5 -37.5h-704l293 -294q38 -36 38 -90t-38 -90l-75 -76q-37 -37 -90 -37q-52 0 -91 37l-651 652q-37 37 -37 90 z" />
+<glyph unicode="&#xf061;" d="M0 512v128q0 53 32.5 90.5t84.5 37.5h704l-293 294q-38 36 -38 90t38 90l75 75q38 38 90 38q53 0 91 -38l651 -651q37 -35 37 -90q0 -54 -37 -91l-651 -651q-39 -37 -91 -37q-51 0 -90 37l-75 75q-38 38 -38 91t38 91l293 293h-704q-52 0 -84.5 37.5t-32.5 90.5z" />
+<glyph unicode="&#xf062;" horiz-adv-x="1664" d="M53 565q0 53 38 91l651 651q35 37 90 37q54 0 91 -37l651 -651q37 -39 37 -91q0 -51 -37 -90l-75 -75q-38 -38 -91 -38q-54 0 -90 38l-294 293v-704q0 -52 -37.5 -84.5t-90.5 -32.5h-128q-53 0 -90.5 32.5t-37.5 84.5v704l-294 -293q-36 -38 -90 -38t-90 38l-75 75 q-38 38 -38 90z" />
+<glyph unicode="&#xf063;" horiz-adv-x="1664" d="M53 704q0 53 38 91l74 75q39 37 91 37q53 0 90 -37l294 -294v704q0 52 38 90t90 38h128q52 0 90 -38t38 -90v-704l294 294q37 37 90 37q52 0 91 -37l75 -75q37 -39 37 -91q0 -53 -37 -90l-651 -652q-39 -37 -91 -37q-53 0 -90 37l-651 652q-38 36 -38 90z" />
+<glyph unicode="&#xf064;" horiz-adv-x="1792" d="M0 416q0 199 53 333q162 403 875 403h224v256q0 26 19 45t45 19t45 -19l512 -512q19 -19 19 -45t-19 -45l-512 -512q-19 -19 -45 -19t-45 19t-19 45v256h-224q-98 0 -175.5 -6t-154 -21.5t-133 -42.5t-105.5 -69.5t-80 -101t-48.5 -138.5t-17.5 -181q0 -55 5 -123 q0 -6 2.5 -23.5t2.5 -26.5q0 -15 -8.5 -25t-23.5 -10q-16 0 -28 17q-7 9 -13 22t-13.5 30t-10.5 24q-127 285 -127 451z" />
+<glyph unicode="&#xf065;" d="M0 -64v448q0 26 19 45t45 19t45 -19l144 -144l332 332q10 10 23 10t23 -10l114 -114q10 -10 10 -23t-10 -23l-332 -332l144 -144q19 -19 19 -45t-19 -45t-45 -19h-448q-26 0 -45 19t-19 45zM781 800q0 13 10 23l332 332l-144 144q-19 19 -19 45t19 45t45 19h448 q26 0 45 -19t19 -45v-448q0 -26 -19 -45t-45 -19t-45 19l-144 144l-332 -332q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23z" />
+<glyph unicode="&#xf066;" d="M13 32q0 13 10 23l332 332l-144 144q-19 19 -19 45t19 45t45 19h448q26 0 45 -19t19 -45v-448q0 -26 -19 -45t-45 -19t-45 19l-144 144l-332 -332q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23zM768 704v448q0 26 19 45t45 19t45 -19l144 -144l332 332q10 10 23 10 t23 -10l114 -114q10 -10 10 -23t-10 -23l-332 -332l144 -144q19 -19 19 -45t-19 -45t-45 -19h-448q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf067;" horiz-adv-x="1408" d="M0 608v192q0 40 28 68t68 28h416v416q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-416h416q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-416v-416q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68v416h-416q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf068;" horiz-adv-x="1408" d="M0 608v192q0 40 28 68t68 28h1216q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-1216q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf069;" horiz-adv-x="1664" d="M122.5 408.5q13.5 51.5 59.5 77.5l266 154l-266 154q-46 26 -59.5 77.5t12.5 97.5l64 110q26 46 77.5 59.5t97.5 -12.5l266 -153v307q0 52 38 90t90 38h128q52 0 90 -38t38 -90v-307l266 153q46 26 97.5 12.5t77.5 -59.5l64 -110q26 -46 12.5 -97.5t-59.5 -77.5 l-266 -154l266 -154q46 -26 59.5 -77.5t-12.5 -97.5l-64 -110q-26 -46 -77.5 -59.5t-97.5 12.5l-266 153v-307q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v307l-266 -153q-46 -26 -97.5 -12.5t-77.5 59.5l-64 110q-26 46 -12.5 97.5z" />
+<glyph unicode="&#xf06a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM624 1126l17 -621q0 -10 10 -17.5t24 -7.5h185q14 0 23.5 7.5t10.5 17.5l18 621q0 12 -10 18 q-10 8 -24 8h-220q-14 0 -24 -8q-10 -6 -10 -18zM640 161q0 -13 10 -23t23 -10h192q13 0 22 9.5t9 23.5v190q0 14 -9 23.5t-22 9.5h-192q-13 0 -23 -10t-10 -23v-190z" />
+<glyph unicode="&#xf06b;" d="M0 544v320q0 14 9 23t23 9h440q-93 0 -158.5 65.5t-65.5 158.5t65.5 158.5t158.5 65.5q107 0 168 -77l128 -165l128 165q61 77 168 77q93 0 158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5h440q14 0 23 -9t9 -23v-320q0 -14 -9 -23t-23 -9h-96v-416q0 -40 -28 -68 t-68 -28h-1088q-40 0 -68 28t-28 68v416h-96q-14 0 -23 9t-9 23zM376 1120q0 -40 28 -68t68 -28h195l-126 161q-26 31 -69 31q-40 0 -68 -28t-28 -68zM608 180q0 -25 18 -38.5t46 -13.5h192q28 0 46 13.5t18 38.5v56v468v192h-320v-192v-468v-56zM870 1024h194q40 0 68 28 t28 68t-28 68t-68 28q-43 0 -69 -31z" />
+<glyph unicode="&#xf06c;" horiz-adv-x="1792" d="M0 121q0 35 31 73.5t68 65.5t68 56t31 48q0 4 -14 38t-16 44q-9 51 -9 104q0 115 43.5 220t119 184.5t170.5 139t204 95.5q55 18 145 25.5t179.5 9t178.5 6t163.5 24t113.5 56.5l29.5 29.5t29.5 28t27 20t36.5 16t43.5 4.5q39 0 70.5 -46t47.5 -112t24 -124t8 -96 q0 -95 -20 -193q-46 -224 -184.5 -383t-357.5 -268q-214 -108 -438 -108q-148 0 -286 47q-15 5 -88 42t-96 37q-16 0 -39.5 -32t-45 -70t-52.5 -70t-60 -32q-30 0 -51 11t-31 24t-27 42q-2 4 -6 11t-5.5 10t-3 9.5t-1.5 13.5zM384 448q0 -26 19 -45t45 -19q24 0 45 19 q27 24 74 71t67 66q137 124 268.5 176t313.5 52q26 0 45 19t19 45t-19 45t-45 19q-172 0 -318 -49.5t-259.5 -134t-235.5 -219.5q-19 -21 -19 -45z" />
+<glyph unicode="&#xf06d;" horiz-adv-x="1408" d="M0 -160q0 13 9.5 22.5t22.5 9.5h1344q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-1344q-13 0 -22.5 9.5t-9.5 22.5v64zM256 640q0 78 24.5 144t64 112.5t87.5 88t96 77.5t87.5 72t64 81.5t24.5 96.5q0 94 -66 224l3 -1l-1 1q90 -41 160 -83t138.5 -100 t113.5 -122.5t72.5 -150.5t27.5 -184q0 -78 -24.5 -144t-64 -112.5t-87.5 -88t-96 -77.5t-87.5 -72t-64 -81.5t-24.5 -96.5q0 -96 67 -224l-4 1l1 -1q-90 41 -160 83t-138.5 100t-113.5 122.5t-72.5 150.5t-27.5 184z" />
+<glyph unicode="&#xf06e;" horiz-adv-x="1792" d="M0 576q0 34 20 69q140 229 376.5 368t499.5 139t499.5 -139t376.5 -368q20 -35 20 -69t-20 -69q-140 -230 -376.5 -368.5t-499.5 -138.5t-499.5 139t-376.5 368q-20 35 -20 69zM128 576q133 -205 333.5 -326.5t434.5 -121.5t434.5 121.5t333.5 326.5q-152 236 -381 353 q61 -104 61 -225q0 -185 -131.5 -316.5t-316.5 -131.5t-316.5 131.5t-131.5 316.5q0 121 61 225q-229 -117 -381 -353zM592 704q0 -20 14 -34t34 -14t34 14t14 34q0 86 61 147t147 61q20 0 34 14t14 34t-14 34t-34 14q-125 0 -214.5 -89.5t-89.5 -214.5z" />
+<glyph unicode="&#xf070;" horiz-adv-x="1792" d="M0 576q0 38 20 69q153 235 380 371t496 136q89 0 180 -17l54 97q10 16 28 16q5 0 18 -6t31 -15.5t33 -18.5t31.5 -18.5t19.5 -11.5q16 -10 16 -27q0 -7 -1 -9q-105 -188 -315 -566t-316 -567l-49 -89q-10 -16 -28 -16q-12 0 -134 70q-16 10 -16 28q0 12 44 87 q-143 65 -263.5 173t-208.5 245q-20 31 -20 69zM128 576q167 -258 427 -375l78 141q-87 63 -136 159t-49 203q0 121 61 225q-229 -117 -381 -353zM592 704q0 -20 14 -34t34 -14t34 14t14 34q0 86 61 147t147 61q20 0 34 14t14 34t-14 34t-34 14q-125 0 -214.5 -89.5 t-89.5 -214.5zM896 0l74 132q212 18 392.5 137t301.5 307q-115 179 -282 294l63 112q95 -64 182.5 -153t144.5 -184q20 -34 20 -69t-20 -69q-39 -64 -109 -145q-150 -172 -347.5 -267t-419.5 -95zM1056 286l280 502q8 -45 8 -84q0 -139 -79 -253.5t-209 -164.5z" />
+<glyph unicode="&#xf071;" horiz-adv-x="1792" d="M16 61l768 1408q17 31 47 49t65 18t65 -18t47 -49l768 -1408q35 -63 -2 -126q-17 -29 -46.5 -46t-63.5 -17h-1536q-34 0 -63.5 17t-46.5 46q-37 63 -2 126zM752 992l17 -457q0 -10 10 -16.5t24 -6.5h185q14 0 23.5 6.5t10.5 16.5l18 459q0 12 -10 19q-13 11 -24 11h-220 q-11 0 -24 -11q-10 -7 -10 -21zM768 161q0 -14 9.5 -23.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 23.5v190q0 14 -9.5 23.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -23.5v-190z" />
+<glyph unicode="&#xf072;" horiz-adv-x="1408" d="M0 477q-1 13 9 25l96 97q9 9 23 9q6 0 8 -1l194 -53l259 259l-508 279q-14 8 -17 24q-2 16 9 27l128 128q14 13 30 8l665 -159l160 160q76 76 172 108t148 -12q44 -52 12 -148t-108 -172l-161 -161l160 -696q5 -19 -12 -33l-128 -96q-7 -6 -19 -6q-4 0 -7 1q-15 3 -21 16 l-279 508l-259 -259l53 -194q5 -17 -8 -31l-96 -96q-9 -9 -23 -9h-2q-15 2 -24 13l-189 252l-252 189q-11 7 -13 23z" />
+<glyph unicode="&#xf073;" horiz-adv-x="1664" d="M0 -128v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90v-1280q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90zM128 -128h288v288h-288v-288zM128 224 h288v320h-288v-320zM128 608h288v288h-288v-288zM384 1088q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v288q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-288zM480 -128h320v288h-320v-288zM480 224h320v320h-320v-320zM480 608h320v288h-320 v-288zM864 -128h320v288h-320v-288zM864 224h320v320h-320v-320zM864 608h320v288h-320v-288zM1152 1088q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v288q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-288zM1248 -128h288v288h-288v-288z M1248 224h288v320h-288v-320zM1248 608h288v288h-288v-288z" />
+<glyph unicode="&#xf074;" horiz-adv-x="1792" d="M0 160v192q0 14 9 23t23 9h224q48 0 87 15t69 45t51 61.5t45 77.5q32 62 78 171q29 66 49.5 111t54 105t64 100t74 83t90 68.5t106.5 42t128 16.5h256v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192 h-256q-48 0 -87 -15t-69 -45t-51 -61.5t-45 -77.5q-32 -62 -78 -171q-29 -66 -49.5 -111t-54 -105t-64 -100t-74 -83t-90 -68.5t-106.5 -42t-128 -16.5h-224q-14 0 -23 9t-9 23zM0 1056v192q0 14 9 23t23 9h224q250 0 410 -225q-60 -92 -137 -273q-22 45 -37 72.5 t-40.5 63.5t-51 56.5t-63 35t-81.5 14.5h-224q-14 0 -23 9t-9 23zM743 353q59 93 136 273q22 -45 37 -72.5t40.5 -63.5t51 -56.5t63 -35t81.5 -14.5h256v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192 q-32 0 -85 -0.5t-81 -1t-73 1t-71 5t-64 10.5t-63 18.5t-58 28.5t-59 40t-55 53.5t-56 69.5z" />
+<glyph unicode="&#xf075;" horiz-adv-x="1792" d="M0 640q0 130 71 248.5t191 204.5t286 136.5t348 50.5q244 0 450 -85.5t326 -233t120 -321.5t-120 -321.5t-326 -233t-450 -85.5q-70 0 -145 8q-198 -175 -460 -242q-49 -14 -114 -22q-17 -2 -30.5 9t-17.5 29v1q-3 4 -0.5 12t2 10t4.5 9.5l6 9t7 8.5t8 9q7 8 31 34.5 t34.5 38t31 39.5t32.5 51t27 59t26 76q-157 89 -247.5 220t-90.5 281z" />
+<glyph unicode="&#xf076;" d="M0 576v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-128q0 -52 23.5 -90t53.5 -57t71 -30t64 -13t44 -2t44 2t64 13t71 30t53.5 57t23.5 90v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-128q0 -201 -98.5 -362t-274 -251.5t-395.5 -90.5t-395.5 90.5t-274 251.5 t-98.5 362zM0 960v384q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-384q0 -26 -19 -45t-45 -19h-384q-26 0 -45 19t-19 45zM1024 960v384q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-384q0 -26 -19 -45t-45 -19h-384q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf077;" horiz-adv-x="1792" d="M90 250.5q0 26.5 19 45.5l742 741q19 19 45 19t45 -19l742 -741q19 -19 19 -45.5t-19 -45.5l-166 -165q-19 -19 -45 -19t-45 19l-531 531l-531 -531q-19 -19 -45 -19t-45 19l-166 165q-19 19 -19 45.5z" />
+<glyph unicode="&#xf078;" horiz-adv-x="1792" d="M90 773.5q0 26.5 19 45.5l166 165q19 19 45 19t45 -19l531 -531l531 531q19 19 45 19t45 -19l166 -165q19 -19 19 -45.5t-19 -45.5l-742 -741q-19 -19 -45 -19t-45 19l-742 741q-19 19 -19 45.5z" />
+<glyph unicode="&#xf079;" horiz-adv-x="1920" d="M0 704q0 24 15 41l320 384q19 22 49 22t49 -22l320 -384q15 -17 15 -41q0 -26 -19 -45t-45 -19h-192v-384h576q16 0 25 -11l160 -192q7 -11 7 -21q0 -13 -9.5 -22.5t-22.5 -9.5h-960q-8 0 -13.5 2t-9 7t-5.5 8t-3 11.5t-1 11.5v13v11v160v416h-192q-26 0 -45 19t-19 45z M640 1120q0 13 9.5 22.5t22.5 9.5h960q8 0 13.5 -2t9 -7t5.5 -8t3 -11.5t1 -11.5v-13v-11v-160v-416h192q26 0 45 -19t19 -45q0 -24 -15 -41l-320 -384q-20 -23 -49 -23t-49 23l-320 384q-15 17 -15 41q0 26 19 45t45 19h192v384h-576q-16 0 -25 12l-160 192q-7 9 -7 20z " />
+<glyph unicode="&#xf07a;" horiz-adv-x="1664" d="M0 1216q0 26 19 45t45 19h256q16 0 28.5 -6.5t20 -15.5t13 -24.5t7.5 -26.5t5.5 -29.5t4.5 -25.5h1201q26 0 45 -19t19 -45v-512q0 -24 -16 -42.5t-41 -21.5l-1044 -122q1 -7 4.5 -21.5t6 -26.5t2.5 -22q0 -16 -24 -64h920q26 0 45 -19t19 -45t-19 -45t-45 -19h-1024 q-26 0 -45 19t-19 45q0 14 11 39.5t29.5 59.5t20.5 38l-177 823h-204q-26 0 -45 19t-19 45zM384 0q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM1280 0q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5 t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf07b;" horiz-adv-x="1664" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h672q92 0 158 -66t66 -158v-704q0 -92 -66 -158t-158 -66h-1216q-92 0 -158 66t-66 158z" />
+<glyph unicode="&#xf07c;" horiz-adv-x="1920" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h544q92 0 158 -66t66 -158v-160h-832q-94 0 -197 -47.5t-164 -119.5l-337 -396l-5 -6q0 4 -0.5 12.5t-0.5 12.5zM73 56q0 31 31 66l336 396q43 51 120.5 86.5t143.5 35.5h1088q34 0 60.5 -13t26.5 -43 q0 -31 -31 -66l-336 -396q-43 -51 -120.5 -86.5t-143.5 -35.5h-1088q-34 0 -60.5 13t-26.5 43z" />
+<glyph unicode="&#xf07d;" horiz-adv-x="768" d="M64 64q0 26 19 45t45 19h128v1024h-128q-26 0 -45 19t-19 45t19 45l256 256q19 19 45 19t45 -19l256 -256q19 -19 19 -45t-19 -45t-45 -19h-128v-1024h128q26 0 45 -19t19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19l-256 256q-19 19 -19 45z" />
+<glyph unicode="&#xf07e;" horiz-adv-x="1792" d="M0 640q0 26 19 45l256 256q19 19 45 19t45 -19t19 -45v-128h1024v128q0 26 19 45t45 19t45 -19l256 -256q19 -19 19 -45t-19 -45l-256 -256q-19 -19 -45 -19t-45 19t-19 45v128h-1024v-128q0 -26 -19 -45t-45 -19t-45 19l-256 256q-19 19 -19 45z" />
+<glyph unicode="&#xf080;" horiz-adv-x="1920" d="M0 32v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v1216q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-1216z M256 128v384h256v-384h-256zM640 128v896h256v-896h-256zM1024 128v640h256v-640h-256zM1408 128v1024h256v-1024h-256z" />
+<glyph unicode="&#xf081;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 286q148 -94 322 -94q112 0 210 35.5t168 95t120.5 137t75 162t24.5 168.5q0 18 -1 27q63 45 105 109 q-56 -25 -121 -34q68 40 93 117q-65 -38 -134 -51q-61 66 -153 66q-87 0 -148.5 -61.5t-61.5 -148.5q0 -29 5 -48q-129 7 -242 65t-192 155q-29 -50 -29 -106q0 -114 91 -175q-47 1 -100 26v-2q0 -75 50 -133.5t123 -72.5q-29 -8 -51 -8q-13 0 -39 4q21 -63 74.5 -104 t121.5 -42q-116 -90 -261 -90q-26 0 -50 3z" />
+<glyph unicode="&#xf082;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-192v608h203l30 224h-233v143q0 54 28 83t96 29l132 1v207q-96 9 -180 9q-136 0 -218 -80.5t-82 -225.5v-166h-224v-224h224v-608h-544 q-119 0 -203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf083;" horiz-adv-x="1792" d="M0 0v1280q0 53 37.5 90.5t90.5 37.5h1536q53 0 90.5 -37.5t37.5 -90.5v-1280q0 -53 -37.5 -90.5t-90.5 -37.5h-1536q-53 0 -90.5 37.5t-37.5 90.5zM128 0h1536v128h-1536v-128zM128 1024h1536v118v138h-828l-64 -128h-644v-128zM256 1216h384v128h-384v-128zM512 574 q0 -159 112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5t-112.5 271.5t-271.5 112.5t-271.5 -112.5t-112.5 -271.5zM640 574q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181zM736 576q0 -14 9 -23t23 -9t23 9t9 23q0 40 28 68t68 28q14 0 23 9 t9 23t-9 23t-23 9q-66 0 -113 -47t-47 -113z" />
+<glyph unicode="&#xf084;" horiz-adv-x="1792" d="M0 752q0 160 95 313t248 248t313 95q163 0 265.5 -102.5t102.5 -265.5q0 -189 -131 -365l355 -355l96 96q-3 3 -26 24.5t-40 38.5t-33 36.5t-16 28.5q0 17 49 66t66 49q13 0 23 -10q6 -6 46 -44.5t82 -79.5t86.5 -86t73 -78t28.5 -41q0 -17 -49 -66t-66 -49 q-9 0 -28.5 16t-36.5 33t-38.5 40t-24.5 26l-96 -96l220 -220q28 -28 28 -68q0 -42 -39 -81t-81 -39q-40 0 -68 28l-671 671q-176 -131 -365 -131q-163 0 -265.5 102.5t-102.5 265.5zM192 768q0 -80 56 -136t136 -56t136 56t56 136q0 42 -19 83q41 -19 83 -19q80 0 136 56 t56 136t-56 136t-136 56t-136 -56t-56 -136q0 -42 19 -83q-41 19 -83 19q-80 0 -136 -56t-56 -136z" />
+<glyph unicode="&#xf085;" horiz-adv-x="1920" d="M0 549v185q0 10 7 19.5t16 10.5l155 24q11 35 32 76q-34 48 -90 115q-7 11 -7 20q0 12 7 20q22 30 82 89t79 59q11 0 21 -7l115 -90q34 18 77 32q11 108 23 154q7 24 30 24h186q11 0 20 -7.5t10 -17.5l23 -153q34 -10 75 -31l118 89q8 7 20 7q11 0 21 -8 q144 -133 144 -160q0 -9 -7 -19q-12 -16 -42 -54t-45 -60q23 -48 34 -82l152 -23q10 -2 17 -10.5t7 -19.5v-185q0 -10 -7 -19.5t-16 -10.5l-155 -24q-11 -35 -32 -76q34 -48 90 -115q7 -10 7 -20q0 -12 -7 -19q-23 -30 -82.5 -89.5t-78.5 -59.5q-11 0 -21 7l-115 90 q-37 -19 -77 -31q-11 -108 -23 -155q-7 -24 -30 -24h-186q-11 0 -20 7.5t-10 17.5l-23 153q-34 10 -75 31l-118 -89q-7 -7 -20 -7q-11 0 -21 8q-144 133 -144 160q0 9 7 19q10 14 41 53t47 61q-23 44 -35 82l-152 24q-10 1 -17 9.5t-7 19.5zM384 640q0 -106 75 -181t181 -75 t181 75t75 181t-75 181t-181 75t-181 -75t-75 -181zM1152 58v140q0 16 149 31q13 29 30 52q-51 113 -51 138q0 4 4 7q4 2 35 20t59 34t30 16q8 0 46 -46.5t52 -67.5q20 2 30 2t30 -2q51 71 92 112l6 2q4 0 124 -70q4 -3 4 -7q0 -25 -51 -138q17 -23 30 -52q149 -15 149 -31 v-140q0 -16 -149 -31q-12 -27 -30 -52q51 -113 51 -138q0 -4 -4 -7q-122 -71 -124 -71q-8 0 -46 47t-52 68q-20 -2 -30 -2t-30 2q-14 -21 -52 -68t-46 -47q-2 0 -124 71q-4 3 -4 7q0 25 51 138q-18 25 -30 52q-149 15 -149 31zM1152 1082v140q0 16 149 31q13 29 30 52 q-51 113 -51 138q0 4 4 7q4 2 35 20t59 34t30 16q8 0 46 -46.5t52 -67.5q20 2 30 2t30 -2q51 71 92 112l6 2q4 0 124 -70q4 -3 4 -7q0 -25 -51 -138q17 -23 30 -52q149 -15 149 -31v-140q0 -16 -149 -31q-12 -27 -30 -52q51 -113 51 -138q0 -4 -4 -7q-122 -71 -124 -71 q-8 0 -46 47t-52 68q-20 -2 -30 -2t-30 2q-14 -21 -52 -68t-46 -47q-2 0 -124 71q-4 3 -4 7q0 25 51 138q-18 25 -30 52q-149 15 -149 31zM1408 128q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5q0 52 -38 90t-90 38t-90 -38t-38 -90zM1408 1152q0 -53 37.5 -90.5 t90.5 -37.5t90.5 37.5t37.5 90.5q0 52 -38 90t-90 38t-90 -38t-38 -90z" />
+<glyph unicode="&#xf086;" horiz-adv-x="1792" d="M0 768q0 139 94 257t256.5 186.5t353.5 68.5t353.5 -68.5t256.5 -186.5t94 -257t-94 -257t-256.5 -186.5t-353.5 -68.5q-86 0 -176 16q-124 -88 -278 -128q-36 -9 -86 -16h-3q-11 0 -20.5 8t-11.5 21q-1 3 -1 6.5t0.5 6.5t2 6l2.5 5t3.5 5.5t4 5t4.5 5t4 4.5q5 6 23 25 t26 29.5t22.5 29t25 38.5t20.5 44q-124 72 -195 177t-71 224zM616 132q58 -4 88 -4q161 0 309 45t264 129q125 92 192 212t67 254q0 77 -23 152q129 -71 204 -178t75 -230q0 -120 -71 -224.5t-195 -176.5q10 -24 20.5 -44t25 -38.5t22.5 -29t26 -29.5t23 -25q1 -1 4 -4.5 t4.5 -5t4 -5t3.5 -5.5l2.5 -5t2 -6t0.5 -6.5t-1 -6.5q-3 -14 -13 -22t-22 -7q-50 7 -86 16q-154 40 -278 128q-90 -16 -176 -16q-271 0 -472 132z" />
+<glyph unicode="&#xf087;" d="M0 128v640q0 53 37.5 90.5t90.5 37.5h274q36 24 137 155q58 75 107 128q24 25 35.5 85.5t30.5 126.5t62 108q39 37 90 37q84 0 151 -32.5t102 -101.5t35 -186q0 -93 -48 -192h176q104 0 180 -76t76 -179q0 -89 -49 -163q9 -33 9 -69q0 -77 -38 -144q3 -21 3 -43 q0 -101 -60 -178q1 -139 -85 -219.5t-227 -80.5h-36h-93q-96 0 -189.5 22.5t-216.5 65.5q-116 40 -138 40h-288q-53 0 -90.5 37.5t-37.5 90.5zM128 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM384 128h32q13 0 31.5 -3t33 -6.5t38 -11t35 -11.5 t35.5 -12.5t29 -10.5q211 -73 342 -73h121q192 0 192 167q0 26 -5 56q30 16 47.5 52.5t17.5 73.5t-18 69q53 50 53 119q0 25 -10 55.5t-25 47.5q32 1 53.5 47t21.5 81q0 51 -39 89.5t-89 38.5h-352q0 58 48 159.5t48 160.5q0 98 -32 145t-128 47q-26 -26 -38 -85 t-30.5 -125.5t-59.5 -109.5q-22 -23 -77 -91q-4 -5 -23 -30t-31.5 -41t-34.5 -42.5t-40 -44t-38.5 -35.5t-40 -27t-35.5 -9h-32v-640z" />
+<glyph unicode="&#xf088;" d="M0 512v640q0 53 37.5 90.5t90.5 37.5h288q22 0 138 40q128 44 223 66t200 22h112q140 0 226.5 -79t85.5 -216v-5q60 -77 60 -178q0 -22 -3 -43q38 -67 38 -144q0 -36 -9 -69q49 -74 49 -163q0 -103 -76 -179t-180 -76h-176q48 -99 48 -192q0 -118 -35 -186 q-35 -69 -102 -101.5t-151 -32.5q-51 0 -90 37q-34 33 -54 82t-25.5 90.5t-17.5 84.5t-31 64q-48 50 -107 127q-101 131 -137 155h-274q-53 0 -90.5 37.5t-37.5 90.5zM128 1088q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM384 512h32q16 0 35.5 -9 t40 -27t38.5 -35.5t40 -44t34.5 -42.5t31.5 -41t23 -30q55 -68 77 -91q41 -43 59.5 -109.5t30.5 -125.5t38 -85q96 0 128 47t32 145q0 59 -48 160.5t-48 159.5h352q50 0 89 38.5t39 89.5q0 35 -21.5 81t-53.5 47q15 17 25 47.5t10 55.5q0 69 -53 119q18 32 18 69t-17.5 73.5 t-47.5 52.5q5 30 5 56q0 85 -49 126t-136 41h-128q-131 0 -342 -73q-5 -2 -29 -10.5t-35.5 -12.5t-35 -11.5t-38 -11t-33 -6.5t-31.5 -3h-32v-640z" />
+<glyph unicode="&#xf089;" horiz-adv-x="896" d="M0 889q0 37 56 46l502 73l225 455q19 41 49 41v-1339l-449 -236q-22 -12 -40 -12q-21 0 -31.5 14.5t-10.5 35.5q0 6 2 20l86 500l-364 354q-25 27 -25 48z" />
+<glyph unicode="&#xf08a;" horiz-adv-x="1792" d="M0 940q0 220 127 344t351 124q62 0 126.5 -21.5t120 -58t95.5 -68.5t76 -68q36 36 76 68t95.5 68.5t120 58t126.5 21.5q224 0 351 -124t127 -344q0 -221 -229 -450l-623 -600q-18 -18 -44 -18t-44 18l-624 602q-10 8 -27.5 26t-55.5 65.5t-68 97.5t-53.5 121t-23.5 138z M128 940q0 -168 187 -355l581 -560l580 559q188 188 188 356q0 81 -21.5 143t-55 98.5t-81.5 59.5t-94 31t-98 8t-112 -25.5t-110.5 -64t-86.5 -72t-60 -61.5q-18 -22 -49 -22t-49 22q-24 28 -60 61.5t-86.5 72t-110.5 64t-112 25.5t-98 -8t-94 -31t-81.5 -59.5t-55 -98.5 t-21.5 -143z" />
+<glyph unicode="&#xf08b;" horiz-adv-x="1664" d="M0 288v704q0 119 84.5 203.5t203.5 84.5h320q13 0 22.5 -9.5t9.5 -22.5q0 -4 1 -20t0.5 -26.5t-3 -23.5t-10 -19.5t-20.5 -6.5h-320q-66 0 -113 -47t-47 -113v-704q0 -66 47 -113t113 -47h288h11h13t11.5 -1t11.5 -3t8 -5.5t7 -9t2 -13.5q0 -4 1 -20t0.5 -26.5t-3 -23.5 t-10 -19.5t-20.5 -6.5h-320q-119 0 -203.5 84.5t-84.5 203.5zM384 448v384q0 26 19 45t45 19h448v288q0 26 19 45t45 19t45 -19l544 -544q19 -19 19 -45t-19 -45l-544 -544q-19 -19 -45 -19t-45 19t-19 45v288h-448q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf08c;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM223 1030q0 -51 35.5 -85.5t92.5 -34.5h1q59 0 95 34.5t36 85.5q-1 52 -36 86t-93 34t-94.5 -34t-36.5 -86z M237 122h231v694h-231v-694zM595 122h231v388q0 38 7 56q15 35 45 59.5t74 24.5q116 0 116 -157v-371h231v398q0 154 -73 233t-193 79q-136 0 -209 -117h2v101h-231q3 -66 0 -694z" />
+<glyph unicode="&#xf08d;" horiz-adv-x="1152" d="M0 320q0 123 78.5 221.5t177.5 98.5v512q-52 0 -90 38t-38 90t38 90t90 38h640q52 0 90 -38t38 -90t-38 -90t-90 -38v-512q99 0 177.5 -98.5t78.5 -221.5q0 -26 -19 -45t-45 -19h-429l-51 -483q-2 -12 -10.5 -20.5t-20.5 -8.5h-1q-27 0 -32 27l-76 485h-404q-26 0 -45 19 t-19 45zM416 672q0 -14 9 -23t23 -9t23 9t9 23v448q0 14 -9 23t-23 9t-23 -9t-9 -23v-448z" />
+<glyph unicode="&#xf08e;" horiz-adv-x="1792" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-66 0 -113 -47t-47 -113v-832q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v320q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-320q0 -119 -84.5 -203.5t-203.5 -84.5h-832 q-119 0 -203.5 84.5t-84.5 203.5zM685 576q0 13 10 23l652 652l-176 176q-19 19 -19 45t19 45t45 19h512q26 0 45 -19t19 -45v-512q0 -26 -19 -45t-45 -19t-45 19l-176 176l-652 -652q-10 -10 -23 -10t-23 10l-114 114q-10 10 -10 23z" />
+<glyph unicode="&#xf090;" d="M0 448v384q0 26 19 45t45 19h448v288q0 26 19 45t45 19t45 -19l544 -544q19 -19 19 -45t-19 -45l-544 -544q-19 -19 -45 -19t-45 19t-19 45v288h-448q-26 0 -45 19t-19 45zM894.5 78.5q0.5 10.5 3 23.5t10 19.5t20.5 6.5h320q66 0 113 47t47 113v704q0 66 -47 113 t-113 47h-288h-11h-13t-11.5 1t-11.5 3t-8 5.5t-7 9t-2 13.5q0 4 -1 20t-0.5 26.5t3 23.5t10 19.5t20.5 6.5h320q119 0 203.5 -84.5t84.5 -203.5v-704q0 -119 -84.5 -203.5t-203.5 -84.5h-320q-13 0 -22.5 9.5t-9.5 22.5q0 4 -1 20t-0.5 26.5z" />
+<glyph unicode="&#xf091;" horiz-adv-x="1664" d="M0 928v128q0 40 28 68t68 28h288v96q0 66 47 113t113 47h576q66 0 113 -47t47 -113v-96h288q40 0 68 -28t28 -68v-128q0 -71 -41.5 -143t-112 -130t-173 -97.5t-215.5 -44.5q-42 -54 -95 -95q-38 -34 -52.5 -72.5t-14.5 -89.5q0 -54 30.5 -91t97.5 -37q75 0 133.5 -45.5 t58.5 -114.5v-64q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23v64q0 69 58.5 114.5t133.5 45.5q67 0 97.5 37t30.5 91q0 51 -14.5 89.5t-52.5 72.5q-53 41 -95 95q-113 5 -215.5 44.5t-173 97.5t-112 130t-41.5 143zM128 928q0 -78 94.5 -162t235.5 -113q-74 162 -74 371 h-256v-96zM1206 653q141 29 235.5 113t94.5 162v96h-256q0 -209 -74 -371z" />
+<glyph unicode="&#xf092;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-224q-16 0 -24.5 1t-19.5 5t-16 14.5t-5 27.5v239q0 97 -52 142q57 6 102.5 18t94 39t81 66.5t53 105t20.5 150.5q0 121 -79 206q37 91 -8 204 q-28 9 -81 -11t-92 -44l-38 -24q-93 26 -192 26t-192 -26q-16 11 -42.5 27t-83.5 38.5t-86 13.5q-44 -113 -7 -204q-79 -85 -79 -206q0 -85 20.5 -150t52.5 -105t80.5 -67t94 -39t102.5 -18q-40 -36 -49 -103q-21 -10 -45 -15t-57 -5t-65.5 21.5t-55.5 62.5q-19 32 -48.5 52 t-49.5 24l-20 3q-21 0 -29 -4.5t-5 -11.5t9 -14t13 -12l7 -5q22 -10 43.5 -38t31.5 -51l10 -23q13 -38 44 -61.5t67 -30t69.5 -7t55.5 3.5l23 4q0 -38 0.5 -103t0.5 -68q0 -22 -11 -33.5t-22 -13t-33 -1.5h-224q-119 0 -203.5 84.5t-84.5 203.5zM271 315q3 5 13 2 q10 -5 7 -12q-5 -7 -13 -2q-10 5 -7 12zM304 290q6 6 16 -3q9 -11 2 -16q-6 -7 -16 3q-9 11 -2 16zM335 233q-9 13 0 18q9 7 17 -6q9 -12 0 -19q-8 -6 -17 7zM370 206q8 9 20 -3q12 -11 4 -19q-8 -9 -20 3q-13 11 -4 19zM419 168q4 11 19 7q16 -5 13 -16q-4 -12 -19 -6 q-17 4 -13 15zM481 154q0 11 16 11q17 2 17 -11q0 -11 -16 -11q-17 -2 -17 11zM540 158q-2 12 14 15q16 2 18 -9q2 -10 -14 -14t-18 8z" />
+<glyph unicode="&#xf093;" horiz-adv-x="1664" d="M0 -32v320q0 40 28 68t68 28h427q21 -56 70.5 -92t110.5 -36h256q61 0 110.5 36t70.5 92h427q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-1472q-40 0 -68 28t-28 68zM325 936q-17 39 14 69l448 448q18 19 45 19t45 -19l448 -448q31 -30 14 -69q-17 -40 -59 -40 h-256v-448q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45v448h-256q-42 0 -59 40zM1152 64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM1408 64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf094;" d="M0 433q0 111 18 217.5t54.5 209.5t100.5 194t150 156q78 59 232 120q194 78 316 78q60 0 175.5 -24t173.5 -24q19 0 57 5t58 5q81 0 118 -50.5t37 -134.5q0 -23 -5 -68t-5 -68q0 -10 1 -18.5t3 -17t4 -13.5t6.5 -16t6.5 -17q16 -40 25 -118.5t9 -136.5q0 -165 -70 -327.5 t-196 -288t-281 -180.5q-124 -44 -326 -44q-57 0 -170 14.5t-169 14.5q-24 0 -72.5 -14.5t-73.5 -14.5q-73 0 -123.5 55.5t-50.5 128.5q0 24 11 68t11 67q0 40 -12.5 120.5t-12.5 121.5zM128 434q0 -40 12.5 -120t12.5 -121q0 -23 -11 -66.5t-11 -65.5t12 -36.5t34 -14.5 q24 0 72.5 11t73.5 11q57 0 169.5 -15.5t169.5 -15.5q181 0 284 36q129 45 235.5 152.5t166 245.5t59.5 275q0 44 -7 113.5t-18 96.5q-12 30 -17 44t-9 36.5t-4 48.5q0 23 5 68.5t5 67.5q0 37 -10 55q-4 1 -13 1q-19 0 -58 -4.5t-59 -4.5q-60 0 -176 24t-175 24 q-43 0 -94.5 -11.5t-85 -23.5t-89.5 -34q-137 -54 -202 -103q-96 -73 -159.5 -189.5t-88 -236t-24.5 -248.5z" />
+<glyph unicode="&#xf095;" horiz-adv-x="1408" d="M0 1069q0 92 51 186q56 101 106 122q25 11 68.5 21t70.5 10q14 0 21 -3q18 -6 53 -76q11 -19 30 -54t35 -63.5t31 -53.5q3 -4 17.5 -25t21.5 -35.5t7 -28.5q0 -20 -28.5 -50t-62 -55t-62 -53t-28.5 -46q0 -9 5 -22.5t8.5 -20.5t14 -24t11.5 -19q76 -137 174 -235 t235 -174q2 -1 19 -11.5t24 -14t20.5 -8.5t22.5 -5q18 0 46 28.5t53 62t55 62t50 28.5q14 0 28.5 -7t35.5 -21.5t25 -17.5q25 -15 53.5 -31t63.5 -35t54 -30q70 -35 76 -53q3 -7 3 -21q0 -27 -10 -70.5t-21 -68.5q-21 -50 -122 -106q-94 -51 -186 -51q-27 0 -52.5 3.5 t-57.5 12.5t-47.5 14.5t-55.5 20.5t-49 18q-98 35 -175 83q-128 79 -264.5 215.5t-215.5 264.5q-48 77 -83 175q-3 9 -18 49t-20.5 55.5t-14.5 47.5t-12.5 57.5t-3.5 52.5z" />
+<glyph unicode="&#xf096;" horiz-adv-x="1408" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM128 288q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v832q0 66 -47 113t-113 47h-832q-66 0 -113 -47 t-47 -113v-832z" />
+<glyph unicode="&#xf097;" horiz-adv-x="1280" d="M0 7v1289q0 34 19.5 62t52.5 41q21 9 44 9h1048q23 0 44 -9q33 -13 52.5 -41t19.5 -62v-1289q0 -34 -19.5 -62t-52.5 -41q-19 -8 -44 -8q-48 0 -83 32l-441 424l-441 -424q-36 -33 -83 -33q-23 0 -44 9q-33 13 -52.5 41t-19.5 62zM128 38l423 406l89 85l89 -85l423 -406 v1242h-1024v-1242z" />
+<glyph unicode="&#xf098;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 905q0 -16 2.5 -34t5 -30.5t9 -33t10 -29.5t12.5 -33t11 -30q60 -164 216.5 -320.5t320.5 -216.5 q6 -2 30 -11t33 -12.5t29.5 -10t33 -9t30.5 -5t34 -2.5q57 0 130.5 34t94.5 80q22 53 22 101q0 11 -2 16q-3 8 -38.5 29.5t-88.5 49.5l-53 29q-5 3 -19 13t-25 15t-21 5q-18 0 -47 -32.5t-57 -65.5t-44 -33q-7 0 -16.5 3.5t-15.5 6.5t-17 9.5t-14 8.5q-99 55 -170.5 126.5 t-126.5 170.5q-2 3 -8.5 14t-9.5 17t-6.5 15.5t-3.5 16.5q0 13 20.5 33.5t45 38.5t45 39.5t20.5 36.5q0 10 -5 21t-15 25t-13 19q-3 6 -15 28.5t-25 45.5t-26.5 47.5t-25 40.5t-16.5 18t-16 2q-48 0 -101 -22q-46 -21 -80 -94.5t-34 -130.5z" />
+<glyph unicode="&#xf099;" horiz-adv-x="1664" d="M44 145q35 -4 78 -4q225 0 401 138q-105 2 -188 64.5t-114 159.5q33 -5 61 -5q43 0 85 11q-112 23 -185.5 111.5t-73.5 205.5v4q68 -38 146 -41q-66 44 -105 115t-39 154q0 88 44 163q121 -149 294.5 -238.5t371.5 -99.5q-8 38 -8 74q0 134 94.5 228.5t228.5 94.5 q140 0 236 -102q109 21 205 78q-37 -115 -142 -178q93 10 186 50q-67 -98 -162 -167q1 -14 1 -42q0 -130 -38 -259.5t-115.5 -248.5t-184.5 -210.5t-258 -146t-323 -54.5q-271 0 -496 145z" />
+<glyph unicode="&#xf09a;" horiz-adv-x="1024" d="M95 631v296h255v218q0 186 104 288.5t277 102.5q147 0 228 -12v-264h-157q-86 0 -116 -36t-30 -108v-189h293l-39 -296h-254v-759h-306v759h-255z" />
+<glyph unicode="&#xf09b;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5q0 -251 -146.5 -451.5t-378.5 -277.5q-27 -5 -39.5 7t-12.5 30v211q0 97 -52 142q57 6 102.5 18t94 39t81 66.5t53 105t20.5 150.5q0 121 -79 206q37 91 -8 204q-28 9 -81 -11t-92 -44 l-38 -24q-93 26 -192 26t-192 -26q-16 11 -42.5 27t-83.5 38.5t-86 13.5q-44 -113 -7 -204q-79 -85 -79 -206q0 -85 20.5 -150t52.5 -105t80.5 -67t94 -39t102.5 -18q-40 -36 -49 -103q-21 -10 -45 -15t-57 -5t-65.5 21.5t-55.5 62.5q-19 32 -48.5 52t-49.5 24l-20 3 q-21 0 -29 -4.5t-5 -11.5t9 -14t13 -12l7 -5q22 -10 43.5 -38t31.5 -51l10 -23q13 -38 44 -61.5t67 -30t69.5 -7t55.5 3.5l23 4q0 -38 0.5 -89t0.5 -54q0 -18 -13 -30t-40 -7q-232 77 -378.5 277.5t-146.5 451.5z" />
+<glyph unicode="&#xf09c;" horiz-adv-x="1664" d="M0 96v576q0 40 28 68t68 28h672v192q0 185 131.5 316.5t316.5 131.5t316.5 -131.5t131.5 -316.5v-256q0 -26 -19 -45t-45 -19h-64q-26 0 -45 19t-19 45v256q0 106 -75 181t-181 75t-181 -75t-75 -181v-192h96q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960 q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf09d;" horiz-adv-x="1920" d="M0 32v1216q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1600q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v608h-1664v-608zM128 1024h1664v224q0 13 -9.5 22.5t-22.5 9.5h-1600 q-13 0 -22.5 -9.5t-9.5 -22.5v-224zM256 128v128h256v-128h-256zM640 128v128h384v-128h-384z" />
+<glyph unicode="&#xf09e;" horiz-adv-x="1408" d="M0 192q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM0 697v135q0 29 21 47q17 17 43 17h5q160 -13 306 -80.5t259 -181.5q114 -113 181.5 -259t80.5 -306q2 -28 -17 -48q-18 -21 -47 -21h-135q-25 0 -43 16.5t-20 41.5q-22 229 -184.5 391.5 t-391.5 184.5q-25 2 -41.5 20t-16.5 43zM0 1201v143q0 28 20 46q18 18 44 18h3q262 -13 501.5 -120t425.5 -294q187 -186 294 -425.5t120 -501.5q2 -27 -18 -47q-18 -20 -46 -20h-143q-26 0 -44.5 17.5t-19.5 42.5q-12 215 -101 408.5t-231.5 336t-336 231.5t-408.5 102 q-25 1 -42.5 19.5t-17.5 43.5z" />
+<glyph unicode="&#xf0a0;" d="M0 160v320q0 25 16 75l197 606q17 53 63 86t101 33h782q55 0 101 -33t63 -86l197 -606q16 -50 16 -75v-320q0 -66 -47 -113t-113 -47h-1216q-66 0 -113 47t-47 113zM128 160q0 -13 9.5 -22.5t22.5 -9.5h1216q13 0 22.5 9.5t9.5 22.5v320q0 13 -9.5 22.5t-22.5 9.5h-1216 q-13 0 -22.5 -9.5t-9.5 -22.5v-320zM178 640h1180l-157 482q-4 13 -16 21.5t-26 8.5h-782q-14 0 -26 -8.5t-16 -21.5zM880 320q0 33 23.5 56.5t56.5 23.5t56.5 -23.5t23.5 -56.5t-23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5zM1136 320q0 33 23.5 56.5t56.5 23.5 t56.5 -23.5t23.5 -56.5t-23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5z" />
+<glyph unicode="&#xf0a1;" horiz-adv-x="1792" d="M0 672v192q0 66 47 113t113 47h480q435 0 896 384q52 0 90 -38t38 -90v-384q53 0 90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5v-384q0 -52 -38 -90t-90 -38q-417 347 -812 380q-58 -19 -91 -66t-31 -100.5t40 -92.5q-20 -33 -23 -65.5t6 -58t33.5 -55t48 -50 t61.5 -50.5q-29 -58 -111.5 -83t-168.5 -11.5t-132 55.5q-7 23 -29.5 87.5t-32 94.5t-23 89t-15 101t3.5 98.5t22 110.5h-122q-66 0 -113 47t-47 113zM768 633q377 -42 768 -341v954q-394 -302 -768 -343v-270z" />
+<glyph unicode="&#xf0a2;" horiz-adv-x="1664" d="M0 128q190 161 287 397.5t97 498.5q0 165 96 262t264 117q-8 18 -8 37q0 40 28 68t68 28t68 -28t28 -68q0 -19 -8 -37q168 -20 264 -117t96 -262q0 -262 97 -498.5t287 -397.5q0 -52 -38 -90t-90 -38h-448q0 -106 -75 -181t-181 -75t-181 75t-75 181h-448q-52 0 -90 38 t-38 90zM183 128h1298q-164 181 -246.5 411.5t-82.5 484.5q0 256 -320 256t-320 -256q0 -254 -82.5 -484.5t-246.5 -411.5zM656 0q0 -73 51.5 -124.5t124.5 -51.5q16 0 16 16t-16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16z" />
+<glyph unicode="&#xf0a3;" d="M2 435q-10 42 20 70l138 135l-138 135q-30 28 -20 70q12 41 52 51l188 48l-53 186q-12 41 19 70q29 31 70 19l186 -53l48 188q10 41 51 51q41 12 70 -19l135 -139l135 139q29 30 70 19q41 -10 51 -51l48 -188l186 53q41 12 70 -19q31 -29 19 -70l-53 -186l188 -48 q40 -10 52 -51q10 -42 -20 -70l-138 -135l138 -135q30 -28 20 -70q-12 -41 -52 -51l-188 -48l53 -186q12 -41 -19 -70q-29 -31 -70 -19l-186 53l-48 -188q-10 -40 -51 -52q-12 -2 -19 -2q-31 0 -51 22l-135 138l-135 -138q-28 -30 -70 -20q-41 11 -51 52l-48 188l-186 -53 q-41 -12 -70 19q-31 29 -19 70l53 186l-188 48q-40 10 -52 51z" />
+<glyph unicode="&#xf0a4;" horiz-adv-x="1792" d="M0 128v640q0 53 37.5 90.5t90.5 37.5h288q10 0 21.5 4.5t23.5 14t22.5 18t24 22.5t20.5 21.5t19 21.5t14 17q65 74 100 129q13 21 33 62t37 72t40.5 63t55 49.5t69.5 17.5q125 0 206.5 -67t81.5 -189q0 -68 -22 -128h374q104 0 180 -76t76 -179q0 -105 -75.5 -181 t-180.5 -76h-169q-4 -62 -37 -119q3 -21 3 -43q0 -101 -60 -178q1 -139 -85 -219.5t-227 -80.5q-133 0 -322 69q-164 59 -223 59h-288q-53 0 -90.5 37.5t-37.5 90.5zM128 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM384 128h32q72 0 167 -32 t193.5 -64t179.5 -32q189 0 189 167q0 26 -5 56q30 16 47.5 52.5t17.5 73.5t-18 69q53 50 53 119q0 25 -10 55.5t-25 47.5h331q52 0 90 38t38 90q0 51 -39 89.5t-89 38.5h-576q0 20 15 48.5t33 55t33 68t15 84.5q0 67 -44.5 97.5t-115.5 30.5q-24 0 -90 -139 q-24 -44 -37 -65q-40 -64 -112 -145q-71 -81 -101 -106q-69 -57 -140 -57h-32v-640z" />
+<glyph unicode="&#xf0a5;" horiz-adv-x="1792" d="M0 769q0 103 76 179t180 76h374q-22 60 -22 128q0 122 81.5 189t206.5 67q38 0 69.5 -17.5t55 -49.5t40.5 -63t37 -72t33 -62q35 -55 100 -129q2 -3 14 -17t19 -21.5t20.5 -21.5t24 -22.5t22.5 -18t23.5 -14t21.5 -4.5h288q53 0 90.5 -37.5t37.5 -90.5v-640 q0 -53 -37.5 -90.5t-90.5 -37.5h-288q-59 0 -223 -59q-190 -69 -317 -69q-142 0 -230 77.5t-87 217.5l1 5q-61 76 -61 178q0 22 3 43q-33 57 -37 119h-169q-105 0 -180.5 76t-75.5 181zM128 768q0 -52 38 -90t90 -38h331q-15 -17 -25 -47.5t-10 -55.5q0 -69 53 -119 q-18 -32 -18 -69t17.5 -73.5t47.5 -52.5q-4 -24 -4 -56q0 -85 48.5 -126t135.5 -41q84 0 183 32t194 64t167 32h32v640h-32q-35 0 -67.5 12t-62.5 37t-50 46t-49 54q-2 3 -3.5 4.5t-4 4.5t-4.5 5q-72 81 -112 145q-14 22 -38 68q-1 3 -10.5 22.5t-18.5 36t-20 35.5 t-21.5 30.5t-18.5 11.5q-71 0 -115.5 -30.5t-44.5 -97.5q0 -43 15 -84.5t33 -68t33 -55t15 -48.5h-576q-50 0 -89 -38.5t-39 -89.5zM1536 192q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0a6;" d="M0 640q0 125 67 206.5t189 81.5q68 0 128 -22v374q0 104 76 180t179 76q105 0 181 -75.5t76 -180.5v-169q62 -4 119 -37q21 3 43 3q101 0 178 -60q139 1 219.5 -85t80.5 -227q0 -133 -69 -322q-59 -164 -59 -223v-288q0 -53 -37.5 -90.5t-90.5 -37.5h-640 q-53 0 -90.5 37.5t-37.5 90.5v288q0 10 -4.5 21.5t-14 23.5t-18 22.5t-22.5 24t-21.5 20.5t-21.5 19t-17 14q-74 65 -129 100q-21 13 -62 33t-72 37t-63 40.5t-49.5 55t-17.5 69.5zM128 640q0 -24 139 -90q44 -24 65 -37q64 -40 145 -112q81 -71 106 -101q57 -69 57 -140 v-32h640v32q0 72 32 167t64 193.5t32 179.5q0 189 -167 189q-26 0 -56 -5q-16 30 -52.5 47.5t-73.5 17.5t-69 -18q-50 53 -119 53q-25 0 -55.5 -10t-47.5 -25v331q0 52 -38 90t-90 38q-51 0 -89.5 -39t-38.5 -89v-576q-20 0 -48.5 15t-55 33t-68 33t-84.5 15 q-67 0 -97.5 -44.5t-30.5 -115.5zM1152 -64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0a7;" d="M0 640q0 38 17.5 69.5t49.5 55t63 40.5t72 37t62 33q55 35 129 100q3 2 17 14t21.5 19t21.5 20.5t22.5 24t18 22.5t14 23.5t4.5 21.5v288q0 53 37.5 90.5t90.5 37.5h640q53 0 90.5 -37.5t37.5 -90.5v-288q0 -59 59 -223q69 -190 69 -317q0 -142 -77.5 -230t-217.5 -87 l-5 1q-76 -61 -178 -61q-22 0 -43 3q-54 -30 -119 -37v-169q0 -105 -76 -180.5t-181 -75.5q-103 0 -179 76t-76 180v374q-54 -22 -128 -22q-121 0 -188.5 81.5t-67.5 206.5zM128 640q0 -71 30.5 -115.5t97.5 -44.5q43 0 84.5 15t68 33t55 33t48.5 15v-576q0 -50 38.5 -89 t89.5 -39q52 0 90 38t38 90v331q46 -35 103 -35q69 0 119 53q32 -18 69 -18t73.5 17.5t52.5 47.5q24 -4 56 -4q85 0 126 48.5t41 135.5q0 84 -32 183t-64 194t-32 167v32h-640v-32q0 -35 -12 -67.5t-37 -62.5t-46 -50t-54 -49q-9 -8 -14 -12q-81 -72 -145 -112 q-22 -14 -68 -38q-3 -1 -22.5 -10.5t-36 -18.5t-35.5 -20t-30.5 -21.5t-11.5 -18.5zM1152 1344q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0a8;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM251 640q0 -27 18 -45l91 -91l362 -362q18 -18 45 -18t45 18l91 91q18 18 18 45t-18 45l-189 189h502 q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-502l189 189q19 19 19 45t-19 45l-91 91q-18 18 -45 18t-45 -18l-362 -362l-91 -91q-18 -18 -18 -45z" />
+<glyph unicode="&#xf0a9;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM256 576q0 -26 19 -45t45 -19h502l-189 -189q-19 -19 -19 -45t19 -45l91 -91q18 -18 45 -18t45 18 l362 362l91 91q18 18 18 45t-18 45l-91 91l-362 362q-18 18 -45 18t-45 -18l-91 -91q-18 -18 -18 -45t18 -45l189 -189h-502q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf0aa;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM252 641q0 -27 18 -45l91 -91q18 -18 45 -18t45 18l189 189v-502q0 -26 19 -45t45 -19h128q26 0 45 19 t19 45v502l189 -189q19 -19 45 -19t45 19l91 91q18 18 18 45t-18 45l-362 362l-91 91q-18 18 -45 18t-45 -18l-91 -91l-362 -362q-18 -18 -18 -45z" />
+<glyph unicode="&#xf0ab;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM252 639q0 -27 18 -45l362 -362l91 -91q18 -18 45 -18t45 18l91 91l362 362q18 18 18 45t-18 45l-91 91 q-18 18 -45 18t-45 -18l-189 -189v502q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-502l-189 189q-19 19 -45 19t-45 -19l-91 -91q-18 -18 -18 -45z" />
+<glyph unicode="&#xf0ac;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM226 979q7 -7 12 -8q4 -1 5 -9t2.5 -11t11.5 3q9 -8 3 -19q1 1 44 -27q19 -17 21 -21q3 -11 -10 -18 q-1 2 -9 9t-9 4q-3 -5 0.5 -18.5t10.5 -12.5q-7 0 -9.5 -16t-2.5 -35.5t-1 -23.5l2 -1q-3 -12 5.5 -34.5t21.5 -19.5q-13 -3 20 -43q6 -8 8 -9q3 -2 12 -7.5t15 -10t10 -10.5q4 -5 10 -22.5t14 -23.5q-2 -6 9.5 -20t10.5 -23q-1 0 -2.5 -1t-2.5 -1q3 -7 15.5 -14t15.5 -13 q1 -3 2 -10t3 -11t8 -2q2 20 -24 62q-15 25 -17 29q-3 5 -5.5 15.5t-4.5 14.5q2 0 6 -1.5t8.5 -3.5t7.5 -4t2 -3q-3 -7 2 -17.5t12 -18.5t17 -19t12 -13q6 -6 14 -19.5t0 -13.5q9 0 20 -10t17 -20q5 -8 8 -26t5 -24q2 -7 8.5 -13.5t12.5 -9.5l16 -8t13 -7q5 -2 18.5 -10.5 t21.5 -11.5q10 -4 16 -4t14.5 2.5t13.5 3.5q15 2 29 -15t21 -21q36 -19 55 -11q-2 -1 0.5 -7.5t8 -15.5t9 -14.5t5.5 -8.5q5 -6 18 -15t18 -15q6 4 7 9q-3 -8 7 -20t18 -10q14 3 14 32q-31 -15 -49 18q0 1 -2.5 5.5t-4 8.5t-2.5 8.5t0 7.5t5 3q9 0 10 3.5t-2 12.5t-4 13 q-1 8 -11 20t-12 15q-5 -9 -16 -8t-16 9q0 -1 -1.5 -5.5t-1.5 -6.5q-13 0 -15 1q1 3 2.5 17.5t3.5 22.5q1 4 5.5 12t7.5 14.5t4 12.5t-4.5 9.5t-17.5 2.5q-19 -1 -26 -20q-1 -3 -3 -10.5t-5 -11.5t-9 -7q-7 -3 -24 -2t-24 5q-13 8 -22.5 29t-9.5 37q0 10 2.5 26.5t3 25 t-5.5 24.5q3 2 9 9.5t10 10.5q2 1 4.5 1.5t4.5 0t4 1.5t3 6q-1 1 -4 3q-3 3 -4 3q7 -3 28.5 1.5t27.5 -1.5q15 -11 22 2q0 1 -2.5 9.5t-0.5 13.5q5 -27 29 -9q3 -3 15.5 -5t17.5 -5q3 -2 7 -5.5t5.5 -4.5t5 0.5t8.5 6.5q10 -14 12 -24q11 -40 19 -44q7 -3 11 -2t4.5 9.5 t0 14t-1.5 12.5l-1 8v18l-1 8q-15 3 -18.5 12t1.5 18.5t15 18.5q1 1 8 3.5t15.5 6.5t12.5 8q21 19 15 35q7 0 11 9q-1 0 -5 3t-7.5 5t-4.5 2q9 5 2 16q5 3 7.5 11t7.5 10q9 -12 21 -2q7 8 1 16q5 7 20.5 10.5t18.5 9.5q7 -2 8 2t1 12t3 12q4 5 15 9t13 5l17 11q3 4 0 4 q18 -2 31 11q10 11 -6 20q3 6 -3 9.5t-15 5.5q3 1 11.5 0.5t10.5 1.5q15 10 -7 16q-17 5 -43 -12q-2 -1 -9.5 -9.5t-13.5 -9.5q2 0 4.5 5t5 11t3.5 7q6 7 22 15q14 6 52 12q34 8 51 -11q-2 2 9.5 13t14.5 12q3 2 15 4.5t15 7.5l2 22q-12 -1 -17.5 7t-6.5 21q0 -2 -6 -8 q0 7 -4.5 8t-11.5 -1t-9 -1q-10 3 -15 7.5t-8 16.5t-4 15q-2 5 -9.5 10.5t-9.5 10.5q-1 2 -2.5 5.5t-3 6.5t-4 5.5t-5.5 2.5t-7 -5t-7.5 -10t-4.5 -5q-3 2 -6 1.5t-4.5 -1t-4.5 -3t-5 -3.5q-3 -2 -8.5 -3t-8.5 -2q15 5 -1 11q-10 4 -16 3q9 4 7.5 12t-8.5 14h5 q-1 4 -8.5 8.5t-17.5 8.5t-13 6q-8 5 -34 9.5t-33 0.5q-5 -6 -4.5 -10.5t4 -14t3.5 -12.5q1 -6 -5.5 -13t-6.5 -12q0 -7 14 -15.5t10 -21.5q-3 -8 -16 -16t-16 -12q-5 -8 -1.5 -18.5t10.5 -16.5q2 -2 1.5 -4t-3.5 -4.5t-5.5 -4t-6.5 -3.5l-3 -2q-11 -5 -20.5 6t-13.5 26 q-7 25 -16 30q-23 8 -29 -1q-5 13 -41 26q-25 9 -58 4q6 1 0 15q-7 15 -19 12q3 6 4 17.5t1 13.5q3 13 12 23q1 1 7 8.5t9.5 13.5t0.5 6q35 -4 50 11q5 5 11.5 17t10.5 17q9 6 14 5.5t14.5 -5.5t14.5 -5q14 -1 15.5 11t-7.5 20q12 -1 3 17q-5 7 -8 9q-12 4 -27 -5 q-8 -4 2 -8q-1 1 -9.5 -10.5t-16.5 -17.5t-16 5q-1 1 -5.5 13.5t-9.5 13.5q-8 0 -16 -15q3 8 -11 15t-24 8q19 12 -8 27q-7 4 -20.5 5t-19.5 -4q-5 -7 -5.5 -11.5t5 -8t10.5 -5.5t11.5 -4t8.5 -3q14 -10 8 -14q-2 -1 -8.5 -3.5t-11.5 -4.5t-6 -4q-3 -4 0 -14t-2 -14 q-5 5 -9 17.5t-7 16.5q7 -9 -25 -6l-10 1q-4 0 -16 -2t-20.5 -1t-13.5 8q-4 8 0 20q1 4 4 2q-4 3 -11 9.5t-10 8.5q-46 -15 -94 -41q6 -1 12 1q5 2 13 6.5t10 5.5q34 14 42 7l5 5q14 -16 20 -25q-7 4 -30 1q-20 -6 -22 -12q7 -12 5 -18q-4 3 -11.5 10t-14.5 11t-15 5 q-16 0 -22 -1q-146 -80 -235 -222zM877 26q0 -6 2 -16q206 36 351 189q-3 3 -12.5 4.5t-12.5 3.5q-18 7 -24 8q1 7 -2.5 13t-8 9t-12.5 8t-11 7q-2 2 -7 6t-7 5.5t-7.5 4.5t-8.5 2t-10 -1l-3 -1q-3 -1 -5.5 -2.5t-5.5 -3t-4 -3t0 -2.5q-21 17 -36 22q-5 1 -11 5.5t-10.5 7 t-10 1.5t-11.5 -7q-5 -5 -6 
-15t-2 -13q-7 5 0 17.5t2 18.5q-3 6 -10.5 4.5t-12 -4.5t-11.5 -8.5t-9 -6.5t-8.5 -5.5t-8.5 -7.5q-3 -4 -6 -12t-5 -11q-2 4 -11.5 6.5t-9.5 5.5q2 -10 4 -35t5 -38q7 -31 -12 -48q-27 -25 -29 -40q-4 -22 12 -26q0 -7 -8 -20.5t-7 -21.5z" />
+<glyph unicode="&#xf0ad;" horiz-adv-x="1664" d="M21 0q0 53 38 91l681 681q39 -98 114.5 -173.5t173.5 -114.5l-682 -682q-37 -37 -90 -37q-52 0 -91 37l-106 108q-38 36 -38 90zM256 64q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45zM768 960q0 185 131.5 316.5t316.5 131.5q58 0 121.5 -16.5 t107.5 -46.5q16 -11 16 -28t-16 -28l-293 -169v-224l193 -107q5 3 79 48.5t135.5 81t70.5 35.5q15 0 23.5 -10t8.5 -25q0 -39 -23 -106q-47 -134 -164.5 -217.5t-258.5 -83.5q-185 0 -316.5 131.5t-131.5 316.5z" />
+<glyph unicode="&#xf0ae;" horiz-adv-x="1792" d="M0 64v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 576v256q0 26 19 45t45 19h1664q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM0 1088v256q0 26 19 45t45 19h1664 q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1664q-26 0 -45 19t-19 45zM640 640h1024v128h-1024v-128zM1024 128h640v128h-640v-128zM1280 1152h384v128h-384v-128z" />
+<glyph unicode="&#xf0b0;" horiz-adv-x="1408" d="M5 1241q17 39 59 39h1280q42 0 59 -39q17 -41 -14 -70l-493 -493v-742q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-256 256q-19 19 -19 45v486l-493 493q-31 29 -14 70z" />
+<glyph unicode="&#xf0b1;" horiz-adv-x="1792" d="M0 160v480h672v-160q0 -26 19 -45t45 -19h320q26 0 45 19t19 45v160h672v-480q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM0 736v384q0 66 47 113t113 47h352v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h352q66 0 113 -47t47 -113v-384h-1792z M640 1280h512v128h-512v-128zM768 512v128h256v-128h-256z" />
+<glyph unicode="&#xf0b2;" d="M0 -64v448q0 42 40 59q39 17 69 -14l144 -144l355 355l-355 355l-144 -144q-19 -19 -45 -19q-12 0 -24 5q-40 17 -40 59v448q0 26 19 45t45 19h448q42 0 59 -40q17 -39 -14 -69l-144 -144l355 -355l355 355l-144 144q-31 30 -14 69q17 40 59 40h448q26 0 45 -19t19 -45 v-448q0 -42 -39 -59q-13 -5 -25 -5q-26 0 -45 19l-144 144l-355 -355l355 -355l144 144q29 31 70 14q39 -17 39 -59v-448q0 -26 -19 -45t-45 -19h-448q-42 0 -59 40q-17 39 14 69l144 144l-355 355l-355 -355l144 -144q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19 t-19 45z" />
+<glyph unicode="&#xf0c0;" horiz-adv-x="1920" d="M0 671q0 353 124 353q6 0 43.5 -21t97.5 -42.5t119 -21.5q67 0 133 23q-5 -37 -5 -66q0 -139 81 -256q-162 -5 -265 -128h-134q-82 0 -138 40.5t-56 118.5zM128 1280q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181zM256 3q0 53 3.5 103.5 t14 109t26.5 108.5t43 97.5t62 81t85.5 53.5t111.5 20q10 0 43 -21.5t73 -48t107 -48t135 -21.5t135 21.5t107 48t73 48t43 21.5q61 0 111.5 -20t85.5 -53.5t62 -81t43 -97.5t26.5 -108.5t14 -109t3.5 -103.5q0 -120 -73 -189.5t-194 -69.5h-874q-121 0 -194 69.5t-73 189.5 zM576 896q0 159 112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5t-271.5 112.5t-112.5 271.5zM1280 1280q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181zM1327 640q81 117 81 256q0 29 -5 66q66 -23 133 -23 q59 0 119 21.5t97.5 42.5t43.5 21q124 0 124 -353q0 -78 -56 -118.5t-138 -40.5h-134q-103 123 -265 128z" />
+<glyph unicode="&#xf0c1;" horiz-adv-x="1664" d="M16 1088q0 120 85 203l147 146q83 83 203 83q121 0 204 -85l206 -207q83 -83 83 -203q0 -123 -88 -209l88 -88q86 88 208 88q120 0 204 -84l208 -208q84 -84 84 -204t-85 -203l-147 -146q-83 -83 -203 -83q-121 0 -204 85l-206 207q-83 83 -83 203q0 123 88 209l-88 88 q-86 -88 -208 -88q-120 0 -204 84l-208 208q-84 84 -84 204zM208 1088q0 -40 28 -68l208 -208q27 -27 68 -27q42 0 72 31q-3 3 -19 18.5t-21.5 21.5t-15 19t-13 25.5t-3.5 27.5q0 40 28 68t68 28q15 0 27.5 -3.5t25.5 -13t19 -15t21.5 -21.5t18.5 -19q33 31 33 73 q0 40 -28 68l-206 207q-28 28 -68 28q-39 0 -68 -27l-147 -146q-28 -28 -28 -67zM911 383q0 -40 28 -68l206 -207q27 -27 68 -27q40 0 68 26l147 146q28 28 28 67q0 40 -28 68l-208 208q-28 28 -68 28q-42 0 -72 -32q3 -3 19 -18.5t21.5 -21.5t15 -19t13 -25.5t3.5 -27.5 q0 -40 -28 -68t-68 -28q-15 0 -27.5 3.5t-25.5 13t-19 15t-21.5 21.5t-18.5 19q-33 -31 -33 -73z" />
+<glyph unicode="&#xf0c2;" horiz-adv-x="1920" d="M0 448q0 132 71 241.5t187 163.5q-2 28 -2 43q0 212 150 362t362 150q158 0 286.5 -88t187.5 -230q70 62 166 62q106 0 181 -75t75 -181q0 -75 -41 -138q129 -30 213 -134.5t84 -239.5q0 -159 -112.5 -271.5t-271.5 -112.5h-1088q-185 0 -316.5 131.5t-131.5 316.5z" />
+<glyph unicode="&#xf0c3;" horiz-adv-x="1664" d="M115.5 -64.5q-34.5 63.5 21.5 152.5l503 793v399h-64q-26 0 -45 19t-19 45t19 45t45 19h512q26 0 45 -19t19 -45t-19 -45t-45 -19h-64v-399l503 -793q56 -89 21.5 -152.5t-140.5 -63.5h-1152q-106 0 -140.5 63.5zM476 384h712l-272 429l-20 31v37v399h-128v-399v-37 l-20 -31z" />
+<glyph unicode="&#xf0c4;" horiz-adv-x="1792" d="M1 157q7 76 56 147t131 124q132 84 278 84q83 0 151 -31q9 13 22 22l122 73l-122 73q-13 9 -22 22q-68 -31 -151 -31q-146 0 -278 84q-82 53 -131 124t-56 147q-5 59 15.5 113t63.5 93q85 79 222 79q145 0 277 -84q83 -52 132 -123t56 -148q4 -48 -10 -97q4 -1 12 -5 l110 -66l690 387q14 8 31 8q16 0 29 -7l128 -64q30 -16 35 -51q3 -36 -25 -56l-507 -398l507 -398q28 -20 25 -56q-5 -35 -35 -51l-128 -64q-13 -7 -29 -7q-17 0 -31 8l-690 387l-110 -66q-8 -4 -12 -5q14 -49 10 -97q-7 -77 -56 -147.5t-132 -123.5q-132 -84 -277 -84 q-136 0 -222 78q-90 84 -79 207zM168 176q-25 -66 21 -108q39 -36 113 -36q100 0 192 59q81 51 106 117t-21 108q-39 36 -113 36q-100 0 -192 -59q-81 -51 -106 -117zM168 976q25 -66 106 -117q92 -59 192 -59q74 0 113 36q46 42 21 108t-106 117q-92 59 -192 59 q-74 0 -113 -36q-46 -42 -21 -108zM672 448l9 -8q2 -2 7 -6q4 -4 11 -12t11 -12l26 -26l160 96l96 -32l736 576l-128 64l-768 -431v-113zM672 704l96 -58v11q0 36 33 56l14 8l-79 47l-26 -26q-3 -3 -10 -11t-12 -12q-2 -2 -4 -3.5t-3 -2.5zM896 576q0 26 19 45t45 19t45 -19 t19 -45t-19 -45t-45 -19t-45 19t-19 45zM1018 391l582 -327l128 64l-520 408l-177 -138q-2 -3 -13 -7z" />
+<glyph unicode="&#xf0c5;" horiz-adv-x="1792" d="M0 224v672q0 40 20 88t48 76l408 408q28 28 76 48t88 20h416q40 0 68 -28t28 -68v-328q68 40 128 40h416q40 0 68 -28t28 -68v-1216q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v288h-544q-40 0 -68 28t-28 68zM128 256h512v256q0 40 20 88t48 76l316 316v416h-384 v-416q0 -40 -28 -68t-68 -28h-416v-640zM213 1024h299v299zM768 -128h896v1152h-384v-416q0 -40 -28 -68t-68 -28h-416v-640zM853 640h299v299z" />
+<glyph unicode="&#xf0c6;" horiz-adv-x="1408" d="M4 1023q0 159 110 270t269 111q158 0 273 -113l605 -606q10 -10 10 -22q0 -16 -30.5 -46.5t-46.5 -30.5q-13 0 -23 10l-606 607q-79 77 -181 77q-106 0 -179 -75t-73 -181q0 -105 76 -181l776 -777q63 -63 145 -63q64 0 106 42t42 106q0 82 -63 145l-581 581 q-26 24 -60 24q-29 0 -48 -19t-19 -48q0 -32 25 -59l410 -410q10 -10 10 -22q0 -16 -31 -47t-47 -31q-12 0 -22 10l-410 410q-63 61 -63 149q0 82 57 139t139 57q88 0 149 -63l581 -581q100 -98 100 -235q0 -117 -79 -196t-196 -79q-135 0 -235 100l-777 776 q-113 115 -113 271z" />
+<glyph unicode="&#xf0c7;" d="M0 -32v1344q0 40 28 68t68 28h928q40 0 88 -20t76 -48l280 -280q28 -28 48 -76t20 -88v-928q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 0h128v416q0 40 28 68t68 28h832q40 0 68 -28t28 -68v-416h128v896q0 14 -10 38.5t-20 34.5l-281 281q-10 10 -34 20 t-39 10v-416q0 -40 -28 -68t-68 -28h-576q-40 0 -68 28t-28 68v416h-128v-1280zM384 0h768v384h-768v-384zM640 928q0 -13 9.5 -22.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 22.5v320q0 13 -9.5 22.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -22.5v-320z" />
+<glyph unicode="&#xf0c8;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf0c9;" d="M0 64v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM0 576v128q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM0 1088v128q0 26 19 45t45 19h1408 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0ca;" horiz-adv-x="1792" d="M0 128q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM0 640q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM0 1152q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM512 32v192 q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5zM512 544v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5z M512 1056v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0cb;" horiz-adv-x="1792" d="M15 438q0 51 23.5 93t56.5 68t66 47.5t56.5 43.5t23.5 45q0 25 -14.5 38.5t-39.5 13.5q-46 0 -81 -58l-85 59q24 51 71.5 79.5t105.5 28.5q73 0 123 -41.5t50 -112.5q0 -50 -34 -91.5t-75 -64.5t-75.5 -50.5t-35.5 -52.5h127v60h105v-159h-362q-6 36 -6 54zM19 -190 l57 88q49 -45 106 -45q29 0 50.5 14.5t21.5 42.5q0 64 -105 56l-26 56q8 10 32.5 43.5t42.5 54t37 38.5v1q-16 0 -48.5 -1t-48.5 -1v-53h-106v152h333v-88l-95 -115q51 -12 81 -49t30 -88q0 -80 -54.5 -126t-135.5 -46q-106 0 -172 66zM34 1400l136 127h106v-404h108v-99 h-335v99h107q0 41 0.5 122t0.5 121v12h-2q-8 -17 -50 -54zM512 32v192q0 14 9 23t23 9h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5zM512 544v192q0 14 9 23t23 9h1216q13 0 22.5 -9.5t9.5 -22.5v-192 q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5zM512 1056v192q0 13 9.5 22.5t22.5 9.5h1216q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1216q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0cc;" horiz-adv-x="1792" d="M0 544v64q0 14 9 23t23 9h1728q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-1728q-14 0 -23 9t-9 23zM384 972q0 181 134 309q133 127 393 127q50 0 167 -19q66 -12 177 -48q10 -38 21 -118q14 -123 14 -183q0 -18 -5 -45l-12 -3l-84 6l-14 2q-50 149 -103 205 q-88 91 -210 91q-114 0 -182 -59q-67 -58 -67 -146q0 -73 66 -140t279 -129q69 -20 173 -66q58 -28 95 -52h-743q-28 35 -51 80q-48 97 -48 188zM414 154q-1 30 0 68l2 37v44l102 2q15 -34 30 -71t22.5 -56t12.5 -27q35 -57 80 -94q43 -36 105 -57q59 -22 132 -22 q64 0 139 27q77 26 122 86q47 61 47 129q0 84 -81 157q-34 29 -137 71h411q7 -39 7 -92q0 -111 -41 -212q-23 -55 -71 -104q-37 -35 -109 -81q-80 -48 -153 -66q-80 -21 -203 -21q-114 0 -195 23l-140 40q-57 16 -72 28q-8 8 -8 22v13q0 108 -2 156z" />
+<glyph unicode="&#xf0cd;" d="M0 -32v-64q0 -14 9 -23t23 -9h1472q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-1472q-14 0 -23 -9t-9 -23zM0 1405q13 1 40 1q60 0 112 -4q132 -7 166 -7q86 0 168 3q116 4 146 5q56 0 86 2l-1 -14l2 -64v-9q-60 -9 -124 -9q-60 0 -79 -25q-13 -14 -13 -132q0 -13 0.5 -32.5 t0.5 -25.5l1 -229l14 -280q6 -124 51 -202q35 -59 96 -92q88 -47 177 -47q104 0 191 28q56 18 99 51q48 36 65 64q36 56 53 114q21 73 21 229q0 79 -3.5 128t-11 122.5t-13.5 159.5l-4 59q-5 67 -24 88q-34 35 -77 34l-100 -2l-14 3l2 86h84l205 -10q76 -3 196 10l18 -2 q6 -38 6 -51q0 -7 -4 -31q-45 -12 -84 -13q-73 -11 -79 -17q-15 -15 -15 -41q0 -7 1.5 -27t1.5 -31q8 -19 22 -396q6 -195 -15 -304q-15 -76 -41 -122q-38 -65 -112 -123q-75 -57 -182 -89q-109 -33 -255 -33q-167 0 -284 46q-119 47 -179 122q-61 76 -83 195 q-16 80 -16 237v333q0 188 -17 213q-25 36 -147 39q-37 2 -45 4z" />
+<glyph unicode="&#xf0ce;" horiz-adv-x="1664" d="M0 160v1088q0 66 47 113t113 47h1344q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-1344q-66 0 -113 47t-47 113zM128 160q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM128 544q0 -14 9 -23t23 -9h320 q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM128 928q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM640 160q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9 t-9 -23v-192zM640 544q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM640 928q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM1152 160q0 -14 9 -23t23 -9h320q14 0 23 9t9 23 v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM1152 544q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192zM1152 928q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23v-192z" />
+<glyph unicode="&#xf0d0;" horiz-adv-x="1664" d="M27 160q0 27 18 45l1286 1286q18 18 45 18t45 -18l198 -198q18 -18 18 -45t-18 -45l-1286 -1286q-18 -18 -45 -18t-45 18l-198 198q-18 18 -18 45zM128 1408l98 30l30 98l30 -98l98 -30l-98 -30l-30 -98l-30 98zM320 1216l196 60l60 196l60 -196l196 -60l-196 -60 l-60 -196l-60 196zM768 1408l98 30l30 98l30 -98l98 -30l-98 -30l-30 -98l-30 98zM1083 1062l107 -107l293 293l-107 107zM1408 768l98 30l30 98l30 -98l98 -30l-98 -30l-30 -98l-30 98z" />
+<glyph unicode="&#xf0d1;" horiz-adv-x="1792" d="M64 192q0 26 19 45t45 19v320q0 8 -0.5 35t0 38t2.5 34.5t6.5 37t14 30.5t22.5 30l198 198q19 19 50.5 32t58.5 13h160v192q0 26 19 45t45 19h1024q26 0 45 -19t19 -45v-1024q0 -15 -4 -26.5t-13.5 -18.5t-16.5 -11.5t-23.5 -6t-22.5 -2t-25.5 0t-22.5 0.5 q0 -106 -75 -181t-181 -75t-181 75t-75 181h-384q0 -106 -75 -181t-181 -75t-181 75t-75 181h-64q-3 0 -22.5 -0.5t-25.5 0t-22.5 2t-23.5 6t-16.5 11.5t-13.5 18.5t-4 26.5zM256 640h384v256h-158q-13 0 -22 -9l-195 -195q-9 -9 -9 -22v-30zM384 128q0 -52 38 -90t90 -38 t90 38t38 90t-38 90t-90 38t-90 -38t-38 -90zM1280 128q0 -52 38 -90t90 -38t90 38t38 90t-38 90t-90 38t-90 -38t-38 -90z" />
+<glyph unicode="&#xf0d2;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103q-111 0 -218 32q59 93 78 164q9 34 54 211q20 -39 73 -67.5t114 -28.5q121 0 216 68.5t147 188.5t52 270q0 114 -59.5 214t-172.5 163t-255 63 q-105 0 -196 -29t-154.5 -77t-109 -110.5t-67 -129.5t-21.5 -134q0 -104 40 -183t117 -111q30 -12 38 20q2 7 8 31t8 30q6 23 -11 43q-51 61 -51 151q0 151 104.5 259.5t273.5 108.5q151 0 235.5 -82t84.5 -213q0 -170 -68.5 -289t-175.5 -119q-61 0 -98 43.5t-23 104.5 q8 35 26.5 93.5t30 103t11.5 75.5q0 50 -27 83t-77 33q-62 0 -105 -57t-43 -142q0 -73 25 -122l-99 -418q-17 -70 -13 -177q-206 91 -333 281t-127 423z" />
+<glyph unicode="&#xf0d3;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-725q85 122 108 210q9 34 53 209q21 -39 73.5 -67t112.5 -28q181 0 295.5 147.5t114.5 373.5q0 84 -35 162.5t-96.5 139t-152.5 97t-197 36.5 q-104 0 -194.5 -28.5t-153 -76.5t-107.5 -109.5t-66.5 -128t-21.5 -132.5q0 -102 39.5 -180t116.5 -110q13 -5 23.5 0t14.5 19q10 44 15 61q6 23 -11 42q-50 62 -50 150q0 150 103.5 256.5t270.5 106.5q149 0 232.5 -81t83.5 -210q0 -168 -67.5 -286t-173.5 -118 q-60 0 -97 43.5t-23 103.5q8 34 26.5 92.5t29.5 102t11 74.5q0 49 -26.5 81.5t-75.5 32.5q-61 0 -103.5 -56.5t-42.5 -139.5q0 -72 24 -121l-98 -414q-24 -100 -7 -254h-183q-119 0 -203.5 84.5t-84.5 203.5z" />
+<glyph unicode="&#xf0d4;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM276 309q0 -43 18.5 -77.5t48.5 -56.5t69 -37t77.5 -21t76.5 -6q60 0 120.5 15.5t113.5 46t86 82.5t33 117 q0 49 -20 89.5t-49 66.5t-58 47.5t-49 44t-20 44.5t15.5 42.5t37.5 39.5t44 42t37.5 59.5t15.5 82.5q0 60 -22.5 99.5t-72.5 90.5h83l88 64h-265q-85 0 -161 -32t-127.5 -98t-51.5 -153q0 -93 64.5 -154.5t158.5 -61.5q22 0 43 3q-13 -29 -13 -54q0 -44 40 -94 q-175 -12 -257 -63q-47 -29 -75.5 -73t-28.5 -95zM395 338q0 46 25 80t65.5 51.5t82 25t84.5 7.5q20 0 31 -2q2 -1 23 -16.5t26 -19t23 -18t24.5 -22t19 -22.5t17 -26t9 -26.5t4.5 -31.5q0 -76 -58.5 -112.5t-139.5 -36.5q-41 0 -80.5 9.5t-75.5 28.5t-58 53t-22 78z M462 969q0 61 32 104t92 43q53 0 93.5 -45t58 -101t17.5 -107q0 -60 -33 -99.5t-92 -39.5q-53 0 -93 42.5t-57.5 96.5t-17.5 106zM960 672h128v-160h64v160h128v64h-128v128h-64v-128h-128v-64z" />
+<glyph unicode="&#xf0d5;" horiz-adv-x="1664" d="M32 182q0 81 44.5 150t118.5 115q131 82 404 100q-32 42 -47.5 74t-15.5 73q0 36 21 85q-46 -4 -68 -4q-148 0 -249.5 96.5t-101.5 244.5q0 82 36 159t99 131q77 66 182.5 98t217.5 32h418l-138 -88h-131q74 -63 112 -133t38 -160q0 -72 -24.5 -129.5t-59 -93t-69.5 -65 t-59.5 -61.5t-24.5 -66q0 -36 32 -70.5t77.5 -68t90.5 -73.5t77 -104t32 -142q0 -90 -48 -173q-72 -122 -211 -179.5t-298 -57.5q-132 0 -246.5 41.5t-171.5 137.5q-37 60 -37 131zM218 228q0 -70 35 -123.5t91.5 -83t119 -44t127.5 -14.5q58 0 111.5 13t99 39t73 73 t27.5 109q0 25 -7 49t-14.5 42t-27 41.5t-29.5 35t-38.5 34.5t-36.5 29t-41.5 30t-36.5 26q-16 2 -48 2q-53 0 -105 -7t-107.5 -25t-97 -46t-68.5 -74.5t-27 -105.5zM324 1222q0 -46 10 -97.5t31.5 -103t52 -92.5t75 -67t96.5 -26q38 0 78 16.5t66 43.5q53 57 53 159 q0 58 -17 125t-48.5 129.5t-84.5 103.5t-117 41q-42 0 -82.5 -19.5t-65.5 -52.5q-47 -59 -47 -160zM1084 731v108h212v217h105v-217h213v-108h-213v-219h-105v219h-212z" />
+<glyph unicode="&#xf0d6;" horiz-adv-x="1920" d="M0 64v1152q0 26 19 45t45 19h1792q26 0 45 -19t19 -45v-1152q0 -26 -19 -45t-45 -19h-1792q-26 0 -45 19t-19 45zM128 384q106 0 181 -75t75 -181h1152q0 106 75 181t181 75v512q-106 0 -181 75t-75 181h-1152q0 -106 -75 -181t-181 -75v-512zM640 640q0 70 21 142 t59.5 134t101.5 101t138 39t138 -39t101.5 -101t59.5 -134t21 -142t-21 -142t-59.5 -134t-101.5 -101t-138 -39t-138 39t-101.5 101t-59.5 134t-21 142zM762 791l77 -80q42 37 55 57h2v-288h-128v-96h384v96h-128v448h-114z" />
+<glyph unicode="&#xf0d7;" horiz-adv-x="1024" d="M0 832q0 26 19 45t45 19h896q26 0 45 -19t19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45z" />
+<glyph unicode="&#xf0d8;" horiz-adv-x="1024" d="M0 320q0 26 19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0d9;" horiz-adv-x="640" d="M64 640q0 26 19 45l448 448q19 19 45 19t45 -19t19 -45v-896q0 -26 -19 -45t-45 -19t-45 19l-448 448q-19 19 -19 45z" />
+<glyph unicode="&#xf0da;" horiz-adv-x="640" d="M0 192v896q0 26 19 45t45 19t45 -19l448 -448q19 -19 19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19t-19 45z" />
+<glyph unicode="&#xf0db;" horiz-adv-x="1664" d="M0 32v1216q0 66 47 113t113 47h1344q66 0 113 -47t47 -113v-1216q0 -66 -47 -113t-113 -47h-1344q-66 0 -113 47t-47 113zM128 32q0 -13 9.5 -22.5t22.5 -9.5h608v1152h-640v-1120zM896 0h608q13 0 22.5 9.5t9.5 22.5v1120h-640v-1152z" />
+<glyph unicode="&#xf0dc;" horiz-adv-x="1024" d="M0 448q0 26 19 45t45 19h896q26 0 45 -19t19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45zM0 832q0 26 19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0dd;" horiz-adv-x="1024" d="M0 448q0 26 19 45t45 19h896q26 0 45 -19t19 -45t-19 -45l-448 -448q-19 -19 -45 -19t-45 19l-448 448q-19 19 -19 45z" />
+<glyph unicode="&#xf0de;" horiz-adv-x="1024" d="M0 832q0 26 19 45l448 448q19 19 45 19t45 -19l448 -448q19 -19 19 -45t-19 -45t-45 -19h-896q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0e0;" horiz-adv-x="1792" d="M0 32v794q44 -49 101 -87q362 -246 497 -345q57 -42 92.5 -65.5t94.5 -48t110 -24.5h1h1q51 0 110 24.5t94.5 48t92.5 65.5q170 123 498 345q57 39 100 87v-794q0 -66 -47 -113t-113 -47h-1472q-66 0 -113 47t-47 113zM0 1098q0 78 41.5 130t118.5 52h1472 q65 0 112.5 -47t47.5 -113q0 -79 -49 -151t-122 -123q-376 -261 -468 -325q-10 -7 -42.5 -30.5t-54 -38t-52 -32.5t-57.5 -27t-50 -9h-1h-1q-23 0 -50 9t-57.5 27t-52 32.5t-54 38t-42.5 30.5q-91 64 -262 182.5t-205 142.5q-62 42 -117 115.5t-55 136.5z" />
+<glyph unicode="&#xf0e1;" d="M0 1217q0 74 51.5 122.5t134.5 48.5t133 -48.5t51 -122.5q1 -73 -50.5 -122t-135.5 -49h-2q-82 0 -132 49t-50 122zM19 -80v991h330v-991h-330zM531 -80q2 399 2 647t-1 296l-1 48h329v-144h-2q20 32 41 56t56.5 52t87 43.5t114.5 15.5q171 0 275 -113.5t104 -332.5v-568 h-329v530q0 105 -40.5 164.5t-126.5 59.5q-63 0 -105.5 -34.5t-63.5 -85.5q-11 -30 -11 -81v-553h-329z" />
+<glyph unicode="&#xf0e2;" d="M0 832v448q0 42 40 59q39 17 69 -14l130 -129q107 101 244.5 156.5t284.5 55.5q156 0 298 -61t245 -164t164 -245t61 -298t-61 -298t-164 -245t-245 -164t-298 -61q-172 0 -327 72.5t-264 204.5q-7 10 -6.5 22.5t8.5 20.5l137 138q10 9 25 9q16 -2 23 -12 q73 -95 179 -147t225 -52q104 0 198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5t-40.5 198.5t-109.5 163.5t-163.5 109.5t-198.5 40.5q-98 0 -188 -35.5t-160 -101.5l137 -138q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf0e3;" horiz-adv-x="1792" d="M40 736q0 13 4.5 26t9 22t15.5 22t16.5 18.5t20.5 19t18 16.5q30 28 68 28q10 0 18 -1.5t16.5 -5.5t13.5 -6t13.5 -10t11.5 -10t13 -12.5t12 -12.5q-14 14 -14 34t14 34l348 348q14 14 34 14t34 -14q-2 2 -12.5 12t-12.5 13t-10 11.5t-10 13.5t-6 13.5t-5.5 16.5t-1.5 18 q0 38 28 68q3 3 16.5 18t19 20.5t18.5 16.5t22 15.5t22 9t26 4.5q40 0 68 -28l408 -408q28 -28 28 -68q0 -13 -4.5 -26t-9 -22t-15.5 -22t-16.5 -18.5t-20.5 -19t-18 -16.5q-30 -28 -68 -28q-10 0 -18 1.5t-16.5 5.5t-13.5 6t-13.5 10t-11.5 10t-13 12.5t-12 12.5 q14 -14 14 -34t-14 -34l-126 -126l256 -256q43 43 96 43q52 0 91 -37l363 -363q37 -39 37 -91q0 -53 -37 -90l-107 -108q-39 -37 -91 -37q-53 0 -90 37l-363 364q-38 36 -38 90q0 53 43 96l-256 256l-126 -126q-14 -14 -34 -14t-34 14q2 -2 12.5 -12t12.5 -13t10 -11.5 t10 -13.5t6 -13.5t5.5 -16.5t1.5 -18q0 -38 -28 -68q-3 -3 -16.5 -18t-19 -20.5t-18.5 -16.5t-22 -15.5t-22 -9t-26 -4.5q-40 0 -68 28l-408 408q-28 28 -28 68z" />
+<glyph unicode="&#xf0e4;" horiz-adv-x="1792" d="M0 384q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348q0 -261 -141 -483q-19 -29 -54 -29h-1402q-35 0 -54 29q-141 221 -141 483zM128 384q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z M320 832q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM710 241q-20 -77 20 -146t117 -89t146 20t89 117q16 60 -6 117t-72 91l101 382q6 26 -7.5 48.5t-38.5 29.5t-48 -6.5t-30 -39.5l-101 -382q-60 -5 -107 -43.5 t-63 -98.5zM768 1024q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM1216 832q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM1408 384q0 -53 37.5 -90.5 t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf0e5;" horiz-adv-x="1792" d="M0 640q0 174 120 321.5t326 233t450 85.5t450 -85.5t326 -233t120 -321.5t-120 -321.5t-326 -233t-450 -85.5q-70 0 -145 8q-198 -175 -460 -242q-49 -14 -114 -22h-5q-15 0 -27 10.5t-16 27.5v1q-3 4 -0.5 12t2 10t4.5 9.5l6 9t7 8.5t8 9q7 8 31 34.5t34.5 38t31 39.5 t32.5 51t27 59t26 76q-157 89 -247.5 220t-90.5 281zM128 640q0 -112 71.5 -213.5t201.5 -175.5l87 -50l-27 -96q-24 -91 -70 -172q152 63 275 171l43 38l57 -6q69 -8 130 -8q204 0 381.5 69.5t282 187.5t104.5 255t-104.5 255t-282 187.5t-381.5 69.5t-381.5 -69.5 t-282 -187.5t-104.5 -255z" />
+<glyph unicode="&#xf0e6;" horiz-adv-x="1792" d="M0 768q0 139 94 257t256.5 186.5t353.5 68.5t353.5 -68.5t256.5 -186.5t94 -257t-94 -257t-256.5 -186.5t-353.5 -68.5q-86 0 -176 16q-124 -88 -278 -128q-36 -9 -86 -16h-3q-11 0 -20.5 8t-11.5 21q-1 3 -1 6.5t0.5 6.5t2 6l2.5 5t3.5 5.5t4 5t4.5 5t4 4.5q5 6 23 25 t26 29.5t22.5 29t25 38.5t20.5 44q-124 72 -195 177t-71 224zM128 768q0 -82 53 -158t149 -132l97 -56l-35 -84q34 20 62 39l44 31l53 -10q78 -14 153 -14q153 0 286 52t211.5 141t78.5 191t-78.5 191t-211.5 141t-286 52t-286 -52t-211.5 -141t-78.5 -191zM616 132 q58 -4 88 -4q161 0 309 45t264 129q125 92 192 212t67 254q0 77 -23 152q129 -71 204 -178t75 -230q0 -120 -71 -224.5t-195 -176.5q10 -24 20.5 -44t25 -38.5t22.5 -29t26 -29.5t23 -25q1 -1 4 -4.5t4.5 -5t4 -5t3.5 -5.5l2.5 -5t2 -6t0.5 -6.5t-1 -6.5q-3 -14 -13 -22 t-22 -7q-50 7 -86 16q-154 40 -278 128q-90 -16 -176 -16q-271 0 -472 132z" />
+<glyph unicode="&#xf0e7;" horiz-adv-x="896" d="M1 551l201 825q4 14 16 23t28 9h328q19 0 32 -12.5t13 -29.5q0 -8 -5 -18l-171 -463l396 98q8 2 12 2q19 0 34 -15q18 -20 7 -44l-540 -1157q-13 -25 -42 -25q-4 0 -14 2q-17 5 -25.5 19t-4.5 30l197 808l-406 -101q-4 -1 -12 -1q-18 0 -31 11q-18 15 -13 39z" />
+<glyph unicode="&#xf0e8;" horiz-adv-x="1792" d="M0 -32v320q0 40 28 68t68 28h96v192q0 52 38 90t90 38h512v192h-96q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-96v-192h512q52 0 90 -38t38 -90v-192h96q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-320 q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192h-512v-192h96q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68v320q0 40 28 68t68 28h96v192h-512v-192h96q40 0 68 -28t28 -68v-320q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf0e9;" horiz-adv-x="1664" d="M0 681q0 5 1 7q45 183 172.5 319.5t298 204.5t360.5 68q140 0 274.5 -40t246.5 -113.5t194.5 -187t115.5 -251.5q1 -2 1 -7q0 -13 -9.5 -22.5t-22.5 -9.5q-11 0 -23 10q-49 46 -93 69t-102 23q-68 0 -128 -37t-103 -97q-7 -10 -17.5 -28t-14.5 -24q-11 -17 -28 -17 q-18 0 -29 17q-4 6 -14.5 24t-17.5 28q-43 60 -102.5 97t-127.5 37t-127.5 -37t-102.5 -97q-7 -10 -17.5 -28t-14.5 -24q-11 -17 -29 -17q-17 0 -28 17q-4 6 -14.5 24t-17.5 28q-43 60 -103 97t-128 37q-58 0 -102 -23t-93 -69q-12 -10 -23 -10q-13 0 -22.5 9.5t-9.5 22.5z M384 128q0 26 19 45t45 19t45 -19t19 -45q0 -50 39 -89t89 -39t89 39t39 89v580q33 11 64 11t64 -11v-580q0 -104 -76 -180t-180 -76t-180 76t-76 180zM768 1310v98q0 26 19 45t45 19t45 -19t19 -45v-98q-42 2 -64 2t-64 -2z" />
+<glyph unicode="&#xf0ea;" horiz-adv-x="1792" d="M0 96v1344q0 40 28 68t68 28h1088q40 0 68 -28t28 -68v-328q21 -13 36 -28l408 -408q28 -28 48 -76t20 -88v-672q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28t-28 68v160h-544q-40 0 -68 28t-28 68zM256 1312q0 -13 9.5 -22.5t22.5 -9.5h704q13 0 22.5 9.5t9.5 22.5v64 q0 13 -9.5 22.5t-22.5 9.5h-704q-13 0 -22.5 -9.5t-9.5 -22.5v-64zM768 -128h896v640h-416q-40 0 -68 28t-28 68v416h-384v-1152zM1280 640h299l-299 299v-299z" />
+<glyph unicode="&#xf0eb;" horiz-adv-x="1024" d="M0 960q0 99 44.5 184.5t117 142t164 89t186.5 32.5t186.5 -32.5t164 -89t117 -142t44.5 -184.5q0 -155 -103 -268q-45 -49 -74.5 -87t-59.5 -95.5t-34 -107.5q47 -28 47 -82q0 -37 -25 -64q25 -27 25 -64q0 -52 -45 -81q13 -23 13 -47q0 -46 -31.5 -71t-77.5 -25 q-20 -44 -60 -70t-87 -26t-87 26t-60 70q-46 0 -77.5 25t-31.5 71q0 24 13 47q-45 29 -45 81q0 37 25 64q-25 27 -25 64q0 54 47 82q-4 50 -34 107.5t-59.5 95.5t-74.5 87q-103 113 -103 268zM128 960q0 -101 68 -180q10 -11 30.5 -33t30.5 -33q128 -153 141 -298h228 q13 145 141 298q10 11 30.5 33t30.5 33q68 79 68 180q0 72 -34.5 134t-90 101.5t-123 62t-136.5 22.5t-136.5 -22.5t-123 -62t-90 -101.5t-34.5 -134zM480 1088q0 13 9.5 22.5t22.5 9.5q50 0 99.5 -16t87 -54t37.5 -90q0 -13 -9.5 -22.5t-22.5 -9.5t-22.5 9.5t-9.5 22.5 q0 46 -54 71t-106 25q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0ec;" horiz-adv-x="1792" d="M0 256q0 14 9 23l320 320q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-192h1376q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-1376v-192q0 -13 -9.5 -22.5t-22.5 -9.5q-12 0 -24 10l-319 320q-9 9 -9 22zM0 800v192q0 13 9.5 22.5t22.5 9.5h1376v192q0 14 9 23 t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192h-1376q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0ed;" horiz-adv-x="1920" d="M0 448q0 130 70 240t188 165q-2 30 -2 43q0 212 150 362t362 150q156 0 285.5 -87t188.5 -231q71 62 166 62q106 0 181 -75t75 -181q0 -76 -41 -138q130 -31 213.5 -135.5t83.5 -238.5q0 -159 -112.5 -271.5t-271.5 -112.5h-1088q-185 0 -316.5 131.5t-131.5 316.5z M512 608q0 -14 9 -23l352 -352q9 -9 23 -9t23 9l351 351q10 12 10 24q0 14 -9 23t-23 9h-224v352q0 13 -9.5 22.5t-22.5 9.5h-192q-13 0 -22.5 -9.5t-9.5 -22.5v-352h-224q-13 0 -22.5 -9.5t-9.5 -22.5z" />
+<glyph unicode="&#xf0ee;" horiz-adv-x="1920" d="M0 448q0 130 70 240t188 165q-2 30 -2 43q0 212 150 362t362 150q156 0 285.5 -87t188.5 -231q71 62 166 62q106 0 181 -75t75 -181q0 -76 -41 -138q130 -31 213.5 -135.5t83.5 -238.5q0 -159 -112.5 -271.5t-271.5 -112.5h-1088q-185 0 -316.5 131.5t-131.5 316.5z M512 672q0 -14 9 -23t23 -9h224v-352q0 -13 9.5 -22.5t22.5 -9.5h192q13 0 22.5 9.5t9.5 22.5v352h224q13 0 22.5 9.5t9.5 22.5q0 14 -9 23l-352 352q-9 9 -23 9t-23 -9l-351 -351q-10 -12 -10 -24z" />
+<glyph unicode="&#xf0f0;" horiz-adv-x="1408" d="M0 131q0 68 5.5 131t24 138t47.5 132.5t81 103t120 60.5q-22 -52 -22 -120v-203q-58 -20 -93 -70t-35 -111q0 -80 56 -136t136 -56t136 56t56 136q0 61 -35.5 111t-92.5 70v203q0 62 25 93q132 -104 295 -104t295 104q25 -31 25 -93v-64q-106 0 -181 -75t-75 -181v-89 q-32 -29 -32 -71q0 -40 28 -68t68 -28t68 28t28 68q0 42 -32 71v89q0 52 38 90t90 38t90 -38t38 -90v-89q-32 -29 -32 -71q0 -40 28 -68t68 -28t68 28t28 68q0 42 -32 71v89q0 68 -34.5 127.5t-93.5 93.5q0 10 0.5 42.5t0 48t-2.5 41.5t-7 47t-13 40q68 -15 120 -60.5 t81 -103t47.5 -132.5t24 -138t5.5 -131q0 -121 -73 -190t-194 -69h-874q-121 0 -194 69t-73 190zM256 192q0 26 19 45t45 19t45 -19t19 -45t-19 -45t-45 -19t-45 19t-19 45zM320 1024q0 159 112.5 271.5t271.5 112.5t271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5 t-271.5 112.5t-112.5 271.5z" />
+<glyph unicode="&#xf0f1;" horiz-adv-x="1408" d="M0 768v512q0 26 19 45t45 19q6 0 16 -2q17 30 47 48t65 18q53 0 90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5q-33 0 -64 18v-402q0 -106 94 -181t226 -75t226 75t94 181v402q-31 -18 -64 -18q-53 0 -90.5 37.5t-37.5 90.5t37.5 90.5t90.5 37.5q35 0 65 -18t47 -48 q10 2 16 2q26 0 45 -19t19 -45v-512q0 -144 -110 -252t-274 -128v-132q0 -106 94 -181t226 -75t226 75t94 181v395q-57 21 -92.5 70t-35.5 111q0 80 56 136t136 56t136 -56t56 -136q0 -62 -35.5 -111t-92.5 -70v-395q0 -159 -131.5 -271.5t-316.5 -112.5t-316.5 112.5 t-131.5 271.5v132q-164 20 -274 128t-110 252zM1152 832q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf0f2;" horiz-adv-x="1792" d="M0 96v832q0 92 66 158t158 66h64v-1280h-64q-92 0 -158 66t-66 158zM384 -128v1280h128v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h128v-1280h-1024zM640 1152h512v128h-512v-128zM1504 -128v1280h64q92 0 158 -66t66 -158v-832q0 -92 -66 -158t-158 -66h-64z " />
+<glyph unicode="&#xf0f3;" horiz-adv-x="1664" d="M0 128q190 161 287 397.5t97 498.5q0 165 96 262t264 117q-8 18 -8 37q0 40 28 68t68 28t68 -28t28 -68q0 -19 -8 -37q168 -20 264 -117t96 -262q0 -262 97 -498.5t287 -397.5q0 -52 -38 -90t-90 -38h-448q0 -106 -75 -181t-181 -75t-181 75t-75 181h-448q-52 0 -90 38 t-38 90zM656 0q0 -73 51.5 -124.5t124.5 -51.5q16 0 16 16t-16 16q-59 0 -101.5 42.5t-42.5 101.5q0 16 -16 16t-16 -16z" />
+<glyph unicode="&#xf0f4;" horiz-adv-x="1920" d="M0 128h1792q0 -106 -75 -181t-181 -75h-1280q-106 0 -181 75t-75 181zM256 480v736q0 26 19 45t45 19h1152q159 0 271.5 -112.5t112.5 -271.5t-112.5 -271.5t-271.5 -112.5h-64v-32q0 -92 -66 -158t-158 -66h-704q-92 0 -158 66t-66 158zM1408 704h64q80 0 136 56t56 136 t-56 136t-136 56h-64v-384z" />
+<glyph unicode="&#xf0f5;" horiz-adv-x="1408" d="M0 832v640q0 26 19 45t45 19t45 -19t19 -45v-416q0 -26 19 -45t45 -19t45 19t19 45v416q0 26 19 45t45 19t45 -19t19 -45v-416q0 -26 19 -45t45 -19t45 19t19 45v416q0 26 19 45t45 19t45 -19t19 -45v-640q0 -61 -35.5 -111t-92.5 -70v-779q0 -52 -38 -90t-90 -38h-128 q-52 0 -90 38t-38 90v779q-57 20 -92.5 70t-35.5 111zM768 416v800q0 132 94 226t226 94h256q26 0 45 -19t19 -45v-1600q0 -52 -38 -90t-90 -38h-128q-52 0 -90 38t-38 90v512h-224q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0f6;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM384 160v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23v-64 q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23zM384 416v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23zM384 672v64q0 14 9 23t23 9h704q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-704q-14 0 -23 9t-9 23zM1024 1024h376 q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf0f7;" horiz-adv-x="1408" d="M0 -192v1664q0 26 19 45t45 19h1280q26 0 45 -19t19 -45v-1664q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM128 -128h384v224q0 13 9.5 22.5t22.5 9.5h320q13 0 22.5 -9.5t9.5 -22.5v-224h384v1536h-1152v-1536zM256 160v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 672v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 928v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 1184v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 672v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 928v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 1184v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 672v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 928v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 1184v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 160v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 416v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 928v64q0 13 9.5 22.5t22.5 9.5h64 q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 1184v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0f8;" horiz-adv-x="1408" d="M0 -192v1280q0 26 19 45t45 19h320v288q0 40 28 68t68 28h448q40 0 68 -28t28 -68v-288h320q26 0 45 -19t19 -45v-1280q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM128 -128h384v224q0 13 9.5 22.5t22.5 9.5h320q13 0 22.5 -9.5t9.5 -22.5v-224h384v1152h-256 v-32q0 -40 -28 -68t-68 -28h-448q-40 0 -68 28t-28 68v32h-256v-1152zM256 160v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM256 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64 q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM512 1056q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v96h128 v-96q0 -13 9.5 -22.5t22.5 -9.5h64q13 0 22.5 9.5t9.5 22.5v320q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-96h-128v96q0 13 -9.5 22.5t-22.5 9.5h-64q-13 0 -22.5 -9.5t-9.5 -22.5v-320zM768 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM768 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 160v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 416v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5zM1024 672v64q0 13 9.5 22.5t22.5 9.5h64q13 0 22.5 -9.5t9.5 -22.5 v-64q0 -13 -9.5 -22.5t-22.5 -9.5h-64q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf0f9;" horiz-adv-x="1920" d="M64 192q0 26 19 45t45 19v416q0 26 13 58t32 51l198 198q19 19 51 32t58 13h160v320q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-1152q0 -26 -19 -45t-45 -19h-192q0 -106 -75 -181t-181 -75t-181 75t-75 181h-384q0 -106 -75 -181t-181 -75t-181 75t-75 181h-128 q-26 0 -45 19t-19 45zM256 640h384v256h-158q-14 -2 -22 -9l-195 -195q-7 -12 -9 -22v-30zM384 128q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM896 800q0 -14 9 -23t23 -9h224v-224q0 -14 9 -23t23 -9h192 q14 0 23 9t9 23v224h224q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-224v224q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-224h-224q-14 0 -23 -9t-9 -23v-192zM1280 128q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf0fa;" horiz-adv-x="1792" d="M0 96v832q0 92 66 158t158 66h32v-1280h-32q-92 0 -158 66t-66 158zM352 -128v1280h160v160q0 40 28 68t68 28h576q40 0 68 -28t28 -68v-160h160v-1280h-1088zM512 416q0 -14 9 -23t23 -9h224v-224q0 -14 9 -23t23 -9h192q14 0 23 9t9 23v224h224q14 0 23 9t9 23v192 q0 14 -9 23t-23 9h-224v224q0 14 -9 23t-23 9h-192q-14 0 -23 -9t-9 -23v-224h-224q-14 0 -23 -9t-9 -23v-192zM640 1152h512v128h-512v-128zM1536 -128v1280h32q92 0 158 -66t66 -158v-832q0 -92 -66 -158t-158 -66h-32z" />
+<glyph unicode="&#xf0fb;" horiz-adv-x="1920" d="M0 512v128l192 24v8h-128v32h-32v192l32 32h96l192 -224h160v416h-64v32h64h160h96q26 0 45 -4.5t19 -11.5t-19 -11.5t-45 -4.5h-69l293 -352h64l224 -64l352 -32q261 -58 287 -93l1 -3q-1 -32 -288 -96l-352 -32l-224 -64h-64l-293 -352h69q26 0 45 -4.5t19 -11.5 t-19 -11.5t-45 -4.5h-96h-160h-64v32h64v416h-160l-192 -224h-96l-32 32v192h32v32h128v8z" />
+<glyph unicode="&#xf0fc;" horiz-adv-x="1664" d="M64 1152l32 128h480l32 128h960l32 -192l-64 -32v-800l128 -192v-192h-1152v192l128 192h-128q-159 0 -271.5 112.5t-112.5 271.5v320zM384 768q0 -53 37.5 -90.5t90.5 -37.5h128v384h-256v-256z" />
+<glyph unicode="&#xf0fd;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 192q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v320h512v-320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45 v896q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-320h-512v320q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-896z" />
+<glyph unicode="&#xf0fe;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 576q0 -26 19 -45t45 -19h320v-320q0 -26 19 -45t45 -19h128q26 0 45 19t19 45v320h320q26 0 45 19t19 45 v128q0 26 -19 45t-45 19h-320v320q0 26 -19 45t-45 19h-128q-26 0 -45 -19t-19 -45v-320h-320q-26 0 -45 -19t-19 -45v-128z" />
+<glyph unicode="&#xf100;" horiz-adv-x="1024" d="M45 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23zM429 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23 l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf101;" horiz-adv-x="1024" d="M13 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23zM397 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10 l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf102;" horiz-adv-x="1152" d="M77 224q0 13 10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23zM77 608q0 13 10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23 l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf103;" horiz-adv-x="1152" d="M77 672q0 13 10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23zM77 1056q0 13 10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10 l50 -50q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf104;" horiz-adv-x="640" d="M45 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf105;" horiz-adv-x="640" d="M13 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf106;" horiz-adv-x="1152" d="M77 352q0 13 10 23l466 466q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-393 393l-393 -393q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf107;" horiz-adv-x="1152" d="M77 800q0 13 10 23l50 50q10 10 23 10t23 -10l393 -393l393 393q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23z" />
+<glyph unicode="&#xf108;" horiz-adv-x="1920" d="M0 288v1088q0 66 47 113t113 47h1600q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-544q0 -37 16 -77.5t32 -71t16 -43.5q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45q0 14 16 44t32 70t16 78h-544q-66 0 -113 47t-47 113zM128 544q0 -13 9.5 -22.5 t22.5 -9.5h1600q13 0 22.5 9.5t9.5 22.5v832q0 13 -9.5 22.5t-22.5 9.5h-1600q-13 0 -22.5 -9.5t-9.5 -22.5v-832z" />
+<glyph unicode="&#xf109;" horiz-adv-x="1920" d="M0 96v96h160h1600h160v-96q0 -40 -47 -68t-113 -28h-1600q-66 0 -113 28t-47 68zM256 416v704q0 66 47 113t113 47h1088q66 0 113 -47t47 -113v-704q0 -66 -47 -113t-113 -47h-1088q-66 0 -113 47t-47 113zM384 416q0 -13 9.5 -22.5t22.5 -9.5h1088q13 0 22.5 9.5 t9.5 22.5v704q0 13 -9.5 22.5t-22.5 9.5h-1088q-13 0 -22.5 -9.5t-9.5 -22.5v-704zM864 112q0 -16 16 -16h160q16 0 16 16t-16 16h-160q-16 0 -16 -16z" />
+<glyph unicode="&#xf10a;" horiz-adv-x="1152" d="M0 160v1088q0 66 47 113t113 47h832q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-832q-66 0 -113 47t-47 113zM128 288q0 -13 9.5 -22.5t22.5 -9.5h832q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-832q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM512 128 q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf10b;" horiz-adv-x="768" d="M0 128v1024q0 52 38 90t90 38h512q52 0 90 -38t38 -90v-1024q0 -52 -38 -90t-90 -38h-512q-52 0 -90 38t-38 90zM96 288q0 -13 9.5 -22.5t22.5 -9.5h512q13 0 22.5 9.5t9.5 22.5v704q0 13 -9.5 22.5t-22.5 9.5h-512q-13 0 -22.5 -9.5t-9.5 -22.5v-704zM288 1136 q0 -16 16 -16h160q16 0 16 16t-16 16h-160q-16 0 -16 -16zM304 128q0 -33 23.5 -56.5t56.5 -23.5t56.5 23.5t23.5 56.5t-23.5 56.5t-56.5 23.5t-56.5 -23.5t-23.5 -56.5z" />
+<glyph unicode="&#xf10c;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273z" />
+<glyph unicode="&#xf10d;" horiz-adv-x="1664" d="M0 192v704q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-64q-106 0 -181 -75t-75 -181v-32q0 -40 28 -68t68 -28h224q80 0 136 -56t56 -136v-384q0 -80 -56 -136t-136 -56h-384q-80 0 -136 56t-56 136z M896 192v704q0 104 40.5 198.5t109.5 163.5t163.5 109.5t198.5 40.5h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-64q-106 0 -181 -75t-75 -181v-32q0 -40 28 -68t68 -28h224q80 0 136 -56t56 -136v-384q0 -80 -56 -136t-136 -56h-384q-80 0 -136 56t-56 136z" />
+<glyph unicode="&#xf10e;" horiz-adv-x="1664" d="M0 832v384q0 80 56 136t136 56h384q80 0 136 -56t56 -136v-704q0 -104 -40.5 -198.5t-109.5 -163.5t-163.5 -109.5t-198.5 -40.5h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64q106 0 181 75t75 181v32q0 40 -28 68t-68 28h-224q-80 0 -136 56t-56 136zM896 832v384 q0 80 56 136t136 56h384q80 0 136 -56t56 -136v-704q0 -104 -40.5 -198.5t-109.5 -163.5t-163.5 -109.5t-198.5 -40.5h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h64q106 0 181 75t75 181v32q0 40 -28 68t-68 28h-224q-80 0 -136 56t-56 136z" />
+<glyph unicode="&#xf110;" horiz-adv-x="1568" d="M0 640q0 66 47 113t113 47t113 -47t47 -113t-47 -113t-113 -47t-113 47t-47 113zM176 1088q0 73 51.5 124.5t124.5 51.5t124.5 -51.5t51.5 -124.5t-51.5 -124.5t-124.5 -51.5t-124.5 51.5t-51.5 124.5zM208 192q0 60 42 102t102 42q59 0 101.5 -42t42.5 -102t-42.5 -102 t-101.5 -42q-60 0 -102 42t-42 102zM608 1280q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM672 0q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM1136 192q0 46 33 79t79 33t79 -33t33 -79 t-33 -79t-79 -33t-79 33t-33 79zM1168 1088q0 33 23.5 56.5t56.5 23.5t56.5 -23.5t23.5 -56.5t-23.5 -56.5t-56.5 -23.5t-56.5 23.5t-23.5 56.5zM1344 640q0 40 28 68t68 28t68 -28t28 -68t-28 -68t-68 -28t-68 28t-28 68z" />
+<glyph unicode="&#xf111;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5z" />
+<glyph unicode="&#xf112;" horiz-adv-x="1792" d="M0 896q0 26 19 45l512 512q19 19 45 19t45 -19t19 -45v-256h224q713 0 875 -403q53 -134 53 -333q0 -166 -127 -451q-3 -7 -10.5 -24t-13.5 -30t-13 -22q-12 -17 -28 -17q-15 0 -23.5 10t-8.5 25q0 9 2.5 26.5t2.5 23.5q5 68 5 123q0 101 -17.5 181t-48.5 138.5t-80 101 t-105.5 69.5t-133 42.5t-154 21.5t-175.5 6h-224v-256q0 -26 -19 -45t-45 -19t-45 19l-512 512q-19 19 -19 45z" />
+<glyph unicode="&#xf113;" horiz-adv-x="1664" d="M0 496q0 237 136 396q-27 82 -27 170q0 116 51 218q108 0 190 -39.5t189 -123.5q147 35 309 35q148 0 280 -32q105 82 187 121t189 39q51 -102 51 -218q0 -87 -27 -168q136 -160 136 -398q0 -207 -61 -331q-38 -77 -105.5 -133t-141 -86t-170 -47.5t-171.5 -22t-167 -4.5 q-78 0 -142 3t-147.5 12.5t-152.5 30t-137 51.5t-121 81t-86 115q-62 123 -62 331zM224 320q0 -88 32 -153.5t81 -103t122 -60t140 -29.5t149 -7h168q82 0 149 7t140 29.5t122 60t81 103t32 153.5q0 120 -69 204t-187 84q-41 0 -195 -21q-71 -11 -157 -11t-157 11 q-152 21 -195 21q-118 0 -187 -84t-69 -204zM384 320q0 40 12.5 82t43 76t72.5 34t72.5 -34t43 -76t12.5 -82t-12.5 -82t-43 -76t-72.5 -34t-72.5 34t-43 76t-12.5 82zM1024 320q0 40 12.5 82t43 76t72.5 34t72.5 -34t43 -76t12.5 -82t-12.5 -82t-43 -76t-72.5 -34t-72.5 34 t-43 76t-12.5 82z" />
+<glyph unicode="&#xf114;" horiz-adv-x="1664" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h672q92 0 158 -66t66 -158v-704q0 -92 -66 -158t-158 -66h-1216q-92 0 -158 66t-66 158zM128 224q0 -40 28 -68t68 -28h1216q40 0 68 28t28 68v704q0 40 -28 68t-68 28h-704q-40 0 -68 28t-28 68v64 q0 40 -28 68t-68 28h-320q-40 0 -68 -28t-28 -68v-960z" />
+<glyph unicode="&#xf115;" horiz-adv-x="1920" d="M0 224v960q0 92 66 158t158 66h320q92 0 158 -66t66 -158v-32h544q92 0 158 -66t66 -158v-160h192q54 0 99 -24.5t67 -70.5q15 -32 15 -68q0 -62 -46 -120l-295 -363q-43 -53 -116 -87.5t-140 -34.5h-1088q-92 0 -158 66t-66 158zM128 331l256 315q44 53 116 87.5 t140 34.5h768v160q0 40 -28 68t-68 28h-576q-40 0 -68 28t-28 68v64q0 40 -28 68t-68 28h-320q-40 0 -68 -28t-28 -68v-853zM171 163q0 -35 53 -35h1088q40 0 86 22t71 53l294 363q18 22 18 39q0 35 -53 35h-1088q-40 0 -85.5 -21.5t-71.5 -52.5l-294 -363q-18 -24 -18 -40z " />
+<glyph unicode="&#xf116;" horiz-adv-x="1792" />
+<glyph unicode="&#xf117;" horiz-adv-x="1792" />
+<glyph unicode="&#xf118;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM384 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM402 461q-8 25 4 48.5t38 31.5q25 8 48.5 -4t31.5 -38 q25 -80 92.5 -129.5t151.5 -49.5t151.5 49.5t92.5 129.5q8 26 32 38t49 4t37 -31.5t4 -48.5q-37 -121 -138 -195t-228 -74t-228 74t-138 195zM896 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf119;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM384 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM402 307q37 121 138 195t228 74t228 -74t138 -195q8 -25 -4 -48.5 t-37 -31.5t-49 4t-32 38q-25 80 -92.5 129.5t-151.5 49.5t-151.5 -49.5t-92.5 -129.5q-8 -26 -31.5 -38t-48.5 -4q-26 8 -38 31.5t-4 48.5zM896 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf11a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM384 448q0 26 19 45t45 19h640q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45zM384 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5 t-90.5 -37.5t-90.5 37.5t-37.5 90.5zM896 896q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5t-37.5 -90.5t-90.5 -37.5t-90.5 37.5t-37.5 90.5z" />
+<glyph unicode="&#xf11b;" horiz-adv-x="1920" d="M0 512q0 212 150 362t362 150h896q212 0 362 -150t150 -362t-150 -362t-362 -150q-192 0 -338 128h-220q-146 -128 -338 -128q-212 0 -362 150t-150 362zM192 448q0 -14 9 -23t23 -9h192v-192q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v192h192q14 0 23 9t9 23v128 q0 14 -9 23t-23 9h-192v192q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-192h-192q-14 0 -23 -9t-9 -23v-128zM1152 384q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5zM1408 640q0 -53 37.5 -90.5t90.5 -37.5 t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z" />
+<glyph unicode="&#xf11c;" horiz-adv-x="1920" d="M0 128v896q0 53 37.5 90.5t90.5 37.5h1664q53 0 90.5 -37.5t37.5 -90.5v-896q0 -53 -37.5 -90.5t-90.5 -37.5h-1664q-53 0 -90.5 37.5t-37.5 90.5zM128 128h1664v896h-1664v-896zM256 272v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM256 528v96 q0 16 16 16h224q16 0 16 -16v-96q0 -16 -16 -16h-224q-16 0 -16 16zM256 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM512 272v96q0 16 16 16h864q16 0 16 -16v-96q0 -16 -16 -16h-864q-16 0 -16 16zM512 784v96q0 16 16 16h96q16 0 16 -16v-96 q0 -16 -16 -16h-96q-16 0 -16 16zM640 528v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM768 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM896 528v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16z M1024 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM1152 528v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM1280 784v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16zM1408 528v96q0 16 16 16h112v240 q0 16 16 16h96q16 0 16 -16v-352q0 -16 -16 -16h-224q-16 0 -16 16zM1536 272v96q0 16 16 16h96q16 0 16 -16v-96q0 -16 -16 -16h-96q-16 0 -16 16z" />
+<glyph unicode="&#xf11d;" horiz-adv-x="1792" d="M64 1280q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5q0 -35 -17.5 -64t-46.5 -46v-1266q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v1266q-29 17 -46.5 46t-17.5 64zM320 320v742q0 35 31 55q35 21 78.5 42.5t114 52t152.5 49.5t155 19q112 0 209 -31t209 -86 q38 -19 89 -19q122 0 310 112q22 12 31 17q31 16 62 -2q31 -20 31 -55v-763q0 -39 -35 -57q-10 -5 -17 -9q-218 -116 -369 -116q-88 0 -158 35l-28 14q-64 33 -99 48t-91 29t-114 14q-102 0 -235.5 -44t-228.5 -102q-15 -9 -33 -9q-16 0 -32 8q-32 19 -32 56zM448 426 q245 113 433 113q55 0 103.5 -7.5t98 -26t77 -31t82.5 -39.5l28 -14q44 -22 101 -22q120 0 293 92v616q-169 -91 -306 -91q-82 0 -145 32q-100 49 -184 76.5t-178 27.5q-173 0 -403 -127v-599z" />
+<glyph unicode="&#xf11e;" horiz-adv-x="1792" d="M64 1280q0 53 37.5 90.5t90.5 37.5t90.5 -37.5t37.5 -90.5q0 -35 -17.5 -64t-46.5 -46v-1266q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v1266q-29 17 -46.5 46t-17.5 64zM320 320v742q0 35 31 55q35 21 78.5 42.5t114 52t152.5 49.5t155 19q112 0 209 -31t209 -86 q38 -19 89 -19q122 0 310 112q22 12 31 17q31 16 62 -2q31 -20 31 -55v-763q0 -39 -35 -57q-10 -5 -17 -9q-218 -116 -369 -116q-88 0 -158 35l-28 14q-64 33 -99 48t-91 29t-114 14q-102 0 -235.5 -44t-228.5 -102q-15 -9 -33 -9q-16 0 -32 8q-32 19 -32 56zM448 426 q205 96 384 110v192q-181 -16 -384 -117v-185zM448 836q215 111 384 118v197q-172 -8 -384 -126v-189zM832 730h19q102 0 192.5 -29t197.5 -82q19 -9 39 -15v-188q42 -17 91 -17q120 0 293 92v184q-235 -116 -384 -71v224q-20 6 -39 15q-5 3 -33 17t-34.5 17t-31.5 15 t-34.5 15.5t-32.5 13t-36 12.5t-35 8.5t-39.5 7.5t-39.5 4t-44 2q-23 0 -49 -3v-222zM1280 828q148 -42 384 90v189q-169 -91 -306 -91q-45 0 -78 8v-196z" />
+<glyph unicode="&#xf120;" horiz-adv-x="1664" d="M13 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23zM640 32v64q0 14 9 23t23 9h960q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-960 q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf121;" horiz-adv-x="1920" d="M45 576q0 13 10 23l466 466q10 10 23 10t23 -10l50 -50q10 -10 10 -23t-10 -23l-393 -393l393 -393q10 -10 10 -23t-10 -23l-50 -50q-10 -10 -23 -10t-23 10l-466 466q-10 10 -10 23zM712 -52l373 1291q4 13 15.5 19.5t23.5 2.5l62 -17q13 -4 19.5 -15.5t2.5 -24.5 l-373 -1291q-4 -13 -15.5 -19.5t-23.5 -2.5l-62 17q-13 4 -19.5 15.5t-2.5 24.5zM1293 160q0 13 10 23l393 393l-393 393q-10 10 -10 23t10 23l50 50q10 10 23 10t23 -10l466 -466q10 -10 10 -23t-10 -23l-466 -466q-10 -10 -23 -10t-23 10l-50 50q-10 10 -10 23z" />
+<glyph unicode="&#xf122;" horiz-adv-x="1792" d="M0 896q0 26 19 45l512 512q29 31 70 14q39 -17 39 -59v-69l-397 -398q-19 -19 -19 -45t19 -45l397 -397v-70q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-512 512q-19 19 -19 45zM384 896q0 26 19 45l512 512q29 31 70 14q39 -17 39 -59v-262q411 -28 599 -221 q169 -173 169 -509q0 -58 -17 -133.5t-38.5 -138t-48 -125t-40.5 -90.5l-20 -40q-8 -17 -28 -17q-6 0 -9 1q-25 8 -23 34q43 400 -106 565q-64 71 -170.5 110.5t-267.5 52.5v-251q0 -42 -39 -59q-13 -5 -25 -5q-27 0 -45 19l-512 512q-19 19 -19 45z" />
+<glyph unicode="&#xf123;" horiz-adv-x="1664" d="M2 900.5q9 27.5 54 34.5l502 73l225 455q20 41 49 41q28 0 49 -41l225 -455l502 -73q45 -7 54 -34.5t-24 -59.5l-363 -354l86 -500q5 -33 -6 -51.5t-34 -18.5q-17 0 -40 12l-449 236l-449 -236q-23 -12 -40 -12q-23 0 -34 18.5t-6 51.5l86 500l-364 354q-32 32 -23 59.5z M832 310l59 -31l318 -168l-60 355l-12 66l49 47l257 250l-356 52l-66 10l-30 60l-159 322v-963z" />
+<glyph unicode="&#xf124;" horiz-adv-x="1408" d="M2 561q-5 22 4 42t29 30l1280 640q13 7 29 7q27 0 45 -19q15 -14 18.5 -34.5t-6.5 -39.5l-640 -1280q-17 -35 -57 -35q-5 0 -15 2q-22 5 -35.5 22.5t-13.5 39.5v576h-576q-22 0 -39.5 13.5t-22.5 35.5z" />
+<glyph unicode="&#xf125;" horiz-adv-x="1664" d="M0 928v192q0 14 9 23t23 9h224v224q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-224h851l246 247q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-247 -246v-851h224q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-224v-224q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v224h-864 q-14 0 -23 9t-9 23v864h-224q-14 0 -23 9t-9 23zM512 301l595 595h-595v-595zM557 256h595v595z" />
+<glyph unicode="&#xf126;" horiz-adv-x="1024" d="M0 64q0 52 26 96.5t70 69.5v820q-44 25 -70 69.5t-26 96.5q0 80 56 136t136 56t136 -56t56 -136q0 -52 -26 -96.5t-70 -69.5v-497q54 26 154 57q55 17 87.5 29.5t70.5 31t59 39.5t40.5 51t28 69.5t8.5 91.5q-44 25 -70 69.5t-26 96.5q0 80 56 136t136 56t136 -56t56 -136 q0 -52 -26 -96.5t-70 -69.5q-2 -287 -226 -414q-68 -38 -203 -81q-128 -40 -169.5 -71t-41.5 -100v-26q44 -25 70 -69.5t26 -96.5q0 -80 -56 -136t-136 -56t-136 56t-56 136zM96 64q0 -40 28 -68t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68zM96 1216q0 -40 28 -68 t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68zM736 1088q0 -40 28 -68t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68z" />
+<glyph unicode="&#xf127;" horiz-adv-x="1664" d="M0 448q0 14 9 23t23 9h320q14 0 23 -9t9 -23t-9 -23t-23 -9h-320q-14 0 -23 9t-9 23zM16 1088q0 120 85 203l147 146q83 83 203 83q121 0 204 -85l334 -335q21 -21 42 -56l-239 -18l-273 274q-28 28 -68 28q-39 0 -68 -27l-147 -146q-28 -28 -28 -67q0 -40 28 -68 l274 -274l-18 -240q-35 21 -56 42l-336 336q-84 86 -84 204zM128 32q0 13 9 23l256 256q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-256 -256q-10 -9 -23 -9q-12 0 -23 9q-9 10 -9 23zM544 -96v320q0 14 9 23t23 9t23 -9t9 -23v-320q0 -14 -9 -23t-23 -9t-23 9t-9 23zM633 364 l239 18l273 -274q27 -27 68 -27.5t68 26.5l147 146q28 28 28 67q0 40 -28 68l-274 275l18 239q35 -21 56 -42l336 -336q84 -86 84 -204q0 -120 -85 -203l-147 -146q-83 -83 -203 -83q-121 0 -204 85l-334 335q-21 21 -42 56zM1056 1184v320q0 14 9 23t23 9t23 -9t9 -23v-320 q0 -14 -9 -23t-23 -9t-23 9t-9 23zM1216 1120q0 13 9 23l256 256q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-256 -256q-11 -9 -23 -9t-23 9q-9 10 -9 23zM1280 960q0 14 9 23t23 9h320q14 0 23 -9t9 -23t-9 -23t-23 -9h-320q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf128;" horiz-adv-x="1024" d="M96.5 986q-2.5 15 5.5 28q160 266 464 266q80 0 161 -31t146 -83t106 -127.5t41 -158.5q0 -54 -15.5 -101t-35 -76.5t-55 -59.5t-57.5 -43.5t-61 -35.5q-41 -23 -68.5 -65t-27.5 -67q0 -17 -12 -32.5t-28 -15.5h-240q-15 0 -25.5 18.5t-10.5 37.5v45q0 83 65 156.5 t143 108.5q59 27 84 56t25 76q0 42 -46.5 74t-107.5 32q-65 0 -108 -29q-35 -25 -107 -115q-13 -16 -31 -16q-12 0 -25 8l-164 125q-13 10 -15.5 25zM384 40v240q0 16 12 28t28 12h240q16 0 28 -12t12 -28v-240q0 -16 -12 -28t-28 -12h-240q-16 0 -28 12t-12 28z" />
+<glyph unicode="&#xf129;" horiz-adv-x="640" d="M0 64v128q0 26 19 45t45 19h64v384h-64q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h384q26 0 45 -19t19 -45v-576h64q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45zM128 1152v192q0 26 19 45t45 19h256q26 0 45 -19t19 -45v-192 q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf12a;" horiz-adv-x="640" d="M98 1344q-1 26 17.5 45t44.5 19h320q26 0 44.5 -19t17.5 -45l-28 -768q-1 -26 -20.5 -45t-45.5 -19h-256q-26 0 -45.5 19t-20.5 45zM128 64v224q0 26 19 45t45 19h256q26 0 45 -19t19 -45v-224q0 -26 -19 -45t-45 -19h-256q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf12b;" d="M5 0v167h128l197 291l-185 272h-137v168h276l139 -228q2 -4 23 -42q8 -9 11 -21h3q3 9 11 21l25 42l140 228h257v-168h-125l-184 -267l204 -296h109v-167h-248l-159 252l-24 42q-8 9 -11 21h-3l-9 -21q-10 -20 -25 -44l-155 -250h-258zM1013 713q0 64 26 117t65 86.5 t84 65t84 54.5t65 54t26 64q0 38 -29.5 62.5t-70.5 24.5q-51 0 -97 -39q-14 -11 -36 -38l-105 92q26 37 63 66q83 65 188 65q110 0 178 -59.5t68 -158.5q0 -56 -24.5 -103t-62 -76.5t-81.5 -58.5t-82 -50.5t-65.5 -51.5t-30.5 -63h232v80h126v-206h-514l-3 27q-4 28 -4 46z " />
+<glyph unicode="&#xf12c;" d="M5 0v167h128l197 291l-185 272h-137v168h276l139 -228q2 -4 23 -42q8 -9 11 -21h3q3 9 11 21l25 42l140 228h257v-168h-125l-184 -267l204 -296h109v-167h-248l-159 252l-24 42q-8 9 -11 21h-3l-9 -21q-10 -20 -25 -44l-155 -250h-258zM1015 -183q0 64 26 117t65 86.5 t84 65t84 54.5t65 54t26 64q0 38 -29.5 62.5t-70.5 24.5q-51 0 -97 -39q-14 -11 -36 -38l-105 92q26 37 63 66q80 65 188 65q110 0 178 -59.5t68 -158.5q0 -66 -34.5 -118.5t-84 -86t-99.5 -62.5t-87 -63t-41 -73h232v80h126v-206h-514l-4 27q-3 45 -3 46z" />
+<glyph unicode="&#xf12d;" horiz-adv-x="1920" d="M1.5 146.5q5.5 37.5 30.5 65.5l896 1024q38 44 96 44h768q38 0 69.5 -20.5t47.5 -54.5q15 -34 9.5 -71.5t-30.5 -65.5l-896 -1024q-38 -44 -96 -44h-768q-38 0 -69.5 20.5t-47.5 54.5q-15 34 -9.5 71.5zM128 128h768l336 384h-768z" />
+<glyph unicode="&#xf12e;" horiz-adv-x="1664" d="M0 0v1024q2 -1 17.5 -3.5t34 -5t21.5 -3.5q150 -24 245 -24q80 0 117 35q46 44 46 89q0 22 -15 50.5t-33.5 53t-33.5 64.5t-15 83q0 82 59 127.5t144 45.5q80 0 134 -44.5t54 -123.5q0 -41 -17.5 -77.5t-38 -59t-38 -56.5t-17.5 -71q0 -57 42 -83.5t103 -26.5 q64 0 180 15t163 17v-2q-1 -2 -3.5 -17.5t-5 -34t-3.5 -21.5q-24 -150 -24 -245q0 -80 35 -117q44 -46 89 -46q22 0 50.5 15t53 33.5t64.5 33.5t83 15q82 0 127.5 -59t45.5 -143q0 -81 -44.5 -135t-123.5 -54q-41 0 -77.5 17.5t-59 38t-56.5 38t-71 17.5q-110 0 -110 -124 q0 -39 16 -115t15 -115v-5q-22 0 -33 -1q-34 -3 -97.5 -11.5t-115.5 -13.5t-98 -5q-61 0 -103 26.5t-42 83.5q0 37 17.5 71t38 56.5t38 59t17.5 77.5q0 79 -54 123.5t-135 44.5q-84 0 -143 -45.5t-59 -127.5q0 -43 15 -83t33.5 -64.5t33.5 -53t15 -50.5q0 -45 -46 -89 q-37 -35 -117 -35q-95 0 -245 24q-9 2 -27.5 4t-27.5 4l-13 2q-1 0 -3 1q-2 0 -2 1z" />
+<glyph unicode="&#xf130;" horiz-adv-x="1152" d="M0 704v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -185 131.5 -316.5t316.5 -131.5t316.5 131.5t131.5 316.5v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -221 -147.5 -384.5t-364.5 -187.5v-132h256q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45 t19 45t45 19h256v132q-217 24 -364.5 187.5t-147.5 384.5zM256 704v512q0 132 94 226t226 94t226 -94t94 -226v-512q0 -132 -94 -226t-226 -94t-226 94t-94 226z" />
+<glyph unicode="&#xf131;" horiz-adv-x="1408" d="M13 64q0 13 10 23l1234 1234q10 10 23 10t23 -10l82 -82q10 -10 10 -23t-10 -23l-361 -361v-128q0 -132 -94 -226t-226 -94q-55 0 -109 19l-96 -96q97 -51 205 -51q185 0 316.5 131.5t131.5 316.5v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -221 -147.5 -384.5 t-364.5 -187.5v-132h256q26 0 45 -19t19 -45t-19 -45t-45 -19h-640q-26 0 -45 19t-19 45t19 45t45 19h256v132q-125 13 -235 81l-254 -254q-10 -10 -23 -10t-23 10l-82 82q-10 10 -10 23zM128 704v128q0 26 19 45t45 19t45 -19t19 -45v-128q0 -53 15 -113l-101 -101 q-42 103 -42 214zM384 704v512q0 132 94 226t226 94q102 0 184.5 -59t116.5 -152z" />
+<glyph unicode="&#xf132;" horiz-adv-x="1280" d="M0 576v768q0 26 19 45t45 19h1152q26 0 45 -19t19 -45v-768q0 -86 -33.5 -170.5t-83 -150t-118 -127.5t-126.5 -103t-121 -77.5t-89.5 -49.5t-42.5 -20q-12 -6 -26 -6t-26 6q-16 7 -42.5 20t-89.5 49.5t-121 77.5t-126.5 103t-118 127.5t-83 150t-33.5 170.5zM640 79 q119 63 213 137q235 184 235 360v640h-448v-1137z" />
+<glyph unicode="&#xf133;" horiz-adv-x="1664" d="M0 -128v1280q0 52 38 90t90 38h128v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h384v96q0 66 47 113t113 47h64q66 0 113 -47t47 -113v-96h128q52 0 90 -38t38 -90v-1280q0 -52 -38 -90t-90 -38h-1408q-52 0 -90 38t-38 90zM128 -128h1408v1024h-1408v-1024z M384 1088q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288zM1152 1088q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v288q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-288z" />
+<glyph unicode="&#xf134;" horiz-adv-x="1408" d="M3.5 940q-8.5 25 3.5 49q5 10 14.5 26t37.5 53.5t60.5 70t85 67t108.5 52.5q-25 42 -25 86q0 66 47 113t113 47t113 -47t47 -113q0 -33 -14 -64h302q0 11 7 20t18 11l448 96q3 1 7 1q12 0 20 -7q12 -9 12 -25v-320q0 -16 -12 -25q-8 -7 -20 -7q-4 0 -7 1l-448 96 q-11 2 -18 11t-7 20h-256v-102q111 -23 183.5 -111t72.5 -203v-800q0 -26 -19 -45t-45 -19h-512q-26 0 -45 19t-19 45v800q0 106 62.5 190.5t161.5 114.5v111h-32q-59 0 -115 -23.5t-91.5 -53t-66 -66.5t-40.5 -53.5t-14 -24.5q-17 -35 -57 -35q-16 0 -29 7q-23 12 -31.5 37 zM384 1344q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf135;" horiz-adv-x="1664" d="M36 464l224 384q10 14 26 16l379 20q96 114 176 195q188 187 358 258t431 71q14 0 24 -9.5t10 -22.5q0 -249 -75.5 -430.5t-253.5 -360.5q-81 -80 -195 -176l-20 -379q-2 -16 -16 -26l-384 -224q-7 -4 -16 -4q-12 0 -23 9l-64 64q-13 14 -8 32l85 276l-281 281l-276 -85 q-3 -1 -9 -1q-14 0 -23 9l-64 64q-17 19 -5 39zM1248 1088q0 -40 28 -68t68 -28t68 28t28 68t-28 68t-68 28t-68 -28t-28 -68z" />
+<glyph unicode="&#xf136;" horiz-adv-x="1792" d="M0 0l204 953l-153 327h1276q101 0 189.5 -40.5t147.5 -113.5q60 -73 81 -168.5t0 -194.5l-164 -763h-334l178 832q13 56 -15 88q-27 33 -83 33h-169l-204 -953h-334l204 953h-286l-204 -953h-334z" />
+<glyph unicode="&#xf137;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM346 640q0 -26 19 -45l454 -454q19 -19 45 -19t45 19l102 102q19 19 19 45t-19 45l-307 307l307 307 q19 19 19 45t-19 45l-102 102q-19 19 -45 19t-45 -19l-454 -454q-19 -19 -19 -45z" />
+<glyph unicode="&#xf138;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM506 288q0 -26 19 -45l102 -102q19 -19 45 -19t45 19l454 454q19 19 19 45t-19 45l-454 454 q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45t19 -45l307 -307l-307 -307q-19 -19 -19 -45z" />
+<glyph unicode="&#xf139;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM250 544q0 -26 19 -45l102 -102q19 -19 45 -19t45 19l307 307l307 -307q19 -19 45 -19t45 19l102 102 q19 19 19 45t-19 45l-454 454q-19 19 -45 19t-45 -19l-454 -454q-19 -19 -19 -45z" />
+<glyph unicode="&#xf13a;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM250 736q0 -26 19 -45l454 -454q19 -19 45 -19t45 19l454 454q19 19 19 45t-19 45l-102 102 q-19 19 -45 19t-45 -19l-307 -307l-307 307q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45z" />
+<glyph unicode="&#xf13b;" horiz-adv-x="1408" d="M0 1408h1408l-128 -1438l-578 -162l-574 162zM262 1114l47 -534h612l-22 -228l-197 -53l-196 53l-13 140h-175l22 -278l362 -100h4v1l359 99l50 544h-644l-15 181h674l16 175h-884z" />
+<glyph unicode="&#xf13c;" horiz-adv-x="1792" d="M12 75l71 356h297l-29 -147l422 -161l486 161l68 339h-1208l58 297h1209l38 191h-1208l59 297h1505l-266 -1333l-804 -267z" />
+<glyph unicode="&#xf13d;" horiz-adv-x="1792" d="M0 0v352q0 14 9 23t23 9h352q22 0 30 -20q8 -19 -7 -35l-100 -100q67 -91 189.5 -153.5t271.5 -82.5v647h-192q-26 0 -45 19t-19 45v128q0 26 19 45t45 19h192v163q-58 34 -93 92.5t-35 128.5q0 106 75 181t181 75t181 -75t75 -181q0 -70 -35 -128.5t-93 -92.5v-163h192 q26 0 45 -19t19 -45v-128q0 -26 -19 -45t-45 -19h-192v-647q149 20 271.5 82.5t189.5 153.5l-100 100q-15 16 -7 35q8 20 30 20h352q14 0 23 -9t9 -23v-352q0 -22 -20 -30q-8 -2 -12 -2q-13 0 -23 9l-93 93q-119 -143 -318.5 -226.5t-429.5 -83.5t-429.5 83.5t-318.5 226.5 l-93 -93q-9 -9 -23 -9q-4 0 -12 2q-20 8 -20 30zM832 1280q0 -26 19 -45t45 -19t45 19t19 45t-19 45t-45 19t-45 -19t-19 -45z" />
+<glyph unicode="&#xf13e;" horiz-adv-x="1152" d="M0 96v576q0 40 28 68t68 28h32v320q0 185 131.5 316.5t316.5 131.5t316.5 -131.5t131.5 -316.5q0 -26 -19 -45t-45 -19h-64q-26 0 -45 19t-19 45q0 106 -75 181t-181 75t-181 -75t-75 -181v-320h736q40 0 68 -28t28 -68v-576q0 -40 -28 -68t-68 -28h-960q-40 0 -68 28 t-28 68z" />
+<glyph unicode="&#xf140;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5zM256 640q0 212 150 362t362 150t362 -150t150 -362t-150 -362t-362 -150t-362 150t-150 362zM384 640q0 -159 112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5t-112.5 271.5 t-271.5 112.5t-271.5 -112.5t-112.5 -271.5zM512 640q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181z" />
+<glyph unicode="&#xf141;" horiz-adv-x="1408" d="M0 608v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM512 608v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM1024 608v192q0 40 28 68t68 28h192 q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf142;" horiz-adv-x="384" d="M0 96v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM0 608v192q0 40 28 68t68 28h192q40 0 68 -28t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68zM0 1120v192q0 40 28 68t68 28h192q40 0 68 -28 t28 -68v-192q0 -40 -28 -68t-68 -28h-192q-40 0 -68 28t-28 68z" />
+<glyph unicode="&#xf143;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 256q0 -53 37.5 -90.5t90.5 -37.5t90.5 37.5t37.5 90.5t-37.5 90.5t-90.5 37.5t-90.5 -37.5t-37.5 -90.5z M256 575q0 -13 8.5 -22t21.5 -10q154 -11 264 -121t121 -264q1 -13 10 -21.5t22 -8.5h128q13 0 23 10t9 24q-13 232 -177 396t-396 177q-14 1 -24 -9t-10 -23v-128zM256 959q0 -13 9 -22t22 -10q204 -7 378 -111.5t278.5 -278.5t111.5 -378q1 -13 10 -22t22 -9h128 q13 0 23 10q11 9 9 23q-5 154 -56 297.5t-139.5 260t-205 205t-260 139.5t-297.5 56q-14 1 -23 -9q-10 -10 -10 -23v-128z" />
+<glyph unicode="&#xf144;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM512 320q0 -37 32 -56q16 -8 32 -8q17 0 32 9l544 320q32 18 32 55t-32 55l-544 320q-31 19 -64 1 q-32 -19 -32 -56v-640z" />
+<glyph unicode="&#xf145;" horiz-adv-x="1792" d="M54 448.5q0 53.5 37 90.5l907 906q37 37 90.5 37t90.5 -37l125 -125q-56 -56 -56 -136t56 -136t136 -56t136 56l126 -125q37 -37 37 -90.5t-37 -90.5l-907 -908q-37 -37 -90.5 -37t-90.5 37l-126 126q56 56 56 136t-56 136t-136 56t-136 -56l-125 126q-37 37 -37 90.5z M342 512q0 -26 19 -45l362 -362q18 -18 45 -18t45 18l618 618q19 19 19 45t-19 45l-362 362q-18 18 -45 18t-45 -18l-618 -618q-19 -19 -19 -45zM452 512l572 572l316 -316l-572 -572z" />
+<glyph unicode="&#xf146;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 576q0 -26 19 -45t45 -19h896q26 0 45 19t19 45v128q0 26 -19 45t-45 19h-896q-26 0 -45 -19t-19 -45v-128 z" />
+<glyph unicode="&#xf147;" horiz-adv-x="1408" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM128 288q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v832q0 66 -47 113t-113 47h-832q-66 0 -113 -47 t-47 -113v-832zM256 672v64q0 14 9 23t23 9h832q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf148;" horiz-adv-x="1024" d="M3 18q-8 20 4 35l160 192q9 11 25 11h320v640h-192q-40 0 -58 37q-17 37 9 68l320 384q18 22 49 22t49 -22l320 -384q27 -32 9 -68q-18 -37 -58 -37h-192v-864q0 -14 -9 -23t-23 -9h-704q-21 0 -29 18z" />
+<glyph unicode="&#xf149;" horiz-adv-x="1024" d="M3 1261q9 19 29 19h704q13 0 22.5 -9.5t9.5 -23.5v-863h192q40 0 58 -37t-9 -69l-320 -384q-18 -22 -49 -22t-49 22l-320 384q-26 31 -9 69q18 37 58 37h192v640h-320q-14 0 -25 11l-160 192q-13 14 -4 34z" />
+<glyph unicode="&#xf14a;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM218 640q0 -26 19 -45l358 -358q19 -19 45 -19t45 19l614 614q19 19 19 45t-19 45l-102 102q-19 19 -45 19 t-45 -19l-467 -467l-211 211q-19 19 -45 19t-45 -19l-102 -102q-19 -19 -19 -45z" />
+<glyph unicode="&#xf14b;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 128h288l544 544l-288 288l-544 -544v-288zM352 320v56l52 52l152 -152l-52 -52h-56v96h-96zM494 494 q-14 13 3 30l291 291q17 17 30 3q14 -13 -3 -30l-291 -291q-17 -17 -30 -3zM864 1024l288 -288l92 92q28 28 28 68t-28 68l-152 152q-28 28 -68 28t-68 -28z" />
+<glyph unicode="&#xf14c;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM282 320q0 -26 19 -45l102 -102q19 -19 45 -19t45 19l534 534l144 -144q18 -19 45 -19q12 0 25 5q39 17 39 59 v480q0 26 -19 45t-45 19h-480q-42 0 -59 -39q-17 -41 14 -70l144 -144l-534 -534q-19 -19 -19 -45z" />
+<glyph unicode="&#xf14d;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 448q0 -181 167 -404q10 -12 25 -12q7 0 13 3q22 9 19 33q-44 354 62 473q46 52 130 75.5t224 23.5v-160 q0 -42 40 -59q12 -5 24 -5q26 0 45 19l352 352q19 19 19 45t-19 45l-352 352q-30 31 -69 14q-40 -17 -40 -59v-160q-119 0 -216 -19.5t-162.5 -51t-114 -79t-76.5 -95.5t-44.5 -109t-21.5 -111.5t-5 -110.5z" />
+<glyph unicode="&#xf14e;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 241v542l512 256v-542zM640 448l256 128l-256 128v-256z" />
+<glyph unicode="&#xf150;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-960 q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM391 861q17 35 57 35h640q40 0 57 -35q18 -35 -5 -66l-320 -448q-19 -27 -52 -27t-52 27l-320 448q-23 31 -5 66z" />
+<glyph unicode="&#xf151;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-960 q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM391 419q-18 35 5 66l320 448q19 27 52 27t52 -27l320 -448q23 -31 5 -66q-17 -35 -57 -35h-640q-40 0 -57 35z" />
+<glyph unicode="&#xf152;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -14 9 -23t23 -9h960q14 0 23 9t9 23v960q0 14 -9 23t-23 9h-960q-14 0 -23 -9t-9 -23v-960z M512 320v640q0 40 35 57q35 18 66 -5l448 -320q27 -19 27 -52t-27 -52l-448 -320q-31 -23 -66 -5q-35 17 -35 57z" />
+<glyph unicode="&#xf153;" horiz-adv-x="1024" d="M0 514v113q0 13 9.5 22.5t22.5 9.5h66q-2 57 1 105h-67q-14 0 -23 9t-9 23v114q0 14 9 23t23 9h98q67 210 243.5 338t400.5 128q102 0 194 -23q11 -3 20 -15q6 -11 3 -24l-43 -159q-3 -13 -14 -19.5t-24 -2.5l-4 1q-4 1 -11.5 2.5l-17.5 3.5t-22.5 3.5t-26 3t-29 2.5 t-29.5 1q-126 0 -226 -64t-150 -176h468q16 0 25 -12q10 -12 7 -26l-24 -114q-5 -26 -32 -26h-488q-3 -37 0 -105h459q15 0 25 -12q9 -12 6 -27l-24 -112q-2 -11 -11 -18.5t-20 -7.5h-387q48 -117 149.5 -185.5t228.5 -68.5q18 0 36 1.5t33.5 3.5t29.5 4.5t24.5 5t18.5 4.5 l12 3l5 2q13 5 26 -2q12 -7 15 -21l35 -159q3 -12 -3 -22.5t-17 -14.5l-5 -1q-4 -2 -10.5 -3.5t-16 -4.5t-21.5 -5.5t-25.5 -5t-30 -5t-33.5 -4.5t-36.5 -3t-38.5 -1q-234 0 -409 130.5t-238 351.5h-95q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf154;" horiz-adv-x="1024" d="M0 32v150q0 13 9.5 22.5t22.5 9.5h97v383h-95q-14 0 -23 9.5t-9 22.5v131q0 14 9 23t23 9h95v223q0 171 123.5 282t314.5 111q185 0 335 -125q9 -8 10 -20.5t-7 -22.5l-103 -127q-9 -11 -22 -12q-13 -2 -23 7q-5 5 -26 19t-69 32t-93 18q-85 0 -137 -47t-52 -123v-215 h305q13 0 22.5 -9t9.5 -23v-131q0 -13 -9.5 -22.5t-22.5 -9.5h-305v-379h414v181q0 13 9 22.5t23 9.5h162q14 0 23 -9.5t9 -22.5v-367q0 -14 -9 -23t-23 -9h-956q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf155;" horiz-adv-x="1024" d="M52 171l103 135q7 10 23 12q15 2 24 -9l2 -2q113 -99 243 -125q37 -8 74 -8q81 0 142.5 43t61.5 122q0 28 -15 53t-33.5 42t-58.5 37.5t-66 32t-80 32.5q-39 16 -61.5 25t-61.5 26.5t-62.5 31t-56.5 35.5t-53.5 42.5t-43.5 49t-35.5 58t-21 66.5t-8.5 78q0 138 98 242 t255 134v180q0 13 9.5 22.5t22.5 9.5h135q14 0 23 -9t9 -23v-176q57 -6 110.5 -23t87 -33.5t63.5 -37.5t39 -29t15 -14q17 -18 5 -38l-81 -146q-8 -15 -23 -16q-14 -3 -27 7q-3 3 -14.5 12t-39 26.5t-58.5 32t-74.5 26t-85.5 11.5q-95 0 -155 -43t-60 -111q0 -26 8.5 -48 t29.5 -41.5t39.5 -33t56 -31t60.5 -27t70 -27.5q53 -20 81 -31.5t76 -35t75.5 -42.5t62 -50t53 -63.5t31.5 -76.5t13 -94q0 -153 -99.5 -263.5t-258.5 -136.5v-175q0 -14 -9 -23t-23 -9h-135q-13 0 -22.5 9.5t-9.5 22.5v175q-66 9 -127.5 31t-101.5 44.5t-74 48t-46.5 37.5 t-17.5 18q-17 21 -2 41z" />
+<glyph unicode="&#xf156;" horiz-adv-x="898" d="M0 605v127q0 13 9.5 22.5t22.5 9.5h112q132 0 212.5 43t102.5 125h-427q-14 0 -23 9t-9 23v102q0 14 9 23t23 9h413q-57 113 -268 113h-145q-13 0 -22.5 9.5t-9.5 22.5v133q0 14 9 23t23 9h832q14 0 23 -9t9 -23v-102q0 -14 -9 -23t-23 -9h-233q47 -61 64 -144h171 q14 0 23 -9t9 -23v-102q0 -14 -9 -23t-23 -9h-168q-23 -144 -129 -234t-276 -110q167 -178 459 -536q14 -16 4 -34q-8 -18 -29 -18h-195q-16 0 -25 12q-306 367 -498 571q-9 9 -9 22z" />
+<glyph unicode="&#xf157;" horiz-adv-x="1027" d="M4 1360q-8 16 0 32q10 16 28 16h194q19 0 29 -18l215 -425q19 -38 56 -125q10 24 30.5 68t27.5 61l191 420q8 19 29 19h191q17 0 27 -16q9 -14 1 -31l-313 -579h215q13 0 22.5 -9.5t9.5 -22.5v-104q0 -14 -9.5 -23t-22.5 -9h-290v-85h290q13 0 22.5 -9.5t9.5 -22.5v-103 q0 -14 -9.5 -23t-22.5 -9h-290v-330q0 -13 -9.5 -22.5t-22.5 -9.5h-172q-13 0 -22.5 9t-9.5 23v330h-288q-13 0 -22.5 9t-9.5 23v103q0 13 9.5 22.5t22.5 9.5h288v85h-288q-13 0 -22.5 9t-9.5 23v104q0 13 9.5 22.5t22.5 9.5h214z" />
+<glyph unicode="&#xf158;" horiz-adv-x="1280" d="M0 256v128q0 14 9 23t23 9h224v118h-224q-14 0 -23 9t-9 23v149q0 13 9 22.5t23 9.5h224v629q0 14 9 23t23 9h539q200 0 326.5 -122t126.5 -315t-126.5 -315t-326.5 -122h-340v-118h505q14 0 23 -9t9 -23v-128q0 -14 -9 -23t-23 -9h-505v-192q0 -14 -9.5 -23t-22.5 -9 h-167q-14 0 -23 9t-9 23v192h-224q-14 0 -23 9t-9 23zM487 747h320q106 0 171 62t65 162t-65 162t-171 62h-320v-448z" />
+<glyph unicode="&#xf159;" horiz-adv-x="1792" d="M0 672v64q0 14 9 23t23 9h175l-33 128h-142q-14 0 -23 9t-9 23v64q0 14 9 23t23 9h109l-89 344q-5 15 5 28q10 12 26 12h137q26 0 31 -24l90 -360h359l97 360q7 24 31 24h126q24 0 31 -24l98 -360h365l93 360q5 24 31 24h137q16 0 26 -12q10 -13 5 -28l-91 -344h111 q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-145l-34 -128h179q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-213l-164 -616q-7 -24 -31 -24h-159q-24 0 -31 24l-166 616h-209l-167 -616q-7 -24 -31 -24h-159q-11 0 -19.5 7t-10.5 17l-160 616h-208q-14 0 -23 9t-9 23z M373 896l32 -128h225l35 128h-292zM436 640l75 -300q1 -1 1 -3t1 -3q0 1 0.5 3.5t0.5 3.5l81 299h-159zM822 768h139l-35 128h-70zM1118 896l34 -128h230l33 128h-297zM1187 640l81 -299q0 -1 0.5 -3.5t1.5 -3.5q0 1 0.5 3t0.5 3l78 300h-162z" />
+<glyph unicode="&#xf15a;" horiz-adv-x="1280" d="M56 0l31 183h111q50 0 58 51v402h16q-6 1 -16 1v287q-13 68 -89 68h-111v164l212 -1q64 0 97 1v252h154v-247q82 2 122 2v245h154v-252q79 -7 140 -22.5t113 -45t82.5 -78t36.5 -114.5q18 -182 -131 -258q117 -28 175 -103t45 -214q-7 -71 -32.5 -125t-64.5 -89 t-97 -58.5t-121.5 -34.5t-145.5 -15v-255h-154v251q-80 0 -122 1v-252h-154v255q-18 0 -54 0.5t-55 0.5h-200zM522 182q8 0 37 -0.5t48 -0.5t53 1.5t58.5 4t57 8.5t55.5 14t47.5 21t39.5 30t24.5 40t9.5 51q0 36 -15 64t-37 46t-57.5 30.5t-65.5 18.5t-74 9t-69 3t-64.5 -1 t-47.5 -1v-338zM522 674q5 0 34.5 -0.5t46.5 0t50 2t55 5.5t51.5 11t48.5 18.5t37 27t27 38.5t9 51q0 33 -12.5 58.5t-30.5 42t-48 28t-55 16.5t-61.5 8t-58 2.5t-54 -1t-39.5 -0.5v-307z" />
+<glyph unicode="&#xf15b;" d="M0 -160v1600q0 40 28 68t68 28h800v-544q0 -40 28 -68t68 -28h544v-1056q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM1024 1024v472q22 -14 36 -28l408 -408q14 -14 28 -36h-472z" />
+<glyph unicode="&#xf15c;" d="M0 -160v1600q0 40 28 68t68 28h800v-544q0 -40 28 -68t68 -28h544v-1056q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM384 160q0 -14 9 -23t23 -9h704q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64zM384 416q0 -14 9 -23t23 -9h704 q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64zM384 672q0 -14 9 -23t23 -9h704q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-704q-14 0 -23 -9t-9 -23v-64zM1024 1024v472q22 -14 36 -28l408 -408q14 -14 28 -36h-472z" />
+<glyph unicode="&#xf15d;" horiz-adv-x="1664" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM899 768v106h70l230 662h162l230 -662h70v-106h-288v106h75l-47 144h-243l-47 -144h75v-106 h-287zM988 -166l369 529q12 18 21 27l11 9v3q-2 0 -6.5 -0.5t-7.5 -0.5q-12 -3 -30 -3h-232v-115h-120v229h567v-89l-369 -530q-6 -8 -21 -26l-11 -11v-2l14 2q9 2 30 2h248v119h121v-233h-584v90zM1191 1128h177l-72 218l-12 47q-2 16 -2 20h-4l-3 -20q0 -1 -3.5 -18 t-7.5 -29z" />
+<glyph unicode="&#xf15e;" horiz-adv-x="1664" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM899 -150h70l230 662h162l230 -662h70v-106h-288v106h75l-47 144h-243l-47 -144h75v-106h-287 v106zM988 768v90l369 529q12 18 21 27l11 9v3q-2 0 -6.5 -0.5t-7.5 -0.5q-12 -3 -30 -3h-232v-115h-120v229h567v-89l-369 -530q-6 -8 -21 -26l-11 -10v-3l14 3q9 1 30 1h248v119h121v-233h-584zM1191 104h177l-72 218l-12 47q-2 16 -2 20h-4l-3 -20q0 -1 -3.5 -18t-7.5 -29 z" />
+<glyph unicode="&#xf160;" horiz-adv-x="1792" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM896 -32q0 14 9 23t23 9h832q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9 t-9 23v192zM896 288v192q0 14 9 23t23 9h640q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-640q-14 0 -23 9t-9 23zM896 800v192q0 14 9 23t23 9h448q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-448q-14 0 -23 9t-9 23zM896 1312v192q0 14 9 23t23 9h256q14 0 23 -9t9 -23 v-192q0 -14 -9 -23t-23 -9h-256q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf161;" horiz-adv-x="1792" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM896 -32q0 14 9 23t23 9h256q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-256q-14 0 -23 9 t-9 23v192zM896 288v192q0 14 9 23t23 9h448q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-448q-14 0 -23 9t-9 23zM896 800v192q0 14 9 23t23 9h640q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-640q-14 0 -23 9t-9 23zM896 1312v192q0 14 9 23t23 9h832q14 0 23 -9t9 -23 v-192q0 -14 -9 -23t-23 -9h-832q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf162;" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM946 261q0 105 72 178t181 73q123 0 205 -94.5t82 -252.5q0 -62 -13 -121.5t-41 -114 t-68 -95.5t-98.5 -65.5t-127.5 -24.5q-62 0 -108 16q-24 8 -42 15l39 113q15 -7 31 -11q37 -13 75 -13q84 0 134.5 58.5t66.5 145.5h-2q-21 -23 -61.5 -37t-84.5 -14q-106 0 -173 71.5t-67 172.5zM976 1351l192 185h123v-654h165v-114h-469v114h167v432q0 7 0.5 19t0.5 17 v16h-2l-7 -12q-8 -13 -26 -31l-62 -58zM1085 261q0 -57 36.5 -95t104.5 -38q50 0 85 27t35 68q0 63 -44 116t-103 53q-52 0 -83 -37t-31 -94z" />
+<glyph unicode="&#xf163;" d="M34 108q8 20 30 20h192v1376q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1376h192q14 0 23 -9t9 -23q0 -12 -10 -24l-319 -319q-10 -9 -23 -9q-12 0 -23 9l-320 320q-15 16 -7 35zM946 1285q0 105 72 178t181 73q123 0 205 -94.5t82 -252.5q0 -62 -13 -121.5t-41 -114 t-68 -95.5t-98.5 -65.5t-127.5 -24.5q-62 0 -108 16q-24 8 -42 15l39 113q15 -7 31 -11q37 -13 75 -13q84 0 134.5 58.5t66.5 145.5h-2q-21 -23 -61.5 -37t-84.5 -14q-106 0 -173 71.5t-67 172.5zM976 327l192 185h123v-654h165v-114h-469v114h167v432q0 7 0.5 19t0.5 17v16 h-2l-7 -12q-8 -13 -26 -31l-62 -58zM1085 1285q0 -57 36.5 -95t104.5 -38q50 0 85 27t35 68q0 63 -44 116t-103 53q-52 0 -83 -37t-31 -94z" />
+<glyph unicode="&#xf164;" horiz-adv-x="1664" d="M0 64v640q0 26 19 45t45 19h288q26 0 45 -19t19 -45v-640q0 -26 -19 -45t-45 -19h-288q-26 0 -45 19t-19 45zM128 192q0 -27 18.5 -45.5t45.5 -18.5q26 0 45 18.5t19 45.5q0 26 -19 45t-45 19q-27 0 -45.5 -19t-18.5 -45zM480 64v641q0 25 18 43.5t43 20.5q24 2 76 59 t101 121q68 87 101 120q18 18 31 48t17.5 48.5t13.5 60.5q7 39 12.5 61t19.5 52t34 50q19 19 45 19q46 0 82.5 -10.5t60 -26t40 -40.5t24 -45t12 -50t5 -45t0.5 -39q0 -38 -9.5 -76t-19 -60t-27.5 -56q-3 -6 -10 -18t-11 -22t-8 -24h277q78 0 135 -57t57 -135 q0 -86 -55 -149q15 -44 15 -76q3 -76 -43 -137q17 -56 0 -117q-15 -57 -54 -94q9 -112 -49 -181q-64 -76 -197 -78h-36h-76h-17q-66 0 -144 15.5t-121.5 29t-120.5 39.5q-123 43 -158 44q-26 1 -45 19.5t-19 44.5z" />
+<glyph unicode="&#xf165;" horiz-adv-x="1664" d="M0 448q0 -26 19 -45t45 -19h288q26 0 45 19t19 45v640q0 26 -19 45t-45 19h-288q-26 0 -45 -19t-19 -45v-640zM128 960q0 27 18.5 45.5t45.5 18.5q26 0 45 -18.5t19 -45.5q0 -26 -19 -45t-45 -19q-27 0 -45.5 19t-18.5 45zM480 447v641q0 26 19 44.5t45 19.5q35 1 158 44 q77 26 120.5 39.5t121.5 29t144 15.5h17h76h36q133 -2 197 -78q58 -69 49 -181q39 -37 54 -94q17 -61 0 -117q46 -61 43 -137q0 -32 -15 -76q55 -61 55 -149q-1 -78 -57.5 -135t-134.5 -57h-277q4 -14 8 -24t11 -22t10 -18q18 -37 27 -57t19 -58.5t10 -76.5q0 -24 -0.5 -39 t-5 -45t-12 -50t-24 -45t-40 -40.5t-60 -26t-82.5 -10.5q-26 0 -45 19q-20 20 -34 50t-19.5 52t-12.5 61q-9 42 -13.5 60.5t-17.5 48.5t-31 48q-33 33 -101 120q-49 64 -101 121t-76 59q-25 2 -43 20.5t-18 43.5z" />
+<glyph unicode="&#xf166;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM218 366q0 -176 20 -260q10 -43 42.5 -73t75.5 -35q137 -15 412 -15t412 15q43 5 75.5 35t42.5 73 q20 84 20 260q0 177 -19 260q-10 44 -43 73.5t-76 34.5q-136 15 -412 15q-275 0 -411 -15q-44 -5 -76.5 -34.5t-42.5 -73.5q-20 -87 -20 -260zM300 551v70h232v-70h-80v-423h-74v423h-78zM396 1313l24 -69t23 -69q35 -103 46 -158v-201h74v201l90 296h-75l-51 -195l-53 195 h-78zM542 205v290h66v-270q0 -24 1 -26q1 -15 15 -15q20 0 42 31v280h67v-367h-67v40q-39 -45 -76 -45q-33 0 -42 28q-6 16 -6 54zM654 936q0 -58 21 -87q27 -38 78 -38q49 0 78 38q21 27 21 87v130q0 58 -21 87q-29 38 -78 38q-51 0 -78 -38q-21 -29 -21 -87v-130zM721 923 v156q0 52 32 52t32 -52v-156q0 -51 -32 -51t-32 51zM790 128v493h67v-161q32 40 68 40q41 0 53 -42q7 -21 7 -74v-146q0 -52 -7 -73q-12 -42 -53 -42q-35 0 -68 41v-36h-67zM857 200q16 -16 33 -16q29 0 29 49v157q0 50 -29 50q-17 0 -33 -16v-224zM907 893q0 -37 6 -55 q11 -27 43 -27q36 0 77 45v-40h67v370h-67v-283q-22 -31 -42 -31q-15 0 -16 16q-1 2 -1 26v272h-67v-293zM1037 247v129q0 59 20 86q29 38 80 38t78 -38q21 -28 21 -86v-76h-133v-65q0 -51 34 -51q24 0 30 26q0 1 0.5 7t0.5 16.5v21.5h68v-9q0 -29 -2 -43q-3 -22 -15 -40 q-27 -40 -80 -40q-52 0 -81 38q-21 27 -21 86zM1103 355h66v34q0 51 -33 51t-33 -51v-34z" />
+<glyph unicode="&#xf167;" d="M27 260q0 234 26 350q14 59 58 99t103 47q183 20 554 20t555 -20q58 -7 102.5 -47t57.5 -99q26 -112 26 -350q0 -234 -26 -350q-14 -59 -58 -99t-102 -46q-184 -21 -555 -21t-555 21q-58 6 -102.5 46t-57.5 99q-26 112 -26 350zM138 509h105v-569h100v569h107v94h-312 v-94zM266 1536h106l71 -263l68 263h102l-121 -399v-271h-100v271q-14 74 -61 212q-37 103 -65 187zM463 43q0 -49 8 -73q12 -37 58 -37q48 0 102 61v-54h89v494h-89v-378q-30 -42 -57 -42q-18 0 -21 21q-1 3 -1 35v364h-89v-391zM614 1028v175q0 80 28 117q38 51 105 51 q69 0 106 -51q28 -37 28 -117v-175q0 -81 -28 -118q-37 -51 -106 -51q-67 0 -105 51q-28 38 -28 118zM704 1011q0 -70 43 -70t43 70v210q0 69 -43 69t-43 -69v-210zM798 -60h89v48q45 -55 93 -55q54 0 71 55q9 27 9 100v197q0 73 -9 99q-17 56 -71 56q-50 0 -93 -54v217h-89 v-663zM887 36v301q22 22 45 22q39 0 39 -67v-211q0 -67 -39 -67q-23 0 -45 22zM955 971v394h91v-367q0 -33 1 -35q3 -22 21 -22q27 0 57 43v381h91v-499h-91v55q-53 -62 -103 -62q-46 0 -59 37q-8 24 -8 75zM1130 100q0 -79 29 -116q39 -51 108 -51q72 0 108 53q18 27 21 54 q2 9 2 58v13h-91q0 -51 -2 -61q-7 -36 -40 -36q-46 0 -46 69v87h179v103q0 79 -27 116q-39 51 -106 51q-68 0 -107 -51q-28 -37 -28 -116v-173zM1219 245v46q0 68 45 68t45 -68v-46h-90z" />
+<glyph unicode="&#xf168;" horiz-adv-x="1408" d="M5 384q-10 17 0 36l253 448q1 0 0 1l-161 279q-12 22 -1 37q9 15 32 15h239q40 0 66 -45l164 -286q-10 -18 -257 -456q-27 -46 -65 -46h-239q-21 0 -31 17zM536 539q18 32 531 942q25 45 64 45h241q22 0 31 -15q11 -16 0 -37l-528 -934v-1l336 -615q11 -20 1 -37 q-10 -15 -32 -15h-239q-42 0 -66 45z" />
+<glyph unicode="&#xf169;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM227 396q8 -13 24 -13h185q31 0 50 36l199 352q0 1 -126 222q-21 34 -52 34h-184q-18 0 -26 -11q-7 -12 1 -29 l125 -216v-1l-196 -346q-9 -14 0 -28zM638 516q1 -2 262 -481q20 -35 52 -35h184q18 0 25 12q8 13 -1 28l-260 476v1l409 723q8 16 0 28q-7 12 -24 12h-187q-30 0 -49 -35z" />
+<glyph unicode="&#xf16a;" horiz-adv-x="1792" d="M0 640q0 96 1 150t8.5 136.5t22.5 147.5q16 73 69 123t124 58q222 25 671 25t671 -25q71 -8 124.5 -58t69.5 -123q14 -65 21.5 -147.5t8.5 -136.5t1 -150t-1 -150t-8.5 -136.5t-22.5 -147.5q-16 -73 -69 -123t-124 -58q-222 -25 -671 -25t-671 25q-71 8 -124.5 58 t-69.5 123q-14 65 -21.5 147.5t-8.5 136.5t-1 150zM640 320q0 -38 33 -56q16 -8 31 -8q20 0 34 10l512 320q30 17 30 54t-30 54l-512 320q-31 20 -65 2q-33 -18 -33 -56v-640z" />
+<glyph unicode="&#xf16b;" horiz-adv-x="1792" d="M64 558l338 271l494 -305l-342 -285zM64 1099l490 319l342 -285l-494 -304zM407 166v108l147 -96l342 284v2l1 -1l1 1v-2l343 -284l147 96v-108l-490 -293v-1l-1 1l-1 -1v1zM896 524l494 305l338 -271l-489 -319zM896 1133l343 285l489 -319l-338 -270z" />
+<glyph unicode="&#xf16c;" horiz-adv-x="1408" d="M0 -255v736h121v-618h928v618h120v-701l-1 -35v-1h-1132l-35 1h-1zM221 -17v151l707 1v-151zM227 243l14 150l704 -65l-13 -150zM270 563l39 146l683 -183l-39 -146zM395 928l77 130l609 -360l-77 -130zM707 1303l125 86l398 -585l-124 -85zM1136 1510l149 26l121 -697 l-149 -26z" />
+<glyph unicode="&#xf16d;" d="M0 69v1142q0 81 58 139t139 58h1142q81 0 139 -58t58 -139v-1142q0 -81 -58 -139t-139 -58h-1142q-81 0 -139 58t-58 139zM171 110q0 -26 17.5 -43.5t43.5 -17.5h1069q25 0 43 17.5t18 43.5v648h-135q20 -63 20 -131q0 -126 -64 -232.5t-174 -168.5t-240 -62 q-197 0 -337 135.5t-140 327.5q0 68 20 131h-141v-648zM461 643q0 -124 90.5 -211.5t217.5 -87.5q128 0 218.5 87.5t90.5 211.5t-90.5 211.5t-218.5 87.5q-127 0 -217.5 -87.5t-90.5 -211.5zM1050 1003q0 -29 20 -49t49 -20h174q29 0 49 20t20 49v165q0 28 -20 48.5 t-49 20.5h-174q-29 0 -49 -20.5t-20 -48.5v-165z" />
+<glyph unicode="&#xf16e;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM274 640q0 -88 62 -150t150 -62t150 62t62 150t-62 150t-150 62t-150 -62t-62 -150zM838 640q0 -88 62 -150 t150 -62t150 62t62 150t-62 150t-150 62t-150 -62t-62 -150z" />
+<glyph unicode="&#xf170;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM309 384h94l104 160h522l104 -160h94l-459 691zM567 608l201 306l201 -306h-402z" />
+<glyph unicode="&#xf171;" horiz-adv-x="1408" d="M0 1222q3 26 17.5 48.5t31.5 37.5t45 30t46 22.5t48 18.5q125 46 313 64q379 37 676 -50q155 -46 215 -122q16 -20 16.5 -51t-5.5 -54q-26 -167 -111 -655q-5 -30 -27 -56t-43.5 -40t-54.5 -31q-252 -126 -610 -88q-248 27 -394 139q-15 12 -25.5 26.5t-17 35t-9 34 t-6 39.5t-5.5 35q-9 50 -26.5 150t-28 161.5t-23.5 147.5t-22 158zM173 285l6 16l18 9q223 -148 506.5 -148t507.5 148q21 -6 24 -23t-5 -45t-8 -37q-8 -26 -15.5 -76.5t-14 -84t-28.5 -70t-58 -56.5q-86 -48 -189.5 -71.5t-202 -22t-201.5 18.5q-46 8 -81.5 18t-76.5 27 t-73 43.5t-52 61.5q-25 96 -57 292zM243 1240q30 -28 76 -45.5t73.5 -22t87.5 -11.5q228 -29 448 -1q63 8 89.5 12t72.5 21.5t75 46.5q-20 27 -56 44.5t-58 22t-71 12.5q-291 47 -566 -2q-43 -7 -66 -12t-55 -22t-50 -43zM481 657q4 -91 77.5 -155t165.5 -56q91 8 152 84 t50 168q-14 107 -113 164t-197 13q-63 -28 -100.5 -88.5t-34.5 -129.5zM599 710q14 41 52 58q36 18 72.5 12t64 -35.5t27.5 -67.5q8 -63 -50.5 -101t-111.5 -6q-39 17 -53.5 58t-0.5 82z" />
+<glyph unicode="&#xf172;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM260 1060q8 -68 19 -138t29 -171t24 -137q1 -5 5 -31t7 -36t12 -27t22 -28q105 -80 284 -100q259 -28 440 63 q24 13 39.5 23t31 29t19.5 40q48 267 80 473q9 53 -8 75q-43 55 -155 88q-216 63 -487 36q-132 -12 -226 -46q-38 -15 -59.5 -25t-47 -34t-29.5 -54zM385 384q26 -154 41 -210q47 -81 204 -108q249 -46 428 53q34 19 49 51.5t22.5 85.5t12.5 71q0 7 5.5 26.5t3 32 t-17.5 16.5q-161 -106 -365 -106t-366 106l-12 -6zM436 1073q13 19 36 31t40 15.5t47 8.5q198 35 408 1q33 -5 51 -8.5t43 -16t39 -31.5q-20 -21 -53.5 -34t-53 -16t-63.5 -8q-155 -20 -324 0q-44 6 -63 9.5t-52.5 16t-54.5 32.5zM607 653q-2 49 25.5 93t72.5 64 q70 31 141.5 -10t81.5 -118q8 -66 -36 -121t-110 -61t-119 40t-56 113zM687.5 660.5q0.5 -52.5 43.5 -70.5q39 -23 81 4t36 72q0 43 -41 66t-77 1q-43 -20 -42.5 -72.5z" />
+<glyph unicode="&#xf173;" horiz-adv-x="1024" d="M78 779v217q91 30 155 84q64 55 103 132q39 78 54 196h219v-388h364v-241h-364v-394q0 -136 14 -172q13 -37 52 -60q50 -31 117 -31q117 0 232 76v-242q-102 -48 -178 -65q-77 -19 -173 -19q-105 0 -186 27q-78 25 -138 75q-58 51 -79 105q-22 54 -22 161v539h-170z" />
+<glyph unicode="&#xf174;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM413 744h127v-404q0 -78 17 -121q17 -42 59 -78q43 -37 104 -57q62 -20 140 -20q67 0 129 14q57 13 134 49v181 q-88 -56 -174 -56q-51 0 -88 23q-29 17 -39 45q-11 30 -11 129v295h274v181h-274v291h-164q-11 -90 -40 -147t-78 -99q-48 -40 -116 -63v-163z" />
+<glyph unicode="&#xf175;" horiz-adv-x="768" d="M3 237q9 19 29 19h224v1248q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-1248h224q21 0 29 -19t-5 -35l-350 -384q-10 -10 -23 -10q-14 0 -24 10l-355 384q-13 16 -5 35z" />
+<glyph unicode="&#xf176;" horiz-adv-x="768" d="M3 1043q-8 19 5 35l350 384q10 10 23 10q14 0 24 -10l355 -384q13 -16 5 -35q-9 -19 -29 -19h-224v-1248q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v1248h-224q-21 0 -29 19z" />
+<glyph unicode="&#xf177;" horiz-adv-x="1792" d="M64 637q0 14 10 24l384 354q16 14 35 6q19 -9 19 -29v-224h1248q14 0 23 -9t9 -23v-192q0 -14 -9 -23t-23 -9h-1248v-224q0 -21 -19 -29t-35 5l-384 350q-10 10 -10 23z" />
+<glyph unicode="&#xf178;" horiz-adv-x="1792" d="M0 544v192q0 14 9 23t23 9h1248v224q0 21 19 29t35 -5l384 -350q10 -10 10 -23q0 -14 -10 -24l-384 -354q-16 -14 -35 -6q-19 9 -19 29v224h-1248q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf179;" horiz-adv-x="1408" d="M0 634q0 228 113 374q112 144 284 144q72 0 177 -30q104 -30 138 -30q45 0 143 34q102 34 173 34q119 0 213 -65q52 -36 104 -100q-79 -67 -114 -118q-65 -94 -65 -207q0 -124 69 -223t158 -126q-39 -125 -123 -250q-129 -196 -257 -196q-49 0 -140 32q-86 32 -151 32 q-61 0 -142 -33q-81 -34 -132 -34q-152 0 -301 259q-147 261 -147 503zM683 1131q3 149 78 257q74 107 250 148q1 -3 2.5 -11t2.5 -11q0 -4 0.5 -10t0.5 -10q0 -61 -29 -136q-30 -75 -93 -138q-54 -54 -108 -72q-37 -11 -104 -17z" />
+<glyph unicode="&#xf17a;" horiz-adv-x="1664" d="M0 -27v557h682v-651zM0 614v565l682 94v-659h-682zM757 -131v661h907v-786zM757 614v669l907 125v-794h-907z" />
+<glyph unicode="&#xf17b;" horiz-adv-x="1408" d="M0 337v430q0 42 30 72t73 30q42 0 72 -30t30 -72v-430q0 -43 -29.5 -73t-72.5 -30t-73 30t-30 73zM241 886q0 117 64 215.5t172 153.5l-71 131q-7 13 5 20q13 6 20 -6l72 -132q95 42 201 42t201 -42l72 132q7 12 20 6q12 -7 5 -20l-71 -131q107 -55 171 -153.5t64 -215.5 h-925zM245 184v666h918v-666q0 -46 -32 -78t-77 -32h-75v-227q0 -43 -30 -73t-73 -30t-73 30t-30 73v227h-138v-227q0 -43 -30 -73t-73 -30q-42 0 -72 30t-30 73l-1 227h-74q-46 0 -78 32t-32 78zM455 1092q0 -16 11 -27.5t27 -11.5t27.5 11.5t11.5 27.5t-11.5 27.5 t-27.5 11.5t-27 -11.5t-11 -27.5zM876 1092q0 -16 11.5 -27.5t27.5 -11.5t27 11.5t11 27.5t-11 27.5t-27 11.5t-27.5 -11.5t-11.5 -27.5zM1203 337v430q0 43 30 72.5t72 29.5q43 0 73 -29.5t30 -72.5v-430q0 -43 -30 -73t-73 -30q-42 0 -72 30t-30 73z" />
+<glyph unicode="&#xf17c;" d="M11 -115q-10 23 7 66.5t18 54.5q1 16 -4 40t-10 42.5t-4.5 36.5t10.5 27q14 12 57 14t60 12q30 18 42 35t12 51q21 -73 -32 -106q-32 -20 -83 -15q-34 3 -43 -10q-13 -15 5 -57q2 -6 8 -18t8.5 -18t4.5 -17t1 -22q0 -15 -17 -49t-14 -48q3 -17 37 -26q20 -6 84.5 -18.5 t99.5 -20.5q24 -6 74 -22t82.5 -23t55.5 -4q43 6 64.5 28t23 48t-7.5 58.5t-19 52t-20 36.5q-121 190 -169 242q-68 74 -113 40q-11 -9 -15 15q-3 16 -2 38q1 29 10 52t24 47t22 42q8 21 26.5 72t29.5 78t30 61t39 54q110 143 124 195q-12 112 -16 310q-2 90 24 151.5 t106 104.5q39 21 104 21q53 1 106 -13.5t89 -41.5q57 -42 91.5 -121.5t29.5 -147.5q-5 -95 30 -214q34 -113 133 -218q55 -59 99.5 -163t59.5 -191q8 -49 5 -84.5t-12 -55.5t-20 -22q-10 -2 -23.5 -19t-27 -35.5t-40.5 -33.5t-61 -14q-18 1 -31.5 5t-22.5 13.5t-13.5 15.5 t-11.5 20.5t-9 19.5q-22 37 -41 30t-28 -49t7 -97q20 -70 1 -195q-10 -65 18 -100.5t73 -33t85 35.5q59 49 89.5 66.5t103.5 42.5q53 18 77 36.5t18.5 34.5t-25 28.5t-51.5 23.5q-33 11 -49.5 48t-15 72.5t15.5 47.5q1 -31 8 -56.5t14.5 -40.5t20.5 -28.5t21 -19t21.5 -13 t16.5 -9.5q20 -12 31 -24.5t12 -24t-2.5 -22.5t-15.5 -22t-23.5 -19.5t-30 -18.5t-31.5 -16.5t-32 -15.5t-27 -13q-38 -19 -85.5 -56t-75.5 -64q-17 -16 -68 -19.5t-89 14.5q-18 9 -29.5 23.5t-16.5 25.5t-22 19.5t-47 9.5q-44 1 -130 1q-19 0 -57 -1.5t-58 -2.5 q-44 -1 -79.5 -15t-53.5 -30t-43.5 -28.5t-53.5 -11.5q-29 1 -111 31t-146 43q-19 4 -51 9.5t-50 9t-39.5 9.5t-33.5 14.5t-17 19.5zM321 495q-36 -65 10 -166q5 -12 25 -28t24 -20q20 -23 104 -90.5t93 -76.5q16 -15 17.5 -38t-14 -43t-45.5 -23q8 -15 29 -44.5t28 -54 t7 -70.5q46 24 7 92q-4 8 -10.5 16t-9.5 12t-2 6q3 5 13 9.5t20 -2.5q46 -52 166 -36q133 15 177 87q23 38 34 30q12 -6 10 -52q-1 -25 -23 -92q-9 -23 -6 -37.5t24 -15.5q3 19 14.5 77t13.5 90q2 21 -6.5 73.5t-7.5 97t23 70.5q15 18 51 18q1 37 34.5 53t72.5 10.5 t60 -22.5q0 18 -55 42q4 15 7.5 27.5t5 26t3 21.5t0.5 22.5t-1 19.5t-3.5 22t-4 20.5t-5 25t-5.5 26.5q-10 48 -47 103t-72 75q24 -20 57 -83q87 -162 54 -278q-11 -40 -50 -42q-31 -4 -38.5 18.5t-8 83.5t-11.5 107q-9 39 -19.5 69t-19.5 45.5t-15.5 24.5t-13 15t-7.5 7 q-14 62 -31 103t-29.5 56t-23.5 33t-15 40q-4 21 6 53.5t4.5 49.5t-44.5 25q-15 3 -44.5 18t-35.5 16q-8 1 -11 26t8 51t36 27q37 3 51 -30t4 -58q-11 -19 -2 -26.5t30 -0.5q13 4 13 36v37q-5 30 -13.5 50t-21 30.5t-23.5 15t-27 7.5q-107 -8 -89 -134q0 -15 -1 -15 q-9 9 -29.5 10.5t-33 -0.5t-15.5 5q1 57 -16 90t-45 34q-27 1 -41.5 -27.5t-16.5 -59.5q-1 -15 3.5 -37t13 -37.5t15.5 -13.5q10 3 16 14q4 9 -7 8q-7 0 -15.5 14.5t-9.5 33.5q-1 22 9 37t34 14q17 0 27 -21t9.5 -39t-1.5 -22q-22 -15 -31 -29q-8 -12 -27.5 -23.5 t-20.5 -12.5q-13 -14 -15.5 -27t7.5 -18q14 -8 25 -19.5t16 -19t18.5 -13t35.5 -6.5q47 -2 102 15q2 1 23 7t34.5 10.5t29.5 13t21 17.5q9 14 20 8q5 -3 6.5 -8.5t-3 -12t-16.5 -9.5q-20 -6 -56.5 -21.5t-45.5 -19.5q-44 -19 -70 -23q-25 -5 -79 2q-10 2 -9 -2t17 -19 q25 -23 67 -22q17 1 36 7t36 14t33.5 17.5t30 17t24.5 12t17.5 2.5t8.5 -11q0 -2 -1 -4.5t-4 -5t-6 -4.5t-8.5 -5t-9 -4.5t-10 -5t-9.5 -4.5q-28 -14 -67.5 -44t-66.5 -43t-49 -1q-21 11 -63 73q-22 31 -25 22q-1 -3 -1 -10q0 -25 -15 -56.5t-29.5 -55.5t-21 -58t11.5 -63 q-23 -6 -62.5 -90t-47.5 -141q-2 -18 -1.5 -69t-5.5 -59q-8 -24 -29 -3q-32 31 -36 94q-2 28 4 56q4 19 -1 18zM372 630q4 -1 12.5 7t12.5 18q1 3 2 7t2 6t1.5 4.5t0.5 4v3t-1 2.5t-3 2q-4 1 -6 -3t-4.5 -12.5t-5.5 -13.5t-10 -13q-7 -10 -1 -12zM603 1190q2 -5 5 -6 q10 0 7 -15q-3 -20 8 -20q3 0 3 3q3 17 -2.5 30t-11.5 15q-9 2 -9 -7zM634 1110q0 12 19 15h10q-11 -1 -15.5 -10.5t-8.5 -9.5q-5 -1 -5 5zM721 1122q24 11 32 -2q3 -6 -3 -9q-4 -1 -11.5 6.5t-17.5 4.5zM835 1196l4 -2q14 -4 18 -31q0 -3 8 2l2 3q0 11 -5 19.5t-11 12.5 t-9 3q-14 -1 -7 -7zM851 
1381.5q-1 -2.5 3 -8.5q4 -3 8 0t11 9t15 9q1 1 9 1t15 2t9 7q0 2 -2.5 5t-9 7t-9.5 6q-15 15 -24 15q-9 -1 -11.5 -7.5t-1 -13t-0.5 -12.5q-1 -4 -6 -10.5t-6 -9zM981 1002q-14 -16 7 -43.5t39 -31.5q9 -1 14.5 8t3.5 20q-2 8 -6.5 11.5t-13 5 t-14.5 5.5q-5 3 -9.5 8t-7 8t-5.5 6.5t-4 4t-4 -1.5z" />
+<glyph unicode="&#xf17d;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM112 640q0 -124 44 -236.5t124 -201.5q50 89 123.5 166.5t142.5 124.5t130.5 81t99.5 48l37 13 q4 1 13 3.5t13 4.5q-21 49 -53 111q-311 -93 -673 -93q-1 -7 -1 -21zM126 775q302 0 606 80q-120 213 -244 378q-138 -65 -234 -186t-128 -272zM350 134q184 -150 418 -150q132 0 256 52q-42 241 -140 498h-2l-2 -1q-16 -6 -43 -16.5t-101 -49t-137 -82t-131 -114.5 t-103 -148zM609 1276q1 1 2 1q-1 0 -2 -1zM613 1277q131 -170 246 -382q69 26 130 60.5t96.5 61.5t65.5 57t37.5 40.5l12.5 17.5q-185 164 -433 164q-76 0 -155 -19zM909 797q25 -53 44 -95q2 -6 6.5 -17.5t7.5 -16.5q36 5 74.5 7t73.5 2t69 -1.5t64 -4t56.5 -5.5t48 -6.5 t36.5 -6t25 -4.5l10 -2q-3 232 -149 410l-1 -1q-9 -12 -19 -24.5t-43.5 -44.5t-71 -60.5t-100 -65t-131.5 -64.5zM1007 565q87 -239 128 -469q111 75 185 189.5t96 250.5q-210 60 -409 29z" />
+<glyph unicode="&#xf17e;" d="M0 1024q0 159 112.5 271.5t271.5 112.5q130 0 234 -80q77 16 150 16q143 0 273.5 -55.5t225 -150t150 -225t55.5 -273.5q0 -73 -16 -150q80 -104 80 -234q0 -159 -112.5 -271.5t-271.5 -112.5q-130 0 -234 80q-77 -16 -150 -16q-143 0 -273.5 55.5t-225 150t-150 225 t-55.5 273.5q0 73 16 150q-80 104 -80 234zM376 399q0 -92 122 -157.5t291 -65.5q73 0 140 18.5t122.5 53.5t88.5 93.5t33 131.5q0 50 -19.5 91.5t-48.5 68.5t-73 49t-82.5 34t-87.5 23l-104 24q-30 7 -44 10.5t-35 11.5t-30 16t-16.5 21t-7.5 30q0 77 144 77q43 0 77 -12 t54 -28.5t38 -33.5t40 -29t48 -12q47 0 75.5 32t28.5 77q0 55 -56 99.5t-142 67.5t-182 23q-68 0 -132 -15.5t-119.5 -47t-89 -87t-33.5 -128.5q0 -61 19 -106.5t56 -75.5t80 -48.5t103 -32.5l146 -36q90 -22 112 -36q32 -20 32 -60q0 -39 -40 -64.5t-105 -25.5 q-51 0 -91.5 16t-65 38.5t-45.5 45t-46 38.5t-54 16q-50 0 -75.5 -30t-25.5 -75z" />
+<glyph unicode="&#xf180;" horiz-adv-x="1664" d="M0 640q0 75 53 128l587 587q53 53 128 53t128 -53l265 -265l-398 -399l-188 188q-42 42 -99 42q-59 0 -100 -41l-120 -121q-42 -40 -42 -99q0 -58 42 -100l406 -408q30 -28 67 -37l6 -4h28q60 0 99 41l619 619l2 -3q53 -53 53 -128t-53 -128l-587 -587 q-52 -53 -127.5 -53t-128.5 53l-587 587q-53 53 -53 128zM302 660q0 21 14 35l121 120q13 15 35 15t36 -15l252 -252l574 575q15 15 36 15t36 -15l120 -120q14 -15 14 -36t-14 -36l-730 -730q-17 -15 -37 -15q-4 0 -6 1q-18 2 -30 14l-407 408q-14 15 -14 36z" />
+<glyph unicode="&#xf181;" d="M0 -64v1408q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-1408q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM160 192q0 -14 9 -23t23 -9h480q14 0 23 9t9 23v1024q0 14 -9 23t-23 9h-480q-14 0 -23 -9t-9 -23v-1024zM832 576q0 -14 9 -23t23 -9h480q14 0 23 9t9 23 v640q0 14 -9 23t-23 9h-480q-14 0 -23 -9t-9 -23v-640z" />
+<glyph unicode="&#xf182;" horiz-adv-x="1280" d="M0 480q0 29 16 53l256 384q73 107 176 107h384q103 0 176 -107l256 -384q16 -24 16 -53q0 -40 -28 -68t-68 -28q-51 0 -80 43l-227 341h-45v-132l247 -411q9 -15 9 -33q0 -26 -19 -45t-45 -19h-192v-272q0 -46 -33 -79t-79 -33h-160q-46 0 -79 33t-33 79v272h-192 q-26 0 -45 19t-19 45q0 18 9 33l247 411v132h-45l-227 -341q-29 -43 -80 -43q-40 0 -68 28t-28 68zM416 1280q0 93 65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5z" />
+<glyph unicode="&#xf183;" horiz-adv-x="1024" d="M0 416v416q0 80 56 136t136 56h640q80 0 136 -56t56 -136v-416q0 -40 -28 -68t-68 -28t-68 28t-28 68v352h-64v-912q0 -46 -33 -79t-79 -33t-79 33t-33 79v464h-64v-464q0 -46 -33 -79t-79 -33t-79 33t-33 79v912h-64v-352q0 -40 -28 -68t-68 -28t-68 28t-28 68z M288 1280q0 93 65.5 158.5t158.5 65.5t158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5z" />
+<glyph unicode="&#xf184;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM399.5 766q8.5 -37 24.5 -59l349 -473l350 473q16 22 24.5 59t-6 85t-61.5 79q-40 26 -83 25.5 t-73.5 -17.5t-54.5 -45q-36 -40 -96 -40q-59 0 -95 40q-24 28 -54.5 45t-73.5 17.5t-84 -25.5q-46 -31 -60.5 -79t-6 -85z" />
+<glyph unicode="&#xf185;" horiz-adv-x="1792" d="M44 363q-5 17 4 29l180 248l-180 248q-9 13 -4 29q4 15 20 20l292 96v306q0 16 13 26q15 10 29 4l292 -94l180 248q9 12 26 12t26 -12l180 -248l292 94q14 6 29 -4q13 -10 13 -26v-306l292 -96q16 -5 20 -20q5 -16 -4 -29l-180 -248l180 -248q9 -12 4 -29q-4 -15 -20 -20 l-292 -96v-306q0 -16 -13 -26q-15 -10 -29 -4l-292 94l-180 -248q-10 -13 -26 -13t-26 13l-180 248l-292 -94q-14 -6 -29 4q-13 10 -13 26v306l-292 96q-16 5 -20 20zM320 640q0 -117 45.5 -223.5t123 -184t184 -123t223.5 -45.5t223.5 45.5t184 123t123 184t45.5 223.5 t-45.5 223.5t-123 184t-184 123t-223.5 45.5t-223.5 -45.5t-184 -123t-123 -184t-45.5 -223.5z" />
+<glyph unicode="&#xf186;" d="M0 640q0 153 57.5 292.5t156 241.5t235.5 164.5t290 68.5q44 2 61 -39q18 -41 -15 -72q-86 -78 -131.5 -181.5t-45.5 -218.5q0 -148 73 -273t198 -198t273 -73q118 0 228 51q41 18 72 -13q14 -14 17.5 -34t-4.5 -38q-94 -203 -283.5 -324.5t-413.5 -121.5q-156 0 -298 61 t-245 164t-164 245t-61 298zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51q144 0 273.5 61.5t220.5 171.5q-54 -9 -110 -9q-182 0 -337 90t-245 245t-90 337q0 192 104 357q-201 -60 -328.5 -229t-127.5 -384z" />
+<glyph unicode="&#xf187;" horiz-adv-x="1792" d="M64 1088v256q0 26 19 45t45 19h1536q26 0 45 -19t19 -45v-256q0 -26 -19 -45t-45 -19h-1536q-26 0 -45 19t-19 45zM128 -64v960q0 26 19 45t45 19h1408q26 0 45 -19t19 -45v-960q0 -26 -19 -45t-45 -19h-1408q-26 0 -45 19t-19 45zM704 704q0 -26 19 -45t45 -19h256 q26 0 45 19t19 45t-19 45t-45 19h-256q-26 0 -45 -19t-19 -45z" />
+<glyph unicode="&#xf188;" horiz-adv-x="1664" d="M32 576q0 26 19 45t45 19h224v294l-173 173q-19 19 -19 45t19 45t45 19t45 -19l173 -173h844l173 173q19 19 45 19t45 -19t19 -45t-19 -45l-173 -173v-294h224q26 0 45 -19t19 -45t-19 -45t-45 -19h-224q0 -171 -67 -290l208 -209q19 -19 19 -45t-19 -45q-18 -19 -45 -19 t-45 19l-198 197q-5 -5 -15 -13t-42 -28.5t-65 -36.5t-82 -29t-97 -13v896h-128v-896q-51 0 -101.5 13.5t-87 33t-66 39t-43.5 32.5l-15 14l-183 -207q-20 -21 -48 -21q-24 0 -43 16q-19 18 -20.5 44.5t15.5 46.5l202 227q-58 114 -58 274h-224q-26 0 -45 19t-19 45z M512 1152q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5h-640z" />
+<glyph unicode="&#xf189;" horiz-adv-x="1920" d="M-1 1004q0 11 3 16l4 6q15 19 57 19l274 2q12 -2 23 -6.5t16 -8.5l5 -3q16 -11 24 -32q20 -50 46 -103.5t41 -81.5l16 -29q29 -60 56 -104t48.5 -68.5t41.5 -38.5t34 -14t27 5q2 1 5 5t12 22t13.5 47t9.5 81t0 125q-2 40 -9 73t-14 46l-6 12q-25 34 -85 43q-13 2 5 24 q17 19 38 30q53 26 239 24q82 -1 135 -13q20 -5 33.5 -13.5t20.5 -24t10.5 -32t3.5 -45.5t-1 -55t-2.5 -70.5t-1.5 -82.5q0 -11 -1 -42t-0.5 -48t3.5 -40.5t11.5 -39t22.5 -24.5q8 -2 17 -4t26 11t38 34.5t52 67t68 107.5q60 104 107 225q4 10 10 17.5t11 10.5l4 3l5 2.5 t13 3t20 0.5l288 2q39 5 64 -2.5t31 -16.5l6 -10q23 -64 -150 -294q-24 -32 -65 -85q-78 -100 -90 -131q-17 -41 14 -81q17 -21 81 -82h1l1 -1l1 -1l2 -2q141 -131 191 -221q3 -5 6.5 -12.5t7 -26.5t-0.5 -34t-25 -27.5t-59 -12.5l-256 -4q-24 -5 -56 5t-52 22l-20 12 q-30 21 -70 64t-68.5 77.5t-61 58t-56.5 15.5q-3 -1 -8 -3.5t-17 -14.5t-21.5 -29.5t-17 -52t-6.5 -77.5q0 -15 -3.5 -27.5t-7.5 -18.5l-4 -5q-18 -19 -53 -22h-115q-71 -4 -146 16.5t-131.5 53t-103 66t-70.5 57.5l-25 24q-10 10 -27.5 30t-71.5 91t-106 151t-122.5 211 t-130.5 272q-6 16 -6 27z" />
+<glyph unicode="&#xf18a;" horiz-adv-x="1792" d="M0 391q0 115 69.5 245t197.5 258q169 169 341.5 236t246.5 -7q65 -64 20 -209q-4 -14 -1 -20t10 -7t14.5 0.5t13.5 3.5l6 2q139 59 246 59t153 -61q45 -63 0 -178q-2 -13 -4.5 -20t4.5 -12.5t12 -7.5t17 -6q57 -18 103 -47t80 -81.5t34 -116.5q0 -68 -37 -139.5 t-109 -137t-168.5 -117.5t-226 -83t-270.5 -31t-275 33.5t-240.5 93t-171.5 151t-65 199.5zM181 320q9 -96 89 -170t208.5 -109t274.5 -21q223 23 369.5 141.5t132.5 264.5q-9 96 -89 170t-208.5 109t-274.5 21q-223 -23 -369.5 -141.5t-132.5 -264.5zM413.5 230.5 q-40.5 92.5 6.5 187.5q47 93 151.5 139t210.5 19q111 -29 158.5 -119.5t2.5 -190.5q-45 -102 -158 -150t-224 -12q-107 34 -147.5 126.5zM495 257.5q9 -34.5 43 -50.5t74.5 -2.5t62.5 47.5q21 34 11 69t-45 50q-34 14 -73 1t-60 -46q-22 -34 -13 -68.5zM705 399 q-17 -31 13 -45q14 -5 29 0.5t22 18.5q8 13 3.5 26.5t-17.5 18.5q-14 5 -28.5 -0.5t-21.5 -18.5zM1165 1274q-6 28 9.5 51.5t43.5 29.5q123 26 244 -11.5t208 -134.5q87 -96 112.5 -222.5t-13.5 -241.5q-9 -27 -34 -40t-52 -4t-40 34t-5 52q28 82 10 172t-80 158 q-62 69 -148 95.5t-173 8.5q-28 -6 -52 9.5t-30 43.5zM1224 1047q-5 24 8 44.5t37 25.5q60 13 119 -5.5t101 -65.5t54.5 -108.5t-6.5 -117.5q-8 -23 -29.5 -34t-44.5 -4q-23 8 -34 29.5t-4 44.5q20 63 -24 111t-107 35q-24 -5 -45 8t-25 37z" />
+<glyph unicode="&#xf18b;" d="M0 638q0 187 83.5 349.5t229.5 269.5t325 137v-485q0 -252 -126.5 -459.5t-330.5 -306.5q-181 215 -181 495zM398 -34q138 87 235.5 211t131.5 268q35 -144 132.5 -268t235.5 -211q-171 -94 -368 -94q-196 0 -367 94zM898 909v485q179 -30 325 -137t229.5 -269.5 t83.5 -349.5q0 -280 -181 -495q-204 99 -330.5 306.5t-126.5 459.5z" />
+<glyph unicode="&#xf18c;" horiz-adv-x="1408" d="M0 -211q0 19 13 31.5t32 12.5q173 1 322.5 107.5t251.5 294.5q-36 -14 -72 -23t-83 -13t-91 2.5t-93 28.5t-92 59t-84.5 100t-74.5 146q114 47 214 57t167.5 -7.5t124.5 -56.5t88.5 -77t56.5 -82q53 131 79 291q-7 -1 -18 -2.5t-46.5 -2.5t-69.5 0.5t-81.5 10t-88.5 23 t-84 42.5t-75 65t-54.5 94.5t-28.5 127.5q70 28 133.5 36.5t112.5 -1t92 -30t73.5 -50t56 -61t42 -63t27.5 -56t16 -39.5l4 -16q12 122 12 195q-8 6 -21.5 16t-49 44.5t-63.5 71.5t-54 93t-33 112.5t12 127t70 138.5q73 -25 127.5 -61.5t84.5 -76.5t48 -85t20.5 -89 t-0.5 -85.5t-13 -76.5t-19 -62t-17 -42l-7 -15q1 -5 1 -50.5t-1 -71.5q3 7 10 18.5t30.5 43t50.5 58t71 55.5t91.5 44.5t112 14.5t132.5 -24q-2 -78 -21.5 -141.5t-50 -104.5t-69.5 -71.5t-81.5 -45.5t-84.5 -24t-80 -9.5t-67.5 1t-46.5 4.5l-17 3q-23 -147 -73 -283 q6 7 18 18.5t49.5 41t77.5 52.5t99.5 42t117.5 20t129 -23.5t137 -77.5q-32 -80 -76 -138t-91 -88.5t-99 -46.5t-101.5 -14.5t-96.5 8.5t-86.5 22t-69.5 27.5t-46 22.5l-17 10q-113 -228 -289.5 -359.5t-384.5 -132.5q-19 0 -32 13t-13 32z" />
+<glyph unicode="&#xf18d;" horiz-adv-x="1280" d="M21 217v66h1238v-66q0 -85 -57.5 -144.5t-138.5 -59.5h-57l-260 -269v269h-529q-81 0 -138.5 59.5t-57.5 144.5zM21 354v255h1238v-255h-1238zM21 682v255h1238v-255h-1238zM21 1010v67q0 84 57.5 143.5t138.5 59.5h846q81 0 138.5 -59.5t57.5 -143.5v-67h-1238z" />
+<glyph unicode="&#xf18e;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM384 544v192q0 13 9.5 22.5t22.5 9.5h352v192q0 14 9 23t23 9q12 0 24 -10l319 -319q9 -9 9 -23t-9 -23l-320 -320q-9 -9 -23 -9q-13 0 -22.5 9.5t-9.5 22.5v192h-352q-13 0 -22.5 9.5t-9.5 22.5z" />
+<glyph unicode="&#xf190;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM384 640q0 14 9 23l320 320q9 9 23 9q13 0 22.5 -9.5t9.5 -22.5v-192h352q13 0 22.5 -9.5t9.5 -22.5v-192q0 -13 -9.5 -22.5t-22.5 -9.5h-352v-192q0 -14 -9 -23t-23 -9q-12 0 -24 10l-319 319q-9 9 -9 23z" />
+<glyph unicode="&#xf191;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 160q0 -13 9.5 -22.5t22.5 -9.5h960q13 0 22.5 9.5t9.5 22.5v960q0 13 -9.5 22.5t-22.5 9.5h-960 q-13 0 -22.5 -9.5t-9.5 -22.5v-960zM448 640q0 33 27 52l448 320q17 12 37 12q26 0 45 -19t19 -45v-640q0 -26 -19 -45t-45 -19q-20 0 -37 12l-448 320q-27 19 -27 52z" />
+<glyph unicode="&#xf192;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM224 640q0 -148 73 -273t198 -198t273 -73t273 73t198 198t73 273t-73 273t-198 198t-273 73t-273 -73 t-198 -198t-73 -273zM512 640q0 106 75 181t181 75t181 -75t75 -181t-75 -181t-181 -75t-181 75t-75 181z" />
+<glyph unicode="&#xf193;" horiz-adv-x="1664" d="M0 320q0 181 104.5 330t274.5 211l17 -131q-122 -54 -195 -165.5t-73 -244.5q0 -185 131.5 -316.5t316.5 -131.5q126 0 232.5 65t165 175.5t49.5 236.5l102 -204q-58 -179 -210 -290t-339 -111q-156 0 -288.5 77.5t-210 210t-77.5 288.5zM416 1348q-2 16 6 42 q14 51 57 82.5t97 31.5q66 0 113 -47t47 -113q0 -69 -52 -117.5t-120 -41.5l37 -289h423v-128h-407l16 -128h455q40 0 57 -35l228 -455l198 99l58 -114l-256 -128q-13 -7 -29 -7q-40 0 -57 35l-239 477h-472q-24 0 -42.5 16.5t-21.5 40.5z" />
+<glyph unicode="&#xf194;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM128 806q16 -8 25.5 -26t21.5 -20q21 -3 54.5 8.5t58 10.5t41.5 -30q11 -18 18.5 -38.5t15 -48t12.5 -40.5 q17 -46 53 -187q36 -146 57 -197q42 -99 103 -125q43 -12 85 -1.5t76 31.5q131 77 250 237q104 139 172.5 292.5t82.5 226.5q16 85 -21 132q-52 65 -187 45q-17 -3 -41 -12.5t-57.5 -30.5t-64.5 -48.5t-59.5 -70t-44.5 -91.5q80 7 113.5 -16t26.5 -99q-5 -52 -52 -143 q-43 -78 -71 -99q-44 -32 -87 14q-23 24 -37.5 64.5t-19 73t-10 84t-8.5 71.5q-23 129 -34 164q-12 37 -35.5 69t-50.5 40q-57 16 -127 -25q-54 -32 -136.5 -106t-122.5 -102v-7z" />
+<glyph unicode="&#xf195;" horiz-adv-x="1152" d="M0 608v128q0 23 23 31l233 71v93l-215 -66q-3 -1 -9 -1q-10 0 -19 6q-13 10 -13 26v128q0 23 23 31l233 71v250q0 14 9 23t23 9h160q14 0 23 -9t9 -23v-181l375 116q15 5 28 -5t13 -26v-128q0 -23 -23 -31l-393 -121v-93l375 116q15 5 28 -5t13 -26v-128q0 -23 -23 -31 l-393 -121v-487q188 13 318 151t130 328q0 14 9 23t23 9h160q14 0 23 -9t9 -23q0 -191 -94.5 -353t-256.5 -256.5t-353 -94.5h-160q-14 0 -23 9t-9 23v611l-215 -66q-3 -1 -9 -1q-10 0 -19 6q-13 10 -13 26z" />
+<glyph unicode="&#xf196;" horiz-adv-x="1408" d="M0 288v832q0 119 84.5 203.5t203.5 84.5h832q119 0 203.5 -84.5t84.5 -203.5v-832q0 -119 -84.5 -203.5t-203.5 -84.5h-832q-119 0 -203.5 84.5t-84.5 203.5zM128 288q0 -66 47 -113t113 -47h832q66 0 113 47t47 113v832q0 66 -47 113t-113 47h-832q-66 0 -113 -47 t-47 -113v-832zM256 672v64q0 14 9 23t23 9h352v352q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-352h352q14 0 23 -9t9 -23v-64q0 -14 -9 -23t-23 -9h-352v-352q0 -14 -9 -23t-23 -9h-64q-14 0 -23 9t-9 23v352h-352q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf197;" horiz-adv-x="2176" d="M0 576q0 12 38.5 20.5t96.5 10.5q-7 25 -7 49q0 33 9.5 56.5t22.5 23.5h64v64h128q158 0 268 -64h1113q42 -7 106.5 -18t80.5 -14q89 -15 150 -40.5t83.5 -47.5t22.5 -40t-22.5 -40t-83.5 -47.5t-150 -40.5q-16 -3 -80.5 -14t-106.5 -18h-1113q-110 -64 -268 -64h-128v64 h-64q-13 0 -22.5 23.5t-9.5 56.5q0 24 7 49q-58 2 -96.5 10.5t-38.5 20.5zM323 336h29q157 0 273 64h1015q-217 -38 -456 -80q-57 0 -113 -24t-83 -48l-28 -24l-288 -288q-26 -26 -70.5 -45t-89.5 -19h-96zM323 816l93 464h96q46 0 90 -19t70 -45l288 -288q4 -4 11 -10.5 t30.5 -23t48.5 -29t61.5 -23t72.5 -10.5l456 -80h-1015q-116 64 -273 64h-29zM1739 484l81 -30q68 48 68 122t-68 122l-81 -30q53 -36 53 -92t-53 -92z" />
+<glyph unicode="&#xf198;" horiz-adv-x="1664" d="M0 796q0 47 27.5 85t71.5 53l157 53l-53 159q-8 24 -8 47q0 60 42 102.5t102 42.5q47 0 85 -27t53 -72l54 -160l310 105l-54 160q-8 24 -8 47q0 59 42.5 102t101.5 43q47 0 85.5 -27.5t53.5 -71.5l53 -161l162 55q21 6 43 6q60 0 102.5 -39.5t42.5 -98.5q0 -45 -30 -81.5 t-74 -51.5l-157 -54l105 -316l164 56q24 8 46 8q62 0 103.5 -40.5t41.5 -101.5q0 -97 -93 -130l-172 -59l56 -167q7 -21 7 -47q0 -59 -42 -102t-101 -43q-47 0 -85.5 27t-53.5 72l-55 165l-310 -106l55 -164q8 -24 8 -47q0 -59 -42 -102t-102 -43q-47 0 -85 27t-53 72 l-55 163l-153 -53q-29 -9 -50 -9q-61 0 -101.5 40t-40.5 101q0 47 27.5 85t71.5 53l156 53l-105 313l-156 -54q-26 -8 -48 -8q-60 0 -101 40.5t-41 100.5zM620 811l105 -313l310 105l-105 315z" />
+<glyph unicode="&#xf199;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 352q0 -40 28 -68t68 -28h832q40 0 68 28t28 68v436q-31 -35 -64 -55q-34 -22 -132.5 -85t-151.5 -99 q-98 -69 -164 -69t-164 69q-46 32 -141.5 92.5t-142.5 92.5q-12 8 -33 27t-31 27v-436zM256 928q0 -37 30.5 -76.5t67.5 -64.5q47 -32 137.5 -89t129.5 -83q3 -2 17 -11.5t21 -14t21 -13t23.5 -13t21.5 -9.5t22.5 -7.5t20.5 -2.5t20.5 2.5t22.5 7.5t21.5 9.5t23.5 13t21 13 t21 14t17 11.5l267 174q35 23 66.5 62.5t31.5 73.5q0 41 -27.5 70t-68.5 29h-832q-40 0 -68 -28t-28 -68z" />
+<glyph unicode="&#xf19a;" horiz-adv-x="1792" d="M0 640q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348zM41 640q0 -173 68 -331.5t182.5 -273t273 -182.5t331.5 -68t331.5 68t273 182.5t182.5 273t68 331.5 t-68 331.5t-182.5 273t-273 182.5t-331.5 68t-331.5 -68t-273 -182.5t-182.5 -273t-68 -331.5zM127 640q0 163 67 313l367 -1005q-196 95 -315 281t-119 411zM254 1062q105 160 274.5 253.5t367.5 93.5q147 0 280.5 -53t238.5 -149h-10q-55 0 -92 -40.5t-37 -95.5 q0 -12 2 -24t4 -21.5t8 -23t9 -21t12 -22.5t12.5 -21t14.5 -24t14 -23q63 -107 63 -212q0 -19 -2.5 -38.5t-10 -49.5t-11.5 -44t-17.5 -59t-17.5 -58l-76 -256l-278 826q46 3 88 8q19 2 26 18.5t-2.5 31t-28.5 13.5l-205 -10q-75 1 -202 10q-12 1 -20.5 -5t-11.5 -15 t-1.5 -18.5t9 -16.5t19.5 -8l80 -8l120 -328l-168 -504l-280 832q46 3 88 8q19 2 26 18.5t-2.5 31t-28.5 13.5l-205 -10q-7 0 -23 0.5t-26 0.5zM679 -97l230 670l237 -647q1 -6 5 -11q-126 -44 -255 -44q-112 0 -217 32zM1282 -24l235 678q59 169 59 276q0 42 -6 79 q95 -174 95 -369q0 -209 -104 -385.5t-279 -278.5z" />
+<glyph unicode="&#xf19b;" horiz-adv-x="1792" d="M0 455q0 140 100.5 263.5t275 205.5t391.5 108v-172q-217 -38 -356.5 -150t-139.5 -255q0 -152 154.5 -267t388.5 -145v1360l272 133v-1536l-272 -128q-228 20 -414 102t-293 208.5t-107 272.5zM1134 860v172q277 -33 481 -157l140 79l37 -390l-525 114l147 83 q-119 70 -280 99z" />
+<glyph unicode="&#xf19c;" horiz-adv-x="2048" d="M0 -128q0 26 20.5 45t48.5 19h1782q28 0 48.5 -19t20.5 -45v-128h-1920v128zM0 1024v128l960 384l960 -384v-128h-128q0 -26 -20.5 -45t-48.5 -19h-1526q-28 0 -48.5 19t-20.5 45h-128zM128 0v64q0 26 20.5 45t48.5 19h59v768h256v-768h128v768h256v-768h128v768h256 v-768h128v768h256v-768h59q28 0 48.5 -19t20.5 -45v-64h-1664z" />
+<glyph unicode="&#xf19d;" horiz-adv-x="2304" d="M0 1024q0 23 22 31l1120 352q4 1 10 1t10 -1l1120 -352q22 -8 22 -31t-22 -31l-1120 -352q-4 -1 -10 -1t-10 1l-652 206q-43 -34 -71 -111.5t-34 -178.5q63 -36 63 -109q0 -69 -58 -107l58 -433q2 -14 -8 -25q-9 -11 -24 -11h-192q-15 0 -24 11q-10 11 -8 25l58 433 q-58 38 -58 107q0 73 65 111q11 207 98 330l-333 104q-22 8 -22 31zM512 384l18 316l574 -181q22 -7 48 -7t48 7l574 181l18 -316q4 -69 -82 -128t-235 -93.5t-323 -34.5t-323 34.5t-235 93.5t-82 128z" />
+<glyph unicode="&#xf19e;" d="M109 1536q58 -15 108 -15q43 0 111 15q63 -111 133.5 -229.5t167 -276.5t138.5 -227q37 61 109.5 177.5t117.5 190t105 176t107 189.5q54 -14 107 -14q56 0 114 14q-28 -39 -60 -88.5t-49.5 -78.5t-56.5 -96t-49 -84q-146 -248 -353 -610l13 -707q-62 11 -105 11 q-41 0 -105 -11l13 707q-40 69 -168.5 295.5t-216.5 374.5t-181 287z" />
+<glyph unicode="&#xf1a0;" horiz-adv-x="1280" d="M111 182q0 81 44.5 150t118.5 115q131 82 404 100q-32 41 -47.5 73.5t-15.5 73.5q0 40 21 85q-46 -4 -68 -4q-148 0 -249.5 96.5t-101.5 244.5q0 82 36 159t99 131q76 66 182 98t218 32h417l-137 -88h-132q75 -63 113 -133t38 -160q0 -72 -24.5 -129.5t-59.5 -93 t-69.5 -65t-59 -61.5t-24.5 -66q0 -36 32 -70.5t77 -68t90.5 -73.5t77.5 -104t32 -142q0 -91 -49 -173q-71 -122 -209.5 -179.5t-298.5 -57.5q-132 0 -246.5 41.5t-172.5 137.5q-36 59 -36 131zM297 228q0 -56 23.5 -102t61 -75.5t87 -50t100 -29t101.5 -8.5q58 0 111.5 13 t99 39t73 73t27.5 109q0 25 -7 49t-14.5 42t-27 41.5t-29.5 35t-38.5 34.5t-36.5 29t-41.5 30t-36.5 26q-16 2 -49 2q-53 0 -104.5 -7t-107 -25t-97 -46t-68.5 -74.5t-27 -105.5zM403 1222q0 -46 10 -97.5t31.5 -103t52 -92.5t75 -67t96.5 -26q37 0 77.5 16.5t65.5 43.5 q53 56 53 159q0 59 -17 125.5t-48 129t-84 103.5t-117 41q-42 0 -82.5 -19.5t-66.5 -52.5q-46 -59 -46 -160z" />
+<glyph unicode="&#xf1a1;" horiz-adv-x="1984" d="M0 722q0 94 66 160t160 66q83 0 148 -55q248 158 592 164l134 423q4 14 17.5 21.5t28.5 4.5l347 -82q22 50 68.5 81t102.5 31q77 0 131.5 -54.5t54.5 -131.5t-54.5 -132t-131.5 -55q-76 0 -130.5 54t-55.5 131l-315 74l-116 -366q327 -14 560 -166q64 58 151 58 q94 0 160 -66t66 -160q0 -62 -31 -114t-83 -82q5 -33 5 -61q0 -121 -68.5 -230.5t-197.5 -193.5q-125 -82 -285.5 -125.5t-335.5 -43.5q-176 0 -336.5 43.5t-284.5 125.5q-129 84 -197.5 193t-68.5 231q0 29 5 66q-48 31 -77 81.5t-29 109.5zM77 722q0 -67 51 -111 q49 131 180 235q-36 25 -82 25q-62 0 -105.5 -43.5t-43.5 -105.5zM178 465q0 -101 59.5 -194t171.5 -166q116 -75 265.5 -115.5t313.5 -40.5t313.5 40.5t265.5 115.5q112 73 171.5 166t59.5 194t-59.5 193.5t-171.5 165.5q-116 75 -265.5 115.5t-313.5 40.5t-313.5 -40.5 t-265.5 -115.5q-112 -73 -171.5 -165.5t-59.5 -193.5zM555 572q0 57 41.5 98t97.5 41t96.5 -41t40.5 -98q0 -56 -40.5 -96t-96.5 -40q-57 0 -98 40t-41 96zM661 209.5q0 16.5 11 27.5t27 11t27 -11q77 -77 265 -77h2q188 0 265 77q11 11 27 11t27 -11t11 -27.5t-11 -27.5 q-99 -99 -319 -99h-2q-220 0 -319 99q-11 11 -11 27.5zM1153 572q0 57 41.5 98t97.5 41t96.5 -41t40.5 -98q0 -56 -40.5 -96t-96.5 -40q-57 0 -98 40t-41 96zM1555 1350q0 -45 32 -77t77 -32t77 32t32 77t-32 77t-77 32t-77 -32t-32 -77zM1672 843q131 -105 178 -238 q57 46 57 117q0 62 -43.5 105.5t-105.5 43.5q-49 0 -86 -28z" />
+<glyph unicode="&#xf1a2;" d="M0 193v894q0 133 94 227t226 94h896q132 0 226 -94t94 -227v-894q0 -133 -94 -227t-226 -94h-896q-132 0 -226 94t-94 227zM155 709q0 -37 19.5 -67.5t52.5 -45.5q-7 -25 -7 -54q0 -98 74 -181.5t201.5 -132t278.5 -48.5q150 0 277.5 48.5t201.5 132t74 181.5q0 27 -6 54 q35 14 57 45.5t22 70.5q0 51 -36 87.5t-87 36.5q-60 0 -98 -48q-151 107 -375 115l83 265l206 -49q1 -50 36.5 -85t84.5 -35q50 0 86 35.5t36 85.5t-36 86t-86 36q-36 0 -66 -20.5t-45 -53.5l-227 54q-9 2 -17.5 -2.5t-11.5 -14.5l-95 -302q-224 -4 -381 -113q-36 43 -93 43 q-51 0 -87 -36.5t-36 -87.5zM493 613q0 37 26 63t63 26t63 -26t26 -63t-26 -64t-63 -27t-63 27t-26 64zM560 375q0 11 8 18q7 7 17.5 7t17.5 -7q49 -51 172 -51h1h1q122 0 173 51q7 7 17.5 7t17.5 -7t7 -18t-7 -18q-65 -64 -208 -64h-1h-1q-143 0 -207 64q-8 7 -8 18z M882 613q0 37 26 63t63 26t63 -26t26 -63t-26 -64t-63 -27t-63 27t-26 64zM1143 1120q0 30 21 51t50 21q30 0 51 -21t21 -51q0 -29 -21 -50t-51 -21q-29 0 -50 21t-21 50z" />
+<glyph unicode="&#xf1a3;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM320 502q0 -82 57.5 -139t139.5 -57q81 0 138.5 56.5t57.5 136.5v280q0 19 13.5 33t33.5 14 q19 0 32.5 -14t13.5 -33v-54l60 -28l90 27v62q0 79 -58 135t-138 56t-138 -55.5t-58 -134.5v-283q0 -20 -14 -33.5t-33 -13.5t-32.5 13.5t-13.5 33.5v120h-151v-122zM806 500q0 -80 58 -137t139 -57t138.5 57t57.5 139v122h-150v-126q0 -20 -13.5 -33.5t-33.5 -13.5 q-19 0 -32.5 14t-13.5 33v123l-90 -26l-60 28v-123z" />
+<glyph unicode="&#xf1a4;" horiz-adv-x="1920" d="M0 336v266h328v-262q0 -43 30 -72.5t72 -29.5t72 29.5t30 72.5v620q0 171 126.5 292t301.5 121q176 0 302 -122t126 -294v-136l-195 -58l-131 61v118q0 42 -30 72t-72 30t-72 -30t-30 -72v-612q0 -175 -126 -299t-303 -124q-178 0 -303.5 125.5t-125.5 303.5zM1062 332 v268l131 -61l195 58v-270q0 -42 30 -71.5t72 -29.5t72 29.5t30 71.5v275h328v-266q0 -178 -125.5 -303.5t-303.5 -125.5q-177 0 -303 124.5t-126 300.5z" />
+<glyph unicode="&#xf1a5;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM64 640h704v-704h480q93 0 158.5 65.5t65.5 158.5v480h-704v704h-480q-93 0 -158.5 -65.5t-65.5 -158.5v-480z " />
+<glyph unicode="&#xf1a6;" horiz-adv-x="2048" d="M0 271v697h328v286h204v-983h-532zM205 435h123v369h-123v-369zM614 271h205v697h-205v-697zM614 1050h205v204h-205v-204zM901 26v163h328v82h-328v697h533v-942h-533zM1106 435h123v369h-123v-369zM1516 26v163h327v82h-327v697h532v-942h-532zM1720 435h123v369h-123 v-369z" />
+<glyph unicode="&#xf1a7;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM293 388l211 41v206q55 -19 116 -19q125 0 213.5 95t88.5 229t-88.5 229t-213.5 95q-74 0 -141 -36h-186v-840z M504 804v277q28 17 70 17q53 0 91 -45t38 -109t-38 -109.5t-91 -45.5q-43 0 -70 15zM636 -39l211 41v206q51 -19 117 -19q125 0 213 95t88 229t-88 229t-213 95q-20 0 -39 -3q-23 -78 -78 -136q-87 -95 -211 -101v-636zM847 377v277q28 17 70 17q53 0 91 -45.5t38 -109.5 t-38 -109t-91 -45q-43 0 -70 15z" />
+<glyph unicode="&#xf1a8;" horiz-adv-x="2038" d="M41 455q0 15 8.5 26.5t22.5 14.5l486 106q-8 14 -8 25t5.5 17.5t16 11.5t20 7t23 4.5t18.5 4.5q4 1 15.5 7.5t17.5 6.5q15 0 28 -16t20 -33q163 37 172 37q17 0 29.5 -11t12.5 -28q0 -15 -8.5 -26t-23.5 -14l-182 -40l-1 -16q-1 -26 81.5 -117.5t104.5 -91.5q47 0 119 80 t72 129q0 36 -23.5 53t-51 18.5t-51 11.5t-23.5 34q0 16 10 34l-68 19q43 44 43 117q0 26 -5 58q82 16 144 16q44 0 71.5 -1.5t48.5 -8.5t31 -13.5t20.5 -24.5t15.5 -33.5t17 -47.5t24 -60l50 25q-3 -40 -23 -60t-42.5 -21t-40 -6.5t-16.5 -20.5l1 -21q75 3 143.5 -20.5 t118 -58.5t101 -94.5t84 -108t75.5 -120.5q33 -56 78.5 -109t75.5 -80.5t99 -88.5q-48 -30 -108.5 -57.5t-138.5 -59t-114 -47.5q-44 37 -74 115t-43.5 164.5t-33 180.5t-42.5 168.5t-72.5 123t-122.5 48.5l-10 -2l-6 -4q4 -5 13 -14q6 -5 28 -23.5t25.5 -22t19 -18 t18 -20.5t11.5 -21t10.5 -27.5t4.5 -31t4 -40.5l1 -33q1 -26 -2.5 -57.5t-7.5 -52t-12.5 -58.5t-11.5 -53q-35 1 -101 -9.5t-98 -10.5q-39 0 -72 10q-2 16 -2 47q0 74 3 96q2 13 31.5 41.5t57 59t26.5 51.5q-24 2 -43 -24q-36 -53 -111.5 -99.5t-136.5 -46.5q-25 0 -75.5 63 t-106.5 139.5t-84 96.5q-6 4 -27 30q-482 -112 -513 -112q-16 0 -28 11t-12 27zM764 676q10 1 32.5 7t34.5 6q19 0 35 -10l-96 -20zM822 568l48 12l109 -177l-73 -48zM859 884q16 30 36 46.5t54 29.5t65.5 36t46 36.5t50 55t43.5 50.5q12 -9 28 -31.5t32 -36.5t38 -13l12 1 v-76l22 -1q247 95 371 190q28 21 50 39t42.5 37.5t33 31t29.5 34t24 31t24.5 37t23 38t27 47.5t29.5 53l7 9q-2 -53 -43 -139q-79 -165 -205 -264t-306 -142q-14 -3 -42 -7.5t-50 -9.5t-39 -14q3 -19 24.5 -46t21.5 -34q0 -11 -26 -30q-5 5 -13.5 15.5t-12 14.5t-10.5 11.5 t-10 10.5l-8 8t-8.5 7.5t-8 5t-8.5 4.5q-7 3 -14.5 5t-20.5 2.5t-22 0.5h-32.5h-37.5q-126 0 -217 -43zM1061 45h31l10 -83l-41 -12v95zM1061 -79q39 26 131.5 47.5t146.5 21.5q9 0 22.5 -15.5t28 -42.5t26 -50t24 -51t14.5 -33q-121 -45 -244 -45q-61 0 -125 11zM1116 29 q21 2 60.5 8.5t72 10t60.5 3.5h14q3 -15 3 -16q0 -7 -17.5 -14.5t-46 -13t-54 -9.5t-53.5 -7.5t-32 -4.5zM1947 1528l1 3l2 4l-1 -5zM1950 1535v1v-1zM1950 1535l1 1z" />
+<glyph unicode="&#xf1a9;" d="M0 520q0 89 19.5 172.5t49 145.5t70.5 118.5t78.5 94t78.5 69.5t64.5 46.5t42.5 24.5q14 8 51 26.5t54.5 28.5t48 30t60.5 44q36 28 58 72.5t30 125.5q129 -155 186 -193q44 -29 130 -68t129 -66q21 -13 39 -25t60.5 -46.5t76 -70.5t75 -95t69 -122t47 -148.5 t19.5 -177.5q0 -164 -62 -304.5t-166 -236t-242.5 -149.5t-290.5 -54t-293 57.5t-247.5 157t-170.5 241.5t-64 302zM333 256q-2 -112 74 -164q29 -20 62.5 -28.5t103.5 -8.5q57 0 132 32.5t134 71t120 70.5t93 31q26 -1 65 -31.5t71.5 -67t68 -67.5t55.5 -32q35 -3 58.5 14 t55.5 63q28 41 42.5 101t14.5 106q0 22 -5 44.5t-16.5 45t-34 36.5t-52.5 14q-33 0 -97 -41.5t-129 -83.5t-101 -42q-27 -1 -63.5 19t-76 49t-83.5 58t-100 49t-111 19q-115 -1 -197 -78.5t-84 -178.5zM685.5 -76q-0.5 -10 7.5 -20q34 -32 87.5 -46t102.5 -12.5t99 4.5 q41 4 84.5 20.5t65 30t28.5 20.5q12 12 7 29q-5 19 -24 5q-30 -22 -87 -39t-131 -17q-129 0 -193 49q-5 4 -13 4q-11 0 -26 -12q-7 -6 -7.5 -16zM852 31q9 -8 17.5 -4.5t31.5 23.5q3 2 10.5 8.5t10.5 8.5t10 7t11.5 7t12.5 5t15 4.5t16.5 2.5t20.5 1q27 0 44.5 -7.5 t23 -14.5t13.5 -22q10 -17 12.5 -20t12.5 1q23 12 14 34q-19 47 -39 61q-23 15 -76 15q-47 0 -71 -10q-29 -12 -78 -56q-26 -24 -12 -44z" />
+<glyph unicode="&#xf1aa;" d="M0 78q0 72 44.5 128t113.5 72q-22 86 1 173t88 152l12 12l151 -152l-11 -11q-37 -37 -37 -89t37 -90q37 -37 89 -37t89 37l30 30l151 152l161 160l151 -152l-160 -160l-151 -152l-30 -30q-65 -64 -151.5 -87t-171.5 -2q-16 -70 -72 -115t-129 -45q-85 0 -145 60.5 t-60 145.5zM2 1202q0 85 60 145.5t145 60.5q76 0 133.5 -49t69.5 -123q84 20 169.5 -3.5t149.5 -87.5l12 -12l-152 -152l-12 12q-37 37 -89 37t-89 -37t-37 -89.5t37 -89.5l29 -29l152 -152l160 -160l-151 -152l-161 160l-151 152l-30 30q-68 67 -90 159.5t5 179.5 q-70 15 -115 71t-45 129zM446 803l161 160l152 152l29 30q67 67 159 89.5t178 -3.5q11 75 68.5 126t135.5 51q85 0 145 -60.5t60 -145.5q0 -77 -51 -135t-127 -69q26 -85 3 -176.5t-90 -158.5l-12 -12l-151 152l12 12q37 37 37 89t-37 89t-89 37t-89 -37l-30 -30l-152 -152 l-160 -160zM776 793l152 152l160 -160l152 -152l29 -30q64 -64 87.5 -150.5t2.5 -171.5q76 -11 126.5 -68.5t50.5 -134.5q0 -85 -60 -145.5t-145 -60.5q-74 0 -131 47t-71 118q-86 -28 -179.5 -6t-161.5 90l-11 12l151 152l12 -12q37 -37 89 -37t89 37t37 89t-37 89l-30 30 l-152 152z" />
+<glyph unicode="&#xf1ab;" d="M0 -16v1078q3 9 4 10q5 6 20 11q106 35 149 50v384l558 -198q2 0 160.5 55t316 108.5t161.5 53.5q20 0 20 -21v-418l147 -47v-1079l-774 246q-14 -6 -375 -127.5t-368 -121.5q-13 0 -18 13q0 1 -1 3zM39 15l694 232v1032l-694 -233v-1031zM147 293q6 4 82 92 q21 24 85.5 115t78.5 118q17 30 51 98.5t36 77.5q-8 1 -110 -33q-8 -2 -27.5 -7.5t-34.5 -9.5t-17 -5q-2 -2 -2 -10.5t-1 -9.5q-5 -10 -31 -15q-23 -7 -47 0q-18 4 -28 21q-4 6 -5 23q6 2 24.5 5t29.5 6q58 16 105 32q100 35 102 35q10 2 43 19.5t44 21.5q9 3 21.5 8 t14.5 5.5t6 -0.5q2 -12 -1 -33q0 -2 -12.5 -27t-26.5 -53.5t-17 -33.5q-25 -50 -77 -131l64 -28q12 -6 74.5 -32t67.5 -28q4 -1 10.5 -25.5t4.5 -30.5q-1 -3 -12.5 0.5t-31.5 11.5l-20 9q-44 20 -87 49q-7 5 -41 31.5t-38 28.5q-67 -103 -134 -181q-81 -95 -105 -110 q-4 -2 -19.5 -4t-18.5 0zM268 933l1 3q3 -3 19.5 -5t26.5 0t58 16q36 12 55 14q17 0 21 -17q3 -15 -4 -28q-12 -23 -50 -38q-30 -12 -60 -12q-26 3 -49 26q-14 15 -18 41zM310 -116q0 8 5 13.5t13 5.5q4 0 18 -7.5t30.5 -16.5t20.5 -11q73 -37 159.5 -61.5t157.5 -24.5 q95 0 167 14.5t157 50.5q15 7 30.5 15.5t34 19t28.5 16.5l-43 73l158 -13l-54 -160l-40 66q-130 -83 -276 -108q-58 -12 -91 -12h-84q-79 0 -199.5 39t-183.5 85q-8 7 -8 16zM777 1294l573 -184v380zM885 453l102 -31l45 110l211 -65l37 -135l102 -31l-181 657l-100 31z M1071 630l76 185l63 -227z" />
+<glyph unicode="&#xf1ac;" horiz-adv-x="1792" d="M0 -96v1088q0 66 47 113t113 47h128q66 0 113 -47t47 -113v-1088q0 -66 -47 -113t-113 -47h-128q-66 0 -113 47t-47 113zM512 -96v1536q0 40 28 68t68 28h672q40 0 88 -20t76 -48l152 -152q28 -28 48 -76t20 -88v-163q58 -34 93 -93t35 -128v-768q0 -106 -75 -181 t-181 -75h-864q-66 0 -113 47t-47 113zM640 896h896v256h-160q-40 0 -68 28t-28 68v160h-640v-512zM736 0q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM736 256q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9 h-128q-14 0 -23 -9t-9 -23v-128zM736 512q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM992 0q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM992 256q0 -14 9 -23t23 -9h128 q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM992 512q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM1248 0q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23 v-128zM1248 256q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128zM1248 512q0 -14 9 -23t23 -9h128q14 0 23 9t9 23v128q0 14 -9 23t-23 9h-128q-14 0 -23 -9t-9 -23v-128z" />
+<glyph unicode="&#xf1ad;" d="M0 -192v1664q0 26 19 45t45 19h1280q26 0 45 -19t19 -45v-1664q0 -26 -19 -45t-45 -19h-1280q-26 0 -45 19t-19 45zM256 160q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64 q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 928q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM256 1184q0 -14 9 -23 t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM512 96v-192q0 -14 9 -23t23 -9h320q14 0 23 9t9 23v192q0 14 -9 23t-23 9h-320q-14 0 -23 -9t-9 -23zM512 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9 t-9 -23v-64zM512 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM512 928q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM512 1184q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64 q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 928q0 -14 9 -23 t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM768 1184q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 160q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9 t-9 -23v-64zM1024 416q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 672q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 928q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64 q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64zM1024 1184q0 -14 9 -23t23 -9h64q14 0 23 9t9 23v64q0 14 -9 23t-23 9h-64q-14 0 -23 -9t-9 -23v-64z" />
+<glyph unicode="&#xf1ae;" horiz-adv-x="1280" d="M64 1056q0 40 28 68t68 28t68 -28l228 -228h368l228 228q28 28 68 28t68 -28t28 -68t-28 -68l-292 -292v-824q0 -46 -33 -79t-79 -33t-79 33t-33 79v384h-64v-384q0 -46 -33 -79t-79 -33t-79 33t-33 79v824l-292 292q-28 28 -28 68zM416 1152q0 93 65.5 158.5t158.5 65.5 t158.5 -65.5t65.5 -158.5t-65.5 -158.5t-158.5 -65.5t-158.5 65.5t-65.5 158.5z" />
+<glyph unicode="&#xf1b0;" horiz-adv-x="1664" d="M0 724q0 80 42 139.5t119 59.5q76 0 141.5 -55.5t100.5 -134t35 -152.5q0 -80 -42 -139t-119 -59q-76 0 -141.5 55.5t-100.5 133.5t-35 152zM256 19q0 86 56 191.5t139.5 192.5t187.5 146t193 59q118 0 255 -97.5t229 -237t92 -254.5q0 -46 -17 -76.5t-48.5 -45 t-64.5 -20t-76 -5.5q-68 0 -187.5 45t-182.5 45q-66 0 -192.5 -44.5t-200.5 -44.5q-183 0 -183 146zM333 1163q0 60 19 113.5t63 92.5t105 39q77 0 138.5 -57.5t91.5 -135t30 -151.5q0 -60 -19 -113.5t-63 -92.5t-105 -39q-76 0 -138 57.5t-92 135.5t-30 151zM884 1064 q0 74 30 151.5t91.5 135t138.5 57.5q61 0 105 -39t63 -92.5t19 -113.5q0 -73 -30 -151t-92 -135.5t-138 -57.5q-61 0 -105 39t-63 92.5t-19 113.5zM1226 581q0 74 35 152.5t100.5 134t141.5 55.5q77 0 119 -59.5t42 -139.5q0 -74 -35 -152t-100.5 -133.5t-141.5 -55.5 q-77 0 -119 59t-42 139z" />
+<glyph unicode="&#xf1b1;" horiz-adv-x="768" d="M64 1008q0 128 42.5 249.5t117.5 200t160 78.5t160 -78.5t117.5 -200t42.5 -249.5q0 -145 -57 -243.5t-152 -135.5l45 -821q2 -26 -16 -45t-44 -19h-192q-26 0 -44 19t-16 45l45 821q-95 37 -152 135.5t-57 243.5z" />
+<glyph unicode="&#xf1b2;" horiz-adv-x="1792" d="M0 256v768q0 40 23 73t61 47l704 256q22 8 44 8t44 -8l704 -256q38 -14 61 -47t23 -73v-768q0 -35 -18 -65t-49 -47l-704 -384q-28 -16 -61 -16t-61 16l-704 384q-31 17 -49 47t-18 65zM134 1026l698 -254l698 254l-698 254zM896 -93l640 349v636l-640 -233v-752z" />
+<glyph unicode="&#xf1b3;" horiz-adv-x="2304" d="M0 96v416q0 38 21.5 70t56.5 48l434 186v400q0 38 21.5 70t56.5 48l448 192q23 10 50 10t50 -10l448 -192q35 -16 56.5 -48t21.5 -70v-400l434 -186q36 -16 57 -48t21 -70v-416q0 -36 -19 -67t-52 -47l-448 -224q-25 -14 -57 -14t-57 14l-448 224q-5 2 -7 4q-2 -2 -7 -4 l-448 -224q-25 -14 -57 -14t-57 14l-448 224q-33 16 -52 47t-19 67zM172 531l404 -173l404 173l-404 173zM640 -96l384 192v314l-384 -164v-342zM647 1219l441 -189l441 189l-441 189zM1152 651l384 165v266l-384 -164v-267zM1196 531l404 -173l404 173l-404 173zM1664 -96 l384 192v314l-384 -164v-342z" />
+<glyph unicode="&#xf1b4;" horiz-adv-x="2048" d="M0 22v1260h594q87 0 155 -14t126.5 -47.5t90 -96.5t31.5 -154q0 -181 -172 -263q114 -32 172 -115t58 -204q0 -75 -24.5 -136.5t-66 -103.5t-98.5 -71t-121 -42t-134 -13h-611zM277 236h296q205 0 205 167q0 180 -199 180h-302v-347zM277 773h281q78 0 123.5 36.5 t45.5 113.5q0 144 -190 144h-260v-294zM1137 477q0 208 130.5 345.5t336.5 137.5q138 0 240.5 -68t153 -179t50.5 -248q0 -17 -2 -47h-658q0 -111 57.5 -171.5t166.5 -60.5q63 0 122 32t76 87h221q-100 -307 -427 -307q-214 0 -340.5 132t-126.5 347zM1337 1073h511v124 h-511v-124zM1388 576h408q-18 195 -200 195q-90 0 -146 -52.5t-62 -142.5z" />
+<glyph unicode="&#xf1b5;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM128 254h382q117 0 197 57.5t80 170.5q0 158 -143 200q107 52 107 164q0 57 -19.5 96.5t-56.5 60.5t-79 29.5 t-97 8.5h-371v-787zM301 388v217h189q124 0 124 -113q0 -104 -128 -104h-185zM301 723v184h163q119 0 119 -90q0 -94 -106 -94h-176zM838 538q0 -135 79 -217t213 -82q205 0 267 191h-138q-11 -34 -47.5 -54t-75.5 -20q-68 0 -104 38t-36 107h411q1 10 1 30 q0 132 -74.5 220.5t-203.5 88.5q-128 0 -210 -86t-82 -216zM964 911v77h319v-77h-319zM996 600q4 56 39 89t91 33q113 0 124 -122h-254z" />
+<glyph unicode="&#xf1b6;" horiz-adv-x="2048" d="M0 764q0 86 61 146.5t146 60.5q73 0 130 -46t73 -117l783 -315q49 29 106 29q14 0 21 -1l173 248q1 114 82 194.5t195 80.5q115 0 196.5 -81t81.5 -196t-81.5 -196.5t-196.5 -81.5l-265 -194q-8 -80 -67.5 -133.5t-138.5 -53.5q-73 0 -130 46t-73 117l-783 315 q-51 -30 -106 -30q-85 0 -146 61t-61 147zM55 764q0 -64 44.5 -108.5t107.5 -44.5q11 0 33 4l-64 26q-33 14 -52.5 44.5t-19.5 66.5q0 50 35.5 85.5t85.5 35.5q20 0 41 -8v1l76 -31q-20 37 -56.5 59t-78.5 22q-63 0 -107.5 -44.5t-44.5 -107.5zM1164 244q19 -37 55.5 -59 t79.5 -22q63 0 107.5 44.5t44.5 107.5t-44.5 108t-107.5 45q-13 0 -33 -4q2 -1 20 -8t21.5 -8.5t18.5 -8.5t19 -10t16 -11t15.5 -13.5t11 -14.5t10 -18t5 -21t2.5 -25q0 -50 -35.5 -85.5t-85.5 -35.5q-14 0 -31.5 4.5t-29 9t-31.5 13.5t-28 12zM1584 767q0 -77 54.5 -131.5 t131.5 -54.5t132 54.5t55 131.5t-55 131.5t-132 54.5q-76 0 -131 -54.5t-55 -131.5zM1623 767q0 62 43.5 105.5t104.5 43.5t105 -44t44 -105t-43.5 -104.5t-105.5 -43.5q-61 0 -104.5 43.5t-43.5 104.5z" />
+<glyph unicode="&#xf1b7;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM128 693q0 -53 38 -91t92 -38q36 0 66 18l489 -197q10 -44 45.5 -73t81.5 -29q50 0 86.5 34t41.5 83l167 122 q71 0 122 50.5t51 122.5t-51 123t-122 51q-72 0 -122.5 -50.5t-51.5 -121.5l-108 -155q-2 0 -6.5 0.5t-6.5 0.5q-35 0 -67 -19l-489 197q-10 44 -45.5 73t-80.5 29q-54 0 -92 -38t-38 -92zM162 693q0 40 28 68t68 28q27 0 49.5 -14t34.5 -37l-48 19q-29 11 -56.5 -2 t-38.5 -41q-12 -29 -0.5 -57t39.5 -40v-1l40 -16q-14 -2 -20 -2q-40 0 -68 27.5t-28 67.5zM855 369q5 -2 47 -19q29 -12 58 0.5t41 41.5q11 29 -1 57.5t-41 40.5l-40 16q14 2 21 2q39 0 67 -27.5t28 -67.5t-28 -67.5t-67 -27.5q-59 0 -85 51zM1118 695q0 48 34 82t83 34 q48 0 82 -34t34 -82t-34 -82t-82 -34q-49 0 -83 34t-34 82zM1142 696q0 -39 27.5 -66t65.5 -27t65.5 27t27.5 66q0 38 -27.5 65.5t-65.5 27.5t-65.5 -27.5t-27.5 -65.5z" />
+<glyph unicode="&#xf1b8;" horiz-adv-x="1792" d="M16 970l433 -17l180 -379l-147 92q-63 -72 -111.5 -144.5t-72.5 -125t-39.5 -94.5t-18.5 -63l-4 -21l-190 357q-17 26 -18 56t6 47l8 18q35 63 114 188zM270.5 158q-3.5 28 4 65t12 55t21.5 64t19 53q78 -12 509 -28l-15 -368l-2 -22l-420 29q-36 3 -67 31.5t-47 65.5 q-11 27 -14.5 55zM294 1124l225 356q20 31 60 45t80 10q24 -2 48.5 -12t42 -21t41.5 -33t36 -34.5t36 -39.5t32 -35q-47 -63 -265 -435l-317 187zM782 1524l405 -1q31 3 58 -10.5t39 -28.5l11 -15q39 -61 112 -190l142 83l-220 -373l-419 20l151 86q-34 89 -75 166 t-75.5 123.5t-64.5 80t-47 46.5zM953 197l211 362l7 -173q170 -16 283 -5t170 33l56 22l-188 -359q-12 -29 -36.5 -46.5t-43.5 -20.5l-18 -4q-71 -7 -219 -12l8 -164zM1218 847l313 195l19 11l212 -363q18 -37 12.5 -76t-27.5 -74q-13 -20 -33 -37t-38 -28t-48.5 -22 t-47 -16t-51.5 -14t-46 -12q-34 72 -265 436z" />
+<glyph unicode="&#xf1b9;" horiz-adv-x="1984" d="M0 160v384q0 93 65.5 158.5t158.5 65.5h28l105 419q23 94 104 157.5t179 63.5h704q98 0 179 -63.5t104 -157.5l105 -419h28q93 0 158.5 -65.5t65.5 -158.5v-384q0 -14 -9 -23t-23 -9h-128v-128q0 -80 -56 -136t-136 -56t-136 56t-56 136v128h-928v-128q0 -80 -56 -136 t-136 -56t-136 56t-56 136v128h-96q-14 0 -23 9t-9 23zM160 448q0 -66 47 -113t113 -47t113 47t47 113t-47 113t-113 47t-113 -47t-47 -113zM516 768h952l-89 357q-2 8 -14 17.5t-21 9.5h-704q-9 0 -21 -9.5t-14 -17.5zM1472 448q0 -66 47 -113t113 -47t113 47t47 113 t-47 113t-113 47t-113 -47t-47 -113z" />
+<glyph unicode="&#xf1ba;" horiz-adv-x="1984" d="M0 32v384q0 93 65.5 158.5t158.5 65.5h28l105 419q23 94 104 157.5t179 63.5h128v224q0 14 9 23t23 9h448q14 0 23 -9t9 -23v-224h64q98 0 179 -63.5t104 -157.5l105 -419h28q93 0 158.5 -65.5t65.5 -158.5v-384q0 -14 -9 -23t-23 -9h-128v-64q0 -80 -56 -136t-136 -56 t-136 56t-56 136v64h-928v-64q0 -80 -56 -136t-136 -56t-136 56t-56 136v64h-96q-14 0 -23 9t-9 23zM160 320q0 -66 47 -113t113 -47t113 47t47 113t-47 113t-113 47t-113 -47t-47 -113zM516 640h952l-89 357q-2 8 -14 17.5t-21 9.5h-704q-9 0 -21 -9.5t-14 -17.5zM1472 320 q0 -66 47 -113t113 -47t113 47t47 113t-47 113t-113 47t-113 -47t-47 -113z" />
+<glyph unicode="&#xf1bb;" d="M32 64q0 26 19 45l402 403h-229q-26 0 -45 19t-19 45t19 45l402 403h-197q-26 0 -45 19t-19 45t19 45l384 384q19 19 45 19t45 -19l384 -384q19 -19 19 -45t-19 -45t-45 -19h-197l402 -403q19 -19 19 -45t-19 -45t-45 -19h-229l402 -403q19 -19 19 -45t-19 -45t-45 -19 h-462q1 -17 6 -87.5t5 -108.5q0 -25 -18 -42.5t-43 -17.5h-320q-25 0 -43 17.5t-18 42.5q0 38 5 108.5t6 87.5h-462q-26 0 -45 19t-19 45z" />
+<glyph unicode="&#xf1bc;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM237 886q0 -31 20.5 -52t51.5 -21q11 0 40 8q133 37 307 37q159 0 309.5 -34t253.5 -95q21 -12 40 -12 q29 0 50.5 20.5t21.5 51.5q0 47 -40 70q-126 73 -293 110.5t-343 37.5q-204 0 -364 -47q-23 -7 -38.5 -25.5t-15.5 -48.5zM289 637q0 -25 17.5 -42.5t42.5 -17.5q7 0 37 8q122 33 251 33q279 0 488 -124q24 -13 38 -13q25 0 42.5 17.5t17.5 42.5q0 40 -35 61 q-237 141 -548 141q-153 0 -303 -42q-48 -13 -48 -64zM321 406q0 -20 13.5 -34.5t35.5 -14.5q5 0 37 8q132 27 243 27q226 0 397 -103q19 -11 33 -11q19 0 33 13.5t14 34.5q0 32 -30 51q-193 115 -447 115q-133 0 -287 -34q-42 -9 -42 -52z" />
+<glyph unicode="&#xf1bd;" d="M0 11v1258q0 58 40.5 98.5t98.5 40.5h1258q58 0 98.5 -40.5t40.5 -98.5v-1258q0 -58 -40.5 -98.5t-98.5 -40.5h-1258q-58 0 -98.5 40.5t-40.5 98.5zM71 11q0 -28 20 -48t48 -20h1258q28 0 48 20t20 48v1258q0 28 -20 48t-48 20h-1258q-28 0 -48 -20t-20 -48v-1258z M121 11v141l711 195l-212 439q4 1 12 2.5t12 1.5q170 32 303.5 21.5t221 -46t143.5 -94.5q27 -28 -25 -42q-64 -16 -256 -62l-97 198q-111 7 -240 -16l188 -387l533 145v-496q0 -7 -5.5 -12.5t-12.5 -5.5h-1258q-7 0 -12.5 5.5t-5.5 12.5zM121 709v560q0 7 5.5 12.5 t12.5 5.5h1258q7 0 12.5 -5.5t5.5 -12.5v-428q-85 30 -188 52q-294 64 -645 12l-18 -3l-65 134h-233l85 -190q-132 -51 -230 -137zM246 413q-24 203 166 305l129 -270l-255 -61q-14 -3 -26 4.5t-14 21.5z" />
+<glyph unicode="&#xf1be;" horiz-adv-x="2304" d="M0 405l17 128q2 9 9 9t9 -9l20 -128l-20 -126q-2 -9 -9 -9t-9 9zM79 405l23 207q0 9 9 9q8 0 10 -9l26 -207l-26 -203q-2 -9 -10 -9q-9 0 -9 10zM169 405l21 245q2 12 12 12q11 0 11 -12l25 -245l-25 -237q0 -11 -11 -11q-10 0 -12 11zM259 405l21 252q0 13 13 13 q12 0 14 -13l23 -252l-23 -244q-2 -13 -14 -13q-13 0 -13 13zM350 405l20 234q0 6 4.5 10.5t10.5 4.5q14 0 16 -15l21 -234l-21 -246q-2 -16 -16 -16q-6 0 -10.5 4.5t-4.5 11.5zM401 159zM442 405l18 380q2 18 18 18q7 0 12 -5.5t5 -12.5l21 -380l-21 -246q0 -7 -5 -12.5 t-12 -5.5q-16 0 -18 18zM534 403l16 468q2 19 20 19q8 0 13.5 -5.5t5.5 -13.5l19 -468l-19 -244q0 -8 -5.5 -13.5t-13.5 -5.5q-18 0 -20 19zM628 405l16 506q0 9 6.5 15.5t14.5 6.5q9 0 15 -6.5t7 -15.5l18 -506l-18 -242q-2 -21 -22 -21q-19 0 -21 21zM723 405l14 -241 q1 -10 7.5 -16.5t15.5 -6.5q22 0 24 23l16 241l-16 523q-1 10 -7.5 17t-16.5 7q-9 0 -16 -7t-7 -17zM784 164zM817 405l14 510q0 11 7.5 18t17.5 7t17.5 -7t7.5 -18l15 -510l-15 -239q0 -10 -7.5 -17.5t-17.5 -7.5t-17 7t-8 18zM913 404l12 492q1 12 9 20t19 8t18.5 -8 t8.5 -20l14 -492l-14 -236q0 -11 -8 -19t-19 -8t-19 8t-9 19zM1010 405q0 -1 11 -236v-1q0 -10 6 -17q9 -11 23 -11q11 0 20 9q9 7 9 20l1 24l11 211l-12 586q0 16 -13 24q-8 5 -16 5t-16 -5q-13 -8 -13 -24l-1 -6zM1079 169zM1103 404l12 636v3q2 15 12 24q9 7 20 7 q8 0 15 -5q14 -8 16 -26l14 -639l-14 -231q0 -13 -9 -22t-22 -9t-22 9t-10 22l-6 114zM1204 174v899q0 23 28 33q85 34 181 34q195 0 338 -131.5t160 -323.5q53 22 110 22q117 0 200 -83t83 -201q0 -117 -83 -199.5t-200 -82.5h-786q-13 2 -22 11t-9 22z" />
+<glyph unicode="&#xf1c0;" d="M0 0v170q119 -84 325 -127t443 -43t443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128zM0 384v170q119 -84 325 -127t443 -43t443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128zM0 768 v170q119 -84 325 -127t443 -43t443 43t325 127v-170q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5t-103 128zM0 1152v128q0 69 103 128t280 93.5t385 34.5t385 -34.5t280 -93.5t103 -128v-128q0 -69 -103 -128t-280 -93.5t-385 -34.5t-385 34.5t-280 93.5 t-103 128z" />
+<glyph unicode="&#xf1c1;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM257 60q9 40 56 91.5t132 96.5q14 9 23 -6q2 -2 2 -4 q52 85 107 197q68 136 104 262q-24 82 -30.5 159.5t6.5 127.5q11 40 42 40h21h1q23 0 35 -15q18 -21 9 -68q-2 -6 -4 -8q1 -3 1 -8v-30q-2 -123 -14 -192q55 -164 146 -238q33 -26 84 -56q59 7 117 7q147 0 177 -49q16 -22 2 -52q0 -1 -1 -2l-2 -2v-1q-6 -38 -71 -38 q-48 0 -115 20t-130 53q-221 -24 -392 -83q-153 -262 -242 -262q-15 0 -28 7l-24 12q-1 1 -6 5q-10 10 -6 36zM318 54q52 24 137 158q-51 -40 -87.5 -84t-49.5 -74zM592 313q135 54 284 81q-2 1 -13 9.5t-16 13.5q-76 67 -127 176q-27 -86 -83 -197q-30 -56 -45 -83z M714 842q1 7 7 44q0 3 7 43q1 4 4 8q-1 1 -1 2t-0.5 1.5t-0.5 1.5q-1 22 -13 36q0 -1 -1 -2v-2q-15 -42 -2 -132zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376zM1098 353q76 -28 124 -28q14 0 18 1q0 1 -2 3q-24 24 -140 24z" />
+<glyph unicode="&#xf1c2;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM233 661h70l164 -661h159l128 485q7 20 10 46q2 16 2 24 h4l3 -24q1 -3 3.5 -20t5.5 -26l128 -485h159l164 661h70v107h-300v-107h90l-99 -438q-5 -20 -7 -46l-2 -21h-4l-3 21q-1 5 -4 21t-5 25l-144 545h-114l-144 -545q-2 -9 -4.5 -24.5t-3.5 -21.5l-4 -21h-4l-2 21q-2 26 -7 46l-99 438h90v107h-300v-107zM1024 1024h376 q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c3;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM429 0h281v106h-75l103 161q5 7 10 16.5t7.5 13.5t3.5 4 h2q1 -4 5 -10q2 -4 4.5 -7.5t6 -8t6.5 -8.5l107 -161h-76v-106h291v106h-68l-192 273l195 282h67v107h-279v-107h74l-103 -159q-4 -7 -10 -16.5t-9 -13.5l-2 -3h-2q-1 4 -5 10q-6 11 -17 23l-106 159h76v107h-290v-107h68l189 -272l-194 -283h-68v-106zM1024 1024h376 q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c4;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM416 0h327v106h-93v167h137q76 0 118 15q67 23 106.5 87 t39.5 146q0 81 -37 141t-100 87q-48 19 -130 19h-368v-107h92v-555h-92v-106zM650 386v268h120q52 0 83 -18q56 -33 56 -115q0 -89 -62 -120q-31 -15 -78 -15h-119zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c5;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM256 0v192l192 192l128 -128l384 384l320 -320v-320 h-1024zM256 704q0 80 56 136t136 56t136 -56t56 -136t-56 -136t-136 -56t-136 56t-56 136zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c6;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-128v-128h-128v128h-512v-1536zM384 192q0 25 8 52q21 63 120 396 v128h128v-128h79q22 0 39 -13t23 -34l107 -349q8 -27 8 -52q0 -83 -72.5 -137.5t-183.5 -54.5t-183.5 54.5t-72.5 137.5zM512 192q0 -26 37.5 -45t90.5 -19t90.5 19t37.5 45t-37.5 45t-90.5 19t-90.5 -19t-37.5 -45zM512 896h128v128h-128v-128zM512 1152h128v128h-128v-128 zM640 768h128v128h-128v-128zM640 1024h128v128h-128v-128zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c7;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM256 288v192q0 14 9 23t23 9h131l166 167q16 15 35 7 q20 -8 20 -30v-544q0 -22 -20 -30q-8 -2 -12 -2q-12 0 -23 9l-166 167h-131q-14 0 -23 9t-9 23zM762 206.5q1 -26.5 20 -44.5q20 -17 44 -17q27 0 47 20q87 93 87 219t-87 219q-18 19 -45 20t-46 -17t-20 -44.5t18 -46.5q52 -57 52 -131t-52 -131q-19 -20 -18 -46.5z M973.5 54.5q2.5 -26.5 23.5 -42.5q18 -15 40 -15q31 0 50 24q129 159 129 363t-129 363q-16 21 -43 24t-47 -14q-21 -17 -23.5 -43.5t14.5 -47.5q100 -123 100 -282t-100 -282q-17 -21 -14.5 -47.5zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c8;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM256 256v384q0 52 38 90t90 38h384q52 0 90 -38t38 -90 v-384q0 -52 -38 -90t-90 -38h-384q-52 0 -90 38t-38 90zM960 403v90l265 266q9 9 23 9q4 0 12 -2q20 -8 20 -30v-576q0 -22 -20 -30q-8 -2 -12 -2q-14 0 -23 9zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1c9;" d="M0 -160v1600q0 40 28 68t68 28h896q40 0 88 -20t76 -48l312 -312q28 -28 48 -76t20 -88v-1152q0 -40 -28 -68t-68 -28h-1344q-40 0 -68 28t-28 68zM128 -128h1280v1024h-416q-40 0 -68 28t-28 68v416h-768v-1536zM254 429q-14 19 0 38l226 301q8 11 21 12.5t24 -6.5 l51 -38q11 -8 12.5 -21t-6.5 -24l-182 -243l182 -243q8 -11 6.5 -24t-12.5 -21l-51 -38q-11 -8 -24 -6.5t-21 12.5zM636 43l138 831q2 13 13 20.5t24 5.5l63 -10q13 -2 20.5 -13t5.5 -24l-138 -831q-2 -13 -13 -20.5t-24 -5.5l-63 10q-13 2 -20.5 13t-5.5 24zM947.5 181 q-1.5 13 6.5 24l182 243l-182 243q-8 11 -6.5 24t12.5 21l51 38q11 8 24 6.5t21 -12.5l226 -301q14 -19 0 -38l-226 -301q-8 -11 -21 -12.5t-24 6.5l-51 38q-11 8 -12.5 21zM1024 1024h376q-10 29 -22 41l-313 313q-12 12 -41 22v-376z" />
+<glyph unicode="&#xf1ca;" d="M39 1286h283q26 -218 70 -398.5t104.5 -317t121.5 -235.5t140 -195q169 169 287 406q-142 72 -223 220t-81 333q0 192 104 314.5t284 122.5q178 0 273 -105.5t95 -297.5q0 -159 -58 -286q-7 -1 -19.5 -3t-46 -2t-63 6t-62 25.5t-50.5 51.5q31 103 31 184q0 87 -29 132 t-79 45q-53 0 -85 -49.5t-32 -140.5q0 -186 105 -293.5t267 -107.5q62 0 121 14v-198q-101 -23 -198 -23q-65 -136 -165.5 -271t-181.5 -215.5t-128 -106.5q-80 -45 -162 3q-28 17 -60.5 43.5t-85 83.5t-102.5 128.5t-107.5 184t-105.5 244t-91.5 314.5t-70.5 390z" />
+<glyph unicode="&#xf1cb;" horiz-adv-x="1792" d="M0 367v546q0 41 34 64l819 546q21 13 43 13t43 -13l819 -546q34 -23 34 -64v-546q0 -41 -34 -64l-819 -546q-21 -13 -43 -13t-43 13l-819 546q-34 23 -34 64zM154 511l193 129l-193 129v-258zM216 367l603 -402v359l-334 223zM216 913l269 -180l334 223v359zM624 640 l272 -182l272 182l-272 182zM973 -35l603 402l-269 180l-334 -223v-359zM973 956l334 -223l269 180l-603 402v-359zM1445 640l193 -129v258z" />
+<glyph unicode="&#xf1cc;" horiz-adv-x="2048" d="M0 407q0 110 55 203t147 147q-12 39 -12 82q0 115 82 196t199 81q95 0 172 -58q75 154 222.5 248t326.5 94q166 0 306 -80.5t221.5 -218.5t81.5 -301q0 -6 -0.5 -18t-0.5 -18q111 -46 179.5 -145.5t68.5 -221.5q0 -164 -118 -280.5t-285 -116.5q-4 0 -11.5 0.5t-10.5 0.5 h-1209h-1h-2h-5q-170 10 -288 125.5t-118 280.5zM468 498q0 -122 84 -193t208 -71q137 0 240 99q-16 20 -47.5 56.5t-43.5 50.5q-67 -65 -144 -65q-55 0 -93.5 33.5t-38.5 87.5q0 53 38.5 87t91.5 34q44 0 84.5 -21t73 -55t65 -75t69 -82t77 -75t97 -55t121.5 -21 q121 0 204.5 71.5t83.5 190.5q0 121 -84 192t-207 71q-143 0 -241 -97q14 -16 29.5 -34t34.5 -40t29 -34q66 64 142 64q52 0 92 -33t40 -84q0 -57 -37 -91.5t-94 -34.5q-43 0 -82.5 21t-72 55t-65.5 75t-69.5 82t-77.5 75t-96.5 55t-118.5 21q-122 0 -207 -70.5t-85 -189.5z " />
+<glyph unicode="&#xf1cd;" horiz-adv-x="1792" d="M0 640q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348zM128 640q0 -190 90 -361l194 194q-28 82 -28 167t28 167l-194 194q-90 -171 -90 -361zM512 640 q0 -159 112.5 -271.5t271.5 -112.5t271.5 112.5t112.5 271.5t-112.5 271.5t-271.5 112.5t-271.5 -112.5t-112.5 -271.5zM535 -38q171 -90 361 -90t361 90l-194 194q-82 -28 -167 -28t-167 28zM535 1318l194 -194q82 28 167 28t167 -28l194 194q-171 90 -361 90t-361 -90z M1380 473l194 -194q90 171 90 361t-90 361l-194 -194q28 -82 28 -167t-28 -167z" />
+<glyph unicode="&#xf1ce;" horiz-adv-x="1792" d="M0 640q0 222 101 414.5t276.5 317t390.5 155.5v-260q-221 -45 -366.5 -221t-145.5 -406q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5q0 230 -145.5 406t-366.5 221v260q215 -31 390.5 -155.5t276.5 -317t101 -414.5 q0 -182 -71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348z" />
+<glyph unicode="&#xf1d0;" horiz-adv-x="1792" d="M19 662q8 217 116 406t305 318h5q0 -1 -1 -3q-8 -8 -28 -33.5t-52 -76.5t-60 -110.5t-44.5 -135.5t-14 -150.5t39 -157.5t108.5 -154q50 -50 102 -69.5t90.5 -11.5t69.5 23.5t47 32.5l16 16q39 51 53 116.5t6.5 122.5t-21 107t-26.5 80l-14 29q-10 25 -30.5 49.5t-43 41 t-43.5 29.5t-35 19l-13 6l104 115q39 -17 78 -52t59 -61l19 -27q1 48 -18.5 103.5t-40.5 87.5l-20 31l161 183l160 -181q-33 -46 -52.5 -102.5t-22.5 -90.5l-4 -33q22 37 61.5 72.5t67.5 52.5l28 17l103 -115q-44 -14 -85 -50t-60 -65l-19 -29q-31 -56 -48 -133.5t-7 -170 t57 -156.5q33 -45 77.5 -60.5t85 -5.5t76 26.5t57.5 33.5l21 16q60 53 96.5 115t48.5 121.5t10 121.5t-18 118t-37 107.5t-45.5 93t-45 72t-34.5 47.5l-13 17q-14 13 -7 13l10 -3q40 -29 62.5 -46t62 -50t64 -58t58.5 -65t55.5 -77t45.5 -88t38 -103t23.5 -117t10.5 -136 q3 -259 -108 -465t-312 -321t-456 -115q-185 0 -351 74t-283.5 198t-184 293t-60.5 353z" />
+<glyph unicode="&#xf1d1;" horiz-adv-x="1792" d="M0 640q0 182 71 348t191 286t286 191t348 71t348 -71t286 -191t191 -286t71 -348t-71 -348t-191 -286t-286 -191t-348 -71t-348 71t-286 191t-191 286t-71 348zM44 640q0 -173 67.5 -331t181.5 -272t272 -181.5t331 -67.5t331 67.5t272 181.5t181.5 272t67.5 331 t-67.5 331t-181.5 272t-272 181.5t-331 67.5t-331 -67.5t-272 -181.5t-181.5 -272t-67.5 -331zM87 640q0 205 98 385l57 -33q-30 -56 -49 -112l82 -28q-35 -100 -35 -212q0 -109 36 -212l-83 -28q22 -60 49 -112l-57 -33q-98 180 -98 385zM206 217l58 34q29 -49 73 -99 l65 57q148 -168 368 -212l-17 -86q65 -12 121 -13v-66q-208 6 -385 109.5t-283 275.5zM207 1063q106 172 282 275.5t385 109.5v-66q-65 -2 -121 -13l17 -86q-220 -42 -368 -211l-65 56q-38 -42 -73 -98zM415 805q33 93 99 169l185 -162q59 68 147 86l-48 240q44 10 98 10 t98 -10l-48 -240q88 -18 147 -86l185 162q66 -76 99 -169l-233 -80q14 -42 14 -85t-14 -85l232 -80q-31 -92 -98 -169l-185 162q-57 -67 -147 -85l48 -241q-52 -10 -98 -10t-98 10l48 241q-90 18 -147 85l-185 -162q-67 77 -98 169l232 80q-14 42 -14 85t14 85zM918 -102 q56 1 121 13l-17 86q220 44 368 212l65 -57q44 50 73 99l58 -34q-106 -172 -283 -275.5t-385 -109.5v66zM918 1382v66q209 -6 385 -109.5t282 -275.5l-57 -33q-35 56 -73 98l-65 -56q-148 169 -368 211l17 86q-56 11 -121 13zM1516 428q36 103 36 212q0 112 -35 212l82 28 q-19 56 -49 112l57 33q98 -180 98 -385t-98 -385l-57 33q27 52 49 112z" />
+<glyph unicode="&#xf1d2;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 218q0 -45 20 -78.5t54 -51t72 -25.5t81 -8q224 0 224 188q0 67 -48 99t-126 46q-27 5 -51.5 20.5 t-24.5 39.5q0 44 49 52q77 15 122 70t45 134q0 24 -10 52q37 9 49 13v125q-78 -29 -135 -29q-50 29 -110 29q-86 0 -145 -57t-59 -143q0 -50 29.5 -102t73.5 -67v-3q-38 -17 -38 -85q0 -53 41 -77v-3q-113 -37 -113 -139zM382 225q0 64 98 64q102 0 102 -61q0 -66 -93 -66 q-107 0 -107 63zM395 693q0 90 77 90q36 0 55 -25.5t19 -63.5q0 -85 -74 -85q-77 0 -77 84zM755 1072q0 -36 25 -62.5t60 -26.5t59.5 27t24.5 62q0 36 -24 63.5t-60 27.5t-60.5 -27t-24.5 -64zM771 350h137q-2 27 -2 82v387q0 46 2 69h-137q3 -23 3 -71v-392q0 -50 -3 -75z M966 771q36 3 37 3q3 0 11 -0.5t12 -0.5v-2h-2v-217q0 -37 2.5 -64t11.5 -56.5t24.5 -48.5t43.5 -31t66 -12q64 0 108 24v121q-30 -21 -68 -21q-53 0 -53 82v225h52q9 0 26.5 -1t26.5 -1v117h-105q0 82 3 102h-140q4 -24 4 -55v-47h-60v-117z" />
+<glyph unicode="&#xf1d3;" horiz-adv-x="1792" d="M68 7q0 165 182 225v4q-67 41 -67 126q0 109 63 137v4q-72 24 -119.5 108.5t-47.5 165.5q0 139 95 231.5t235 92.5q96 0 178 -47q98 0 218 47v-202q-36 -12 -79 -22q16 -43 16 -84q0 -127 -73 -216.5t-197 -112.5q-40 -8 -59.5 -27t-19.5 -58q0 -31 22.5 -51.5t58 -32 t78.5 -22t86 -25.5t78.5 -37.5t58 -64t22.5 -98.5q0 -304 -363 -304q-69 0 -130 12.5t-116 41t-87.5 82t-32.5 127.5zM272 18q0 -101 172 -101q151 0 151 105q0 100 -165 100q-158 0 -158 -104zM293 775q0 -135 124 -135q119 0 119 137q0 61 -30 102t-89 41 q-124 0 -124 -145zM875 1389q0 59 39.5 103t98.5 44q58 0 96.5 -44.5t38.5 -102.5t-39 -101.5t-96 -43.5q-58 0 -98 43.5t-40 101.5zM901 220q4 45 4 134v609q0 94 -4 128h222q-4 -33 -4 -124v-613q0 -89 4 -134h-222zM1217 901v190h96v76q0 54 -6 89h227q-6 -41 -6 -165 h171v-190q-15 0 -43.5 2t-42.5 2h-85v-365q0 -131 87 -131q61 0 109 33v-196q-71 -39 -174 -39q-62 0 -107 20t-70 50t-39.5 78t-18.5 92t-4 103v351h2v4q-7 0 -19 1t-18 1q-21 0 -59 -6z" />
+<glyph unicode="&#xf1d4;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM368 1135l323 -589v-435h134v436l343 588h-150q-21 -39 -63.5 -118.5t-68 -128.5t-59.5 -118.5t-60 -128.5h-3 q-21 48 -44.5 97t-52 105.5t-46.5 92t-54 104.5t-49 95h-150z" />
+<glyph unicode="&#xf1d5;" horiz-adv-x="1280" d="M57 953q0 119 46.5 227t124.5 186t186 124t226 46q158 0 292.5 -78t212.5 -212.5t78 -292.5t-78 -292t-212.5 -212t-292.5 -78q-64 0 -131 14q-21 5 -32.5 23.5t-6.5 39.5q5 20 23 31.5t39 7.5q51 -13 108 -13q97 0 186 38t153 102t102 153t38 186t-38 186t-102 153 t-153 102t-186 38t-186 -38t-153 -102t-102 -153t-38 -186q0 -114 52 -218q10 -20 3.5 -40t-25.5 -30t-39.5 -3t-30.5 26q-64 123 -64 265zM113.5 38.5q10.5 121.5 29.5 217t54 186t69 155.5t74 125q61 90 132 165q-16 35 -16 77q0 80 56.5 136.5t136.5 56.5t136.5 -56.5 t56.5 -136.5t-57 -136.5t-136 -56.5q-60 0 -111 35q-62 -67 -115 -146q-247 -371 -202 -859q1 -22 -12.5 -38.5t-34.5 -18.5h-5q-20 0 -35 13.5t-17 33.5q-14 126 -3.5 247.5z" />
+<glyph unicode="&#xf1d6;" horiz-adv-x="1792" d="M18 264q0 275 252 466q-8 19 -8 52q0 20 11 49t24 45q-1 22 7.5 53t22.5 43q0 139 92.5 288.5t217.5 209.5q139 66 324 66q133 0 266 -55q49 -21 90 -48t71 -56t55 -68t42 -74t32.5 -84.5t25.5 -89.5t22 -98l1 -5q55 -83 55 -150q0 -14 -9 -40t-9 -38q0 -1 1.5 -3.5 t3.5 -5t2 -3.5q77 -114 120.5 -214.5t43.5 -208.5q0 -43 -19.5 -100t-55.5 -57q-9 0 -19.5 7.5t-19 17.5t-19 26t-16 26.5t-13.5 26t-9 17.5q-1 1 -3 1l-5 -4q-59 -154 -132 -223q20 -20 61.5 -38.5t69 -41.5t35.5 -65q-2 -4 -4 -16t-7 -18q-64 -97 -302 -97q-53 0 -110.5 9 t-98 20t-104.5 30q-15 5 -23 7q-14 4 -46 4.5t-40 1.5q-41 -45 -127.5 -65t-168.5 -20q-35 0 -69 1.5t-93 9t-101 20.5t-74.5 40t-32.5 64q0 40 10 59.5t41 48.5q11 2 40.5 13t49.5 12q4 0 14 2q2 2 2 4l-2 3q-48 11 -108 105.5t-73 156.5l-5 3q-4 0 -12 -20 q-18 -41 -54.5 -74.5t-77.5 -37.5h-1q-4 0 -6 4.5t-5 5.5q-23 54 -23 100z" />
+<glyph unicode="&#xf1d7;" horiz-adv-x="2048" d="M0 858q0 169 97.5 311t264 223.5t363.5 81.5q176 0 332.5 -66t262 -182.5t136.5 -260.5q-31 4 -70 4q-169 0 -311 -77t-223.5 -208.5t-81.5 -287.5q0 -78 23 -152q-35 -3 -68 -3q-26 0 -50 1.5t-55 6.5t-44.5 7t-54.5 10.5t-50 10.5l-253 -127l72 218q-290 203 -290 490z M380 1075q0 -39 33 -64.5t76 -25.5q41 0 66 24.5t25 65.5t-25 66t-66 25q-43 0 -76 -25.5t-33 -65.5zM816 404q0 143 81.5 264t223.5 191.5t311 70.5q161 0 303 -70.5t227.5 -192t85.5 -263.5q0 -117 -68.5 -223.5t-185.5 -193.5l55 -181l-199 109q-150 -37 -218 -37 q-169 0 -311 70.5t-223.5 191.5t-81.5 264zM888 1075q0 -39 33 -64.5t76 -25.5q41 0 65.5 24.5t24.5 65.5t-24.5 66t-65.5 25q-43 0 -76 -25.5t-33 -65.5zM1160 568q0 -28 22.5 -50.5t49.5 -22.5q40 0 65.5 22t25.5 51q0 28 -25.5 50t-65.5 22q-27 0 -49.5 -22.5 t-22.5 -49.5zM1559 568q0 -28 22.5 -50.5t49.5 -22.5q39 0 65 22t26 51q0 28 -26 50t-65 22q-27 0 -49.5 -22.5t-22.5 -49.5z" />
+<glyph unicode="&#xf1d8;" horiz-adv-x="1792" d="M0 508q-2 40 32 59l1664 960q15 9 32 9q20 0 36 -11q33 -24 27 -64l-256 -1536q-5 -29 -32 -45q-14 -8 -31 -8q-11 0 -24 5l-453 185l-242 -295q-18 -23 -49 -23q-13 0 -22 4q-19 7 -30.5 23.5t-11.5 36.5v349l864 1059l-1069 -925l-395 162q-37 14 -40 55z" />
+<glyph unicode="&#xf1d9;" horiz-adv-x="1792" d="M0 508q-3 39 32 59l1664 960q35 21 68 -2q33 -24 27 -64l-256 -1536q-5 -29 -32 -45q-14 -8 -31 -8q-11 0 -24 5l-527 215l-298 -327q-18 -21 -47 -21q-14 0 -23 4q-19 7 -30 23.5t-11 36.5v452l-472 193q-37 14 -40 55zM209 522l336 -137l863 639l-478 -797l492 -201 l221 1323z" />
+<glyph unicode="&#xf1da;" d="M0 832v448q0 42 40 59q39 17 69 -14l130 -129q107 101 244.5 156.5t284.5 55.5q156 0 298 -61t245 -164t164 -245t61 -298t-61 -298t-164 -245t-245 -164t-298 -61q-172 0 -327 72.5t-264 204.5q-7 10 -6.5 22.5t8.5 20.5l137 138q10 9 25 9q16 -2 23 -12 q73 -95 179 -147t225 -52q104 0 198.5 40.5t163.5 109.5t109.5 163.5t40.5 198.5t-40.5 198.5t-109.5 163.5t-163.5 109.5t-198.5 40.5q-98 0 -188 -35.5t-160 -101.5l137 -138q31 -30 14 -69q-17 -40 -59 -40h-448q-26 0 -45 19t-19 45zM512 480v64q0 14 9 23t23 9h224v352 q0 14 9 23t23 9h64q14 0 23 -9t9 -23v-448q0 -14 -9 -23t-23 -9h-320q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf1db;" d="M0 640q0 209 103 385.5t279.5 279.5t385.5 103t385.5 -103t279.5 -279.5t103 -385.5t-103 -385.5t-279.5 -279.5t-385.5 -103t-385.5 103t-279.5 279.5t-103 385.5zM128 640q0 -130 51 -248.5t136.5 -204t204 -136.5t248.5 -51t248.5 51t204 136.5t136.5 204t51 248.5 t-51 248.5t-136.5 204t-204 136.5t-248.5 51t-248.5 -51t-204 -136.5t-136.5 -204t-51 -248.5z" />
+<glyph unicode="&#xf1dc;" horiz-adv-x="1792" d="M62 1338q0 26 12 48t36 22q46 0 138.5 -3.5t138.5 -3.5q42 0 126.5 3.5t126.5 3.5q25 0 37.5 -22t12.5 -48q0 -30 -17 -43.5t-38.5 -14.5t-49.5 -4t-43 -13q-35 -21 -35 -160l1 -320q0 -21 1 -32q13 -3 39 -3h699q25 0 38 3q1 11 1 32l1 320q0 139 -35 160 q-18 11 -58.5 12.5t-66 13t-25.5 49.5q0 26 12.5 48t37.5 22q44 0 132 -3.5t132 -3.5q43 0 129 3.5t129 3.5q25 0 37.5 -22t12.5 -48q0 -30 -17.5 -44t-40 -14.5t-51.5 -3t-44 -12.5q-35 -23 -35 -161l1 -943q0 -119 34 -140q16 -10 46 -13.5t53.5 -4.5t41.5 -15.5t18 -44.5 q0 -26 -12 -48t-36 -22q-44 0 -132.5 3.5t-133.5 3.5q-44 0 -132 -3.5t-132 -3.5q-24 0 -37 20.5t-13 45.5q0 31 17 46t39 17t51 7t45 15q33 21 33 140l-1 391q0 21 -1 31q-13 4 -50 4h-675q-38 0 -51 -4q-1 -10 -1 -31l-1 -371q0 -142 37 -164q16 -10 48 -13t57 -3.5 t45 -15t20 -45.5q0 -26 -12.5 -48t-36.5 -22q-47 0 -139.5 3.5t-138.5 3.5q-43 0 -128 -3.5t-127 -3.5q-23 0 -35.5 21t-12.5 45q0 30 15.5 45t36 17.5t47.5 7.5t42 15q33 23 33 143l-1 57v813q0 3 0.5 26t0 36.5t-1.5 38.5t-3.5 42t-6.5 36.5t-11 31.5t-16 18 q-15 10 -45 12t-53 2t-41 14t-18 45z" />
+<glyph unicode="&#xf1dd;" horiz-adv-x="1280" d="M24 926q0 166 88 286q88 118 209 159q111 37 417 37h479q25 0 43 -18t18 -43v-73q0 -29 -18.5 -61t-42.5 -32q-50 0 -54 -1q-26 -6 -32 -31q-3 -11 -3 -64v-1152q0 -25 -18 -43t-43 -18h-108q-25 0 -43 18t-18 43v1218h-143v-1218q0 -25 -17.5 -43t-43.5 -18h-108 q-26 0 -43.5 18t-17.5 43v496q-147 12 -245 59q-126 58 -192 179q-64 117 -64 259z" />
+<glyph unicode="&#xf1de;" d="M0 736v64q0 40 28 68t68 28h320q40 0 68 -28t28 -68v-64q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM128 -96v672h256v-672q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM128 960v416q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-416h-256zM512 224v64q0 40 28 68 t68 28h320q40 0 68 -28t28 -68v-64q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM640 64h256v-160q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23v160zM640 448v928q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-928h-256zM1024 992v64q0 40 28 68t68 28h320q40 0 68 -28 t28 -68v-64q0 -40 -28 -68t-68 -28h-320q-40 0 -68 28t-28 68zM1152 -96v928h256v-928q0 -14 -9 -23t-23 -9h-192q-14 0 -23 9t-9 23zM1152 1216v160q0 14 9 23t23 9h192q14 0 23 -9t9 -23v-160h-256z" />
+<glyph unicode="&#xf1e0;" d="M0 640q0 133 93.5 226.5t226.5 93.5q126 0 218 -86l360 180q-2 22 -2 34q0 133 93.5 226.5t226.5 93.5t226.5 -93.5t93.5 -226.5t-93.5 -226.5t-226.5 -93.5q-126 0 -218 86l-360 -180q2 -22 2 -34t-2 -34l360 -180q92 86 218 86q133 0 226.5 -93.5t93.5 -226.5 t-93.5 -226.5t-226.5 -93.5t-226.5 93.5t-93.5 226.5q0 12 2 34l-360 180q-92 -86 -218 -86q-133 0 -226.5 93.5t-93.5 226.5z" />
+<glyph unicode="&#xf1e1;" d="M0 160v960q0 119 84.5 203.5t203.5 84.5h960q119 0 203.5 -84.5t84.5 -203.5v-960q0 -119 -84.5 -203.5t-203.5 -84.5h-960q-119 0 -203.5 84.5t-84.5 203.5zM256 640q0 -88 62.5 -150.5t150.5 -62.5q83 0 145 57l241 -120q-2 -16 -2 -23q0 -88 63 -150.5t151 -62.5 t150.5 62.5t62.5 150.5t-62.5 151t-150.5 63q-84 0 -145 -58l-241 120q2 16 2 23t-2 23l241 120q61 -58 145 -58q88 0 150.5 63t62.5 151t-62.5 150.5t-150.5 62.5t-151 -62.5t-63 -150.5q0 -7 2 -23l-241 -120q-62 57 -145 57q-88 0 -150.5 -62.5t-62.5 -150.5z" />
+<glyph unicode="&#xf1e2;" horiz-adv-x="1792" d="M0 448q0 143 55.5 273.5t150 225t225 150t273.5 55.5q182 0 343 -89l64 64q19 19 45.5 19t45.5 -19l68 -68l243 244l46 -46l-244 -243l68 -68q19 -19 19 -45.5t-19 -45.5l-64 -64q89 -161 89 -343q0 -143 -55.5 -273.5t-150 -225t-225 -150t-273.5 -55.5t-273.5 55.5 t-225 150t-150 225t-55.5 273.5zM170 615q10 -24 35 -34q13 -5 24 -5q42 0 60 40q34 84 98.5 148.5t148.5 98.5q25 11 35 35t0 49t-34 35t-49 0q-108 -44 -191 -127t-127 -191q-10 -25 0 -49zM1376 1472q0 13 9 23q10 9 23 9t23 -9l90 -91q10 -9 10 -22.5t-10 -22.5 q-10 -10 -22 -10q-13 0 -23 10l-91 90q-9 10 -9 23zM1536 1408v96q0 14 9 23t23 9t23 -9t9 -23v-96q0 -14 -9 -23t-23 -9t-23 9t-9 23zM1605 1242.5q0 13.5 10 22.5q9 10 22.5 10t22.5 -10l91 -90q9 -10 9 -23t-9 -23q-11 -9 -23 -9t-23 9l-90 91q-10 9 -10 22.5z M1605 1381.5q0 13.5 10 22.5l90 91q10 9 23 9t23 -9q9 -10 9 -23t-9 -23l-91 -90q-10 -10 -22 -10q-13 0 -23 10q-10 9 -10 22.5zM1632 1312q0 14 9 23t23 9h96q14 0 23 -9t9 -23t-9 -23t-23 -9h-96q-14 0 -23 9t-9 23z" />
+<glyph unicode="&#xf1e3;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e4;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e5;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e6;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e7;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e8;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1e9;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ea;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1eb;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ec;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ed;" horiz-adv-x="1792" />
+<glyph unicode="&#xf1ee;" horiz-adv-x="1792" />
+<glyph unicode="&#xf500;" horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+<glyph horiz-adv-x="1792" />
+</font>
+</defs></svg> \ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.ttf b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.ttf
new file mode 100755
index 00000000000..5cd6cff6d6f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.ttf
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.woff b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.woff
new file mode 100755
index 00000000000..9eaecb37996
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/FontAwesome/fontawesome-webfont.woff
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/LICENSE.txt b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/LICENSE.txt
new file mode 100755
index 00000000000..75b52484ea4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Bold.ttf b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Bold.ttf
new file mode 100755
index 00000000000..fd79d43bea0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Bold.ttf
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Italic.ttf b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Italic.ttf
new file mode 100755
index 00000000000..c90da48ff3b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Italic.ttf
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Light.ttf b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Light.ttf
new file mode 100755
index 00000000000..0d381897da2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Light.ttf
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-LightItalic.ttf b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-LightItalic.ttf
new file mode 100755
index 00000000000..68299c4bc6b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-LightItalic.ttf
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Regular.ttf b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Regular.ttf
new file mode 100755
index 00000000000..db433349b70
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Open_Sans/OpenSans-Regular.ttf
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/OFL.txt b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/OFL.txt
new file mode 100755
index 00000000000..3b859d9138f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/OFL.txt
@@ -0,0 +1,93 @@
+Copyright (c) 2009, Matt McInerney (matt@pixelspread.com),
+with Reserved Font Name Orbitron.
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+http://scripts.sil.org/OFL
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/Orbitron-Regular.ttf b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/Orbitron-Regular.ttf
new file mode 100755
index 00000000000..42563d6b6ef
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Orbitron/Orbitron-Regular.ttf
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/OFL.txt b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/OFL.txt
new file mode 100755
index 00000000000..ff7febddcb2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/OFL.txt
@@ -0,0 +1,92 @@
+Copyright (c) 2011-2012, Vernon Adams (vern@newtypography.co.uk), with Reserved Font Names 'Oswald'
+This Font Software is licensed under the SIL Open Font License, Version 1.1.
+This license is copied below, and is also available with a FAQ at:
+http://scripts.sil.org/OFL
+
+
+-----------------------------------------------------------
+SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
+-----------------------------------------------------------
+
+PREAMBLE
+The goals of the Open Font License (OFL) are to stimulate worldwide
+development of collaborative font projects, to support the font creation
+efforts of academic and linguistic communities, and to provide a free and
+open framework in which fonts may be shared and improved in partnership
+with others.
+
+The OFL allows the licensed fonts to be used, studied, modified and
+redistributed freely as long as they are not sold by themselves. The
+fonts, including any derivative works, can be bundled, embedded,
+redistributed and/or sold with any software provided that any reserved
+names are not used by derivative works. The fonts and derivatives,
+however, cannot be released under any other type of license. The
+requirement for fonts to remain under this license does not apply
+to any document created using the fonts or their derivatives.
+
+DEFINITIONS
+"Font Software" refers to the set of files released by the Copyright
+Holder(s) under this license and clearly marked as such. This may
+include source files, build scripts and documentation.
+
+"Reserved Font Name" refers to any names specified as such after the
+copyright statement(s).
+
+"Original Version" refers to the collection of Font Software components as
+distributed by the Copyright Holder(s).
+
+"Modified Version" refers to any derivative made by adding to, deleting,
+or substituting -- in part or in whole -- any of the components of the
+Original Version, by changing formats or by porting the Font Software to a
+new environment.
+
+"Author" refers to any designer, engineer, programmer, technical
+writer or other person who contributed to the Font Software.
+
+PERMISSION & CONDITIONS
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of the Font Software, to use, study, copy, merge, embed, modify,
+redistribute, and sell modified and unmodified copies of the Font
+Software, subject to the following conditions:
+
+1) Neither the Font Software nor any of its individual components,
+in Original or Modified Versions, may be sold by itself.
+
+2) Original or Modified Versions of the Font Software may be bundled,
+redistributed and/or sold with any software, provided that each copy
+contains the above copyright notice and this license. These can be
+included either as stand-alone text files, human-readable headers or
+in the appropriate machine-readable metadata fields within text or
+binary files as long as those fields can be easily viewed by the user.
+
+3) No Modified Version of the Font Software may use the Reserved Font
+Name(s) unless explicit written permission is granted by the corresponding
+Copyright Holder. This restriction only applies to the primary font name as
+presented to the users.
+
+4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
+Software shall not be used to promote, endorse or advertise any
+Modified Version, except to acknowledge the contribution(s) of the
+Copyright Holder(s) and the Author(s) or with their explicit written
+permission.
+
+5) The Font Software, modified or unmodified, in part or in whole,
+must be distributed entirely under this license, and must not be
+distributed under any other license. The requirement for fonts to
+remain under this license does not apply to any document created
+using the Font Software.
+
+TERMINATION
+This license becomes null and void if any of the above conditions are
+not met.
+
+DISCLAIMER
+THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
+OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
+COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
+DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
+OTHER DEALINGS IN THE FONT SOFTWARE.
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/Oswald-Regular.ttf b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/Oswald-Regular.ttf
new file mode 100755
index 00000000000..0798e241955
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/fonts/Oswald/Oswald-Regular.ttf
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-buildfail.ico b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-buildfail.ico
new file mode 100644
index 00000000000..8fdb76e344a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-buildfail.ico
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-fail.ico b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-fail.ico
new file mode 100644
index 00000000000..e028baefaba
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-fail.ico
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-ok.ico b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-ok.ico
new file mode 100644
index 00000000000..19f0e173de8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-ok.ico
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-panic.ico b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-panic.ico
new file mode 100644
index 00000000000..46b1bd085a0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/ico/goconvey-panic.ico
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/composer.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/composer.js
new file mode 100644
index 00000000000..7ddb0c8d01d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/composer.js
@@ -0,0 +1,171 @@
+var composer = {
+ tab: "\t",
+ template: "",
+ isFunc: function(scope)
+ {
+ if (!scope.title || typeof scope.depth === 'undefined')
+ return false;
+
+ return scope.title.indexOf("Test") === 0 && scope.depth === 0;
+ },
+ discardLastKey: false
+};
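+
+// For example (behavior implied by isFunc above): a scope like
+// { title: "TestSubtraction", depth: 0 } is treated as a top-level Go test
+// function, while { title: "subtracts two numbers", depth: 1 } is not.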
+
+
+$(function()
+{
+ // Begin layout sizing
+ var headerHeight = $('header').outerHeight();
+ var padding = $('#input, #output').css('padding-top').replace("px", "") * 2 + 1;
+ var outputPlaceholder = $('#output').text();
+
+ $(window).resize(function()
+ {
+ $('#input, #output').height($(window).height() - headerHeight - padding);
+ });
+
+ $(window).resize();
+ // End layout sizing
+
+
+ $('#input').keydown(function(e)
+ {
+ // 13=Enter, 16=Shift
+ composer.discardLastKey = e.keyCode === 13
+ || e.keyCode === 16;
+ }).keyup(function(e)
+ {
+ if (!composer.discardLastKey)
+ generate($(this).val());
+ });
+
+ composer.template = $('#tpl-convey').text();
+
+ tabOverride.set(document.getElementById('input'));
+ $('#input').focus();
+});
+
+
+
+// Begin Markup.js custom pipes
+Mark.pipes.recursivelyRender = function(val)
+{
+ return !val || val.length === 0 ? "\n" : Mark.up(composer.template, val);
+}
+
+Mark.pipes.indent = function(val)
+{
+ return new Array(val + 1).join("\t");
+}
+
+Mark.pipes.notTestFunc = function(scope)
+{
+ return !composer.isFunc(scope);
+}
+
+Mark.pipes.safeFunc = function(val)
+{
+ return val.replace(/[^a-z0-9_]/gi, '');
+}
+
+Mark.pipes.properCase = function(str)
+{
+ if (str.length === 0)
+ return "";
+
+ str = str.charAt(0).toUpperCase() + str.substr(1);
+
+ if (str.length < 2)
+ return str;
+
+ return str.replace(/[\s_][a-z]+/g, function(txt)
+ {
+ return txt.charAt(0)
+ + txt.charAt(1).toUpperCase()
+ + txt.substr(2).toLowerCase();
+ });
+}
+
+Mark.pipes.showImports = function(item)
+{
+	// Guard against a missing scope; note that generate() also prepends the
+	// import block itself, so this pipe only matters if the template invokes it.
+	if (item && item.title === "(root)" && item.stories.length > 0)
+		return 'import (\n\t"testing"\n\t. "github.com/smartystreets/goconvey/convey"\n)\n';
+	else
+		return "";
+};
+// End Markup.js custom pipes
+
+
+function generate(input)
+{
+ var root = parseInput(input);
+ $('#output').text(Mark.up(composer.template, root.stories));
+ if (root.stories.length > 0 && root.stories[0].title.substr(0, 4) === "Test")
+ $('#output').prepend('import (\n\t"testing"\n\t. "github.com/smartystreets/goconvey/convey"\n)\n\n');
+}
+
+function parseInput(input)
+{
+	var lines = input.split("\n");
+
+	if (!lines)
+		return;
+
+	var root = {
+		title: "(root)",
+		stories: []
+	};
+
+	for (var i in lines)
+	{
+		var line = lines[i];
+		var lineText = $.trim(line);
+
+		if (!lineText)
+			continue;
+
+		// Figure out how deep to put this story
+		var indent = line.match(new RegExp("^" + composer.tab + "+"));
+		var tabs = indent ? indent[0].length / composer.tab.length : 0;
+
+		// Starting at root, traverse into the right spot in the arrays
+		var curScope = root, prevScope = root;
+		for (var j = 0; j < tabs && curScope.stories.length > 0; j++)
+		{
+			curScope = curScope.stories[curScope.stories.length - 1];
+			prevScope = curScope;
+		}
+
+		// Don't go crazy, though! (avoid excessive indentation)
+		if (tabs > curScope.depth + 1)
+			tabs = curScope.depth + 1;
+
+		// Only top-level Convey() calls need the *testing.T object passed in
+		var showT = composer.isFunc(prevScope)
+			|| (!composer.isFunc(curScope)
+				&& tabs === 0);
+
+		// Save the story at this scope
+		curScope.stories.push({
+			title: lineText.replace(/"/g, "\\\""), // escape quotes
+			stories: [],
+			depth: tabs,
+			showT: showT
+		});
+	}
+
+	return root;
+}
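+
+// Illustrative sketch of parseInput's result (shape inferred from the code
+// above): for the input "subtraction\n\tsubtracts two numbers" it returns
+// roughly:
+//
+//   { title: "(root)", stories: [
+//       { title: "subtraction", depth: 0, showT: true, stories: [
+//           { title: "subtracts two numbers", depth: 1, showT: false,
+//             stories: [] }
+//       ] }
+//   ] }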
+
+function suppress(event)
+{
+ if (!event)
+ return false;
+ if (event.preventDefault)
+ event.preventDefault();
+ if (event.stopPropagation)
+ event.stopPropagation();
+ event.cancelBubble = true;
+ return false;
+}
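+
+// Typical call site: click handlers return suppress(event) to cancel the
+// default action and stop propagation; the wireup() handlers in goconvey.js
+// use it the same way.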
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/config.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/config.js
new file mode 100644
index 00000000000..0ca1e457bd4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/config.js
@@ -0,0 +1,15 @@
+// Configure the GoConvey web UI client in here
+
+convey.config = {
+
+ // Install new themes by adding them here; the first one will be default
+ themes: {
+ "dark": { name: "Dark", filename: "dark.css", coverage: "hsla({{hue}}, 75%, 30%, .5)" },
+ "dark-bigtext": { name: "Dark-BigText", filename: "dark-bigtext.css", coverage: "hsla({{hue}}, 75%, 30%, .5)" },
+ "light": { name: "Light", filename: "light.css", coverage: "hsla({{hue}}, 62%, 75%, 1)" }
+ },
+
+ // Path to the themes (end with forward-slash)
+ themePath: "/resources/css/themes/"
+
+};
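+
+// Hypothetical example (not part of the stock config): to register a theme
+// named "solarized", drop solarized.css into the themePath directory and add
+// an entry alongside the ones above, e.g.
+//
+//   "solarized": { name: "Solarized", filename: "solarized.css",
+//                  coverage: "hsla({{hue}}, 50%, 40%, .6)" }
+//
+// loadTheme('solarized') would then be able to apply it.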
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/convey.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/convey.js
new file mode 100644
index 00000000000..b4e6b525eca
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/convey.js
@@ -0,0 +1,46 @@
+var convey = {
+
+ // *** Don't edit in here unless you're brave ***
+
+ statuses: { // contains some constants related to overall test status
+		pass: { class: 'ok', text: "Pass" },	// class name must match the one in the favicon file name (e.g. goconvey-ok.ico)
+ fail: { class: 'fail', text: "Fail" },
+ panic: { class: 'panic', text: "Panic" },
+ buildfail: { class: 'buildfail', text: "Build Failure" }
+ },
+ frameCounter: 0, // gives each frame a unique ID
+ maxHistory: 20, // how many tests to keep in the history
+ notif: undefined, // the notification currently being displayed
+ notifTimer: undefined, // the timer that clears the notifications automatically
+ poller: new Poller(), // the server poller
+ status: "", // what the _server_ is currently doing (not overall test results)
+ overallClass: "", // class name of the "overall" status banner
+ theme: "", // theme currently being used
+ packageStates: {}, // packages manually collapsed or expanded during this page's lifetime
+ uiEffects: true, // whether visual effects are enabled
+ framesOnSamePath: 0, // number of consecutive frames on this same watch path
+ layout: {
+ selClass: "sel", // CSS class when an element is "selected"
+ header: undefined, // container element of the header area (overall, controls)
+ frame: undefined, // container element of the main body area (above footer)
+ footer: undefined // container element of the footer (stuck to bottom)
+ },
+ history: [], // complete history of states (test results and aggregated data), including the current one
+ moments: {}, // elements that display time relative to the current time, keyed by ID, with the moment() as a value
+	intervals: {},		// intervals that execute periodically
+ intervalFuncs: { // functions executed by each interval in convey.intervals
+ time: function()
+ {
+ var t = new Date();
+ var h = zerofill(t.getHours(), 2);
+ var m = zerofill(t.getMinutes(), 2);
+ var s = zerofill(t.getSeconds(), 2);
+ $('#time').text(h + ":" + m + ":" + s);
+ },
+ momentjs: function()
+ {
+ for (var id in convey.moments)
+ $('#'+id).html(convey.moments[id].fromNow());
+ }
+ }
+};
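+
+// Presumed wiring (the registration itself lives elsewhere in the client):
+// each function in convey.intervalFuncs is driven by a timer stored in
+// convey.intervals, e.g.
+//
+//   convey.intervals.time = setInterval(convey.intervalFuncs.time, 1000);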
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/goconvey.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/goconvey.js
new file mode 100644
index 00000000000..3bc12c5a7cd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/goconvey.js
@@ -0,0 +1,1322 @@
+$(init);
+
+$(window).load(function()
+{
+ // Things may shift after all the elements (images/fonts) are loaded
+ // In Chrome, calling reframe() doesn't work (maybe a quirk); we need to trigger resize
+ $(window).resize();
+});
+
+function init()
+{
+ log("Welcome to GoConvey!");
+ log("Initializing interface");
+ convey.overall = emptyOverall();
+ loadTheme();
+ $('body').show();
+ initPoller();
+ wireup();
+ latest();
+}
+
+function loadTheme(thmID)
+{
+ var defaultTheme = "dark";
+ var linkTagId = "themeRef";
+
+ if (!thmID)
+ thmID = get('theme') || defaultTheme;
+
+ log("Initializing theme: " + thmID);
+
+ if (!convey.config.themes[thmID])
+ {
+		var replacement = Object.keys(convey.config.themes)[0] || defaultTheme;
+ log("NOTICE: Could not find '" + thmID + "' theme; defaulting to '" + replacement + "'");
+ thmID = replacement;
+ }
+
+ convey.theme = thmID;
+ save('theme', convey.theme);
+
+ var linkTag = $('#'+linkTagId);
+ var fullPath = convey.config.themePath
+ + convey.config.themes[convey.theme].filename;
+
+ if (linkTag.length === 0)
+ {
+ $('head').append('<link rel="stylesheet" href="'
+ + fullPath + '" id="themeRef">');
+ }
+ else
+ linkTag.attr('href', fullPath);
+
+ colorizeCoverageBars();
+}
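+
+// Annotation (not upstream): loadTheme() assumes convey.config (defined in a
+// separate config script) has roughly this shape; the keys shown here are
+// illustrative, not authoritative:
+//
+//   convey.config = {
+//       themePath: "/resources/css/themes/",
+//       themes: { dark: { name: "Dark", filename: "dark.css" } }
+//   };
+//   loadTheme("dark"); // persists the choice and swaps the #themeRef <link>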
+
+function initPoller()
+{
+ $(convey.poller).on('serverstarting', function(event)
+ {
+ log("Server is starting...");
+ convey.status = "starting";
+ showServerDown("Server starting");
+ $('#run-tests').addClass('spin-slowly disabled');
+ });
+
+ $(convey.poller).on('pollsuccess', function(event, data)
+ {
+ if (convey.status !== "starting")
+ hideServerDown();
+
+ // These two if statements determine if the server is now busy
+ // (and wasn't before) or is not busy (regardless of whether it was before)
+ if ((!convey.status || convey.status === "idle")
+ && data.status && data.status !== "idle")
+ $('#run-tests').addClass('spin-slowly disabled');
+ else if (convey.status !== "idle" && data.status === "idle")
+ {
+ $('#run-tests').removeClass('spin-slowly disabled');
+ }
+
+ switch (data.status)
+ {
+ case "executing":
+ $(convey.poller).trigger('serverexec', data);
+ break;
+ case "idle":
+ $(convey.poller).trigger('serveridle', data);
+ break;
+ }
+
+ convey.status = data.status;
+ });
+
+ $(convey.poller).on('pollfail', function(event, data)
+ {
+ log("Poll failed; server down");
+ convey.status = "down";
+ showServerDown("Server down");
+ });
+
+ $(convey.poller).on('serverexec', function(event, data)
+ {
+ log("Server status: executing");
+ $('.favicon').attr('href', '/favicon.ico'); // indicates running tests
+ });
+
+ $(convey.poller).on('serveridle', function(event, data)
+ {
+ log("Server status: idle");
+ log("Tests have finished executing");
+ latest();
+ });
+
+ convey.poller.start();
+}
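+
+// Annotation (not upstream): the handlers above rely on jQuery's support for
+// binding and triggering custom events on a plain object. The same pattern,
+// self-contained:
+//
+//   var emitter = {};
+//   $(emitter).on('pollsuccess', function(event, data) { log(data.status); });
+//   $(emitter).trigger('pollsuccess', { status: "idle" });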
+
+function wireup()
+{
+ log("Wireup");
+
+ customMarkupPipes();
+
+ var themes = [];
+ for (var k in convey.config.themes)
+ themes.push({ id: k, name: convey.config.themes[k].name });
+ $('#theme').html(render('tpl-theme-enum', themes));
+
+ enumSel("theme", convey.theme);
+
+ loadSettingsFromStorage();
+
+ $('#stories').on('click', '.toggle-all-pkg', function(event)
+ {
+ if ($(this).closest('.story-pkg').data('pkg-state') === "expanded")
+ collapseAll();
+ else
+ expandAll();
+ return suppress(event);
+ });
+
+ // Wireup the settings switches
+ $('.enum#theme').on('click', 'li:not(.sel)', function()
+ {
+ loadTheme($(this).data('theme'));
+ });
+ $('.enum#pkg-expand-collapse').on('click', 'li:not(.sel)', function()
+ {
+ var newSetting = $(this).data('pkg-expand-collapse');
+ convey.packageStates = {};
+ save('pkg-expand-collapse', newSetting);
+ if (newSetting === "expanded")
+ expandAll();
+ else
+ collapseAll();
+ });
+ $('.enum#show-debug-output').on('click', 'li:not(.sel)', function()
+ {
+ var newSetting = $(this).data('show-debug-output');
+ save('show-debug-output', newSetting);
+ if (newSetting === "show")
+ $('.story-line-desc .message').show();
+ else
+ $('.story-line-desc .message').hide();
+ });
+ $('.enum#ui-effects').on('click', 'li:not(.sel)', function()
+ {
+ var newSetting = $(this).data('ui-effects');
+ convey.uiEffects = newSetting;
+ save('ui-effects', newSetting);
+ });
+ // End settings wireup
+
+ convey.layout.header = $('header').first();
+ convey.layout.frame = $('.frame').first();
+ convey.layout.footer = $('footer').last();
+
+ updateWatchPath();
+
+ $('#path').change(function()
+ {
+ // Updates the watched directory with the server and makes sure it exists
+ var tb = $(this);
+ var newpath = encodeURIComponent($.trim(tb.val()));
+ $.post('/watch?root='+newpath)
+ .done(function() { tb.removeClass('error'); })
+ .fail(function() { tb.addClass('error'); });
+ convey.framesOnSamePath = 1;
+ });
+
+ $('#run-tests').click(function()
+ {
+ var self = $(this);
+ if (self.hasClass('spin-slowly') || self.hasClass('disabled'))
+ return;
+ log("Test run invoked from web UI");
+ $.get("/execute");
+ });
+
+ $('#play-pause').click(function()
+ {
+ $.get('/pause');
+
+ if ($(this).hasClass(convey.layout.selClass))
+ {
+ // Un-pausing
+ if (!$('footer .replay').is(':visible'))
+ $('footer .recording').show();
+ $('footer .paused').hide();
+ log("Resuming auto-execution of tests");
+ }
+ else
+ {
+ // Pausing
+ $('footer .recording').hide();
+ $('footer .paused').show();
+ log("Pausing auto-execution of tests");
+ }
+
+ $(this).toggleClass("throb " + convey.layout.selClass);
+ });
+
+ $('#toggle-notif').click(function()
+ {
+ log("Turning notifications " + (notif() ? "off" : "on"));
+ $(this).toggleClass("fa-bell-o fa-bell " + convey.layout.selClass);
+ save('notifications', !notif());
+
+ if (notif() && 'Notification' in window)
+ {
+ if (Notification.permission !== 'denied')
+ {
+ Notification.requestPermission(function(per)
+ {
+ if (!('permission' in Notification))
+ Notification.permission = per;
+ });
+ }
+ else
+ log("Permission denied to show desktop notification");
+ }
+ });
+
+ $('#show-history').click(function()
+ {
+ toggle($('.history'), $(this));
+ });
+
+ $('#show-settings').click(function()
+ {
+ toggle($('.settings'), $(this));
+ });
+
+ $('#show-gen').click(function() {
+ var writer = window.open("/composer.html");
+ if (window.focus)
+ writer.focus();
+ });
+
+ // Wire-up the tipsy tooltips
+ $('.controls li, .pkg-cover-name').tipsy({ live: true });
+ $('footer .replay').tipsy({ live: true, gravity: 'e' });
+ $('#path').tipsy({ delayIn: 500 });
+ $('.ignore').tipsy({ live: true, gravity: $.fn.tipsy.autoNS });
+ $('.disabled').tipsy({ live: true, gravity: $.fn.tipsy.autoNS });
+ $('#logo').tipsy({ gravity: 'w' });
+
+
+ $('.toggler').not('.narrow').prepend('<i class="fa fa-angle-up fa-lg"></i>');
+ $('.toggler.narrow').prepend('<i class="fa fa-angle-down fa-lg"></i>');
+
+ $('.toggler').not('.narrow').click(function()
+ {
+ var target = $('#' + $(this).data('toggle'));
+ $('.fa-angle-down, .fa-angle-up', this).toggleClass('fa-angle-down fa-angle-up');
+ target.toggle();
+ });
+
+ $('.toggler.narrow').click(function()
+ {
+ var target = $('#' + $(this).data('toggle'));
+ $('.fa-angle-down, .fa-angle-up', this).toggleClass('fa-angle-down fa-angle-up');
+ target.toggleClass('hide-narrow show-narrow');
+ });
+
+ // Enumerations are horizontal lists where one item can be selected at a time
+ $('.enum').on('click', 'li', enumSel);
+
+ // Start ticking time
+ convey.intervals.time = setInterval(convey.intervalFuncs.time, 1000);
+ convey.intervals.momentjs = setInterval(convey.intervalFuncs.momentjs, 5000);
+ convey.intervalFuncs.time();
+
+ // Ignore/un-ignore package
+ $('#stories').on('click', '.fa.ignore', function(event)
+ {
+ var pkg = $(this).data('pkg');
+ if ($(this).hasClass('disabled'))
+ return;
+ else if ($(this).hasClass('unwatch'))
+ $.get("/ignore", { paths: pkg });
+ else
+ $.get("/reinstate", { paths: pkg });
+ $(this).toggleClass('watch unwatch fa-eye fa-eye-slash clr-red');
+ return suppress(event);
+ });
+
+ // Show "All" link when hovering the toggler on packages in the stories
+ $('#stories').on({
+ mouseenter: function() { $('.toggle-all-pkg', this).stop().show('fast'); },
+ mouseleave: function() { $('.toggle-all-pkg', this).stop().hide('fast'); }
+ }, '.pkg-toggle-container');
+
+ // Toggle a package in the stories when clicked
+ $('#stories').on('click', '.story-pkg', function(event)
+ {
+ togglePackage(this, true);
+ return suppress(event);
+ });
+
+ // Select a story line when it is clicked
+ $('#stories').on('click', '.story-line', function()
+ {
+ $('.story-line-sel').not(this).removeClass('story-line-sel');
+ $(this).toggleClass('story-line-sel');
+ });
+
+ // Render a frame from the history when clicked
+ $('.history .container').on('click', '.item', function(event)
+ {
+ var frame = getFrame($(this).data("frameid"));
+ changeStatus(frame.overall.status, true);
+ renderFrame(frame);
+ $(this).addClass('selected');
+
+ // Update current status down in the footer
+ if ($(this).is(':first-child'))
+ {
+ // Now on current frame
+ $('footer .replay').hide();
+
+ if ($('#play-pause').hasClass(convey.layout.selClass)) // Was/is paused
+ $('footer .paused').show();
+ else
+ $('footer .recording').show(); // Was/is recording
+ }
+ else
+ {
+ $('footer .recording, footer .replay').hide();
+ $('footer .replay').show();
+ }
+ return suppress(event);
+ });
+
+ $('footer').on('click', '.replay', function()
+ {
+ // Clicking "REPLAY" in the corner should bring them back to the current frame
+ // and hide, if visible, the history panel for convenience
+ $('.history .item:first-child').click();
+ if ($('#show-history').hasClass('sel'))
+ $('#show-history').click();
+ });
+
+ // Keyboard shortcuts!
+ $(document).keydown(function(e)
+ {
+ if (e.ctrlKey || e.metaKey || e.shiftKey)
+ return;
+
+ switch (e.keyCode)
+ {
+ case 67: // c
+ $('#show-gen').click();
+ break;
+ case 82: // r
+ $('#run-tests').click();
+ break;
+ case 78: // n
+ $('#toggle-notif').click();
+ break;
+ case 87: // w
+ $('#path').focus();
+ break;
+ case 80: // p
+ $('#play-pause').click();
+ break;
+ }
+
+ return suppress(e);
+ });
+ $('body').on('keydown', 'input, textarea, select', function(e)
+ {
+ // If user is typing something, don't let this event bubble
+ // up to the document to annoyingly fire keyboard shortcuts
+ e.stopPropagation();
+ });
+
+ // Keep everything positioned and sized properly on window resize
+ reframe();
+ $(window).resize(reframe);
+}
+
+function expandAll()
+{
+ $('.story-pkg').each(function() { expandPackage($(this).data('pkg')); });
+}
+
+function collapseAll()
+{
+ $('.story-pkg').each(function() { collapsePackage($(this).data('pkg')); });
+}
+
+function expandPackage(pkgId)
+{
+ var pkg = $('.story-pkg.pkg-'+pkgId);
+ var rows = $('.story-line.pkg-'+pkgId);
+
+ pkg.data('pkg-state', "expanded").addClass('expanded').removeClass('collapsed');
+
+ $('.pkg-toggle', pkg)
+ .addClass('fa-minus-square-o')
+ .removeClass('fa-plus-square-o');
+
+ rows.show();
+}
+
+function collapsePackage(pkgId)
+{
+ var pkg = $('.story-pkg.pkg-'+pkgId);
+ var rows = $('.story-line.pkg-'+pkgId);
+
+ pkg.data('pkg-state', "collapsed").addClass('collapsed').removeClass('expanded');
+
+ $('.pkg-toggle', pkg)
+ .addClass('fa-plus-square-o')
+ .removeClass('fa-minus-square-o');
+
+ rows.hide();
+}
+
+function togglePackage(storyPkgElem)
+{
+ var pkgId = $(storyPkgElem).data('pkg');
+ if ($(storyPkgElem).data('pkg-state') === "expanded")
+ {
+ collapsePackage(pkgId);
+ convey.packageStates[$(storyPkgElem).data('pkg-name')] = "collapsed";
+ }
+ else
+ {
+ expandPackage(pkgId);
+ convey.packageStates[$(storyPkgElem).data('pkg-name')] = "expanded";
+ }
+}
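+
+// Annotation (not upstream): the click handler wired up earlier invokes
+// togglePackage(this, true); the extra argument is ignored by this
+// signature. convey.packageStates records manual toggles so renderFrame()
+// can preserve them across test runs.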
+
+function loadSettingsFromStorage()
+{
+ var pkgExpCollapse = get("pkg-expand-collapse");
+ if (!pkgExpCollapse)
+ {
+ pkgExpCollapse = "expanded";
+ save("pkg-expand-collapse", pkgExpCollapse);
+ }
+ enumSel("pkg-expand-collapse", pkgExpCollapse);
+
+ var showDebugOutput = get("show-debug-output");
+ if (!showDebugOutput)
+ {
+ showDebugOutput = "show";
+ save("show-debug-output", showDebugOutput);
+ }
+ enumSel("show-debug-output", showDebugOutput);
+
+ var uiEffects = get("ui-effects");
+ if (uiEffects === null)
+ uiEffects = "true";
+ convey.uiEffects = uiEffects === "true";
+ enumSel("ui-effects", uiEffects);
+
+ if (notif())
+ $('#toggle-notif').toggleClass("fa-bell-o fa-bell " + convey.layout.selClass);
+}
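+
+// Annotation (not upstream): get()/save() (defined further down) persist
+// everything in localStorage as strings, which is why uiEffects above is
+// compared against the string "true". For example:
+//
+//   save('ui-effects', false);     // stored as the string "false"
+//   get('ui-effects') === "false"; // true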
+
+function latest()
+{
+ log("Fetching latest test results");
+ $.getJSON("/latest", process);
+}
+
+function process(data, status, jqxhr)
+{
+ if (!data || !data.Revision)
+ {
+ log("No data received or revision timestamp was missing");
+ return;
+ }
+
+ if (data.Paused && !$('#play-pause').hasClass(convey.layout.selClass))
+ {
+ $('footer .recording').hide();
+ $('footer .paused').show();
+ $('#play-pause').toggleClass("throb " + convey.layout.selClass);
+ }
+
+ if (current() && data.Revision === current().results.Revision)
+ {
+ log("No changes");
+ changeStatus(current().overall.status); // re-assures that status is unchanged
+ return;
+ }
+
+
+ // Put the new frame in the queue so we can use current() to get to it
+ convey.history.push(newFrame());
+ convey.framesOnSamePath++;
+
+ // Store the raw results in our frame
+ current().results = data;
+
+ log("Updating watch path");
+ updateWatchPath();
+
+ // Remove all templated items from the DOM as we'll
+ // replace them with new ones; also remove tipsy tooltips
+ // that may have lingered around
+ $('.templated, .tipsy').remove();
+
+ var uniqueID = 0;
+ var coverageAvgHelper = { countedPackages: 0, coverageSum: 0 };
+ var packages = {
+ tested: [],
+ ignored: [],
+ coverage: {},
+ nogofiles: [],
+ notestfiles: [],
+ notestfn: []
+ };
+
+ log("Compiling package statistics");
+
+ // Look for failures and panics through the packages->tests->stories...
+ for (var i in data.Packages)
+ {
+ var pkg = makeContext(data.Packages[i]);
+ current().overall.duration += pkg.Elapsed;
+ pkg._id = uniqueID++;
+
+ if (pkg.Outcome === "build failure")
+ {
+ current().overall.failedBuilds++;
+ current().failedBuilds.push(pkg);
+ continue;
+ }
+
+
+ if (pkg.Outcome === "no go code")
+ packages.nogofiles.push(pkg);
+ else if (pkg.Outcome === "no test files")
+ packages.notestfiles.push(pkg);
+ else if (pkg.Outcome === "no test functions")
+ packages.notestfn.push(pkg);
+ else if (pkg.Outcome === "ignored" || pkg.Outcome === "disabled")
+ packages.ignored.push(pkg);
+ else
+ {
+ if (pkg.Coverage >= 0)
+ coverageAvgHelper.coverageSum += pkg.Coverage;
+ coverageAvgHelper.countedPackages++;
+ packages.coverage[pkg.PackageName] = pkg.Coverage;
+ packages.tested.push(pkg);
+ }
+
+
+ for (var j in pkg.TestResults)
+ {
+ var test = makeContext(pkg.TestResults[j]);
+ test._id = uniqueID++;
+ test._pkgid = pkg._id;
+ test._pkg = pkg.PackageName;
+
+ if (test.Stories.length === 0)
+ {
+ // Here we've got ourselves a classic Go test,
+ // not a GoConvey test that has stories and assertions
+ // so we'll treat this whole test as a single assertion
+ current().overall.assertions++;
+
+ if (test.Error)
+ {
+ test._status = convey.statuses.panic;
+ pkg._panicked++;
+ test._panicked++;
+ current().assertions.panicked.push(test);
+ }
+ else if (test.Passed === false)
+ {
+ test._status = convey.statuses.fail;
+ pkg._failed++;
+ test._failed++;
+ current().assertions.failed.push(test);
+ }
+ else if (test.Skipped)
+ {
+ test._status = convey.statuses.skipped;
+ pkg._skipped++;
+ test._skipped++;
+ current().assertions.skipped.push(test);
+ }
+ else
+ {
+ test._status = convey.statuses.pass;
+ pkg._passed++;
+ test._passed++;
+ current().assertions.passed.push(test);
+ }
+ }
+ else
+ test._status = convey.statuses.pass;
+
+ var storyPath = [{ Depth: -1, Title: test.TestName, _id: test._id }]; // Maintains the current assertion's story as we iterate
+
+ for (var k in test.Stories)
+ {
+ var story = makeContext(test.Stories[k]);
+
+ story._id = uniqueID;
+ story._pkgid = pkg._id;
+ current().overall.assertions += story.Assertions.length;
+
+ // Establish the current story path so we can report the context
+ // of failures and panics more conveniently at the top of the page
+ if (storyPath.length > 0)
+ for (var x = storyPath[storyPath.length - 1].Depth; x >= test.Stories[k].Depth; x--)
+ storyPath.pop();
+ storyPath.push({ Depth: test.Stories[k].Depth, Title: test.Stories[k].Title, _id: test.Stories[k]._id });
+
+
+ for (var l in story.Assertions)
+ {
+ var assertion = story.Assertions[l];
+ assertion._id = uniqueID;
+ assertion._pkg = pkg.PackageName;
+ assertion._pkgId = pkg._id;
+ assertion._failed = !!assertion.Failure;
+ assertion._panicked = !!assertion.Error;
+ assertion._maxDepth = storyPath[storyPath.length - 1].Depth;
+ $.extend(assertion._path = [], storyPath);
+
+ if (assertion.Failure)
+ {
+ current().assertions.failed.push(assertion);
+ pkg._failed++;
+ test._failed++;
+ story._failed++;
+ }
+ if (assertion.Error)
+ {
+ current().assertions.panicked.push(assertion);
+ pkg._panicked++;
+ test._panicked++;
+ story._panicked++;
+ }
+ if (assertion.Skipped)
+ {
+ current().assertions.skipped.push(assertion);
+ pkg._skipped++;
+ test._skipped++;
+ story._skipped++;
+ }
+ if (!assertion.Failure && !assertion.Error && !assertion.Skipped)
+ {
+ current().assertions.passed.push(assertion);
+ pkg._passed++;
+ test._passed++;
+ story._passed++;
+ }
+ }
+
+ assignStatus(story);
+ uniqueID++;
+ }
+
+ if (!test.Passed && !test._failed && !test._panicked)
+ {
+ // Edge case: Developer is using the GoConvey DSL, but maybe
+ // in some cases is using t.Error() instead of So() assertions.
+ // This can be detected, assuming all child stories with
+ // assertions (in this test) are passing.
+ test._status = convey.statuses.fail;
+ pkg._failed++;
+ test._failed++;
+ current().assertions.failed.push(test);
+ }
+ }
+ }
+
+ current().overall.passed = current().assertions.passed.length;
+ current().overall.panics = current().assertions.panicked.length;
+ current().overall.failures = current().assertions.failed.length;
+ current().overall.skipped = current().assertions.skipped.length;
+
+ current().overall.coverage = Math.round((coverageAvgHelper.coverageSum / (coverageAvgHelper.countedPackages || 1)) * 100) / 100;
+ current().overall.duration = Math.round(current().overall.duration * 1000) / 1000;
+
+ // Compute the coverage delta (difference in overall coverage between now and last frame)
+ // Only compare coverage on the same watch path
+ var coverDelta = current().overall.coverage;
+ if (convey.framesOnSamePath > 2)
+ coverDelta = current().overall.coverage - convey.history[convey.history.length - 2].overall.coverage;
+ current().coverDelta = Math.round(coverDelta * 100) / 100;
+
+
+ // Build failures trump panics,
+ // Panics trump failures,
+ // Failures trump pass.
+ if (current().overall.failedBuilds)
+ changeStatus(convey.statuses.buildfail);
+ else if (current().overall.panics)
+ changeStatus(convey.statuses.panic);
+ else if (current().overall.failures)
+ changeStatus(convey.statuses.fail);
+ else
+ changeStatus(convey.statuses.pass);
+
+ // Save our organized package lists
+ current().packages = packages;
+
+ log(" Assertions: " + current().overall.assertions);
+ log(" Passed: " + current().overall.passed);
+ log(" Skipped: " + current().overall.skipped);
+ log(" Failures: " + current().overall.failures);
+ log(" Panics: " + current().overall.panics);
+ log("Build Failures: " + current().overall.failedBuilds);
+ log(" Coverage: " + current().overall.coverage + "% (" + showCoverDelta(current().coverDelta) + ")");
+
+ // Save timestamp when this test was executed
+ convey.moments['last-test'] = moment();
+
+
+
+ // Render... render ALL THE THINGS! (All model/state modifications are DONE!)
+ renderFrame(current());
+ // Now, just finish up miscellaneous UI things
+
+
+ // Add this frame to the history pane
+ var framePiece = render('tpl-history', current());
+ $('.history .container').prepend(framePiece);
+ $('.history .item:first-child').addClass('selected');
+ convey.moments['frame-'+current().id] = moment();
+ if (convey.history.length > convey.maxHistory)
+ {
+ // Delete the oldest frame out of the history pane if we have too many
+ convey.history.splice(0, 1);
+ $('.history .container .item').last().remove();
+ }
+
+ // Now add the momentjs time to the new frame in the history
+ convey.intervalFuncs.momentjs();
+
+ // Show notification, if enabled
+ if (notif())
+ {
+ log("Showing notification");
+ if (convey.notif)
+ {
+ clearTimeout(convey.notifTimer);
+ convey.notif.close();
+ }
+
+ var notifText = notifSummary(current());
+
+ convey.notif = new Notification(notifText.title, {
+ body: notifText.body,
+ icon: $('.favicon').attr('href')
+ });
+
+ convey.notifTimer = setTimeout(function() { convey.notif.close(); }, 5000);
+ }
+
+ // Update title in title bar
+ if (current().overall.passed === current().overall.assertions && current().overall.status.class === "ok")
+ $('title').text("GoConvey (ALL PASS)");
+ else
+ $('title').text("GoConvey [" + current().overall.status.text + "] " + current().overall.passed + "/" + current().overall.assertions);
+
+ // All done!
+ log("Processing complete");
+}
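+
+// Annotation (not upstream): process() reduced to its skeleton, to make the
+// frame lifecycle explicit:
+//
+//   convey.history.push(newFrame());     // 1. allocate a frame
+//   current().results = data;            // 2. attach the raw server results
+//   // 3. classify assertions into passed/failed/panicked/skipped
+//   changeStatus(convey.statuses.pass);  // 4. update the overall banner
+//   renderFrame(current());              // 5. draw everything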
+
+// Updates the entire UI given a frame from the history
+function renderFrame(frame)
+{
+ log("Rendering frame (id: " + frame.id + ")");
+
+ $('#coverage').html(render('tpl-coverage', frame.packages.tested.sort(sortPackages)));
+ $('#ignored').html(render('tpl-ignored', frame.packages.ignored.sort(sortPackages)));
+ $('#nogofiles').html(render('tpl-nogofiles', frame.packages.nogofiles.sort(sortPackages)));
+ $('#notestfiles').html(render('tpl-notestfiles', frame.packages.notestfiles.sort(sortPackages)));
+ $('#notestfn').html(render('tpl-notestfn', frame.packages.notestfn.sort(sortPackages)));
+
+ if (frame.overall.failedBuilds)
+ {
+ $('.buildfailures').show();
+ $('#buildfailures').html(render('tpl-buildfailures', frame.failedBuilds));
+ }
+ else
+ $('.buildfailures').hide();
+
+ if (frame.overall.panics)
+ {
+ $('.panics').show();
+ $('#panics').html(render('tpl-panics', frame.assertions.panicked));
+ }
+ else
+ $('.panics').hide();
+
+
+ if (frame.overall.failures)
+ {
+ $('.failures').show();
+ $('#failures').html(render('tpl-failures', frame.assertions.failed));
+ $(".failure").each(function() {
+ $(this).prettyTextDiff();
+ });
+ }
+ else
+ $('.failures').hide();
+
+ $('#stories').html(render('tpl-stories', frame.packages.tested.sort(sortPackages)));
+ $('#stories').append(render('tpl-stories', frame.packages.ignored.sort(sortPackages)));
+
+ var pkgDefaultView = get('pkg-expand-collapse');
+ $('.story-pkg.expanded').each(function()
+ {
+ if (pkgDefaultView === "collapsed" && convey.packageStates[$(this).data('pkg-name')] !== "expanded")
+ collapsePackage($(this).data('pkg'));
+ });
+
+ redrawCoverageBars();
+
+ $('#assert-count').html("<b>"+frame.overall.assertions+"</b> assertion"
+ + (frame.overall.assertions !== 1 ? "s" : ""));
+ $('#skip-count').html("<b>"+frame.assertions.skipped.length + "</b> skipped");
+ $('#fail-count').html("<b>"+frame.assertions.failed.length + "</b> failed");
+ $('#panic-count').html("<b>"+frame.assertions.panicked.length + "</b> panicked");
+ $('#duration').html("<b>"+frame.overall.duration + "</b>s");
+
+ $('#narrow-assert-count').html("<b>"+frame.overall.assertions+"</b>");
+ $('#narrow-skip-count').html("<b>"+frame.assertions.skipped.length + "</b>");
+ $('#narrow-fail-count').html("<b>"+frame.assertions.failed.length + "</b>");
+ $('#narrow-panic-count').html("<b>"+frame.assertions.panicked.length + "</b>");
+
+ $('.history .item').removeClass('selected');
+
+ if (get('show-debug-output') === "hide")
+ $('.story-line-desc .message').hide();
+
+ log("Rendering finished");
+}
+
+function enumSel(id, val)
+{
+ if (typeof id === "string" && typeof val === "string")
+ {
+ $('.enum#'+id+' > li').each(function()
+ {
+ if ($(this).data(id).toString() === val)
+ {
+ $(this).addClass(convey.layout.selClass).siblings().removeClass(convey.layout.selClass);
+ return false;
+ }
+ });
+ }
+ else
+ $(this).addClass(convey.layout.selClass).siblings().removeClass(convey.layout.selClass);
+}
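+
+// Annotation (not upstream): enumSel() is dual-purpose. Called directly,
+// e.g. enumSel("theme", "dark"), it selects an item programmatically; wired
+// as a click handler via $('.enum').on('click', 'li', enumSel) the string
+// test fails (id is then the event object) and the else branch selects the
+// clicked <li> through $(this).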
+
+function toggle(jqelem, switchelem)
+{
+ var speed = 250;
+ var transition = 'easeInOutQuart';
+ var containerSel = '.container';
+
+ if (!jqelem.is(':visible'))
+ {
+ $(containerSel, jqelem).css('opacity', 0);
+ jqelem.stop().slideDown(speed, transition, function()
+ {
+ if (switchelem)
+ switchelem.toggleClass(convey.layout.selClass);
+ $(containerSel, jqelem).stop().animate({
+ opacity: 1
+ }, speed);
+ reframe();
+ });
+ }
+ else
+ {
+ $(containerSel, jqelem).stop().animate({
+ opacity: 0
+ }, speed, function()
+ {
+ if (switchelem)
+ switchelem.toggleClass(convey.layout.selClass);
+ jqelem.stop().slideUp(speed, transition, function() { reframe(); });
+ });
+ }
+}
+
+function changeStatus(newStatus, isHistoricalFrame)
+{
+ if (!newStatus || !newStatus.class || !newStatus.text)
+ newStatus = convey.statuses.pass;
+
+ var sameStatus = newStatus.class === convey.overallClass;
+
+ // The CSS class .flash and the jQuery UI 'pulsate' effect don't play well together.
+ // This series of callbacks does the flickering/pulsating as well as
+ // enabling/disabling flashing in the proper order so that they don't overlap.
+ // TODO: I suppose the pulsating could also be done with just CSS, maybe...?
+
+ if (convey.uiEffects)
+ {
+ var times = sameStatus ? 3 : 2;
+ var duration = sameStatus ? 500 : 300;
+
+ $('.overall .status').removeClass('flash').effect("pulsate", {times: times}, duration, function()
+ {
+ $(this).text(newStatus.text);
+
+ if (newStatus !== convey.statuses.pass) // only flicker extra when not currently passing
+ {
+ $(this).effect("pulsate", {times: 1}, 300, function()
+ {
+ $(this).effect("pulsate", {times: 1}, 500, function()
+ {
+ if (newStatus === convey.statuses.panic
+ || newStatus === convey.statuses.buildfail)
+ $(this).addClass('flash');
+ else
+ $(this).removeClass('flash');
+ });
+ });
+ }
+ });
+ }
+ else
+ $('.overall .status').text(newStatus.text);
+
+ if (!sameStatus) // change the color
+ $('.overall').switchClass(convey.overallClass, newStatus.class, 1000);
+
+ if (!isHistoricalFrame)
+ current().overall.status = newStatus;
+ convey.overallClass = newStatus.class;
+ $('.favicon').attr('href', '/resources/ico/goconvey-'+newStatus.class+'.ico');
+}
+
+function updateWatchPath()
+{
+ $.get("/watch", function(data)
+ {
+ var newPath = $.trim(data);
+ if (newPath !== $('#path').val())
+ convey.framesOnSamePath = 1;
+ $('#path').val(newPath);
+ });
+}
+
+function notifSummary(frame)
+{
+ var body = frame.overall.passed + " passed, ";
+
+ if (frame.overall.failedBuilds)
+ body += frame.overall.failedBuilds + " build" + (frame.overall.failedBuilds !== 1 ? "s" : "") + " failed, ";
+ if (frame.overall.failures)
+ body += frame.overall.failures + " failed, ";
+ if (frame.overall.panics)
+ body += frame.overall.panics + " panicked, ";
+ body += frame.overall.skipped + " skipped";
+
+ body += "\r\n" + frame.overall.duration + "s";
+
+ if (frame.coverDelta > 0)
+ body += "\r\n↑ coverage (" + showCoverDelta(frame.coverDelta) + ")";
+ else if (frame.coverDelta < 0)
+ body += "\r\n↓ coverage (" + showCoverDelta(frame.coverDelta) + ")";
+
+ return {
+ title: frame.overall.status.text.toUpperCase(),
+ body: body
+ };
+}
+
+function redrawCoverageBars()
+{
+ $('.pkg-cover-bar').each(function()
+ {
+ var pkgName = $(this).data("pkg");
+ var hue = $(this).data("width");
+ var hueDiff = hue;
+
+ if (convey.history.length > 1)
+ {
+ var oldHue = convey.history[convey.history.length - 2].packages.coverage[pkgName] || 0;
+ $(this).width(oldHue + "%");
+ hueDiff = hue - oldHue;
+ }
+
+ $(this).animate({
+ width: "+=" + hueDiff + "%"
+ }, 1250);
+ });
+
+ colorizeCoverageBars();
+}
+
+function colorizeCoverageBars()
+{
+ var colorTpl = convey.config.themes[convey.theme].coverage
+ || "hsla({{hue}}, 75%, 30%, .3)"; //default color template
+
+ $('.pkg-cover-bar').each(function()
+ {
+ var hue = $(this).data("width");
+ $(this).css({
+ background: colorTpl.replace("{{hue}}", hue)
+ });
+ });
+}
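+
+// Annotation (not upstream): each .pkg-cover-bar's data-width attribute does
+// double duty above: it sets the bar's width (as a percentage) and supplies
+// the hue plugged into the theme's HSLA template, so low coverage skews red
+// and high coverage skews green.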
+
+
+function getFrame(id)
+{
+ for (var i in convey.history)
+ if (convey.history[i].id === id)
+ return convey.history[i];
+}
+
+function render(templateID, context)
+{
+ var tpl = $('#' + templateID).text();
+ return $($.trim(Mark.up(tpl, context)));
+}
+
+function reframe()
+{
+ var heightBelowHeader = $(window).height() - convey.layout.header.outerHeight();
+ var middleHeight = heightBelowHeader - convey.layout.footer.outerHeight();
+ convey.layout.frame.height(middleHeight);
+
+ var pathWidth = $(window).width() - $('#logo').outerWidth() - $('#control-buttons').outerWidth() - 10;
+ $('#path-container').width(pathWidth);
+}
+
+function notif()
+{
+ return get('notifications') === "true"; // stored as strings
+}
+
+function showServerDown(message)
+{
+ $('.server-down .notice-message').text(message);
+ $('.server-down').show();
+ $('.server-not-down').hide();
+ reframe();
+}
+
+function hideServerDown()
+{
+ $('.server-down').hide();
+ $('.server-not-down').show();
+ reframe();
+}
+
+function log(msg)
+{
+ var jqLog = $('#log');
+ if (jqLog.length > 0)
+ {
+ var t = new Date();
+ var h = zerofill(t.getHours(), 2);
+ var m = zerofill(t.getMinutes(), 2);
+ var s = zerofill(t.getSeconds(), 2);
+ var ms = zerofill(t.getMilliseconds(), 3);
+ var date = h + ":" + m + ":" + s + "." + ms;
+
+ $(jqLog).append(render('tpl-log-line', { time: date, msg: msg }));
+ $(jqLog).parent('.col').scrollTop(jqLog[0].scrollHeight);
+ }
+ else
+ console.log(msg);
+}
+
+function zerofill(val, count)
+{
+ // Cheers to http://stackoverflow.com/a/9744576/1048862
+ var pad = new Array(1 + count).join('0');
+ return (pad + val).slice(-pad.length);
+}
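+
+// Annotation (not upstream): zerofill left-pads to a fixed width, e.g.
+//   zerofill(7, 3) === "007" and zerofill(5, 2) === "05".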
+
+// Sorts packages ascending by only the last part of their name
+// Can be passed into Array.sort()
+function sortPackages(a, b)
+{
+ var aPkg = splitPathName(a.PackageName);
+ var bPkg = splitPathName(b.PackageName);
+
+ if (aPkg.parts.length === 0 || bPkg.parts.length === 0)
+ return 0;
+
+ var aName = aPkg.parts[aPkg.parts.length - 1].toLowerCase();
+ var bName = bPkg.parts[bPkg.parts.length - 1].toLowerCase();
+
+ if (aName < bName)
+ return -1;
+ else if (aName > bName)
+ return 1;
+ else
+ return 0;
+
+ /*
+ MEMO: Use to sort by entire package name:
+ if (a.PackageName < b.PackageName) return -1;
+ else if (a.PackageName > b.PackageName) return 1;
+ else return 0;
+ */
+}
+
+function get(key)
+{
+ var val = localStorage.getItem(key);
+ if (val && (val[0] === '[' || val[0] === '{'))
+ return JSON.parse(val);
+ else
+ return val;
+}
+
+function save(key, val)
+{
+ if (typeof val === 'object')
+ val = JSON.stringify(val);
+ else if (typeof val === 'number' || typeof val === 'boolean')
+ val = val.toString();
+ localStorage.setItem(key, val);
+}
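+
+// Annotation (not upstream): save()/get() round-trip arrays and objects via
+// JSON, while everything else comes back as a string:
+//
+//   save('packageStates', { foo: "collapsed" });
+//   get('packageStates').foo === "collapsed"; // parsed back from JSON
+//   save('maxHistory', 20);
+//   get('maxHistory') === "20";               // numbers come back as strings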
+
+function splitPathName(str)
+{
+ var delim = str.indexOf('\\') > -1 ? '\\' : '/';
+ return { delim: delim, parts: str.split(delim) };
+}
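+
+// Annotation (not upstream): e.g. splitPathName("github.com/foo/bar")
+// returns { delim: "/", parts: ["github.com", "foo", "bar"] }; backslash
+// paths choose '\\' as the delimiter instead.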
+
+function newFrame()
+{
+ return {
+ results: {}, // response from server (with some of our own context info)
+ packages: {}, // packages organized into statuses for convenience (like with coverage)
+ overall: emptyOverall(), // overall status info, compiled from server's response
+ assertions: emptyAssertions(), // lists of assertions, compiled from server's response
+ failedBuilds: [], // list of packages that failed to build
+ timestamp: moment(), // the timestamp of this "freeze-state"
+ id: convey.frameCounter++, // unique ID for this frame
+ coverDelta: 0 // difference in total coverage from the last frame to this one
+ };
+}
+
+function emptyOverall()
+{
+ return {
+ status: {},
+ duration: 0,
+ assertions: 0,
+ passed: 0,
+ panics: 0,
+ failures: 0,
+ skipped: 0,
+ failedBuilds: 0,
+ coverage: 0
+ };
+}
+
+function emptyAssertions()
+{
+ return {
+ passed: [],
+ failed: [],
+ panicked: [],
+ skipped: []
+ };
+}
+
+function makeContext(obj)
+{
+ obj._passed = 0;
+ obj._failed = 0;
+ obj._panicked = 0;
+ obj._skipped = 0;
+ obj._status = '';
+ return obj;
+}
+
+function current()
+{
+ return convey.history[convey.history.length - 1];
+}
+
+function assignStatus(obj)
+{
+ if (obj._skipped)
+ obj._status = 'skip';
+ else if (obj.Outcome === "ignored")
+ obj._status = convey.statuses.ignored;
+ else if (obj._panicked)
+ obj._status = convey.statuses.panic;
+ else if (obj._failed || obj.Outcome === "failed")
+ obj._status = convey.statuses.fail;
+ else
+ obj._status = convey.statuses.pass;
+}
+
+function showCoverDelta(delta)
+{
+ if (delta > 0)
+ return "+" + delta + "%";
+ else if (delta === 0)
+ return "±" + delta + "%";
+ else
+ return delta + "%";
+}
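+
+// Annotation (not upstream): showCoverDelta(2.5) === "+2.5%",
+// showCoverDelta(0) === "±0%", and showCoverDelta(-1.2) === "-1.2%".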
+
+function customMarkupPipes()
+{
+ // MARKUP.JS custom pipes
+ Mark.pipes.relativePath = function(str)
+ {
+ var basePath = new RegExp($('#path').val()+'[\\/]', 'gi');
+ return str.replace(basePath, '');
+ };
+ Mark.pipes.htmlSafe = function(str)
+ {
+ return str.replace(/</g, "&lt;").replace(/>/g, "&gt;");
+ };
+ Mark.pipes.ansiColours = ansispan;
+ Mark.pipes.boldPkgName = function(str)
+ {
+ var pkg = splitPathName(str);
+ pkg.parts[0] = '<span class="not-pkg-name">' + pkg.parts[0];
+ pkg.parts[pkg.parts.length - 1] = "</span><b>" + pkg.parts[pkg.parts.length - 1] + "</b>";
+ return pkg.parts.join(pkg.delim);
+ };
+ Mark.pipes.needsDiff = function(test)
+ {
+ return !!test.Failure && (test.Expected !== "" || test.Actual !== "");
+ };
+ Mark.pipes.coveragePct = function(str)
+ {
+ // Expected input: a percentage such as 75% arrives as the string "75.0"
+ var num = parseInt(str); // we only need int precision
+ if (num < 0)
+ return "0";
+ else if (num <= 5)
+ return "5px"; // Still shows low coverage
+ else if (num > 100)
+ str = "100";
+ return str;
+ };
+ Mark.pipes.coverageDisplay = function(str)
+ {
+ var num = parseFloat(str);
+ return num < 0 ? "" : num + "% coverage";
+ };
+ Mark.pipes.coverageReportName = function(str)
+ {
+ return str.replace(/\//g, "-");
+ };
+}
+
+function suppress(event)
+{
+ if (!event)
+ return false;
+ if (event.preventDefault)
+ event.preventDefault();
+ if (event.stopPropagation)
+ event.stopPropagation();
+ event.cancelBubble = true;
+ return false;
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/ansispan.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/ansispan.js
new file mode 100644
index 00000000000..3d8603a6d1b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/ansispan.js
@@ -0,0 +1,67 @@
+/*
+Copyright (C) 2011 by Maciej Małecki
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+var ansispan = function (str) {
+ Object.keys(ansispan.foregroundColors).forEach(function (ansi) {
+ var span = '<span class="ansi-' + ansispan.foregroundColors[ansi] + '">';
+
+ //
+ // `\033[Xm` == `\033[0;Xm` sets foreground color to `X`.
+ //
+
+ str = str.replace(
+ new RegExp('\033\\[' + ansi + 'm', 'g'),
+ span
+ ).replace(
+ new RegExp('\033\\[0;' + ansi + 'm', 'g'),
+ span
+ );
+ });
+ //
+ // `\033[1m` enables bold font, `\033[22m` disables it
+ //
+ str = str.replace(/\033\[1m/g, '<b>').replace(/\033\[22m/g, '</b>');
+
+ //
+ // `\033[3m` enables italics font, `\033[23m` disables it
+ //
+ str = str.replace(/\033\[3m/g, '<i>').replace(/\033\[23m/g, '</i>');
+
+ str = str.replace(/\033\[m/g, '</span>');
+ str = str.replace(/\033\[0m/g, '</span>');
+ return str.replace(/\033\[39m/g, '</span>');
+};
+
+ansispan.foregroundColors = {
+ '30': 'black',
+ '31': 'red',
+ '32': 'green',
+ '33': 'yellow',
+ '34': 'blue',
+ '35': 'purple',
+ '36': 'cyan',
+ '37': 'white'
+};
+
+if (typeof module !== 'undefined' && module.exports) {
+ module.exports = ansispan;
+}
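+
+// Annotation (not upstream): an example conversion using the escape codes
+// handled above:
+//
+//   ansispan("\033[31mFAIL\033[0m passed=3");
+//   // -> '<span class="ansi-red">FAIL</span> passed=3'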
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/diff-match-patch.min.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/diff-match-patch.min.js
new file mode 100644
index 00000000000..c78b7ffc46d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/diff-match-patch.min.js
@@ -0,0 +1,49 @@
+(function(){function diff_match_patch(){this.Diff_Timeout=1;this.Diff_EditCost=4;this.Match_Threshold=0.5;this.Match_Distance=1E3;this.Patch_DeleteThreshold=0.5;this.Patch_Margin=4;this.Match_MaxBits=32}
+diff_match_patch.prototype.diff_main=function(a,b,c,d){"undefined"==typeof d&&(d=0>=this.Diff_Timeout?Number.MAX_VALUE:(new Date).getTime()+1E3*this.Diff_Timeout);if(null==a||null==b)throw Error("Null input. (diff_main)");if(a==b)return a?[[0,a]]:[];"undefined"==typeof c&&(c=!0);var e=c,f=this.diff_commonPrefix(a,b);c=a.substring(0,f);a=a.substring(f);b=b.substring(f);var f=this.diff_commonSuffix(a,b),g=a.substring(a.length-f);a=a.substring(0,a.length-f);b=b.substring(0,b.length-f);a=this.diff_compute_(a,
+b,e,d);c&&a.unshift([0,c]);g&&a.push([0,g]);this.diff_cleanupMerge(a);return a};
+diff_match_patch.prototype.diff_compute_=function(a,b,c,d){if(!a)return[[1,b]];if(!b)return[[-1,a]];var e=a.length>b.length?a:b,f=a.length>b.length?b:a,g=e.indexOf(f);return-1!=g?(c=[[1,e.substring(0,g)],[0,f],[1,e.substring(g+f.length)]],a.length>b.length&&(c[0][0]=c[2][0]=-1),c):1==f.length?[[-1,a],[1,b]]:(e=this.diff_halfMatch_(a,b))?(f=e[0],a=e[1],g=e[2],b=e[3],e=e[4],f=this.diff_main(f,g,c,d),c=this.diff_main(a,b,c,d),f.concat([[0,e]],c)):c&&100<a.length&&100<b.length?this.diff_lineMode_(a,b,
+d):this.diff_bisect_(a,b,d)};
+diff_match_patch.prototype.diff_lineMode_=function(a,b,c){var d=this.diff_linesToChars_(a,b);a=d.chars1;b=d.chars2;d=d.lineArray;a=this.diff_main(a,b,!1,c);this.diff_charsToLines_(a,d);this.diff_cleanupSemantic(a);a.push([0,""]);for(var e=d=b=0,f="",g="";b<a.length;){switch(a[b][0]){case 1:e++;g+=a[b][1];break;case -1:d++;f+=a[b][1];break;case 0:if(1<=d&&1<=e){a.splice(b-d-e,d+e);b=b-d-e;d=this.diff_main(f,g,!1,c);for(e=d.length-1;0<=e;e--)a.splice(b,0,d[e]);b+=d.length}d=e=0;g=f=""}b++}a.pop();return a};
+diff_match_patch.prototype.diff_bisect_=function(a,b,c){for(var d=a.length,e=b.length,f=Math.ceil((d+e)/2),g=f,h=2*f,j=Array(h),i=Array(h),k=0;k<h;k++)j[k]=-1,i[k]=-1;j[g+1]=0;i[g+1]=0;for(var k=d-e,q=0!=k%2,r=0,t=0,p=0,w=0,v=0;v<f&&!((new Date).getTime()>c);v++){for(var n=-v+r;n<=v-t;n+=2){var l=g+n,m;m=n==-v||n!=v&&j[l-1]<j[l+1]?j[l+1]:j[l-1]+1;for(var s=m-n;m<d&&s<e&&a.charAt(m)==b.charAt(s);)m++,s++;j[l]=m;if(m>d)t+=2;else if(s>e)r+=2;else if(q&&(l=g+k-n,0<=l&&l<h&&-1!=i[l])){var u=d-i[l];if(m>=
+u)return this.diff_bisectSplit_(a,b,m,s,c)}}for(n=-v+p;n<=v-w;n+=2){l=g+n;u=n==-v||n!=v&&i[l-1]<i[l+1]?i[l+1]:i[l-1]+1;for(m=u-n;u<d&&m<e&&a.charAt(d-u-1)==b.charAt(e-m-1);)u++,m++;i[l]=u;if(u>d)w+=2;else if(m>e)p+=2;else if(!q&&(l=g+k-n,0<=l&&(l<h&&-1!=j[l])&&(m=j[l],s=g+m-l,u=d-u,m>=u)))return this.diff_bisectSplit_(a,b,m,s,c)}}return[[-1,a],[1,b]]};
+diff_match_patch.prototype.diff_bisectSplit_=function(a,b,c,d,e){var f=a.substring(0,c),g=b.substring(0,d);a=a.substring(c);b=b.substring(d);f=this.diff_main(f,g,!1,e);e=this.diff_main(a,b,!1,e);return f.concat(e)};
+diff_match_patch.prototype.diff_linesToChars_=function(a,b){function c(a){for(var b="",c=0,f=-1,g=d.length;f<a.length-1;){f=a.indexOf("\n",c);-1==f&&(f=a.length-1);var r=a.substring(c,f+1),c=f+1;(e.hasOwnProperty?e.hasOwnProperty(r):void 0!==e[r])?b+=String.fromCharCode(e[r]):(b+=String.fromCharCode(g),e[r]=g,d[g++]=r)}return b}var d=[],e={};d[0]="";var f=c(a),g=c(b);return{chars1:f,chars2:g,lineArray:d}};
+diff_match_patch.prototype.diff_charsToLines_=function(a,b){for(var c=0;c<a.length;c++){for(var d=a[c][1],e=[],f=0;f<d.length;f++)e[f]=b[d.charCodeAt(f)];a[c][1]=e.join("")}};diff_match_patch.prototype.diff_commonPrefix=function(a,b){if(!a||!b||a.charAt(0)!=b.charAt(0))return 0;for(var c=0,d=Math.min(a.length,b.length),e=d,f=0;c<e;)a.substring(f,e)==b.substring(f,e)?f=c=e:d=e,e=Math.floor((d-c)/2+c);return e};
+diff_match_patch.prototype.diff_commonSuffix=function(a,b){if(!a||!b||a.charAt(a.length-1)!=b.charAt(b.length-1))return 0;for(var c=0,d=Math.min(a.length,b.length),e=d,f=0;c<e;)a.substring(a.length-e,a.length-f)==b.substring(b.length-e,b.length-f)?f=c=e:d=e,e=Math.floor((d-c)/2+c);return e};
+diff_match_patch.prototype.diff_commonOverlap_=function(a,b){var c=a.length,d=b.length;if(0==c||0==d)return 0;c>d?a=a.substring(c-d):c<d&&(b=b.substring(0,c));c=Math.min(c,d);if(a==b)return c;for(var d=0,e=1;;){var f=a.substring(c-e),f=b.indexOf(f);if(-1==f)return d;e+=f;if(0==f||a.substring(c-e)==b.substring(0,e))d=e,e++}};
+diff_match_patch.prototype.diff_halfMatch_=function(a,b){function c(a,b,c){for(var d=a.substring(c,c+Math.floor(a.length/4)),e=-1,g="",h,j,n,l;-1!=(e=b.indexOf(d,e+1));){var m=f.diff_commonPrefix(a.substring(c),b.substring(e)),s=f.diff_commonSuffix(a.substring(0,c),b.substring(0,e));g.length<s+m&&(g=b.substring(e-s,e)+b.substring(e,e+m),h=a.substring(0,c-s),j=a.substring(c+m),n=b.substring(0,e-s),l=b.substring(e+m))}return 2*g.length>=a.length?[h,j,n,l,g]:null}if(0>=this.Diff_Timeout)return null;
+var d=a.length>b.length?a:b,e=a.length>b.length?b:a;if(4>d.length||2*e.length<d.length)return null;var f=this,g=c(d,e,Math.ceil(d.length/4)),d=c(d,e,Math.ceil(d.length/2)),h;if(!g&&!d)return null;h=d?g?g[4].length>d[4].length?g:d:d:g;var j;a.length>b.length?(g=h[0],d=h[1],e=h[2],j=h[3]):(e=h[0],j=h[1],g=h[2],d=h[3]);h=h[4];return[g,d,e,j,h]};
+diff_match_patch.prototype.diff_cleanupSemantic=function(a){for(var b=!1,c=[],d=0,e=null,f=0,g=0,h=0,j=0,i=0;f<a.length;)0==a[f][0]?(c[d++]=f,g=j,h=i,i=j=0,e=a[f][1]):(1==a[f][0]?j+=a[f][1].length:i+=a[f][1].length,e&&(e.length<=Math.max(g,h)&&e.length<=Math.max(j,i))&&(a.splice(c[d-1],0,[-1,e]),a[c[d-1]+1][0]=1,d--,d--,f=0<d?c[d-1]:-1,i=j=h=g=0,e=null,b=!0)),f++;b&&this.diff_cleanupMerge(a);this.diff_cleanupSemanticLossless(a);for(f=1;f<a.length;){if(-1==a[f-1][0]&&1==a[f][0]){b=a[f-1][1];c=a[f][1];
+d=this.diff_commonOverlap_(b,c);e=this.diff_commonOverlap_(c,b);if(d>=e){if(d>=b.length/2||d>=c.length/2)a.splice(f,0,[0,c.substring(0,d)]),a[f-1][1]=b.substring(0,b.length-d),a[f+1][1]=c.substring(d),f++}else if(e>=b.length/2||e>=c.length/2)a.splice(f,0,[0,b.substring(0,e)]),a[f-1][0]=1,a[f-1][1]=c.substring(0,c.length-e),a[f+1][0]=-1,a[f+1][1]=b.substring(e),f++;f++}f++}};
+diff_match_patch.prototype.diff_cleanupSemanticLossless=function(a){function b(a,b){if(!a||!b)return 6;var c=a.charAt(a.length-1),d=b.charAt(0),e=c.match(diff_match_patch.nonAlphaNumericRegex_),f=d.match(diff_match_patch.nonAlphaNumericRegex_),g=e&&c.match(diff_match_patch.whitespaceRegex_),h=f&&d.match(diff_match_patch.whitespaceRegex_),c=g&&c.match(diff_match_patch.linebreakRegex_),d=h&&d.match(diff_match_patch.linebreakRegex_),i=c&&a.match(diff_match_patch.blanklineEndRegex_),j=d&&b.match(diff_match_patch.blanklineStartRegex_);
+return i||j?5:c||d?4:e&&!g&&h?3:g||h?2:e||f?1:0}for(var c=1;c<a.length-1;){if(0==a[c-1][0]&&0==a[c+1][0]){var d=a[c-1][1],e=a[c][1],f=a[c+1][1],g=this.diff_commonSuffix(d,e);if(g)var h=e.substring(e.length-g),d=d.substring(0,d.length-g),e=h+e.substring(0,e.length-g),f=h+f;for(var g=d,h=e,j=f,i=b(d,e)+b(e,f);e.charAt(0)===f.charAt(0);){var d=d+e.charAt(0),e=e.substring(1)+f.charAt(0),f=f.substring(1),k=b(d,e)+b(e,f);k>=i&&(i=k,g=d,h=e,j=f)}a[c-1][1]!=g&&(g?a[c-1][1]=g:(a.splice(c-1,1),c--),a[c][1]=
+h,j?a[c+1][1]=j:(a.splice(c+1,1),c--))}c++}};diff_match_patch.nonAlphaNumericRegex_=/[^a-zA-Z0-9]/;diff_match_patch.whitespaceRegex_=/\s/;diff_match_patch.linebreakRegex_=/[\r\n]/;diff_match_patch.blanklineEndRegex_=/\n\r?\n$/;diff_match_patch.blanklineStartRegex_=/^\r?\n\r?\n/;
+diff_match_patch.prototype.diff_cleanupEfficiency=function(a){for(var b=!1,c=[],d=0,e=null,f=0,g=!1,h=!1,j=!1,i=!1;f<a.length;){if(0==a[f][0])a[f][1].length<this.Diff_EditCost&&(j||i)?(c[d++]=f,g=j,h=i,e=a[f][1]):(d=0,e=null),j=i=!1;else if(-1==a[f][0]?i=!0:j=!0,e&&(g&&h&&j&&i||e.length<this.Diff_EditCost/2&&3==g+h+j+i))a.splice(c[d-1],0,[-1,e]),a[c[d-1]+1][0]=1,d--,e=null,g&&h?(j=i=!0,d=0):(d--,f=0<d?c[d-1]:-1,j=i=!1),b=!0;f++}b&&this.diff_cleanupMerge(a)};
+diff_match_patch.prototype.diff_cleanupMerge=function(a){a.push([0,""]);for(var b=0,c=0,d=0,e="",f="",g;b<a.length;)switch(a[b][0]){case 1:d++;f+=a[b][1];b++;break;case -1:c++;e+=a[b][1];b++;break;case 0:1<c+d?(0!==c&&0!==d&&(g=this.diff_commonPrefix(f,e),0!==g&&(0<b-c-d&&0==a[b-c-d-1][0]?a[b-c-d-1][1]+=f.substring(0,g):(a.splice(0,0,[0,f.substring(0,g)]),b++),f=f.substring(g),e=e.substring(g)),g=this.diff_commonSuffix(f,e),0!==g&&(a[b][1]=f.substring(f.length-g)+a[b][1],f=f.substring(0,f.length-
+g),e=e.substring(0,e.length-g))),0===c?a.splice(b-d,c+d,[1,f]):0===d?a.splice(b-c,c+d,[-1,e]):a.splice(b-c-d,c+d,[-1,e],[1,f]),b=b-c-d+(c?1:0)+(d?1:0)+1):0!==b&&0==a[b-1][0]?(a[b-1][1]+=a[b][1],a.splice(b,1)):b++,c=d=0,f=e=""}""===a[a.length-1][1]&&a.pop();c=!1;for(b=1;b<a.length-1;)0==a[b-1][0]&&0==a[b+1][0]&&(a[b][1].substring(a[b][1].length-a[b-1][1].length)==a[b-1][1]?(a[b][1]=a[b-1][1]+a[b][1].substring(0,a[b][1].length-a[b-1][1].length),a[b+1][1]=a[b-1][1]+a[b+1][1],a.splice(b-1,1),c=!0):a[b][1].substring(0,
+a[b+1][1].length)==a[b+1][1]&&(a[b-1][1]+=a[b+1][1],a[b][1]=a[b][1].substring(a[b+1][1].length)+a[b+1][1],a.splice(b+1,1),c=!0)),b++;c&&this.diff_cleanupMerge(a)};diff_match_patch.prototype.diff_xIndex=function(a,b){var c=0,d=0,e=0,f=0,g;for(g=0;g<a.length;g++){1!==a[g][0]&&(c+=a[g][1].length);-1!==a[g][0]&&(d+=a[g][1].length);if(c>b)break;e=c;f=d}return a.length!=g&&-1===a[g][0]?f:f+(b-e)};
+diff_match_patch.prototype.diff_prettyHtml=function(a){for(var b=[],c=/&/g,d=/</g,e=/>/g,f=/\n/g,g=0;g<a.length;g++){var h=a[g][0],j=a[g][1],j=j.replace(c,"&amp;").replace(d,"&lt;").replace(e,"&gt;").replace(f,"&para;<br>");switch(h){case 1:b[g]='<ins style="background:#e6ffe6;">'+j+"</ins>";break;case -1:b[g]='<del style="background:#ffe6e6;">'+j+"</del>";break;case 0:b[g]="<span>"+j+"</span>"}}return b.join("")};
+diff_match_patch.prototype.diff_text1=function(a){for(var b=[],c=0;c<a.length;c++)1!==a[c][0]&&(b[c]=a[c][1]);return b.join("")};diff_match_patch.prototype.diff_text2=function(a){for(var b=[],c=0;c<a.length;c++)-1!==a[c][0]&&(b[c]=a[c][1]);return b.join("")};diff_match_patch.prototype.diff_levenshtein=function(a){for(var b=0,c=0,d=0,e=0;e<a.length;e++){var f=a[e][0],g=a[e][1];switch(f){case 1:c+=g.length;break;case -1:d+=g.length;break;case 0:b+=Math.max(c,d),d=c=0}}return b+=Math.max(c,d)};
+diff_match_patch.prototype.diff_toDelta=function(a){for(var b=[],c=0;c<a.length;c++)switch(a[c][0]){case 1:b[c]="+"+encodeURI(a[c][1]);break;case -1:b[c]="-"+a[c][1].length;break;case 0:b[c]="="+a[c][1].length}return b.join("\t").replace(/%20/g," ")};
+diff_match_patch.prototype.diff_fromDelta=function(a,b){for(var c=[],d=0,e=0,f=b.split(/\t/g),g=0;g<f.length;g++){var h=f[g].substring(1);switch(f[g].charAt(0)){case "+":try{c[d++]=[1,decodeURI(h)]}catch(j){throw Error("Illegal escape in diff_fromDelta: "+h);}break;case "-":case "=":var i=parseInt(h,10);if(isNaN(i)||0>i)throw Error("Invalid number in diff_fromDelta: "+h);h=a.substring(e,e+=i);"="==f[g].charAt(0)?c[d++]=[0,h]:c[d++]=[-1,h];break;default:if(f[g])throw Error("Invalid diff operation in diff_fromDelta: "+
+f[g]);}}if(e!=a.length)throw Error("Delta length ("+e+") does not equal source text length ("+a.length+").");return c};diff_match_patch.prototype.match_main=function(a,b,c){if(null==a||null==b||null==c)throw Error("Null input. (match_main)");c=Math.max(0,Math.min(c,a.length));return a==b?0:a.length?a.substring(c,c+b.length)==b?c:this.match_bitap_(a,b,c):-1};
+diff_match_patch.prototype.match_bitap_=function(a,b,c){function d(a,d){var e=a/b.length,g=Math.abs(c-d);return!f.Match_Distance?g?1:e:e+g/f.Match_Distance}if(b.length>this.Match_MaxBits)throw Error("Pattern too long for this browser.");var e=this.match_alphabet_(b),f=this,g=this.Match_Threshold,h=a.indexOf(b,c);-1!=h&&(g=Math.min(d(0,h),g),h=a.lastIndexOf(b,c+b.length),-1!=h&&(g=Math.min(d(0,h),g)));for(var j=1<<b.length-1,h=-1,i,k,q=b.length+a.length,r,t=0;t<b.length;t++){i=0;for(k=q;i<k;)d(t,c+
+k)<=g?i=k:q=k,k=Math.floor((q-i)/2+i);q=k;i=Math.max(1,c-k+1);var p=Math.min(c+k,a.length)+b.length;k=Array(p+2);for(k[p+1]=(1<<t)-1;p>=i;p--){var w=e[a.charAt(p-1)];k[p]=0===t?(k[p+1]<<1|1)&w:(k[p+1]<<1|1)&w|((r[p+1]|r[p])<<1|1)|r[p+1];if(k[p]&j&&(w=d(t,p-1),w<=g))if(g=w,h=p-1,h>c)i=Math.max(1,2*c-h);else break}if(d(t+1,c)>g)break;r=k}return h};
+diff_match_patch.prototype.match_alphabet_=function(a){for(var b={},c=0;c<a.length;c++)b[a.charAt(c)]=0;for(c=0;c<a.length;c++)b[a.charAt(c)]|=1<<a.length-c-1;return b};
+diff_match_patch.prototype.patch_addContext_=function(a,b){if(0!=b.length){for(var c=b.substring(a.start2,a.start2+a.length1),d=0;b.indexOf(c)!=b.lastIndexOf(c)&&c.length<this.Match_MaxBits-this.Patch_Margin-this.Patch_Margin;)d+=this.Patch_Margin,c=b.substring(a.start2-d,a.start2+a.length1+d);d+=this.Patch_Margin;(c=b.substring(a.start2-d,a.start2))&&a.diffs.unshift([0,c]);(d=b.substring(a.start2+a.length1,a.start2+a.length1+d))&&a.diffs.push([0,d]);a.start1-=c.length;a.start2-=c.length;a.length1+=
+c.length+d.length;a.length2+=c.length+d.length}};
+diff_match_patch.prototype.patch_make=function(a,b,c){var d;if("string"==typeof a&&"string"==typeof b&&"undefined"==typeof c)d=a,b=this.diff_main(d,b,!0),2<b.length&&(this.diff_cleanupSemantic(b),this.diff_cleanupEfficiency(b));else if(a&&"object"==typeof a&&"undefined"==typeof b&&"undefined"==typeof c)b=a,d=this.diff_text1(b);else if("string"==typeof a&&b&&"object"==typeof b&&"undefined"==typeof c)d=a;else if("string"==typeof a&&"string"==typeof b&&c&&"object"==typeof c)d=a,b=c;else throw Error("Unknown call format to patch_make.");
+if(0===b.length)return[];c=[];a=new diff_match_patch.patch_obj;for(var e=0,f=0,g=0,h=d,j=0;j<b.length;j++){var i=b[j][0],k=b[j][1];!e&&0!==i&&(a.start1=f,a.start2=g);switch(i){case 1:a.diffs[e++]=b[j];a.length2+=k.length;d=d.substring(0,g)+k+d.substring(g);break;case -1:a.length1+=k.length;a.diffs[e++]=b[j];d=d.substring(0,g)+d.substring(g+k.length);break;case 0:k.length<=2*this.Patch_Margin&&e&&b.length!=j+1?(a.diffs[e++]=b[j],a.length1+=k.length,a.length2+=k.length):k.length>=2*this.Patch_Margin&&
+e&&(this.patch_addContext_(a,h),c.push(a),a=new diff_match_patch.patch_obj,e=0,h=d,f=g)}1!==i&&(f+=k.length);-1!==i&&(g+=k.length)}e&&(this.patch_addContext_(a,h),c.push(a));return c};diff_match_patch.prototype.patch_deepCopy=function(a){for(var b=[],c=0;c<a.length;c++){var d=a[c],e=new diff_match_patch.patch_obj;e.diffs=[];for(var f=0;f<d.diffs.length;f++)e.diffs[f]=d.diffs[f].slice();e.start1=d.start1;e.start2=d.start2;e.length1=d.length1;e.length2=d.length2;b[c]=e}return b};
+diff_match_patch.prototype.patch_apply=function(a,b){if(0==a.length)return[b,[]];a=this.patch_deepCopy(a);var c=this.patch_addPadding(a);b=c+b+c;this.patch_splitMax(a);for(var d=0,e=[],f=0;f<a.length;f++){var g=a[f].start2+d,h=this.diff_text1(a[f].diffs),j,i=-1;if(h.length>this.Match_MaxBits){if(j=this.match_main(b,h.substring(0,this.Match_MaxBits),g),-1!=j&&(i=this.match_main(b,h.substring(h.length-this.Match_MaxBits),g+h.length-this.Match_MaxBits),-1==i||j>=i))j=-1}else j=this.match_main(b,h,g);
+if(-1==j)e[f]=!1,d-=a[f].length2-a[f].length1;else if(e[f]=!0,d=j-g,g=-1==i?b.substring(j,j+h.length):b.substring(j,i+this.Match_MaxBits),h==g)b=b.substring(0,j)+this.diff_text2(a[f].diffs)+b.substring(j+h.length);else if(g=this.diff_main(h,g,!1),h.length>this.Match_MaxBits&&this.diff_levenshtein(g)/h.length>this.Patch_DeleteThreshold)e[f]=!1;else{this.diff_cleanupSemanticLossless(g);for(var h=0,k,i=0;i<a[f].diffs.length;i++){var q=a[f].diffs[i];0!==q[0]&&(k=this.diff_xIndex(g,h));1===q[0]?b=b.substring(0,
+j+k)+q[1]+b.substring(j+k):-1===q[0]&&(b=b.substring(0,j+k)+b.substring(j+this.diff_xIndex(g,h+q[1].length)));-1!==q[0]&&(h+=q[1].length)}}}b=b.substring(c.length,b.length-c.length);return[b,e]};
+diff_match_patch.prototype.patch_addPadding=function(a){for(var b=this.Patch_Margin,c="",d=1;d<=b;d++)c+=String.fromCharCode(d);for(d=0;d<a.length;d++)a[d].start1+=b,a[d].start2+=b;var d=a[0],e=d.diffs;if(0==e.length||0!=e[0][0])e.unshift([0,c]),d.start1-=b,d.start2-=b,d.length1+=b,d.length2+=b;else if(b>e[0][1].length){var f=b-e[0][1].length;e[0][1]=c.substring(e[0][1].length)+e[0][1];d.start1-=f;d.start2-=f;d.length1+=f;d.length2+=f}d=a[a.length-1];e=d.diffs;0==e.length||0!=e[e.length-1][0]?(e.push([0,
+c]),d.length1+=b,d.length2+=b):b>e[e.length-1][1].length&&(f=b-e[e.length-1][1].length,e[e.length-1][1]+=c.substring(0,f),d.length1+=f,d.length2+=f);return c};
+diff_match_patch.prototype.patch_splitMax=function(a){for(var b=this.Match_MaxBits,c=0;c<a.length;c++)if(!(a[c].length1<=b)){var d=a[c];a.splice(c--,1);for(var e=d.start1,f=d.start2,g="";0!==d.diffs.length;){var h=new diff_match_patch.patch_obj,j=!0;h.start1=e-g.length;h.start2=f-g.length;""!==g&&(h.length1=h.length2=g.length,h.diffs.push([0,g]));for(;0!==d.diffs.length&&h.length1<b-this.Patch_Margin;){var g=d.diffs[0][0],i=d.diffs[0][1];1===g?(h.length2+=i.length,f+=i.length,h.diffs.push(d.diffs.shift()),
+j=!1):-1===g&&1==h.diffs.length&&0==h.diffs[0][0]&&i.length>2*b?(h.length1+=i.length,e+=i.length,j=!1,h.diffs.push([g,i]),d.diffs.shift()):(i=i.substring(0,b-h.length1-this.Patch_Margin),h.length1+=i.length,e+=i.length,0===g?(h.length2+=i.length,f+=i.length):j=!1,h.diffs.push([g,i]),i==d.diffs[0][1]?d.diffs.shift():d.diffs[0][1]=d.diffs[0][1].substring(i.length))}g=this.diff_text2(h.diffs);g=g.substring(g.length-this.Patch_Margin);i=this.diff_text1(d.diffs).substring(0,this.Patch_Margin);""!==i&&
+(h.length1+=i.length,h.length2+=i.length,0!==h.diffs.length&&0===h.diffs[h.diffs.length-1][0]?h.diffs[h.diffs.length-1][1]+=i:h.diffs.push([0,i]));j||a.splice(++c,0,h)}}};diff_match_patch.prototype.patch_toText=function(a){for(var b=[],c=0;c<a.length;c++)b[c]=a[c];return b.join("")};
+diff_match_patch.prototype.patch_fromText=function(a){var b=[];if(!a)return b;a=a.split("\n");for(var c=0,d=/^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$/;c<a.length;){var e=a[c].match(d);if(!e)throw Error("Invalid patch string: "+a[c]);var f=new diff_match_patch.patch_obj;b.push(f);f.start1=parseInt(e[1],10);""===e[2]?(f.start1--,f.length1=1):"0"==e[2]?f.length1=0:(f.start1--,f.length1=parseInt(e[2],10));f.start2=parseInt(e[3],10);""===e[4]?(f.start2--,f.length2=1):"0"==e[4]?f.length2=0:(f.start2--,f.length2=
+parseInt(e[4],10));for(c++;c<a.length;){e=a[c].charAt(0);try{var g=decodeURI(a[c].substring(1))}catch(h){throw Error("Illegal escape in patch_fromText: "+g);}if("-"==e)f.diffs.push([-1,g]);else if("+"==e)f.diffs.push([1,g]);else if(" "==e)f.diffs.push([0,g]);else if("@"==e)break;else if(""!==e)throw Error('Invalid patch mode "'+e+'" in: '+g);c++}}return b};diff_match_patch.patch_obj=function(){this.diffs=[];this.start2=this.start1=null;this.length2=this.length1=0};
+diff_match_patch.patch_obj.prototype.toString=function(){var a,b;a=0===this.length1?this.start1+",0":1==this.length1?this.start1+1:this.start1+1+","+this.length1;b=0===this.length2?this.start2+",0":1==this.length2?this.start2+1:this.start2+1+","+this.length2;a=["@@ -"+a+" +"+b+" @@\n"];var c;for(b=0;b<this.diffs.length;b++){switch(this.diffs[b][0]){case 1:c="+";break;case -1:c="-";break;case 0:c=" "}a[b+1]=c+encodeURI(this.diffs[b][1])+"\n"}return a.join("").replace(/%20/g," ")};
+this.diff_match_patch=diff_match_patch;this.DIFF_DELETE=-1;this.DIFF_INSERT=1;this.DIFF_EQUAL=0;})() \ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery-2_1_0.min.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery-2_1_0.min.js
new file mode 100644
index 00000000000..2adda35a5b1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery-2_1_0.min.js
@@ -0,0 +1,4 @@
+/*! jQuery v2.1.0 | (c) 2005, 2014 jQuery Foundation, Inc. | jquery.org/license */
+!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k="".trim,l={},m=a.document,n="2.1.0",o=function(a,b){return new o.fn.init(a,b)},p=/^-ms-/,q=/-([\da-z])/gi,r=function(a,b){return b.toUpperCase()};o.fn=o.prototype={jquery:n,constructor:o,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=o.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return o.each(this,a,b)},map:function(a){return this.pushStack(o.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},o.extend=o.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||o.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(a=arguments[h]))for(b in a)c=g[b],d=a[b],g!==d&&(j&&d&&(o.isPlainObject(d)||(e=o.isArray(d)))?(e?(e=!1,f=c&&o.isArray(c)?c:[]):f=c&&o.isPlainObject(c)?c:{},g[b]=o.extend(j,f,d)):void 0!==d&&(g[b]=d));return g},o.extend({expando:"jQuery"+(n+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===o.type(a)},isArray:Array.isArray,isWindow:function(a){return null!=a&&a===a.window},isNumeric:function(a){return a-parseFloat(a)>=0},isPlainObject:function(a){if("object"!==o.type(a)||a.nodeType||o.isWindow(a))return!1;try{if(a.constructor&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(b){return!1}return!0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(a){var b,c=eval;a=o.trim(a),a&&(1===a.indexOf("use strict")?(b=m.createElement("script"),b.text=a,m.head.appendChild(b).parentNode.removeChild(b)):c(a))},camelCase:function(a){return a.replace(p,"ms-").replace(q,r)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=s(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":k.call(a)},makeArray:function(a,b){var c=b||[];return null!=a&&(s(Object(a))?o.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){return null==b?-1:g.call(b,a,c)},merge:function(a,b){for(var c=+b.length,d=0,e=a.length;c>d;d++)a[e++]=b[d];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=s(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in 
a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(c=a[b],b=a,a=c),o.isFunction(a)?(e=d.call(arguments,2),f=function(){return a.apply(b||this,e.concat(d.call(arguments)))},f.guid=a.guid=a.guid||o.guid++,f):void 0},now:Date.now,support:l}),o.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function s(a){var b=a.length,c=o.type(a);return"function"===c||o.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var t=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s="sizzle"+-new Date,t=a.document,u=0,v=0,w=eb(),x=eb(),y=eb(),z=function(a,b){return a===b&&(j=!0),0},A="undefined",B=1<<31,C={}.hasOwnProperty,D=[],E=D.pop,F=D.push,G=D.push,H=D.slice,I=D.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},J="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",K="[\\x20\\t\\r\\n\\f]",L="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",M=L.replace("w","w#"),N="\\["+K+"*("+L+")"+K+"*(?:([*^$|!~]?=)"+K+"*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|("+M+")|)|)"+K+"*\\]",O=":("+L+")(?:\\(((['\"])((?:\\\\.|[^\\\\])*?)\\3|((?:\\\\.|[^\\\\()[\\]]|"+N.replace(3,8)+")*)|.*)\\)|)",P=new RegExp("^"+K+"+|((?:^|[^\\\\])(?:\\\\.)*)"+K+"+$","g"),Q=new RegExp("^"+K+"*,"+K+"*"),R=new RegExp("^"+K+"*([>+~]|"+K+")"+K+"*"),S=new RegExp("="+K+"*([^\\]'\"]*?)"+K+"*\\]","g"),T=new RegExp(O),U=new RegExp("^"+M+"$"),V={ID:new RegExp("^#("+L+")"),CLASS:new RegExp("^\\.("+L+")"),TAG:new RegExp("^("+L.replace("w","w*")+")"),ATTR:new RegExp("^"+N),PSEUDO:new RegExp("^"+O),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+K+"*(even|odd|(([+-]|)(\\d*)n|)"+K+"*(?:([+-]|)"+K+"*(\\d+)|))"+K+"*\\)|)","i"),bool:new RegExp("^(?:"+J+")$","i"),needsContext:new RegExp("^"+K+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+K+"*((?:-\\d)?\\d*)"+K+"*\\)|)(?=[^-]|$)","i")},W=/^(?:input|select|textarea|button)$/i,X=/^h\d$/i,Y=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,$=/[+~]/,_=/'|\\/g,ab=new RegExp("\\\\([\\da-f]{1,6}"+K+"?|("+K+")|.)","ig"),bb=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{G.apply(D=H.call(t.childNodes),t.childNodes),D[t.childNodes.length].nodeType}catch(cb){G={apply:D.length?function(a,b){F.apply(a,H.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function db(a,b,d,e){var f,g,h,i,j,m,p,q,u,v;if((b?b.ownerDocument||b:t)!==l&&k(b),b=b||l,d=d||[],!a||"string"!=typeof a)return d;if(1!==(i=b.nodeType)&&9!==i)return[];if(n&&!e){if(f=Z.exec(a))if(h=f[1]){if(9===i){if(g=b.getElementById(h),!g||!g.parentNode)return d;if(g.id===h)return d.push(g),d}else if(b.ownerDocument&&(g=b.ownerDocument.getElementById(h))&&r(b,g)&&g.id===h)return d.push(g),d}else{if(f[2])return G.apply(d,b.getElementsByTagName(a)),d;if((h=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return G.apply(d,b.getElementsByClassName(h)),d}if(c.qsa&&(!o||!o.test(a))){if(q=p=s,u=b,v=9===i&&a,1===i&&"object"!==b.nodeName.toLowerCase()){m=ob(a),(p=b.getAttribute("id"))?q=p.replace(_,"\\$&"):b.setAttribute("id",q),q="[id='"+q+"'] ",j=m.length;while(j--)m[j]=q+pb(m[j]);u=$.test(a)&&mb(b.parentNode)||b,v=m.join(",")}if(v)try{return G.apply(d,u.querySelectorAll(v)),d}catch(w){}finally{p||b.removeAttribute("id")}}}return 
xb(a.replace(P,"$1"),b,d,e)}function eb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function fb(a){return a[s]=!0,a}function gb(a){var b=l.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function hb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function ib(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||B)-(~a.sourceIndex||B);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function jb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function kb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function lb(a){return fb(function(b){return b=+b,fb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function mb(a){return a&&typeof a.getElementsByTagName!==A&&a}c=db.support={},f=db.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},k=db.setDocument=function(a){var b,e=a?a.ownerDocument||a:t,g=e.defaultView;return e!==l&&9===e.nodeType&&e.documentElement?(l=e,m=e.documentElement,n=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener("unload",function(){k()},!1):g.attachEvent&&g.attachEvent("onunload",function(){k()})),c.attributes=gb(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=gb(function(a){return a.appendChild(e.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Y.test(e.getElementsByClassName)&&gb(function(a){return a.innerHTML="<div class='a'></div><div class='a i'></div>",a.firstChild.className="i",2===a.getElementsByClassName("i").length}),c.getById=gb(function(a){return m.appendChild(a).id=s,!e.getElementsByName||!e.getElementsByName(s).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==A&&n){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(ab,bb);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(ab,bb);return function(a){var c=typeof a.getAttributeNode!==A&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==A?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof b.getElementsByClassName!==A&&n?b.getElementsByClassName(a):void 0},p=[],o=[],(c.qsa=Y.test(e.querySelectorAll))&&(gb(function(a){a.innerHTML="<select t=''><option selected=''></option></select>",a.querySelectorAll("[t^='']").length&&o.push("[*^$]="+K+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||o.push("\\["+K+"*(?:value|"+J+")"),a.querySelectorAll(":checked").length||o.push(":checked")}),gb(function(a){var 
b=e.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&o.push("name"+K+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||o.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),o.push(",.*:")})),(c.matchesSelector=Y.test(q=m.webkitMatchesSelector||m.mozMatchesSelector||m.oMatchesSelector||m.msMatchesSelector))&&gb(function(a){c.disconnectedMatch=q.call(a,"div"),q.call(a,"[s!='']:x"),p.push("!=",O)}),o=o.length&&new RegExp(o.join("|")),p=p.length&&new RegExp(p.join("|")),b=Y.test(m.compareDocumentPosition),r=b||Y.test(m.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},z=b?function(a,b){if(a===b)return j=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===t&&r(t,a)?-1:b===e||b.ownerDocument===t&&r(t,b)?1:i?I.call(i,a)-I.call(i,b):0:4&d?-1:1)}:function(a,b){if(a===b)return j=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],k=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:i?I.call(i,a)-I.call(i,b):0;if(f===g)return ib(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)k.unshift(c);while(h[d]===k[d])d++;return d?ib(h[d],k[d]):h[d]===t?-1:k[d]===t?1:0},e):l},db.matches=function(a,b){return db(a,null,null,b)},db.matchesSelector=function(a,b){if((a.ownerDocument||a)!==l&&k(a),b=b.replace(S,"='$1']"),!(!c.matchesSelector||!n||p&&p.test(b)||o&&o.test(b)))try{var d=q.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return db(b,l,null,[a]).length>0},db.contains=function(a,b){return(a.ownerDocument||a)!==l&&k(a),r(a,b)},db.attr=function(a,b){(a.ownerDocument||a)!==l&&k(a);var e=d.attrHandle[b.toLowerCase()],f=e&&C.call(d.attrHandle,b.toLowerCase())?e(a,b,!n):void 0;return void 0!==f?f:c.attributes||!n?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},db.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},db.uniqueSort=function(a){var b,d=[],e=0,f=0;if(j=!c.detectDuplicates,i=!c.sortStable&&a.slice(0),a.sort(z),j){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return i=null,a},e=db.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=db.selectors={cacheLength:50,createPseudo:fb,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(ab,bb),a[3]=(a[4]||a[5]||"").replace(ab,bb),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||db.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&db.error(a[0]),a},PSEUDO:function(a){var b,c=!a[5]&&a[2];return V.CHILD.test(a[0])?null:(a[3]&&void 
0!==a[4]?a[2]=a[4]:c&&T.test(c)&&(b=ob(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(ab,bb).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=w[a+" "];return b||(b=new RegExp("(^|"+K+")"+a+"("+K+"|$)"))&&w(a,function(a){return b.test("string"==typeof a.className&&a.className||typeof a.getAttribute!==A&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=db.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),t=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&t){k=q[s]||(q[s]={}),j=k[a]||[],n=j[0]===u&&j[1],m=j[0]===u&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[u,n,m];break}}else if(t&&(j=(b[s]||(b[s]={}))[a])&&j[0]===u)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(t&&((l[s]||(l[s]={}))[a]=[u,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||db.error("unsupported pseudo: "+a);return e[s]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?fb(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=I.call(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:fb(function(a){var b=[],c=[],d=g(a.replace(P,"$1"));return d[s]?fb(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:fb(function(a){return function(b){return db(a,b).length>0}}),contains:fb(function(a){return function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:fb(function(a){return U.test(a||"")||db.error("unsupported lang: "+a),a=a.replace(ab,bb).toLowerCase(),function(b){var c;do if(c=n?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===m},focus:function(a){return a===l.activeElement&&(!l.hasFocus||l.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return X.test(a.nodeName)},input:function(a){return W.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var 
b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:lb(function(){return[0]}),last:lb(function(a,b){return[b-1]}),eq:lb(function(a,b,c){return[0>c?c+b:c]}),even:lb(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:lb(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:lb(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:lb(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=jb(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=kb(b);function nb(){}nb.prototype=d.filters=d.pseudos,d.setFilters=new nb;function ob(a,b){var c,e,f,g,h,i,j,k=x[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=Q.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=R.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(P," ")}),h=h.slice(c.length));for(g in d.filter)!(e=V[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?db.error(a):x(a,i).slice(0)}function pb(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function qb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=v++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[u,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[s]||(b[s]={}),(h=i[d])&&h[0]===u&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function rb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function sb(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function tb(a,b,c,d,e,f){return d&&!d[s]&&(d=tb(d)),e&&!e[s]&&(e=tb(e,f)),fb(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||wb(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:sb(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=sb(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?I.call(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=sb(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):G.apply(g,r)})}function ub(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],i=g||d.relative[" "],j=g?1:0,k=qb(function(a){return a===b},i,!0),l=qb(function(a){return I.call(b,a)>-1},i,!0),m=[function(a,c,d){return!g&&(d||c!==h)||((b=c).nodeType?k(a,c,d):l(a,c,d))}];f>j;j++)if(c=d.relative[a[j].type])m=[qb(rb(m),c)];else{if(c=d.filter[a[j].type].apply(null,a[j].matches),c[s]){for(e=++j;f>e;e++)if(d.relative[a[e].type])break;return tb(j>1&&rb(m),j>1&&pb(a.slice(0,j-1).concat({value:" "===a[j-2].type?"*":""})).replace(P,"$1"),c,e>j&&ub(a.slice(j,e)),f>e&&ub(a=a.slice(e)),f>e&&pb(a))}m.push(c)}return rb(m)}function vb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,i,j,k){var m,n,o,p=0,q="0",r=f&&[],s=[],t=h,v=f||e&&d.find.TAG("*",k),w=u+=null==t?1:Math.random()||.1,x=v.length;for(k&&(h=g!==l&&g);q!==x&&null!=(m=v[q]);q++){if(e&&m){n=0;while(o=a[n++])if(o(m,g,i)){j.push(m);break}k&&(u=w)}c&&((m=!o&&m)&&p--,f&&r.push(m))}if(p+=q,c&&q!==p){n=0;while(o=b[n++])o(r,s,g,i);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=E.call(j));s=sb(s)}G.apply(j,s),k&&!f&&s.length>0&&p+b.length>1&&db.uniqueSort(j)}return k&&(u=w,h=t),r};return 
c?fb(f):f}g=db.compile=function(a,b){var c,d=[],e=[],f=y[a+" "];if(!f){b||(b=ob(a)),c=b.length;while(c--)f=ub(b[c]),f[s]?d.push(f):e.push(f);f=y(a,vb(e,d))}return f};function wb(a,b,c){for(var d=0,e=b.length;e>d;d++)db(a,b[d],c);return c}function xb(a,b,e,f){var h,i,j,k,l,m=ob(a);if(!f&&1===m.length){if(i=m[0]=m[0].slice(0),i.length>2&&"ID"===(j=i[0]).type&&c.getById&&9===b.nodeType&&n&&d.relative[i[1].type]){if(b=(d.find.ID(j.matches[0].replace(ab,bb),b)||[])[0],!b)return e;a=a.slice(i.shift().value.length)}h=V.needsContext.test(a)?0:i.length;while(h--){if(j=i[h],d.relative[k=j.type])break;if((l=d.find[k])&&(f=l(j.matches[0].replace(ab,bb),$.test(i[0].type)&&mb(b.parentNode)||b))){if(i.splice(h,1),a=f.length&&pb(i),!a)return G.apply(e,f),e;break}}}return g(a,m)(f,b,!n,e,$.test(a)&&mb(b.parentNode)||b),e}return c.sortStable=s.split("").sort(z).join("")===s,c.detectDuplicates=!!j,k(),c.sortDetached=gb(function(a){return 1&a.compareDocumentPosition(l.createElement("div"))}),gb(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||hb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&gb(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||hb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),gb(function(a){return null==a.getAttribute("disabled")})||hb(J,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),db}(a);o.find=t,o.expr=t.selectors,o.expr[":"]=o.expr.pseudos,o.unique=t.uniqueSort,o.text=t.getText,o.isXMLDoc=t.isXML,o.contains=t.contains;var u=o.expr.match.needsContext,v=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,w=/^.[^:#\[\.,]*$/;function x(a,b,c){if(o.isFunction(b))return o.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return o.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(w.test(b))return o.filter(b,a,c);b=o.filter(b,a)}return o.grep(a,function(a){return g.call(b,a)>=0!==c})}o.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?o.find.matchesSelector(d,a)?[d]:[]:o.find.matches(a,o.grep(b,function(a){return 1===a.nodeType}))},o.fn.extend({find:function(a){var b,c=this.length,d=[],e=this;if("string"!=typeof a)return this.pushStack(o(a).filter(function(){for(b=0;c>b;b++)if(o.contains(e[b],this))return!0}));for(b=0;c>b;b++)o.find(a,e[b],d);return d=this.pushStack(c>1?o.unique(d):d),d.selector=this.selector?this.selector+" "+a:a,d},filter:function(a){return this.pushStack(x(this,a||[],!1))},not:function(a){return this.pushStack(x(this,a||[],!0))},is:function(a){return!!x(this,"string"==typeof a&&u.test(a)?o(a):a||[],!1).length}});var y,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=o.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof a){if(c="<"===a[0]&&">"===a[a.length-1]&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||y).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof o?b[0]:b,o.merge(this,o.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:m,!0)),v.test(c[1])&&o.isPlainObject(b))for(c in b)o.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}return d=m.getElementById(c[2]),d&&d.parentNode&&(this.length=1,this[0]=d),this.context=m,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):o.isFunction(a)?"undefined"!=typeof y.ready?y.ready(a):a(o):(void 
0!==a.selector&&(this.selector=a.selector,this.context=a.context),o.makeArray(a,this))};A.prototype=o.fn,y=o(m);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};o.extend({dir:function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&o(a).is(c))break;d.push(a)}return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),o.fn.extend({has:function(a){var b=o(a,this),c=b.length;return this.filter(function(){for(var a=0;c>a;a++)if(o.contains(this,b[a]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=u.test(a)||"string"!=typeof a?o(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&o.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?o.unique(f):f)},index:function(a){return a?"string"==typeof a?g.call(o(a),this[0]):g.call(this,a.jquery?a[0]:a):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(o.unique(o.merge(this.get(),o(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){while((a=a[b])&&1!==a.nodeType);return a}o.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return o.dir(a,"parentNode")},parentsUntil:function(a,b,c){return o.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return o.dir(a,"nextSibling")},prevAll:function(a){return o.dir(a,"previousSibling")},nextUntil:function(a,b,c){return o.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return o.dir(a,"previousSibling",c)},siblings:function(a){return o.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return o.sibling(a.firstChild)},contents:function(a){return a.contentDocument||o.merge([],a.childNodes)}},function(a,b){o.fn[a]=function(c,d){var e=o.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=o.filter(d,e)),this.length>1&&(C[a]||o.unique(e),B.test(a)&&e.reverse()),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return o.each(a.match(E)||[],function(a,c){b[c]=!0}),b}o.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):o.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(b=a.memory&&l,c=!0,g=e||0,e=0,f=h.length,d=!0;h&&f>g;g++)if(h[g].apply(l[0],l[1])===!1&&a.stopOnFalse){b=!1;break}d=!1,h&&(i?i.length&&j(i.shift()):b?h=[]:k.disable())},k={add:function(){if(h){var c=h.length;!function g(b){o.each(b,function(b,c){var d=o.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&g(c)})}(arguments),d?f=h.length:b&&(e=c,j(b))}return this},remove:function(){return h&&o.each(arguments,function(a,b){var c;while((c=o.inArray(b,h,c))>-1)h.splice(c,1),d&&(f>=c&&f--,g>=c&&g--)}),this},has:function(a){return a?o.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],f=0,this},disable:function(){return h=i=b=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,b||k.disable(),this},locked:function(){return!i},fireWith:function(a,b){return!h||c&&!i||(b=b||[],b=[a,b.slice?b.slice():b],d?i.push(b):j(b)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!c}};return k},o.extend({Deferred:function(a){var b=[["resolve","done",o.Callbacks("once memory"),"resolved"],["reject","fail",o.Callbacks("once 
memory"),"rejected"],["notify","progress",o.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return o.Deferred(function(c){o.each(b,function(b,f){var g=o.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&o.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?o.extend(a,d):d}},e={};return d.pipe=d.then,o.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&o.isFunction(a.promise)?e:0,g=1===f?a:o.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&o.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;o.fn.ready=function(a){return o.ready.promise().done(a),this},o.extend({isReady:!1,readyWait:1,holdReady:function(a){a?o.readyWait++:o.ready(!0)},ready:function(a){(a===!0?--o.readyWait:o.isReady)||(o.isReady=!0,a!==!0&&--o.readyWait>0||(H.resolveWith(m,[o]),o.fn.trigger&&o(m).trigger("ready").off("ready")))}});function I(){m.removeEventListener("DOMContentLoaded",I,!1),a.removeEventListener("load",I,!1),o.ready()}o.ready.promise=function(b){return H||(H=o.Deferred(),"complete"===m.readyState?setTimeout(o.ready):(m.addEventListener("DOMContentLoaded",I,!1),a.addEventListener("load",I,!1))),H.promise(b)},o.ready.promise();var J=o.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===o.type(c)){e=!0;for(h in c)o.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,o.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(o(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f};o.acceptData=function(a){return 1===a.nodeType||9===a.nodeType||!+a.nodeType};function K(){Object.defineProperty(this.cache={},0,{get:function(){return{}}}),this.expando=o.expando+Math.random()}K.uid=1,K.accepts=o.acceptData,K.prototype={key:function(a){if(!K.accepts(a))return 0;var b={},c=a[this.expando];if(!c){c=K.uid++;try{b[this.expando]={value:c},Object.defineProperties(a,b)}catch(d){b[this.expando]=c,o.extend(a,b)}}return this.cache[c]||(this.cache[c]={}),c},set:function(a,b,c){var d,e=this.key(a),f=this.cache[e];if("string"==typeof b)f[b]=c;else if(o.isEmptyObject(f))o.extend(this.cache[e],b);else for(d in b)f[d]=b[d];return f},get:function(a,b){var c=this.cache[this.key(a)];return void 0===b?c:c[b]},access:function(a,b,c){var d;return void 0===b||b&&"string"==typeof b&&void 0===c?(d=this.get(a,b),void 0!==d?d:this.get(a,o.camelCase(b))):(this.set(a,b,c),void 0!==c?c:b)},remove:function(a,b){var c,d,e,f=this.key(a),g=this.cache[f];if(void 0===b)this.cache[f]={};else{o.isArray(b)?d=b.concat(b.map(o.camelCase)):(e=o.camelCase(b),b in g?d=[b,e]:(d=e,d=d in g?[d]:d.match(E)||[])),c=d.length;while(c--)delete 
g[d[c]]}},hasData:function(a){return!o.isEmptyObject(this.cache[a[this.expando]]||{})},discard:function(a){a[this.expando]&&delete this.cache[a[this.expando]]}};var L=new K,M=new K,N=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,O=/([A-Z])/g;function P(a,b,c){var d;if(void 0===c&&1===a.nodeType)if(d="data-"+b.replace(O,"-$1").toLowerCase(),c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:N.test(c)?o.parseJSON(c):c}catch(e){}M.set(a,b,c)}else c=void 0;return c}o.extend({hasData:function(a){return M.hasData(a)||L.hasData(a)},data:function(a,b,c){return M.access(a,b,c)},removeData:function(a,b){M.remove(a,b)},_data:function(a,b,c){return L.access(a,b,c)},_removeData:function(a,b){L.remove(a,b)}}),o.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=M.get(f),1===f.nodeType&&!L.get(f,"hasDataAttrs"))){c=g.length;
+while(c--)d=g[c].name,0===d.indexOf("data-")&&(d=o.camelCase(d.slice(5)),P(f,d,e[d]));L.set(f,"hasDataAttrs",!0)}return e}return"object"==typeof a?this.each(function(){M.set(this,a)}):J(this,function(b){var c,d=o.camelCase(a);if(f&&void 0===b){if(c=M.get(f,a),void 0!==c)return c;if(c=M.get(f,d),void 0!==c)return c;if(c=P(f,d,void 0),void 0!==c)return c}else this.each(function(){var c=M.get(this,d);M.set(this,d,b),-1!==a.indexOf("-")&&void 0!==c&&M.set(this,a,b)})},null,b,arguments.length>1,null,!0)},removeData:function(a){return this.each(function(){M.remove(this,a)})}}),o.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=L.get(a,b),c&&(!d||o.isArray(c)?d=L.access(a,b,o.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=o.queue(a,b),d=c.length,e=c.shift(),f=o._queueHooks(a,b),g=function(){o.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return L.get(a,c)||L.access(a,c,{empty:o.Callbacks("once memory").add(function(){L.remove(a,[b+"queue",c])})})}}),o.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?o.queue(this[0],a):void 0===b?this:this.each(function(){var c=o.queue(this,a,b);o._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&o.dequeue(this,a)})},dequeue:function(a){return this.each(function(){o.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=o.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=L.get(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var Q=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,R=["Top","Right","Bottom","Left"],S=function(a,b){return a=b||a,"none"===o.css(a,"display")||!o.contains(a.ownerDocument,a)},T=/^(?:checkbox|radio)$/i;!function(){var a=m.createDocumentFragment(),b=a.appendChild(m.createElement("div"));b.innerHTML="<input type='radio' checked='checked' name='t'/>",l.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,b.innerHTML="<textarea>x</textarea>",l.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue}();var U="undefined";l.focusinBubbles="onfocusin"in a;var V=/^key/,W=/^(?:mouse|contextmenu)|click/,X=/^(?:focusinfocus|focusoutblur)$/,Y=/^([^.]*)(?:\.(.+)|)$/;function Z(){return!0}function $(){return!1}function _(){try{return m.activeElement}catch(a){}}o.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,n,p,q,r=L.get(a);if(r){c.handler&&(f=c,c=f.handler,e=f.selector),c.guid||(c.guid=o.guid++),(i=r.events)||(i=r.events={}),(g=r.handle)||(g=r.handle=function(b){return typeof o!==U&&o.event.triggered!==b.type?o.event.dispatch.apply(a,arguments):void 0}),b=(b||"").match(E)||[""],j=b.length;while(j--)h=Y.exec(b[j])||[],n=q=h[1],p=(h[2]||"").split(".").sort(),n&&(l=o.event.special[n]||{},n=(e?l.delegateType:l.bindType)||n,l=o.event.special[n]||{},k=o.extend({type:n,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&o.expr.match.needsContext.test(e),namespace:p.join(".")},f),(m=i[n])||(m=i[n]=[],m.delegateCount=0,l.setup&&l.setup.call(a,d,p,g)!==!1||a.addEventListener&&a.addEventListener(n,g,!1)),l.add&&(l.add.call(a,k),k.handler.guid||(k.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,k):m.push(k),o.event.global[n]=!0)}},remove:function(a,b,c,d,e){var 
f,g,h,i,j,k,l,m,n,p,q,r=L.hasData(a)&&L.get(a);if(r&&(i=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=Y.exec(b[j])||[],n=q=h[1],p=(h[2]||"").split(".").sort(),n){l=o.event.special[n]||{},n=(d?l.delegateType:l.bindType)||n,m=i[n]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),g=f=m.length;while(f--)k=m[f],!e&&q!==k.origType||c&&c.guid!==k.guid||h&&!h.test(k.namespace)||d&&d!==k.selector&&("**"!==d||!k.selector)||(m.splice(f,1),k.selector&&m.delegateCount--,l.remove&&l.remove.call(a,k));g&&!m.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||o.removeEvent(a,n,r.handle),delete i[n])}else for(n in i)o.event.remove(a,n+b[j],c,d,!0);o.isEmptyObject(i)&&(delete r.handle,L.remove(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,p=[d||m],q=j.call(b,"type")?b.type:b,r=j.call(b,"namespace")?b.namespace.split("."):[];if(g=h=d=d||m,3!==d.nodeType&&8!==d.nodeType&&!X.test(q+o.event.triggered)&&(q.indexOf(".")>=0&&(r=q.split("."),q=r.shift(),r.sort()),k=q.indexOf(":")<0&&"on"+q,b=b[o.expando]?b:new o.Event(q,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=r.join("."),b.namespace_re=b.namespace?new RegExp("(^|\\.)"+r.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:o.makeArray(c,[b]),n=o.event.special[q]||{},e||!n.trigger||n.trigger.apply(d,c)!==!1)){if(!e&&!n.noBubble&&!o.isWindow(d)){for(i=n.delegateType||q,X.test(i+q)||(g=g.parentNode);g;g=g.parentNode)p.push(g),h=g;h===(d.ownerDocument||m)&&p.push(h.defaultView||h.parentWindow||a)}f=0;while((g=p[f++])&&!b.isPropagationStopped())b.type=f>1?i:n.bindType||q,l=(L.get(g,"events")||{})[b.type]&&L.get(g,"handle"),l&&l.apply(g,c),l=k&&g[k],l&&l.apply&&o.acceptData(g)&&(b.result=l.apply(g,c),b.result===!1&&b.preventDefault());return b.type=q,e||b.isDefaultPrevented()||n._default&&n._default.apply(p.pop(),c)!==!1||!o.acceptData(d)||k&&o.isFunction(d[q])&&!o.isWindow(d)&&(h=d[k],h&&(d[k]=null),o.event.triggered=q,d[q](),o.event.triggered=void 0,h&&(d[k]=h)),b.result}},dispatch:function(a){a=o.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(L.get(this,"events")||{})[a.type]||[],k=o.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=o.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,c=0;while((g=f.handlers[c++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(g.namespace))&&(a.handleObj=g,a.data=g.data,e=((o.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==e&&(a.result=e)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!==this;i=i.parentNode||this)if(i.disabled!==!0||"click"!==a.type){for(d=[],c=0;h>c;c++)f=b[c],e=f.selector+" ",void 0===d[e]&&(d[e]=f.needsContext?o(e,this).index(i)>=0:o.find(e,this,null,[i]).length),d[e]&&d.push(f);d.length&&g.push({elem:i,handlers:d})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY offsetX offsetY pageX pageY screenX screenY 
toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button;return null==a.pageX&&null!=b.clientX&&(c=a.target.ownerDocument||m,d=c.documentElement,e=c.body,a.pageX=b.clientX+(d&&d.scrollLeft||e&&e.scrollLeft||0)-(d&&d.clientLeft||e&&e.clientLeft||0),a.pageY=b.clientY+(d&&d.scrollTop||e&&e.scrollTop||0)-(d&&d.clientTop||e&&e.clientTop||0)),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},fix:function(a){if(a[o.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=W.test(e)?this.mouseHooks:V.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new o.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=m),3===a.target.nodeType&&(a.target=a.target.parentNode),g.filter?g.filter(a,f):a},special:{load:{noBubble:!0},focus:{trigger:function(){return this!==_()&&this.focus?(this.focus(),!1):void 0},delegateType:"focusin"},blur:{trigger:function(){return this===_()&&this.blur?(this.blur(),!1):void 0},delegateType:"focusout"},click:{trigger:function(){return"checkbox"===this.type&&this.click&&o.nodeName(this,"input")?(this.click(),!1):void 0},_default:function(a){return o.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=o.extend(new o.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?o.event.trigger(e,null,b):o.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},o.removeEvent=function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)},o.Event=function(a,b){return this instanceof o.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.getPreventDefault&&a.getPreventDefault()?Z:$):this.type=a,b&&o.extend(this,b),this.timeStamp=a&&a.timeStamp||o.now(),void(this[o.expando]=!0)):new o.Event(a,b)},o.Event.prototype={isDefaultPrevented:$,isPropagationStopped:$,isImmediatePropagationStopped:$,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=Z,a&&a.preventDefault&&a.preventDefault()},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=Z,a&&a.stopPropagation&&a.stopPropagation()},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=Z,this.stopPropagation()}},o.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){o.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!o.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),l.focusinBubbles||o.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){o.event.simulate(b,a.target,o.event.fix(a),!0)};o.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=L.access(d,b);e||d.addEventListener(a,c,!0),L.access(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=L.access(d,b)-1;e?L.access(d,b,e):(d.removeEventListener(a,c,!0),L.remove(d,b))}}}),o.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(g in a)this.on(g,b,c,a[g],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=$;else if(!d)return this;return 1===e&&(f=d,d=function(a){return o().off(a),f.apply(this,arguments)},d.guid=f.guid||(f.guid=o.guid++)),this.each(function(){o.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return 
this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,o(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 0),c===!1&&(c=$),this.each(function(){o.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){o.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?o.event.trigger(a,b,c,!0):void 0}});var ab=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,bb=/<([\w:]+)/,cb=/<|&#?\w+;/,db=/<(?:script|style|link)/i,eb=/checked\s*(?:[^=]|=\s*.checked.)/i,fb=/^$|\/(?:java|ecma)script/i,gb=/^true\/(.*)/,hb=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,ib={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};ib.optgroup=ib.option,ib.tbody=ib.tfoot=ib.colgroup=ib.caption=ib.thead,ib.th=ib.td;function jb(a,b){return o.nodeName(a,"table")&&o.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function kb(a){return a.type=(null!==a.getAttribute("type"))+"/"+a.type,a}function lb(a){var b=gb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function mb(a,b){for(var c=0,d=a.length;d>c;c++)L.set(a[c],"globalEval",!b||L.get(b[c],"globalEval"))}function nb(a,b){var c,d,e,f,g,h,i,j;if(1===b.nodeType){if(L.hasData(a)&&(f=L.access(a),g=L.set(b,f),j=f.events)){delete g.handle,g.events={};for(e in j)for(c=0,d=j[e].length;d>c;c++)o.event.add(b,e,j[e][c])}M.hasData(a)&&(h=M.access(a),i=o.extend({},h),M.set(b,i))}}function ob(a,b){var c=a.getElementsByTagName?a.getElementsByTagName(b||"*"):a.querySelectorAll?a.querySelectorAll(b||"*"):[];return void 0===b||b&&o.nodeName(a,b)?o.merge([a],c):c}function pb(a,b){var c=b.nodeName.toLowerCase();"input"===c&&T.test(a.type)?b.checked=a.checked:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}o.extend({clone:function(a,b,c){var d,e,f,g,h=a.cloneNode(!0),i=o.contains(a.ownerDocument,a);if(!(l.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||o.isXMLDoc(a)))for(g=ob(h),f=ob(a),d=0,e=f.length;e>d;d++)pb(f[d],g[d]);if(b)if(c)for(f=f||ob(a),g=g||ob(h),d=0,e=f.length;e>d;d++)nb(f[d],g[d]);else nb(a,h);return g=ob(h,"script"),g.length>0&&mb(g,!i&&ob(a,"script")),h},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,k=b.createDocumentFragment(),l=[],m=0,n=a.length;n>m;m++)if(e=a[m],e||0===e)if("object"===o.type(e))o.merge(l,e.nodeType?[e]:e);else if(cb.test(e)){f=f||k.appendChild(b.createElement("div")),g=(bb.exec(e)||["",""])[1].toLowerCase(),h=ib[g]||ib._default,f.innerHTML=h[1]+e.replace(ab,"<$1></$2>")+h[2],j=h[0];while(j--)f=f.lastChild;o.merge(l,f.childNodes),f=k.firstChild,f.textContent=""}else l.push(b.createTextNode(e));k.textContent="",m=0;while(e=l[m++])if((!d||-1===o.inArray(e,d))&&(i=o.contains(e.ownerDocument,e),f=ob(k.appendChild(e),"script"),i&&mb(f),c)){j=0;while(e=f[j++])fb.test(e.type||"")&&c.push(e)}return k},cleanData:function(a){for(var b,c,d,e,f,g,h=o.event.special,i=0;void 0!==(c=a[i]);i++){if(o.acceptData(c)&&(f=c[L.expando],f&&(b=L.cache[f]))){if(d=Object.keys(b.events||{}),d.length)for(g=0;void 
0!==(e=d[g]);g++)h[e]?o.event.remove(c,e):o.removeEvent(c,e,b.handle);L.cache[f]&&delete L.cache[f]}delete M.cache[c[M.expando]]}}}),o.fn.extend({text:function(a){return J(this,function(a){return void 0===a?o.text(this):this.empty().each(function(){(1===this.nodeType||11===this.nodeType||9===this.nodeType)&&(this.textContent=a)})},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=jb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=jb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var c,d=a?o.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||o.cleanData(ob(c)),c.parentNode&&(b&&o.contains(c.ownerDocument,c)&&mb(ob(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(o.cleanData(ob(a,!1)),a.textContent="");return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return o.clone(this,a,b)})},html:function(a){return J(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!db.test(a)&&!ib[(bb.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(ab,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(o.cleanData(ob(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,o.cleanData(ob(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,k=this.length,m=this,n=k-1,p=a[0],q=o.isFunction(p);if(q||k>1&&"string"==typeof p&&!l.checkClone&&eb.test(p))return this.each(function(c){var d=m.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(k&&(c=o.buildFragment(a,this[0].ownerDocument,!1,this),d=c.firstChild,1===c.childNodes.length&&(c=d),d)){for(f=o.map(ob(c,"script"),kb),g=f.length;k>j;j++)h=c,j!==n&&(h=o.clone(h,!0,!0),g&&o.merge(f,ob(h,"script"))),b.call(this[j],h,j);if(g)for(i=f[f.length-1].ownerDocument,o.map(f,lb),j=0;g>j;j++)h=f[j],fb.test(h.type||"")&&!L.access(h,"globalEval")&&o.contains(i,h)&&(h.src?o._evalUrl&&o._evalUrl(h.src):o.globalEval(h.textContent.replace(hb,"")))}return this}}),o.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){o.fn[a]=function(a){for(var c,d=[],e=o(a),g=e.length-1,h=0;g>=h;h++)c=h===g?this:this.clone(!0),o(e[h])[b](c),f.apply(d,c.get());return this.pushStack(d)}});var qb,rb={};function sb(b,c){var d=o(c.createElement(b)).appendTo(c.body),e=a.getDefaultComputedStyle?a.getDefaultComputedStyle(d[0]).display:o.css(d[0],"display");return d.detach(),e}function tb(a){var b=m,c=rb[a];return c||(c=sb(a,b),"none"!==c&&c||(qb=(qb||o("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=qb[0].contentDocument,b.write(),b.close(),c=sb(a,b),qb.detach()),rb[a]=c),c}var ub=/^margin/,vb=new RegExp("^("+Q+")(?!px)[a-z%]+$","i"),wb=function(a){return 
a.ownerDocument.defaultView.getComputedStyle(a,null)};function xb(a,b,c){var d,e,f,g,h=a.style;return c=c||wb(a),c&&(g=c.getPropertyValue(b)||c[b]),c&&(""!==g||o.contains(a.ownerDocument,a)||(g=o.style(a,b)),vb.test(g)&&ub.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0!==g?g+"":g}function yb(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var b,c,d="padding:0;margin:0;border:0;display:block;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box",e=m.documentElement,f=m.createElement("div"),g=m.createElement("div");g.style.backgroundClip="content-box",g.cloneNode(!0).style.backgroundClip="",l.clearCloneStyle="content-box"===g.style.backgroundClip,f.style.cssText="border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px",f.appendChild(g);function h(){g.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%",e.appendChild(f);var d=a.getComputedStyle(g,null);b="1%"!==d.top,c="4px"===d.width,e.removeChild(f)}a.getComputedStyle&&o.extend(l,{pixelPosition:function(){return h(),b},boxSizingReliable:function(){return null==c&&h(),c},reliableMarginRight:function(){var b,c=g.appendChild(m.createElement("div"));return c.style.cssText=g.style.cssText=d,c.style.marginRight=c.style.width="0",g.style.width="1px",e.appendChild(f),b=!parseFloat(a.getComputedStyle(c,null).marginRight),e.removeChild(f),g.innerHTML="",b}})}(),o.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var zb=/^(none|table(?!-c[ea]).+)/,Ab=new RegExp("^("+Q+")(.*)$","i"),Bb=new RegExp("^([+-])=("+Q+")","i"),Cb={position:"absolute",visibility:"hidden",display:"block"},Db={letterSpacing:0,fontWeight:400},Eb=["Webkit","O","Moz","ms"];function Fb(a,b){if(b in a)return b;var c=b[0].toUpperCase()+b.slice(1),d=b,e=Eb.length;while(e--)if(b=Eb[e]+c,b in a)return b;return d}function Gb(a,b,c){var d=Ab.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function Hb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=o.css(a,c+R[f],!0,e)),d?("content"===c&&(g-=o.css(a,"padding"+R[f],!0,e)),"margin"!==c&&(g-=o.css(a,"border"+R[f]+"Width",!0,e))):(g+=o.css(a,"padding"+R[f],!0,e),"padding"!==c&&(g+=o.css(a,"border"+R[f]+"Width",!0,e)));return g}function Ib(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=wb(a),g="border-box"===o.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=xb(a,b,f),(0>e||null==e)&&(e=a.style[b]),vb.test(e))return e;d=g&&(l.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Hb(a,b,c||(g?"border":"content"),d,f)+"px"}function Jb(a,b){for(var c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=L.get(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&S(d)&&(f[g]=L.access(d,"olddisplay",tb(d.nodeName)))):f[g]||(e=S(d),(c&&"none"!==c||!e)&&L.set(d,"olddisplay",e?c:o.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}o.extend({cssHooks:{opacity:{get:function(a,b){if(b){var 
c=xb(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":"cssFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=o.camelCase(b),i=a.style;return b=o.cssProps[h]||(o.cssProps[h]=Fb(i,h)),g=o.cssHooks[b]||o.cssHooks[h],void 0===c?g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b]:(f=typeof c,"string"===f&&(e=Bb.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(o.css(a,b)),f="number"),null!=c&&c===c&&("number"!==f||o.cssNumber[h]||(c+="px"),l.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),g&&"set"in g&&void 0===(c=g.set(a,c,d))||(i[b]="",i[b]=c)),void 0)}},css:function(a,b,c,d){var e,f,g,h=o.camelCase(b);return b=o.cssProps[h]||(o.cssProps[h]=Fb(a.style,h)),g=o.cssHooks[b]||o.cssHooks[h],g&&"get"in g&&(e=g.get(a,!0,c)),void 0===e&&(e=xb(a,b,d)),"normal"===e&&b in Db&&(e=Db[b]),""===c||c?(f=parseFloat(e),c===!0||o.isNumeric(f)?f||0:e):e}}),o.each(["height","width"],function(a,b){o.cssHooks[b]={get:function(a,c,d){return c?0===a.offsetWidth&&zb.test(o.css(a,"display"))?o.swap(a,Cb,function(){return Ib(a,b,d)}):Ib(a,b,d):void 0},set:function(a,c,d){var e=d&&wb(a);return Gb(a,c,d?Hb(a,b,d,"border-box"===o.css(a,"boxSizing",!1,e),e):0)}}}),o.cssHooks.marginRight=yb(l.reliableMarginRight,function(a,b){return b?o.swap(a,{display:"inline-block"},xb,[a,"marginRight"]):void 0}),o.each({margin:"",padding:"",border:"Width"},function(a,b){o.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+R[d]+b]=f[d]||f[d-2]||f[0];return e}},ub.test(a)||(o.cssHooks[a+b].set=Gb)}),o.fn.extend({css:function(a,b){return J(this,function(a,b,c){var d,e,f={},g=0;if(o.isArray(b)){for(d=wb(a),e=b.length;e>g;g++)f[b[g]]=o.css(a,b[g],!1,d);return f}return void 0!==c?o.style(a,b,c):o.css(a,b)},a,b,arguments.length>1)},show:function(){return Jb(this,!0)},hide:function(){return Jb(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){S(this)?o(this).show():o(this).hide()})}});function Kb(a,b,c,d,e){return new Kb.prototype.init(a,b,c,d,e)}o.Tween=Kb,Kb.prototype={constructor:Kb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(o.cssNumber[c]?"":"px")},cur:function(){var a=Kb.propHooks[this.prop];return a&&a.get?a.get(this):Kb.propHooks._default.get(this)},run:function(a){var b,c=Kb.propHooks[this.prop];return this.pos=b=this.options.duration?o.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Kb.propHooks._default.set(this),this}},Kb.prototype.init.prototype=Kb.prototype,Kb.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=o.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){o.fx.step[a.prop]?o.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[o.cssProps[a.prop]]||o.cssHooks[a.prop])?o.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Kb.propHooks.scrollTop=Kb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},o.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},o.fx=Kb.prototype.init,o.fx.step={};var 
Lb,Mb,Nb=/^(?:toggle|show|hide)$/,Ob=new RegExp("^(?:([+-])=|)("+Q+")([a-z%]*)$","i"),Pb=/queueHooks$/,Qb=[Vb],Rb={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=Ob.exec(b),f=e&&e[3]||(o.cssNumber[a]?"":"px"),g=(o.cssNumber[a]||"px"!==f&&+d)&&Ob.exec(o.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,o.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function Sb(){return setTimeout(function(){Lb=void 0}),Lb=o.now()}function Tb(a,b){var c,d=0,e={height:a};for(b=b?1:0;4>d;d+=2-b)c=R[d],e["margin"+c]=e["padding"+c]=a;return b&&(e.opacity=e.width=a),e}function Ub(a,b,c){for(var d,e=(Rb[b]||[]).concat(Rb["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function Vb(a,b,c){var d,e,f,g,h,i,j,k=this,l={},m=a.style,n=a.nodeType&&S(a),p=L.get(a,"fxshow");c.queue||(h=o._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,k.always(function(){k.always(function(){h.unqueued--,o.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[m.overflow,m.overflowX,m.overflowY],j=o.css(a,"display"),"none"===j&&(j=tb(a.nodeName)),"inline"===j&&"none"===o.css(a,"float")&&(m.display="inline-block")),c.overflow&&(m.overflow="hidden",k.always(function(){m.overflow=c.overflow[0],m.overflowX=c.overflow[1],m.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],Nb.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(n?"hide":"show")){if("show"!==e||!p||void 0===p[d])continue;n=!0}l[d]=p&&p[d]||o.style(a,d)}if(!o.isEmptyObject(l)){p?"hidden"in p&&(n=p.hidden):p=L.access(a,"fxshow",{}),f&&(p.hidden=!n),n?o(a).show():k.done(function(){o(a).hide()}),k.done(function(){var b;L.remove(a,"fxshow");for(b in l)o.style(a,b,l[b])});for(d in l)g=Ub(n?p[d]:0,d,k),d in p||(p[d]=g.start,n&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function Wb(a,b){var c,d,e,f,g;for(c in a)if(d=o.camelCase(c),e=b[d],f=a[c],o.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=o.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function Xb(a,b,c){var d,e,f=0,g=Qb.length,h=o.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=Lb||Sb(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:o.extend({},b),opts:o.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:Lb||Sb(),duration:c.duration,tweens:[],createTween:function(b,c){var d=o.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(Wb(k,j.opts.specialEasing);g>f;f++)if(d=Qb[f].call(j,a,k,j.opts))return d;return o.map(k,Ub,j),o.isFunction(j.opts.start)&&j.opts.start.call(a,j),o.fx.timer(o.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}o.Animation=o.extend(Xb,{tweener:function(a,b){o.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var 
c,d=0,e=a.length;e>d;d++)c=a[d],Rb[c]=Rb[c]||[],Rb[c].unshift(b)},prefilter:function(a,b){b?Qb.unshift(a):Qb.push(a)}}),o.speed=function(a,b,c){var d=a&&"object"==typeof a?o.extend({},a):{complete:c||!c&&b||o.isFunction(a)&&a,duration:a,easing:c&&b||b&&!o.isFunction(b)&&b};return d.duration=o.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in o.fx.speeds?o.fx.speeds[d.duration]:o.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){o.isFunction(d.old)&&d.old.call(this),d.queue&&o.dequeue(this,d.queue)},d},o.fn.extend({fadeTo:function(a,b,c,d){return this.filter(S).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=o.isEmptyObject(a),f=o.speed(b,c,d),g=function(){var b=Xb(this,o.extend({},a),f);(e||L.get(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=o.timers,g=L.get(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&Pb.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&o.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=L.get(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=o.timers,g=d?d.length:0;for(c.finish=!0,o.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),o.each(["toggle","show","hide"],function(a,b){var c=o.fn[b];o.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(Tb(b,!0),a,d,e)}}),o.each({slideDown:Tb("show"),slideUp:Tb("hide"),slideToggle:Tb("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){o.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),o.timers=[],o.fx.tick=function(){var a,b=0,c=o.timers;for(Lb=o.now();b<c.length;b++)a=c[b],a()||c[b]!==a||c.splice(b--,1);c.length||o.fx.stop(),Lb=void 0},o.fx.timer=function(a){o.timers.push(a),a()?o.fx.start():o.timers.pop()},o.fx.interval=13,o.fx.start=function(){Mb||(Mb=setInterval(o.fx.tick,o.fx.interval))},o.fx.stop=function(){clearInterval(Mb),Mb=null},o.fx.speeds={slow:600,fast:200,_default:400},o.fn.delay=function(a,b){return a=o.fx?o.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a=m.createElement("input"),b=m.createElement("select"),c=b.appendChild(m.createElement("option"));a.type="checkbox",l.checkOn=""!==a.value,l.optSelected=c.selected,b.disabled=!0,l.optDisabled=!c.disabled,a=m.createElement("input"),a.value="t",a.type="radio",l.radioValue="t"===a.value}();var Yb,Zb,$b=o.expr.attrHandle;o.fn.extend({attr:function(a,b){return J(this,o.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){o.removeAttr(this,a)})}}),o.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===U?o.prop(a,b,c):(1===f&&o.isXMLDoc(a)||(b=b.toLowerCase(),d=o.attrHooks[b]||(o.expr.match.bool.test(b)?Zb:Yb)),void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=o.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 
0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void o.removeAttr(a,b))},removeAttr:function(a,b){var c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=o.propFix[c]||c,o.expr.match.bool.test(c)&&(a[d]=!1),a.removeAttribute(c)},attrHooks:{type:{set:function(a,b){if(!l.radioValue&&"radio"===b&&o.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}}}),Zb={set:function(a,b,c){return b===!1?o.removeAttr(a,c):a.setAttribute(c,c),c}},o.each(o.expr.match.bool.source.match(/\w+/g),function(a,b){var c=$b[b]||o.find.attr;$b[b]=function(a,b,d){var e,f;
+return d||(f=$b[b],$b[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,$b[b]=f),e}});var _b=/^(?:input|select|textarea|button)$/i;o.fn.extend({prop:function(a,b){return J(this,o.prop,a,b,arguments.length>1)},removeProp:function(a){return this.each(function(){delete this[o.propFix[a]||a]})}}),o.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!o.isXMLDoc(a),f&&(b=o.propFix[b]||b,e=o.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){return a.hasAttribute("tabindex")||_b.test(a.nodeName)||a.href?a.tabIndex:-1}}}}),l.optSelected||(o.propHooks.selected={get:function(a){var b=a.parentNode;return b&&b.parentNode&&b.parentNode.selectedIndex,null}}),o.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){o.propFix[this.toLowerCase()]=this});var ac=/[\t\r\n\f]/g;o.fn.extend({addClass:function(a){var b,c,d,e,f,g,h="string"==typeof a&&a,i=0,j=this.length;if(o.isFunction(a))return this.each(function(b){o(this).addClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(E)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ac," "):" ")){f=0;while(e=b[f++])d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=o.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0===arguments.length||"string"==typeof a&&a,i=0,j=this.length;if(o.isFunction(a))return this.each(function(b){o(this).removeClass(a.call(this,b,this.className))});if(h)for(b=(a||"").match(E)||[];j>i;i++)if(c=this[i],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(ac," "):"")){f=0;while(e=b[f++])while(d.indexOf(" "+e+" ")>=0)d=d.replace(" "+e+" "," ");g=a?o.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):this.each(o.isFunction(a)?function(c){o(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if("string"===c){var b,d=0,e=o(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===U||"boolean"===c)&&(this.className&&L.set(this,"__className__",this.className),this.className=this.className||a===!1?"":L.get(this,"__className__")||"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(ac," ").indexOf(b)>=0)return!0;return!1}});var bc=/\r/g;o.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=o.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,o(this).val()):a,null==e?e="":"number"==typeof e?e+="":o.isArray(e)&&(e=o.map(e,function(a){return null==a?"":a+""})),b=o.valHooks[this.type]||o.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=o.valHooks[e.type]||o.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(bc,""):null==c?"":c)}}}),o.extend({valHooks:{select:{get:function(a){for(var 
b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(l.optDisabled?c.disabled:null!==c.getAttribute("disabled"))||c.parentNode.disabled&&o.nodeName(c.parentNode,"optgroup"))){if(b=o(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=o.makeArray(b),g=e.length;while(g--)d=e[g],(d.selected=o.inArray(o(d).val(),f)>=0)&&(c=!0);return c||(a.selectedIndex=-1),f}}}}),o.each(["radio","checkbox"],function(){o.valHooks[this]={set:function(a,b){return o.isArray(b)?a.checked=o.inArray(o(a).val(),b)>=0:void 0}},l.checkOn||(o.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})}),o.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){o.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),o.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var cc=o.now(),dc=/\?/;o.parseJSON=function(a){return JSON.parse(a+"")},o.parseXML=function(a){var b,c;if(!a||"string"!=typeof a)return null;try{c=new DOMParser,b=c.parseFromString(a,"text/xml")}catch(d){b=void 0}return(!b||b.getElementsByTagName("parsererror").length)&&o.error("Invalid XML: "+a),b};var ec,fc,gc=/#.*$/,hc=/([?&])_=[^&]*/,ic=/^(.*?):[ \t]*([^\r\n]*)$/gm,jc=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,kc=/^(?:GET|HEAD)$/,lc=/^\/\//,mc=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,nc={},oc={},pc="*/".concat("*");try{fc=location.href}catch(qc){fc=m.createElement("a"),fc.href="",fc=fc.href}ec=mc.exec(fc.toLowerCase())||[];function rc(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(E)||[];if(o.isFunction(c))while(d=f[e++])"+"===d[0]?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function sc(a,b,c,d){var e={},f=a===oc;function g(h){var i;return e[h]=!0,o.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function tc(a,b){var c,d,e=o.ajaxSettings.flatOptions||{};for(c in b)void 0!==b[c]&&((e[c]?a:d||(d={}))[c]=b[c]);return d&&o.extend(!0,a,d),a}function uc(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===d&&(d=a.mimeType||b.getResponseHeader("Content-Type"));if(d)for(e in h)if(h[e]&&h[e].test(d)){i.unshift(e);break}if(i[0]in c)f=i[0];else{for(e in c){if(!i[0]||a.converters[e+" "+i[0]]){f=e;break}g||(g=e)}f=f||g}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function vc(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else 
try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}o.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:fc,type:"GET",isLocal:jc.test(ec[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":pc,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":o.parseJSON,"text xml":o.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?tc(tc(a,o.ajaxSettings),b):tc(o.ajaxSettings,a)},ajaxPrefilter:rc(nc),ajaxTransport:rc(oc),ajax:function(a,b){"object"==typeof a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=o.ajaxSetup({},b),l=k.context||k,m=k.context&&(l.nodeType||l.jquery)?o(l):o.event,n=o.Deferred(),p=o.Callbacks("once memory"),q=k.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var b;if(2===t){if(!f){f={};while(b=ic.exec(e))f[b[1].toLowerCase()]=b[2]}b=f[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?e:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return c&&c.abort(b),x(0,b),this}};if(n.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||fc)+"").replace(gc,"").replace(lc,ec[1]+"//"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=o.trim(k.dataType||"*").toLowerCase().match(E)||[""],null==k.crossDomain&&(h=mc.exec(k.url.toLowerCase()),k.crossDomain=!(!h||h[1]===ec[1]&&h[2]===ec[2]&&(h[3]||("http:"===h[1]?"80":"443"))===(ec[3]||("http:"===ec[1]?"80":"443")))),k.data&&k.processData&&"string"!=typeof k.data&&(k.data=o.param(k.data,k.traditional)),sc(nc,k,b,v),2===t)return v;i=k.global,i&&0===o.active++&&o.event.trigger("ajaxStart"),k.type=k.type.toUpperCase(),k.hasContent=!kc.test(k.type),d=k.url,k.hasContent||(k.data&&(d=k.url+=(dc.test(d)?"&":"?")+k.data,delete k.data),k.cache===!1&&(k.url=hc.test(d)?d.replace(hc,"$1_="+cc++):d+(dc.test(d)?"&":"?")+"_="+cc++)),k.ifModified&&(o.lastModified[d]&&v.setRequestHeader("If-Modified-Since",o.lastModified[d]),o.etag[d]&&v.setRequestHeader("If-None-Match",o.etag[d])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",k.contentType),v.setRequestHeader("Accept",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+("*"!==k.dataTypes[0]?", "+pc+"; q=0.01":""):k.accepts["*"]);for(j in k.headers)v.setRequestHeader(j,k.headers[j]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u="abort";for(j in{success:1,error:1,complete:1})v[j](k[j]);if(c=sc(oc,k,b,v)){v.readyState=1,i&&m.trigger("ajaxSend",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort("timeout")},k.timeout));try{t=1,c.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,"No Transport");function x(a,b,f,h){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),c=void 
0,e=h||"",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,f&&(u=uc(k,v,f)),u=vc(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader("Last-Modified"),w&&(o.lastModified[d]=w),w=v.getResponseHeader("etag"),w&&(o.etag[d]=w)),204===a||"HEAD"===k.type?x="nocontent":304===a?x="notmodified":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x="error",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+"",j?n.resolveWith(l,[r,x,v]):n.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,i&&m.trigger(j?"ajaxSuccess":"ajaxError",[v,k,j?r:s]),p.fireWith(l,[v,x]),i&&(m.trigger("ajaxComplete",[v,k]),--o.active||o.event.trigger("ajaxStop")))}return v},getJSON:function(a,b,c){return o.get(a,b,c,"json")},getScript:function(a,b){return o.get(a,void 0,b,"script")}}),o.each(["get","post"],function(a,b){o[b]=function(a,c,d,e){return o.isFunction(c)&&(e=e||d,d=c,c=void 0),o.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),o.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){o.fn[b]=function(a){return this.on(b,a)}}),o._evalUrl=function(a){return o.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},o.fn.extend({wrapAll:function(a){var b;return o.isFunction(a)?this.each(function(b){o(this).wrapAll(a.call(this,b))}):(this[0]&&(b=o(a,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstElementChild)a=a.firstElementChild;return a}).append(this)),this)},wrapInner:function(a){return this.each(o.isFunction(a)?function(b){o(this).wrapInner(a.call(this,b))}:function(){var b=o(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=o.isFunction(a);return this.each(function(c){o(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){o.nodeName(this,"body")||o(this).replaceWith(this.childNodes)}).end()}}),o.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0},o.expr.filters.visible=function(a){return!o.expr.filters.hidden(a)};var wc=/%20/g,xc=/\[\]$/,yc=/\r?\n/g,zc=/^(?:submit|button|image|reset|file)$/i,Ac=/^(?:input|select|textarea|keygen)/i;function Bc(a,b,c,d){var e;if(o.isArray(b))o.each(b,function(b,e){c||xc.test(a)?d(a,e):Bc(a+"["+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==o.type(b))d(a,b);else for(e in b)Bc(a+"["+e+"]",b[e],c,d)}o.param=function(a,b){var c,d=[],e=function(a,b){b=o.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=o.ajaxSettings&&o.ajaxSettings.traditional),o.isArray(a)||a.jquery&&!o.isPlainObject(a))o.each(a,function(){e(this.name,this.value)});else for(c in a)Bc(c,a[c],b,e);return d.join("&").replace(wc,"+")},o.fn.extend({serialize:function(){return o.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=o.prop(this,"elements");return a?o.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!o(this).is(":disabled")&&Ac.test(this.nodeName)&&!zc.test(a)&&(this.checked||!T.test(a))}).map(function(a,b){var c=o(this).val();return null==c?null:o.isArray(c)?o.map(c,function(a){return{name:b.name,value:a.replace(yc,"\r\n")}}):{name:b.name,value:c.replace(yc,"\r\n")}}).get()}}),o.ajaxSettings.xhr=function(){try{return new XMLHttpRequest}catch(a){}};var Cc=0,Dc={},Ec={0:200,1223:204},Fc=o.ajaxSettings.xhr();a.ActiveXObject&&o(a).on("unload",function(){for(var a in Dc)Dc[a]()}),l.cors=!!Fc&&"withCredentials"in Fc,l.ajax=Fc=!!Fc,o.ajaxTransport(function(a){var b;return 
l.cors||Fc&&!a.crossDomain?{send:function(c,d){var e,f=a.xhr(),g=++Cc;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c["X-Requested-With"]||(c["X-Requested-With"]="XMLHttpRequest");for(e in c)f.setRequestHeader(e,c[e]);b=function(a){return function(){b&&(delete Dc[g],b=f.onload=f.onerror=null,"abort"===a?f.abort():"error"===a?d(f.status,f.statusText):d(Ec[f.status]||f.status,f.statusText,"string"==typeof f.responseText?{text:f.responseText}:void 0,f.getAllResponseHeaders()))}},f.onload=b(),f.onerror=b("error"),b=Dc[g]=b("abort"),f.send(a.hasContent&&a.data||null)},abort:function(){b&&b()}}:void 0}),o.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(a){return o.globalEval(a),a}}}),o.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET")}),o.ajaxTransport("script",function(a){if(a.crossDomain){var b,c;return{send:function(d,e){b=o("<script>").prop({async:!0,charset:a.scriptCharset,src:a.url}).on("load error",c=function(a){b.remove(),c=null,a&&e("error"===a.type?404:200,a.type)}),m.head.appendChild(b[0])},abort:function(){c&&c()}}}});var Gc=[],Hc=/(=)\?(?=&|$)|\?\?/;o.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=Gc.pop()||o.expando+"_"+cc++;return this[a]=!0,a}}),o.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(Hc.test(b.url)?"url":"string"==typeof b.data&&!(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&Hc.test(b.data)&&"data");return h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=o.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(Hc,"$1"+e):b.jsonp!==!1&&(b.url+=(dc.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||o.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,Gc.push(e)),g&&o.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),o.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||m;var d=v.exec(a),e=!c&&[];return d?[b.createElement(d[1])]:(d=o.buildFragment([a],b,e),e&&e.length&&o(e).remove(),o.merge([],d.childNodes))};var Ic=o.fn.load;o.fn.load=function(a,b,c){if("string"!=typeof a&&Ic)return Ic.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>=0&&(d=a.slice(h),a=a.slice(0,h)),o.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(e="POST"),g.length>0&&o.ajax({url:a,type:e,dataType:"html",data:b}).done(function(a){f=arguments,g.html(d?o("<div>").append(o.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,f||[a.responseText,b,a])}),this},o.expr.filters.animated=function(a){return o.grep(o.timers,function(b){return a===b.elem}).length};var Jc=a.document.documentElement;function Kc(a){return o.isWindow(a)?a:9===a.nodeType&&a.defaultView}o.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=o.css(a,"position"),l=o(a),m={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=o.css(a,"top"),i=o.css(a,"left"),j=("absolute"===k||"fixed"===k)&&(f+i).indexOf("auto")>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),o.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(m.top=b.top-h.top+g),null!=b.left&&(m.left=b.left-h.left+e),"using"in 
b?b.using.call(a,m):l.css(m)}},o.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){o.offset.setOffset(this,a,b)});var b,c,d=this[0],e={top:0,left:0},f=d&&d.ownerDocument;if(f)return b=f.documentElement,o.contains(b,d)?(typeof d.getBoundingClientRect!==U&&(e=d.getBoundingClientRect()),c=Kc(f),{top:e.top+c.pageYOffset-b.clientTop,left:e.left+c.pageXOffset-b.clientLeft}):e},position:function(){if(this[0]){var a,b,c=this[0],d={top:0,left:0};return"fixed"===o.css(c,"position")?b=c.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),o.nodeName(a[0],"html")||(d=a.offset()),d.top+=o.css(a[0],"borderTopWidth",!0),d.left+=o.css(a[0],"borderLeftWidth",!0)),{top:b.top-d.top-o.css(c,"marginTop",!0),left:b.left-d.left-o.css(c,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||Jc;while(a&&!o.nodeName(a,"html")&&"static"===o.css(a,"position"))a=a.offsetParent;return a||Jc})}}),o.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(b,c){var d="pageYOffset"===c;o.fn[b]=function(e){return J(this,function(b,e,f){var g=Kc(b);return void 0===f?g?g[c]:b[e]:void(g?g.scrollTo(d?a.pageXOffset:f,d?f:a.pageYOffset):b[e]=f)},b,e,arguments.length,null)}}),o.each(["top","left"],function(a,b){o.cssHooks[b]=yb(l.pixelPosition,function(a,c){return c?(c=xb(a,b),vb.test(c)?o(a).position()[b]+"px":c):void 0})}),o.each({Height:"height",Width:"width"},function(a,b){o.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){o.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof d),g=c||(d===!0||e===!0?"margin":"border");return J(this,function(b,c,d){var e;return o.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?o.css(b,c,g):o.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),o.fn.size=function(){return this.length},o.fn.andSelf=o.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return o});var Lc=a.jQuery,Mc=a.$;return o.noConflict=function(b){return a.$===o&&(a.$=Mc),b&&a.jQuery===o&&(a.jQuery=Lc),o},typeof b===U&&(a.jQuery=a.$=o),o}); \ No newline at end of file
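The hunk above completes the vendored jQuery build: the fx/animation queue, the attr/prop/class/val helpers, the ajax transport layer, and the offset/dimension utilities. As a minimal usage sketch of APIs defined in this hunk (the selector and URL are hypothetical; assumes this build is loaded):

    // Animate via the fx queue, then fetch JSON through the ajax layer added above.
    $('#panel').slideToggle(400);                     // preset from the slideDown/slideUp/slideToggle table
    $.getJSON('/status.json').done(function (data) {  // thin wrapper over o.ajax
      $('#panel').attr('data-status', data.state);    // o.attr, routed through attrHooks
    });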
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery-ui-1_10_3-custom.min.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery-ui-1_10_3-custom.min.js
new file mode 100644
index 00000000000..d9fffa07332
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery-ui-1_10_3-custom.min.js
@@ -0,0 +1,5 @@
+/*! jQuery UI - v1.10.3 - 2013-11-27
+* http://jqueryui.com
+* Includes: jquery.ui.effect.js, jquery.ui.effect-pulsate.js
+* Copyright 2013 jQuery Foundation and other contributors; Licensed MIT */
+(function(e,t){var i="ui-effects-";e.effects={effect:{}},function(e,t){function i(e,t,i){var s=c[t.type]||{};return null==e?i||!t.def?null:t.def:(e=s.floor?~~e:parseFloat(e),isNaN(e)?t.def:s.mod?(e+s.mod)%s.mod:0>e?0:e>s.max?s.max:e)}function s(i){var s=l(),a=s._rgba=[];return i=i.toLowerCase(),f(h,function(e,n){var r,o=n.re.exec(i),h=o&&n.parse(o),l=n.space||"rgba";return h?(r=s[l](h),s[u[l].cache]=r[u[l].cache],a=s._rgba=r._rgba,!1):t}),a.length?("0,0,0,0"===a.join()&&e.extend(a,n.transparent),s):n[i]}function a(e,t,i){return i=(i+1)%1,1>6*i?e+6*(t-e)*i:1>2*i?t:2>3*i?e+6*(t-e)*(2/3-i):e}var n,r="backgroundColor borderBottomColor borderLeftColor borderRightColor borderTopColor color columnRuleColor outlineColor textDecorationColor textEmphasisColor",o=/^([\-+])=\s*(\d+\.?\d*)/,h=[{re:/rgba?\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(e){return[e[1],e[2],e[3],e[4]]}},{re:/rgba?\(\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,parse:function(e){return[2.55*e[1],2.55*e[2],2.55*e[3],e[4]]}},{re:/#([a-f0-9]{2})([a-f0-9]{2})([a-f0-9]{2})/,parse:function(e){return[parseInt(e[1],16),parseInt(e[2],16),parseInt(e[3],16)]}},{re:/#([a-f0-9])([a-f0-9])([a-f0-9])/,parse:function(e){return[parseInt(e[1]+e[1],16),parseInt(e[2]+e[2],16),parseInt(e[3]+e[3],16)]}},{re:/hsla?\(\s*(\d+(?:\.\d+)?)\s*,\s*(\d+(?:\.\d+)?)\%\s*,\s*(\d+(?:\.\d+)?)\%\s*(?:,\s*(\d?(?:\.\d+)?)\s*)?\)/,space:"hsla",parse:function(e){return[e[1],e[2]/100,e[3]/100,e[4]]}}],l=e.Color=function(t,i,s,a){return new e.Color.fn.parse(t,i,s,a)},u={rgba:{props:{red:{idx:0,type:"byte"},green:{idx:1,type:"byte"},blue:{idx:2,type:"byte"}}},hsla:{props:{hue:{idx:0,type:"degrees"},saturation:{idx:1,type:"percent"},lightness:{idx:2,type:"percent"}}}},c={"byte":{floor:!0,max:255},percent:{max:1},degrees:{mod:360,floor:!0}},d=l.support={},p=e("<p>")[0],f=e.each;p.style.cssText="background-color:rgba(1,1,1,.5)",d.rgba=p.style.backgroundColor.indexOf("rgba")>-1,f(u,function(e,t){t.cache="_"+e,t.props.alpha={idx:3,type:"percent",def:1}}),l.fn=e.extend(l.prototype,{parse:function(a,r,o,h){if(a===t)return this._rgba=[null,null,null,null],this;(a.jquery||a.nodeType)&&(a=e(a).css(r),r=t);var c=this,d=e.type(a),p=this._rgba=[];return r!==t&&(a=[a,r,o,h],d="array"),"string"===d?this.parse(s(a)||n._default):"array"===d?(f(u.rgba.props,function(e,t){p[t.idx]=i(a[t.idx],t)}),this):"object"===d?(a instanceof l?f(u,function(e,t){a[t.cache]&&(c[t.cache]=a[t.cache].slice())}):f(u,function(t,s){var n=s.cache;f(s.props,function(e,t){if(!c[n]&&s.to){if("alpha"===e||null==a[e])return;c[n]=s.to(c._rgba)}c[n][t.idx]=i(a[e],t,!0)}),c[n]&&0>e.inArray(null,c[n].slice(0,3))&&(c[n][3]=1,s.from&&(c._rgba=s.from(c[n])))}),this):t},is:function(e){var i=l(e),s=!0,a=this;return f(u,function(e,n){var r,o=i[n.cache];return o&&(r=a[n.cache]||n.to&&n.to(a._rgba)||[],f(n.props,function(e,i){return null!=o[i.idx]?s=o[i.idx]===r[i.idx]:t})),s}),s},_space:function(){var e=[],t=this;return f(u,function(i,s){t[s.cache]&&e.push(i)}),e.pop()},transition:function(e,t){var s=l(e),a=s._space(),n=u[a],r=0===this.alpha()?l("transparent"):this,o=r[n.cache]||n.to(r._rgba),h=o.slice();return s=s[n.cache],f(n.props,function(e,a){var n=a.idx,r=o[n],l=s[n],u=c[a.type]||{};null!==l&&(null===r?h[n]=l:(u.mod&&(l-r>u.mod/2?r+=u.mod:r-l>u.mod/2&&(r-=u.mod)),h[n]=i((l-r)*t+r,a)))}),this[a](h)},blend:function(t){if(1===this._rgba[3])return this;var i=this._rgba.slice(),s=i.pop(),a=l(t)._rgba;return 
l(e.map(i,function(e,t){return(1-s)*a[t]+s*e}))},toRgbaString:function(){var t="rgba(",i=e.map(this._rgba,function(e,t){return null==e?t>2?1:0:e});return 1===i[3]&&(i.pop(),t="rgb("),t+i.join()+")"},toHslaString:function(){var t="hsla(",i=e.map(this.hsla(),function(e,t){return null==e&&(e=t>2?1:0),t&&3>t&&(e=Math.round(100*e)+"%"),e});return 1===i[3]&&(i.pop(),t="hsl("),t+i.join()+")"},toHexString:function(t){var i=this._rgba.slice(),s=i.pop();return t&&i.push(~~(255*s)),"#"+e.map(i,function(e){return e=(e||0).toString(16),1===e.length?"0"+e:e}).join("")},toString:function(){return 0===this._rgba[3]?"transparent":this.toRgbaString()}}),l.fn.parse.prototype=l.fn,u.hsla.to=function(e){if(null==e[0]||null==e[1]||null==e[2])return[null,null,null,e[3]];var t,i,s=e[0]/255,a=e[1]/255,n=e[2]/255,r=e[3],o=Math.max(s,a,n),h=Math.min(s,a,n),l=o-h,u=o+h,c=.5*u;return t=h===o?0:s===o?60*(a-n)/l+360:a===o?60*(n-s)/l+120:60*(s-a)/l+240,i=0===l?0:.5>=c?l/u:l/(2-u),[Math.round(t)%360,i,c,null==r?1:r]},u.hsla.from=function(e){if(null==e[0]||null==e[1]||null==e[2])return[null,null,null,e[3]];var t=e[0]/360,i=e[1],s=e[2],n=e[3],r=.5>=s?s*(1+i):s+i-s*i,o=2*s-r;return[Math.round(255*a(o,r,t+1/3)),Math.round(255*a(o,r,t)),Math.round(255*a(o,r,t-1/3)),n]},f(u,function(s,a){var n=a.props,r=a.cache,h=a.to,u=a.from;l.fn[s]=function(s){if(h&&!this[r]&&(this[r]=h(this._rgba)),s===t)return this[r].slice();var a,o=e.type(s),c="array"===o||"object"===o?s:arguments,d=this[r].slice();return f(n,function(e,t){var s=c["object"===o?e:t.idx];null==s&&(s=d[t.idx]),d[t.idx]=i(s,t)}),u?(a=l(u(d)),a[r]=d,a):l(d)},f(n,function(t,i){l.fn[t]||(l.fn[t]=function(a){var n,r=e.type(a),h="alpha"===t?this._hsla?"hsla":"rgba":s,l=this[h](),u=l[i.idx];return"undefined"===r?u:("function"===r&&(a=a.call(this,u),r=e.type(a)),null==a&&i.empty?this:("string"===r&&(n=o.exec(a),n&&(a=u+parseFloat(n[2])*("+"===n[1]?1:-1))),l[i.idx]=a,this[h](l)))})})}),l.hook=function(t){var i=t.split(" ");f(i,function(t,i){e.cssHooks[i]={set:function(t,a){var n,r,o="";if("transparent"!==a&&("string"!==e.type(a)||(n=s(a)))){if(a=l(n||a),!d.rgba&&1!==a._rgba[3]){for(r="backgroundColor"===i?t.parentNode:t;(""===o||"transparent"===o)&&r&&r.style;)try{o=e.css(r,"backgroundColor"),r=r.parentNode}catch(h){}a=a.blend(o&&"transparent"!==o?o:"_default")}a=a.toRgbaString()}try{t.style[i]=a}catch(h){}}},e.fx.step[i]=function(t){t.colorInit||(t.start=l(t.elem,i),t.end=l(t.end),t.colorInit=!0),e.cssHooks[i].set(t.elem,t.start.transition(t.end,t.pos))}})},l.hook(r),e.cssHooks.borderColor={expand:function(e){var t={};return f(["Top","Right","Bottom","Left"],function(i,s){t["border"+s+"Color"]=e}),t}},n=e.Color.names={aqua:"#00ffff",black:"#000000",blue:"#0000ff",fuchsia:"#ff00ff",gray:"#808080",green:"#008000",lime:"#00ff00",maroon:"#800000",navy:"#000080",olive:"#808000",purple:"#800080",red:"#ff0000",silver:"#c0c0c0",teal:"#008080",white:"#ffffff",yellow:"#ffff00",transparent:[null,null,null,0],_default:"#ffffff"}}(jQuery),function(){function i(t){var i,s,a=t.ownerDocument.defaultView?t.ownerDocument.defaultView.getComputedStyle(t,null):t.currentStyle,n={};if(a&&a.length&&a[0]&&a[a[0]])for(s=a.length;s--;)i=a[s],"string"==typeof a[i]&&(n[e.camelCase(i)]=a[i]);else for(i in a)"string"==typeof a[i]&&(n[i]=a[i]);return n}function s(t,i){var s,a,r={};for(s in i)a=i[s],t[s]!==a&&(n[s]||(e.fx.step[s]||!isNaN(parseFloat(a)))&&(r[s]=a));return r}var 
a=["add","remove","toggle"],n={border:1,borderBottom:1,borderColor:1,borderLeft:1,borderRight:1,borderTop:1,borderWidth:1,margin:1,padding:1};e.each(["borderLeftStyle","borderRightStyle","borderBottomStyle","borderTopStyle"],function(t,i){e.fx.step[i]=function(e){("none"!==e.end&&!e.setAttr||1===e.pos&&!e.setAttr)&&(jQuery.style(e.elem,i,e.end),e.setAttr=!0)}}),e.fn.addBack||(e.fn.addBack=function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}),e.effects.animateClass=function(t,n,r,o){var h=e.speed(n,r,o);return this.queue(function(){var n,r=e(this),o=r.attr("class")||"",l=h.children?r.find("*").addBack():r;l=l.map(function(){var t=e(this);return{el:t,start:i(this)}}),n=function(){e.each(a,function(e,i){t[i]&&r[i+"Class"](t[i])})},n(),l=l.map(function(){return this.end=i(this.el[0]),this.diff=s(this.start,this.end),this}),r.attr("class",o),l=l.map(function(){var t=this,i=e.Deferred(),s=e.extend({},h,{queue:!1,complete:function(){i.resolve(t)}});return this.el.animate(this.diff,s),i.promise()}),e.when.apply(e,l.get()).done(function(){n(),e.each(arguments,function(){var t=this.el;e.each(this.diff,function(e){t.css(e,"")})}),h.complete.call(r[0])})})},e.fn.extend({addClass:function(t){return function(i,s,a,n){return s?e.effects.animateClass.call(this,{add:i},s,a,n):t.apply(this,arguments)}}(e.fn.addClass),removeClass:function(t){return function(i,s,a,n){return arguments.length>1?e.effects.animateClass.call(this,{remove:i},s,a,n):t.apply(this,arguments)}}(e.fn.removeClass),toggleClass:function(i){return function(s,a,n,r,o){return"boolean"==typeof a||a===t?n?e.effects.animateClass.call(this,a?{add:s}:{remove:s},n,r,o):i.apply(this,arguments):e.effects.animateClass.call(this,{toggle:s},a,n,r)}}(e.fn.toggleClass),switchClass:function(t,i,s,a,n){return e.effects.animateClass.call(this,{add:i,remove:t},s,a,n)}})}(),function(){function s(t,i,s,a){return e.isPlainObject(t)&&(i=t,t=t.effect),t={effect:t},null==i&&(i={}),e.isFunction(i)&&(a=i,s=null,i={}),("number"==typeof i||e.fx.speeds[i])&&(a=s,s=i,i={}),e.isFunction(s)&&(a=s,s=null),i&&e.extend(t,i),s=s||i.duration,t.duration=e.fx.off?0:"number"==typeof s?s:s in e.fx.speeds?e.fx.speeds[s]:e.fx.speeds._default,t.complete=a||i.complete,t}function a(t){return!t||"number"==typeof t||e.fx.speeds[t]?!0:"string"!=typeof t||e.effects.effect[t]?e.isFunction(t)?!0:"object"!=typeof t||t.effect?!1:!0:!0}e.extend(e.effects,{version:"1.10.3",save:function(e,t){for(var s=0;t.length>s;s++)null!==t[s]&&e.data(i+t[s],e[0].style[t[s]])},restore:function(e,s){var a,n;for(n=0;s.length>n;n++)null!==s[n]&&(a=e.data(i+s[n]),a===t&&(a=""),e.css(s[n],a))},setMode:function(e,t){return"toggle"===t&&(t=e.is(":hidden")?"show":"hide"),t},getBaseline:function(e,t){var i,s;switch(e[0]){case"top":i=0;break;case"middle":i=.5;break;case"bottom":i=1;break;default:i=e[0]/t.height}switch(e[1]){case"left":s=0;break;case"center":s=.5;break;case"right":s=1;break;default:s=e[1]/t.width}return{x:s,y:i}},createWrapper:function(t){if(t.parent().is(".ui-effects-wrapper"))return t.parent();var i={width:t.outerWidth(!0),height:t.outerHeight(!0),"float":t.css("float")},s=e("<div></div>").addClass("ui-effects-wrapper").css({fontSize:"100%",background:"transparent",border:"none",margin:0,padding:0}),a={width:t.width(),height:t.height()},n=document.activeElement;try{n.id}catch(r){n=document.body}return 
t.wrap(s),(t[0]===n||e.contains(t[0],n))&&e(n).focus(),s=t.parent(),"static"===t.css("position")?(s.css({position:"relative"}),t.css({position:"relative"})):(e.extend(i,{position:t.css("position"),zIndex:t.css("z-index")}),e.each(["top","left","bottom","right"],function(e,s){i[s]=t.css(s),isNaN(parseInt(i[s],10))&&(i[s]="auto")}),t.css({position:"relative",top:0,left:0,right:"auto",bottom:"auto"})),t.css(a),s.css(i).show()},removeWrapper:function(t){var i=document.activeElement;return t.parent().is(".ui-effects-wrapper")&&(t.parent().replaceWith(t),(t[0]===i||e.contains(t[0],i))&&e(i).focus()),t},setTransition:function(t,i,s,a){return a=a||{},e.each(i,function(e,i){var n=t.cssUnit(i);n[0]>0&&(a[i]=n[0]*s+n[1])}),a}}),e.fn.extend({effect:function(){function t(t){function s(){e.isFunction(n)&&n.call(a[0]),e.isFunction(t)&&t()}var a=e(this),n=i.complete,o=i.mode;(a.is(":hidden")?"hide"===o:"show"===o)?(a[o](),s()):r.call(a[0],i,s)}var i=s.apply(this,arguments),a=i.mode,n=i.queue,r=e.effects.effect[i.effect];return e.fx.off||!r?a?this[a](i.duration,i.complete):this.each(function(){i.complete&&i.complete.call(this)}):n===!1?this.each(t):this.queue(n||"fx",t)},show:function(e){return function(t){if(a(t))return e.apply(this,arguments);var i=s.apply(this,arguments);return i.mode="show",this.effect.call(this,i)}}(e.fn.show),hide:function(e){return function(t){if(a(t))return e.apply(this,arguments);var i=s.apply(this,arguments);return i.mode="hide",this.effect.call(this,i)}}(e.fn.hide),toggle:function(e){return function(t){if(a(t)||"boolean"==typeof t)return e.apply(this,arguments);var i=s.apply(this,arguments);return i.mode="toggle",this.effect.call(this,i)}}(e.fn.toggle),cssUnit:function(t){var i=this.css(t),s=[];return e.each(["em","px","%","pt"],function(e,t){i.indexOf(t)>0&&(s=[parseFloat(i),t])}),s}})}(),function(){var t={};e.each(["Quad","Cubic","Quart","Quint","Expo"],function(e,i){t[i]=function(t){return Math.pow(t,e+2)}}),e.extend(t,{Sine:function(e){return 1-Math.cos(e*Math.PI/2)},Circ:function(e){return 1-Math.sqrt(1-e*e)},Elastic:function(e){return 0===e||1===e?e:-Math.pow(2,8*(e-1))*Math.sin((80*(e-1)-7.5)*Math.PI/15)},Back:function(e){return e*e*(3*e-2)},Bounce:function(e){for(var t,i=4;((t=Math.pow(2,--i))-1)/11>e;);return 1/Math.pow(4,3-i)-7.5625*Math.pow((3*t-2)/22-e,2)}}),e.each(t,function(t,i){e.easing["easeIn"+t]=i,e.easing["easeOut"+t]=function(e){return 1-i(1-e)},e.easing["easeInOut"+t]=function(e){return.5>e?i(2*e)/2:1-i(-2*e+2)/2}})}()})(jQuery);(function(e){e.effects.effect.pulsate=function(t,i){var s,a=e(this),n=e.effects.setMode(a,t.mode||"show"),r="show"===n,o="hide"===n,l=r||"hide"===n,h=2*(t.times||5)+(l?1:0),u=t.duration/h,d=0,c=a.queue(),p=c.length;for((r||!a.is(":visible"))&&(a.css("opacity",0).show(),d=1),s=1;h>s;s++)a.animate({opacity:d},u,t.easing),d=1-d;a.animate({opacity:d},u,t.easing),a.queue(function(){o&&a.hide(),i()}),p>1&&c.splice.apply(c,[1,0].concat(c.splice(p,h+1))),a.dequeue()}})(jQuery); \ No newline at end of file
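This custom jQuery UI 1.10.3 build carries only the effects core (the $.Color model, animated class changes, the extended easings) plus the pulsate effect registered at the end. A minimal sketch (assumes jQuery and this file are loaded; #alert is a hypothetical element):

    // Flash the element five times over 1.5s using the pulsate effect defined above.
    $('#alert').effect('pulsate', { times: 5 }, 1500);
    // The effects core also makes class changes animatable:
    $('#alert').toggleClass('highlighted', 300);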
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery.pretty-text-diff.min.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery.pretty-text-diff.min.js
new file mode 100644
index 00000000000..d2c44be1fc8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery.pretty-text-diff.min.js
@@ -0,0 +1,5 @@
+/*
+@preserve jQuery.PrettyTextDiff 1.0.2
+See https://github.com/arnab/jQuery.PrettyTextDiff/
+*/
+(function(){var $;$=jQuery;$.fn.extend({prettyTextDiff:function(options){var dmp,settings;settings={originalContainer:".original",changedContainer:".changed",diffContainer:".diff",cleanup:true,debug:false};settings=$.extend(settings,options);$.fn.prettyTextDiff.debug("Options: ",settings,settings);dmp=new diff_match_patch;return this.each(function(){var changed,diff_as_html,diffs,original;original=$(settings.originalContainer,this).text();$.fn.prettyTextDiff.debug("Original text found: ",original,settings);changed=$(settings.changedContainer,this).text();$.fn.prettyTextDiff.debug("Changed text found: ",changed,settings);diffs=dmp.diff_main(original,changed);if(settings.cleanup){dmp.diff_cleanupSemantic(diffs)}$.fn.prettyTextDiff.debug("Diffs: ",diffs,settings);diff_as_html=diffs.map(function(diff){return $.fn.prettyTextDiff.createHTML(diff)});$(settings.diffContainer,this).html(diff_as_html.join(""));return this})}});$.fn.prettyTextDiff.debug=function(message,object,settings){if(settings.debug){return console.log(message,object)}};$.fn.prettyTextDiff.createHTML=function(diff){var data,html,operation,pattern_amp,pattern_gt,pattern_lt,pattern_para,text;html=[];pattern_amp=/&/g;pattern_lt=/</g;pattern_gt=/>/g;pattern_para=/\n/g;operation=diff[0],data=diff[1];text=data.replace(pattern_amp,"&amp;").replace(pattern_lt,"&lt;").replace(pattern_gt,"&gt;").replace(pattern_para,"<br>");switch(operation){case DIFF_INSERT:return"<ins>"+text+"</ins>";case DIFF_DELETE:return"<del>"+text+"</del>";case DIFF_EQUAL:return"<span>"+text+"</span>"}}}).call(this); \ No newline at end of file
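jQuery.PrettyTextDiff wraps Google's diff_match_patch (a separate dependency that must be loaded first — note the `new diff_match_patch` call above) and writes ins/del/span markup into a container. A minimal sketch using the plugin's default selectors, visible in its settings object (.original, .changed, .diff); the table id is hypothetical:

    // For each row, diff the text of .original against .changed and render HTML into .diff.
    $('#results tr').prettyTextDiff({ cleanup: true });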
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery.tipsy.min.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery.tipsy.min.js
new file mode 100644
index 00000000000..b1f97639fa2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/jquery.tipsy.min.js
@@ -0,0 +1,5 @@
+// tipsy, facebook style tooltips for jquery
+// version 1.0.0a
+// (c) 2008-2010 jason frame [jason@onehackoranother.com]
+// released under the MIT license
+(function(e){function t(e,t){return typeof e=="function"?e.call(t):e}function n(e){while(e=e.parentNode){if(e==document)return true}return false}function r(t,n){this.$element=e(t);this.options=n;this.enabled=true;this.fixTitle()}r.prototype={show:function(){var n=this.getTitle();if(n&&this.enabled){var r=this.tip();r.find(".tipsy-inner")[this.options.html?"html":"text"](n);r[0].className="tipsy";r.remove().css({top:0,left:0,visibility:"hidden",display:"block"}).prependTo(document.body);var i=e.extend({},this.$element.offset(),{width:this.$element[0].offsetWidth,height:this.$element[0].offsetHeight + 3});var s=r[0].offsetWidth,o=r[0].offsetHeight,u=t(this.options.gravity,this.$element[0]);var a;switch(u.charAt(0)){case"n":a={top:i.top+i.height+this.options.offset,left:i.left+i.width/2-s/2};break;case"s":a={top:i.top-o-this.options.offset,left:i.left+i.width/2-s/2};break;case"e":a={top:i.top+i.height/2-o/2,left:i.left-s-this.options.offset};break;case"w":a={top:i.top+i.height/2-o/2,left:i.left+i.width+this.options.offset};break}if(u.length==2){if(u.charAt(1)=="w"){a.left=i.left+i.width/2-15}else{a.left=i.left+i.width/2-s+15}}r.css(a).addClass("tipsy-"+u);r.find(".tipsy-arrow")[0].className="tipsy-arrow tipsy-arrow-"+u.charAt(0);if(this.options.className){r.addClass(t(this.options.className,this.$element[0]))}if(this.options.fade){r.stop().css({opacity:0,display:"block",visibility:"visible"}).animate({opacity:this.options.opacity})}else{r.css({visibility:"visible",opacity:this.options.opacity})}}},hide:function(){if(this.options.fade){this.tip().stop().fadeOut(function(){e(this).remove()})}else{this.tip().remove()}},fixTitle:function(){var e=this.$element;if(e.attr("title")||typeof e.attr("original-title")!="string"){e.attr("original-title",e.attr("title")||"").removeAttr("title")}},getTitle:function(){var e,t=this.$element,n=this.options;this.fixTitle();var e,n=this.options;if(typeof n.title=="string"){e=t.attr(n.title=="title"?"original-title":n.title)}else if(typeof n.title=="function"){e=n.title.call(t[0])}e=(""+e).replace(/(^\s*|\s*$)/,"");return e||n.fallback},tip:function(){if(!this.$tip){this.$tip=e('<div class="tipsy"></div>').html('<div class="tipsy-arrow"></div><div class="tipsy-inner"></div>');this.$tip.data("tipsy-pointee",this.$element[0])}return this.$tip},validate:function(){if(!this.$element[0].parentNode){this.hide();this.$element=null;this.options=null}},enable:function(){this.enabled=true},disable:function(){this.enabled=false},toggleEnabled:function(){this.enabled=!this.enabled}};e.fn.tipsy=function(t){function i(n){var i=e.data(n,"tipsy");if(!i){i=new r(n,e.fn.tipsy.elementOptions(n,t));e.data(n,"tipsy",i)}return i}function s(){var e=i(this);e.hoverState="in";if(t.delayIn==0){e.show()}else{e.fixTitle();setTimeout(function(){if(e.hoverState=="in")e.show()},t.delayIn)}}function o(){var e=i(this);e.hoverState="out";if(t.delayOut==0){e.hide()}else{setTimeout(function(){if(e.hoverState=="out")e.hide()},t.delayOut)}}if(t===true){return this.data("tipsy")}else if(typeof t=="string"){var n=this.data("tipsy");if(n)n[t]();return this}t=e.extend({},e.fn.tipsy.defaults,t);if(!t.live)this.each(function(){i(this)});if(t.trigger!="manual"){var u=t.trigger=="hover"?"mouseenter":"focus",a=t.trigger=="hover"?"mouseleave":"blur";if(t.live){e(this.context).on(u,this.selector,s).on(a,this.selector,o)}else{this.on(u,s).on(a,o)}}return 
this};e.fn.tipsy.defaults={className:null,delayIn:0,delayOut:0,fade:false,fallback:"",gravity:"n",html:false,live:false,offset:0,opacity:.9,title:"title",trigger:"hover"};e.fn.tipsy.revalidate=function(){e(".tipsy").each(function(){var t=e.data(this,"tipsy-pointee");if(!t||!n(t)){e(this).remove()}})};e.fn.tipsy.elementOptions=function(t,n){return e.metadata?e.extend({},n,e(t).metadata()):n};e.fn.tipsy.autoNS=function(){return e(this).offset().top>e(document).scrollTop()+e(window).height()/2?"s":"n"};e.fn.tipsy.autoWE=function(){return e(this).offset().left>e(document).scrollLeft()+e(window).width()/2?"e":"w"};e.fn.tipsy.autoBounds=function(t,n){return function(){var r={ns:n[0],ew:n.length>1?n[1]:false},i=e(document).scrollTop()+t,s=e(document).scrollLeft()+t,o=e(this);if(o.offset().top<i)r.ns="n";if(o.offset().left<s)r.ew="w";if(e(window).width()+e(document).scrollLeft()-o.offset().left<t)r.ew="e";if(e(window).height()+e(document).scrollTop()-o.offset().top<t)r.ns="s";return r.ns+(r.ew?r.ew:"")}}})(jQuery) \ No newline at end of file
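tipsy turns an element's title attribute into a styled tooltip. A minimal sketch using options from the defaults object above (gravity, fade, delayIn); the selector is hypothetical:

    // Tooltips below the anchor ('n' gravity points the arrow up), fading in after 150ms.
    $('a[title]').tipsy({ gravity: 'n', fade: true, delayIn: 150 });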
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/markup.min.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/markup.min.js
new file mode 100644
index 00000000000..204f3e4971f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/markup.min.js
@@ -0,0 +1,6 @@
+/*
+ Markup.js v1.5.17: http://github.com/adammark/Markup.js
+ MIT License
+ (c) 2011 - 2013 Adam Mark
+*/
+var Mark={includes:{},globals:{},delimiter:">",compact:false,_copy:function(d,c){c=c||[];for(var e in d){c[e]=d[e]}return c},_size:function(b){return b instanceof Array?b.length:(b||0)},_iter:function(a,b){this.idx=a;this.size=b;this.length=b;this.sign="#";this.toString=function(){return this.idx+this.sign.length-1}},_pipe:function(h,c){var g,f,b,a;if((g=c.shift())){f=g.split(this.delimiter);b=f.shift().trim();try{a=Mark.pipes[b].apply(null,[h].concat(f));h=this._pipe(a,c)}catch(d){}}return h},_eval:function(e,g,h){var a=this._pipe(e,g),b=a,d=-1,c,f;if(a instanceof Array){a="";c=b.length;while(++d<c){f={iter:new this._iter(d,c)};a+=h?Mark.up(h,b[d],f):b[d]}}else{if(a instanceof Object){a=Mark.up(h,b)}}return a},_test:function(a,e,c,b){var d=Mark.up(e,c,b).split(/\{\{\s*else\s*\}\}/);return(a===false?d[1]:d[0])||""},_bridge:function(h,e){var f="{{\\s*"+e+"([^/}]+\\w*)?}}|{{/"+e+"\\s*}}",n=new RegExp(f,"g"),p=h.match(n)||[],o,g,m=0,l=0,k=-1,j=0;for(g=0;g<p.length;g++){o=g;k=h.indexOf(p[o],k+1);if(p[o].indexOf("{{/")>-1){l++}else{m++}if(m===l){break}}m=h.indexOf(p[0]);l=m+p[0].length;j=k+p[o].length;return[h.substring(m,j),h.substring(l,k)]}};Mark.up=function(s,b,e){b=b||{};e=e||{};var m=/\{\{(.+?)\}\}/g,l=s.match(m)||[],t,d,g,h=[],r,c,f,k,o,a,n,q=0,p=0;if(e.pipes){this._copy(e.pipes,this.pipes)}if(e.includes){this._copy(e.includes,this.includes)}if(e.globals){this._copy(e.globals,this.globals)}if(e.delimiter){this.delimiter=e.delimiter}if(e.compact!==undefined){this.compact=e.compact}while((t=l[q++])){k=undefined;f="";r=t.indexOf("/}}")>-1;d=t.substr(2,t.length-(r?5:4));d=d.replace(/`(.+?)`/g,function(i,j){return Mark.up("{{"+j+"}}",b)});c=d.trim().indexOf("if ")===0;h=d.split("|");h.shift();d=d.replace(/^\s*if/,"").split("|").shift().trim();g=c?"if":d.split("|")[0];n=b[d];if(c&&!h.length){h=["notempty"]}if(!r&&s.indexOf("{{/"+g)>-1){k=this._bridge(s,g);t=k[0];f=k[1];q+=t.match(m).length-1}if(/^\{\{\s*else\s*\}\}$/.test(t)){continue}else{if((o=this.globals[d])!==undefined){k=this._eval(o,h,f)}else{if((a=this.includes[d])){if(a instanceof Function){a=a()}k=this._pipe(Mark.up(a,b),h)}else{if(d.indexOf("#")>-1){e.iter.sign=d;k=this._pipe(e.iter,h)}else{if(d==="."){k=this._pipe(b,h)}else{if(d.indexOf(".")>-1){d=d.split(".");n=Mark.globals[d[0]];if(n){p=1}else{p=0;n=b}while(n&&p<d.length){n=n[d[p++]]}k=this._eval(n,h,f)}else{if(c){k=this._pipe(n,h)}else{if(n instanceof Array){k=this._eval(n,h,f)}else{if(f){k=n?Mark.up(f,n):undefined}else{if(b.hasOwnProperty(d)){k=this._pipe(n,h)}}}}}}}}}}if(k instanceof Array){k=this._eval(k,h,f)}if(c){k=this._test(k,f,b,e)}s=s.replace(t,k===undefined?"???":k)}return this.compact?s.replace(/>\s+</g,"><"):s};Mark.pipes={empty:function(a){return !a||(a+"").trim().length===0?a:false},notempty:function(a){return a&&(a+"").trim().length?a:false},blank:function(b,a){return !!b||b===0?b:a},more:function(d,c){return Mark._size(d)>c?d:false},less:function(d,c){return Mark._size(d)<c?d:false},ormore:function(d,c){return Mark._size(d)>=c?d:false},orless:function(d,c){return Mark._size(d)<=c?d:false},between:function(e,d,f){e=Mark._size(e);return e>=d&&e<=f?e:false},equals:function(d,c){return d==c?d:false},notequals:function(d,c){return d!=c?d:false},like:function(b,a){return new RegExp(a,"i").test(b)?b:false},notlike:function(b,a){return !Mark.pipes.like(b,a)?b:false},upcase:function(a){return String(a).toUpperCase()},downcase:function(a){return String(a).toLowerCase()},capcase:function(a){return a.replace(/\b\w/g,function(b){return 
b.toUpperCase()})},chop:function(a,b){return a.length>b?a.substr(0,b)+"...":a},tease:function(c,d){var b=c.split(/\s+/);return b.slice(0,d).join(" ")+(b.length>d?"...":"")},trim:function(a){return a.trim()},pack:function(a){return a.trim().replace(/\s{2,}/g," ")},round:function(a){return Math.round(+a)},clean:function(a){return String(a).replace(/<\/?[^>]+>/gi,"")},size:function(a){return a.length},length:function(a){return a.length},reverse:function(a){return[].concat(a).reverse()},join:function(a,b){return a.join(b)},limit:function(b,c,a){return b.slice(+a||0,+c+(+a||0))},split:function(b,a){return b.split(a||",")},choose:function(b,c,a){return !!b?c:(a||"")},toggle:function(c,b,a,d){return a.split(",")[b.match(/\w+/g).indexOf(c+"")]||d},sort:function(a,c){var b=function(e,d){return e[c]>d[c]?1:-1};return[].concat(a).sort(c?b:undefined)},fix:function(a,b){return(+a).toFixed(b)},mod:function(a,b){return(+a)%(+b)},divisible:function(a,b){return a&&(+a%b)===0?a:false},even:function(a){return a&&(+a&1)===0?a:false},odd:function(a){return a&&(+a&1)===1?a:false},number:function(a){return parseFloat(a.replace(/[^\-\d\.]/g,""))},url:function(a){return encodeURI(a)},bool:function(a){return !!a},falsy:function(a){return !a},first:function(a){return a.idx===0},last:function(a){return a.idx===a.size-1},call:function(b,a){return b[a].apply(b,[].slice.call(arguments,2))},set:function(b,a){Mark.globals[a]=b;return""},log:function(a){console.log(a);return a}};if(typeof String.prototype.trim!=="function"){String.prototype.trim=function(){return this.replace(/^\s+|\s+$/g,"")}}if(typeof module!=="undefined"&&module.exports){module.exports=Mark}else{if(typeof define==="function"&&define.amd){define(function(){return Mark})}}; \ No newline at end of file
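Markup.js is a dependency-free {{...}} template engine: Mark.up(template, context) substitutes context values and applies the pipe filters defined in Mark.pipes above. A minimal sketch (all names are illustrative):

    // Yields "Hello WORLD! You have 3 messages." — upcase and size are built-in pipes above.
    var out = Mark.up("Hello {{name|upcase}}! You have {{msgs|size}} messages.",
                      { name: "world", msgs: ["a", "b", "c"] });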
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/moment.min.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/moment.min.js
new file mode 100644
index 00000000000..d3425e9a1ee
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/moment.min.js
@@ -0,0 +1,6 @@
+//! moment.js
+//! version : 2.5.1
+//! authors : Tim Wood, Iskren Chernev, Moment.js contributors
+//! license : MIT
+//! momentjs.com
+;(function(a){function b(){return{empty:!1,unusedTokens:[],unusedInput:[],overflow:-2,charsLeftOver:0,nullInput:!1,invalidMonth:null,invalidFormat:!1,userInvalidated:!1,iso:!1}}function c(a,b){return function(c){return k(a.call(this,c),b)}}function d(a,b){return function(c){return this.lang().ordinal(a.call(this,c),b)}}function e(){}function f(a){w(a),h(this,a)}function g(a){var b=q(a),c=b.year||0,d=b.month||0,e=b.week||0,f=b.day||0,g=b.hour||0,h=b.minute||0,i=b.second||0,j=b.millisecond||0;this._milliseconds=+j+1e3*i+6e4*h+36e5*g,this._days=+f+7*e,this._months=+d+12*c,this._data={},this._bubble()}function h(a,b){for(var c in b)b.hasOwnProperty(c)&&(a[c]=b[c]);return b.hasOwnProperty("toString")&&(a.toString=b.toString),b.hasOwnProperty("valueOf")&&(a.valueOf=b.valueOf),a}function i(a){var b,c={};for(b in a)a.hasOwnProperty(b)&&qb.hasOwnProperty(b)&&(c[b]=a[b]);return c}function j(a){return 0>a?Math.ceil(a):Math.floor(a)}function k(a,b,c){for(var d=""+Math.abs(a),e=a>=0;d.length<b;)d="0"+d;return(e?c?"+":"":"-")+d}function l(a,b,c,d){var e,f,g=b._milliseconds,h=b._days,i=b._months;g&&a._d.setTime(+a._d+g*c),(h||i)&&(e=a.minute(),f=a.hour()),h&&a.date(a.date()+h*c),i&&a.month(a.month()+i*c),g&&!d&&db.updateOffset(a),(h||i)&&(a.minute(e),a.hour(f))}function m(a){return"[object Array]"===Object.prototype.toString.call(a)}function n(a){return"[object Date]"===Object.prototype.toString.call(a)||a instanceof Date}function o(a,b,c){var d,e=Math.min(a.length,b.length),f=Math.abs(a.length-b.length),g=0;for(d=0;e>d;d++)(c&&a[d]!==b[d]||!c&&s(a[d])!==s(b[d]))&&g++;return g+f}function p(a){if(a){var b=a.toLowerCase().replace(/(.)s$/,"$1");a=Tb[a]||Ub[b]||b}return a}function q(a){var b,c,d={};for(c in a)a.hasOwnProperty(c)&&(b=p(c),b&&(d[b]=a[c]));return d}function r(b){var c,d;if(0===b.indexOf("week"))c=7,d="day";else{if(0!==b.indexOf("month"))return;c=12,d="month"}db[b]=function(e,f){var g,h,i=db.fn._lang[b],j=[];if("number"==typeof e&&(f=e,e=a),h=function(a){var b=db().utc().set(d,a);return i.call(db.fn._lang,b,e||"")},null!=f)return h(f);for(g=0;c>g;g++)j.push(h(g));return j}}function s(a){var b=+a,c=0;return 0!==b&&isFinite(b)&&(c=b>=0?Math.floor(b):Math.ceil(b)),c}function t(a,b){return new Date(Date.UTC(a,b+1,0)).getUTCDate()}function u(a){return v(a)?366:365}function v(a){return a%4===0&&a%100!==0||a%400===0}function w(a){var b;a._a&&-2===a._pf.overflow&&(b=a._a[jb]<0||a._a[jb]>11?jb:a._a[kb]<1||a._a[kb]>t(a._a[ib],a._a[jb])?kb:a._a[lb]<0||a._a[lb]>23?lb:a._a[mb]<0||a._a[mb]>59?mb:a._a[nb]<0||a._a[nb]>59?nb:a._a[ob]<0||a._a[ob]>999?ob:-1,a._pf._overflowDayOfYear&&(ib>b||b>kb)&&(b=kb),a._pf.overflow=b)}function x(a){return null==a._isValid&&(a._isValid=!isNaN(a._d.getTime())&&a._pf.overflow<0&&!a._pf.empty&&!a._pf.invalidMonth&&!a._pf.nullInput&&!a._pf.invalidFormat&&!a._pf.userInvalidated,a._strict&&(a._isValid=a._isValid&&0===a._pf.charsLeftOver&&0===a._pf.unusedTokens.length)),a._isValid}function y(a){return a?a.toLowerCase().replace("_","-"):a}function z(a,b){return b._isUTC?db(a).zone(b._offset||0):db(a).local()}function A(a,b){return b.abbr=a,pb[a]||(pb[a]=new e),pb[a].set(b),pb[a]}function B(a){delete pb[a]}function C(a){var b,c,d,e,f=0,g=function(a){if(!pb[a]&&rb)try{require("./lang/"+a)}catch(b){}return pb[a]};if(!a)return db.fn._lang;if(!m(a)){if(c=g(a))return c;a=[a]}for(;f<a.length;){for(e=y(a[f]).split("-"),b=e.length,d=y(a[f+1]),d=d?d.split("-"):null;b>0;){if(c=g(e.slice(0,b).join("-")))return c;if(d&&d.length>=b&&o(e,d,!0)>=b-1)break;b--}f++}return db.fn._lang}function 
D(a){return a.match(/\[[\s\S]/)?a.replace(/^\[|\]$/g,""):a.replace(/\\/g,"")}function E(a){var b,c,d=a.match(vb);for(b=0,c=d.length;c>b;b++)d[b]=Yb[d[b]]?Yb[d[b]]:D(d[b]);return function(e){var f="";for(b=0;c>b;b++)f+=d[b]instanceof Function?d[b].call(e,a):d[b];return f}}function F(a,b){return a.isValid()?(b=G(b,a.lang()),Vb[b]||(Vb[b]=E(b)),Vb[b](a)):a.lang().invalidDate()}function G(a,b){function c(a){return b.longDateFormat(a)||a}var d=5;for(wb.lastIndex=0;d>=0&&wb.test(a);)a=a.replace(wb,c),wb.lastIndex=0,d-=1;return a}function H(a,b){var c,d=b._strict;switch(a){case"DDDD":return Ib;case"YYYY":case"GGGG":case"gggg":return d?Jb:zb;case"Y":case"G":case"g":return Lb;case"YYYYYY":case"YYYYY":case"GGGGG":case"ggggg":return d?Kb:Ab;case"S":if(d)return Gb;case"SS":if(d)return Hb;case"SSS":if(d)return Ib;case"DDD":return yb;case"MMM":case"MMMM":case"dd":case"ddd":case"dddd":return Cb;case"a":case"A":return C(b._l)._meridiemParse;case"X":return Fb;case"Z":case"ZZ":return Db;case"T":return Eb;case"SSSS":return Bb;case"MM":case"DD":case"YY":case"GG":case"gg":case"HH":case"hh":case"mm":case"ss":case"ww":case"WW":return d?Hb:xb;case"M":case"D":case"d":case"H":case"h":case"m":case"s":case"w":case"W":case"e":case"E":return xb;default:return c=new RegExp(P(O(a.replace("\\","")),"i"))}}function I(a){a=a||"";var b=a.match(Db)||[],c=b[b.length-1]||[],d=(c+"").match(Qb)||["-",0,0],e=+(60*d[1])+s(d[2]);return"+"===d[0]?-e:e}function J(a,b,c){var d,e=c._a;switch(a){case"M":case"MM":null!=b&&(e[jb]=s(b)-1);break;case"MMM":case"MMMM":d=C(c._l).monthsParse(b),null!=d?e[jb]=d:c._pf.invalidMonth=b;break;case"D":case"DD":null!=b&&(e[kb]=s(b));break;case"DDD":case"DDDD":null!=b&&(c._dayOfYear=s(b));break;case"YY":e[ib]=s(b)+(s(b)>68?1900:2e3);break;case"YYYY":case"YYYYY":case"YYYYYY":e[ib]=s(b);break;case"a":case"A":c._isPm=C(c._l).isPM(b);break;case"H":case"HH":case"h":case"hh":e[lb]=s(b);break;case"m":case"mm":e[mb]=s(b);break;case"s":case"ss":e[nb]=s(b);break;case"S":case"SS":case"SSS":case"SSSS":e[ob]=s(1e3*("0."+b));break;case"X":c._d=new Date(1e3*parseFloat(b));break;case"Z":case"ZZ":c._useUTC=!0,c._tzm=I(b);break;case"w":case"ww":case"W":case"WW":case"d":case"dd":case"ddd":case"dddd":case"e":case"E":a=a.substr(0,1);case"gg":case"gggg":case"GG":case"GGGG":case"GGGGG":a=a.substr(0,2),b&&(c._w=c._w||{},c._w[a]=b)}}function K(a){var b,c,d,e,f,g,h,i,j,k,l=[];if(!a._d){for(d=M(a),a._w&&null==a._a[kb]&&null==a._a[jb]&&(f=function(b){var c=parseInt(b,10);return b?b.length<3?c>68?1900+c:2e3+c:c:null==a._a[ib]?db().weekYear():a._a[ib]},g=a._w,null!=g.GG||null!=g.W||null!=g.E?h=Z(f(g.GG),g.W||1,g.E,4,1):(i=C(a._l),j=null!=g.d?V(g.d,i):null!=g.e?parseInt(g.e,10)+i._week.dow:0,k=parseInt(g.w,10)||1,null!=g.d&&j<i._week.dow&&k++,h=Z(f(g.gg),k,j,i._week.doy,i._week.dow)),a._a[ib]=h.year,a._dayOfYear=h.dayOfYear),a._dayOfYear&&(e=null==a._a[ib]?d[ib]:a._a[ib],a._dayOfYear>u(e)&&(a._pf._overflowDayOfYear=!0),c=U(e,0,a._dayOfYear),a._a[jb]=c.getUTCMonth(),a._a[kb]=c.getUTCDate()),b=0;3>b&&null==a._a[b];++b)a._a[b]=l[b]=d[b];for(;7>b;b++)a._a[b]=l[b]=null==a._a[b]?2===b?1:0:a._a[b];l[lb]+=s((a._tzm||0)/60),l[mb]+=s((a._tzm||0)%60),a._d=(a._useUTC?U:T).apply(null,l)}}function L(a){var b;a._d||(b=q(a._i),a._a=[b.year,b.month,b.day,b.hour,b.minute,b.second,b.millisecond],K(a))}function M(a){var b=new Date;return a._useUTC?[b.getUTCFullYear(),b.getUTCMonth(),b.getUTCDate()]:[b.getFullYear(),b.getMonth(),b.getDate()]}function N(a){a._a=[],a._pf.empty=!0;var 
b,c,d,e,f,g=C(a._l),h=""+a._i,i=h.length,j=0;for(d=G(a._f,g).match(vb)||[],b=0;b<d.length;b++)e=d[b],c=(h.match(H(e,a))||[])[0],c&&(f=h.substr(0,h.indexOf(c)),f.length>0&&a._pf.unusedInput.push(f),h=h.slice(h.indexOf(c)+c.length),j+=c.length),Yb[e]?(c?a._pf.empty=!1:a._pf.unusedTokens.push(e),J(e,c,a)):a._strict&&!c&&a._pf.unusedTokens.push(e);a._pf.charsLeftOver=i-j,h.length>0&&a._pf.unusedInput.push(h),a._isPm&&a._a[lb]<12&&(a._a[lb]+=12),a._isPm===!1&&12===a._a[lb]&&(a._a[lb]=0),K(a),w(a)}function O(a){return a.replace(/\\(\[)|\\(\])|\[([^\]\[]*)\]|\\(.)/g,function(a,b,c,d,e){return b||c||d||e})}function P(a){return a.replace(/[-\/\\^$*+?.()|[\]{}]/g,"\\$&")}function Q(a){var c,d,e,f,g;if(0===a._f.length)return a._pf.invalidFormat=!0,a._d=new Date(0/0),void 0;for(f=0;f<a._f.length;f++)g=0,c=h({},a),c._pf=b(),c._f=a._f[f],N(c),x(c)&&(g+=c._pf.charsLeftOver,g+=10*c._pf.unusedTokens.length,c._pf.score=g,(null==e||e>g)&&(e=g,d=c));h(a,d||c)}function R(a){var b,c,d=a._i,e=Mb.exec(d);if(e){for(a._pf.iso=!0,b=0,c=Ob.length;c>b;b++)if(Ob[b][1].exec(d)){a._f=Ob[b][0]+(e[6]||" ");break}for(b=0,c=Pb.length;c>b;b++)if(Pb[b][1].exec(d)){a._f+=Pb[b][0];break}d.match(Db)&&(a._f+="Z"),N(a)}else a._d=new Date(d)}function S(b){var c=b._i,d=sb.exec(c);c===a?b._d=new Date:d?b._d=new Date(+d[1]):"string"==typeof c?R(b):m(c)?(b._a=c.slice(0),K(b)):n(c)?b._d=new Date(+c):"object"==typeof c?L(b):b._d=new Date(c)}function T(a,b,c,d,e,f,g){var h=new Date(a,b,c,d,e,f,g);return 1970>a&&h.setFullYear(a),h}function U(a){var b=new Date(Date.UTC.apply(null,arguments));return 1970>a&&b.setUTCFullYear(a),b}function V(a,b){if("string"==typeof a)if(isNaN(a)){if(a=b.weekdaysParse(a),"number"!=typeof a)return null}else a=parseInt(a,10);return a}function W(a,b,c,d,e){return e.relativeTime(b||1,!!c,a,d)}function X(a,b,c){var d=hb(Math.abs(a)/1e3),e=hb(d/60),f=hb(e/60),g=hb(f/24),h=hb(g/365),i=45>d&&["s",d]||1===e&&["m"]||45>e&&["mm",e]||1===f&&["h"]||22>f&&["hh",f]||1===g&&["d"]||25>=g&&["dd",g]||45>=g&&["M"]||345>g&&["MM",hb(g/30)]||1===h&&["y"]||["yy",h];return i[2]=b,i[3]=a>0,i[4]=c,W.apply({},i)}function Y(a,b,c){var d,e=c-b,f=c-a.day();return f>e&&(f-=7),e-7>f&&(f+=7),d=db(a).add("d",f),{week:Math.ceil(d.dayOfYear()/7),year:d.year()}}function Z(a,b,c,d,e){var f,g,h=U(a,0,1).getUTCDay();return c=null!=c?c:e,f=e-h+(h>d?7:0)-(e>h?7:0),g=7*(b-1)+(c-e)+f+1,{year:g>0?a:a-1,dayOfYear:g>0?g:u(a-1)+g}}function $(a){var b=a._i,c=a._f;return null===b?db.invalid({nullInput:!0}):("string"==typeof b&&(a._i=b=C().preparse(b)),db.isMoment(b)?(a=i(b),a._d=new Date(+b._d)):c?m(c)?Q(a):N(a):S(a),new f(a))}function _(a,b){db.fn[a]=db.fn[a+"s"]=function(a){var c=this._isUTC?"UTC":"";return null!=a?(this._d["set"+c+b](a),db.updateOffset(this),this):this._d["get"+c+b]()}}function ab(a){db.duration.fn[a]=function(){return this._data[a]}}function bb(a,b){db.duration.fn["as"+a]=function(){return+this/b}}function cb(a){var b=!1,c=db;"undefined"==typeof ender&&(a?(gb.moment=function(){return!b&&console&&console.warn&&(b=!0,console.warn("Accessing Moment through the global scope is deprecated, and will be removed in an upcoming release.")),c.apply(null,arguments)},h(gb.moment,c)):gb.moment=db)}for(var db,eb,fb="2.5.1",gb=this,hb=Math.round,ib=0,jb=1,kb=2,lb=3,mb=4,nb=5,ob=6,pb={},qb={_isAMomentObject:null,_i:null,_f:null,_l:null,_strict:null,_isUTC:null,_offset:null,_pf:null,_lang:null},rb="undefined"!=typeof module&&module.exports&&"undefined"!=typeof 
require,sb=/^\/?Date\((\-?\d+)/i,tb=/(\-)?(?:(\d*)\.)?(\d+)\:(\d+)(?:\:(\d+)\.?(\d{3})?)?/,ub=/^(-)?P(?:(?:([0-9,.]*)Y)?(?:([0-9,.]*)M)?(?:([0-9,.]*)D)?(?:T(?:([0-9,.]*)H)?(?:([0-9,.]*)M)?(?:([0-9,.]*)S)?)?|([0-9,.]*)W)$/,vb=/(\[[^\[]*\])|(\\)?(Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|YYYYYY|YYYYY|YYYY|YY|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|mm?|ss?|S{1,4}|X|zz?|ZZ?|.)/g,wb=/(\[[^\[]*\])|(\\)?(LT|LL?L?L?|l{1,4})/g,xb=/\d\d?/,yb=/\d{1,3}/,zb=/\d{1,4}/,Ab=/[+\-]?\d{1,6}/,Bb=/\d+/,Cb=/[0-9]*['a-z\u00A0-\u05FF\u0700-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF]+|[\u0600-\u06FF\/]+(\s*?[\u0600-\u06FF]+){1,2}/i,Db=/Z|[\+\-]\d\d:?\d\d/gi,Eb=/T/i,Fb=/[\+\-]?\d+(\.\d{1,3})?/,Gb=/\d/,Hb=/\d\d/,Ib=/\d{3}/,Jb=/\d{4}/,Kb=/[+-]?\d{6}/,Lb=/[+-]?\d+/,Mb=/^\s*(?:[+-]\d{6}|\d{4})-(?:(\d\d-\d\d)|(W\d\d$)|(W\d\d-\d)|(\d\d\d))((T| )(\d\d(:\d\d(:\d\d(\.\d+)?)?)?)?([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,Nb="YYYY-MM-DDTHH:mm:ssZ",Ob=[["YYYYYY-MM-DD",/[+-]\d{6}-\d{2}-\d{2}/],["YYYY-MM-DD",/\d{4}-\d{2}-\d{2}/],["GGGG-[W]WW-E",/\d{4}-W\d{2}-\d/],["GGGG-[W]WW",/\d{4}-W\d{2}/],["YYYY-DDD",/\d{4}-\d{3}/]],Pb=[["HH:mm:ss.SSSS",/(T| )\d\d:\d\d:\d\d\.\d{1,3}/],["HH:mm:ss",/(T| )\d\d:\d\d:\d\d/],["HH:mm",/(T| )\d\d:\d\d/],["HH",/(T| )\d\d/]],Qb=/([\+\-]|\d\d)/gi,Rb="Date|Hours|Minutes|Seconds|Milliseconds".split("|"),Sb={Milliseconds:1,Seconds:1e3,Minutes:6e4,Hours:36e5,Days:864e5,Months:2592e6,Years:31536e6},Tb={ms:"millisecond",s:"second",m:"minute",h:"hour",d:"day",D:"date",w:"week",W:"isoWeek",M:"month",y:"year",DDD:"dayOfYear",e:"weekday",E:"isoWeekday",gg:"weekYear",GG:"isoWeekYear"},Ub={dayofyear:"dayOfYear",isoweekday:"isoWeekday",isoweek:"isoWeek",weekyear:"weekYear",isoweekyear:"isoWeekYear"},Vb={},Wb="DDD w W M D d".split(" "),Xb="M D H h m s w W".split(" "),Yb={M:function(){return this.month()+1},MMM:function(a){return this.lang().monthsShort(this,a)},MMMM:function(a){return this.lang().months(this,a)},D:function(){return this.date()},DDD:function(){return this.dayOfYear()},d:function(){return this.day()},dd:function(a){return this.lang().weekdaysMin(this,a)},ddd:function(a){return this.lang().weekdaysShort(this,a)},dddd:function(a){return this.lang().weekdays(this,a)},w:function(){return this.week()},W:function(){return this.isoWeek()},YY:function(){return k(this.year()%100,2)},YYYY:function(){return k(this.year(),4)},YYYYY:function(){return k(this.year(),5)},YYYYYY:function(){var a=this.year(),b=a>=0?"+":"-";return b+k(Math.abs(a),6)},gg:function(){return k(this.weekYear()%100,2)},gggg:function(){return k(this.weekYear(),4)},ggggg:function(){return k(this.weekYear(),5)},GG:function(){return k(this.isoWeekYear()%100,2)},GGGG:function(){return k(this.isoWeekYear(),4)},GGGGG:function(){return k(this.isoWeekYear(),5)},e:function(){return this.weekday()},E:function(){return this.isoWeekday()},a:function(){return this.lang().meridiem(this.hours(),this.minutes(),!0)},A:function(){return this.lang().meridiem(this.hours(),this.minutes(),!1)},H:function(){return this.hours()},h:function(){return this.hours()%12||12},m:function(){return this.minutes()},s:function(){return this.seconds()},S:function(){return s(this.milliseconds()/100)},SS:function(){return k(s(this.milliseconds()/10),2)},SSS:function(){return k(this.milliseconds(),3)},SSSS:function(){return k(this.milliseconds(),3)},Z:function(){var a=-this.zone(),b="+";return 0>a&&(a=-a,b="-"),b+k(s(a/60),2)+":"+k(s(a)%60,2)},ZZ:function(){var a=-this.zone(),b="+";return 0>a&&(a=-a,b="-"),b+k(s(a/60),2)+k(s(a)%60,2)},z:function(){return this.zoneAbbr()},zz:function(){return 
this.zoneName()},X:function(){return this.unix()},Q:function(){return this.quarter()}},Zb=["months","monthsShort","weekdays","weekdaysShort","weekdaysMin"];Wb.length;)eb=Wb.pop(),Yb[eb+"o"]=d(Yb[eb],eb);for(;Xb.length;)eb=Xb.pop(),Yb[eb+eb]=c(Yb[eb],2);for(Yb.DDDD=c(Yb.DDD,3),h(e.prototype,{set:function(a){var b,c;for(c in a)b=a[c],"function"==typeof b?this[c]=b:this["_"+c]=b},_months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),months:function(a){return this._months[a.month()]},_monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),monthsShort:function(a){return this._monthsShort[a.month()]},monthsParse:function(a){var b,c,d;for(this._monthsParse||(this._monthsParse=[]),b=0;12>b;b++)if(this._monthsParse[b]||(c=db.utc([2e3,b]),d="^"+this.months(c,"")+"|^"+this.monthsShort(c,""),this._monthsParse[b]=new RegExp(d.replace(".",""),"i")),this._monthsParse[b].test(a))return b},_weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdays:function(a){return this._weekdays[a.day()]},_weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysShort:function(a){return this._weekdaysShort[a.day()]},_weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),weekdaysMin:function(a){return this._weekdaysMin[a.day()]},weekdaysParse:function(a){var b,c,d;for(this._weekdaysParse||(this._weekdaysParse=[]),b=0;7>b;b++)if(this._weekdaysParse[b]||(c=db([2e3,1]).day(b),d="^"+this.weekdays(c,"")+"|^"+this.weekdaysShort(c,"")+"|^"+this.weekdaysMin(c,""),this._weekdaysParse[b]=new RegExp(d.replace(".",""),"i")),this._weekdaysParse[b].test(a))return b},_longDateFormat:{LT:"h:mm A",L:"MM/DD/YYYY",LL:"MMMM D YYYY",LLL:"MMMM D YYYY LT",LLLL:"dddd, MMMM D YYYY LT"},longDateFormat:function(a){var b=this._longDateFormat[a];return!b&&this._longDateFormat[a.toUpperCase()]&&(b=this._longDateFormat[a.toUpperCase()].replace(/MMMM|MM|DD|dddd/g,function(a){return a.slice(1)}),this._longDateFormat[a]=b),b},isPM:function(a){return"p"===(a+"").toLowerCase().charAt(0)},_meridiemParse:/[ap]\.?m?\.?/i,meridiem:function(a,b,c){return a>11?c?"pm":"PM":c?"am":"AM"},_calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},calendar:function(a,b){var c=this._calendar[a];return"function"==typeof c?c.apply(b):c},_relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},relativeTime:function(a,b,c,d){var e=this._relativeTime[c];return"function"==typeof e?e(a,b,c,d):e.replace(/%d/i,a)},pastFuture:function(a,b){var c=this._relativeTime[a>0?"future":"past"];return"function"==typeof c?c(b):c.replace(/%s/i,b)},ordinal:function(a){return this._ordinal.replace("%d",a)},_ordinal:"%d",preparse:function(a){return a},postformat:function(a){return a},week:function(a){return Y(a,this._week.dow,this._week.doy).week},_week:{dow:0,doy:6},_invalidDate:"Invalid date",invalidDate:function(){return this._invalidDate}}),db=function(c,d,e,f){var g;return"boolean"==typeof e&&(f=e,e=a),g={},g._isAMomentObject=!0,g._i=c,g._f=d,g._l=e,g._strict=f,g._isUTC=!1,g._pf=b(),$(g)},db.utc=function(c,d,e,f){var g;return"boolean"==typeof e&&(f=e,e=a),g={},g._isAMomentObject=!0,g._useUTC=!0,g._isUTC=!0,g._l=e,g._i=c,g._f=d,g._strict=f,g._pf=b(),$(g).utc()},db.unix=function(a){return db(1e3*a)},db.duration=function(a,b){var c,d,e,f=a,h=null;return 
db.isDuration(a)?f={ms:a._milliseconds,d:a._days,M:a._months}:"number"==typeof a?(f={},b?f[b]=a:f.milliseconds=a):(h=tb.exec(a))?(c="-"===h[1]?-1:1,f={y:0,d:s(h[kb])*c,h:s(h[lb])*c,m:s(h[mb])*c,s:s(h[nb])*c,ms:s(h[ob])*c}):(h=ub.exec(a))&&(c="-"===h[1]?-1:1,e=function(a){var b=a&&parseFloat(a.replace(",","."));return(isNaN(b)?0:b)*c},f={y:e(h[2]),M:e(h[3]),d:e(h[4]),h:e(h[5]),m:e(h[6]),s:e(h[7]),w:e(h[8])}),d=new g(f),db.isDuration(a)&&a.hasOwnProperty("_lang")&&(d._lang=a._lang),d},db.version=fb,db.defaultFormat=Nb,db.updateOffset=function(){},db.lang=function(a,b){var c;return a?(b?A(y(a),b):null===b?(B(a),a="en"):pb[a]||C(a),c=db.duration.fn._lang=db.fn._lang=C(a),c._abbr):db.fn._lang._abbr},db.langData=function(a){return a&&a._lang&&a._lang._abbr&&(a=a._lang._abbr),C(a)},db.isMoment=function(a){return a instanceof f||null!=a&&a.hasOwnProperty("_isAMomentObject")},db.isDuration=function(a){return a instanceof g},eb=Zb.length-1;eb>=0;--eb)r(Zb[eb]);for(db.normalizeUnits=function(a){return p(a)},db.invalid=function(a){var b=db.utc(0/0);return null!=a?h(b._pf,a):b._pf.userInvalidated=!0,b},db.parseZone=function(a){return db(a).parseZone()},h(db.fn=f.prototype,{clone:function(){return db(this)},valueOf:function(){return+this._d+6e4*(this._offset||0)},unix:function(){return Math.floor(+this/1e3)},toString:function(){return this.clone().lang("en").format("ddd MMM DD YYYY HH:mm:ss [GMT]ZZ")},toDate:function(){return this._offset?new Date(+this):this._d},toISOString:function(){var a=db(this).utc();return 0<a.year()&&a.year()<=9999?F(a,"YYYY-MM-DD[T]HH:mm:ss.SSS[Z]"):F(a,"YYYYYY-MM-DD[T]HH:mm:ss.SSS[Z]")},toArray:function(){var a=this;return[a.year(),a.month(),a.date(),a.hours(),a.minutes(),a.seconds(),a.milliseconds()]},isValid:function(){return x(this)},isDSTShifted:function(){return this._a?this.isValid()&&o(this._a,(this._isUTC?db.utc(this._a):db(this._a)).toArray())>0:!1},parsingFlags:function(){return h({},this._pf)},invalidAt:function(){return this._pf.overflow},utc:function(){return this.zone(0)},local:function(){return this.zone(0),this._isUTC=!1,this},format:function(a){var b=F(this,a||db.defaultFormat);return this.lang().postformat(b)},add:function(a,b){var c;return c="string"==typeof a?db.duration(+b,a):db.duration(a,b),l(this,c,1),this},subtract:function(a,b){var c;return c="string"==typeof a?db.duration(+b,a):db.duration(a,b),l(this,c,-1),this},diff:function(a,b,c){var d,e,f=z(a,this),g=6e4*(this.zone()-f.zone());return b=p(b),"year"===b||"month"===b?(d=432e5*(this.daysInMonth()+f.daysInMonth()),e=12*(this.year()-f.year())+(this.month()-f.month()),e+=(this-db(this).startOf("month")-(f-db(f).startOf("month")))/d,e-=6e4*(this.zone()-db(this).startOf("month").zone()-(f.zone()-db(f).startOf("month").zone()))/d,"year"===b&&(e/=12)):(d=this-f,e="second"===b?d/1e3:"minute"===b?d/6e4:"hour"===b?d/36e5:"day"===b?(d-g)/864e5:"week"===b?(d-g)/6048e5:d),c?e:j(e)},from:function(a,b){return db.duration(this.diff(a)).lang(this.lang()._abbr).humanize(!b)},fromNow:function(a){return this.from(db(),a)},calendar:function(){var a=z(db(),this).startOf("day"),b=this.diff(a,"days",!0),c=-6>b?"sameElse":-1>b?"lastWeek":0>b?"lastDay":1>b?"sameDay":2>b?"nextDay":7>b?"nextWeek":"sameElse";return this.format(this.lang().calendar(c,this))},isLeapYear:function(){return v(this.year())},isDST:function(){return this.zone()<this.clone().month(0).zone()||this.zone()<this.clone().month(5).zone()},day:function(a){var b=this._isUTC?this._d.getUTCDay():this._d.getDay();return 
null!=a?(a=V(a,this.lang()),this.add({d:a-b})):b},month:function(a){var b,c=this._isUTC?"UTC":"";return null!=a?"string"==typeof a&&(a=this.lang().monthsParse(a),"number"!=typeof a)?this:(b=this.date(),this.date(1),this._d["set"+c+"Month"](a),this.date(Math.min(b,this.daysInMonth())),db.updateOffset(this),this):this._d["get"+c+"Month"]()},startOf:function(a){switch(a=p(a)){case"year":this.month(0);case"month":this.date(1);case"week":case"isoWeek":case"day":this.hours(0);case"hour":this.minutes(0);case"minute":this.seconds(0);case"second":this.milliseconds(0)}return"week"===a?this.weekday(0):"isoWeek"===a&&this.isoWeekday(1),this},endOf:function(a){return a=p(a),this.startOf(a).add("isoWeek"===a?"week":a,1).subtract("ms",1)},isAfter:function(a,b){return b="undefined"!=typeof b?b:"millisecond",+this.clone().startOf(b)>+db(a).startOf(b)},isBefore:function(a,b){return b="undefined"!=typeof b?b:"millisecond",+this.clone().startOf(b)<+db(a).startOf(b)},isSame:function(a,b){return b=b||"ms",+this.clone().startOf(b)===+z(a,this).startOf(b)},min:function(a){return a=db.apply(null,arguments),this>a?this:a},max:function(a){return a=db.apply(null,arguments),a>this?this:a},zone:function(a){var b=this._offset||0;return null==a?this._isUTC?b:this._d.getTimezoneOffset():("string"==typeof a&&(a=I(a)),Math.abs(a)<16&&(a=60*a),this._offset=a,this._isUTC=!0,b!==a&&l(this,db.duration(b-a,"m"),1,!0),this)},zoneAbbr:function(){return this._isUTC?"UTC":""},zoneName:function(){return this._isUTC?"Coordinated Universal Time":""},parseZone:function(){return this._tzm?this.zone(this._tzm):"string"==typeof this._i&&this.zone(this._i),this},hasAlignedHourOffset:function(a){return a=a?db(a).zone():0,(this.zone()-a)%60===0},daysInMonth:function(){return t(this.year(),this.month())},dayOfYear:function(a){var b=hb((db(this).startOf("day")-db(this).startOf("year"))/864e5)+1;return null==a?b:this.add("d",a-b)},quarter:function(){return Math.ceil((this.month()+1)/3)},weekYear:function(a){var b=Y(this,this.lang()._week.dow,this.lang()._week.doy).year;return null==a?b:this.add("y",a-b)},isoWeekYear:function(a){var b=Y(this,1,4).year;return null==a?b:this.add("y",a-b)},week:function(a){var b=this.lang().week(this);return null==a?b:this.add("d",7*(a-b))},isoWeek:function(a){var b=Y(this,1,4).week;return null==a?b:this.add("d",7*(a-b))},weekday:function(a){var b=(this.day()+7-this.lang()._week.dow)%7;return null==a?b:this.add("d",a-b)},isoWeekday:function(a){return null==a?this.day()||7:this.day(this.day()%7?a:a-7)},get:function(a){return a=p(a),this[a]()},set:function(a,b){return a=p(a),"function"==typeof this[a]&&this[a](b),this},lang:function(b){return b===a?this._lang:(this._lang=C(b),this)}}),eb=0;eb<Rb.length;eb++)_(Rb[eb].toLowerCase().replace(/s$/,""),Rb[eb]);_("year","FullYear"),db.fn.days=db.fn.day,db.fn.months=db.fn.month,db.fn.weeks=db.fn.week,db.fn.isoWeeks=db.fn.isoWeek,db.fn.toJSON=db.fn.toISOString,h(db.duration.fn=g.prototype,{_bubble:function(){var a,b,c,d,e=this._milliseconds,f=this._days,g=this._months,h=this._data;h.milliseconds=e%1e3,a=j(e/1e3),h.seconds=a%60,b=j(a/60),h.minutes=b%60,c=j(b/60),h.hours=c%24,f+=j(c/24),h.days=f%30,g+=j(f/30),h.months=g%12,d=j(g/12),h.years=d},weeks:function(){return j(this.days()/7)},valueOf:function(){return this._milliseconds+864e5*this._days+this._months%12*2592e6+31536e6*s(this._months/12)},humanize:function(a){var b=+this,c=X(b,!a,this.lang());return a&&(c=this.lang().pastFuture(b,c)),this.lang().postformat(c)},add:function(a,b){var c=db.duration(a,b);return 
this._milliseconds+=c._milliseconds,this._days+=c._days,this._months+=c._months,this._bubble(),this},subtract:function(a,b){var c=db.duration(a,b);return this._milliseconds-=c._milliseconds,this._days-=c._days,this._months-=c._months,this._bubble(),this},get:function(a){return a=p(a),this[a.toLowerCase()+"s"]()},as:function(a){return a=p(a),this["as"+a.charAt(0).toUpperCase()+a.slice(1)+"s"]()},lang:db.fn.lang,toIsoString:function(){var a=Math.abs(this.years()),b=Math.abs(this.months()),c=Math.abs(this.days()),d=Math.abs(this.hours()),e=Math.abs(this.minutes()),f=Math.abs(this.seconds()+this.milliseconds()/1e3);return this.asSeconds()?(this.asSeconds()<0?"-":"")+"P"+(a?a+"Y":"")+(b?b+"M":"")+(c?c+"D":"")+(d||e||f?"T":"")+(d?d+"H":"")+(e?e+"M":"")+(f?f+"S":""):"P0D"}});for(eb in Sb)Sb.hasOwnProperty(eb)&&(bb(eb,Sb[eb]),ab(eb.toLowerCase()));bb("Weeks",6048e5),db.duration.fn.asMonths=function(){return(+this-31536e6*this.years())/2592e6+12*this.years()},db.lang("en",{ordinal:function(a){var b=a%10,c=1===s(a%100/10)?"th":1===b?"st":2===b?"nd":3===b?"rd":"th";return a+c}}),rb?(module.exports=db,cb(!0)):"function"==typeof define&&define.amd?define("moment",function(b,c,d){return d.config&&d.config()&&d.config().noGlobal!==!0&&cb(d.config().noGlobal===a),db}):cb()}).call(this); \ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/taboverride.min.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/taboverride.min.js
new file mode 100644
index 00000000000..59ef198182d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/lib/taboverride.min.js
@@ -0,0 +1,3 @@
+/*! taboverride v4.0.2 | https://github.com/wjbryant/taboverride
+Copyright (c) 2014 Bill Bryant | http://opensource.org/licenses/mit */
+!function(a){"use strict";var b;"object"==typeof exports?a(exports):"function"==typeof define&&define.amd?define(["exports"],a):(b=window.tabOverride={},a(b))}(function(a){"use strict";function b(a,b){var c,d,e,f=["alt","ctrl","meta","shift"],g=a.length,h=!0;for(c=0;g>c;c+=1)if(!b[a[c]]){h=!1;break}if(h)for(c=0;c<f.length;c+=1){if(e=f[c]+"Key",b[e])if(g){for(h=!1,d=0;g>d;d+=1)if(e===a[d]){h=!0;break}}else h=!1;if(!h)break}return h}function c(a,c){return a===q&&b(s,c)}function d(a,c){return a===r&&b(t,c)}function e(a,b){return function(c,d){var e,f="";if(arguments.length){if("number"==typeof c&&(a(c),b.length=0,d&&d.length))for(e=0;e<d.length;e+=1)b.push(d[e]+"Key");return this}for(e=0;e<b.length;e+=1)f+=b[e].slice(0,-3)+"+";return f+a()}}function f(a){a=a||event;var b,e,f,g,h,i,j,k,l,s,t,w,x,y,z,A,B,C,D=a.currentTarget||a.srcElement,E=a.keyCode,F="character";if((!D.nodeName||"textarea"===D.nodeName.toLowerCase())&&(E===q||E===r||13===E&&u)){if(v=!1,f=D.value,k=D.scrollTop,"number"==typeof D.selectionStart)l=D.selectionStart,s=D.selectionEnd,t=f.slice(l,s);else{if(!o.selection)return;g=o.selection.createRange(),t=g.text,h=g.duplicate(),h.moveToElementText(D),h.setEndPoint("EndToEnd",g),s=h.text.length,l=s-t.length,n>1?(i=f.slice(0,l).split(m).length-1,j=t.split(m).length-1):i=j=0}if(E===q||E===r)if(b=p,e=b.length,y=0,z=0,A=0,l!==s&&-1!==t.indexOf("\n"))if(w=0===l||"\n"===f.charAt(l-1)?l:f.lastIndexOf("\n",l-1)+1,s===f.length||"\n"===f.charAt(s)?x=s:"\n"===f.charAt(s-1)?x=s-1:(x=f.indexOf("\n",s),-1===x&&(x=f.length)),c(E,a))y=1,D.value=f.slice(0,w)+b+f.slice(w,x).replace(/\n/g,function(){return y+=1,"\n"+b})+f.slice(x),g?(g.collapse(),g.moveEnd(F,s+y*e-j-i),g.moveStart(F,l+e-i),g.select()):(D.selectionStart=l+e,D.selectionEnd=s+y*e,D.scrollTop=k);else{if(!d(E,a))return;0===f.slice(w).indexOf(b)&&(w===l?t=t.slice(e):A=e,z=e),D.value=f.slice(0,w)+f.slice(w+A,l)+t.replace(new RegExp("\n"+b,"g"),function(){return y+=1,"\n"})+f.slice(s),g?(g.collapse(),g.moveEnd(F,s-z-y*e-j-i),g.moveStart(F,l-A-i),g.select()):(D.selectionStart=l-A,D.selectionEnd=s-z-y*e)}else if(c(E,a))g?(g.text=b,g.select()):(D.value=f.slice(0,l)+b+f.slice(s),D.selectionEnd=D.selectionStart=l+e,D.scrollTop=k);else{if(!d(E,a))return;0===f.slice(l-e).indexOf(b)&&(D.value=f.slice(0,l-e)+f.slice(l),g?(g.move(F,l-e-i),g.select()):(D.selectionEnd=D.selectionStart=l-e,D.scrollTop=k))}else if(u){if(0===l||"\n"===f.charAt(l-1))return void(v=!0);if(w=f.lastIndexOf("\n",l-1)+1,x=f.indexOf("\n",l),-1===x&&(x=f.length),B=f.slice(w,x).match(/^[ \t]*/)[0],C=B.length,w+C>l)return void(v=!0);g?(g.text="\n"+B,g.select()):(D.value=f.slice(0,l)+"\n"+B+f.slice(s),D.selectionEnd=D.selectionStart=l+n+C,D.scrollTop=k)}return a.preventDefault?void a.preventDefault():(a.returnValue=!1,!1)}}function g(a){a=a||event;var b=a.keyCode;if(c(b,a)||d(b,a)||13===b&&u&&!v){if(!a.preventDefault)return a.returnValue=!1,!1;a.preventDefault()}}function h(a,b){var c,d=x[a]||[],e=d.length;for(c=0;e>c;c+=1)d[c].apply(null,b)}function i(a){function b(b){for(c=0;f>c;c+=1)b(a[c].type,a[c].handler)}var c,d,e,f=a.length;return o.addEventListener?(d=function(a){b(function(b,c){a.removeEventListener(b,c,!1)})},e=function(a){d(a),b(function(b,c){a.addEventListener(b,c,!1)})}):o.attachEvent&&(d=function(a){b(function(b,c){a.detachEvent("on"+b,c)})},e=function(a){d(a),b(function(b,c){a.attachEvent("on"+b,c)})}),{add:e,remove:d}}function j(a){h("addListeners",[a]),l.add(a)}function k(a){h("removeListeners",[a]),l.remove(a)}var l,m,n,o=window.document,p=" 
",q=9,r=9,s=[],t=["shiftKey"],u=!0,v=!1,w=o.createElement("textarea"),x={};l=i([{type:"keydown",handler:f},{type:"keypress",handler:g}]),w.value="\n",m=w.value,n=m.length,w=null,a.utils={executeExtensions:h,isValidModifierKeyCombo:b,createListeners:i,addListeners:j,removeListeners:k},a.handlers={keydown:f,keypress:g},a.addExtension=function(a,b){return a&&"string"==typeof a&&"function"==typeof b&&(x[a]||(x[a]=[]),x[a].push(b)),this},a.set=function(a,b){var c,d,e,f,g,i,l;if(a)for(c=arguments.length<2||b,d=a,e=d.length,"number"!=typeof e&&(d=[d],e=1),c?(f=j,g="true"):(f=k,g=""),i=0;e>i;i+=1)l=d[i],l&&l.nodeName&&"textarea"===l.nodeName.toLowerCase()&&(h("set",[l,c]),l.setAttribute("data-taboverride-enabled",g),f(l));return this},a.tabSize=function(a){var b;if(arguments.length){if(a&&"number"==typeof a&&a>0)for(p="",b=0;a>b;b+=1)p+=" ";else p=" ";return this}return" "===p?0:p.length},a.autoIndent=function(a){return arguments.length?(u=a?!0:!1,this):u},a.tabKey=e(function(a){return arguments.length?void(q=a):q},s),a.untabKey=e(function(a){return arguments.length?void(r=a):r},t)}); \ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/poller.js b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/poller.js
new file mode 100644
index 00000000000..d8ab1e8eb9f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/client/resources/js/poller.js
@@ -0,0 +1,130 @@
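+// Poller long-polls the server for status updates while it is up and falls
+// back to interval-based pings of endpoints.down when it appears to be down.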
+function Poller(config)
+{
+ // CONFIGURABLE
+ var endpoints = {
+ up: "/status/poll", // url to poll when the server is up
+ down: "/status" // url to poll at regular intervals when the server is down
+ };
+	var timeout = 60000 * 2; // how long (ms) each long-poll request waits before timing out
+ var intervalMs = 1000; // ms between polls when the server is down
+
+ // INTERNAL STATE
+ var up = true; // whether or not we can connect to the server
+ var req; // the pending ajax request
+ var downPoller; // the setInterval for polling when the server is down
+ var self = this;
+
+ if (typeof config === 'object')
+ {
+ if (typeof config.endpoints === 'object')
+ {
+ endpoints.up = config.endpoints.up;
+ endpoints.down = config.endpoints.down;
+ }
+ if (config.timeout)
+ timeout = config.timeout;
+ if (config.interval)
+ intervalMs = config.interval;
+ }
+
+ $(self).on('pollstart', function(event, data) {
+ log("Started poller");
+ }).on('pollstop', function(event, data) {
+ log("Stopped poller");
+ });
+
+
+ this.start = function()
+ {
+ if (req)
+ return false;
+ doPoll();
+ $(self).trigger('pollstart', {url: endpoints.up, timeout: timeout});
+ return true;
+ };
+
+ this.stop = function()
+ {
+ if (!req)
+ return false;
+ req.abort();
+ req = undefined;
+ stopDownPoller();
+ $(self).trigger('pollstop', {});
+ return true;
+ };
+
+ this.setTimeout = function(tmout)
+ {
+ timeout = tmout; // takes effect at next poll
+ };
+
+ this.isUp = function()
+ {
+ return up;
+ };
+
+ function doPoll()
+ {
+ req = $.ajax({
+ url: endpoints.up + "?timeout=" + timeout,
+ timeout: timeout
+ }).done(pollSuccess).fail(pollFailed);
+ }
+
+ function pollSuccess(data, message, jqxhr)
+ {
+ stopDownPoller();
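+		// Immediately start the next long poll so status updates keep streaming.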
+ doPoll();
+
+ var wasUp = up;
+ up = true;
+		var status = data;
+
+ var arg = {
+ status: status,
+ data: data,
+ jqxhr: jqxhr
+ };
+
+ if (!wasUp)
+ $(convey.poller).trigger('serverstarting', arg);
+ else
+ $(self).trigger('pollsuccess', arg);
+ }
+
+ function pollFailed(jqxhr, message, exception)
+ {
+ if (message === "timeout")
+ {
+ log("Poller timeout; re-polling...", req);
+ doPoll(); // in our case, timeout actually means no activity; poll again
+ return;
+ }
+
+ up = false;
+
+ downPoller = setInterval(function()
+ {
+ // If the server is still down, do a ping to see
+ // if it's up; pollSuccess() will do the rest.
+ if (!up)
+ $.get(endpoints.down).done(pollSuccess);
+ }, intervalMs);
+
+ $(self).trigger('pollfail', {
+ exception: exception,
+ message: message,
+ jqxhr: jqxhr
+ });
+ }
+
+ function stopDownPoller()
+ {
+ if (!downPoller)
+ return;
+ clearInterval(downPoller);
+ downPoller = undefined;
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/api.goconvey b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/api.goconvey
new file mode 100644
index 00000000000..79982854b53
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/api.goconvey
@@ -0,0 +1,2 @@
+#ignore
+-timeout=1s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/server.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/server.go
new file mode 100644
index 00000000000..6cea26daee8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/server.go
@@ -0,0 +1,164 @@
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "os"
+ "strconv"
+ "time"
+
+ "github.com/smartystreets/goconvey/web/server/contract"
+ "github.com/smartystreets/goconvey/web/server/messaging"
+)
+
+type HTTPServer struct {
+ watcher chan messaging.WatcherCommand
+ executor contract.Executor
+ latest *contract.CompleteOutput
+ currentRoot string
+ longpoll chan chan string
+ paused bool
+}
+
+func (self *HTTPServer) ReceiveUpdate(root string, update *contract.CompleteOutput) {
+ self.currentRoot = root
+ self.latest = update
+}
+
+func (self *HTTPServer) Watch(response http.ResponseWriter, request *http.Request) {
+ if request.Method == "POST" {
+ self.adjustRoot(response, request)
+ } else if request.Method == "GET" {
+ response.Write([]byte(self.currentRoot))
+ }
+}
+
+func (self *HTTPServer) adjustRoot(response http.ResponseWriter, request *http.Request) {
+ newRoot := self.parseQueryString("root", response, request)
+ if newRoot == "" {
+ return
+ }
+	info, err := os.Stat(newRoot) // TODO: how to unit test?
+	if err != nil || !info.IsDir() {
+		// Check err before touching info so a failed Stat cannot nil-dereference.
+		http.Error(response, fmt.Sprintf("Directory does not exist: '%s'", newRoot), http.StatusNotFound)
+		return
+	}
+
+ self.watcher <- messaging.WatcherCommand{
+ Instruction: messaging.WatcherAdjustRoot,
+ Details: newRoot,
+ }
+}
+
+func (self *HTTPServer) Ignore(response http.ResponseWriter, request *http.Request) {
+ paths := self.parseQueryString("paths", response, request)
+ if paths != "" {
+ self.watcher <- messaging.WatcherCommand{
+ Instruction: messaging.WatcherIgnore,
+ Details: paths,
+ }
+ }
+}
+
+func (self *HTTPServer) Reinstate(response http.ResponseWriter, request *http.Request) {
+ paths := self.parseQueryString("paths", response, request)
+ if paths != "" {
+ self.watcher <- messaging.WatcherCommand{
+ Instruction: messaging.WatcherReinstate,
+ Details: paths,
+ }
+ }
+}
+
+func (self *HTTPServer) parseQueryString(key string, response http.ResponseWriter, request *http.Request) string {
+ value := request.URL.Query()[key]
+
+ if len(value) == 0 {
+ http.Error(response, fmt.Sprintf("No '%s' query string parameter included!", key), http.StatusBadRequest)
+ return ""
+ }
+
+ path := value[0]
+ if path == "" {
+ http.Error(response, "You must provide a non-blank path.", http.StatusBadRequest)
+ }
+ return path
+}
+
+func (self *HTTPServer) Status(response http.ResponseWriter, request *http.Request) {
+ status := self.executor.Status()
+ response.Write([]byte(status))
+}
+
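+// LongPollStatus implements the long-poll protocol: a fresh executor status is
+// written immediately; otherwise the handler parks a channel on self.longpoll
+// and waits for the executor to reply on it, or gives up after the
+// client-supplied timeout and writes nothing.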
+func (self *HTTPServer) LongPollStatus(response http.ResponseWriter, request *http.Request) {
+ if self.executor.ClearStatusFlag() {
+ response.Write([]byte(self.executor.Status()))
+ return
+ }
+
+ timeout, err := strconv.Atoi(request.URL.Query().Get("timeout"))
+ if err != nil || timeout > 180000 || timeout < 0 {
+ timeout = 60000 // default timeout is 60 seconds
+ }
+
+ myReqChan := make(chan string)
+
+ select {
+ case self.longpoll <- myReqChan: // this case means the executor's status is changing
+ case <-time.After(time.Duration(timeout) * time.Millisecond): // this case means the executor hasn't changed status
+ return
+ }
+
+ out := <-myReqChan
+
+ if out != "" { // TODO: Why is this check necessary? Sometimes it writes empty string...
+ response.Write([]byte(out))
+ }
+}
+
+func (self *HTTPServer) Results(response http.ResponseWriter, request *http.Request) {
+ response.Header().Set("Content-Type", "application/json")
+ response.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
+ response.Header().Set("Pragma", "no-cache")
+ response.Header().Set("Expires", "0")
+ if self.latest != nil {
+ self.latest.Paused = self.paused
+ }
+ stuff, _ := json.Marshal(self.latest)
+ response.Write(stuff)
+}
+
+func (self *HTTPServer) Execute(response http.ResponseWriter, request *http.Request) {
+ go self.execute()
+}
+
+func (self *HTTPServer) execute() {
+ self.watcher <- messaging.WatcherCommand{Instruction: messaging.WatcherExecute}
+}
+
+func (self *HTTPServer) TogglePause(response http.ResponseWriter, request *http.Request) {
+ instruction := messaging.WatcherPause
+ if self.paused {
+ instruction = messaging.WatcherResume
+ }
+
+ self.watcher <- messaging.WatcherCommand{Instruction: instruction}
+ self.paused = !self.paused
+
+ fmt.Fprint(response, self.paused) // we could write out whatever helps keep the UI honest...
+}
+
+func NewHTTPServer(
+ root string,
+ watcher chan messaging.WatcherCommand,
+ executor contract.Executor,
+ status chan chan string) *HTTPServer {
+
+ self := new(HTTPServer)
+ self.currentRoot = root
+ self.watcher = watcher
+ self.executor = executor
+ self.longpoll = status
+ return self
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/server_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/server_test.go
new file mode 100644
index 00000000000..bd48e308015
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/api/server_test.go
@@ -0,0 +1,462 @@
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+ "testing"
+ "time"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "github.com/smartystreets/goconvey/web/server/contract"
+ "github.com/smartystreets/goconvey/web/server/messaging"
+)
+
+const initialRoot = "/root/gopath/src/github.com/smartystreets/project"
+const nonexistentRoot = "I don't exist"
+const unreadableContent = "!!error!!"
+
+func TestHTTPServer(t *testing.T) {
+ // TODO: fix the skipped tests...
+
+ Convey("Subject: HttpServer responds to requests appropriately", t, func() {
+ fixture := newServerFixture()
+
+		Convey("Before any update is received", func() {
+ Convey("When the update is requested", func() {
+ update, _ := fixture.RequestLatest()
+
+ Convey("No panic should occur", func() {
+ So(func() { fixture.RequestLatest() }, ShouldNotPanic)
+ })
+
+ Convey("The update will be empty", func() {
+ So(update, ShouldResemble, new(contract.CompleteOutput))
+ })
+ })
+ })
+
+ Convey("Given an update is received", func() {
+ fixture.ReceiveUpdate("", &contract.CompleteOutput{Revision: "asdf"})
+
+ Convey("When the update is requested", func() {
+ update, response := fixture.RequestLatest()
+
+ Convey("The server returns it", func() {
+ So(update, ShouldResemble, &contract.CompleteOutput{Revision: "asdf"})
+ })
+
+ Convey("The server returns 200", func() {
+ So(response.Code, ShouldEqual, http.StatusOK)
+ })
+
+ Convey("The server should include important cache-related headers", func() {
+ So(len(response.HeaderMap), ShouldEqual, 4)
+ So(response.HeaderMap["Content-Type"][0], ShouldEqual, "application/json")
+ So(response.HeaderMap["Cache-Control"][0], ShouldEqual, "no-cache, no-store, must-revalidate")
+ So(response.HeaderMap["Pragma"][0], ShouldEqual, "no-cache")
+ So(response.HeaderMap["Expires"][0], ShouldEqual, "0")
+ })
+ })
+ })
+
+ Convey("When the root watch is queried", func() {
+ root, status := fixture.QueryRootWatch(false)
+
+ SkipConvey("The server returns it", func() {
+ So(root, ShouldEqual, initialRoot)
+ })
+
+ Convey("The server returns HTTP 200 - OK", func() {
+ So(status, ShouldEqual, http.StatusOK)
+ })
+ })
+
+ SkipConvey("When the root watch is adjusted", func() {
+
+ Convey("But the request has no root parameter", func() {
+ status, body := fixture.AdjustRootWatchMalformed()
+
+ Convey("The server returns HTTP 400 - Bad Input", func() {
+ So(status, ShouldEqual, http.StatusBadRequest)
+ })
+
+ Convey("The body should contain a helpful error message", func() {
+ So(body, ShouldEqual, "No 'root' query string parameter included!")
+ })
+
+ Convey("The server should not change the existing root", func() {
+ root, _ := fixture.QueryRootWatch(false)
+ So(root, ShouldEqual, initialRoot)
+ })
+ })
+
+ Convey("But the root parameter is empty", func() {
+ status, body := fixture.AdjustRootWatch("")
+
+ Convey("The server returns HTTP 400 - Bad Input", func() {
+ So(status, ShouldEqual, http.StatusBadRequest)
+ })
+
+ Convey("The server should provide a helpful error message", func() {
+ So(body, ShouldEqual, "You must provide a non-blank path.")
+ })
+
+ Convey("The server should not change the existing root", func() {
+ root, _ := fixture.QueryRootWatch(false)
+ So(root, ShouldEqual, initialRoot)
+ })
+ })
+
+ Convey("And the new root exists", func() {
+ status, body := fixture.AdjustRootWatch(initialRoot + "/package")
+
+ Convey("The server returns HTTP 200 - OK", func() {
+ So(status, ShouldEqual, http.StatusOK)
+ })
+
+ Convey("The body should NOT contain any error message or content", func() {
+ So(body, ShouldEqual, "")
+ })
+
+ Convey("The server informs the watcher of the new root", func() {
+ root, _ := fixture.QueryRootWatch(false)
+ So(root, ShouldEqual, initialRoot+"/package")
+ })
+ })
+
+ Convey("And the new root does NOT exist", func() {
+ status, body := fixture.AdjustRootWatch(nonexistentRoot)
+
+ Convey("The server returns HTTP 404 - Not Found", func() {
+ So(status, ShouldEqual, http.StatusNotFound)
+ })
+
+ Convey("The body should contain a helpful error message", func() {
+ So(body, ShouldEqual, fmt.Sprintf("Directory does not exist: '%s'", nonexistentRoot))
+ })
+
+ Convey("The server should not change the existing root", func() {
+ root, _ := fixture.QueryRootWatch(false)
+ So(root, ShouldEqual, initialRoot)
+ })
+ })
+ })
+
+		SkipConvey("When a package is ignored", func() {
+
+ Convey("But the request has no path parameter", func() {
+ status, body := fixture.IgnoreMalformed()
+
+ Convey("The server returns HTTP 400 - Bad Input", func() {
+ So(status, ShouldEqual, http.StatusBadRequest)
+ })
+
+ Convey("The body should contain a helpful error message", func() {
+ So(body, ShouldEqual, "No 'paths' query string parameter included!")
+ })
+
+ SkipConvey("The server should not ignore anything", func() {
+ // So(fixture.watcher.ignored, ShouldEqual, "")
+ })
+ })
+
+ Convey("But the request is blank", func() {
+ status, body := fixture.Ignore("")
+
+ Convey("The server returns HTTP 400 - Bad Input", func() {
+ So(status, ShouldEqual, http.StatusBadRequest)
+ })
+
+ Convey("The body should contain a helpful error message", func() {
+ So(body, ShouldEqual, "You must provide a non-blank path.")
+ })
+ })
+
+ Convey("And the request is well formed", func() {
+ status, _ := fixture.Ignore(initialRoot)
+
+ SkipConvey("The server informs the watcher", func() {
+ // So(fixture.watcher.ignored, ShouldEqual, initialRoot)
+ })
+ Convey("The server returns HTTP 200 - OK", func() {
+ So(status, ShouldEqual, http.StatusOK)
+ })
+ })
+ })
+
+ SkipConvey("When a package is reinstated", func() {
+ Convey("But the request has no path parameter", func() {
+ status, body := fixture.ReinstateMalformed()
+
+ Convey("The server returns HTTP 400 - Bad Input", func() {
+ So(status, ShouldEqual, http.StatusBadRequest)
+ })
+
+ Convey("The body should contain a helpful error message", func() {
+ So(body, ShouldEqual, "No 'paths' query string parameter included!")
+ })
+
+				SkipConvey("The server should not reinstate anything", func() {
+ // So(fixture.watcher.reinstated, ShouldEqual, "")
+ })
+ })
+
+ Convey("But the request is blank", func() {
+ status, body := fixture.Reinstate("")
+
+ Convey("The server returns HTTP 400 - Bad Input", func() {
+ So(status, ShouldEqual, http.StatusBadRequest)
+ })
+
+ Convey("The body should contain a helpful error message", func() {
+ So(body, ShouldEqual, "You must provide a non-blank path.")
+ })
+ })
+
+ Convey("And the request is well formed", func() {
+ status, _ := fixture.Reinstate(initialRoot)
+
+ SkipConvey("The server informs the watcher", func() {
+ // So(fixture.watcher.reinstated, ShouldEqual, initialRoot)
+ })
+ Convey("The server returns HTTP 200 - OK", func() {
+ So(status, ShouldEqual, http.StatusOK)
+ })
+ })
+ })
+
+ Convey("When the status of the executor is requested", func() {
+ fixture.executor.status = "blah blah blah"
+ statusCode, statusBody := fixture.RequestExecutorStatus()
+
+ Convey("The server asks the executor its status and returns it", func() {
+ So(statusBody, ShouldEqual, "blah blah blah")
+ })
+
+ Convey("The server returns HTTP 200 - OK", func() {
+ So(statusCode, ShouldEqual, http.StatusOK)
+ })
+ })
+
+ Convey("When a manual execution of the test packages is requested", func() {
+ status := fixture.ManualExecution()
+ update, _ := fixture.RequestLatest()
+
+			SkipConvey("The server invokes the executor using the watcher's listing and saves the result", func() {
+ So(update, ShouldResemble, &contract.CompleteOutput{Revision: initialRoot})
+ })
+
+ Convey("The server returns HTTP 200 - OK", func() {
+ So(status, ShouldEqual, http.StatusOK)
+ })
+ })
+
+ SkipConvey("When the pause setting is toggled via the server", func() {
+ paused := fixture.TogglePause()
+
+ SkipConvey("The pause channel buffer should have a true value", func() {
+ // var value bool
+ // select {
+ // case value = <-fixture.pauseUpdate:
+ // default:
+ // }
+ // So(value, ShouldBeTrue)
+ })
+
+ Convey("The latest results should show that the server is paused", func() {
+ fixture.ReceiveUpdate("", &contract.CompleteOutput{Revision: "asdf"})
+ update, _ := fixture.RequestLatest()
+
+ So(update.Paused, ShouldBeTrue)
+ })
+
+ Convey("The toggle handler should return its new status", func() {
+ So(paused, ShouldEqual, "true")
+ })
+ })
+ })
+}
+
+/********* Server Fixture *********/
+
+type ServerFixture struct {
+ server *HTTPServer
+ watcher chan messaging.WatcherCommand
+ executor *FakeExecutor
+ statusUpdate chan bool
+}
+
+func (self *ServerFixture) ReceiveUpdate(root string, update *contract.CompleteOutput) {
+ self.server.ReceiveUpdate(root, update)
+}
+
+func (self *ServerFixture) RequestLatest() (*contract.CompleteOutput, *httptest.ResponseRecorder) {
+ request, _ := http.NewRequest("GET", "http://localhost:8080/results", nil)
+ response := httptest.NewRecorder()
+
+ self.server.Results(response, request)
+
+ decoder := json.NewDecoder(strings.NewReader(response.Body.String()))
+ update := new(contract.CompleteOutput)
+ decoder.Decode(update)
+ return update, response
+}
+
+func (self *ServerFixture) QueryRootWatch(newclient bool) (string, int) {
+ url := "http://localhost:8080/watch"
+ if newclient {
+ url += "?newclient=1"
+ }
+ request, _ := http.NewRequest("GET", url, nil)
+ response := httptest.NewRecorder()
+
+ self.server.Watch(response, request)
+
+ return strings.TrimSpace(response.Body.String()), response.Code
+}
+
+func (self *ServerFixture) AdjustRootWatchMalformed() (status int, body string) {
+ request, _ := http.NewRequest("POST", "http://localhost:8080/watch", nil)
+ response := httptest.NewRecorder()
+
+ self.server.Watch(response, request)
+
+ status, body = response.Code, strings.TrimSpace(response.Body.String())
+ return
+}
+
+func (self *ServerFixture) AdjustRootWatch(newRoot string) (status int, body string) {
+ escapedRoot := url.QueryEscape(newRoot)
+ request, _ := http.NewRequest("POST", "http://localhost:8080/watch?root="+escapedRoot, nil)
+ response := httptest.NewRecorder()
+
+ self.server.Watch(response, request)
+
+ status, body = response.Code, strings.TrimSpace(response.Body.String())
+ return
+}
+
+func (self *ServerFixture) IgnoreMalformed() (status int, body string) {
+ request, _ := http.NewRequest("POST", "http://localhost:8080/ignore", nil)
+ response := httptest.NewRecorder()
+
+ self.server.Ignore(response, request)
+
+ status, body = response.Code, strings.TrimSpace(response.Body.String())
+ return
+}
+
+func (self *ServerFixture) Ignore(folder string) (status int, body string) {
+ escapedFolder := url.QueryEscape(folder)
+ request, _ := http.NewRequest("POST", "http://localhost:8080/ignore?paths="+escapedFolder, nil)
+ response := httptest.NewRecorder()
+
+ self.server.Ignore(response, request)
+
+ status, body = response.Code, strings.TrimSpace(response.Body.String())
+ return
+}
+
+func (self *ServerFixture) ReinstateMalformed() (status int, body string) {
+ request, _ := http.NewRequest("POST", "http://localhost:8080/reinstate", nil)
+ response := httptest.NewRecorder()
+
+ self.server.Reinstate(response, request)
+
+ status, body = response.Code, strings.TrimSpace(response.Body.String())
+ return
+}
+
+func (self *ServerFixture) Reinstate(folder string) (status int, body string) {
+ escapedFolder := url.QueryEscape(folder)
+ request, _ := http.NewRequest("POST", "http://localhost:8080/reinstate?paths="+escapedFolder, nil)
+ response := httptest.NewRecorder()
+
+ self.server.Reinstate(response, request)
+
+ status, body = response.Code, strings.TrimSpace(response.Body.String())
+ return
+}
+
+func (self *ServerFixture) SetExecutorStatus(status string) {
+ // self.executor.status = status
+ // select {
+ // case self.executor.statusUpdate <- make(chan string):
+ // default:
+ // }
+}
+
+func (self *ServerFixture) RequestExecutorStatus() (code int, status string) {
+ request, _ := http.NewRequest("GET", "http://localhost:8080/status", nil)
+ response := httptest.NewRecorder()
+
+ self.server.Status(response, request)
+
+ code, status = response.Code, strings.TrimSpace(response.Body.String())
+ return
+}
+
+func (self *ServerFixture) ManualExecution() int {
+ request, _ := http.NewRequest("POST", "http://localhost:8080/execute", nil)
+ response := httptest.NewRecorder()
+
+ self.server.Execute(response, request)
+ nap, _ := time.ParseDuration("100ms")
+ time.Sleep(nap)
+ return response.Code
+}
+
+func (self *ServerFixture) TogglePause() string {
+ request, _ := http.NewRequest("POST", "http://localhost:8080/pause", nil)
+ response := httptest.NewRecorder()
+
+ self.server.TogglePause(response, request)
+
+ return response.Body.String()
+}
+
+func newServerFixture() *ServerFixture {
+ self := new(ServerFixture)
+ self.watcher = make(chan messaging.WatcherCommand)
+ // self.watcher.SetRootWatch(initialRoot)
+ statusUpdate := make(chan chan string)
+ self.executor = newFakeExecutor("", statusUpdate)
+ self.server = NewHTTPServer("initial-working-dir", self.watcher, self.executor, statusUpdate)
+ return self
+}
+
+/********* Fake Executor *********/
+
+type FakeExecutor struct {
+ status string
+ executed bool
+ statusFlag bool
+ statusUpdate chan chan string
+}
+
+func (self *FakeExecutor) Status() string {
+ return self.status
+}
+
+func (self *FakeExecutor) ClearStatusFlag() bool {
+ hasNewStatus := self.statusFlag
+ self.statusFlag = false
+ return hasNewStatus
+}
+
+func (self *FakeExecutor) ExecuteTests(watched []*contract.Package) *contract.CompleteOutput {
+ output := new(contract.CompleteOutput)
+ output.Revision = watched[0].Path
+ return output
+}
+
+func newFakeExecutor(status string, statusUpdate chan chan string) *FakeExecutor {
+ self := new(FakeExecutor)
+ self.status = status
+ self.statusUpdate = statusUpdate
+ return self
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/contracts.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/contracts.go
new file mode 100644
index 00000000000..e758f3e16e3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/contracts.go
@@ -0,0 +1,27 @@
+package contract
+
+import "net/http"
+
+type (
+ Server interface {
+ ReceiveUpdate(root string, update *CompleteOutput)
+ Watch(writer http.ResponseWriter, request *http.Request)
+ Ignore(writer http.ResponseWriter, request *http.Request)
+ Reinstate(writer http.ResponseWriter, request *http.Request)
+ Status(writer http.ResponseWriter, request *http.Request)
+ LongPollStatus(writer http.ResponseWriter, request *http.Request)
+ Results(writer http.ResponseWriter, request *http.Request)
+ Execute(writer http.ResponseWriter, request *http.Request)
+ TogglePause(writer http.ResponseWriter, request *http.Request)
+ }
+
+ Executor interface {
+ ExecuteTests([]*Package) *CompleteOutput
+ Status() string
+ ClearStatusFlag() bool
+ }
+
+ Shell interface {
+ GoTest(directory, packageName string, tags, arguments []string) (output string, err error)
+ }
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/doc_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/doc_test.go
new file mode 100644
index 00000000000..14f4d2d9c2e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/doc_test.go
@@ -0,0 +1 @@
+package contract
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/result.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/result.go
new file mode 100644
index 00000000000..c6f9bf2cfde
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/contract/result.go
@@ -0,0 +1,120 @@
+package contract
+
+import (
+ "path/filepath"
+ "strings"
+
+ "go/build"
+
+ "github.com/smartystreets/goconvey/convey/reporting"
+ "github.com/smartystreets/goconvey/web/server/messaging"
+)
+
+type Package struct {
+ Path string
+ Name string
+ Ignored bool
+ Disabled bool
+ BuildTags []string
+ TestArguments []string
+ Error error
+ Output string
+ Result *PackageResult
+
+ HasImportCycle bool
+}
+
+func NewPackage(folder *messaging.Folder, hasImportCycle bool) *Package {
+ self := new(Package)
+ self.Path = folder.Path
+ self.Name = resolvePackageName(self.Path)
+ self.Result = NewPackageResult(self.Name)
+ self.Ignored = folder.Ignored
+ self.Disabled = folder.Disabled
+ self.BuildTags = folder.BuildTags
+ self.TestArguments = folder.TestArguments
+ self.HasImportCycle = hasImportCycle
+ return self
+}
+
+func (self *Package) Active() bool {
+ return !self.Disabled && !self.Ignored
+}
+
+func (self *Package) HasUsableResult() bool {
+ return self.Active() && (self.Error == nil || (self.Output != ""))
+}
+
+type CompleteOutput struct {
+ Packages []*PackageResult
+ Revision string
+ Paused bool
+}
+
+var ( // PackageResult.Outcome values:
+ Ignored = "ignored"
+ Disabled = "disabled"
+ Passed = "passed"
+ Failed = "failed"
+ Panicked = "panicked"
+ BuildFailure = "build failure"
+ NoTestFiles = "no test files"
+ NoTestFunctions = "no test functions"
+ NoGoFiles = "no go code"
+
+ TestRunAbortedUnexpectedly = "test run aborted unexpectedly"
+)
+
+type PackageResult struct {
+ PackageName string
+ Elapsed float64
+ Coverage float64
+ Outcome string
+ BuildOutput string
+ TestResults []TestResult
+}
+
+func NewPackageResult(packageName string) *PackageResult {
+ self := new(PackageResult)
+ self.PackageName = packageName
+ self.TestResults = []TestResult{}
+ self.Coverage = -1
+ return self
+}
+
+type TestResult struct {
+ TestName string
+ Elapsed float64
+ Passed bool
+ Skipped bool
+ File string
+ Line int
+ Message string
+ Error string
+ Stories []reporting.ScopeResult
+
+ RawLines []string `json:",omitempty"`
+}
+
+func NewTestResult(testName string) *TestResult {
+ self := new(TestResult)
+ self.Stories = []reporting.ScopeResult{}
+ self.RawLines = []string{}
+ self.TestName = testName
+ return self
+}
+
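+// resolvePackageName asks go/build for the directory's import path and, when
+// the import fails, falls back to the path segment after the last "/src/".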
+func resolvePackageName(path string) string {
+ pkg, err := build.ImportDir(path, build.FindOnly)
+ if err == nil {
+ return pkg.ImportPath
+ }
+
+ nameArr := strings.Split(path, endGoPath)
+ return nameArr[len(nameArr)-1]
+}
+
+const (
+ separator = string(filepath.Separator)
+ endGoPath = separator + "src" + separator
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/contract.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/contract.go
new file mode 100644
index 00000000000..209dbca5975
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/contract.go
@@ -0,0 +1,12 @@
+package executor
+
+import "github.com/smartystreets/goconvey/web/server/contract"
+
+type Parser interface {
+ Parse([]*contract.Package)
+}
+
+type Tester interface {
+ SetBatchSize(batchSize int)
+ TestAll(folders []*contract.Package)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/coordinator.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/coordinator.go
new file mode 100644
index 00000000000..117dd56d65a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/coordinator.go
@@ -0,0 +1,71 @@
+package executor
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "strings"
+ "sync"
+
+ "github.com/smartystreets/goconvey/web/server/contract"
+)
+
+type concurrentCoordinator struct {
+ batchSize int
+ queue chan *contract.Package
+ folders []*contract.Package
+ shell contract.Shell
+ waiter sync.WaitGroup
+}
+
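+// ExecuteConcurrently starts batchSize workers that drain the package queue,
+// then blocks until the queue is closed and every worker has finished.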
+func (self *concurrentCoordinator) ExecuteConcurrently() {
+ self.enlistWorkers()
+ self.scheduleTasks()
+ self.awaitCompletion()
+}
+
+func (self *concurrentCoordinator) enlistWorkers() {
+ for i := 0; i < self.batchSize; i++ {
+ self.waiter.Add(1)
+ go self.worker(i)
+ }
+}
+
+func (self *concurrentCoordinator) worker(id int) {
+ for folder := range self.queue {
+ packageName := strings.Replace(folder.Name, "\\", "/", -1)
+ if !folder.Active() {
+ log.Printf("Skipping concurrent execution: %s\n", packageName)
+ continue
+ }
+
+ if folder.HasImportCycle {
+ message := fmt.Sprintf("can't load package: import cycle not allowed\npackage %s\n\timports %s", packageName, packageName)
+ log.Println(message)
+ folder.Output, folder.Error = message, errors.New(message)
+ } else {
+ log.Printf("Executing concurrent tests: %s\n", packageName)
+ folder.Output, folder.Error = self.shell.GoTest(folder.Path, packageName, folder.BuildTags, folder.TestArguments)
+ }
+ }
+ self.waiter.Done()
+}
+
+func (self *concurrentCoordinator) scheduleTasks() {
+ for _, folder := range self.folders {
+ self.queue <- folder
+ }
+}
+
+func (self *concurrentCoordinator) awaitCompletion() {
+ close(self.queue)
+ self.waiter.Wait()
+}
+
+func newConcurrentCoordinator(folders []*contract.Package, batchSize int, shell contract.Shell) *concurrentCoordinator {
+ self := new(concurrentCoordinator)
+ self.queue = make(chan *contract.Package)
+ self.folders = folders
+ self.batchSize = batchSize
+ self.shell = shell
+ return self
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor.go
new file mode 100644
index 00000000000..887080ccd33
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor.go
@@ -0,0 +1,84 @@
+package executor
+
+import (
+ "log"
+ "time"
+
+ "github.com/smartystreets/goconvey/web/server/contract"
+)
+
+const (
+ Idle = "idle"
+ Executing = "executing"
+)
+
+type Executor struct {
+ tester Tester
+ parser Parser
+ status string
+ statusChan chan chan string
+ statusFlag bool
+}
+
+func (self *Executor) Status() string {
+ return self.status
+}
+
+func (self *Executor) ClearStatusFlag() bool {
+ hasNewStatus := self.statusFlag
+ self.statusFlag = false
+ return hasNewStatus
+}
+
+func (self *Executor) ExecuteTests(folders []*contract.Package) *contract.CompleteOutput {
+ defer func() { self.setStatus(Idle) }()
+ self.execute(folders)
+ result := self.parse(folders)
+ return result
+}
+
+func (self *Executor) execute(folders []*contract.Package) {
+ self.setStatus(Executing)
+ self.tester.TestAll(folders)
+}
+
+func (self *Executor) parse(folders []*contract.Package) *contract.CompleteOutput {
+ result := &contract.CompleteOutput{Revision: now().String()}
+ self.parser.Parse(folders)
+ for _, folder := range folders {
+ result.Packages = append(result.Packages, folder.Result)
+ }
+ return result
+}
+
+func (self *Executor) setStatus(status string) {
+ self.status = status
+ self.statusFlag = true
+
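+	// Hand the new status to every long-poll request currently parked on
+	// statusChan; the default case keeps the send non-blocking when nobody waits.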
+Loop:
+ for {
+ select {
+ case c := <-self.statusChan:
+ self.statusFlag = false
+ c <- status
+ default:
+ break Loop
+ }
+ }
+
+ log.Printf("Executor status: '%s'\n", self.status)
+}
+
+func NewExecutor(tester Tester, parser Parser, ch chan chan string) *Executor {
+ return &Executor{
+ tester: tester,
+ parser: parser,
+ status: Idle,
+ statusChan: ch,
+ statusFlag: false,
+ }
+}
+
+var now = func() time.Time {
+ return time.Now()
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor.goconvey b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor.goconvey
new file mode 100644
index 00000000000..79982854b53
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor.goconvey
@@ -0,0 +1,2 @@
+#ignore
+-timeout=1s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor_test.go
new file mode 100644
index 00000000000..d7221b0dc12
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/executor_test.go
@@ -0,0 +1,160 @@
+package executor
+
+import (
+ "strings"
+ "testing"
+ "time"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "github.com/smartystreets/goconvey/web/server/contract"
+)
+
+func TestExecutor(t *testing.T) {
+ t.Skip("BROKEN!")
+
+ Convey("Subject: Execution of test packages and aggregation of parsed results", t, func() {
+ fixture := newExecutorFixture()
+
+ Convey("When tests packages are executed", func() {
+ fixture.ExecuteTests()
+
+ Convey("The result should include parsed results for each test package.",
+ fixture.ResultShouldBePopulated)
+ })
+
+ Convey("When the executor is idle", func() {
+ Convey("The status of the executor should be 'idle'", func() {
+ So(fixture.executor.Status(), ShouldEqual, Idle)
+ })
+ })
+
+ Convey("When the status is updated", func() {
+ fixture.executor.setStatus(Executing)
+
+ Convey("The status flag should be set to true", func() {
+ So(fixture.executor.statusFlag, ShouldBeTrue)
+ })
+ })
+
+ Convey("During test execution", func() {
+ status := fixture.CaptureStatusDuringExecutionPhase()
+
+ Convey("The status of the executor should be 'executing'", func() {
+ So(status, ShouldEqual, Executing)
+ })
+ })
+ })
+}
+
+type ExecutorFixture struct {
+ executor *Executor
+ tester *FakeTester
+ parser *FakeParser
+ folders []*contract.Package
+ result *contract.CompleteOutput
+ expected *contract.CompleteOutput
+ stamp time.Time
+}
+
+func (self *ExecutorFixture) ExecuteTests() {
+ self.result = self.executor.ExecuteTests(self.folders)
+}
+
+func (self *ExecutorFixture) CaptureStatusDuringExecutionPhase() string {
+ nap, _ := time.ParseDuration("25ms")
+ self.tester.addDelay(nap)
+ return self.delayedExecution(nap)
+}
+
+func (self *ExecutorFixture) delayedExecution(nap time.Duration) string {
+ go self.ExecuteTests()
+ time.Sleep(nap)
+ return self.executor.Status()
+}
+
+func (self *ExecutorFixture) ResultShouldBePopulated() {
+ So(self.result, ShouldResemble, self.expected)
+}
+
+var (
+ prefix = "/Users/blah/gopath/src/"
+ packageA = "github.com/smartystreets/goconvey/a"
+ packageB = "github.com/smartystreets/goconvey/b"
+ resultA = &contract.PackageResult{PackageName: packageA}
+ resultB = &contract.PackageResult{PackageName: packageB}
+)
+
+func newExecutorFixture() *ExecutorFixture {
+ self := new(ExecutorFixture)
+ self.tester = newFakeTester()
+ self.parser = newFakeParser()
+ self.executor = NewExecutor(self.tester, self.parser, make(chan chan string))
+ self.folders = []*contract.Package{
+ &contract.Package{Path: prefix + packageA, Name: packageA},
+ &contract.Package{Path: prefix + packageB, Name: packageB},
+ }
+ self.stamp = time.Now()
+ now = func() time.Time { return self.stamp }
+
+ self.expected = &contract.CompleteOutput{
+ Packages: []*contract.PackageResult{
+ resultA,
+ resultB,
+ },
+ Revision: self.stamp.String(),
+ }
+ return self
+}
+
+/******** FakeTester ********/
+
+type FakeTester struct {
+ nap time.Duration
+}
+
+func (self *FakeTester) SetBatchSize(batchSize int) { panic("NOT SUPPORTED") }
+func (self *FakeTester) TestAll(folders []*contract.Package) {
+ for _, p := range folders {
+ p.Output = p.Path
+ }
+ time.Sleep(self.nap)
+}
+func (self *FakeTester) addDelay(nap time.Duration) {
+ self.nap = nap
+}
+
+func newFakeTester() *FakeTester {
+	return new(FakeTester) // zero-value nap means no artificial delay
+}
+
+/******** FakeParser ********/
+
+type FakeParser struct {
+ nap time.Duration
+}
+
+func (self *FakeParser) Parse(packages []*contract.Package) {
+ time.Sleep(self.nap)
+ for _, package_ := range packages {
+ if package_.Name == packageA && strings.HasSuffix(package_.Output, packageA) {
+ package_.Result = resultA
+ }
+ if package_.Name == packageB && strings.HasSuffix(package_.Output, packageB) {
+ package_.Result = resultB
+ }
+ }
+}
+
+func (self *FakeParser) addDelay(nap time.Duration) {
+ self.nap = nap
+}
+
+func newFakeParser() *FakeParser {
+	return new(FakeParser) // zero-value nap means no artificial delay
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/tester.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/tester.go
new file mode 100644
index 00000000000..76f353a5baa
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/tester.go
@@ -0,0 +1,56 @@
+package executor
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/smartystreets/goconvey/web/server/contract"
+)
+
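+// ConcurrentTester tests a set of packages either one at a time (batch
+// size 1) or in concurrent batches via a coordinator.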
+type ConcurrentTester struct {
+ shell contract.Shell
+ batchSize int
+}
+
+func (self *ConcurrentTester) SetBatchSize(batchSize int) {
+ self.batchSize = batchSize
+ log.Printf("Now configured to test %d packages concurrently.\n", self.batchSize)
+}
+
+func (self *ConcurrentTester) TestAll(folders []*contract.Package) {
+ if self.batchSize == 1 {
+ self.executeSynchronously(folders)
+ } else {
+ newConcurrentCoordinator(folders, self.batchSize, self.shell).ExecuteConcurrently()
+ }
+}
+
+func (self *ConcurrentTester) executeSynchronously(folders []*contract.Package) {
+ for _, folder := range folders {
+ packageName := strings.Replace(folder.Name, "\\", "/", -1)
+ if !folder.Active() {
+ log.Printf("Skipping execution: %s\n", packageName)
+ continue
+ }
+ if folder.HasImportCycle {
+ message := fmt.Sprintf("can't load package: import cycle not allowed\npackage %s\n\timports %s", packageName, packageName)
+ log.Println(message)
+ folder.Output, folder.Error = message, errors.New(message)
+ } else {
+ log.Printf("Executing tests: %s\n", packageName)
+ folder.Output, folder.Error = self.shell.GoTest(folder.Path, packageName, folder.BuildTags, folder.TestArguments)
+ }
+ }
+}
+
+func NewConcurrentTester(shell contract.Shell) *ConcurrentTester {
+ self := new(ConcurrentTester)
+ self.shell = shell
+ self.batchSize = defaultBatchSize
+ return self
+}
+
+const defaultBatchSize = 10
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/tester_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/tester_test.go
new file mode 100644
index 00000000000..d540c546d9a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/executor/tester_test.go
@@ -0,0 +1,254 @@
+package executor
+
+import (
+ "errors"
+ "io/ioutil"
+ "log"
+ "testing"
+ "time"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "github.com/smartystreets/goconvey/web/server/contract"
+)
+
+func init() {
+ log.SetOutput(ioutil.Discard)
+}
+
+func TestConcurrentTester(t *testing.T) {
+ t.Skip("BROKEN!")
+
+ Convey("Subject: Controlled execution of test packages", t, func() {
+ fixture := NewTesterFixture()
+
+ Convey("Whenever tests for each package are executed", func() {
+ fixture.InBatchesOf(1).RunTests()
+
+ Convey("The tester should execute the tests in each active package with the correct arguments",
+ fixture.ShouldHaveRecordOfExecutionCommands)
+
+ Convey("There should be a test output result for each active package",
+ fixture.ShouldHaveOneOutputPerInput)
+
+ Convey("The output should be as expected",
+ fixture.OutputShouldBeAsExpected)
+ })
+
+ Convey("When the tests for each package are executed synchronously", func() {
+ fixture.InBatchesOf(1).RunTests()
+
+ Convey("Each active package should be run synchronously and in the given order",
+ fixture.TestsShouldHaveRunContiguously)
+ })
+
+ Convey("When the tests for each package are executed synchronously with failures", func() {
+ fixture.InBatchesOf(1).SetupFailedTestSuites().RunTests()
+
+ Convey("The failed test packages should not result in any panics", func() {
+ So(fixture.recovered, ShouldBeNil)
+ })
+ })
+
+ Convey("When packages are tested concurrently", func() {
+ fixture.InBatchesOf(concurrentBatchSize).RunTests()
+
+ Convey("Active packages should be arranged and tested in batches of the appropriate size",
+ fixture.TestsShouldHaveRunInBatchesOfTwo)
+ })
+
+ Convey("When packages are tested concurrently with failures", func() {
+ fixture.InBatchesOf(concurrentBatchSize).SetupFailedTestSuites().RunTests()
+
+ Convey("The failed test packages should not result in any panics", func() {
+ So(fixture.recovered, ShouldBeNil)
+ })
+ })
+ })
+}
+
+const concurrentBatchSize = 2
+
+type TesterFixture struct {
+ tester *ConcurrentTester
+ shell *TimedShell
+ results []string
+ compilations []*ShellCommand
+ executions []*ShellCommand
+ packages []*contract.Package
+ recovered error
+}
+
+func NewTesterFixture() *TesterFixture {
+ self := new(TesterFixture)
+ self.shell = NewTimedShell()
+ self.tester = NewConcurrentTester(self.shell)
+ self.packages = []*contract.Package{
+ {Path: "a"},
+ {Path: "b"},
+ {Path: "c"},
+ {Path: "d"},
+ {Path: "e", Ignored: true},
+ {Path: "f"},
+ {Path: "g", HasImportCycle: true},
+ }
+ return self
+}
+
+func (self *TesterFixture) InBatchesOf(batchSize int) *TesterFixture {
+ self.tester.SetBatchSize(batchSize)
+ return self
+}
+
+func (self *TesterFixture) SetupAbnormalError(message string) *TesterFixture {
+ self.shell.setTripWire(message)
+ return self
+}
+
+func (self *TesterFixture) SetupFailedTestSuites() *TesterFixture {
+ self.shell.setExitWithError()
+ return self
+}
+
+func (self *TesterFixture) RunTests() {
+ defer func() {
+ if r := recover(); r != nil {
+ self.recovered = r.(error)
+ }
+ }()
+
+ self.tester.TestAll(self.packages)
+ for _, p := range self.packages {
+ self.results = append(self.results, p.Output)
+ }
+ self.executions = self.shell.Executions()
+}
+
+func (self *TesterFixture) ShouldHaveRecordOfExecutionCommands() {
+ executed := []string{"a", "b", "c", "d", "f"}
+ ignored := "e"
+ importCycle := "g"
+ actual := []string{}
+ for _, pkg := range self.executions {
+ actual = append(actual, pkg.Command)
+ }
+ So(actual, ShouldResemble, executed)
+ So(actual, ShouldNotContain, ignored)
+ So(actual, ShouldNotContain, importCycle)
+}
+
+func (self *TesterFixture) ShouldHaveOneOutputPerInput() {
+ So(len(self.results), ShouldEqual, len(self.packages))
+}
+
+func (self *TesterFixture) OutputShouldBeAsExpected() {
+ for _, p := range self.packages {
+ if p.HasImportCycle {
+ So(p.Output, ShouldContainSubstring, "can't load package: import cycle not allowed")
+ So(p.Error.Error(), ShouldContainSubstring, "can't load package: import cycle not allowed")
+ } else {
+ if p.Active() {
+ So(p.Output, ShouldEndWith, p.Path)
+ } else {
+ So(p.Output, ShouldBeBlank)
+ }
+ So(p.Error, ShouldBeNil)
+ }
+ }
+}
+
+func (self *TesterFixture) TestsShouldHaveRunContiguously() {
+ self.OutputShouldBeAsExpected()
+
+ So(self.shell.MaxConcurrentCommands(), ShouldEqual, 1)
+
+ for i := 0; i < len(self.executions)-1; i++ {
+ current := self.executions[i]
+ next := self.executions[i+1]
+ So(current.Started, ShouldHappenBefore, next.Started)
+ So(current.Ended, ShouldHappenOnOrBefore, next.Started)
+ }
+}
+
+func (self *TesterFixture) TestsShouldHaveRunInBatchesOfTwo() {
+ self.OutputShouldBeAsExpected()
+
+ So(self.shell.MaxConcurrentCommands(), ShouldEqual, concurrentBatchSize)
+}
+
+/**** Fakes ****/
+
+type ShellCommand struct {
+ Command string
+ Started time.Time
+ Ended time.Time
+}
+
+type TimedShell struct {
+ executions []*ShellCommand
+ panicMessage string
+ err error
+}
+
+func (self *TimedShell) Executions() []*ShellCommand {
+ return self.executions
+}
+
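+// MaxConcurrentCommands reports the largest number of recorded commands
+// whose execution windows overlapped, by comparing each pair of start and
+// end times.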
+func (self *TimedShell) MaxConcurrentCommands() int {
+ var concurrent int
+
+ for x, current := range self.executions {
+ concurrentWith_x := 1
+ for y, comparison := range self.executions {
+ if y == x {
+ continue
+ } else if concurrentWith(current, comparison) {
+ concurrentWith_x++
+ }
+ }
+ if concurrentWith_x > concurrent {
+ concurrent = concurrentWith_x
+ }
+ }
+ return concurrent
+}
+
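+// concurrentWith reports whether comparison started within current's
+// execution window (on or after its start, and before its end).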
+func concurrentWith(current, comparison *ShellCommand) bool {
+ return ((comparison.Started == current.Started || comparison.Started.After(current.Started)) &&
+ (comparison.Started.Before(current.Ended)))
+}
+
+func (self *TimedShell) setTripWire(message string) {
+ self.panicMessage = message
+}
+
+func (self *TimedShell) setExitWithError() {
+	self.err = errors.New("simulated test failure")
+}
+
+func (self *TimedShell) GoTest(directory, packageName string, arguments, tags []string) (output string, err error) {
+ if self.panicMessage != "" {
+ return "", errors.New(self.panicMessage)
+ }
+
+ output = directory
+ err = self.err
+ self.executions = append(self.executions, self.composeCommand(directory))
+ return
+}
+
+func (self *TimedShell) composeCommand(commandText string) *ShellCommand {
+ start := time.Now()
+ time.Sleep(nap)
+ end := time.Now()
+ return &ShellCommand{commandText, start, end}
+}
+
+func NewTimedShell() *TimedShell {
+ self := new(TimedShell)
+ self.executions = []*ShellCommand{}
+ return self
+}
+
+var nap = 10 * time.Millisecond
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/messaging/doc_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/messaging/doc_test.go
new file mode 100644
index 00000000000..cbb7a43f8ba
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/messaging/doc_test.go
@@ -0,0 +1 @@
+package messaging
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/messaging/messages.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/messaging/messages.go
new file mode 100644
index 00000000000..7a92091162a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/messaging/messages.go
@@ -0,0 +1,56 @@
+package messaging
+
+///////////////////////////////////////////////////////////////////////////////
+
+type WatcherCommand struct {
+ Instruction WatcherInstruction
+ Details string
+}
+
+type WatcherInstruction int
+
+func (this WatcherInstruction) String() string {
+ switch this {
+ case WatcherPause:
+ return "Pause"
+ case WatcherResume:
+ return "Resume"
+ case WatcherIgnore:
+ return "Ignore"
+ case WatcherReinstate:
+ return "Reinstate"
+ case WatcherAdjustRoot:
+ return "AdjustRoot"
+ case WatcherExecute:
+ return "Execute"
+ case WatcherStop:
+ return "Stop"
+ default:
+ return "UNKNOWN INSTRUCTION"
+ }
+}
+
+const (
+ WatcherPause WatcherInstruction = iota
+ WatcherResume
+ WatcherIgnore
+ WatcherReinstate
+ WatcherAdjustRoot
+ WatcherExecute
+ WatcherStop
+)
+
+///////////////////////////////////////////////////////////////////////////////
+
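+// Folders maps each watched folder's path to its metadata.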
+type Folders map[string]*Folder
+
+type Folder struct {
+ Path string // key
+ Root string
+ Ignored bool
+ Disabled bool
+ BuildTags []string
+ TestArguments []string
+}
+
+///////////////////////////////////////////////////////////////////////////////
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/packageParser.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/packageParser.go
new file mode 100644
index 00000000000..628b137b39a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/packageParser.go
@@ -0,0 +1,174 @@
+package parser
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/smartystreets/goconvey/web/server/contract"
+)
+
+var (
+ testNamePattern = regexp.MustCompile("^=== RUN:? +(.+)$")
+)
+
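+// ParsePackageResults parses raw `go test` output for a single package,
+// populating result with the outcome, elapsed time, coverage, and the
+// result of each test function.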
+func ParsePackageResults(result *contract.PackageResult, rawOutput string) {
+ newOutputParser(result, rawOutput).parse()
+}
+
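+// outputParser first separates test-function output from package-level
+// metadata, then parses each test function's captured lines.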
+type outputParser struct {
+ raw string
+ lines []string
+ result *contract.PackageResult
+ tests []*contract.TestResult
+
+	// placeholders reused as loop variables
+ line string
+ test *contract.TestResult
+ testMap map[string]*contract.TestResult
+}
+
+func newOutputParser(result *contract.PackageResult, rawOutput string) *outputParser {
+ self := new(outputParser)
+ self.raw = strings.TrimSpace(rawOutput)
+ self.lines = strings.Split(self.raw, "\n")
+ self.result = result
+ self.tests = []*contract.TestResult{}
+ self.testMap = make(map[string]*contract.TestResult)
+ return self
+}
+
+func (self *outputParser) parse() {
+ self.separateTestFunctionsAndMetadata()
+ self.parseEachTestFunction()
+}
+
+func (self *outputParser) separateTestFunctionsAndMetadata() {
+ for _, self.line = range self.lines {
+ if self.processNonTestOutput() {
+ break
+ }
+ self.processTestOutput()
+ }
+}
+func (self *outputParser) processNonTestOutput() bool {
+ if noGoFiles(self.line) {
+ self.recordFinalOutcome(contract.NoGoFiles)
+
+ } else if buildFailed(self.line) {
+ self.recordFinalOutcome(contract.BuildFailure)
+
+ } else if noTestFiles(self.line) {
+ self.recordFinalOutcome(contract.NoTestFiles)
+
+ } else if noTestFunctions(self.line) {
+ self.recordFinalOutcome(contract.NoTestFunctions)
+
+ } else {
+ return false
+ }
+ return true
+}
+
+func (self *outputParser) recordFinalOutcome(outcome string) {
+ self.result.Outcome = outcome
+ self.result.BuildOutput = strings.Join(self.lines, "\n")
+}
+
+func (self *outputParser) processTestOutput() {
+ if isNewTest(self.line) {
+ self.registerTestFunction()
+
+ } else if isTestResult(self.line) {
+ self.recordTestMetadata()
+
+ } else if isPackageReport(self.line) {
+ self.recordPackageMetadata()
+
+ } else {
+ self.saveLineForParsingLater()
+
+ }
+}
+
+func (self *outputParser) registerTestFunction() {
+ testName := testNamePattern.FindStringSubmatch(self.line)[1]
+ self.test = contract.NewTestResult(testName)
+ self.tests = append(self.tests, self.test)
+ self.testMap[self.test.TestName] = self.test
+}
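+// recordTestMetadata matches a "--- PASS/FAIL/SKIP: Name (N.NN seconds)"
+// line back to its registered test and records its outcome and duration.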
+func (self *outputParser) recordTestMetadata() {
+ testName := strings.Split(self.line, " ")[2]
+ if test, ok := self.testMap[testName]; ok {
+ self.test = test
+ self.test.Passed = !strings.HasPrefix(self.line, "--- FAIL: ")
+ self.test.Skipped = strings.HasPrefix(self.line, "--- SKIP: ")
+ self.test.Elapsed = parseTestFunctionDuration(self.line)
+ }
+}
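+// recordPackageMetadata handles the trailing package lines: the ok/FAIL
+// summary (outcome, package name, elapsed time) and the coverage summary.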
+func (self *outputParser) recordPackageMetadata() {
+ if packageFailed(self.line) {
+ self.recordTestingOutcome(contract.Failed)
+
+ } else if packagePassed(self.line) {
+ self.recordTestingOutcome(contract.Passed)
+
+ } else if isCoverageSummary(self.line) {
+ self.recordCoverageSummary(self.line)
+ }
+}
+func (self *outputParser) recordTestingOutcome(outcome string) {
+ self.result.Outcome = outcome
+ fields := strings.Split(self.line, "\t")
+ self.result.PackageName = strings.TrimSpace(fields[1])
+ self.result.Elapsed = parseDurationInSeconds(fields[2], 3)
+}
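+// recordCoverageSummary extracts the percentage between "coverage: " and
+// "%"; unparseable values are recorded as -1.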
+func (self *outputParser) recordCoverageSummary(summary string) {
+ start := len("coverage: ")
+ end := strings.Index(summary, "%")
+ value := summary[start:end]
+ parsed, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ self.result.Coverage = -1
+ } else {
+ self.result.Coverage = parsed
+ }
+}
+func (self *outputParser) saveLineForParsingLater() {
+ self.line = strings.TrimLeft(self.line, "\t")
+ if self.test == nil {
+ fmt.Println("Potential error parsing output of", self.result.PackageName, "; couldn't handle this stray line:", self.line)
+ return
+ }
+ self.test.RawLines = append(self.test.RawLines, self.line)
+}
+
+// TestResults is a sortable collection of contract.TestResult values; it implements sort.Interface.
+type TestResults []contract.TestResult
+
+func (r TestResults) Len() int {
+ return len(r)
+}
+
+// Less compares TestResults on TestName
+func (r TestResults) Less(i, j int) bool {
+ return r[i].TestName < r[j].TestName
+}
+
+func (r TestResults) Swap(i, j int) {
+ r[i], r[j] = r[j], r[i]
+}
+
+func (self *outputParser) parseEachTestFunction() {
+ for _, self.test = range self.tests {
+ self.test = parseTestOutput(self.test)
+ if self.test.Error != "" {
+ self.result.Outcome = contract.Panicked
+ }
+ self.test.RawLines = []string{}
+ self.result.TestResults = append(self.result.TestResults, *self.test)
+ }
+ sort.Sort(TestResults(self.result.TestResults))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/package_parser_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/package_parser_test.go
new file mode 100644
index 00000000000..65f3f9830bf
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/package_parser_test.go
@@ -0,0 +1,792 @@
+package parser
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "strings"
+ "testing"
+
+ "github.com/smartystreets/goconvey/convey/reporting"
+ "github.com/smartystreets/goconvey/web/server/contract"
+)
+
+func init() {
+ log.SetOutput(ioutil.Discard)
+}
+
+func TestParsePackage_NoGoFiles_ReturnsPackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expected_NoGoFiles.PackageName}
+ ParsePackageResults(actual, input_NoGoFiles)
+ assertEqual(t, expected_NoGoFiles, *actual)
+}
+
+func TestParsePackage_NoTestFiles_ReturnsPackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expected_NoTestFiles.PackageName}
+ ParsePackageResults(actual, input_NoTestFiles)
+ assertEqual(t, expected_NoTestFiles, *actual)
+}
+
+func TestParsePackage_NoTestFunctions_ReturnsPackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expected_NoTestFunctions.PackageName}
+ ParsePackageResults(actual, input_NoTestFunctions)
+ assertEqual(t, expected_NoTestFunctions, *actual)
+}
+
+func TestParsePackage_BuildFailed_ReturnsPackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expected_BuildFailed_InvalidPackageDeclaration.PackageName}
+ ParsePackageResults(actual, input_BuildFailed_InvalidPackageDeclaration)
+ assertEqual(t, expected_BuildFailed_InvalidPackageDeclaration, *actual)
+
+ actual = &contract.PackageResult{PackageName: expected_BuildFailed_OtherErrors.PackageName}
+ ParsePackageResults(actual, input_BuildFailed_OtherErrors)
+ assertEqual(t, expected_BuildFailed_OtherErrors, *actual)
+
+ actual = &contract.PackageResult{PackageName: expected_BuildFailed_ImportCycle.PackageName}
+ ParsePackageResults(actual, input_BuildFailed_ImportCycle)
+ assertEqual(t, expected_BuildFailed_ImportCycle, *actual)
+
+ actual = &contract.PackageResult{PackageName: expected_BuildFailed_CantFindPackage.PackageName}
+ ParsePackageResults(actual, input_BuildFailed_CantFindPackage)
+ assertEqual(t, expected_BuildFailed_CantFindPackage, *actual)
+
+ actual = &contract.PackageResult{PackageName: expected_BuildFailed_ConflictingImport.PackageName}
+	ParsePackageResults(actual, input_BuildFailed_ConflictingImport)
+ assertEqual(t, expected_BuildFailed_ConflictingImport, *actual)
+}
+
+func TestParsePackage_OldSchoolWithFailureOutput_ReturnsCompletePackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expectedOldSchool_Fails.PackageName}
+ ParsePackageResults(actual, inputOldSchool_Fails)
+ assertEqual(t, expectedOldSchool_Fails, *actual)
+}
+
+func TestParsePackage_OldSchoolWithSuccessOutput_ReturnsCompletePackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expectedOldSchool_Passes.PackageName}
+ ParsePackageResults(actual, inputOldSchool_Passes)
+ assertEqual(t, expectedOldSchool_Passes, *actual)
+}
+
+func TestParsePackage_OldSchoolWithPanicOutput_ReturnsCompletePackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expectedOldSchool_Panics.PackageName}
+ ParsePackageResults(actual, inputOldSchool_Panics)
+ assertEqual(t, expectedOldSchool_Panics, *actual)
+}
+
+func TestParsePackage_GoConveyOutput_ReturnsCompletePackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expectedGoConvey.PackageName}
+ ParsePackageResults(actual, inputGoConvey)
+ assertEqual(t, expectedGoConvey, *actual)
+}
+
+func TestParsePackage_ActualPackageNameDifferentThanDirectoryName_ReturnsActualPackageName(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: strings.Replace(expectedGoConvey.PackageName, "examples", "stuff", -1)}
+ ParsePackageResults(actual, inputGoConvey)
+ assertEqual(t, expectedGoConvey, *actual)
+}
+
+func TestParsePackage_GoConveyOutputMalformed_CausesPanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ message := fmt.Sprintf("%v", r)
+ if !strings.Contains(message, "bug report") {
+ t.Errorf("Should have panicked with a request to file a bug report but we received this error instead: %s", message)
+ }
+ } else {
+			t.Errorf("Should have panicked with a request to file a bug report but no panic occurred.")
+ }
+ }()
+
+ actual := &contract.PackageResult{PackageName: expectedGoConvey.PackageName}
+ ParsePackageResults(actual, inputGoConvey_Malformed)
+}
+
+func TestParsePackage_GoConveyWithRandomOutput_ReturnsPackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expectedGoConvey_WithRandomOutput.PackageName}
+ ParsePackageResults(actual, inputGoConvey_WithRandomOutput)
+ assertEqual(t, expectedGoConvey_WithRandomOutput, *actual)
+}
+
+func TestParsePackage_OldSchoolWithSuccessAndBogusCoverage_ReturnsCompletePackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expectedOldSchool_PassesButCoverageIsBogus.PackageName}
+ ParsePackageResults(actual, inputOldSchool_PassesButCoverageIsBogus)
+ assertEqual(t, expectedOldSchool_PassesButCoverageIsBogus, *actual)
+}
+
+func TestParsePackage_NestedTests_ReturnsPackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expectedNestedTests.PackageName}
+ ParsePackageResults(actual, inputNestedTests)
+ assertEqual(t, expectedNestedTests, *actual)
+}
+
+func TestParsePackage_WithExampleFunctions_ReturnsPackageResult(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expectedExampleFunctions.PackageName}
+ ParsePackageResults(actual, inputExampleFunctions)
+ assertEqual(t, expectedExampleFunctions, *actual)
+}
+
+func TestParsePackage_Golang15Output_ShouldNotPanic(t *testing.T) {
+ actual := &contract.PackageResult{PackageName: expectedGolang15.PackageName}
+ ParsePackageResults(actual, inputGolang15)
+ assertEqual(t, expectedGolang15, *actual)
+}
+
+func assertEqual(t *testing.T, expected, actual interface{}) {
+ a, _ := json.Marshal(expected)
+ b, _ := json.Marshal(actual)
+ if string(a) != string(b) {
+ t.Errorf(failureTemplate, string(a), string(b))
+ }
+}
+
+const failureTemplate = "Comparison failed:\n Expected: %v\n Actual: %v\n"
+
+const input_NoGoFiles = `can't load package: package github.com/smartystreets/goconvey: no buildable Go source files in /Users/matt/Work/Dev/goconvey/src/github.com/smartystreets/goconvey`
+
+var expected_NoGoFiles = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey",
+ Outcome: contract.NoGoFiles,
+ BuildOutput: input_NoGoFiles,
+}
+
+const input_NoTestFiles = `? pkg.smartystreets.net/liveaddress-zipapi [no test files]`
+
+var expected_NoTestFiles = contract.PackageResult{
+ PackageName: "pkg.smartystreets.net/liveaddress-zipapi",
+ Outcome: contract.NoTestFiles,
+ BuildOutput: input_NoTestFiles,
+}
+
+const input_NoTestFunctions = `testing: warning: no tests to run
+PASS
+ok github.com/smartystreets/goconvey/scripts 0.011s`
+
+var expected_NoTestFunctions = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/scripts",
+ Outcome: contract.NoTestFunctions,
+ BuildOutput: input_NoTestFunctions,
+}
+
+const input_BuildFailed_InvalidPackageDeclaration = `
+can't load package: package github.com/smartystreets/goconvey/examples:
+bowling_game_test.go:9:1: expected 'package', found 'IDENT' asdf
+bowling_game_test.go:10:1: invalid package name _
+`
+
+var expected_BuildFailed_InvalidPackageDeclaration = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/examples",
+ Outcome: contract.BuildFailure,
+ BuildOutput: strings.TrimSpace(input_BuildFailed_InvalidPackageDeclaration),
+}
+
+const input_BuildFailed_CantFindPackage = `
+bowling_game.go:3:8: cannot find package "format" in any of:
+ /usr/local/go/src/pkg/format (from $GOROOT)
+ /Users/mike/work/dev/goconvey/src/format (from $GOPATH)
+FAIL github.com/smartystreets/goconvey/examples [setup failed]
+`
+
+var expected_BuildFailed_CantFindPackage = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/examples",
+ Outcome: contract.BuildFailure,
+ BuildOutput: strings.TrimSpace(input_BuildFailed_CantFindPackage),
+}
+
+const input_BuildFailed_ConflictingImport = `
+mutustus.go:4:2: found packages e (e.go) and err (prepend.go) in /Users/mike/src/utensils.git/e
+`
+
+var expected_BuildFailed_ConflictingImport = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/examples",
+ Outcome: contract.BuildFailure,
+	BuildOutput: strings.TrimSpace(input_BuildFailed_ConflictingImport),
+}
+
+const input_BuildFailed_OtherErrors = `
+# github.com/smartystreets/goconvey/examples
+./bowling_game_test.go:22: undefined: game
+./bowling_game_test.go:22: cannot assign to game
+./bowling_game_test.go:25: undefined: game
+./bowling_game_test.go:28: undefined: game
+./bowling_game_test.go:33: undefined: game
+./bowling_game_test.go:36: undefined: game
+./bowling_game_test.go:41: undefined: game
+./bowling_game_test.go:42: undefined: game
+./bowling_game_test.go:43: undefined: game
+./bowling_game_test.go:46: undefined: game
+./bowling_game_test.go:46: too many errors
+FAIL github.com/smartystreets/goconvey/examples [build failed]
+`
+
+var expected_BuildFailed_OtherErrors = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/examples",
+ Outcome: contract.BuildFailure,
+ BuildOutput: strings.TrimSpace(input_BuildFailed_OtherErrors),
+}
+
+const input_BuildFailed_ImportCycle = `
+# github.com/smartystreets/goconvey/t
+./t_test.go:23: import "github.com/smartystreets/goconvey/t" while compiling that package (import cycle)
+FAIL github.com/smartystreets/goconvey/t [build failed]
+`
+
+var expected_BuildFailed_ImportCycle = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/t",
+ Outcome: contract.BuildFailure,
+ BuildOutput: strings.TrimSpace(input_BuildFailed_ImportCycle),
+}
+
+const inputOldSchool_Passes = `
+=== RUN TestOldSchool_Passes
+--- PASS: TestOldSchool_Passes (0.02 seconds)
+=== RUN TestSkippingTests
+--- SKIP: TestSkippingTests (0.00 seconds)
+ old_school_test.go:8: blah
+=== RUN TestOldSchool_PassesWithMessage
+--- PASS: TestOldSchool_PassesWithMessage (0.05 seconds)
+ old_school_test.go:10: I am a passing test.
+ With a newline.
+PASS
+coverage: 100.0%% of statements in github.com/smartystreets/goconvey/convey, github.com/smartystreets/goconvey/convey/gotest, github.com/smartystreets/goconvey/convey/reporting
+ok github.com/smartystreets/goconvey/webserver/examples 0.018s
+`
+
+var expectedOldSchool_Passes = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/webserver/examples",
+ Elapsed: 0.018,
+ Coverage: 100,
+ Outcome: contract.Passed,
+ TestResults: []contract.TestResult{
+ contract.TestResult{
+ TestName: "TestOldSchool_Passes",
+ Elapsed: 0.02,
+ Passed: true,
+ File: "",
+ Line: 0,
+ Message: "",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "TestOldSchool_PassesWithMessage",
+ Elapsed: 0.05,
+ Passed: true,
+ File: "old_school_test.go",
+ Line: 10,
+ Message: "old_school_test.go:10: I am a passing test.\nWith a newline.",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "TestSkippingTests",
+ Elapsed: 0,
+ Passed: true,
+ Skipped: true,
+ File: "old_school_test.go",
+ Line: 8,
+ Message: "old_school_test.go:8: blah",
+ Stories: []reporting.ScopeResult{},
+ },
+ },
+}
+
+const inputOldSchool_Fails = `
+=== RUN TestOldSchool_Passes
+--- PASS: TestOldSchool_Passes (0.01 seconds)
+=== RUN TestOldSchool_PassesWithMessage
+--- PASS: TestOldSchool_PassesWithMessage (0.03 seconds)
+ old_school_test.go:10: I am a passing test.
+ With a newline.
+=== RUN TestOldSchool_Failure
+--- FAIL: TestOldSchool_Failure (0.06 seconds)
+=== RUN TestOldSchool_FailureWithReason
+--- FAIL: TestOldSchool_FailureWithReason (0.11 seconds)
+ old_school_test.go:18: I am a failing test.
+FAIL
+exit status 1
+FAIL github.com/smartystreets/goconvey/webserver/examples 0.017s
+`
+
+var expectedOldSchool_Fails = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/webserver/examples",
+ Outcome: contract.Failed,
+ Elapsed: 0.017,
+ TestResults: []contract.TestResult{
+ contract.TestResult{
+ TestName: "TestOldSchool_Failure",
+ Elapsed: 0.06,
+ Passed: false,
+ File: "",
+ Line: 0,
+ Message: "",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "TestOldSchool_FailureWithReason",
+ Elapsed: 0.11,
+ Passed: false,
+ File: "old_school_test.go",
+ Line: 18,
+ Message: "old_school_test.go:18: I am a failing test.",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "TestOldSchool_Passes",
+ Elapsed: 0.01,
+ Passed: true,
+ File: "",
+ Line: 0,
+ Message: "",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "TestOldSchool_PassesWithMessage",
+ Elapsed: 0.03,
+ Passed: true,
+ File: "old_school_test.go",
+ Line: 10,
+ Message: "old_school_test.go:10: I am a passing test.\nWith a newline.",
+ Stories: []reporting.ScopeResult{},
+ },
+ },
+}
+
+const inputOldSchool_Panics = `
+=== RUN TestOldSchool_Panics
+--- FAIL: TestOldSchool_Panics (0.02 seconds)
+panic: runtime error: index out of range [recovered]
+ panic: runtime error: index out of range
+
+goroutine 3 [running]:
+testing.func·004()
+ /usr/local/go/src/pkg/testing/testing.go:348 +0xcd
+github.com/smartystreets/goconvey/webserver/examples.TestOldSchool_Panics(0x210292000)
+ /Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/something_test.go:15 +0xec
+testing.tRunner(0x210292000, 0x1b09f0)
+ /usr/local/go/src/pkg/testing/testing.go:353 +0x8a
+created by testing.RunTests
+ /usr/local/go/src/pkg/testing/testing.go:433 +0x86b
+
+goroutine 1 [chan receive]:
+testing.RunTests(0x138f38, 0x1b09f0, 0x1, 0x1, 0x1, ...)
+ /usr/local/go/src/pkg/testing/testing.go:434 +0x88e
+testing.Main(0x138f38, 0x1b09f0, 0x1, 0x1, 0x1b7f60, ...)
+ /usr/local/go/src/pkg/testing/testing.go:365 +0x8a
+main.main()
+ github.com/smartystreets/goconvey/webserver/examples/_test/_testmain.go:43 +0x9a
+exit status 2
+FAIL github.com/smartystreets/goconvey/webserver/examples 0.014s
+`
+
+var expectedOldSchool_Panics = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/webserver/examples",
+ Elapsed: 0.014,
+ Outcome: contract.Panicked,
+ TestResults: []contract.TestResult{
+ contract.TestResult{
+ TestName: "TestOldSchool_Panics",
+ Elapsed: 0.02,
+ Passed: false,
+ File: "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/something_test.go",
+ Line: 15,
+ Message: "",
+ Error: strings.Replace(`panic: runtime error: index out of range [recovered]
+ panic: runtime error: index out of range
+
+goroutine 3 [running]:
+testing.func·004()
+ /usr/local/go/src/pkg/testing/testing.go:348 +0xcd
+github.com/smartystreets/goconvey/webserver/examples.TestOldSchool_Panics(0x210292000)
+ /Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/something_test.go:15 +0xec
+testing.tRunner(0x210292000, 0x1b09f0)
+ /usr/local/go/src/pkg/testing/testing.go:353 +0x8a
+created by testing.RunTests
+ /usr/local/go/src/pkg/testing/testing.go:433 +0x86b
+
+goroutine 1 [chan receive]:
+testing.RunTests(0x138f38, 0x1b09f0, 0x1, 0x1, 0x1, ...)
+ /usr/local/go/src/pkg/testing/testing.go:434 +0x88e
+testing.Main(0x138f38, 0x1b09f0, 0x1, 0x1, 0x1b7f60, ...)
+ /usr/local/go/src/pkg/testing/testing.go:365 +0x8a
+main.main()
+ github.com/smartystreets/goconvey/webserver/examples/_test/_testmain.go:43 +0x9a`, "\u0009", "\t", -1),
+ Stories: []reporting.ScopeResult{},
+ },
+ },
+}
+
+const inputGoConvey_Malformed = `
+=== RUN TestPassingStory
+>->->OPEN-JSON->->->
+{
+ "Title": "A passing story",
+ "File": "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go",
+ "Line": 11,
+ "Depth": 0,
+ "Assertions": [
+ {
+ "File": "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go",
+ "Line": 10,
+ "Failure": "",
+
+ ;aiwheopinen39 n3902n92m
+
+ "Error": null,
+ "Skipped": false,
+ "StackTrace": "goroutine 3 [running]:\ngithub.com/smartystreets/goconvey/webserver/examples.func·001()\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go:10 +0xe3\ngithub.com/smartystreets/goconvey/webserver/examples.TestPassingStory(0x210314000)\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go:11 +0xec\ntesting.tRunner(0x210314000, 0x21ab10)\n\u0009/usr/local/go/src/pkg/testing/testing.go:353 +0x8a\ncreated by testing.RunTests\n\u0009/usr/local/go/src/pkg/testing/testing.go:433 +0x86b\n"
+ }
+ ]
+},
+<-<-<-CLOSE-JSON<-<-<
+--- PASS: TestPassingStory (0.01 seconds)
+PASS
+ok github.com/smartystreets/goconvey/webserver/examples 0.019s
+`
+
+const inputGoConvey = `
+=== RUN TestPassingStory
+>->->OPEN-JSON->->->
+{
+ "Title": "A passing story",
+ "File": "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go",
+ "Line": 11,
+ "Depth": 0,
+ "Assertions": [
+ {
+ "File": "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go",
+ "Line": 10,
+ "Failure": "",
+ "Error": null,
+ "Skipped": false,
+ "StackTrace": "goroutine 3 [running]:\ngithub.com/smartystreets/goconvey/webserver/examples.func·001()\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go:10 +0xe3\ngithub.com/smartystreets/goconvey/webserver/examples.TestPassingStory(0x210314000)\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go:11 +0xec\ntesting.tRunner(0x210314000, 0x21ab10)\n\u0009/usr/local/go/src/pkg/testing/testing.go:353 +0x8a\ncreated by testing.RunTests\n\u0009/usr/local/go/src/pkg/testing/testing.go:433 +0x86b\n"
+ }
+ ]
+},
+<-<-<-CLOSE-JSON<-<-<
+--- PASS: TestPassingStory (0.01 seconds)
+PASS
+coverage: 75.5%% of statements
+ok github.com/smartystreets/goconvey/webserver/examples 0.019s
+`
+
+var expectedGoConvey = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/webserver/examples",
+ Elapsed: 0.019,
+ Outcome: contract.Passed,
+ Coverage: 75.5,
+ TestResults: []contract.TestResult{
+ contract.TestResult{
+ TestName: "TestPassingStory",
+ Elapsed: 0.01,
+ Passed: true,
+ File: "",
+ Line: 0,
+ Message: "",
+ Stories: []reporting.ScopeResult{
+ reporting.ScopeResult{
+ Title: "A passing story",
+ File: "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go",
+ Line: 11,
+ Depth: 0,
+ Assertions: []*reporting.AssertionResult{
+ &reporting.AssertionResult{
+ File: "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go",
+ Line: 10,
+ Failure: "",
+ Error: nil,
+ Skipped: false,
+ StackTrace: "goroutine 3 [running]:\ngithub.com/smartystreets/goconvey/webserver/examples.func·001()\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go:10 +0xe3\ngithub.com/smartystreets/goconvey/webserver/examples.TestPassingStory(0x210314000)\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/webserver/examples/old_school_test.go:11 +0xec\ntesting.tRunner(0x210314000, 0x21ab10)\n\u0009/usr/local/go/src/pkg/testing/testing.go:353 +0x8a\ncreated by testing.RunTests\n\u0009/usr/local/go/src/pkg/testing/testing.go:433 +0x86b\n",
+ },
+ },
+ },
+ },
+ },
+ },
+}
+
+const inputGoConvey_WithRandomOutput = `
+=== RUN TestPassingStory
+*** Hello, World! (1) ***
+*** Hello, World! (2) ***
+*** Hello, World! (3) ***>->->OPEN-JSON->->->
+{
+ "Title": "A passing story",
+ "File": "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go",
+ "Line": 16,
+ "Depth": 0,
+ "Assertions": [
+ {
+ "File": "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go",
+ "Line": 14,
+ "Failure": "",
+ "Error": null,
+ "Skipped": false,
+ "StackTrace": "goroutine 3 [running]:\ngithub.com/smartystreets/goconvey/web/server/testing.func·001()\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go:14 +0x186\ngithub.com/smartystreets/goconvey/web/server/testing.TestPassingStory(0x210315000)\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go:16 +0x1b9\ntesting.tRunner(0x210315000, 0x21bb10)\n\u0009/usr/local/go/src/pkg/testing/testing.go:353 +0x8a\ncreated by testing.RunTests\n\u0009/usr/local/go/src/pkg/testing/testing.go:433 +0x86b\n"
+ }
+ ]
+},
+<-<-<-CLOSE-JSON<-<-<
+*** Hello, World! (4)***
+*** Hello, World! (5) ***
+>->->OPEN-JSON->->->
+{
+ "Title": "A passing story",
+ "File": "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go",
+ "Line": 22,
+ "Depth": 0,
+ "Assertions": [
+ {
+ "File": "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go",
+ "Line": 20,
+ "Failure": "",
+ "Error": null,
+ "Skipped": false,
+ "StackTrace": "goroutine 3 [running]:\ngithub.com/smartystreets/goconvey/web/server/testing.func·002()\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go:20 +0x186\ngithub.com/smartystreets/goconvey/web/server/testing.TestPassingStory(0x210315000)\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go:22 +0x294\ntesting.tRunner(0x210315000, 0x21bb10)\n\u0009/usr/local/go/src/pkg/testing/testing.go:353 +0x8a\ncreated by testing.RunTests\n\u0009/usr/local/go/src/pkg/testing/testing.go:433 +0x86b\n"
+ }
+ ]
+},
+<-<-<-CLOSE-JSON<-<-<
+*** Hello, World! (6) ***
+--- PASS: TestPassingStory (0.03 seconds)
+PASS
+coverage: 45.0%% of statements
+ok github.com/smartystreets/goconvey/web/server/testing 0.024s
+`
+
+var expectedGoConvey_WithRandomOutput = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/web/server/testing",
+ Elapsed: 0.024,
+ Outcome: contract.Passed,
+ Coverage: 45.0,
+ TestResults: []contract.TestResult{
+ contract.TestResult{
+ TestName: "TestPassingStory",
+ Elapsed: 0.03,
+ Passed: true,
+ File: "",
+ Line: 0,
+ Message: "*** Hello, World! (1) ***\n*** Hello, World! (2) ***\n*** Hello, World! (3) ***\n*** Hello, World! (4)***\n*** Hello, World! (5) ***\n*** Hello, World! (6) ***",
+ Stories: []reporting.ScopeResult{
+ reporting.ScopeResult{
+ Title: "A passing story",
+ File: "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go",
+ Line: 16,
+ Depth: 0,
+ Assertions: []*reporting.AssertionResult{
+ &reporting.AssertionResult{
+ File: "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go",
+ Line: 14,
+ Failure: "",
+ Error: nil,
+ Skipped: false,
+ StackTrace: "goroutine 3 [running]:\ngithub.com/smartystreets/goconvey/web/server/testing.func·001()\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go:14 +0x186\ngithub.com/smartystreets/goconvey/web/server/testing.TestPassingStory(0x210315000)\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go:16 +0x1b9\ntesting.tRunner(0x210315000, 0x21bb10)\n\u0009/usr/local/go/src/pkg/testing/testing.go:353 +0x8a\ncreated by testing.RunTests\n\u0009/usr/local/go/src/pkg/testing/testing.go:433 +0x86b\n",
+ },
+ },
+ },
+ reporting.ScopeResult{
+ Title: "A passing story",
+ File: "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go",
+ Line: 22,
+ Depth: 0,
+ Assertions: []*reporting.AssertionResult{
+ &reporting.AssertionResult{
+ File: "/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go",
+ Line: 20,
+ Failure: "",
+ Error: nil,
+ Skipped: false,
+ StackTrace: "goroutine 3 [running]:\ngithub.com/smartystreets/goconvey/web/server/testing.func·002()\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go:20 +0x186\ngithub.com/smartystreets/goconvey/web/server/testing.TestPassingStory(0x210315000)\n\u0009/Users/mike/work/dev/goconvey/src/github.com/smartystreets/goconvey/web/server/testing/go_test.go:22 +0x294\ntesting.tRunner(0x210315000, 0x21bb10)\n\u0009/usr/local/go/src/pkg/testing/testing.go:353 +0x8a\ncreated by testing.RunTests\n\u0009/usr/local/go/src/pkg/testing/testing.go:433 +0x86b\n",
+ },
+ },
+ },
+ },
+ },
+ },
+}
+
+const inputOldSchool_PassesButCoverageIsBogus = `
+=== RUN TestOldSchool_Passes
+--- PASS: TestOldSchool_Passes (0.02 seconds)
+=== RUN TestOldSchool_PassesWithMessage
+--- PASS: TestOldSchool_PassesWithMessage (0.05 seconds)
+ old_school_test.go:10: I am a passing test.
+ With a newline.
+PASS
+coverage: bogus%% of statements
+ok github.com/smartystreets/goconvey/webserver/examples 0.018s
+`
+
+var expectedOldSchool_PassesButCoverageIsBogus = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/webserver/examples",
+ Elapsed: 0.018,
+ Coverage: -1,
+ Outcome: contract.Passed,
+ TestResults: []contract.TestResult{
+ contract.TestResult{
+ TestName: "TestOldSchool_Passes",
+ Elapsed: 0.02,
+ Passed: true,
+ File: "",
+ Line: 0,
+ Message: "",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "TestOldSchool_PassesWithMessage",
+ Elapsed: 0.05,
+ Passed: true,
+ File: "old_school_test.go",
+ Line: 10,
+ Message: "old_school_test.go:10: I am a passing test.\nWith a newline.",
+ Stories: []reporting.ScopeResult{},
+ },
+ },
+}
+
+const inputNestedTests = `
+=== RUN TestNestedTests
+=== RUN TestNestedTests_Passes
+--- PASS: TestNestedTests_Passes (0.02 seconds)
+=== RUN TestNestedTests_Failure
+--- FAIL: TestNestedTests_Failure (0.06 seconds)
+=== RUN TestNestedTests_FailureWithReason
+--- FAIL: TestNestedTests_FailureWithReason (0.11 seconds)
+ nested_test.go:18: I am a failing test.
+=== RUN TestNestedTests_Skipping
+--- SKIP: TestNestedTests_Skipping (0.00 seconds)
+ nested_test.go:8: blah
+=== RUN TestNestedTests_PassesWithMessage
+--- PASS: TestNestedTests_PassesWithMessage (0.05 seconds)
+ nested_test.go:10: I am a passing test.
+ With a newline.
+--- FAIL: TestNestedTests (0.25 seconds)
+FAIL
+exit status 1
+FAIL github.com/smartystreets/goconvey/webserver/examples 0.018s
+`
+
+var expectedNestedTests = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/webserver/examples",
+ Elapsed: 0.018,
+ Outcome: contract.Failed,
+ TestResults: []contract.TestResult{
+ contract.TestResult{
+ TestName: "TestNestedTests",
+ Elapsed: 0.25,
+ Passed: false,
+ File: "",
+ Line: 0,
+ Message: "",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "TestNestedTests_Failure",
+ Elapsed: 0.06,
+ Passed: false,
+ File: "",
+ Line: 0,
+ Message: "",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "TestNestedTests_FailureWithReason",
+ Elapsed: 0.11,
+ Passed: false,
+ File: "nested_test.go",
+ Line: 18,
+ Message: "nested_test.go:18: I am a failing test.",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "TestNestedTests_Passes",
+ Elapsed: 0.02,
+ Passed: true,
+ File: "",
+ Line: 0,
+ Message: "",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "TestNestedTests_PassesWithMessage",
+ Elapsed: 0.05,
+ Passed: true,
+ File: "nested_test.go",
+ Line: 10,
+ Message: "nested_test.go:10: I am a passing test.\nWith a newline.",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "TestNestedTests_Skipping",
+ Elapsed: 0.00,
+ Passed: true,
+ Skipped: true,
+ File: "nested_test.go",
+ Line: 8,
+ Message: "nested_test.go:8: blah",
+ Stories: []reporting.ScopeResult{},
+ },
+ },
+}
+
+const inputExampleFunctions = `
+=== RUN Example_Failure
+--- FAIL: Example_Failure (0.11 seconds)
+got:
+actual output
+want:
+real output
+=== RUN Example_Pass
+--- PASS: Example_Pass (0.06 seconds)
+FAIL
+exit status 1
+FAIL github.com/smartystreets/goconvey/webserver/examples 0.18s
+`
+
+var expectedExampleFunctions = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/webserver/examples",
+ Elapsed: 0.18,
+ Outcome: contract.Failed,
+ TestResults: []contract.TestResult{
+ contract.TestResult{
+ TestName: "Example_Failure",
+ Elapsed: 0.11,
+ Passed: false,
+ File: "",
+ Line: 0,
+			Message:   "got:\nactual output\nwant:\nreal output",
+ Stories: []reporting.ScopeResult{},
+ },
+ contract.TestResult{
+ TestName: "Example_Pass",
+ Elapsed: 0.06,
+ Passed: true,
+ File: "",
+ Line: 0,
+ Message: "",
+ Stories: []reporting.ScopeResult{},
+ },
+ },
+}
+
+const inputGolang15 = `
+=== RUN Golang15
+--- PASS: Golang15 (0.00s)
+PASS
+ok github.com/smartystreets/goconvey/webserver/examples 0.008s
+`
+
+var expectedGolang15 = contract.PackageResult{
+ PackageName: "github.com/smartystreets/goconvey/webserver/examples",
+ Elapsed: 0.008,
+ Outcome: contract.Passed,
+ TestResults: []contract.TestResult{
+ contract.TestResult{
+ TestName: "Golang15",
+ Elapsed: 0.00,
+ Passed: true,
+ File: "",
+ Line: 0,
+ Message: "",
+ Stories: []reporting.ScopeResult{},
+ },
+ },
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser.go
new file mode 100644
index 00000000000..f6250caf346
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser.go
@@ -0,0 +1,32 @@
+package parser
+
+import (
+ "log"
+
+ "github.com/smartystreets/goconvey/web/server/contract"
+)
+
+type Parser struct {
+ parser func(*contract.PackageResult, string)
+}
+
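+// Parse runs the injected parsing function over each package that
+// produced a usable result; packages that did not are marked Ignored,
+// Disabled, or as an unexpectedly aborted test run.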
+func (self *Parser) Parse(packages []*contract.Package) {
+ for _, p := range packages {
+ if p.Active() && p.HasUsableResult() {
+ self.parser(p.Result, p.Output)
+ } else if p.Ignored {
+ p.Result.Outcome = contract.Ignored
+ } else if p.Disabled {
+ p.Result.Outcome = contract.Disabled
+ } else {
+ p.Result.Outcome = contract.TestRunAbortedUnexpectedly
+ }
+ log.Printf("[%s]: %s\n", p.Result.Outcome, p.Name)
+ }
+}
+
+func NewParser(helper func(*contract.PackageResult, string)) *Parser {
+ self := new(Parser)
+ self.parser = helper
+ return self
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser.goconvey b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser.goconvey
new file mode 100644
index 00000000000..79982854b53
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser.goconvey
@@ -0,0 +1,2 @@
+#ignore
+-timeout=1s
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser_test.go
new file mode 100644
index 00000000000..de695e3fd7e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/parser_test.go
@@ -0,0 +1,47 @@
+package parser
+
+import (
+ "errors"
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "github.com/smartystreets/goconvey/web/server/contract"
+)
+
+func TestParser(t *testing.T) {
+
+ Convey("Subject: Parser parses test output for active packages", t, func() {
+ packages := []*contract.Package{
+ &contract.Package{Ignored: false, Output: "Active", Result: contract.NewPackageResult("asdf")},
+ &contract.Package{Ignored: true, Output: "Inactive", Result: contract.NewPackageResult("qwer")},
+ }
+ parser := NewParser(fakeParserImplementation)
+
+ Convey("When given a collection of packages", func() {
+ parser.Parse(packages)
+
+ Convey("The parser uses its internal parsing mechanism to parse the output of only the active packages", func() {
+ So(packages[0].Result.Outcome, ShouldEqual, packages[0].Output)
+ })
+
+ Convey("The parser should mark inactive packages as ignored", func() {
+ So(packages[1].Result.Outcome, ShouldEqual, contract.Ignored)
+ })
+ })
+
+ Convey("When a package could not be tested (maybe it was deleted between scanning and execution?)", func() {
+ packages[0].Output = ""
+ packages[0].Error = errors.New("Directory does not exist")
+
+ parser.Parse(packages)
+
+ Convey("The package result should not be parsed and the outcome should actually resemble the problem", func() {
+ So(packages[0].Result.Outcome, ShouldEqual, contract.TestRunAbortedUnexpectedly)
+ })
+ })
+ })
+}
+
+func fakeParserImplementation(result *contract.PackageResult, rawOutput string) {
+ result.Outcome = rawOutput
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/rules.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/rules.go
new file mode 100644
index 00000000000..f8d05d83066
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/rules.go
@@ -0,0 +1,43 @@
+package parser
+
+import "strings"
+
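+// These predicates classify individual lines of `go test` output.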
+func noGoFiles(line string) bool {
+ return strings.HasPrefix(line, "can't load package: ") &&
+ strings.Contains(line, ": no buildable Go source files in ")
+}
+func buildFailed(line string) bool {
+ return strings.HasPrefix(line, "# ") ||
+ strings.Contains(line, "cannot find package") ||
+ (strings.HasPrefix(line, "can't load package: ") && !strings.Contains(line, ": no Go source files in ")) ||
+ (strings.Contains(line, ": found packages ") && strings.Contains(line, ".go) and ") && strings.Contains(line, ".go) in "))
+}
+func noTestFunctions(line string) bool {
+ return line == "testing: warning: no tests to run"
+}
+func noTestFiles(line string) bool {
+ return strings.HasPrefix(line, "?") && strings.Contains(line, "[no test files]")
+}
+func isNewTest(line string) bool {
+ return strings.HasPrefix(line, "=== ")
+}
+func isTestResult(line string) bool {
+ return strings.HasPrefix(line, "--- ")
+}
+func isPackageReport(line string) bool {
+ return (strings.HasPrefix(line, "FAIL") ||
+ strings.HasPrefix(line, "exit status") ||
+ strings.HasPrefix(line, "PASS") ||
+ isCoverageSummary(line) ||
+ packagePassed(line))
+}
+
+func packageFailed(line string) bool {
+ return strings.HasPrefix(line, "FAIL\t")
+}
+func packagePassed(line string) bool {
+ return strings.HasPrefix(line, "ok \t")
+}
+func isCoverageSummary(line string) bool {
+ return strings.HasPrefix(line, "coverage: ") && strings.Contains(line, "% of statements")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/testParser.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/testParser.go
new file mode 100644
index 00000000000..fe8f5110b6c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/testParser.go
@@ -0,0 +1,174 @@
+package parser
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/smartystreets/goconvey/convey/reporting"
+ "github.com/smartystreets/goconvey/web/server/contract"
+)
+
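+// testParser separates a single test function's raw lines into reporting
+// JSON (goconvey stories) and plain captured output.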
+type testParser struct {
+ test *contract.TestResult
+ line string
+ index int
+ inJson bool
+ jsonLines []string
+ otherLines []string
+}
+
+func parseTestOutput(test *contract.TestResult) *contract.TestResult {
+ parser := newTestParser(test)
+ parser.parseTestFunctionOutput()
+ return test
+}
+
+func newTestParser(test *contract.TestResult) *testParser {
+ self := new(testParser)
+ self.test = test
+ return self
+}
+
+func (self *testParser) parseTestFunctionOutput() {
+ if len(self.test.RawLines) > 0 {
+ self.processLines()
+ self.deserializeJson()
+ self.composeCapturedOutput()
+ }
+}
+
+func (self *testParser) processLines() {
+ for self.index, self.line = range self.test.RawLines {
+ if !self.processLine() {
+ break
+ }
+ }
+}
+
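+// processLine is a small state machine keyed on the reporting JSON
+// delimiters: lines between OpenJson and CloseJson are buffered for
+// deserialization, a panic aborts further processing, and anything else
+// is treated as captured output.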
+func (self *testParser) processLine() bool {
+ if strings.HasSuffix(self.line, reporting.OpenJson) {
+ self.inJson = true
+ self.accountForOutputWithoutNewline()
+
+ } else if self.line == reporting.CloseJson {
+ self.inJson = false
+
+ } else if self.inJson {
+ self.jsonLines = append(self.jsonLines, self.line)
+
+ } else if isPanic(self.line) {
+ self.parsePanicOutput()
+ return false
+
+ } else if isGoTestLogOutput(self.line) {
+ self.parseLogLocation()
+
+ } else {
+ self.otherLines = append(self.otherLines, self.line)
+ }
+ return true
+}
+
+// If fmt.Print(f) produces output without a trailing \n, and that output
+// immediately precedes the framework's JSON report (opened by the
+// reporting.OpenJson marker), then without this adjustment all of the
+// JSON would be counted as captured output instead of being parsed and
+// displayed by the web UI.
+func (self *testParser) accountForOutputWithoutNewline() {
+ prefix := strings.Split(self.line, reporting.OpenJson)[0]
+ if prefix != "" {
+ self.otherLines = append(self.otherLines, prefix)
+ }
+}
+
+func (self *testParser) deserializeJson() {
+ formatted := createArrayForJsonItems(self.jsonLines)
+ var scopes []reporting.ScopeResult
+ err := json.Unmarshal(formatted, &scopes)
+ if err != nil {
+ panic(fmt.Sprintf(bugReportRequest, err, formatted))
+ }
+ self.test.Stories = scopes
+}
+func (self *testParser) parsePanicOutput() {
+ for index, line := range self.test.RawLines[self.index:] {
+ self.parsePanicLocation(index, line)
+ self.preserveStackTraceIndentation(index, line)
+ }
+ self.test.Error = strings.Join(self.test.RawLines, "\n")
+}
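+// parsePanicLocation relies on the shape of a captured stack trace: the
+// failing test's file:line appears four lines below the
+// "goroutine N [running]:" header.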
+func (self *testParser) parsePanicLocation(index int, line string) {
+ if !panicLineHasMetadata(line) {
+ return
+ }
+ metaLine := self.test.RawLines[index+4]
+ fields := strings.Split(metaLine, " ")
+ fileAndLine := strings.Split(fields[0], ":")
+ self.test.File = fileAndLine[0]
+ if len(fileAndLine) >= 2 {
+ self.test.Line, _ = strconv.Atoi(fileAndLine[1])
+ }
+}
+func (self *testParser) preserveStackTraceIndentation(index int, line string) {
+ if panicLineShouldBeIndented(index, line) {
+ self.test.RawLines[index] = "\t" + line
+ }
+}
+func (self *testParser) parseLogLocation() {
+ self.otherLines = append(self.otherLines, self.line)
+	fields := strings.Split(self.line, ":")
+ self.test.File = strings.TrimSpace(fields[0])
+ self.test.Line, _ = strconv.Atoi(fields[1])
+}
+
+func (self *testParser) composeCapturedOutput() {
+ self.test.Message = strings.Join(self.otherLines, "\n")
+}
+
+func createArrayForJsonItems(lines []string) []byte {
+ jsonArrayItems := strings.Join(lines, "")
+ jsonArrayItems = removeTrailingComma(jsonArrayItems)
+ return []byte(fmt.Sprintf("[%s]\n", jsonArrayItems))
+}
+func removeTrailingComma(rawJson string) string {
+ if trailingComma(rawJson) {
+ return rawJson[:len(rawJson)-1]
+ }
+ return rawJson
+}
+func trailingComma(value string) bool {
+ return strings.HasSuffix(value, ",")
+}
+
+func isGoTestLogOutput(line string) bool {
+ return strings.Count(line, ":") == 2
+}
+
+func isPanic(line string) bool {
+ return strings.HasPrefix(line, "panic: ")
+}
+
+func panicLineHasMetadata(line string) bool {
+ return strings.HasPrefix(line, "goroutine") && strings.Contains(line, "[running]")
+}
+func panicLineShouldBeIndented(index int, line string) bool {
+ return strings.Contains(line, "+") || (index > 0 && strings.Contains(line, "panic: "))
+}
+
+const bugReportRequest = `
+Uh-oh! Looks like something went wrong. Please copy the following text and file a bug report at:
+
+https://github.com/smartystreets/goconvey/issues?state=open
+
+======= BEGIN BUG REPORT =======
+
+ERROR: %v
+
+OUTPUT: %s
+
+======= END BUG REPORT =======
+
+`
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/util.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/util.go
new file mode 100644
index 00000000000..e2061603439
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/parser/util.go
@@ -0,0 +1,45 @@
+package parser
+
+import (
+ "math"
+ "strings"
+ "time"
+)
+
+// parseTestFunctionDuration parses the duration in seconds as a float64
+// from a line of go test output that looks something like this:
+// --- PASS: TestOldSchool_PassesWithMessage (0.03 seconds)
+func parseTestFunctionDuration(line string) float64 {
+ line = strings.Replace(line, "(", "", 1)
+ fields := strings.Split(line, " ")
+ return parseDurationInSeconds(fields[3]+"s", 2)
+}
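+
+// Worked example: for the line quoted above, stripping the first "(" yields
+// "--- PASS: TestOldSchool_PassesWithMessage 0.03 seconds)", so fields[3] is
+// "0.03" and the result is 0.03 (seconds, rounded to two decimal places).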
+
+func parseDurationInSeconds(raw string, precision int) float64 {
+ elapsed, _ := time.ParseDuration(raw)
+ return round(elapsed.Seconds(), precision)
+}
+
+// round returns x rounded to the given number of decimal places, with
+// halves rounded away from zero.
+//
+// Note: because the value is truncated through int64, inputs such as ±Inf,
+// NaN, or magnitudes beyond the int64 range are not handled exactly.
+//
+// Why, oh why doesn't the math package come with a round function?
+// Inspiration: http://play.golang.org/p/ZmFfr07oHp
+func round(x float64, precision int) float64 {
+	pow := math.Pow(10, float64(precision))
+	intermediate := x * pow
+
+	if intermediate < 0.0 {
+		intermediate -= 0.5
+	} else {
+		intermediate += 0.5
+	}
+
+	return float64(int64(intermediate)) / pow
+}
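+
+// Sanity check: round(2.5, 0) == 3 and round(-2.5, 0) == -3; the ±0.5 offset
+// rounds halves away from zero before truncating through int64.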
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell.go
new file mode 100644
index 00000000000..f2fa10711db
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell.go
@@ -0,0 +1,174 @@
+package system
+
+import (
+ "log"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// Integration: ///////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+type Shell struct {
+ coverage bool
+ gobin string
+ reportsPath string
+ defaultTimeout string
+}
+
+func NewShell(gobin, reportsPath string, coverage bool, defaultTimeout string) *Shell {
+ return &Shell{
+ coverage: coverage,
+ gobin: gobin,
+ reportsPath: reportsPath,
+ defaultTimeout: defaultTimeout,
+ }
+}
+
+func (self *Shell) GoTest(directory, packageName string, tags, arguments []string) (output string, err error) {
+ reportFilename := strings.Replace(packageName, "/", "-", -1)
+ reportPath := filepath.Join(self.reportsPath, reportFilename)
+ reportData := reportPath + ".txt"
+ reportHTML := reportPath + ".html"
+ tagsArg := "-tags=" + strings.Join(tags, ",")
+
+ goconvey := findGoConvey(directory, self.gobin, packageName, tagsArg).Execute()
+ compilation := compile(directory, self.gobin, tagsArg).Execute()
+ withCoverage := runWithCoverage(compilation, goconvey, self.coverage, reportData, directory, self.gobin, self.defaultTimeout, tagsArg, arguments).Execute()
+ final := runWithoutCoverage(compilation, withCoverage, goconvey, directory, self.gobin, self.defaultTimeout, tagsArg, arguments).Execute()
+ go generateReports(final, self.coverage, directory, self.gobin, reportData, reportHTML).Execute()
+
+ return final.Output, final.Error
+}
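+
+// Note: each stage above returns a Command whose Execute method is a no-op
+// once Output or Error is already populated (see Execute below), so a failed
+// compilation short-circuits the subsequent test invocations, and report
+// generation runs on its own goroutine so it never blocks the test output.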
+
+///////////////////////////////////////////////////////////////////////////////
+// Functional Core:////////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+func findGoConvey(directory, gobin, packageName, tagsArg string) Command {
+ return NewCommand(directory, gobin, "list", "-f", "'{{.TestImports}}'", tagsArg, packageName)
+}
+
+func compile(directory, gobin, tagsArg string) Command {
+ return NewCommand(directory, gobin, "test", "-i", tagsArg)
+}
+
+func runWithCoverage(compile, goconvey Command, coverage bool, reportPath, directory, gobin, defaultTimeout, tagsArg string, customArguments []string) Command {
+ if compile.Error != nil || goconvey.Error != nil {
+ return compile
+ }
+
+ if !coverage {
+ return compile
+ }
+
+ arguments := []string{"test", "-v", "-coverprofile=" + reportPath, tagsArg}
+
+ customArgsText := strings.Join(customArguments, "\t")
+ if !strings.Contains(customArgsText, "-covermode=") {
+ arguments = append(arguments, "-covermode=set")
+ }
+
+ if !strings.Contains(customArgsText, "-timeout=") {
+ arguments = append(arguments, "-timeout="+defaultTimeout)
+ }
+
+ if strings.Contains(goconvey.Output, goconveyDSLImport) {
+ arguments = append(arguments, "-json")
+ }
+
+ arguments = append(arguments, customArguments...)
+
+ return NewCommand(directory, gobin, arguments...)
+}
+
+func runWithoutCoverage(compile, withCoverage, goconvey Command, directory, gobin, defaultTimeout, tagsArg string, customArguments []string) Command {
+ if compile.Error != nil {
+ return compile
+ }
+
+ if goconvey.Error != nil {
+ log.Println(gopathProblem, goconvey.Output, goconvey.Error)
+ return goconvey
+ }
+
+ if coverageStatementRE.MatchString(withCoverage.Output) {
+ return withCoverage
+ }
+
+ log.Printf("Coverage output: %v", withCoverage.Output)
+
+ log.Print("Run without coverage")
+
+ arguments := []string{"test", "-v", tagsArg}
+ customArgsText := strings.Join(customArguments, "\t")
+ if !strings.Contains(customArgsText, "-timeout=") {
+ arguments = append(arguments, "-timeout="+defaultTimeout)
+ }
+
+ if strings.Contains(goconvey.Output, goconveyDSLImport) {
+ arguments = append(arguments, "-json")
+ }
+ arguments = append(arguments, customArguments...)
+ return NewCommand(directory, gobin, arguments...)
+}
+
+func generateReports(previous Command, coverage bool, directory, gobin, reportData, reportHTML string) Command {
+ if previous.Error != nil {
+ return previous
+ }
+
+ if !coverage {
+ return previous
+ }
+
+ return NewCommand(directory, gobin, "tool", "cover", "-html="+reportData, "-o", reportHTML)
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Imperative Shell: //////////////////////////////////////////////////////////
+///////////////////////////////////////////////////////////////////////////////
+
+type Command struct {
+ directory string
+ executable string
+ arguments []string
+
+ Output string
+ Error error
+}
+
+func NewCommand(directory, executable string, arguments ...string) Command {
+ return Command{
+ directory: directory,
+ executable: executable,
+ arguments: arguments,
+ }
+}
+
+func (this Command) Execute() Command {
+ if len(this.executable) == 0 {
+ return this
+ }
+
+ if len(this.Output) > 0 || this.Error != nil {
+ return this
+ }
+
+ command := exec.Command(this.executable, this.arguments...)
+ command.Dir = this.directory
+ var rawOutput []byte
+ rawOutput, this.Error = command.CombinedOutput()
+ this.Output = string(rawOutput)
+ return this
+}
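+
+// Usage sketch (hypothetical values): commands compose by value, so a caller
+// can build, run, and inspect one in a single chain:
+//
+//	result := NewCommand("/some/dir", "go", "vet", "./...").Execute()
+//	if result.Error != nil {
+//		log.Println(result.Output)
+//	}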
+
+///////////////////////////////////////////////////////////////////////////////
+
+const goconveyDSLImport = "github.com/smartystreets/goconvey/convey " // note the trailing space: we don't want to target packages nested in the /convey package.
+const gopathProblem = "Please run goconvey from within $GOPATH/src (also, symlinks might be problematic). Output and Error: "
+
+var coverageStatementRE = regexp.MustCompile(`(?m)^coverage: \d+\.\d% of statements(.*)$|^panic: test timed out after `)
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell_integration_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell_integration_test.go
new file mode 100644
index 00000000000..88c696dbb81
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell_integration_test.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+ "log"
+ "path/filepath"
+ "runtime"
+ "strings"
+
+ "testing"
+)
+
+func TestShellIntegration(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping potentially long-running integration test...")
+ return
+ }
+
+ log.SetFlags(log.LstdFlags | log.Lshortfile | log.Lmicroseconds)
+
+ _, filename, _, _ := runtime.Caller(0)
+ directory := filepath.Join(filepath.Dir(filename), "..", "watch", "integration_testing", "sub")
+ packageName := "github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub"
+
+ shell := NewShell("go", "", true, "5s")
+ output, err := shell.GoTest(directory, packageName, []string{}, []string{"-short"})
+
+ if !strings.Contains(output, "PASS\n") || !strings.Contains(output, "ok") {
+		t.Errorf("Expected output that resembled tests passing but got this instead: [%s]", output)
+ }
+ if err != nil {
+ t.Error("Test run resulted in the following error:", err)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell_test.go
new file mode 100644
index 00000000000..b115c4f356a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/shell_test.go
@@ -0,0 +1,217 @@
+package system
+
+import (
+ "errors"
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+)
+
+func TestShellCommandComposition(t *testing.T) {
+ var (
+ buildFailed = Command{Error: errors.New("BUILD FAILURE!")}
+ buildSucceeded = Command{Output: "ok"}
+ goConvey = Command{Output: "[fmt github.com/smartystreets/goconvey/convey net/http net/http/httptest path runtime strconv strings testing time]"}
+ noGoConvey = Command{Output: "[fmt net/http net/http/httptest path runtime strconv strings testing time]"}
+ errorGoConvey = Command{Output: "This is a wacky error", Error: errors.New("This happens when running goconvey outside your $GOPATH (symlinked code).")}
+ noCoveragePassed = Command{Output: "PASS\nok github.com/smartystreets/goconvey/examples 0.012s"}
+ coveragePassed = Command{Output: "PASS\ncoverage: 100.0% of statements\nok github.com/smartystreets/goconvey/examples 0.012s"}
+ coverageFailed = Command{
+ Error: errors.New("Tests bombed!"),
+ Output: "--- FAIL: TestIntegerManipulation (0.00 seconds)\nFAIL\ncoverage: 100.0% of statements\nexit status 1\nFAIL github.com/smartystreets/goconvey/examples 0.013s",
+ }
+ coverageFailedTimeout = Command{
+ Error: errors.New("Tests bombed!"),
+ Output: "=== RUN SomeTest\n--- PASS: SomeTest (0.00 seconds)\n=== RUN TimeoutTest\npanic: test timed out after 5s\n\ngoroutine 27 [running]:\n",
+ }
+ )
+
+ const (
+ yesCoverage = true
+ noCoverage = false
+ )
+
+ Convey("When attempting to run tests with coverage flags", t, func() {
+		Convey("And the build failed", func() {
+ result := runWithCoverage(buildFailed, goConvey, noCoverage, "", "", "", "", "-tags=", nil)
+
+ Convey("Then no action should be taken", func() {
+ So(result, ShouldResemble, buildFailed)
+ })
+ })
+
+ Convey("And coverage is not wanted", func() {
+ result := runWithCoverage(buildSucceeded, goConvey, noCoverage, "", "", "", "", "-tags=", nil)
+
+ Convey("Then no action should be taken", func() {
+ So(result, ShouldResemble, buildSucceeded)
+ })
+ })
+
+		Convey("And the package being tested uses the GoConvey DSL (`convey` package)", func() {
+ result := runWithCoverage(buildSucceeded, goConvey, yesCoverage, "reportsPath", "/directory", "go", "5s", "-tags=bob", []string{"-arg1", "-arg2"})
+
+ Convey("The returned command should be well formed (and include the -json flag)", func() {
+ So(result, ShouldResemble, Command{
+ directory: "/directory",
+ executable: "go",
+ arguments: []string{"test", "-v", "-coverprofile=reportsPath", "-tags=bob", "-covermode=set", "-timeout=5s", "-json", "-arg1", "-arg2"},
+ })
+ })
+ })
+
+ Convey("And the package being tested does NOT use the GoConvey DSL", func() {
+ result := runWithCoverage(buildSucceeded, noGoConvey, yesCoverage, "reportsPath", "/directory", "go", "1s", "-tags=bob", []string{"-arg1", "-arg2"})
+
+ Convey("The returned command should be well formed (and NOT include the -json flag)", func() {
+ So(result, ShouldResemble, Command{
+ directory: "/directory",
+ executable: "go",
+ arguments: []string{"test", "-v", "-coverprofile=reportsPath", "-tags=bob", "-covermode=set", "-timeout=1s", "-arg1", "-arg2"},
+ })
+ })
+ })
+
+		Convey("And the package being tested has been symlinked outside the $GOPATH", func() {
+ result := runWithCoverage(buildSucceeded, errorGoConvey, yesCoverage, "reportsPath", "/directory", "go", "1s", "-tags=", nil)
+
+ Convey("The returned command should be the compilation command", func() {
+ So(result, ShouldResemble, buildSucceeded)
+ })
+ })
+
+ Convey("And the package being tested specifies an alternate covermode", func() {
+ result := runWithCoverage(buildSucceeded, noGoConvey, yesCoverage, "reportsPath", "/directory", "go", "1s", "-tags=", []string{"-covermode=atomic"})
+
+ Convey("The returned command should allow the alternate value", func() {
+ So(result, ShouldResemble, Command{
+ directory: "/directory",
+ executable: "go",
+ arguments: []string{"test", "-v", "-coverprofile=reportsPath", "-tags=", "-timeout=1s", "-covermode=atomic"},
+ })
+ })
+ })
+
+ Convey("And the package being tested specifies an alternate timeout", func() {
+ result := runWithCoverage(buildSucceeded, noGoConvey, yesCoverage, "reportsPath", "/directory", "go", "1s", "-tags=", []string{"-timeout=5s"})
+
+ Convey("The returned command should allow the alternate value", func() {
+ So(result, ShouldResemble, Command{
+ directory: "/directory",
+ executable: "go",
+ arguments: []string{"test", "-v", "-coverprofile=reportsPath", "-tags=", "-covermode=set", "-timeout=5s"},
+ })
+ })
+ })
+
+ })
+
+ Convey("When attempting to run tests without the coverage flags", t, func() {
+ Convey("And tests already succeeded with coverage", func() {
+ result := runWithoutCoverage(buildSucceeded, coveragePassed, goConvey, "/directory", "go", "1s", "-tags=", []string{"-arg1", "-arg2"})
+
+ Convey("Then no action should be taken", func() {
+ So(result, ShouldResemble, coveragePassed)
+ })
+ })
+
+ Convey("And tests already failed (legitimately) with coverage", func() {
+ result := runWithoutCoverage(buildSucceeded, coverageFailed, goConvey, "/directory", "go", "1s", "-tags=", []string{"-arg1", "-arg2"})
+
+ Convey("Then no action should be taken", func() {
+ So(result, ShouldResemble, coverageFailed)
+ })
+ })
+
+ Convey("And tests already failed (timeout) with coverage", func() {
+ result := runWithoutCoverage(buildSucceeded, coverageFailedTimeout, goConvey, "/directory", "go", "1s", "-tags=", []string{"-arg1", "-arg2"})
+
+ Convey("Then no action should be taken", func() {
+ So(result, ShouldResemble, coverageFailedTimeout)
+ })
+ })
+
+ Convey("And the build failed earlier", func() {
+ result := runWithoutCoverage(buildFailed, Command{}, goConvey, "/directory", "go", "1s", "-tags=", []string{"-arg1", "-arg2"})
+
+ Convey("Then no action should be taken", func() {
+ So(result, ShouldResemble, buildFailed)
+ })
+ })
+
+ Convey("And the goconvey dsl command failed (probably because of symlinks)", func() {
+ result := runWithoutCoverage(buildSucceeded, Command{}, errorGoConvey, "", "", "", "-tags=", nil)
+
+ Convey("Then no action should be taken", func() {
+ So(result, ShouldResemble, errorGoConvey)
+ })
+ })
+
+ Convey("And the package being tested uses the GoConvey DSL (`convey` package)", func() {
+ result := runWithoutCoverage(buildSucceeded, buildSucceeded, goConvey, "/directory", "go", "1s", "-tags=", []string{"-arg1", "-arg2"})
+
+ Convey("Then the returned command should be well formed (and include the -json flag)", func() {
+ So(result, ShouldResemble, Command{
+ directory: "/directory",
+ executable: "go",
+ arguments: []string{"test", "-v", "-tags=", "-timeout=1s", "-json", "-arg1", "-arg2"},
+ })
+ })
+ })
+
+ Convey("And the package being tested does NOT use the GoConvey DSL", func() {
+ result := runWithoutCoverage(buildSucceeded, noCoveragePassed, noGoConvey, "/directory", "go", "1s", "-tags=", []string{"-arg1", "-arg2"})
+
+ Convey("Then the returned command should be well formed (and NOT include the -json flag)", func() {
+ So(result, ShouldResemble, Command{
+ directory: "/directory",
+ executable: "go",
+ arguments: []string{"test", "-v", "-tags=", "-timeout=1s", "-arg1", "-arg2"},
+ })
+ })
+ })
+
+ Convey("And the package being tested specifies an alternate timeout", func() {
+ result := runWithoutCoverage(buildSucceeded, buildSucceeded, noGoConvey, "/directory", "go", "1s", "-tags=", []string{"-timeout=5s"})
+
+ Convey("The returned command should allow the alternate value", func() {
+ So(result, ShouldResemble, Command{
+ directory: "/directory",
+ executable: "go",
+ arguments: []string{"test", "-v", "-tags=", "-timeout=5s"},
+ })
+ })
+ })
+
+ })
+
+ Convey("When generating coverage reports", t, func() {
+ Convey("And the previous command failed for any reason (compilation or failed tests)", func() {
+ result := generateReports(buildFailed, yesCoverage, "/directory", "go", "reportData", "reportHTML")
+
+ Convey("Then no action should be taken", func() {
+ So(result, ShouldResemble, buildFailed)
+ })
+ })
+
+ Convey("And coverage reports are unwanted", func() {
+ result := generateReports(noCoveragePassed, noCoverage, "/directory", "go", "reportData", "reportHTML")
+
+			Convey("Then no action should be taken", func() {
+ So(result, ShouldResemble, noCoveragePassed)
+ })
+ })
+
+ Convey("And tests passed and coverage reports are wanted", func() {
+ result := generateReports(coveragePassed, yesCoverage, "/directory", "go", "reportData", "reportHTML")
+
+ Convey("Then the resulting command should be well-formed", func() {
+ So(result, ShouldResemble, Command{
+ directory: "/directory",
+ executable: "go",
+ arguments: []string{"tool", "cover", "-html=reportData", "-o", "reportHTML"},
+ })
+ })
+ })
+ })
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/system.goconvey b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/system.goconvey
new file mode 100644
index 00000000000..aa26e8b739b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/system/system.goconvey
@@ -0,0 +1,3 @@
+#ignore
+-timeout=1s
+-short \ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/functional_core.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/functional_core.go
new file mode 100644
index 00000000000..404a25d33ac
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/functional_core.go
@@ -0,0 +1,171 @@
+package watch
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/smartystreets/goconvey/web/server/messaging"
+)
+
+///////////////////////////////////////////////////////////////////////////////
+
+func Categorize(items chan *FileSystemItem, root string, watchSuffixes []string) (folders, profiles, goFiles []*FileSystemItem) {
+ for item := range items {
+ if item.IsFolder && !isHidden(item.Name) && !foundInHiddenDirectory(item, root) {
+ folders = append(folders, item)
+
+ } else if strings.HasSuffix(item.Name, ".goconvey") && len(item.Name) > len(".goconvey") {
+ profiles = append(profiles, item)
+
+ } else {
+ for _, suffix := range watchSuffixes {
+ if strings.HasSuffix(item.Name, suffix) && !isHidden(item.Name) && !foundInHiddenDirectory(item, root) {
+ goFiles = append(goFiles, item)
+ }
+ }
+ }
+ }
+ return folders, profiles, goFiles
+}
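+
+// Illustrative example: with watchSuffixes == []string{".go"}, a visible
+// "handler.go" lands in goFiles, "app.goconvey" lands in profiles, a bare
+// ".goconvey" matches neither bucket, and anything prefixed with ".", "_",
+// or "flymake_" (or living beneath such a directory) is dropped.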
+
+func foundInHiddenDirectory(item *FileSystemItem, root string) bool {
+ path := item.Path
+ if len(path) > len(root) {
+ path = path[len(root):]
+ }
+
+ for _, folder := range strings.Split(filepath.Dir(path), slash) {
+ if isHidden(folder) {
+ return true
+ }
+ }
+
+ return false
+}
+func isHidden(path string) bool {
+ return strings.HasPrefix(path, ".") || strings.HasPrefix(path, "_") || strings.HasPrefix(path, "flymake_")
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+func ParseProfile(profile string) (isDisabled bool, tags, arguments []string) {
+ lines := strings.Split(profile, "\n")
+
+ for _, line := range lines {
+ line = strings.TrimSpace(line)
+
+ if len(arguments) == 0 && strings.ToLower(line) == "ignore" {
+ return true, nil, nil
+
+ } else if strings.HasPrefix(line, "-tags=") {
+ tags = append(tags, strings.Split(strings.SplitN(line, "=", 2)[1], ",")...)
+ continue
+
+ } else if len(line) == 0 {
+ continue
+
+ } else if strings.HasPrefix(line, "#") {
+ continue
+
+ } else if strings.HasPrefix(line, "//") {
+ continue
+
+ } else if line == "-cover" || strings.HasPrefix(line, "-coverprofile") {
+ continue
+
+ } else if line == "-v" {
+ continue // Verbose mode is always enabled so there is no need to record it here.
+
+ }
+
+ arguments = append(arguments, line)
+ }
+
+ return false, tags, arguments
+}
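+
+// Illustrative profile (hypothetical contents):
+//
+//	-tags=integration,slow
+//	#-run=TestFoo
+//	-timeout=10s
+//
+// parses to isDisabled == false, tags == ["integration", "slow"], and
+// arguments == ["-timeout=10s"]; an "IGNORE" seen before any test argument
+// would instead return (true, nil, nil).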
+
+///////////////////////////////////////////////////////////////////////////////
+
+func CreateFolders(items []*FileSystemItem) messaging.Folders {
+ folders := map[string]*messaging.Folder{}
+
+ for _, item := range items {
+ folders[item.Path] = &messaging.Folder{Path: item.Path, Root: item.Root}
+ }
+
+ return folders
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+func LimitDepth(folders messaging.Folders, depth int) {
+ if depth < 0 {
+ return
+ }
+
+ for path, folder := range folders {
+ if strings.Count(path[len(folder.Root):], slash) > depth {
+ delete(folders, path)
+ }
+ }
+}
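+
+// Illustrative example: with Root "/root", the folder "/root/1/2/3" has the
+// relative path "/1/2/3" (three separators), so LimitDepth(folders, 2)
+// deletes it while "/root/1" and "/root/1/2" survive.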
+
+///////////////////////////////////////////////////////////////////////////////
+
+func AttachProfiles(folders messaging.Folders, items []*FileSystemItem) {
+ for _, profile := range items {
+ if folder, exists := folders[filepath.Dir(profile.Path)]; exists {
+ folder.Disabled, folder.BuildTags, folder.TestArguments = profile.ProfileDisabled, profile.ProfileTags, profile.ProfileArguments
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+func MarkIgnored(folders messaging.Folders, ignored map[string]struct{}) {
+ if len(ignored) == 0 {
+ return
+ }
+
+ for _, folder := range folders {
+ for ignored := range ignored {
+ if !folder.Ignored && strings.HasSuffix(folder.Path, ignored) {
+ folder.Ignored = true
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+func ActiveFolders(folders messaging.Folders) messaging.Folders {
+ var active messaging.Folders = map[string]*messaging.Folder{}
+
+ for path, folder := range folders {
+ if folder.Ignored || folder.Disabled {
+ continue
+ }
+
+ active[path] = folder
+ }
+ return active
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+func Sum(folders messaging.Folders, items []*FileSystemItem) int64 {
+ var sum int64
+ for _, item := range items {
+ if _, exists := folders[filepath.Dir(item.Path)]; exists {
+ sum += item.Size + item.Modified
+ }
+ }
+ return sum
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const slash = string(os.PathSeparator)
+
+///////////////////////////////////////////////////////////////////////////////
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/functional_core_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/functional_core_test.go
new file mode 100644
index 00000000000..d0f74867acc
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/functional_core_test.go
@@ -0,0 +1,419 @@
+package watch
+
+import (
+ "fmt"
+ "testing"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "github.com/smartystreets/goconvey/web/server/messaging"
+)
+
+func TestCategorize(t *testing.T) {
+ fileSystem := []*FileSystemItem{
+ {
+ Root: "/.hello",
+ Path: "/.hello",
+ Name: "hello",
+ IsFolder: true,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/1/hello/world.txt",
+ Name: "world.txt",
+ IsFolder: false,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/1/2/3/4/5/hello/world.go",
+ Name: "world.go",
+ IsFolder: false,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/world.go",
+ Name: "world.go",
+ IsFolder: false,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/hello/world.tmpl",
+ Name: "world.tmpl",
+ IsFolder: false,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/hello/.world.go",
+ Name: ".world.go",
+ IsFolder: false,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/hello/_world.go",
+			Name:     "_world.go",
+ IsFolder: false,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/hello/flymake_world.go",
+ Name: "flymake_world.go",
+ IsFolder: false,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/.hello",
+ Name: ".hello",
+ IsFolder: true,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/.hello/hello",
+ Name: "hello",
+ IsFolder: true,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/.hello/world.go",
+ Name: "world.go",
+ IsFolder: false,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/hello/hi.goconvey",
+ Name: "hi.goconvey",
+ IsFolder: false,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/hello2/.goconvey",
+ Name: ".goconvey",
+ IsFolder: false,
+ },
+ {
+ Root: "/.hello",
+ Path: "/.hello/_hello",
+ Name: "_hello",
+ IsFolder: true,
+ },
+ }
+
+ Convey("A stream of file system items should be categorized correctly", t, func() {
+ items := make(chan *FileSystemItem)
+
+ go func() {
+ for _, item := range fileSystem {
+ items <- item
+ }
+ close(items)
+ }()
+
+ folders, profiles, goFiles := Categorize(items, "/.hello", []string{".go"})
+ So(folders, ShouldResemble, fileSystem[:1])
+ So(profiles, ShouldResemble, fileSystem[11:12])
+ So(goFiles, ShouldResemble, fileSystem[2:4])
+ })
+
+	Convey("A stream of file system items should be categorized correctly when multiple suffixes are watched", t, func() {
+ items := make(chan *FileSystemItem)
+
+ go func() {
+ for _, item := range fileSystem {
+ items <- item
+ }
+ close(items)
+ }()
+
+ folders, profiles, goFiles := Categorize(items, "/.hello", []string{".go", ".tmpl"})
+ So(folders, ShouldResemble, fileSystem[:1])
+ So(profiles, ShouldResemble, fileSystem[11:12])
+ So(goFiles, ShouldResemble, fileSystem[2:5])
+ })
+}
+
+func TestParseProfile(t *testing.T) {
+ var parseProfileTestCases = []struct {
+ SKIP bool
+ description string
+ input string
+ resultIgnored bool
+ resultTestTags []string
+ resultTestArgs []string
+ }{
+ {
+ SKIP: false,
+ description: "Blank profile",
+ input: "",
+ resultIgnored: false,
+ },
+ {
+ SKIP: false,
+ description: "All lines are blank or whitespace",
+ input: "\n \n \t\t\t \n \n \n",
+ resultIgnored: false,
+ },
+ {
+ SKIP: false,
+ description: "Ignored package, no args included",
+ input: "IGNORE\n-timeout=4s",
+ resultIgnored: true,
+ },
+ {
+ SKIP: false,
+ description: "Ignore directive is commented, all args are included",
+ input: "#IGNORE\n-timeout=4s\n-parallel=5",
+ resultIgnored: false,
+ resultTestArgs: []string{"-timeout=4s", "-parallel=5"},
+ },
+ {
+ SKIP: false,
+ description: "No ignore directive, all args are included",
+ input: "-run=TestBlah\n-timeout=42s",
+ resultIgnored: false,
+ resultTestArgs: []string{"-run=TestBlah", "-timeout=42s"},
+ },
+ {
+ SKIP: false,
+ description: "Some args are commented, therefore ignored",
+ input: "-run=TestBlah\n #-timeout=42s",
+ resultIgnored: false,
+ resultTestArgs: []string{"-run=TestBlah"},
+ },
+ {
+ SKIP: false,
+ description: "All args are commented, therefore all are ignored",
+ input: "#-run=TestBlah\n//-timeout=42",
+ resultIgnored: false,
+ },
+ {
+ SKIP: false,
+ description: "We ignore certain flags like -v and -cover and -coverprofile because they are specified by the shell",
+ input: "-v\n-cover\n-coverprofile=blah.out",
+ resultIgnored: false,
+ },
+ {
+ SKIP: false,
+ description: "We allow certain coverage flags like -coverpkg and -covermode",
+ input: "-coverpkg=blah\n-covermode=atomic",
+ resultIgnored: false,
+ resultTestArgs: []string{"-coverpkg=blah", "-covermode=atomic"},
+ },
+ {
+ SKIP: false,
+ description: "We parse out -tags particularly",
+ input: "-coverpkg=blah\n-covermode=atomic\n-tags=foo,bar",
+ resultIgnored: false,
+ resultTestTags: []string{"foo", "bar"},
+ resultTestArgs: []string{"-coverpkg=blah", "-covermode=atomic"},
+ },
+ }
+
+ for i, test := range parseProfileTestCases {
+ if test.SKIP {
+ SkipConvey(fmt.Sprintf("Profile Parsing, Test Case #%d: %s (SKIPPED)", i, test.description), t, nil)
+ } else {
+ Convey(fmt.Sprintf("Profile Parsing, Test Case #%d: %s", i, test.description), t, func() {
+ ignored, testTags, testArgs := ParseProfile(test.input)
+
+ So(ignored, ShouldEqual, test.resultIgnored)
+ So(testTags, ShouldResemble, test.resultTestTags)
+ So(testArgs, ShouldResemble, test.resultTestArgs)
+ })
+ }
+ }
+}
+
+func TestCreateFolders(t *testing.T) {
+ Convey("File system items that represent folders should be converted to folder structs correctly", t, func() {
+ expected := map[string]*messaging.Folder{
+ "/root/1": {Path: "/root/1", Root: "/root"},
+ "/root/1/2": {Path: "/root/1/2", Root: "/root"},
+ "/root/1/2/3": {Path: "/root/1/2/3", Root: "/root"},
+ }
+
+ inputs := []*FileSystemItem{
+ {Path: "/root/1", Root: "/root", IsFolder: true},
+ {Path: "/root/1/2", Root: "/root", IsFolder: true},
+ {Path: "/root/1/2/3", Root: "/root", IsFolder: true},
+ }
+
+ actual := CreateFolders(inputs)
+
+ for key, actualValue := range actual {
+ So(actualValue, ShouldResemble, expected[key])
+ }
+ })
+}
+
+func TestLimitDepth(t *testing.T) {
+ Convey("Subject: Limiting folders based on relative depth from a common root", t, func() {
+
+ folders := map[string]*messaging.Folder{
+ "/root/1": {
+ Path: "/root/1",
+ Root: "/root",
+ },
+ "/root/1/2": {
+ Path: "/root/1/2",
+ Root: "/root",
+ },
+ "/root/1/2/3": {
+ Path: "/root/1/2/3",
+ Root: "/root",
+ },
+ }
+
+ Convey("When there is no depth limit", func() {
+ LimitDepth(folders, -1)
+
+ Convey("No folders should be excluded", func() {
+ So(len(folders), ShouldEqual, 3)
+ })
+ })
+
+ Convey("When there is a limit", func() {
+ LimitDepth(folders, 2)
+
+ Convey("The deepest folder (in this case) should be excluded", func() {
+ So(len(folders), ShouldEqual, 2)
+ _, exists := folders["/root/1/2/3"]
+ So(exists, ShouldBeFalse)
+ })
+ })
+ })
+}
+
+func TestAttachProfiles(t *testing.T) {
+ Convey("Subject: Attaching profile information to a folder", t, func() {
+ folders := map[string]*messaging.Folder{
+ "/root/1": {
+ Path: "/root/1",
+ Root: "/root",
+ },
+ "/root/1/2": {
+ Path: "/root/1/2",
+ Root: "/root",
+ },
+ "/root/1/2/3": {
+ Path: "/root/1/2/3",
+ Root: "/root",
+ },
+ }
+
+ profiles := []*FileSystemItem{
+ {
+ Path: "/root/too-shallow.goconvey",
+ ProfileDisabled: true,
+ ProfileArguments: []string{"1", "2"},
+ },
+ {
+ Path: "/root/1/2/hi.goconvey",
+ ProfileDisabled: true,
+ ProfileArguments: []string{"1", "2"},
+ },
+ {
+ Path: "/root/1/2/3/4/does-not-exist",
+ ProfileDisabled: true,
+ ProfileArguments: []string{"1", "2", "3", "4"},
+ },
+ }
+
+ Convey("Profiles that match folders should be merged with those folders", func() {
+ AttachProfiles(folders, profiles)
+
+ Convey("No profiles matched the first folder, so no assignments should occur", func() {
+ So(folders["/root/1"].Disabled, ShouldBeFalse)
+ So(folders["/root/1"].TestArguments, ShouldBeEmpty)
+ })
+
+ Convey("The second folder should match the first profile", func() {
+ So(folders["/root/1/2"].Disabled, ShouldBeTrue)
+ So(folders["/root/1/2"].TestArguments, ShouldResemble, []string{"1", "2"})
+ })
+
+ Convey("No profiles match the third folder so no assignments should occur", func() {
+ So(folders["/root/1/2/3"].Disabled, ShouldBeFalse)
+ So(folders["/root/1/2/3"].TestArguments, ShouldBeEmpty)
+ })
+ })
+ })
+}
+
+func TestMarkIgnored(t *testing.T) {
+ Convey("Subject: folders that have been ignored should be marked as such", t, func() {
+ folders := map[string]*messaging.Folder{
+ "/root/1": {
+ Path: "/root/1",
+ Root: "/root",
+ },
+ "/root/1/2": {
+ Path: "/root/1/2",
+ Root: "/root",
+ },
+ "/root/1/2/3": {
+ Path: "/root/1/2/3",
+ Root: "/root",
+ },
+ }
+
+ Convey("When there are no ignored folders", func() {
+ ignored := map[string]struct{}{}
+ MarkIgnored(folders, ignored)
+
+ Convey("No folders should be marked as ignored", func() {
+ So(folders["/root/1"].Ignored, ShouldBeFalse)
+ So(folders["/root/1/2"].Ignored, ShouldBeFalse)
+ So(folders["/root/1/2/3"].Ignored, ShouldBeFalse)
+ })
+ })
+ Convey("When there are ignored folders", func() {
+ ignored := map[string]struct{}{"1/2": {}}
+ MarkIgnored(folders, ignored)
+
+ Convey("The ignored folders should be marked as ignored", func() {
+ So(folders["/root/1"].Ignored, ShouldBeFalse)
+ So(folders["/root/1/2"].Ignored, ShouldBeTrue)
+ So(folders["/root/1/2/3"].Ignored, ShouldBeFalse)
+ })
+ })
+ })
+}
+
+func TestActiveFolders(t *testing.T) {
+ Convey("Subject: Folders that are not ignored or disabled are active", t, func() {
+ folders := map[string]*messaging.Folder{
+ "/root/1": {
+ Path: "/root/1",
+ Root: "/root",
+ Ignored: true,
+ },
+ "/root/1/2": {
+ Path: "/root/1/2",
+ Root: "/root",
+ },
+ "/root/1/2/3": {
+ Path: "/root/1/2/3",
+ Root: "/root",
+ Disabled: true,
+ },
+ }
+
+ active := ActiveFolders(folders)
+
+ So(len(active), ShouldEqual, 1)
+ So(active["/root/1/2"], ShouldResemble, folders["/root/1/2"])
+ })
+}
+
+func TestSum(t *testing.T) {
+	Convey("Subject: file system items within specified directories should be counted and summed", t, func() {
+ folders := map[string]*messaging.Folder{
+ "/root/1": {Path: "/root/1", Root: "/root", Ignored: true},
+ }
+ items := []*FileSystemItem{
+ {Size: 1, Modified: 3, Path: "/root/1/hi.go"},
+ {Size: 7, Modified: 13, Path: "/root/1/bye.go"},
+ {Size: 33, Modified: 45, Path: "/root/1/2/salutations.go"}, // not counted
+ }
+
+ So(Sum(folders, items), ShouldEqual, 1+3+7+13)
+ })
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/imperative_shell.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/imperative_shell.go
new file mode 100644
index 00000000000..f4e886c9283
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/imperative_shell.go
@@ -0,0 +1,77 @@
+package watch
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+///////////////////////////////////////////////////////////////////////////////
+
+type FileSystemItem struct {
+ Root string
+ Path string
+ Name string
+ Size int64
+ Modified int64
+ IsFolder bool
+
+ ProfileDisabled bool
+ ProfileTags []string
+ ProfileArguments []string
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+func YieldFileSystemItems(root string, excludedDirs []string) chan *FileSystemItem {
+ items := make(chan *FileSystemItem)
+
+ go func() {
+ filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return filepath.SkipDir
+ }
+
+ basePath := filepath.Base(path)
+ for _, item := range excludedDirs {
+			if item != "" && item == basePath && info.IsDir() {
+ return filepath.SkipDir
+ }
+ }
+
+ items <- &FileSystemItem{
+ Root: root,
+ Path: path,
+ Name: info.Name(),
+ Size: info.Size(),
+ Modified: info.ModTime().Unix(),
+ IsFolder: info.IsDir(),
+ }
+
+ return nil
+ })
+ close(items)
+ }()
+
+ return items
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// ReadContents reads files wholesale. This function is only called on files
+// that end in '.goconvey'. These files should be very small, probably not
+// ever more than a few hundred bytes. The ignored errors are ok because in
+// the event of an IO error all that need be returned is an empty string.
+func ReadContents(path string) string {
+ file, err := os.Open(path)
+ if err != nil {
+ return ""
+ }
+ defer file.Close()
+ reader := io.LimitReader(file, 1024*4)
+ content, _ := ioutil.ReadAll(reader)
+ return string(content)
+}
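+
+// Note: the 4KB LimitReader above bounds the read, so a pathologically large
+// .goconvey file is silently truncated rather than read in full.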
+
+///////////////////////////////////////////////////////////////////////////////
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration.go
new file mode 100644
index 00000000000..941241ab86a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration.go
@@ -0,0 +1,185 @@
+package watch
+
+import (
+ "log"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/smartystreets/goconvey/web/server/messaging"
+)
+
+type Watcher struct {
+ nap time.Duration
+ rootFolder string
+ folderDepth int
+ ignoredFolders map[string]struct{}
+ fileSystemState int64
+ paused bool
+ stopped bool
+ watchSuffixes []string
+ excludedDirs []string
+
+ input chan messaging.WatcherCommand
+ output chan messaging.Folders
+
+ lock sync.RWMutex
+}
+
+func NewWatcher(rootFolder string, folderDepth int, nap time.Duration,
+ input chan messaging.WatcherCommand, output chan messaging.Folders, watchSuffixes string, excludedDirs []string) *Watcher {
+
+ return &Watcher{
+ nap: nap,
+ rootFolder: rootFolder,
+ folderDepth: folderDepth,
+ input: input,
+ output: output,
+ watchSuffixes: strings.Split(watchSuffixes, ","),
+ excludedDirs: excludedDirs,
+
+ ignoredFolders: make(map[string]struct{}),
+ }
+}
+
+func (this *Watcher) Listen() {
+ for {
+ if this.stopped {
+ return
+ }
+
+ select {
+
+ case command := <-this.input:
+ this.respond(command)
+
+ default:
+ if !this.paused {
+ this.scan()
+ }
+			time.Sleep(this.nap)
+ }
+ }
+}
+
+func (this *Watcher) respond(command messaging.WatcherCommand) {
+ switch command.Instruction {
+
+ case messaging.WatcherAdjustRoot:
+ log.Println("Adjusting root...")
+ this.rootFolder = command.Details
+ this.execute()
+
+ case messaging.WatcherIgnore:
+ log.Println("Ignoring specified folders")
+ this.ignore(command.Details)
+ // Prevent a filesystem change due to the number of active folders changing
+ _, checksum := this.gather()
+ this.set(checksum)
+
+ case messaging.WatcherReinstate:
+ log.Println("Reinstating specified folders")
+ this.reinstate(command.Details)
+ // Prevent a filesystem change due to the number of active folders changing
+ _, checksum := this.gather()
+ this.set(checksum)
+
+ case messaging.WatcherPause:
+ log.Println("Pausing watcher...")
+ this.paused = true
+
+ case messaging.WatcherResume:
+ log.Println("Resuming watcher...")
+ this.paused = false
+
+ case messaging.WatcherExecute:
+ log.Println("Gathering folders for immediate execution...")
+ this.execute()
+
+ case messaging.WatcherStop:
+ log.Println("Stopping the watcher...")
+ close(this.output)
+ this.stopped = true
+
+ default:
+ log.Println("Unrecognized command from server:", command.Instruction)
+ }
+}
+
+func (this *Watcher) execute() {
+ folders, _ := this.gather()
+ this.sendToExecutor(folders)
+}
+
+func (this *Watcher) scan() {
+ folders, checksum := this.gather()
+
+ if checksum == this.fileSystemState {
+ return
+ }
+
+ log.Println("File system state modified, publishing current folders...", this.fileSystemState, checksum)
+
+ defer this.set(checksum)
+ this.sendToExecutor(folders)
+}
+
+func (this *Watcher) gather() (folders messaging.Folders, checksum int64) {
+ items := YieldFileSystemItems(this.rootFolder, this.excludedDirs)
+ folderItems, profileItems, goFileItems := Categorize(items, this.rootFolder, this.watchSuffixes)
+
+ for _, item := range profileItems {
+ // TODO: don't even bother if the item's size is over a few hundred bytes...
+ contents := ReadContents(item.Path)
+ item.ProfileDisabled, item.ProfileTags, item.ProfileArguments = ParseProfile(contents)
+ }
+
+ folders = CreateFolders(folderItems)
+ LimitDepth(folders, this.folderDepth)
+ AttachProfiles(folders, profileItems)
+ this.protectedRead(func() { MarkIgnored(folders, this.ignoredFolders) })
+
+ active := ActiveFolders(folders)
+ checksum = int64(len(active))
+ checksum += Sum(active, profileItems)
+ checksum += Sum(active, goFileItems)
+
+ return folders, checksum
+}
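+
+// Note: the checksum is a cheap change detector rather than a hash: it is the
+// count of active folders plus the sum of size+mtime over every profile and
+// watched source file inside them, so any create, delete, or edit in an
+// active folder perturbs the value and triggers a republish from scan().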
+
+func (this *Watcher) set(state int64) {
+ this.fileSystemState = state
+}
+
+func (this *Watcher) sendToExecutor(folders messaging.Folders) {
+ this.output <- folders
+}
+
+func (this *Watcher) ignore(paths string) {
+ this.protectedWrite(func() {
+ for _, folder := range strings.Split(paths, string(os.PathListSeparator)) {
+ this.ignoredFolders[folder] = struct{}{}
+ log.Println("Currently ignored folders:", this.ignoredFolders)
+ }
+ })
+}
+func (this *Watcher) reinstate(paths string) {
+ this.protectedWrite(func() {
+ for _, folder := range strings.Split(paths, string(os.PathListSeparator)) {
+ delete(this.ignoredFolders, folder)
+ }
+ })
+}
+func (this *Watcher) protectedWrite(protected func()) {
+ this.lock.Lock()
+ defer this.lock.Unlock()
+ protected()
+}
+func (this *Watcher) protectedRead(protected func()) {
+ this.lock.RLock()
+ defer this.lock.RUnlock()
+ protected()
+}
+
+const nap = time.Millisecond * 250
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_test.go
new file mode 100644
index 00000000000..e26ad48c47a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_test.go
@@ -0,0 +1,200 @@
+package watch
+
+import (
+ "bytes"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+
+ . "github.com/smartystreets/goconvey/convey"
+ "github.com/smartystreets/goconvey/web/server/messaging"
+)
+
+func TestWatcher(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping potentially long-running integration test...")
+ return
+ }
+
+ log.SetFlags(log.LstdFlags | log.Lshortfile | log.Lmicroseconds)
+ output := new(bytes.Buffer)
+ log.SetOutput(output)
+ defer func() { t.Log(output.String()) }()
+
+ _, filename, _, _ := runtime.Caller(0)
+ originalRoot := filepath.Join(filepath.Dir(filename), "integration_testing")
+ temporary, err := ioutil.TempDir("/tmp", "goconvey")
+ if err != nil {
+ t.Fatal(err)
+ }
+ root := filepath.Join(temporary, "integration_testing")
+ sub := filepath.Join(root, "sub")
+
+ err = CopyDir(originalRoot, root)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ err = os.RemoveAll(temporary)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ var ( // commands
+ pause = messaging.WatcherCommand{Instruction: messaging.WatcherPause}
+ resume = messaging.WatcherCommand{Instruction: messaging.WatcherResume}
+
+ ignore = messaging.WatcherCommand{Instruction: messaging.WatcherIgnore, Details: sub}
+ reinstate = messaging.WatcherCommand{Instruction: messaging.WatcherReinstate, Details: sub}
+
+ adjustToSub = messaging.WatcherCommand{Instruction: messaging.WatcherAdjustRoot, Details: sub}
+ adjustToRoot = messaging.WatcherCommand{Instruction: messaging.WatcherAdjustRoot, Details: root}
+
+ execute = messaging.WatcherCommand{Instruction: messaging.WatcherExecute}
+
+ bogus = messaging.WatcherCommand{Instruction: 42}
+
+ stop = messaging.WatcherCommand{Instruction: messaging.WatcherStop}
+ )
+
+ Convey("Subject: Watcher operations", t, func() {
+ input := make(chan messaging.WatcherCommand)
+ output := make(chan messaging.Folders)
+ excludedDirs := []string{}
+ watcher := NewWatcher(root, -1, time.Millisecond, input, output, ".go", excludedDirs)
+
+ go watcher.Listen()
+
+ Convey("Initial scan results", func() {
+ go func() { input <- stop }()
+
+ results := []messaging.Folders{}
+ for result := range output {
+ results = append(results, result)
+ }
+
+ So(len(results), ShouldEqual, 1)
+ })
+
+ Convey("Manual execution produces additional results", func() {
+ go func() {
+ input <- execute
+ input <- stop
+ }()
+
+ results := []messaging.Folders{}
+ for result := range output {
+ results = append(results, result)
+ }
+
+ So(len(results), ShouldEqual, 2)
+ So(len(results[0]), ShouldEqual, 2)
+ So(len(results[1]), ShouldEqual, 2)
+ })
+
+ Convey("Ignore and Reinstate commands are not reflected in the scan results", func() {
+ go func() {
+ input <- ignore
+ input <- reinstate
+ input <- stop
+ }()
+
+ results := []messaging.Folders{}
+ for result := range output {
+ results = append(results, result)
+ }
+
+ So(len(results), ShouldEqual, 1)
+ So(results[0][sub].Ignored, ShouldBeFalse) // initial
+ })
+
+ Convey("Adjusting the root changes the number of folders in the scanned results", func() {
+ go func() {
+ input <- adjustToSub
+ input <- adjustToRoot
+ input <- stop
+ }()
+
+ results := []messaging.Folders{}
+ for result := range output {
+ results = append(results, result)
+ }
+
+ So(len(results), ShouldEqual, 3)
+ So(len(results[0]), ShouldEqual, 2) // initial
+ So(len(results[1]), ShouldEqual, 1) // root moved to sub
+ So(len(results[2]), ShouldEqual, 2) // root moved back to original location
+ })
+
+ Convey("A bogus command does not cause any additional scanning beyond the initial scan", func() {
+ go func() {
+ input <- bogus
+ input <- stop
+ }()
+
+ results := []messaging.Folders{}
+ for result := range output {
+ results = append(results, result)
+ }
+
+ So(len(results), ShouldEqual, 1) // initial scan
+ })
+
+ Convey("Scanning occurs as a result of a file system update", func() {
+ go func() {
+ time.Sleep(time.Second)
+ exec.Command("touch", filepath.Join(root, "main.go")).Run()
+ time.Sleep(time.Second)
+ input <- stop
+ }()
+
+ results := []messaging.Folders{}
+ for result := range output {
+ results = append(results, result)
+ }
+
+ So(len(results), ShouldEqual, 2)
+ })
+
+ Convey("Scanning does not occur as a result of resuming after a pause", func() {
+ go func() {
+ input <- pause
+ input <- resume
+ input <- stop
+ }()
+
+ results := []messaging.Folders{}
+ for result := range output {
+ results = append(results, result)
+ }
+
+ So(len(results), ShouldEqual, 1)
+ })
+
+ Convey("Scanning should not occur when the watcher is paused", func() {
+ go func() {
+ input <- pause
+ for x := 0; x < 2; x++ {
+ time.Sleep(time.Millisecond * 250)
+ exec.Command("touch", filepath.Join(root, "main.go")).Run()
+ time.Sleep(time.Millisecond * 250)
+ }
+ input <- resume
+ input <- stop
+ }()
+
+ results := []messaging.Folders{}
+ for result := range output {
+ results = append(results, result)
+ }
+
+ So(len(results), ShouldEqual, 2)
+ })
+ })
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/doc_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/doc_test.go
new file mode 100644
index 00000000000..06ab7d0f9a3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/doc_test.go
@@ -0,0 +1 @@
+package main
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/main.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/main.go
new file mode 100644
index 00000000000..1cd335173be
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/main.go
@@ -0,0 +1,10 @@
+// This file's only purpose is to provide a realistic
+// environment from which to run integration tests
+// against the Watcher.
+package main
+
+import "fmt"
+
+func main() {
+ fmt.Println("Hello, World!")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/.gitignore b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/.gitignore
new file mode 100644
index 00000000000..2574d15c0cc
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/.gitignore
@@ -0,0 +1,2 @@
+github.com-smartystreets-goconvey-web-server-integration_testing-sub.html
+github.com-smartystreets-goconvey-web-server-integration_testing-sub.txt \ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/stuff.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/stuff.go
new file mode 100644
index 00000000000..22673259756
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/stuff.go
@@ -0,0 +1,4 @@
+// This file's only purpose is to provide a realistic
+// environment from which to run integration tests
+// against the Watcher.
+package sub
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/stuff_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/stuff_test.go
new file mode 100644
index 00000000000..ea9eebd2d50
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/stuff_test.go
@@ -0,0 +1,17 @@
+// This file's only purpose is to provide a realistic
+// environment from which to run integration tests
+// against the Watcher.
+package sub
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestStuff(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+
+ fmt.Println()
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/sub.goconvey b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/sub.goconvey
new file mode 100644
index 00000000000..6f6e9405e8f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/integration_testing/sub/sub.goconvey
@@ -0,0 +1,7 @@
+IGNORE
+-short
+-run=TestStuff
+
+// This file's only purpose is to provide a realistic
+// environment from which to run integration tests
+// against the Watcher.
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/util_test.go b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/util_test.go
new file mode 100644
index 00000000000..b5ac11fa02a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/util_test.go
@@ -0,0 +1,92 @@
+// Credits: https://gist.github.com/jaybill/2876519
+package watch
+
+import "os"
+import "io"
+import "io/ioutil"
+import "log"
+
+// CopyFile copies the source file to the destination, preserving the file
+// mode when possible.
+func CopyFile(source string, destination string) (err error) {
+ originalFile, err := os.Open(source)
+ if err != nil {
+ return err
+ }
+ defer originalFile.Close()
+ destinationFile, err := os.Create(destination)
+ if err != nil {
+ return err
+ }
+ defer destinationFile.Close()
+	_, err = io.Copy(destinationFile, originalFile)
+	if err == nil {
+		// Preserve the source file's permissions on the copy.
+		if info, statErr := os.Stat(source); statErr == nil {
+			err = os.Chmod(destination, info.Mode())
+		}
+	}
+
+	return
+}
+
+// Recursively copies a directory tree, attempting to preserve permissions.
+// Source directory must exist, destination directory must *not* exist.
+func CopyDir(source string, destination string) (err error) {
+
+ // get properties of source dir
+ sourceFile, err := os.Stat(source)
+ if err != nil {
+ return err
+ }
+
+ if !sourceFile.IsDir() {
+ return &CustomError{Message: "Source is not a directory"}
+ }
+
+ // ensure destination dir does not already exist
+
+	if _, err = os.Stat(destination); !os.IsNotExist(err) {
+		return &CustomError{Message: "Destination already exists"}
+	}
+
+ // create destination dir
+
+ err = os.MkdirAll(destination, sourceFile.Mode())
+ if err != nil {
+ return err
+ }
+
+	entries, err := ioutil.ReadDir(source)
+	if err != nil {
+		return err
+	}
+
+ for _, entry := range entries {
+
+ sourcePath := source + "/" + entry.Name()
+ destinationPath := destination + "/" + entry.Name()
+ if entry.IsDir() {
+ err = CopyDir(sourcePath, destinationPath)
+ if err != nil {
+ log.Println(err)
+ }
+ } else {
+ // perform copy
+ err = CopyFile(sourcePath, destinationPath)
+ if err != nil {
+ log.Println(err)
+ }
+ }
+
+ }
+ return
+}
+
+// A struct for returning custom error messages
+type CustomError struct {
+ Message string
+}
+
+// Returns the error message defined in Message as a string
+func (this *CustomError) Error() string {
+ return this.Message
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/watch.goconvey b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/watch.goconvey
new file mode 100644
index 00000000000..aa26e8b739b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/smartystreets/goconvey/web/server/watch/watch.goconvey
@@ -0,0 +1,3 @@
+#ignore
+-timeout=1s
+-short \ No newline at end of file
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/LICENSE b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/LICENSE
new file mode 100644
index 00000000000..37ec93a14fd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/README.md b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/README.md
new file mode 100644
index 00000000000..6bd3383a0e8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/README.md
@@ -0,0 +1,26 @@
+# OpenSSL bindings for Go
+
+Please see http://godoc.org/github.com/spacemonkeygo/openssl for more info
+
+### License
+
+Copyright (C) 2014 Space Monkey, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+### Using on Windows
+1. Install [mingw-w64](http://mingw-w64.sourceforge.net/)
+2. Install [pkg-config-lite](http://sourceforge.net/projects/pkgconfiglite)
+3. Build (or install precompiled) openssl for mingw-w64
+4. Set __PKG\_CONFIG\_PATH__ to the directory containing openssl.pc
+   (e.g. c:\mingw64\mingw64\lib\pkgconfig)
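+
+### Quick sanity check
+
+A minimal sketch (assuming `NewCtx` from this package's public API) to
+confirm that cgo can locate the OpenSSL headers and libraries:
+
+    package main
+
+    import (
+        "fmt"
+
+        "github.com/spacemonkeygo/openssl"
+    )
+
+    func main() {
+        // If this builds and runs, pkg-config and cgo are set up correctly.
+        ctx, err := openssl.NewCtx()
+        fmt.Println(ctx != nil, err)
+    }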
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/bio.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/bio.go
new file mode 100644
index 00000000000..1be93aaa23a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/bio.go
@@ -0,0 +1,355 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+/*
+#include <string.h>
+#include <openssl/bio.h>
+
+extern int cbioNew(BIO *b);
+static int cbioFree(BIO *b) {
+ return 1;
+}
+
+extern int writeBioWrite(BIO *b, char *buf, int size);
+extern long writeBioCtrl(BIO *b, int cmd, long arg1, void *arg2);
+static int writeBioPuts(BIO *b, const char *str) {
+ return writeBioWrite(b, (char*)str, (int)strlen(str));
+}
+
+extern int readBioRead(BIO *b, char *buf, int size);
+extern long readBioCtrl(BIO *b, int cmd, long arg1, void *arg2);
+
+static BIO_METHOD writeBioMethod = {
+ BIO_TYPE_SOURCE_SINK,
+ "Go Write BIO",
+ (int (*)(BIO *, const char *, int))writeBioWrite,
+ NULL,
+ writeBioPuts,
+ NULL,
+ writeBioCtrl,
+ cbioNew,
+ cbioFree,
+ NULL};
+
+static BIO_METHOD* BIO_s_writeBio() { return &writeBioMethod; }
+
+static BIO_METHOD readBioMethod = {
+ BIO_TYPE_SOURCE_SINK,
+ "Go Read BIO",
+ NULL,
+ readBioRead,
+ NULL,
+ NULL,
+ readBioCtrl,
+ cbioNew,
+ cbioFree,
+ NULL};
+
+static BIO_METHOD* BIO_s_readBio() { return &readBioMethod; }
+*/
+import "C"
+
+import (
+ "errors"
+ "io"
+ "reflect"
+ "sync"
+ "unsafe"
+)
+
+const (
+ SSLRecordSize = 16 * 1024
+)
+
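+// nonCopyGoBytes builds a []byte header that aliases length bytes of
+// C-owned memory at ptr without copying; callers must ensure the C
+// buffer outlives every use of the returned slice.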
+func nonCopyGoBytes(ptr uintptr, length int) []byte {
+ var slice []byte
+ header := (*reflect.SliceHeader)(unsafe.Pointer(&slice))
+ header.Cap = length
+ header.Len = length
+ header.Data = ptr
+ return slice
+}
+
+func nonCopyCString(data *C.char, size C.int) []byte {
+ return nonCopyGoBytes(uintptr(unsafe.Pointer(data)), int(size))
+}
+
+//export cbioNew
+func cbioNew(b *C.BIO) C.int {
+ b.shutdown = 1
+ b.init = 1
+ b.num = -1
+ b.ptr = nil
+ b.flags = 0
+ return 1
+}
+
+type writeBio struct {
+ data_mtx sync.Mutex
+ op_mtx sync.Mutex
+ buf []byte
+ release_buffers bool
+}
+
+func loadWritePtr(b *C.BIO) *writeBio {
+ return (*writeBio)(unsafe.Pointer(b.ptr))
+}
+
+func bioClearRetryFlags(b *C.BIO) {
+ // from BIO_clear_retry_flags and BIO_clear_flags
+ b.flags &= ^(C.BIO_FLAGS_RWS | C.BIO_FLAGS_SHOULD_RETRY)
+}
+
+func bioSetRetryRead(b *C.BIO) {
+ // from BIO_set_retry_read and BIO_set_flags
+ b.flags |= (C.BIO_FLAGS_READ | C.BIO_FLAGS_SHOULD_RETRY)
+}
+
+//export writeBioWrite
+func writeBioWrite(b *C.BIO, data *C.char, size C.int) (rc C.int) {
+ defer func() {
+ if err := recover(); err != nil {
+ logger.Critf("openssl: writeBioWrite panic'd: %v", err)
+ rc = -1
+ }
+ }()
+ ptr := loadWritePtr(b)
+ if ptr == nil || data == nil || size < 0 {
+ return -1
+ }
+ ptr.data_mtx.Lock()
+ defer ptr.data_mtx.Unlock()
+ bioClearRetryFlags(b)
+ ptr.buf = append(ptr.buf, nonCopyCString(data, size)...)
+ return size
+}
+
+//export writeBioCtrl
+func writeBioCtrl(b *C.BIO, cmd C.int, arg1 C.long, arg2 unsafe.Pointer) (
+ rc C.long) {
+ defer func() {
+ if err := recover(); err != nil {
+ logger.Critf("openssl: writeBioCtrl panic'd: %v", err)
+ rc = -1
+ }
+ }()
+ switch cmd {
+ case C.BIO_CTRL_WPENDING:
+ return writeBioPending(b)
+ case C.BIO_CTRL_DUP, C.BIO_CTRL_FLUSH:
+ return 1
+ default:
+ return 0
+ }
+}
+
+func writeBioPending(b *C.BIO) C.long {
+ ptr := loadWritePtr(b)
+ if ptr == nil {
+ return 0
+ }
+ ptr.data_mtx.Lock()
+ defer ptr.data_mtx.Unlock()
+ return C.long(len(ptr.buf))
+}
+
+func (b *writeBio) WriteTo(w io.Writer) (rv int64, err error) {
+ b.op_mtx.Lock()
+ defer b.op_mtx.Unlock()
+
+ // write whatever data we currently have
+ b.data_mtx.Lock()
+ data := b.buf
+ b.data_mtx.Unlock()
+
+ if len(data) == 0 {
+ return 0, nil
+ }
+ n, err := w.Write(data)
+
+ // subtract however much data we wrote from the buffer
+ b.data_mtx.Lock()
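+	// copy shifts the unwritten tail to the front of the buffer and
+	// returns the tail's length, which becomes the new buffer length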
+ b.buf = b.buf[:copy(b.buf, b.buf[n:])]
+ if b.release_buffers && len(b.buf) == 0 {
+ b.buf = nil
+ }
+ b.data_mtx.Unlock()
+
+ return int64(n), err
+}
+
+func (self *writeBio) Disconnect(b *C.BIO) {
+ if loadWritePtr(b) == self {
+ b.ptr = nil
+ }
+}
+
+func (b *writeBio) MakeCBIO() *C.BIO {
+ rv := C.BIO_new(C.BIO_s_writeBio())
+ rv.ptr = unsafe.Pointer(b)
+ return rv
+}
+
+type readBio struct {
+ data_mtx sync.Mutex
+ op_mtx sync.Mutex
+ buf []byte
+ eof bool
+ release_buffers bool
+}
+
+func loadReadPtr(b *C.BIO) *readBio {
+ return (*readBio)(unsafe.Pointer(b.ptr))
+}
+
+//export readBioRead
+func readBioRead(b *C.BIO, data *C.char, size C.int) (rc C.int) {
+ defer func() {
+ if err := recover(); err != nil {
+ logger.Critf("openssl: readBioRead panic'd: %v", err)
+ rc = -1
+ }
+ }()
+ ptr := loadReadPtr(b)
+ if ptr == nil || size < 0 {
+ return -1
+ }
+ ptr.data_mtx.Lock()
+ defer ptr.data_mtx.Unlock()
+ bioClearRetryFlags(b)
+ if len(ptr.buf) == 0 {
+ if ptr.eof {
+ return 0
+ }
+ bioSetRetryRead(b)
+ return -1
+ }
+ if size == 0 || data == nil {
+ return C.int(len(ptr.buf))
+ }
+ n := copy(nonCopyCString(data, size), ptr.buf)
+ ptr.buf = ptr.buf[:copy(ptr.buf, ptr.buf[n:])]
+ if ptr.release_buffers && len(ptr.buf) == 0 {
+ ptr.buf = nil
+ }
+ return C.int(n)
+}
+
+//export readBioCtrl
+func readBioCtrl(b *C.BIO, cmd C.int, arg1 C.long, arg2 unsafe.Pointer) (
+ rc C.long) {
+
+ defer func() {
+ if err := recover(); err != nil {
+ logger.Critf("openssl: readBioCtrl panic'd: %v", err)
+ rc = -1
+ }
+ }()
+ switch cmd {
+ case C.BIO_CTRL_PENDING:
+ return readBioPending(b)
+ case C.BIO_CTRL_DUP, C.BIO_CTRL_FLUSH:
+ return 1
+ default:
+ return 0
+ }
+}
+
+func readBioPending(b *C.BIO) C.long {
+ ptr := loadReadPtr(b)
+ if ptr == nil {
+ return 0
+ }
+ ptr.data_mtx.Lock()
+ defer ptr.data_mtx.Unlock()
+ return C.long(len(ptr.buf))
+}
+
+func (b *readBio) ReadFromOnce(r io.Reader) (n int, err error) {
+ b.op_mtx.Lock()
+ defer b.op_mtx.Unlock()
+
+ // make sure we have a destination that fits at least one SSL record
+ b.data_mtx.Lock()
+ if cap(b.buf) < len(b.buf)+SSLRecordSize {
+ new_buf := make([]byte, len(b.buf), len(b.buf)+SSLRecordSize)
+ copy(new_buf, b.buf)
+ b.buf = new_buf
+ }
+ dst := b.buf[len(b.buf):cap(b.buf)]
+ dst_slice := b.buf
+ b.data_mtx.Unlock()
+
+ n, err = r.Read(dst)
+ b.data_mtx.Lock()
+ defer b.data_mtx.Unlock()
+ if n > 0 {
+ if len(dst_slice) != len(b.buf) {
+			// someone shrank the buffer, so we read too far ahead and
+			// need to slide the new data backwards into place
+ copy(b.buf[len(b.buf):len(b.buf)+n], dst)
+ }
+ b.buf = b.buf[:len(b.buf)+n]
+ }
+ return n, err
+}
+
+func (b *readBio) MakeCBIO() *C.BIO {
+ rv := C.BIO_new(C.BIO_s_readBio())
+ rv.ptr = unsafe.Pointer(b)
+ return rv
+}
+
+func (self *readBio) Disconnect(b *C.BIO) {
+ if loadReadPtr(b) == self {
+ b.ptr = nil
+ }
+}
+
+func (b *readBio) MarkEOF() {
+ b.data_mtx.Lock()
+ defer b.data_mtx.Unlock()
+ b.eof = true
+}
+
+type anyBio C.BIO
+
+func asAnyBio(b *C.BIO) *anyBio { return (*anyBio)(b) }
+
+func (b *anyBio) Read(buf []byte) (n int, err error) {
+ if len(buf) == 0 {
+ return 0, nil
+ }
+ n = int(C.BIO_read((*C.BIO)(b), unsafe.Pointer(&buf[0]), C.int(len(buf))))
+ if n <= 0 {
+ return 0, io.EOF
+ }
+ return n, nil
+}
+
+func (b *anyBio) Write(buf []byte) (written int, err error) {
+ if len(buf) == 0 {
+ return 0, nil
+ }
+ n := int(C.BIO_write((*C.BIO)(b), unsafe.Pointer(&buf[0]),
+ C.int(len(buf))))
+ if n != len(buf) {
+ return n, errors.New("BIO write failed")
+ }
+ return n, nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/build.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/build.go
new file mode 100644
index 00000000000..dd72651d3ea
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/build.go
@@ -0,0 +1,23 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+// #cgo linux pkg-config: openssl
+// #cgo windows CFLAGS: -DWIN32_LEAN_AND_MEAN
+// #cgo darwin CFLAGS: -Wno-deprecated-declarations
+// #cgo darwin LDFLAGS: -lssl -lcrypto
+import "C"
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/cert.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/cert.go
new file mode 100644
index 00000000000..61637c649fa
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/cert.go
@@ -0,0 +1,407 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+// #include <openssl/conf.h>
+// #include <openssl/ssl.h>
+// #include <openssl/x509v3.h>
+//
+// void OPENSSL_free_not_a_macro(void *ref) { OPENSSL_free(ref); }
+//
+import "C"
+
+import (
+ "errors"
+ "io/ioutil"
+ "math/big"
+ "runtime"
+ "time"
+ "unsafe"
+)
+
+type EVP_MD int
+
+const (
+ EVP_NULL EVP_MD = iota
+ EVP_MD5 EVP_MD = iota
+ EVP_SHA EVP_MD = iota
+ EVP_SHA1 EVP_MD = iota
+ EVP_DSS EVP_MD = iota
+ EVP_DSS1 EVP_MD = iota
+ EVP_MDC2 EVP_MD = iota
+ EVP_RIPEMD160 EVP_MD = iota
+ EVP_SHA224 EVP_MD = iota
+ EVP_SHA256 EVP_MD = iota
+ EVP_SHA384 EVP_MD = iota
+ EVP_SHA512 EVP_MD = iota
+)
+
+type Certificate struct {
+ x *C.X509
+ Issuer *Certificate
+ ref interface{}
+ pubKey PublicKey
+}
+
+type CertificateInfo struct {
+ Serial *big.Int
+ Issued time.Duration
+ Expires time.Duration
+ Country string
+ Organization string
+ CommonName string
+}
+
+type Name struct {
+ name *C.X509_NAME
+}
+
+// Allocate and return a new Name object.
+func NewName() (*Name, error) {
+ n := C.X509_NAME_new()
+ if n == nil {
+ return nil, errors.New("could not create x509 name")
+ }
+ name := &Name{name: n}
+ runtime.SetFinalizer(name, func(n *Name) {
+ C.X509_NAME_free(n.name)
+ })
+ return name, nil
+}
+
+// AddTextEntry appends a text entry to an X509 NAME.
+func (n *Name) AddTextEntry(field, value string) error {
+ cfield := C.CString(field)
+ defer C.free(unsafe.Pointer(cfield))
+ cvalue := (*C.uchar)(unsafe.Pointer(C.CString(value)))
+ defer C.free(unsafe.Pointer(cvalue))
+ ret := C.X509_NAME_add_entry_by_txt(
+ n.name, cfield, C.MBSTRING_ASC, cvalue, -1, -1, 0)
+ if ret != 1 {
+ return errors.New("failed to add x509 name text entry")
+ }
+ return nil
+}
+
+// AddTextEntries allows adding multiple entries to a name in one call.
+func (n *Name) AddTextEntries(entries map[string]string) error {
+ for f, v := range entries {
+ if err := n.AddTextEntry(f, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetEntry returns a name entry based on NID. If no entry is found,
+// ("", false) is returned.
+func (n *Name) GetEntry(nid NID) (entry string, ok bool) {
+ entrylen := C.X509_NAME_get_text_by_NID(n.name, C.int(nid), nil, 0)
+ if entrylen == -1 {
+ return "", false
+ }
+ buf := (*C.char)(C.malloc(C.size_t(entrylen + 1)))
+ defer C.free(unsafe.Pointer(buf))
+ C.X509_NAME_get_text_by_NID(n.name, C.int(nid), buf, entrylen+1)
+ return C.GoStringN(buf, entrylen), true
+}
+
+// NewCertificate generates a basic certificate based
+// on the provided CertificateInfo struct.
+func NewCertificate(info *CertificateInfo, key PublicKey) (*Certificate, error) {
+ c := &Certificate{x: C.X509_new()}
+ runtime.SetFinalizer(c, func(c *Certificate) {
+ C.X509_free(c.x)
+ })
+
+ name, err := c.GetSubjectName()
+ if err != nil {
+ return nil, err
+ }
+ err = name.AddTextEntries(map[string]string{
+ "C": info.Country,
+ "O": info.Organization,
+ "CN": info.CommonName,
+ })
+ if err != nil {
+ return nil, err
+ }
+ // self-issue for now
+ if err := c.SetIssuerName(name); err != nil {
+ return nil, err
+ }
+ if err := c.SetSerial(info.Serial); err != nil {
+ return nil, err
+ }
+ if err := c.SetIssueDate(info.Issued); err != nil {
+ return nil, err
+ }
+ if err := c.SetExpireDate(info.Expires); err != nil {
+ return nil, err
+ }
+ if err := c.SetPubKey(key); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
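+
+// exampleNewCertificate is a hedged usage sketch mirroring cert_test.go;
+// it is not part of the upstream API, and GenerateRSAKey is assumed to be
+// provided elsewhere in this package.
+func exampleNewCertificate() (*Certificate, error) {
+	key, err := GenerateRSAKey(2048)
+	if err != nil {
+		return nil, err
+	}
+	info := &CertificateInfo{
+		Serial:       big.NewInt(1),
+		Issued:       0,
+		Expires:      24 * time.Hour,
+		Country:      "US",
+		Organization: "Test",
+		CommonName:   "localhost",
+	}
+	cert, err := NewCertificate(info, key)
+	if err != nil {
+		return nil, err
+	}
+	// NewCertificate self-issues, so signing with the same key yields a
+	// self-signed certificate.
+	if err := cert.Sign(key, EVP_SHA256); err != nil {
+		return nil, err
+	}
+	return cert, nil
+}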
+
+func (c *Certificate) GetSubjectName() (*Name, error) {
+ n := C.X509_get_subject_name(c.x)
+ if n == nil {
+ return nil, errors.New("failed to get subject name")
+ }
+ return &Name{name: n}, nil
+}
+
+func (c *Certificate) GetIssuerName() (*Name, error) {
+ n := C.X509_get_issuer_name(c.x)
+ if n == nil {
+ return nil, errors.New("failed to get issuer name")
+ }
+ return &Name{name: n}, nil
+}
+
+func (c *Certificate) SetSubjectName(name *Name) error {
+ if C.X509_set_subject_name(c.x, name.name) != 1 {
+ return errors.New("failed to set subject name")
+ }
+ return nil
+}
+
+// SetIssuer updates the stored Issuer cert
+// and the internal x509 Issuer Name of a certificate.
+// The stored Issuer reference is used when adding extensions.
+func (c *Certificate) SetIssuer(issuer *Certificate) error {
+ name, err := issuer.GetSubjectName()
+ if err != nil {
+ return err
+ }
+ if err = c.SetIssuerName(name); err != nil {
+ return err
+ }
+ c.Issuer = issuer
+ return nil
+}
+
+// SetIssuerName populates the issuer name of a certificate.
+// Use SetIssuer instead, if possible.
+func (c *Certificate) SetIssuerName(name *Name) error {
+ if C.X509_set_issuer_name(c.x, name.name) != 1 {
+		return errors.New("failed to set issuer name")
+ }
+ return nil
+}
+
+// SetSerial sets the serial of a certificate.
+func (c *Certificate) SetSerial(serial *big.Int) error {
+ sno := C.ASN1_INTEGER_new()
+ defer C.ASN1_INTEGER_free(sno)
+ bn := C.BN_new()
+ defer C.BN_free(bn)
+
+ serialBytes := serial.Bytes()
+ if bn = C.BN_bin2bn((*C.uchar)(unsafe.Pointer(&serialBytes[0])), C.int(len(serialBytes)), bn); bn == nil {
+ return errors.New("failed to set serial")
+ }
+ if sno = C.BN_to_ASN1_INTEGER(bn, sno); sno == nil {
+ return errors.New("failed to set serial")
+ }
+ if C.X509_set_serialNumber(c.x, sno) != 1 {
+ return errors.New("failed to set serial")
+ }
+ return nil
+}
+
+// SetIssueDate sets the certificate issue date relative to the current time.
+func (c *Certificate) SetIssueDate(when time.Duration) error {
+ offset := C.long(when / time.Second)
+ result := C.X509_gmtime_adj(c.x.cert_info.validity.notBefore, offset)
+ if result == nil {
+ return errors.New("failed to set issue date")
+ }
+ return nil
+}
+
+// SetExpireDate sets the certificate expiry date relative to the current time.
+func (c *Certificate) SetExpireDate(when time.Duration) error {
+ offset := C.long(when / time.Second)
+ result := C.X509_gmtime_adj(c.x.cert_info.validity.notAfter, offset)
+ if result == nil {
+ return errors.New("failed to set expire date")
+ }
+ return nil
+}
+
+// SetPubKey assigns a new public key to a certificate.
+func (c *Certificate) SetPubKey(pubKey PublicKey) error {
+ c.pubKey = pubKey
+ if C.X509_set_pubkey(c.x, pubKey.evpPKey()) != 1 {
+ return errors.New("failed to set public key")
+ }
+ return nil
+}
+
+// Sign a certificate using a private key and a digest. Accepted digests
+// are EVP_SHA256, EVP_SHA384, and EVP_SHA512.
+func (c *Certificate) Sign(privKey PrivateKey, digest EVP_MD) error {
+ switch digest {
+ case EVP_SHA256:
+ case EVP_SHA384:
+ case EVP_SHA512:
+ default:
+		return errors.New("Unsupported digest. " +
+			"You're probably looking for 'EVP_SHA256' or 'EVP_SHA512'.")
+ }
+ return c.insecureSign(privKey, digest)
+}
+
+func (c *Certificate) insecureSign(privKey PrivateKey, digest EVP_MD) error {
+ var md *C.EVP_MD
+ switch digest {
+ // please don't use these digest functions
+ case EVP_NULL:
+ md = C.EVP_md_null()
+ case EVP_MD5:
+ md = C.EVP_md5()
+ case EVP_SHA:
+ md = C.EVP_sha()
+ case EVP_SHA1:
+ md = C.EVP_sha1()
+ case EVP_DSS:
+ md = C.EVP_dss()
+ case EVP_DSS1:
+ md = C.EVP_dss1()
+ case EVP_RIPEMD160:
+ md = C.EVP_ripemd160()
+ case EVP_SHA224:
+ md = C.EVP_sha224()
+ // you actually want one of these
+ case EVP_SHA256:
+ md = C.EVP_sha256()
+ case EVP_SHA384:
+ md = C.EVP_sha384()
+ case EVP_SHA512:
+ md = C.EVP_sha512()
+ }
+ if C.X509_sign(c.x, privKey.evpPKey(), md) <= 0 {
+ return errors.New("failed to sign certificate")
+ }
+ return nil
+}
+
+// Add an extension to a certificate.
+// Extension constants are NID_* as found in openssl.
+func (c *Certificate) AddExtension(nid NID, value string) error {
+ issuer := c
+ if c.Issuer != nil {
+ issuer = c.Issuer
+ }
+ var ctx C.X509V3_CTX
+ C.X509V3_set_ctx(&ctx, c.x, issuer.x, nil, nil, 0)
+ ex := C.X509V3_EXT_conf_nid(nil, &ctx, C.int(nid), C.CString(value))
+ if ex == nil {
+ return errors.New("failed to create x509v3 extension")
+ }
+ defer C.X509_EXTENSION_free(ex)
+ if C.X509_add_ext(c.x, ex, -1) <= 0 {
+ return errors.New("failed to add x509v3 extension")
+ }
+ return nil
+}
+
+// AddExtensions wraps AddExtension using a map of NID to text extension.
+// It returns early if it encounters an error.
+func (c *Certificate) AddExtensions(extensions map[NID]string) error {
+ for nid, value := range extensions {
+ if err := c.AddExtension(nid, value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// LoadCertificateFromPEM loads an X509 certificate from a PEM-encoded block.
+func LoadCertificateFromPEM(pem_block []byte) (*Certificate, error) {
+ if len(pem_block) == 0 {
+ return nil, errors.New("empty pem block")
+ }
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]),
+ C.int(len(pem_block)))
+ cert := C.PEM_read_bio_X509(bio, nil, nil, nil)
+ C.BIO_free(bio)
+ if cert == nil {
+ return nil, errorFromErrorQueue()
+ }
+ x := &Certificate{x: cert}
+ runtime.SetFinalizer(x, func(x *Certificate) {
+ C.X509_free(x.x)
+ })
+ return x, nil
+}
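+
+// exampleLoadPEM is a hedged sketch (not upstream API) of the
+// load-and-inspect flow exercised in cert_test.go; NID_commonName is
+// assumed from this package's NID constants.
+func exampleLoadPEM(pemBlock []byte) (string, error) {
+	cert, err := LoadCertificateFromPEM(pemBlock)
+	if err != nil {
+		return "", err
+	}
+	name, err := cert.GetSubjectName()
+	if err != nil {
+		return "", err
+	}
+	// Pull the subject common name out of the X509 NAME.
+	cn, ok := name.GetEntry(NID_commonName)
+	if !ok {
+		return "", errors.New("no common name")
+	}
+	return cn, nil
+}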
+
+// MarshalPEM converts the X509 certificate to PEM-encoded format
+func (c *Certificate) MarshalPEM() (pem_block []byte, err error) {
+ bio := C.BIO_new(C.BIO_s_mem())
+ if bio == nil {
+ return nil, errors.New("failed to allocate memory BIO")
+ }
+ defer C.BIO_free(bio)
+ if int(C.PEM_write_bio_X509(bio, c.x)) != 1 {
+ return nil, errors.New("failed dumping certificate")
+ }
+ return ioutil.ReadAll(asAnyBio(bio))
+}
+
+// PublicKey returns the public key embedded in the X509 certificate.
+func (c *Certificate) PublicKey() (PublicKey, error) {
+ pkey := C.X509_get_pubkey(c.x)
+ if pkey == nil {
+ return nil, errors.New("no public key found")
+ }
+ key := &pKey{key: pkey}
+ runtime.SetFinalizer(key, func(key *pKey) {
+ C.EVP_PKEY_free(key.key)
+ })
+ return key, nil
+}
+
+// GetSerialNumberHex returns the certificate's serial number in hex format
+func (c *Certificate) GetSerialNumberHex() (serial string) {
+ asn1_i := C.X509_get_serialNumber(c.x)
+ bignum := C.ASN1_INTEGER_to_BN(asn1_i, nil)
+ hex := C.BN_bn2hex(bignum)
+ serial = C.GoString(hex)
+ C.BN_free(bignum)
+ C.OPENSSL_free_not_a_macro(unsafe.Pointer(hex))
+ return
+}
+
+func (c *Certificate) X509NamePrintEx() (out []byte, err error) {
+ bio := C.BIO_new(C.BIO_s_mem())
+ if bio == nil {
+ return nil, errors.New("failed to allocate memory BIO")
+ }
+ defer C.BIO_free(bio)
+ name := C.X509_get_subject_name(c.x)
+	// TODO: pass in flags instead of using this hardcoded one
+ if int(C.X509_NAME_print_ex(bio, name, 0, C.XN_FLAG_RFC2253)) < 0 {
+ return nil, errors.New("failed formatting subject")
+ }
+ return ioutil.ReadAll(asAnyBio(bio))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/cert_test.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/cert_test.go
new file mode 100644
index 00000000000..c32883ba4eb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/cert_test.go
@@ -0,0 +1,139 @@
+// Copyright (C) 2014 Ryan Hileman
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openssl
+
+import (
+ "math/big"
+ "testing"
+ "time"
+)
+
+func TestCertGenerate(t *testing.T) {
+ key, err := GenerateRSAKey(2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ info := &CertificateInfo{
+ Serial: big.NewInt(int64(1)),
+ Issued: 0,
+ Expires: 24 * time.Hour,
+ Country: "US",
+ Organization: "Test",
+ CommonName: "localhost",
+ }
+ cert, err := NewCertificate(info, key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := cert.Sign(key, EVP_SHA256); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestCAGenerate(t *testing.T) {
+ cakey, err := GenerateRSAKey(2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ info := &CertificateInfo{
+ Serial: big.NewInt(int64(1)),
+ Issued: 0,
+ Expires: 24 * time.Hour,
+ Country: "US",
+ Organization: "Test CA",
+ CommonName: "CA",
+ }
+ ca, err := NewCertificate(info, cakey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := ca.AddExtensions(map[NID]string{
+ NID_basic_constraints: "critical,CA:TRUE",
+ NID_key_usage: "critical,keyCertSign,cRLSign",
+ NID_subject_key_identifier: "hash",
+ NID_netscape_cert_type: "sslCA",
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := ca.Sign(cakey, EVP_SHA256); err != nil {
+ t.Fatal(err)
+ }
+ key, err := GenerateRSAKey(2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ info = &CertificateInfo{
+ Serial: big.NewInt(int64(1)),
+ Issued: 0,
+ Expires: 24 * time.Hour,
+ Country: "US",
+ Organization: "Test",
+ CommonName: "localhost",
+ }
+ cert, err := NewCertificate(info, key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := cert.AddExtensions(map[NID]string{
+ NID_basic_constraints: "critical,CA:FALSE",
+ NID_key_usage: "keyEncipherment",
+ NID_ext_key_usage: "serverAuth",
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := cert.SetIssuer(ca); err != nil {
+ t.Fatal(err)
+ }
+ if err := cert.Sign(cakey, EVP_SHA256); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestCertGetNameEntry(t *testing.T) {
+ key, err := GenerateRSAKey(2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ info := &CertificateInfo{
+ Serial: big.NewInt(int64(1)),
+ Issued: 0,
+ Expires: 24 * time.Hour,
+ Country: "US",
+ Organization: "Test",
+ CommonName: "localhost",
+ }
+ cert, err := NewCertificate(info, key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ name, err := cert.GetSubjectName()
+ if err != nil {
+ t.Fatal(err)
+ }
+ entry, ok := name.GetEntry(NID_commonName)
+ if !ok {
+ t.Fatal("no common name")
+ }
+ if entry != "localhost" {
+ t.Fatalf("expected localhost; got %q", entry)
+ }
+ entry, ok = name.GetEntry(NID_localityName)
+ if ok {
+ t.Fatal("did not expect a locality name")
+ }
+ if entry != "" {
+ t.Fatalf("entry should be empty; got %q", entry)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ciphers.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ciphers.go
new file mode 100644
index 00000000000..12662707f54
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ciphers.go
@@ -0,0 +1,355 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+// #include <openssl/evp.h>
+//
+// int EVP_CIPHER_block_size_not_a_macro(EVP_CIPHER *c) {
+// return EVP_CIPHER_block_size(c);
+// }
+//
+// int EVP_CIPHER_key_length_not_a_macro(EVP_CIPHER *c) {
+// return EVP_CIPHER_key_length(c);
+// }
+//
+// int EVP_CIPHER_iv_length_not_a_macro(EVP_CIPHER *c) {
+// return EVP_CIPHER_iv_length(c);
+// }
+//
+// int EVP_CIPHER_nid_not_a_macro(EVP_CIPHER *c) {
+// return EVP_CIPHER_nid(c);
+// }
+//
+// int EVP_CIPHER_CTX_block_size_not_a_macro(EVP_CIPHER_CTX *ctx) {
+// return EVP_CIPHER_CTX_block_size(ctx);
+// }
+//
+// int EVP_CIPHER_CTX_key_length_not_a_macro(EVP_CIPHER_CTX *ctx) {
+// return EVP_CIPHER_CTX_key_length(ctx);
+// }
+//
+// int EVP_CIPHER_CTX_iv_length_not_a_macro(EVP_CIPHER_CTX *ctx) {
+// return EVP_CIPHER_CTX_iv_length(ctx);
+// }
+//
+// const EVP_CIPHER *EVP_CIPHER_CTX_cipher_not_a_macro(EVP_CIPHER_CTX *ctx) {
+// return EVP_CIPHER_CTX_cipher(ctx);
+// }
+import "C"
+
+import (
+ "errors"
+ "fmt"
+ "runtime"
+ "unsafe"
+)
+
+const (
+ GCM_TAG_MAXLEN = 16
+)
+
+type CipherCtx interface {
+ Cipher() *Cipher
+ BlockSize() int
+ KeySize() int
+ IVSize() int
+}
+
+type Cipher struct {
+ ptr *C.EVP_CIPHER
+}
+
+func (c *Cipher) Nid() NID {
+ return NID(C.EVP_CIPHER_nid_not_a_macro(c.ptr))
+}
+
+func (c *Cipher) ShortName() (string, error) {
+ return Nid2ShortName(c.Nid())
+}
+
+func (c *Cipher) BlockSize() int {
+ return int(C.EVP_CIPHER_block_size_not_a_macro(c.ptr))
+}
+
+func (c *Cipher) KeySize() int {
+ return int(C.EVP_CIPHER_key_length_not_a_macro(c.ptr))
+}
+
+func (c *Cipher) IVSize() int {
+ return int(C.EVP_CIPHER_iv_length_not_a_macro(c.ptr))
+}
+
+func Nid2ShortName(nid NID) (string, error) {
+ sn := C.OBJ_nid2sn(C.int(nid))
+ if sn == nil {
+ return "", fmt.Errorf("NID %d not found", nid)
+ }
+ return C.GoString(sn), nil
+}
+
+func GetCipherByName(name string) (*Cipher, error) {
+ cname := C.CString(name)
+ defer C.free(unsafe.Pointer(cname))
+ p := C.EVP_get_cipherbyname(cname)
+ if p == nil {
+ return nil, fmt.Errorf("Cipher %v not found", name)
+ }
+ // we can consider ciphers to use static mem; don't need to free
+ return &Cipher{ptr: p}, nil
+}
+
+func GetCipherByNid(nid NID) (*Cipher, error) {
+ sn, err := Nid2ShortName(nid)
+ if err != nil {
+ return nil, err
+ }
+ return GetCipherByName(sn)
+}
+
+type cipherCtx struct {
+ ctx *C.EVP_CIPHER_CTX
+}
+
+func newCipherCtx() (*cipherCtx, error) {
+ cctx := C.EVP_CIPHER_CTX_new()
+ if cctx == nil {
+ return nil, errors.New("failed to allocate cipher context")
+ }
+ ctx := &cipherCtx{cctx}
+ runtime.SetFinalizer(ctx, func(ctx *cipherCtx) {
+ C.EVP_CIPHER_CTX_free(ctx.ctx)
+ })
+ return ctx, nil
+}
+
+func (ctx *cipherCtx) applyKeyAndIV(key, iv []byte) error {
+ var kptr, iptr *C.uchar
+ if key != nil {
+ if len(key) != ctx.KeySize() {
+ return fmt.Errorf("bad key size (%d bytes instead of %d)",
+ len(key), ctx.KeySize())
+ }
+ kptr = (*C.uchar)(&key[0])
+ }
+ if iv != nil {
+ if len(iv) != ctx.IVSize() {
+ return fmt.Errorf("bad IV size (%d bytes instead of %d)",
+ len(iv), ctx.IVSize())
+ }
+ iptr = (*C.uchar)(&iv[0])
+ }
+ if kptr != nil || iptr != nil {
+ var res C.int
+ if ctx.ctx.encrypt != 0 {
+ res = C.EVP_EncryptInit_ex(ctx.ctx, nil, nil, kptr, iptr)
+ } else {
+ res = C.EVP_DecryptInit_ex(ctx.ctx, nil, nil, kptr, iptr)
+ }
+ if 1 != res {
+ return errors.New("failed to apply key/IV")
+ }
+ }
+ return nil
+}
+
+func (ctx *cipherCtx) Cipher() *Cipher {
+ return &Cipher{ptr: C.EVP_CIPHER_CTX_cipher_not_a_macro(ctx.ctx)}
+}
+
+func (ctx *cipherCtx) BlockSize() int {
+ return int(C.EVP_CIPHER_CTX_block_size_not_a_macro(ctx.ctx))
+}
+
+func (ctx *cipherCtx) KeySize() int {
+ return int(C.EVP_CIPHER_CTX_key_length_not_a_macro(ctx.ctx))
+}
+
+func (ctx *cipherCtx) IVSize() int {
+ return int(C.EVP_CIPHER_CTX_iv_length_not_a_macro(ctx.ctx))
+}
+
+func (ctx *cipherCtx) setCtrl(code, arg int) error {
+ res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg), nil)
+ if res != 1 {
+ return fmt.Errorf("failed to set code %d to %d [result %d]",
+ code, arg, res)
+ }
+ return nil
+}
+
+func (ctx *cipherCtx) setCtrlBytes(code, arg int, value []byte) error {
+ res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg),
+ unsafe.Pointer(&value[0]))
+ if res != 1 {
+ return fmt.Errorf("failed to set code %d with arg %d to %x [result %d]",
+ code, arg, value, res)
+ }
+ return nil
+}
+
+func (ctx *cipherCtx) getCtrlInt(code, arg int) (int, error) {
+ var returnVal C.int
+ res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg),
+ unsafe.Pointer(&returnVal))
+ if res != 1 {
+ return 0, fmt.Errorf("failed to get code %d with arg %d [result %d]",
+ code, arg, res)
+ }
+ return int(returnVal), nil
+}
+
+func (ctx *cipherCtx) getCtrlBytes(code, arg, expectsize int) ([]byte, error) {
+ returnVal := make([]byte, expectsize)
+ res := C.EVP_CIPHER_CTX_ctrl(ctx.ctx, C.int(code), C.int(arg),
+ unsafe.Pointer(&returnVal[0]))
+ if res != 1 {
+ return nil, fmt.Errorf("failed to get code %d with arg %d [result %d]",
+ code, arg, res)
+ }
+ return returnVal, nil
+}
+
+type EncryptionCipherCtx interface {
+ CipherCtx
+
+ // pass in plaintext, get back ciphertext. can be called
+ // multiple times as needed
+ EncryptUpdate(input []byte) ([]byte, error)
+
+ // call after all plaintext has been passed in; may return
+ // additional ciphertext if needed to finish off a block
+ // or extra padding information
+ EncryptFinal() ([]byte, error)
+}
+
+type DecryptionCipherCtx interface {
+ CipherCtx
+
+ // pass in ciphertext, get back plaintext. can be called
+ // multiple times as needed
+ DecryptUpdate(input []byte) ([]byte, error)
+
+ // call after all ciphertext has been passed in; may return
+ // additional plaintext if needed to finish off a block
+ DecryptFinal() ([]byte, error)
+}
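+
+// exampleCBCEncrypt is a hedged sketch (not upstream API) of the
+// update/final flow described above, looking the cipher up by name as
+// ciphers_test.go does. For AES-256-CBC, key must be 32 bytes and iv 16.
+func exampleCBCEncrypt(key, iv, plaintext []byte) ([]byte, error) {
+	cipher, err := GetCipherByName("aes-256-cbc")
+	if err != nil {
+		return nil, err
+	}
+	ctx, err := NewEncryptionCipherCtx(cipher, nil, key, iv)
+	if err != nil {
+		return nil, err
+	}
+	// EncryptUpdate may be called repeatedly on successive chunks.
+	out, err := ctx.EncryptUpdate(plaintext)
+	if err != nil {
+		return nil, err
+	}
+	// EncryptFinal pads and flushes whatever remains of the last block.
+	final, err := ctx.EncryptFinal()
+	if err != nil {
+		return nil, err
+	}
+	return append(out, final...), nil
+}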
+
+type encryptionCipherCtx struct {
+ *cipherCtx
+}
+
+type decryptionCipherCtx struct {
+ *cipherCtx
+}
+
+func newEncryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) (
+ *encryptionCipherCtx, error) {
+ if c == nil {
+ return nil, errors.New("null cipher not allowed")
+ }
+ ctx, err := newCipherCtx()
+ if err != nil {
+ return nil, err
+ }
+ var eptr *C.ENGINE
+ if e != nil {
+ eptr = e.e
+ }
+ if 1 != C.EVP_EncryptInit_ex(ctx.ctx, c.ptr, eptr, nil, nil) {
+ return nil, errors.New("failed to initialize cipher context")
+ }
+ err = ctx.applyKeyAndIV(key, iv)
+ if err != nil {
+ return nil, err
+ }
+ return &encryptionCipherCtx{cipherCtx: ctx}, nil
+}
+
+func newDecryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) (
+ *decryptionCipherCtx, error) {
+ if c == nil {
+ return nil, errors.New("null cipher not allowed")
+ }
+ ctx, err := newCipherCtx()
+ if err != nil {
+ return nil, err
+ }
+ var eptr *C.ENGINE
+ if e != nil {
+ eptr = e.e
+ }
+ if 1 != C.EVP_DecryptInit_ex(ctx.ctx, c.ptr, eptr, nil, nil) {
+ return nil, errors.New("failed to initialize cipher context")
+ }
+ err = ctx.applyKeyAndIV(key, iv)
+ if err != nil {
+ return nil, err
+ }
+ return &decryptionCipherCtx{cipherCtx: ctx}, nil
+}
+
+func NewEncryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) (
+ EncryptionCipherCtx, error) {
+ return newEncryptionCipherCtx(c, e, key, iv)
+}
+
+func NewDecryptionCipherCtx(c *Cipher, e *Engine, key, iv []byte) (
+ DecryptionCipherCtx, error) {
+ return newDecryptionCipherCtx(c, e, key, iv)
+}
+
+func (ctx *encryptionCipherCtx) EncryptUpdate(input []byte) ([]byte, error) {
+ outbuf := make([]byte, len(input)+ctx.BlockSize())
+ outlen := C.int(len(outbuf))
+ res := C.EVP_EncryptUpdate(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen,
+ (*C.uchar)(&input[0]), C.int(len(input)))
+ if res != 1 {
+ return nil, fmt.Errorf("failed to encrypt [result %d]", res)
+ }
+ return outbuf[:outlen], nil
+}
+
+func (ctx *decryptionCipherCtx) DecryptUpdate(input []byte) ([]byte, error) {
+ outbuf := make([]byte, len(input)+ctx.BlockSize())
+ outlen := C.int(len(outbuf))
+ res := C.EVP_DecryptUpdate(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen,
+ (*C.uchar)(&input[0]), C.int(len(input)))
+ if res != 1 {
+ return nil, fmt.Errorf("failed to decrypt [result %d]", res)
+ }
+ return outbuf[:outlen], nil
+}
+
+func (ctx *encryptionCipherCtx) EncryptFinal() ([]byte, error) {
+ outbuf := make([]byte, ctx.BlockSize())
+ var outlen C.int
+ if 1 != C.EVP_EncryptFinal_ex(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen) {
+ return nil, errors.New("encryption failed")
+ }
+ return outbuf[:outlen], nil
+}
+
+func (ctx *decryptionCipherCtx) DecryptFinal() ([]byte, error) {
+ outbuf := make([]byte, ctx.BlockSize())
+ var outlen C.int
+ if 1 != C.EVP_DecryptFinal_ex(ctx.ctx, (*C.uchar)(&outbuf[0]), &outlen) {
+		// this may mean the tag failed to verify; all previously
+		// returned plaintext must be considered faked and invalid
+ return nil, errors.New("decryption failed")
+ }
+ return outbuf[:outlen], nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ciphers_test.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ciphers_test.go
new file mode 100644
index 00000000000..d1d430b1e15
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ciphers_test.go
@@ -0,0 +1,307 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !darwin
+
+package openssl
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func expectError(t *testing.T, err error, msg string) {
+ if err == nil {
+ t.Fatalf("Expected error containing %#v, but got none", msg)
+ }
+ if !strings.Contains(err.Error(), msg) {
+ t.Fatalf("Expected error containing %#v, but got %s", msg, err)
+ }
+}
+
+func TestBadInputs(t *testing.T) {
+ _, err := NewGCMEncryptionCipherCtx(256, nil,
+ []byte("abcdefghijklmnopqrstuvwxyz"), nil)
+ expectError(t, err, "bad key size")
+ _, err = NewGCMEncryptionCipherCtx(128, nil,
+ []byte("abcdefghijklmnopqrstuvwxyz"), nil)
+ expectError(t, err, "bad key size")
+ _, err = NewGCMEncryptionCipherCtx(200, nil,
+ []byte("abcdefghijklmnopqrstuvwxy"), nil)
+ expectError(t, err, "unknown block size")
+ c, err := GetCipherByName("AES-128-CBC")
+ if err != nil {
+ t.Fatal("Could not look up AES-128-CBC")
+ }
+ _, err = NewEncryptionCipherCtx(c, nil, []byte("abcdefghijklmnop"),
+ []byte("abc"))
+ expectError(t, err, "bad IV size")
+}
+
+func doEncryption(key, iv, aad, plaintext []byte, blocksize, bufsize int) (
+ ciphertext, tag []byte, err error) {
+ ectx, err := NewGCMEncryptionCipherCtx(blocksize, nil, key, iv)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Failed making GCM encryption ctx: %s", err)
+ }
+ err = ectx.ExtraData(aad)
+ if err != nil {
+ return nil, nil, fmt.Errorf("Failed to add authenticated data: %s",
+ err)
+ }
+ plainb := bytes.NewBuffer(plaintext)
+ cipherb := new(bytes.Buffer)
+ for plainb.Len() > 0 {
+ moar, err := ectx.EncryptUpdate(plainb.Next(bufsize))
+ if err != nil {
+ return nil, nil, fmt.Errorf("Failed to perform an encryption: %s",
+ err)
+ }
+ cipherb.Write(moar)
+ }
+ moar, err := ectx.EncryptFinal()
+ if err != nil {
+ return nil, nil, fmt.Errorf("Failed to finalize encryption: %s", err)
+ }
+ cipherb.Write(moar)
+ tag, err = ectx.GetTag()
+ if err != nil {
+ return nil, nil, fmt.Errorf("Failed to get GCM tag: %s", err)
+ }
+ return cipherb.Bytes(), tag, nil
+}
+
+func doDecryption(key, iv, aad, ciphertext, tag []byte, blocksize,
+ bufsize int) (plaintext []byte, err error) {
+ dctx, err := NewGCMDecryptionCipherCtx(blocksize, nil, key, iv)
+ if err != nil {
+ return nil, fmt.Errorf("Failed making GCM decryption ctx: %s", err)
+ }
+ aadbuf := bytes.NewBuffer(aad)
+ for aadbuf.Len() > 0 {
+ err = dctx.ExtraData(aadbuf.Next(bufsize))
+ if err != nil {
+ return nil, fmt.Errorf("Failed to add authenticated data: %s", err)
+ }
+ }
+ plainb := new(bytes.Buffer)
+ cipherb := bytes.NewBuffer(ciphertext)
+ for cipherb.Len() > 0 {
+ moar, err := dctx.DecryptUpdate(cipherb.Next(bufsize))
+ if err != nil {
+ return nil, fmt.Errorf("Failed to perform a decryption: %s", err)
+ }
+ plainb.Write(moar)
+ }
+ err = dctx.SetTag(tag)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to set expected GCM tag: %s", err)
+ }
+ moar, err := dctx.DecryptFinal()
+ if err != nil {
+ return nil, fmt.Errorf("Failed to finalize decryption: %s", err)
+ }
+ plainb.Write(moar)
+ return plainb.Bytes(), nil
+}
+
+func checkEqual(t *testing.T, output []byte, original string) {
+ output_s := string(output)
+ if output_s != original {
+ t.Fatalf("output != original! %#v != %#v", output_s, original)
+ }
+}
+
+func TestGCM(t *testing.T) {
+ aad := []byte("foo bar baz")
+ key := []byte("nobody can guess this i'm sure..") // len=32
+ iv := []byte("just a bunch of bytes")
+ plaintext := "Long long ago, in a land far away..."
+
+ blocksizes_to_test := []int{256, 192, 128}
+
+ // best for this to have no common factors with blocksize, so that the
+ // buffering layer inside the CIPHER_CTX gets exercised
+ bufsize := 33
+
+ if len(plaintext)%8 == 0 {
+ plaintext += "!" // make sure padding is exercised
+ }
+
+ for _, bsize := range blocksizes_to_test {
+ subkey := key[:bsize/8]
+ ciphertext, tag, err := doEncryption(subkey, iv, aad, []byte(plaintext),
+ bsize, bufsize)
+ if err != nil {
+ t.Fatalf("Encryption with b=%d: %s", bsize, err)
+ }
+ plaintext_out, err := doDecryption(subkey, iv, aad, ciphertext, tag,
+ bsize, bufsize)
+ if err != nil {
+ t.Fatalf("Decryption with b=%d: %s", bsize, err)
+ }
+ checkEqual(t, plaintext_out, plaintext)
+ }
+}
+
+func TestGCMWithNoAAD(t *testing.T) {
+ key := []byte("0000111122223333")
+ iv := []byte("9999")
+ plaintext := "ABORT ABORT ABORT DANGAR"
+
+ ciphertext, tag, err := doEncryption(key, iv, nil, []byte(plaintext),
+ 128, 32)
+ if err != nil {
+ t.Fatal("Encryption failure:", err)
+ }
+ plaintext_out, err := doDecryption(key, iv, nil, ciphertext, tag, 128, 129)
+ if err != nil {
+ t.Fatal("Decryption failure:", err)
+ }
+ checkEqual(t, plaintext_out, plaintext)
+}
+
+func TestBadTag(t *testing.T) {
+ key := []byte("abcdefghijklmnop")
+ iv := []byte("v7239qjfv3qr793fuaj")
+ plaintext := "The red rooster has flown the coop I REPEAT" +
+ "the red rooster has flown the coop!!1!"
+
+ ciphertext, tag, err := doEncryption(key, iv, nil, []byte(plaintext),
+ 128, 32)
+ if err != nil {
+ t.Fatal("Encryption failure:", err)
+ }
+ // flip the last bit
+ tag[len(tag)-1] ^= 1
+ plaintext_out, err := doDecryption(key, iv, nil, ciphertext, tag, 128, 129)
+ if err == nil {
+ t.Fatal("Expected error for bad tag, but got none")
+ }
+ // flip it back, try again just to make sure
+ tag[len(tag)-1] ^= 1
+ plaintext_out, err = doDecryption(key, iv, nil, ciphertext, tag, 128, 129)
+ if err != nil {
+ t.Fatal("Decryption failure:", err)
+ }
+ checkEqual(t, plaintext_out, plaintext)
+}
+
+func TestBadCiphertext(t *testing.T) {
+ key := []byte("hard boiled eggs & bacon")
+ iv := []byte("x") // it's not a very /good/ IV, is it
+ aad := []byte("mu")
+ plaintext := "Roger roger bingo charlie, we have a niner fourteen tango"
+
+ ciphertext, tag, err := doEncryption(key, iv, aad, []byte(plaintext),
+ 192, 1)
+ if err != nil {
+ t.Fatal("Encryption failure:", err)
+ }
+ // flip the last bit
+ ciphertext[len(ciphertext)-1] ^= 1
+ plaintext_out, err := doDecryption(key, iv, aad, ciphertext, tag, 192, 192)
+ if err == nil {
+ t.Fatal("Expected error for bad ciphertext, but got none")
+ }
+ // flip it back, try again just to make sure
+ ciphertext[len(ciphertext)-1] ^= 1
+ plaintext_out, err = doDecryption(key, iv, aad, ciphertext, tag, 192, 192)
+ if err != nil {
+ t.Fatal("Decryption failure:", err)
+ }
+ checkEqual(t, plaintext_out, plaintext)
+}
+
+func TestBadAAD(t *testing.T) {
+ key := []byte("Ive got a lovely buncha coconuts")
+ iv := []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab")
+ aad := []byte("Hi i am a plain")
+ plaintext := "Whatever."
+
+ ciphertext, tag, err := doEncryption(key, iv, aad, []byte(plaintext),
+ 256, 256)
+ if err != nil {
+ t.Fatal("Encryption failure:", err)
+ }
+ // flip the last bit
+ aad[len(aad)-1] ^= 1
+ plaintext_out, err := doDecryption(key, iv, aad, ciphertext, tag, 256, 256)
+ if err == nil {
+ t.Fatal("Expected error for bad AAD, but got none")
+ }
+ // flip it back, try again just to make sure
+ aad[len(aad)-1] ^= 1
+ plaintext_out, err = doDecryption(key, iv, aad, ciphertext, tag, 256, 256)
+ if err != nil {
+ t.Fatal("Decryption failure:", err)
+ }
+ checkEqual(t, plaintext_out, plaintext)
+}
+
+func TestNonAuthenticatedEncryption(t *testing.T) {
+ key := []byte("never gonna give you up, never g")
+ iv := []byte("onna let you dow")
+ plaintext1 := "n, never gonna run around"
+ plaintext2 := " and desert you"
+
+ cipher, err := GetCipherByName("aes-256-cbc")
+ if err != nil {
+ t.Fatal("Could not get cipher: ", err)
+ }
+
+ eCtx, err := NewEncryptionCipherCtx(cipher, nil, key, iv)
+ if err != nil {
+ t.Fatal("Could not create encryption context: ", err)
+ }
+ cipherbytes, err := eCtx.EncryptUpdate([]byte(plaintext1))
+ if err != nil {
+ t.Fatal("EncryptUpdate(plaintext1) failure: ", err)
+ }
+ ciphertext := string(cipherbytes)
+ cipherbytes, err = eCtx.EncryptUpdate([]byte(plaintext2))
+ if err != nil {
+ t.Fatal("EncryptUpdate(plaintext2) failure: ", err)
+ }
+ ciphertext += string(cipherbytes)
+ cipherbytes, err = eCtx.EncryptFinal()
+ if err != nil {
+ t.Fatal("EncryptFinal() failure: ", err)
+ }
+ ciphertext += string(cipherbytes)
+
+ dCtx, err := NewDecryptionCipherCtx(cipher, nil, key, iv)
+ if err != nil {
+ t.Fatal("Could not create decryption context: ", err)
+ }
+ plainbytes, err := dCtx.DecryptUpdate([]byte(ciphertext[:15]))
+ if err != nil {
+ t.Fatal("DecryptUpdate(ciphertext part 1) failure: ", err)
+ }
+ plainOutput := string(plainbytes)
+ plainbytes, err = dCtx.DecryptUpdate([]byte(ciphertext[15:]))
+ if err != nil {
+ t.Fatal("DecryptUpdate(ciphertext part 2) failure: ", err)
+ }
+ plainOutput += string(plainbytes)
+ plainbytes, err = dCtx.DecryptFinal()
+ if err != nil {
+ t.Fatal("DecryptFinal() failure: ", err)
+ }
+ plainOutput += string(plainbytes)
+
+ checkEqual(t, []byte(plainOutput), plaintext1+plaintext2)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/conn.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/conn.go
new file mode 100644
index 00000000000..afc73a50ae3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/conn.go
@@ -0,0 +1,625 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+/*
+#include <stdlib.h>
+#include <openssl/ssl.h>
+#include <openssl/conf.h>
+#include <openssl/err.h>
+
+int sk_X509_num_not_a_macro(STACK_OF(X509) *sk) { return sk_X509_num(sk); }
+X509 *sk_X509_value_not_a_macro(STACK_OF(X509)* sk, int i) {
+ return sk_X509_value(sk, i);
+}
+const char * SSL_get_cipher_name_not_a_macro(const SSL *ssl) {
+ return SSL_get_cipher_name(ssl);
+}
+static int SSL_session_reused_not_a_macro(SSL *ssl) {
+ return SSL_session_reused(ssl);
+}
+*/
+import "C"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "runtime"
+ "sync"
+ "time"
+ "unsafe"
+
+ "github.com/spacemonkeygo/openssl/utils"
+)
+
+var (
+ zeroReturn = errors.New("zero return")
+ wantRead = errors.New("want read")
+ wantWrite = errors.New("want write")
+ tryAgain = errors.New("try again")
+)
+
+type Conn struct {
+ conn net.Conn
+ ssl *C.SSL
+ ctx *Ctx // for gc
+ into_ssl *readBio
+ from_ssl *writeBio
+ is_shutdown bool
+ mtx sync.Mutex
+ want_read_future *utils.Future
+}
+
+type VerifyResult int
+
+const (
+ Ok VerifyResult = C.X509_V_OK
+ UnableToGetIssuerCert VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT
+ UnableToGetCrl VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_CRL
+ UnableToDecryptCertSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE
+ UnableToDecryptCrlSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE
+ UnableToDecodeIssuerPublicKey VerifyResult = C.X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY
+ CertSignatureFailure VerifyResult = C.X509_V_ERR_CERT_SIGNATURE_FAILURE
+ CrlSignatureFailure VerifyResult = C.X509_V_ERR_CRL_SIGNATURE_FAILURE
+ CertNotYetValid VerifyResult = C.X509_V_ERR_CERT_NOT_YET_VALID
+ CertHasExpired VerifyResult = C.X509_V_ERR_CERT_HAS_EXPIRED
+ CrlNotYetValid VerifyResult = C.X509_V_ERR_CRL_NOT_YET_VALID
+ CrlHasExpired VerifyResult = C.X509_V_ERR_CRL_HAS_EXPIRED
+ ErrorInCertNotBeforeField VerifyResult = C.X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD
+ ErrorInCertNotAfterField VerifyResult = C.X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD
+ ErrorInCrlLastUpdateField VerifyResult = C.X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD
+ ErrorInCrlNextUpdateField VerifyResult = C.X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD
+ OutOfMem VerifyResult = C.X509_V_ERR_OUT_OF_MEM
+ DepthZeroSelfSignedCert VerifyResult = C.X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT
+ SelfSignedCertInChain VerifyResult = C.X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN
+ UnableToGetIssuerCertLocally VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY
+ UnableToVerifyLeafSignature VerifyResult = C.X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE
+ CertChainTooLong VerifyResult = C.X509_V_ERR_CERT_CHAIN_TOO_LONG
+ CertRevoked VerifyResult = C.X509_V_ERR_CERT_REVOKED
+ InvalidCa VerifyResult = C.X509_V_ERR_INVALID_CA
+ PathLengthExceeded VerifyResult = C.X509_V_ERR_PATH_LENGTH_EXCEEDED
+ InvalidPurpose VerifyResult = C.X509_V_ERR_INVALID_PURPOSE
+ CertUntrusted VerifyResult = C.X509_V_ERR_CERT_UNTRUSTED
+ CertRejected VerifyResult = C.X509_V_ERR_CERT_REJECTED
+ SubjectIssuerMismatch VerifyResult = C.X509_V_ERR_SUBJECT_ISSUER_MISMATCH
+ AkidSkidMismatch VerifyResult = C.X509_V_ERR_AKID_SKID_MISMATCH
+ AkidIssuerSerialMismatch VerifyResult = C.X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH
+ KeyusageNoCertsign VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_CERTSIGN
+ UnableToGetCrlIssuer VerifyResult = C.X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER
+ UnhandledCriticalExtension VerifyResult = C.X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION
+ KeyusageNoCrlSign VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_CRL_SIGN
+ UnhandledCriticalCrlExtension VerifyResult = C.X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION
+ InvalidNonCa VerifyResult = C.X509_V_ERR_INVALID_NON_CA
+ ProxyPathLengthExceeded VerifyResult = C.X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED
+ KeyusageNoDigitalSignature VerifyResult = C.X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE
+ ProxyCertificatesNotAllowed VerifyResult = C.X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED
+ InvalidExtension VerifyResult = C.X509_V_ERR_INVALID_EXTENSION
+ InvalidPolicyExtension VerifyResult = C.X509_V_ERR_INVALID_POLICY_EXTENSION
+ NoExplicitPolicy VerifyResult = C.X509_V_ERR_NO_EXPLICIT_POLICY
+ UnnestedResource VerifyResult = C.X509_V_ERR_UNNESTED_RESOURCE
+ ApplicationVerification VerifyResult = C.X509_V_ERR_APPLICATION_VERIFICATION
+)
+
+func newSSL(ctx *C.SSL_CTX) (*C.SSL, error) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ ssl := C.SSL_new(ctx)
+ if ssl == nil {
+ return nil, errorFromErrorQueue()
+ }
+ return ssl, nil
+}
+
+func newConn(conn net.Conn, ctx *Ctx) (*Conn, error) {
+ ssl, err := newSSL(ctx.ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ into_ssl := &readBio{}
+ from_ssl := &writeBio{}
+
+ if ctx.GetMode()&ReleaseBuffers > 0 {
+ into_ssl.release_buffers = true
+ from_ssl.release_buffers = true
+ }
+
+ into_ssl_cbio := into_ssl.MakeCBIO()
+ from_ssl_cbio := from_ssl.MakeCBIO()
+ if into_ssl_cbio == nil || from_ssl_cbio == nil {
+ // these frees are null safe
+ C.BIO_free(into_ssl_cbio)
+ C.BIO_free(from_ssl_cbio)
+ C.SSL_free(ssl)
+ return nil, errors.New("failed to allocate memory BIO")
+ }
+
+ // the ssl object takes ownership of these objects now
+ C.SSL_set_bio(ssl, into_ssl_cbio, from_ssl_cbio)
+
+ c := &Conn{
+ conn: conn,
+ ssl: ssl,
+ ctx: ctx,
+ into_ssl: into_ssl,
+ from_ssl: from_ssl}
+ runtime.SetFinalizer(c, func(c *Conn) {
+ c.into_ssl.Disconnect(into_ssl_cbio)
+ c.from_ssl.Disconnect(from_ssl_cbio)
+ C.SSL_free(c.ssl)
+ })
+ return c, nil
+}
+
+// Client wraps an existing stream connection and puts it in the connect state
+// for any subsequent handshakes.
+//
+// IMPORTANT NOTE: if you use this method instead of Dial to construct an SSL
+// connection, you are responsible for verifying the peer's hostname.
+// Otherwise, you are vulnerable to MITM attacks.
+//
+// Client also does not set up SNI for you like Dial does.
+//
+// Client connections probably won't work for you unless you set a verify
+// location or add some certs to the certificate store of the client context
+// you're using. This library is not nice enough to use the system certificate
+// store by default for you yet.
+func Client(conn net.Conn, ctx *Ctx) (*Conn, error) {
+ c, err := newConn(conn, ctx)
+ if err != nil {
+ return nil, err
+ }
+ C.SSL_set_connect_state(c.ssl)
+ return c, nil
+}
+
+// Server wraps an existing stream connection and puts it in the accept state
+// for any subsequent handshakes.
+func Server(conn net.Conn, ctx *Ctx) (*Conn, error) {
+ c, err := newConn(conn, ctx)
+ if err != nil {
+ return nil, err
+ }
+ C.SSL_set_accept_state(c.ssl)
+ return c, nil
+}
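+
+// A minimal, illustrative sketch of wrapping an existing TCP connection as a
+// client (the address, context setup, and error handling are assumptions,
+// not part of this package):
+//
+//    raw, err := net.Dial("tcp", "example.com:443")
+//    if err != nil {
+//        return err
+//    }
+//    ctx, err := NewCtx()
+//    if err != nil {
+//        return err
+//    }
+//    conn, err := Client(raw, ctx)
+//    if err != nil {
+//        return err
+//    }
+//    if err = conn.Handshake(); err != nil {
+//        return err
+//    }
+//    // Client does not verify the peer for us, so do it explicitly:
+//    if err = conn.VerifyHostname("example.com"); err != nil {
+//        return err
+//    }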
+
+func (c *Conn) CurrentCipher() (string, error) {
+ p := C.SSL_get_cipher_name_not_a_macro(c.ssl)
+ if p == nil {
+ return "", errors.New("Session not established")
+ }
+
+ return C.GoString(p), nil
+}
+
+func (c *Conn) fillInputBuffer() error {
+ for {
+ n, err := c.into_ssl.ReadFromOnce(c.conn)
+ if n == 0 && err == nil {
+ continue
+ }
+ if err == io.EOF {
+ c.into_ssl.MarkEOF()
+ return c.Close()
+ }
+ return err
+ }
+}
+
+func (c *Conn) flushOutputBuffer() error {
+ _, err := c.from_ssl.WriteTo(c.conn)
+ return err
+}
+
+func (c *Conn) getErrorHandler(rv C.int, errno error) func() error {
+ errcode := C.SSL_get_error(c.ssl, rv)
+ switch errcode {
+ case C.SSL_ERROR_ZERO_RETURN:
+ return func() error {
+ c.Close()
+ return io.ErrUnexpectedEOF
+ }
+ case C.SSL_ERROR_WANT_READ:
+ go c.flushOutputBuffer()
+ if c.want_read_future != nil {
+ want_read_future := c.want_read_future
+ return func() error {
+ _, err := want_read_future.Get()
+ return err
+ }
+ }
+ c.want_read_future = utils.NewFuture()
+ want_read_future := c.want_read_future
+ return func() (err error) {
+ defer func() {
+ c.mtx.Lock()
+ c.want_read_future = nil
+ c.mtx.Unlock()
+ want_read_future.Set(nil, err)
+ }()
+ err = c.fillInputBuffer()
+ if err != nil {
+ return err
+ }
+ return tryAgain
+ }
+ case C.SSL_ERROR_WANT_WRITE:
+ return func() error {
+ err := c.flushOutputBuffer()
+ if err != nil {
+ return err
+ }
+ return tryAgain
+ }
+ case C.SSL_ERROR_SYSCALL:
+ var err error
+ if C.ERR_peek_error() == 0 {
+ switch rv {
+ case 0:
+ err = errors.New("protocol-violating EOF")
+ case -1:
+ err = errno
+ default:
+ err = errorFromErrorQueue()
+ }
+ } else {
+ err = errorFromErrorQueue()
+ }
+ return func() error { return err }
+ default:
+ err := errorFromErrorQueue()
+ return func() error { return err }
+ }
+}
+
+func (c *Conn) handleError(errcb func() error) error {
+ if errcb != nil {
+ return errcb()
+ }
+ return nil
+}
+
+func (c *Conn) handshake() func() error {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ if c.is_shutdown {
+ return func() error { return io.ErrUnexpectedEOF }
+ }
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ rv, errno := C.SSL_do_handshake(c.ssl)
+ if rv > 0 {
+ return nil
+ }
+ return c.getErrorHandler(rv, errno)
+}
+
+// Handshake performs an SSL handshake. If a handshake is not manually
+// triggered, it will run before the first I/O on the encrypted stream.
+func (c *Conn) Handshake() error {
+ err := tryAgain
+ for err == tryAgain {
+ err = c.handleError(c.handshake())
+ }
+ go c.flushOutputBuffer()
+ return err
+}
+
+// PeerCertificate returns the Certificate of the peer with which you're
+// communicating. Only valid after a handshake.
+func (c *Conn) PeerCertificate() (*Certificate, error) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ if c.is_shutdown {
+ return nil, errors.New("connection closed")
+ }
+ x := C.SSL_get_peer_certificate(c.ssl)
+ if x == nil {
+ return nil, errors.New("no peer certificate found")
+ }
+ cert := &Certificate{x: x}
+ runtime.SetFinalizer(cert, func(cert *Certificate) {
+ C.X509_free(cert.x)
+ })
+ return cert, nil
+}
+
+// PeerCertificateChain returns the certificate chain of the peer. If called on
+// the client side, the stack also contains the peer's certificate; if called
+// on the server side, the peer's certificate must be obtained separately using
+// PeerCertificate.
+func (c *Conn) PeerCertificateChain() (rv []*Certificate, err error) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ if c.is_shutdown {
+ return nil, errors.New("connection closed")
+ }
+ sk := C.SSL_get_peer_cert_chain(c.ssl)
+ if sk == nil {
+ return nil, errors.New("no peer certificates found")
+ }
+ sk_num := int(C.sk_X509_num_not_a_macro(sk))
+ rv = make([]*Certificate, 0, sk_num)
+ for i := 0; i < sk_num; i++ {
+ x := C.sk_X509_value_not_a_macro(sk, C.int(i))
+ // ref holds on to the underlying connection memory so we don't need to
+ // worry about incrementing refcounts manually or freeing the X509
+ rv = append(rv, &Certificate{x: x, ref: c})
+ }
+ return rv, nil
+}
+
+// GetVerifyResults returns the result of peer certificate verification.
+// SSL_get_verify_result() returns the result of the verification of the X509
+// certificate presented by the peer, if any. See
+// https://www.openssl.org/docs/ssl/SSL_get_verify_result.html
+func (c *Conn) GetVerifyResults() error {
+ result := C.SSL_get_verify_result(c.ssl)
+ if int(result) != 0 {
+ return errors.New(C.GoString(
+ C.X509_verify_cert_error_string(result)))
+ }
+ return nil
+}
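+
+// For instance, after a completed handshake one might check (sketch; the
+// surrounding error handling is an assumption):
+//
+//    if err := conn.GetVerifyResults(); err != nil {
+//        return fmt.Errorf("peer verification failed: %v", err)
+//    }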
+
+type ConnectionState struct {
+ Certificate *Certificate
+ CertificateError error
+ CertificateChain []*Certificate
+ CertificateChainError error
+ SessionReused bool
+}
+
+func (c *Conn) ConnectionState() (rv ConnectionState) {
+ rv.Certificate, rv.CertificateError = c.PeerCertificate()
+ rv.CertificateChain, rv.CertificateChainError = c.PeerCertificateChain()
+ rv.SessionReused = c.SessionReused()
+ return
+}
+
+func (c *Conn) shutdown() func() error {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ rv, errno := C.SSL_shutdown(c.ssl)
+ if rv > 0 {
+ return nil
+ }
+ if rv == 0 {
+ // The OpenSSL docs say that in this case, the shutdown is not
+ // finished, and we should call SSL_shutdown() a second time, if a
+ // bidirectional shutdown is going to be performed. Further, the
+ // output of SSL_get_error may be misleading, as an erroneous
+ // SSL_ERROR_SYSCALL may be flagged even though no error occurred.
+ // So, TODO: revisit bidirectional shutdown, possibly trying again.
+ // Note: some broken clients won't engage in bidirectional shutdown
+ // without tickling them to close by sending a TCP_FIN packet, or
+ // shutting down the write-side of the connection.
+ return nil
+ } else {
+ return c.getErrorHandler(rv, errno)
+ }
+}
+
+func (c *Conn) shutdownLoop() error {
+ err := tryAgain
+ shutdown_tries := 0
+ for err == tryAgain {
+ shutdown_tries = shutdown_tries + 1
+ err = c.handleError(c.shutdown())
+ if err == nil {
+ return c.flushOutputBuffer()
+ }
+ if err == tryAgain && shutdown_tries >= 2 {
+ return errors.New("shutdown requested a third time?")
+ }
+ }
+ if err == io.ErrUnexpectedEOF {
+ err = nil
+ }
+ return err
+}
+
+// Close shuts down the SSL connection and closes the underlying wrapped
+// connection.
+func (c *Conn) Close() error {
+ c.mtx.Lock()
+ if c.is_shutdown {
+ c.mtx.Unlock()
+ return nil
+ }
+ c.is_shutdown = true
+ c.mtx.Unlock()
+ var errs utils.ErrorGroup
+ errs.Add(c.shutdownLoop())
+ errs.Add(c.conn.Close())
+ return errs.Finalize()
+}
+
+func (c *Conn) read(b []byte) (int, func() error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ if c.is_shutdown {
+ return 0, func() error { return io.EOF }
+ }
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ rv, errno := C.SSL_read(c.ssl, unsafe.Pointer(&b[0]), C.int(len(b)))
+ if rv > 0 {
+ return int(rv), nil
+ }
+ return 0, c.getErrorHandler(rv, errno)
+}
+
+// Read reads up to len(b) bytes into b. It returns the number of bytes read
+// and an error if applicable. io.EOF is returned when the caller can expect
+// to see no more data.
+func (c *Conn) Read(b []byte) (n int, err error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+ err = tryAgain
+ for err == tryAgain {
+ n, errcb := c.read(b)
+ err = c.handleError(errcb)
+ if err == nil {
+ go c.flushOutputBuffer()
+ return n, nil
+ }
+ if err == io.ErrUnexpectedEOF {
+ err = io.EOF
+ }
+ }
+ return 0, err
+}
+
+func (c *Conn) write(b []byte) (int, func() error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+ if c.is_shutdown {
+ err := errors.New("connection closed")
+ return 0, func() error { return err }
+ }
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ rv, errno := C.SSL_write(c.ssl, unsafe.Pointer(&b[0]), C.int(len(b)))
+ if rv > 0 {
+ return int(rv), nil
+ }
+ return 0, c.getErrorHandler(rv, errno)
+}
+
+// Write will encrypt the contents of b and write it to the underlying stream.
+// Performance will be vastly improved if the size of b is a multiple of
+// SSLRecordSize.
+func (c *Conn) Write(b []byte) (written int, err error) {
+ if len(b) == 0 {
+ return 0, nil
+ }
+ err = tryAgain
+ for err == tryAgain {
+ n, errcb := c.write(b)
+ err = c.handleError(errcb)
+ if err == nil {
+ return n, c.flushOutputBuffer()
+ }
+ }
+ return 0, err
+}
+
+// VerifyHostname pulls the PeerCertificate and calls VerifyHostname on the
+// certificate.
+func (c *Conn) VerifyHostname(host string) error {
+ cert, err := c.PeerCertificate()
+ if err != nil {
+ return err
+ }
+ return cert.VerifyHostname(host)
+}
+
+// LocalAddr returns the underlying connection's local address
+func (c *Conn) LocalAddr() net.Addr {
+ return c.conn.LocalAddr()
+}
+
+// RemoteAddr returns the underlying connection's remote address
+func (c *Conn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+// SetDeadline calls SetDeadline on the underlying connection.
+func (c *Conn) SetDeadline(t time.Time) error {
+ return c.conn.SetDeadline(t)
+}
+
+// SetReadDeadline calls SetReadDeadline on the underlying connection.
+func (c *Conn) SetReadDeadline(t time.Time) error {
+ return c.conn.SetReadDeadline(t)
+}
+
+// SetWriteDeadline calls SetWriteDeadline on the underlying connection.
+func (c *Conn) SetWriteDeadline(t time.Time) error {
+ return c.conn.SetWriteDeadline(t)
+}
+
+func (c *Conn) UnderlyingConn() net.Conn {
+ return c.conn
+}
+
+func (c *Conn) VerifyResult() VerifyResult {
+ return VerifyResult(C.SSL_get_verify_result(c.ssl))
+}
+
+func (c *Conn) SessionReused() bool {
+ return C.SSL_session_reused_not_a_macro(c.ssl) == 1
+}
+
+func (c *Conn) GetSession() ([]byte, error) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ // get1 increases the refcount of the session, so we have to free it.
+ session := (*C.SSL_SESSION)(C.SSL_get1_session(c.ssl))
+ if session == nil {
+ return nil, errors.New("failed to get session")
+ }
+ defer C.SSL_SESSION_free(session)
+
+ // get the size of the encoding
+ slen := C.i2d_SSL_SESSION(session, nil)
+
+ buf := (*C.uchar)(C.malloc(C.size_t(slen)))
+ defer C.free(unsafe.Pointer(buf))
+
+ // this modifies the value of buf (seriously), so we have to pass in a temp
+ // var so that we can actually read the bytes from buf.
+ tmp := buf
+ slen2 := C.i2d_SSL_SESSION(session, &tmp)
+ if slen != slen2 {
+ return nil, errors.New("session had different lengths")
+ }
+
+ return C.GoBytes(unsafe.Pointer(buf), slen), nil
+}
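+
+// Sketch of persisting a negotiated session for possible reuse (the file
+// path and ioutil usage are assumptions; restoring goes through the
+// unexported setSession):
+//
+//    sess, err := conn.GetSession()
+//    if err != nil {
+//        return err
+//    }
+//    err = ioutil.WriteFile("/tmp/tls-session.der", sess, 0600)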
+
+func (c *Conn) setSession(session []byte) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ ptr := (*C.uchar)(&session[0])
+ s := C.d2i_SSL_SESSION(nil, &ptr, C.long(len(session)))
+ if s == nil {
+ return fmt.Errorf("unable to load session: %s", errorFromErrorQueue())
+ }
+ defer C.SSL_SESSION_free(s)
+
+ ret := C.SSL_set_session(c.ssl, s)
+ if ret != 1 {
+ return fmt.Errorf("unable to set session: %s", errorFromErrorQueue())
+ }
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ctx.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ctx.go
new file mode 100644
index 00000000000..74422f290a3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ctx.go
@@ -0,0 +1,831 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+/*
+#include <openssl/crypto.h>
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+#include <openssl/conf.h>
+#include <openssl/x509.h>
+
+static long SSL_CTX_set_options_not_a_macro(SSL_CTX* ctx, long options) {
+ return SSL_CTX_set_options(ctx, options);
+}
+
+static long SSL_CTX_clear_options_not_a_macro(SSL_CTX* ctx, long options) {
+ return SSL_CTX_clear_options(ctx, options);
+}
+
+static long SSL_CTX_get_options_not_a_macro(SSL_CTX* ctx) {
+ return SSL_CTX_get_options(ctx);
+}
+
+static long SSL_CTX_set_mode_not_a_macro(SSL_CTX* ctx, long modes) {
+ return SSL_CTX_set_mode(ctx, modes);
+}
+
+static long SSL_CTX_get_mode_not_a_macro(SSL_CTX* ctx) {
+ return SSL_CTX_get_mode(ctx);
+}
+
+static long SSL_CTX_set_session_cache_mode_not_a_macro(SSL_CTX* ctx, long modes) {
+ return SSL_CTX_set_session_cache_mode(ctx, modes);
+}
+
+static long SSL_CTX_sess_set_cache_size_not_a_macro(SSL_CTX* ctx, long t) {
+ return SSL_CTX_sess_set_cache_size(ctx, t);
+}
+
+static long SSL_CTX_sess_get_cache_size_not_a_macro(SSL_CTX* ctx) {
+ return SSL_CTX_sess_get_cache_size(ctx);
+}
+
+static long SSL_CTX_set_timeout_not_a_macro(SSL_CTX* ctx, long t) {
+ return SSL_CTX_set_timeout(ctx, t);
+}
+
+static long SSL_CTX_get_timeout_not_a_macro(SSL_CTX* ctx) {
+ return SSL_CTX_get_timeout(ctx);
+}
+
+static int CRYPTO_add_not_a_macro(int *pointer,int amount,int type) {
+ return CRYPTO_add(pointer, amount, type);
+}
+
+static long SSL_CTX_add_extra_chain_cert_not_a_macro(SSL_CTX* ctx, X509 *cert) {
+ return SSL_CTX_add_extra_chain_cert(ctx, cert);
+}
+
+#ifndef SSL_MODE_RELEASE_BUFFERS
+#define SSL_MODE_RELEASE_BUFFERS 0
+#endif
+
+#ifndef SSL_OP_NO_COMPRESSION
+#define SSL_OP_NO_COMPRESSION 0
+#endif
+
+static const SSL_METHOD *OUR_TLSv1_1_method() {
+#if OPENSSL_VERSION_NUMBER > 0x1000100fL && defined(TLS1_1_VERSION) && !defined(OPENSSL_SYSNAME_MACOSX)
+ return TLSv1_1_method();
+#else
+ return NULL;
+#endif
+}
+
+static const SSL_METHOD *OUR_TLSv1_2_method() {
+#if OPENSSL_VERSION_NUMBER > 0x1000100fL && defined(TLS1_2_VERSION) && !defined(OPENSSL_SYSNAME_MACOSX)
+ return TLSv1_2_method();
+#else
+ return NULL;
+#endif
+}
+
+#if defined SSL_CTRL_SET_TLSEXT_HOSTNAME
+ extern int sni_cb(SSL *ssl_conn, int *ad, void *arg);
+#endif
+
+extern int verify_cb(int ok, X509_STORE_CTX* store);
+
+typedef STACK_OF(X509_NAME) *STACK_OF_X509_NAME_not_a_macro;
+
+static void sk_X509_NAME_pop_free_not_a_macro(STACK_OF_X509_NAME_not_a_macro st) {
+ sk_X509_NAME_pop_free(st, X509_NAME_free);
+}
+
+extern int password_cb(char *buf, int size, int rwflag, void *password);
+*/
+import "C"
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "time"
+ "unsafe"
+
+ "github.com/spacemonkeygo/spacelog"
+)
+
+var (
+ ssl_ctx_idx = C.SSL_CTX_get_ex_new_index(0, nil, nil, nil, nil)
+
+ logger = spacelog.GetLogger()
+)
+
+type Ctx struct {
+ ctx *C.SSL_CTX
+ cert *Certificate
+ chain []*Certificate
+ key PrivateKey
+ verify_cb VerifyCallback
+ sni_cb TLSExtServernameCallback
+}
+
+//export get_ssl_ctx_idx
+func get_ssl_ctx_idx() C.int {
+ return ssl_ctx_idx
+}
+
+func newCtx(method *C.SSL_METHOD) (*Ctx, error) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ ctx := C.SSL_CTX_new(method)
+ if ctx == nil {
+ return nil, errorFromErrorQueue()
+ }
+ c := &Ctx{ctx: ctx}
+ C.SSL_CTX_set_ex_data(ctx, get_ssl_ctx_idx(), unsafe.Pointer(c))
+ runtime.SetFinalizer(c, func(c *Ctx) {
+ C.SSL_CTX_free(c.ctx)
+ })
+ return c, nil
+}
+
+type SSLVersion int
+
+const (
+ SSLv3 SSLVersion = 0x02 // Vulnerable to "POODLE" attack.
+ TLSv1 SSLVersion = 0x03
+ TLSv1_1 SSLVersion = 0x04
+ TLSv1_2 SSLVersion = 0x05
+
+ // Make sure to disable SSLv2 and SSLv3 if you use this. SSLv3 is vulnerable
+ // to the "POODLE" attack, and SSLv2 is what, just don't even.
+ AnyVersion SSLVersion = 0x06
+)
+
+// NewCtxWithVersion creates an SSL context that is specific to the provided
+// SSL version. See http://www.openssl.org/docs/ssl/SSL_CTX_new.html for more.
+func NewCtxWithVersion(version SSLVersion) (*Ctx, error) {
+ var method *C.SSL_METHOD
+ switch version {
+ case TLSv1:
+ method = C.TLSv1_method()
+ case TLSv1_1:
+ method = C.OUR_TLSv1_1_method()
+ case TLSv1_2:
+ method = C.OUR_TLSv1_2_method()
+ case AnyVersion:
+ method = C.SSLv23_method()
+ }
+ if method == nil {
+ return nil, errors.New("unknown ssl/tls version")
+ }
+ return newCtx(method)
+}
+
+// NewCtx creates a context that supports any TLS version 1.0 and newer.
+func NewCtx() (*Ctx, error) {
+ c, err := NewCtxWithVersion(AnyVersion)
+ if err == nil {
+ c.SetOptions(NoSSLv2 | NoSSLv3)
+ }
+ return c, err
+}
+
+// NewCtxFromFiles calls NewCtx, loads the provided files, and configures the
+// context to use them.
+func NewCtxFromFiles(cert_file string, key_file string) (*Ctx, error) {
+ ctx, err := NewCtx()
+ if err != nil {
+ return nil, err
+ }
+
+ cert_bytes, err := ioutil.ReadFile(cert_file)
+ if err != nil {
+ return nil, err
+ }
+
+ certs := SplitPEM(cert_bytes)
+ if len(certs) == 0 {
+ return nil, fmt.Errorf("No PEM certificate found in '%s'", cert_file)
+ }
+ first, certs := certs[0], certs[1:]
+ cert, err := LoadCertificateFromPEM(first)
+ if err != nil {
+ return nil, err
+ }
+
+ err = ctx.UseCertificate(cert)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, pem := range certs {
+ cert, err := LoadCertificateFromPEM(pem)
+ if err != nil {
+ return nil, err
+ }
+ err = ctx.AddChainCertificate(cert)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ key_bytes, err := ioutil.ReadFile(key_file)
+ if err != nil {
+ return nil, err
+ }
+
+ key, err := LoadPrivateKeyFromPEM(key_bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ err = ctx.UsePrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+
+ return ctx, nil
+}
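+
+// An illustrative server built on NewCtxFromFiles (the file paths and
+// listener details are assumptions):
+//
+//    ctx, err := NewCtxFromFiles("server.crt", "server.key")
+//    if err != nil {
+//        return err
+//    }
+//    l, err := net.Listen("tcp", ":8443")
+//    if err != nil {
+//        return err
+//    }
+//    raw, err := l.Accept()
+//    if err != nil {
+//        return err
+//    }
+//    conn, err := Server(raw, ctx)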
+
+// EllipticCurve represents the ASN.1 OID of an elliptic curve.
+// see https://www.openssl.org/docs/apps/ecparam.html for a list of implemented curves.
+type EllipticCurve int
+
+const (
+ // P-256: X9.62/SECG curve over a 256 bit prime field
+ Prime256v1 EllipticCurve = C.NID_X9_62_prime256v1
+ // P-384: NIST/SECG curve over a 384 bit prime field
+ Secp384r1 EllipticCurve = C.NID_secp384r1
+)
+
+// UseCertificate configures the context to present the given certificate to
+// peers.
+func (c *Ctx) UseCertificate(cert *Certificate) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ c.cert = cert
+ if int(C.SSL_CTX_use_certificate(c.ctx, cert.x)) != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
+
+// UseCertificateChainFromFile loads a certificate chain from file into ctx.
+// The certificates must be in PEM format and must be sorted starting with the
+// subject's certificate (actual client or server certificate), followed by
+// intermediate CA certificates if applicable, and ending at the highest level
+// (root) CA. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_use_certificate.html
+func (c *Ctx) UseCertificateChainFile(cert_file string) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ var c_cert_file *C.char
+ if cert_file != "" {
+ c_cert_file = C.CString(cert_file)
+ defer C.free(unsafe.Pointer(c_cert_file))
+ }
+ if int(C.SSL_CTX_use_certificate_chain_file(c.ctx, c_cert_file)) != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
+
+// UsePrivateKeyFile adds the first private key found in the file to the
+// *Ctx, c. The file type must be one of the known types FiletypePEM or
+// FiletypeASN1.
+func (c *Ctx) UsePrivateKeyFile(key_file string, file_type Filetypes) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ var c_key_file *C.char
+ if key_file != "" {
+ c_key_file = C.CString(key_file)
+ defer C.free(unsafe.Pointer(c_key_file))
+ }
+ if int(C.SSL_CTX_use_PrivateKey_file(c.ctx, c_key_file, C.int(file_type))) != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
+
+func (c *Ctx) UsePrivateKeyFileWithPassword(key_file string, file_type Filetypes, password string) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ var c_key_file *C.char
+
+ c_pwd := C.CString(password)
+ defer C.free(unsafe.Pointer(c_pwd))
+ C.SSL_CTX_set_default_passwd_cb_userdata(c.ctx, unsafe.Pointer(c_pwd))
+ C.SSL_CTX_set_default_passwd_cb(c.ctx, (*C.pem_password_cb)(C.password_cb))
+
+ if key_file != "" {
+ c_key_file = C.CString(key_file)
+ defer C.free(unsafe.Pointer(c_key_file))
+ }
+ if int(C.SSL_CTX_use_PrivateKey_file(c.ctx, c_key_file, C.int(file_type))) != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
+
+// CheckPrivateKey verifies that the private key agrees with the corresponding
+// public key in the certificate
+func (c *Ctx) CheckPrivateKey() error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ if int(C.SSL_CTX_check_private_key(c.ctx)) != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
+
+type StackOfX509Name struct {
+ stack C.STACK_OF_X509_NAME_not_a_macro
+ // shared indicates whether we are the sole owner of this pointer, and
+ // hence whether we should free the underlying data structure when this Go
+ // data structure goes out of scope
+ shared bool
+}
+
+// LoadClientCAFile reads certificates from file and returns a StackOfX509Name
+// with the subject names found. See
+// https://www.openssl.org/docs/ssl/SSL_load_client_CA_file.html
+func LoadClientCAFile(ca_file string) (*StackOfX509Name, error) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ var c_ca_file *C.char
+ if ca_file != "" {
+ c_ca_file = C.CString(ca_file)
+ defer C.free(unsafe.Pointer(c_ca_file))
+ }
+ stack := C.SSL_load_client_CA_file(c_ca_file)
+ if stack == nil {
+ return nil, errorFromErrorQueue()
+ }
+ caList := StackOfX509Name{
+ stack: stack,
+ shared: false,
+ }
+ runtime.SetFinalizer(&caList, func(c *StackOfX509Name) {
+ if !c.shared {
+ C.sk_X509_NAME_pop_free_not_a_macro(c.stack)
+ }
+ })
+ return &caList, nil
+}
+
+// SetClientCAList sets the list of CAs sent to the client when requesting a
+// client certificate for Ctx. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_set_client_CA_list.html
+func (c *Ctx) SetClientCAList(caList *StackOfX509Name) {
+ C.SSL_CTX_set_client_CA_list(c.ctx, caList.stack)
+ caList.shared = true
+}
+
+// AddChainCertificate adds a certificate to the chain presented in the
+// handshake.
+func (c *Ctx) AddChainCertificate(cert *Certificate) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ c.chain = append(c.chain, cert)
+ if int(C.SSL_CTX_add_extra_chain_cert_not_a_macro(c.ctx, cert.x)) != 1 {
+ return errorFromErrorQueue()
+ }
+ // OpenSSL takes ownership via SSL_CTX_add_extra_chain_cert
+ runtime.SetFinalizer(cert, nil)
+ return nil
+}
+
+// UsePrivateKey configures the context to use the given private key for SSL
+// handshakes.
+func (c *Ctx) UsePrivateKey(key PrivateKey) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ c.key = key
+ if int(C.SSL_CTX_use_PrivateKey(c.ctx, key.evpPKey())) != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
+
+type CertificateStore struct {
+ store *C.X509_STORE
+ // for GC
+ ctx *Ctx
+ certs []*Certificate
+}
+
+// NewCertificateStore allocates a new, empty CertificateStore.
+func NewCertificateStore() (*CertificateStore, error) {
+ s := C.X509_STORE_new()
+ if s == nil {
+ return nil, errors.New("failed to allocate X509_STORE")
+ }
+ store := &CertificateStore{store: s}
+ runtime.SetFinalizer(store, func(s *CertificateStore) {
+ C.X509_STORE_free(s.store)
+ })
+ return store, nil
+}
+
+// LoadCertificatesFromPEM parses a chained PEM file, loading all
+// certificates into the Store.
+func (s *CertificateStore) LoadCertificatesFromPEM(data []byte) error {
+ pems := SplitPEM(data)
+ for _, pem := range pems {
+ cert, err := LoadCertificateFromPEM(pem)
+ if err != nil {
+ return err
+ }
+ err = s.AddCertificate(cert)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetCertificateStore returns the context's certificate store that will be
+// used for peer validation.
+func (c *Ctx) GetCertificateStore() *CertificateStore {
+ // we don't need to dealloc the cert store pointer here, because it points
+ // to a ctx internal. so we do need to keep the ctx around
+ return &CertificateStore{
+ store: C.SSL_CTX_get_cert_store(c.ctx),
+ ctx: c}
+}
+
+// AddCertificate marks the provided Certificate as a trusted certificate in
+// the given CertificateStore.
+func (s *CertificateStore) AddCertificate(cert *Certificate) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ s.certs = append(s.certs, cert)
+ if int(C.X509_STORE_add_cert(s.store, cert.x)) != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
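+
+// For example, to trust a CA bundle read from disk (sketch; the path is an
+// assumption):
+//
+//    pem, err := ioutil.ReadFile("ca-bundle.pem")
+//    if err != nil {
+//        return err
+//    }
+//    store := ctx.GetCertificateStore()
+//    if err = store.LoadCertificatesFromPEM(pem); err != nil {
+//        return err
+//    }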
+
+type X509VerificationFlag int
+
+func (s *CertificateStore) SetFlags(flags X509VerificationFlag) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ if int(C.X509_STORE_set_flags(s.store, C.ulong(flags))) != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
+
+// See https://www.openssl.org/docs/crypto/X509_VERIFY_PARAM_set_flags.html
+const (
+ CBIssuerCheck X509VerificationFlag = C.X509_V_FLAG_CB_ISSUER_CHECK
+ UseCheckTime X509VerificationFlag = C.X509_V_FLAG_USE_CHECK_TIME
+ CRLCheck X509VerificationFlag = C.X509_V_FLAG_CRL_CHECK
+ CRLCheckAll X509VerificationFlag = C.X509_V_FLAG_CRL_CHECK_ALL
+ IgnoreCritical X509VerificationFlag = C.X509_V_FLAG_IGNORE_CRITICAL
+ X509Strict X509VerificationFlag = C.X509_V_FLAG_X509_STRICT
+ AllowProxyCerts X509VerificationFlag = C.X509_V_FLAG_ALLOW_PROXY_CERTS
+ PolicyCheck X509VerificationFlag = C.X509_V_FLAG_POLICY_CHECK
+ ExplicitPolicy X509VerificationFlag = C.X509_V_FLAG_EXPLICIT_POLICY
+ InhibitAny X509VerificationFlag = C.X509_V_FLAG_INHIBIT_ANY
+ InhibitMap X509VerificationFlag = C.X509_V_FLAG_INHIBIT_MAP
+ NotifyPolicy X509VerificationFlag = C.X509_V_FLAG_NOTIFY_POLICY
+ // ExtendedCRLSupport X509VerificationFlag = C.X509_V_FLAG_EXTENDED_CRL_SUPPORT
+ // UseDeltas X509VerificationFlag = C.X509_V_FLAG_USE_DELTAS
+ // CheckSsSignature X509VerificationFlag = C.X509_V_FLAG_CHECK_SS_SIGNATURE
+ // TrustedFirst X509VerificationFlag = C.X509_V_FLAG_TRUSTED_FIRST
+ PolicyMask X509VerificationFlag = C.X509_V_FLAG_POLICY_MASK
+)
+
+type CertificateStoreLookup struct {
+ lookup *C.X509_LOOKUP
+ store *CertificateStore
+}
+
+// An X509LookupMethod is required to build a CertificateStoreLookup in a
+// CertificateStore. The X509LookupMethod indicates the type or functionality
+// of the CertificateStoreLookup.
+type X509LookupMethod *C.X509_LOOKUP_METHOD
+
+// CertificateStoreLookups with X509LookupFile methods look for certs in a file
+func X509LookupFile() X509LookupMethod {
+ return X509LookupMethod(C.X509_LOOKUP_file())
+}
+
+// CertificateStoreLookups with X509LookupHashDir methods look for certs in a
+// directory
+func X509LookupHashDir() X509LookupMethod {
+ return X509LookupMethod(C.X509_LOOKUP_hash_dir())
+}
+
+// AddLookup creates a CertificateStoreLookup of type X509LookupMethod in the
+// CertificateStore
+func (s *CertificateStore) AddLookup(method X509LookupMethod) (*CertificateStoreLookup, error) {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ var lookup *C.X509_LOOKUP
+ lookup = C.X509_STORE_add_lookup(s.store, method)
+ if lookup != nil {
+ return &CertificateStoreLookup{
+ lookup: lookup,
+ store: s,
+ }, nil
+ }
+ return nil, errorFromErrorQueue()
+}
+
+// LoadCRLFile adds a file to a CertificateStoreLookup in the
+// CertificateStore. The CertificateStoreLookup presumably needs to have been
+// created with X509LookupFile as the lookup method.
+func (l *CertificateStoreLookup) LoadCRLFile(crl_file string) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ var c_crl_file *C.char
+ if crl_file != "" {
+ c_crl_file = C.CString(crl_file)
+ defer C.free(unsafe.Pointer(c_crl_file))
+ }
+ if int(C.X509_load_crl_file(l.lookup, c_crl_file, C.X509_FILETYPE_PEM)) != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
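+
+// Putting the lookup pieces together for CRL checking (sketch; the file name
+// is an assumption):
+//
+//    store := ctx.GetCertificateStore()
+//    lookup, err := store.AddLookup(X509LookupFile())
+//    if err != nil {
+//        return err
+//    }
+//    if err = lookup.LoadCRLFile("revoked.pem"); err != nil {
+//        return err
+//    }
+//    if err = store.SetFlags(CRLCheck); err != nil {
+//        return err
+//    }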
+
+type CertificateStoreCtx struct {
+ ctx *C.X509_STORE_CTX
+ ssl_ctx *Ctx
+}
+
+func (self *CertificateStoreCtx) VerifyResult() VerifyResult {
+ return VerifyResult(C.X509_STORE_CTX_get_error(self.ctx))
+}
+
+func (self *CertificateStoreCtx) Err() error {
+ code := C.X509_STORE_CTX_get_error(self.ctx)
+ if code == C.X509_V_OK {
+ return nil
+ }
+ return fmt.Errorf("openssl: %s",
+ C.GoString(C.X509_verify_cert_error_string(C.long(code))))
+}
+
+func (self *CertificateStoreCtx) Depth() int {
+ return int(C.X509_STORE_CTX_get_error_depth(self.ctx))
+}
+
+// GetCurrentCert returns the current certificate. The certificate returned
+// is only valid for the lifetime of the underlying X509_STORE_CTX.
+func (self *CertificateStoreCtx) GetCurrentCert() *Certificate {
+ x509 := C.X509_STORE_CTX_get_current_cert(self.ctx)
+ if x509 == nil {
+ return nil
+ }
+ // add a ref
+ C.CRYPTO_add_not_a_macro(&x509.references, 1, C.CRYPTO_LOCK_X509)
+ cert := &Certificate{
+ x: x509,
+ }
+ runtime.SetFinalizer(cert, func(cert *Certificate) {
+ C.X509_free(cert.x)
+ })
+ return cert
+}
+
+// LoadVerifyLocations tells the context to trust all certificate authorities
+// provided in either the ca_file or the ca_path.
+// See http://www.openssl.org/docs/ssl/SSL_CTX_load_verify_locations.html for
+// more.
+func (c *Ctx) LoadVerifyLocations(ca_file string, ca_path string) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ var c_ca_file, c_ca_path *C.char
+ if ca_file != "" {
+ c_ca_file = C.CString(ca_file)
+ defer C.free(unsafe.Pointer(c_ca_file))
+ }
+ if ca_path != "" {
+ c_ca_path = C.CString(ca_path)
+ defer C.free(unsafe.Pointer(c_ca_path))
+ }
+ if C.SSL_CTX_load_verify_locations(c.ctx, c_ca_file, c_ca_path) != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
+
+type Options uint
+
+const (
+ // NoCompression is only valid if you are using OpenSSL 1.0.1 or newer
+ NoCompression Options = C.SSL_OP_NO_COMPRESSION
+ NoSSLv2 Options = C.SSL_OP_NO_SSLv2
+ NoSSLv3 Options = C.SSL_OP_NO_SSLv3
+ NoTLSv1 Options = C.SSL_OP_NO_TLSv1
+ CipherServerPreference Options = C.SSL_OP_CIPHER_SERVER_PREFERENCE
+ NoSessionResumptionOrRenegotiation Options = C.SSL_OP_NO_SESSION_RESUMPTION_ON_RENEGOTIATION
+ OpAll Options = C.SSL_OP_ALL
+)
+
+// SetOptions sets context options. See
+// http://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
+func (c *Ctx) SetOptions(options Options) Options {
+ return Options(C.SSL_CTX_set_options_not_a_macro(
+ c.ctx, C.long(options)))
+}
+
+func (c *Ctx) ClearOptions(options Options) Options {
+ return Options(C.SSL_CTX_clear_options_not_a_macro(
+ c.ctx, C.long(options)))
+}
+
+// GetOptions returns context options. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
+func (c *Ctx) GetOptions() Options {
+ return Options(C.SSL_CTX_get_options_not_a_macro(c.ctx))
+}
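+
+// For example, hardening a context (sketch):
+//
+//    ctx.SetOptions(NoSSLv2 | NoSSLv3 | NoCompression | CipherServerPreference)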
+
+type Modes int
+
+const (
+ // ReleaseBuffers is only valid if you are using OpenSSL 1.0.1 or newer
+ ReleaseBuffers Modes = C.SSL_MODE_RELEASE_BUFFERS
+ AutoRetry Modes = C.SSL_MODE_AUTO_RETRY
+)
+
+// SetMode sets context modes. See
+// http://www.openssl.org/docs/ssl/SSL_CTX_set_mode.html
+func (c *Ctx) SetMode(modes Modes) Modes {
+ return Modes(C.SSL_CTX_set_mode_not_a_macro(c.ctx, C.long(modes)))
+}
+
+// GetMode returns context modes. See
+// http://www.openssl.org/docs/ssl/SSL_CTX_set_mode.html
+func (c *Ctx) GetMode() Modes {
+ return Modes(C.SSL_CTX_get_mode_not_a_macro(c.ctx))
+}
+
+type VerifyOptions int
+
+const (
+ VerifyNone VerifyOptions = C.SSL_VERIFY_NONE
+ VerifyPeer VerifyOptions = C.SSL_VERIFY_PEER
+ VerifyFailIfNoPeerCert VerifyOptions = C.SSL_VERIFY_FAIL_IF_NO_PEER_CERT
+ VerifyClientOnce VerifyOptions = C.SSL_VERIFY_CLIENT_ONCE
+)
+
+type Filetypes int
+
+const (
+ FiletypePEM Filetypes = C.SSL_FILETYPE_PEM
+ FiletypeASN1 Filetypes = C.SSL_FILETYPE_ASN1
+)
+
+type VerifyCallback func(ok bool, store *CertificateStoreCtx) bool
+
+//export verify_cb_thunk
+func verify_cb_thunk(p unsafe.Pointer, ok C.int, ctx *C.X509_STORE_CTX) C.int {
+ defer func() {
+ if err := recover(); err != nil {
+ logger.Critf("openssl: verify callback panic'd: %v", err)
+ os.Exit(1)
+ }
+ }()
+ verify_cb := (*Ctx)(p).verify_cb
+ // if verify_cb is nil, pass OpenSSL's verdict (ok) through unchanged
+ if verify_cb != nil {
+ store := &CertificateStoreCtx{ctx: ctx}
+ if verify_cb(ok == 1, store) {
+ ok = 1
+ } else {
+ ok = 0
+ }
+ }
+ return ok
+}
+
+// SetVerify controls peer verification settings. See
+// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
+func (c *Ctx) SetVerify(options VerifyOptions, verify_cb VerifyCallback) {
+ c.verify_cb = verify_cb
+ if verify_cb != nil {
+ C.SSL_CTX_set_verify(c.ctx, C.int(options), (*[0]byte)(C.verify_cb))
+ } else {
+ C.SSL_CTX_set_verify(c.ctx, C.int(options), nil)
+ }
+}
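+
+// A sketch of a custom verify callback that reports failures but defers to
+// OpenSSL's verdict (the logging call is an assumption):
+//
+//    ctx.SetVerify(VerifyPeer, func(ok bool, store *CertificateStoreCtx) bool {
+//        if !ok {
+//            logger.Errorf("verify error at depth %d: %v", store.Depth(), store.Err())
+//        }
+//        return ok
+//    })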
+
+func (c *Ctx) SetVerifyMode(options VerifyOptions) {
+ c.SetVerify(options, c.verify_cb)
+}
+
+func (c *Ctx) SetVerifyCallback(verify_cb VerifyCallback) {
+ c.SetVerify(c.VerifyMode(), verify_cb)
+}
+
+func (c *Ctx) GetVerifyCallback() VerifyCallback {
+ return c.verify_cb
+}
+
+func (c *Ctx) VerifyMode() VerifyOptions {
+ return VerifyOptions(C.SSL_CTX_get_verify_mode(c.ctx))
+}
+
+// SetVerifyDepth controls how many certificates deep the certificate
+// verification logic is willing to follow a certificate chain. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
+func (c *Ctx) SetVerifyDepth(depth int) {
+ C.SSL_CTX_set_verify_depth(c.ctx, C.int(depth))
+}
+
+// GetVerifyDepth controls how many certificates deep the certificate
+// verification logic is willing to follow a certificate chain. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
+func (c *Ctx) GetVerifyDepth() int {
+ return int(C.SSL_CTX_get_verify_depth(c.ctx))
+}
+
+type TLSExtServernameCallback func(ssl *SSL) SSLTLSExtErr
+
+func (c *Ctx) SetSessionId(session_id []byte) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ var ptr *C.uchar
+ if len(session_id) > 0 {
+ ptr = (*C.uchar)(unsafe.Pointer(&session_id[0]))
+ }
+ if int(C.SSL_CTX_set_session_id_context(c.ctx, ptr,
+ C.uint(len(session_id)))) == 0 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
+
+// SetCipherList sets the list of available ciphers. The format of the list is
+// described at http://www.openssl.org/docs/apps/ciphers.html, but see
+// http://www.openssl.org/docs/ssl/SSL_CTX_set_cipher_list.html for more.
+func (c *Ctx) SetCipherList(list string) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+ clist := C.CString(list)
+ defer C.free(unsafe.Pointer(clist))
+ if int(C.SSL_CTX_set_cipher_list(c.ctx, clist)) == 0 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
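+
+// For example, restricting the context to a conservative OpenSSL cipher
+// string (sketch):
+//
+//    if err := ctx.SetCipherList("HIGH:!aNULL:!MD5"); err != nil {
+//        return err
+//    }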
+
+type SessionCacheModes int
+
+const (
+ SessionCacheOff SessionCacheModes = C.SSL_SESS_CACHE_OFF
+ SessionCacheClient SessionCacheModes = C.SSL_SESS_CACHE_CLIENT
+ SessionCacheServer SessionCacheModes = C.SSL_SESS_CACHE_SERVER
+ SessionCacheBoth SessionCacheModes = C.SSL_SESS_CACHE_BOTH
+ NoAutoClear SessionCacheModes = C.SSL_SESS_CACHE_NO_AUTO_CLEAR
+ NoInternalLookup SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL_LOOKUP
+ NoInternalStore SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL_STORE
+ NoInternal SessionCacheModes = C.SSL_SESS_CACHE_NO_INTERNAL
+)
+
+// SetSessionCacheMode enables or disables session caching. See
+// http://www.openssl.org/docs/ssl/SSL_CTX_set_session_cache_mode.html
+func (c *Ctx) SetSessionCacheMode(modes SessionCacheModes) SessionCacheModes {
+ return SessionCacheModes(
+ C.SSL_CTX_set_session_cache_mode_not_a_macro(c.ctx, C.long(modes)))
+}
+
+// SetTimeout sets the session cache timeout and returns the previously set
+// value. See https://www.openssl.org/docs/ssl/SSL_CTX_set_timeout.html
+func (c *Ctx) SetTimeout(t time.Duration) time.Duration {
+ prev := C.SSL_CTX_set_timeout_not_a_macro(c.ctx, C.long(t/time.Second))
+ return time.Duration(prev) * time.Second
+}
+
+// GetTimeout returns the session cache timeout.
+// See https://www.openssl.org/docs/ssl/SSL_CTX_set_timeout.html
+func (c *Ctx) GetTimeout() time.Duration {
+ return time.Duration(C.SSL_CTX_get_timeout_not_a_macro(c.ctx)) * time.Second
+}
+
+// SessSetCacheSize sets the session cache size and returns the previously
+// set value. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_sess_set_cache_size.html
+func (c *Ctx) SessSetCacheSize(t int) int {
+ return int(C.SSL_CTX_sess_set_cache_size_not_a_macro(c.ctx, C.long(t)))
+}
+
+// SessGetCacheSize returns the session cache size. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_sess_set_cache_size.html
+func (c *Ctx) SessGetCacheSize() int {
+ return int(C.SSL_CTX_sess_get_cache_size_not_a_macro(c.ctx))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ctx_test.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ctx_test.go
new file mode 100644
index 00000000000..9644e518bf3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ctx_test.go
@@ -0,0 +1,48 @@
+// Copyright (C) 2014 Ryan Hileman
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openssl
+
+import (
+ "testing"
+ "time"
+)
+
+func TestCtxTimeoutOption(t *testing.T) {
+ ctx, _ := NewCtx()
+ oldTimeout1 := ctx.GetTimeout()
+ newTimeout1 := oldTimeout1 + (time.Duration(99) * time.Second)
+ oldTimeout2 := ctx.SetTimeout(newTimeout1)
+ newTimeout2 := ctx.GetTimeout()
+ if oldTimeout1 != oldTimeout2 {
+ t.Error("SetTimeout() returns something undocumented")
+ }
+ if newTimeout1 != newTimeout2 {
+ t.Error("SetTimeout() does not save anything to ctx")
+ }
+}
+
+func TestCtxSessCacheSizeOption(t *testing.T) {
+ ctx, _ := NewCtx()
+ oldSize1 := ctx.SessGetCacheSize()
+ newSize1 := oldSize1 + 42
+ oldSize2 := ctx.SessSetCacheSize(newSize1)
+ newSize2 := ctx.SessGetCacheSize()
+ if oldSize1 != oldSize2 {
+ t.Error("SessSetCacheSize() returns something undocumented")
+ }
+ if newSize1 != newSize2 {
+ t.Error("SessSetCacheSize() does not save anything to ctx")
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/dhparam.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/dhparam.go
new file mode 100644
index 00000000000..a698645c1ec
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/dhparam.go
@@ -0,0 +1,65 @@
+// +build cgo
+
+package openssl
+
+/*
+#include <openssl/crypto.h>
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+#include <openssl/conf.h>
+#include <openssl/dh.h>
+
+static long SSL_CTX_set_tmp_dh_not_a_macro(SSL_CTX* ctx, DH *dh) {
+ return SSL_CTX_set_tmp_dh(ctx, dh);
+}
+*/
+import "C"
+
+import (
+ "errors"
+ "runtime"
+ "unsafe"
+)
+
+type DH struct {
+ dh *C.struct_dh_st
+}
+
+// LoadDHParametersFromPEM loads the Diffie-Hellman parameters from
+// a PEM-encoded block.
+func LoadDHParametersFromPEM(pem_block []byte) (*DH, error) {
+ if len(pem_block) == 0 {
+ return nil, errors.New("empty pem block")
+ }
+ bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]),
+ C.int(len(pem_block)))
+ if bio == nil {
+ return nil, errors.New("failed creating bio")
+ }
+ defer C.BIO_free(bio)
+
+ params := C.PEM_read_bio_DHparams(bio, nil, nil, nil)
+ if params == nil {
+ return nil, errors.New("failed reading dh parameters")
+ }
+ dhparams := &DH{dh: params}
+ runtime.SetFinalizer(dhparams, func(dhparams *DH) {
+ C.DH_free(dhparams.dh)
+ })
+ return dhparams, nil
+}
+
+// SetDHParameters sets the DH group (DH parameters) used to
+// negotiate an ephemeral DH key during handshaking.
+func (c *Ctx) SetDHParameters(dh *DH) error {
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ if int(C.SSL_CTX_set_tmp_dh_not_a_macro(c.ctx, dh.dh)) != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
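+
+// Typical use (sketch; the file path is an assumption):
+//
+//    pem, err := ioutil.ReadFile("dhparams.pem")
+//    if err != nil {
+//        return err
+//    }
+//    dh, err := LoadDHParametersFromPEM(pem)
+//    if err != nil {
+//        return err
+//    }
+//    if err = ctx.SetDHParameters(dh); err != nil {
+//        return err
+//    }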
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/digest.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/digest.go
new file mode 100644
index 00000000000..44d4d001b13
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/digest.go
@@ -0,0 +1,53 @@
+// Copyright (C) 2015 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+// #include <openssl/evp.h>
+import "C"
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+// Digest represents an OpenSSL message digest.
+type Digest struct {
+ ptr *C.EVP_MD
+}
+
+// GetDigestByName returns the Digest with the name or nil and an error if the
+// digest was not found.
+func GetDigestByName(name string) (*Digest, error) {
+ cname := C.CString(name)
+ defer C.free(unsafe.Pointer(cname))
+ p := C.EVP_get_digestbyname(cname)
+ if p == nil {
+ return nil, fmt.Errorf("Digest %v not found", name)
+ }
+ // we can consider digests to use static mem; don't need to free
+ return &Digest{ptr: p}, nil
+}
+
+// GetDigestByNid returns the Digest with the NID or nil and an error if the
+// digest was not found.
+func GetDigestByNid(nid NID) (*Digest, error) {
+ sn, err := Nid2ShortName(nid)
+ if err != nil {
+ return nil, err
+ }
+ return GetDigestByName(sn)
+}
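+
+// For example (sketch):
+//
+//    md, err := GetDigestByName("SHA256")
+//    if err != nil {
+//        return err
+//    }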
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/engine.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/engine.go
new file mode 100644
index 00000000000..7a175b70f7c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/engine.go
@@ -0,0 +1,52 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+/*
+#include "openssl/engine.h"
+*/
+import "C"
+
+import (
+ "fmt"
+ "runtime"
+ "unsafe"
+)
+
+type Engine struct {
+ e *C.ENGINE
+}
+
+func EngineById(name string) (*Engine, error) {
+ cname := C.CString(name)
+ defer C.free(unsafe.Pointer(cname))
+ e := &Engine{
+ e: C.ENGINE_by_id(cname),
+ }
+ if e.e == nil {
+ return nil, fmt.Errorf("engine %s missing", name)
+ }
+ if C.ENGINE_init(e.e) == 0 {
+ C.ENGINE_free(e.e)
+ return nil, fmt.Errorf("engine %s not initialized", name)
+ }
+ runtime.SetFinalizer(e, func(e *Engine) {
+ C.ENGINE_finish(e.e)
+ C.ENGINE_free(e.e)
+ })
+ return e, nil
+}
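+
+// For example, loading an engine by its OpenSSL id (sketch; whether a given
+// engine such as "dynamic" is available depends on the OpenSSL build):
+//
+//    e, err := EngineById("dynamic")
+//    if err != nil {
+//        return err
+//    }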
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/fips.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/fips.go
new file mode 100644
index 00000000000..cc463f17a18
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/fips.go
@@ -0,0 +1,22 @@
+// +build cgo
+// +build !darwin
+
+package openssl
+
+/*
+#include <openssl/ssl.h>
+*/
+import "C"
+
+func FIPSModeSet(mode bool) error {
+ var r C.int
+ if mode {
+ r = C.FIPS_mode_set(1)
+ } else {
+ r = C.FIPS_mode_set(0)
+ }
+ if r != 1 {
+ return errorFromErrorQueue()
+ }
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/hostname.c b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/hostname.c
new file mode 100644
index 00000000000..9a610292067
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/hostname.c
@@ -0,0 +1,367 @@
+/* Go-OpenSSL notice:
+ This file is required for all OpenSSL versions prior to 1.1.0. This simply
+ provides the new 1.1.0 X509_check_* methods for hostname validation if they
+ don't already exist.
+ */
+
+#include <openssl/x509.h>
+
+#ifndef X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT
+
+/* portions from x509v3.h and v3_utl.c */
+/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
+ * project.
+ */
+/* ====================================================================
+ * Copyright (c) 1999-2003 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * licensing@OpenSSL.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+/* X509 v3 extension utilities */
+
+#include <stdlib.h>
+#include <openssl/ssl.h>
+#include <openssl/conf.h>
+#include <openssl/x509v3.h>
+
+#define X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT 0x1
+#define X509_CHECK_FLAG_NO_WILDCARDS 0x2
+
+typedef int (*equal_fn)(const unsigned char *pattern, size_t pattern_len,
+ const unsigned char *subject, size_t subject_len);
+
+/* Compare while ASCII ignoring case. */
+static int equal_nocase(const unsigned char *pattern, size_t pattern_len,
+ const unsigned char *subject, size_t subject_len)
+ {
+ if (pattern_len != subject_len)
+ return 0;
+ while (pattern_len)
+ {
+ unsigned char l = *pattern;
+ unsigned char r = *subject;
+ /* The pattern must not contain NUL characters. */
+ if (l == 0)
+ return 0;
+ if (l != r)
+ {
+ if ('A' <= l && l <= 'Z')
+ l = (l - 'A') + 'a';
+ if ('A' <= r && r <= 'Z')
+ r = (r - 'A') + 'a';
+ if (l != r)
+ return 0;
+ }
+ ++pattern;
+ ++subject;
+ --pattern_len;
+ }
+ return 1;
+ }
+
+/* Compare using memcmp. */
+static int equal_case(const unsigned char *pattern, size_t pattern_len,
+ const unsigned char *subject, size_t subject_len)
+{
+ /* The pattern must not contain NUL characters. */
+ if (memchr(pattern, '\0', pattern_len) != NULL)
+ return 0;
+ if (pattern_len != subject_len)
+ return 0;
+ return !memcmp(pattern, subject, pattern_len);
+}
+
+/* RFC 5280, section 7.5, requires that only the domain is compared in
+ a case-insensitive manner. */
+static int equal_email(const unsigned char *a, size_t a_len,
+ const unsigned char *b, size_t b_len)
+ {
+ size_t i = a_len;
+ if (a_len != b_len)
+ return 0;
+ /* We search backwards for the '@' character, so that we do
+ not have to deal with quoted local-parts. The domain part
+ is compared in a case-insensitive manner. */
+ while (i > 0)
+ {
+ --i;
+ if (a[i] == '@' || b[i] == '@')
+ {
+ if (!equal_nocase(a + i, a_len - i,
+ b + i, a_len - i))
+ return 0;
+ break;
+ }
+ }
+ if (i == 0)
+ i = a_len;
+ return equal_case(a, i, b, i);
+ }
+
+/* Compare the prefix and suffix with the subject, and check that the
+ characters in-between are valid. */
+static int wildcard_match(const unsigned char *prefix, size_t prefix_len,
+ const unsigned char *suffix, size_t suffix_len,
+ const unsigned char *subject, size_t subject_len)
+ {
+ const unsigned char *wildcard_start;
+ const unsigned char *wildcard_end;
+ const unsigned char *p;
+ if (subject_len < prefix_len + suffix_len)
+ return 0;
+ if (!equal_nocase(prefix, prefix_len, subject, prefix_len))
+ return 0;
+ wildcard_start = subject + prefix_len;
+ wildcard_end = subject + (subject_len - suffix_len);
+ if (!equal_nocase(wildcard_end, suffix_len, suffix, suffix_len))
+ return 0;
+ /* The wildcard must match at least one character. */
+ if (wildcard_start == wildcard_end)
+ return 0;
+ /* Check that the part matched by the wildcard contains only
+ permitted characters and only matches a single label. */
+ for (p = wildcard_start; p != wildcard_end; ++p)
+ if (!(('0' <= *p && *p <= '9') ||
+ ('A' <= *p && *p <= 'Z') ||
+ ('a' <= *p && *p <= 'z') ||
+ *p == '-'))
+ return 0;
+ return 1;
+ }
+
+/* Checks if the memory region consists of [0-9A-Za-z.-]. */
+static int valid_domain_characters(const unsigned char *p, size_t len)
+ {
+ while (len)
+ {
+ if (!(('0' <= *p && *p <= '9') ||
+ ('A' <= *p && *p <= 'Z') ||
+ ('a' <= *p && *p <= 'z') ||
+ *p == '-' || *p == '.'))
+ return 0;
+ ++p;
+ --len;
+ }
+ return 1;
+ }
+
+/* Find the '*' in a wildcard pattern. If no such character is found
+ or the pattern is otherwise invalid, returns NULL. */
+static const unsigned char *wildcard_find_star(const unsigned char *pattern,
+ size_t pattern_len)
+ {
+ const unsigned char *star = memchr(pattern, '*', pattern_len);
+ size_t dot_count = 0;
+ const unsigned char *suffix_start;
+ size_t suffix_length;
+ if (star == NULL)
+ return NULL;
+ suffix_start = star + 1;
+ suffix_length = (pattern + pattern_len) - (star + 1);
+ if (!(valid_domain_characters(pattern, star - pattern) &&
+ valid_domain_characters(suffix_start, suffix_length)))
+ return NULL;
+ /* Check that the suffix matches at least two labels. */
+ while (suffix_length)
+ {
+ if (*suffix_start == '.')
+ ++dot_count;
+ ++suffix_start;
+ --suffix_length;
+ }
+ if (dot_count < 2)
+ return NULL;
+ return star;
+ }
+
+/* Compare using wildcards. */
+static int equal_wildcard(const unsigned char *pattern, size_t pattern_len,
+ const unsigned char *subject, size_t subject_len)
+ {
+ const unsigned char *star = wildcard_find_star(pattern, pattern_len);
+ if (star == NULL)
+ return equal_nocase(pattern, pattern_len,
+ subject, subject_len);
+ return wildcard_match(pattern, star - pattern,
+ star + 1, (pattern + pattern_len) - star - 1,
+ subject, subject_len);
+ }
+
+/* Compare an ASN1_STRING to a supplied string. If they match
+ * return 1. If cmp_type > 0 only compare if string matches the
+ * type, otherwise convert it to UTF8.
+ */
+
+static int do_check_string(ASN1_STRING *a, int cmp_type, equal_fn equal,
+ const unsigned char *b, size_t blen)
+ {
+ if (!a->data || !a->length)
+ return 0;
+ if (cmp_type > 0)
+ {
+ if (cmp_type != a->type)
+ return 0;
+ if (cmp_type == V_ASN1_IA5STRING)
+ return equal(a->data, a->length, b, blen);
+ if (a->length == (int)blen && !memcmp(a->data, b, blen))
+ return 1;
+ else
+ return 0;
+ }
+ else
+ {
+ int astrlen, rv;
+ unsigned char *astr;
+ astrlen = ASN1_STRING_to_UTF8(&astr, a);
+ if (astrlen < 0)
+ return -1;
+ rv = equal(astr, astrlen, b, blen);
+ OPENSSL_free(astr);
+ return rv;
+ }
+ }
+
+static int do_x509_check(X509 *x, const unsigned char *chk, size_t chklen,
+ unsigned int flags, int check_type)
+ {
+ STACK_OF(GENERAL_NAME) *gens = NULL;
+ X509_NAME *name = NULL;
+ int i;
+ int cnid;
+ int alt_type;
+ equal_fn equal;
+ if (check_type == GEN_EMAIL)
+ {
+ cnid = NID_pkcs9_emailAddress;
+ alt_type = V_ASN1_IA5STRING;
+ equal = equal_email;
+ }
+ else if (check_type == GEN_DNS)
+ {
+ cnid = NID_commonName;
+ alt_type = V_ASN1_IA5STRING;
+ if (flags & X509_CHECK_FLAG_NO_WILDCARDS)
+ equal = equal_nocase;
+ else
+ equal = equal_wildcard;
+ }
+ else
+ {
+ cnid = 0;
+ alt_type = V_ASN1_OCTET_STRING;
+ equal = equal_case;
+ }
+
+ if (chklen == 0)
+ chklen = strlen((const char *)chk);
+
+ gens = X509_get_ext_d2i(x, NID_subject_alt_name, NULL, NULL);
+ if (gens)
+ {
+ int rv = 0;
+ for (i = 0; i < sk_GENERAL_NAME_num(gens); i++)
+ {
+ GENERAL_NAME *gen;
+ ASN1_STRING *cstr;
+ gen = sk_GENERAL_NAME_value(gens, i);
+ if(gen->type != check_type)
+ continue;
+ if (check_type == GEN_EMAIL)
+ cstr = gen->d.rfc822Name;
+ else if (check_type == GEN_DNS)
+ cstr = gen->d.dNSName;
+ else
+ cstr = gen->d.iPAddress;
+ if (do_check_string(cstr, alt_type, equal, chk, chklen))
+ {
+ rv = 1;
+ break;
+ }
+ }
+ GENERAL_NAMES_free(gens);
+ if (rv)
+ return 1;
+ if (!(flags & X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT) || !cnid)
+ return 0;
+ }
+ i = -1;
+ name = X509_get_subject_name(x);
+ while((i = X509_NAME_get_index_by_NID(name, cnid, i)) >= 0)
+ {
+ X509_NAME_ENTRY *ne;
+ ASN1_STRING *str;
+ ne = X509_NAME_get_entry(name, i);
+ str = X509_NAME_ENTRY_get_data(ne);
+ if (do_check_string(str, -1, equal, chk, chklen))
+ return 1;
+ }
+ return 0;
+ }
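+
+/* Note on the search order above: subjectAltName entries are always
+ * consulted first. The subject name is only examined as a fallback when
+ * the certificate has no subjectAltName extension, or when no entry
+ * matched and X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT was passed. */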
+
+int _X509_check_host(X509 *x, const unsigned char *chk, size_t chklen,
+ unsigned int flags)
+ {
+ return do_x509_check(x, chk, chklen, flags, GEN_DNS);
+ }
+
+int _X509_check_email(X509 *x, const unsigned char *chk, size_t chklen,
+ unsigned int flags)
+ {
+ return do_x509_check(x, chk, chklen, flags, GEN_EMAIL);
+ }
+
+int _X509_check_ip(X509 *x, const unsigned char *chk, size_t chklen,
+ unsigned int flags)
+ {
+ return do_x509_check(x, chk, chklen, flags, GEN_IPADD);
+ }
+
+#endif
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/hostname.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/hostname.go
new file mode 100644
index 00000000000..c1d1202fb65
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/hostname.go
@@ -0,0 +1,127 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+/*
+#include <openssl/ssl.h>
+#include <openssl/conf.h>
+#include <openssl/x509.h>
+
+#ifndef X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT
+#define X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT 0x1
+#define X509_CHECK_FLAG_NO_WILDCARDS 0x2
+
+extern int _X509_check_host(X509 *x, const unsigned char *chk, size_t chklen,
+ unsigned int flags);
+extern int _X509_check_email(X509 *x, const unsigned char *chk, size_t chklen,
+ unsigned int flags);
+extern int _X509_check_ip(X509 *x, const unsigned char *chk, size_t chklen,
+ unsigned int flags);
+#endif
+*/
+import "C"
+
+import (
+ "errors"
+ "net"
+ "unsafe"
+)
+
+var (
+ ValidationError = errors.New("Host validation error")
+)
+
+type CheckFlags int
+
+const (
+ AlwaysCheckSubject CheckFlags = C.X509_CHECK_FLAG_ALWAYS_CHECK_SUBJECT
+ NoWildcards CheckFlags = C.X509_CHECK_FLAG_NO_WILDCARDS
+)
+
+// CheckHost checks that the X509 certificate is signed for the provided
+// host name. See http://www.openssl.org/docs/crypto/X509_check_host.html for
+// more. Note that CheckHost does not check the IP field. See VerifyHostname.
+// Specifically returns ValidationError if the Certificate didn't match but
+// there was no internal error.
+func (c *Certificate) CheckHost(host string, flags CheckFlags) error {
+ chost := unsafe.Pointer(C.CString(host))
+ defer C.free(chost)
+ rv := C._X509_check_host(c.x, (*C.uchar)(chost), C.size_t(len(host)),
+ C.uint(flags))
+ if rv > 0 {
+ return nil
+ }
+ if rv == 0 {
+ return ValidationError
+ }
+ return errors.New("hostname validation had an internal failure")
+}
+
+// CheckEmail checks that the X509 certificate is signed for the provided
+// email address. See http://www.openssl.org/docs/crypto/X509_check_host.html
+// for more.
+// Specifically returns ValidationError if the Certificate didn't match but
+// there was no internal error.
+func (c *Certificate) CheckEmail(email string, flags CheckFlags) error {
+ cemail := unsafe.Pointer(C.CString(email))
+ defer C.free(cemail)
+ rv := C._X509_check_email(c.x, (*C.uchar)(cemail), C.size_t(len(email)),
+ C.uint(flags))
+ if rv > 0 {
+ return nil
+ }
+ if rv == 0 {
+ return ValidationError
+ }
+ return errors.New("email validation had an internal failure")
+}
+
+// CheckIP checks that the X509 certificate is signed for the provided
+// IP address. See http://www.openssl.org/docs/crypto/X509_check_host.html
+// for more.
+// Specifically returns ValidationError if the Certificate didn't match but
+// there was no internal error.
+func (c *Certificate) CheckIP(ip net.IP, flags CheckFlags) error {
+ cip := unsafe.Pointer(&ip[0])
+ rv := C._X509_check_ip(c.x, (*C.uchar)(cip), C.size_t(len(ip)),
+ C.uint(flags))
+ if rv > 0 {
+ return nil
+ }
+ if rv == 0 {
+ return ValidationError
+ }
+ return errors.New("ip validation had an internal failure")
+}
+
+// VerifyHostname is a combination of CheckHost and CheckIP. If the provided
+// hostname looks like an IP address, it will be checked as an IP address,
+// otherwise it will be checked as a hostname.
+// Specifically returns ValidationError if the Certificate didn't match but
+// there was no internal error.
+func (c *Certificate) VerifyHostname(host string) error {
+ var ip net.IP
+ if len(host) >= 3 && host[0] == '[' && host[len(host)-1] == ']' {
+ ip = net.ParseIP(host[1 : len(host)-1])
+ } else {
+ ip = net.ParseIP(host)
+ }
+ if ip != nil {
+ return c.CheckIP(ip, 0)
+ }
+ return c.CheckHost(host, 0)
+}
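+
+// A minimal usage sketch (illustrative only; the context setup and the
+// address are assumptions, not part of this file):
+//
+//	conn, err := openssl.Dial("tcp", "example.com:443", ctx,
+//		openssl.InsecureSkipHostVerification)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	cert, err := conn.PeerCertificate()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := cert.VerifyHostname("example.com"); err != nil {
+//		log.Fatal(err)
+//	}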
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/http.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/http.go
new file mode 100644
index 00000000000..e3be32c264a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/http.go
@@ -0,0 +1,61 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openssl
+
+import (
+ "net/http"
+)
+
+// ListenAndServeTLS will take an http.Handler and serve it using OpenSSL over
+// the given tcp address, configured to use the provided cert and key files.
+func ListenAndServeTLS(addr string, cert_file string, key_file string,
+ handler http.Handler) error {
+ return ServerListenAndServeTLS(
+ &http.Server{Addr: addr, Handler: handler}, cert_file, key_file)
+}
+
+// ServerListenAndServeTLS will take an http.Server and serve it using OpenSSL
+// configured to use the provided cert and key files.
+func ServerListenAndServeTLS(srv *http.Server,
+ cert_file, key_file string) error {
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":https"
+ }
+
+ ctx, err := NewCtxFromFiles(cert_file, key_file)
+ if err != nil {
+ return err
+ }
+
+ l, err := Listen("tcp", addr, ctx)
+ if err != nil {
+ return err
+ }
+
+ return srv.Serve(l)
+}
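+
+// A minimal usage sketch (the certificate and key paths below are
+// assumptions for illustration):
+//
+//	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		w.Write([]byte("hello over OpenSSL\n"))
+//	})
+//	log.Fatal(ListenAndServeTLS(":8443", "server.crt", "server.key", handler))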
+
+// TODO: http client integration
+// holy crap, getting this integrated nicely with the Go stdlib HTTP client
+// stack so that it does proxying, connection pooling, and most importantly
+// hostname verification is really hard. So much stuff is hardcoded to just use
+// the built-in TLS lib. I think to get this to work either some crazy
+// hacktackery beyond me, an almost straight up fork of the HTTP client, or
+// serious stdlib internal refactoring is necessary.
+// even more so, good luck getting openssl to use the operating system default
+// root certificates if the user doesn't provide any. sadlol
+// NOTE: if you're going to try and write your own round tripper, at least use
+// openssl.Dial, or equivalent logic
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init.go
new file mode 100644
index 00000000000..7663a480ed2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init.go
@@ -0,0 +1,155 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+/*
+Package openssl is a light wrapper around OpenSSL for Go.
+
+It strives to provide a near-drop-in replacement for the Go standard library
+tls package, while allowing for:
+
+Performance
+
+OpenSSL is battle-tested and optimized C. While Go's built-in library shows
+great promise, it is still young and, in some places, inefficient. This simple
+OpenSSL wrapper can often deliver at least 2x the throughput with the same
+cipher and protocol.
+
+On my lappytop, I get the following benchmarking speeds:
+ BenchmarkSHA1Large_openssl 1000 2611282 ns/op 401.56 MB/s
+ BenchmarkSHA1Large_stdlib 500 3963983 ns/op 264.53 MB/s
+ BenchmarkSHA1Small_openssl 1000000 3476 ns/op 0.29 MB/s
+ BenchmarkSHA1Small_stdlib 5000000 550 ns/op 1.82 MB/s
+ BenchmarkSHA256Large_openssl 200 8085314 ns/op 129.69 MB/s
+ BenchmarkSHA256Large_stdlib 100 18948189 ns/op 55.34 MB/s
+ BenchmarkSHA256Small_openssl 1000000 4262 ns/op 0.23 MB/s
+ BenchmarkSHA256Small_stdlib 1000000 1444 ns/op 0.69 MB/s
+ BenchmarkOpenSSLThroughput 100000 21634 ns/op 47.33 MB/s
+ BenchmarkStdlibThroughput 50000 58974 ns/op 17.36 MB/s
+
+Interoperability
+
+Many systems support OpenSSL with a variety of plugins and modules for things
+such as hardware acceleration in embedded devices.
+
+Greater flexibility and configuration
+
+OpenSSL allows for far greater configuration of corner cases and backwards
+compatibility (such as support of SSLv2). You shouldn't be using SSLv2 if you
+can help it, but sometimes you can't.
+
+Security
+
+Yeah yeah, Heartbleed. But according to the author of the standard library's
+TLS implementation, Go's TLS library is vulnerable to timing attacks. And
+whether or not OpenSSL received the appropriate amount of scrutiny
+pre-Heartbleed, it sure is receiving it now.
+
+Usage
+
+Starting an HTTP server that uses OpenSSL is very easy. It's as simple as:
+ log.Fatal(openssl.ListenAndServeTLS(
+ ":8443", "my_server.crt", "my_server.key", myHandler))
+
+Getting a net.Listener that uses OpenSSL is also easy:
+ ctx, err := openssl.NewCtxFromFiles("my_server.crt", "my_server.key")
+ if err != nil {
+ log.Fatal(err)
+ }
+ l, err := openssl.Listen("tcp", ":7777", ctx)
+
+Making a client connection is straightforward too:
+ ctx, err := NewCtx()
+ if err != nil {
+ log.Fatal(err)
+ }
+ err = ctx.LoadVerifyLocations("/etc/ssl/certs/ca-certificates.crt", "")
+ if err != nil {
+ log.Fatal(err)
+ }
+ conn, err := openssl.Dial("tcp", "localhost:7777", ctx, 0)
+
+Help wanted: To get this library to work with net/http's client, we
+had to fork net/http. It would be nice if an alternate http client library
+supported the generality needed to use OpenSSL instead of crypto/tls.
+*/
+package openssl
+
+/*
+#include <openssl/ssl.h>
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/evp.h>
+#include <openssl/engine.h>
+
+extern int Goopenssl_init_locks();
+extern void Goopenssl_thread_locking_callback(int, int, const char*, int);
+
+static int Goopenssl_init_threadsafety() {
+ // Set up OPENSSL thread safety callbacks. We only set the locking
+ // callback because the default id callback implementation is good
+ // enough for us.
+ int rc = Goopenssl_init_locks();
+ if (rc == 0) {
+ CRYPTO_set_locking_callback(Goopenssl_thread_locking_callback);
+ }
+ return rc;
+}
+
+static void OpenSSL_add_all_algorithms_not_a_macro() {
+ OpenSSL_add_all_algorithms();
+}
+
+*/
+import "C"
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+)
+
+var (
+ sslMutexes []sync.Mutex
+)
+
+func init() {
+ C.OPENSSL_config(nil)
+ C.ENGINE_load_builtin_engines()
+ C.SSL_load_error_strings()
+ C.SSL_library_init()
+ C.OpenSSL_add_all_algorithms_not_a_macro()
+ rc := C.Goopenssl_init_threadsafety()
+ if rc != 0 {
+ panic(fmt.Errorf("Goopenssl_init_locks failed with %d", rc))
+ }
+}
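+
+// Note: the locking callbacks installed above are only required (and only
+// effective) with OpenSSL 1.0.x and earlier; OpenSSL 1.1.0+ manages its
+// own locking and turns CRYPTO_set_locking_callback into a no-op.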
+
+// errorFromErrorQueue needs to run in the same OS thread as the operation
+// that caused the possible error
+func errorFromErrorQueue() error {
+ var errs []string
+ for {
+ err := C.ERR_get_error()
+ if err == 0 {
+ break
+ }
+ errs = append(errs, fmt.Sprintf("%s:%s:%s",
+ C.GoString(C.ERR_lib_error_string(err)),
+ C.GoString(C.ERR_func_error_string(err)),
+ C.GoString(C.ERR_reason_error_string(err))))
+ }
+	return fmt.Errorf("SSL errors: %s", strings.Join(errs, "\n"))
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init_posix.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init_posix.go
new file mode 100644
index 00000000000..03ed0f01bd0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init_posix.go
@@ -0,0 +1,64 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux darwin cgo
+// +build !windows
+
+package openssl
+
+/*
+#include <errno.h>
+#include <openssl/crypto.h>
+#include <pthread.h>
+
+pthread_mutex_t* goopenssl_locks;
+
+int Goopenssl_init_locks() {
+ int rc = 0;
+ int nlock;
+ int i;
+ int locks_needed = CRYPTO_num_locks();
+
+ goopenssl_locks = (pthread_mutex_t*)malloc(
+ sizeof(pthread_mutex_t) * locks_needed);
+ if (!goopenssl_locks) {
+ return ENOMEM;
+ }
+ for (nlock = 0; nlock < locks_needed; ++nlock) {
+ rc = pthread_mutex_init(&goopenssl_locks[nlock], NULL);
+ if (rc != 0) {
+ break;
+ }
+ }
+
+ if (rc != 0) {
+ for (i = nlock - 1; i >= 0; --i) {
+ pthread_mutex_destroy(&goopenssl_locks[i]);
+ }
+ free(goopenssl_locks);
+ goopenssl_locks = NULL;
+ }
+ return rc;
+}
+
+void Goopenssl_thread_locking_callback(int mode, int n, const char *file,
+ int line) {
+ if (mode & CRYPTO_LOCK) {
+ pthread_mutex_lock(&goopenssl_locks[n]);
+ } else {
+ pthread_mutex_unlock(&goopenssl_locks[n]);
+ }
+}
+*/
+import "C"
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init_windows.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init_windows.go
new file mode 100644
index 00000000000..5eca9fa0eac
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/init_windows.go
@@ -0,0 +1,60 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build windows cgo
+
+package openssl
+
+/*
+
+#cgo windows LDFLAGS: -lssleay32 -llibeay32 -L c:/openssl/bin
+#cgo windows CFLAGS: -I"c:/openssl/include"
+
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+#include <errno.h>
+#include <openssl/crypto.h>
+#include <windows.h>
+
+CRITICAL_SECTION* goopenssl_locks;
+
+int Goopenssl_init_locks() {
+ int rc = 0;
+ int nlock;
+ int i;
+ int locks_needed = CRYPTO_num_locks();
+
+ goopenssl_locks = (CRITICAL_SECTION*)malloc(
+ sizeof(*goopenssl_locks) * locks_needed);
+ if (!goopenssl_locks) {
+ return ENOMEM;
+ }
+ for (nlock = 0; nlock < locks_needed; ++nlock) {
+ InitializeCriticalSection(&goopenssl_locks[nlock]);
+ }
+
+ return 0;
+}
+
+void Goopenssl_thread_locking_callback(int mode, int n, const char *file,
+ int line) {
+ if (mode & CRYPTO_LOCK) {
+ EnterCriticalSection(&goopenssl_locks[n]);
+ } else {
+ LeaveCriticalSection(&goopenssl_locks[n]);
+ }
+}
+*/
+import "C"
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/key.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/key.go
new file mode 100644
index 00000000000..c69a101631f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/key.go
@@ -0,0 +1,374 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+// #include <openssl/evp.h>
+// #include <openssl/ssl.h>
+// #include <openssl/conf.h>
+//
+// int EVP_SignInit_not_a_macro(EVP_MD_CTX *ctx, const EVP_MD *type) {
+// return EVP_SignInit(ctx, type);
+// }
+//
+// int EVP_SignUpdate_not_a_macro(EVP_MD_CTX *ctx, const void *d,
+// unsigned int cnt) {
+// return EVP_SignUpdate(ctx, d, cnt);
+// }
+//
+// int EVP_VerifyInit_not_a_macro(EVP_MD_CTX *ctx, const EVP_MD *type) {
+// return EVP_VerifyInit(ctx, type);
+// }
+//
+// int EVP_VerifyUpdate_not_a_macro(EVP_MD_CTX *ctx, const void *d,
+// unsigned int cnt) {
+// return EVP_VerifyUpdate(ctx, d, cnt);
+// }
+//
+// int EVP_PKEY_assign_charp(EVP_PKEY *pkey, int type, char *key) {
+// return EVP_PKEY_assign(pkey, type, key);
+// }
+import "C"
+
+import (
+ "errors"
+ "io/ioutil"
+ "runtime"
+ "unsafe"
+)
+
+type Method *C.EVP_MD
+
+var (
+ SHA256_Method Method = C.EVP_sha256()
+)
+
+type PublicKey interface {
+ // Verifies the data signature using PKCS1.15
+ VerifyPKCS1v15(method Method, data, sig []byte) error
+
+ // MarshalPKIXPublicKeyPEM converts the public key to PEM-encoded PKIX
+ // format
+ MarshalPKIXPublicKeyPEM() (pem_block []byte, err error)
+
+ // MarshalPKIXPublicKeyDER converts the public key to DER-encoded PKIX
+ // format
+ MarshalPKIXPublicKeyDER() (der_block []byte, err error)
+
+ evpPKey() *C.EVP_PKEY
+}
+
+type PrivateKey interface {
+ PublicKey
+
+ // Signs the data using PKCS1.15
+ SignPKCS1v15(Method, []byte) ([]byte, error)
+
+ // MarshalPKCS1PrivateKeyPEM converts the private key to PEM-encoded PKCS1
+ // format
+ MarshalPKCS1PrivateKeyPEM() (pem_block []byte, err error)
+
+ // MarshalPKCS1PrivateKeyDER converts the private key to DER-encoded PKCS1
+ // format
+ MarshalPKCS1PrivateKeyDER() (der_block []byte, err error)
+}
+
+type pKey struct {
+ key *C.EVP_PKEY
+}
+
+func (key *pKey) evpPKey() *C.EVP_PKEY { return key.key }
+
+func (key *pKey) SignPKCS1v15(method Method, data []byte) ([]byte, error) {
+ var ctx C.EVP_MD_CTX
+ C.EVP_MD_CTX_init(&ctx)
+ defer C.EVP_MD_CTX_cleanup(&ctx)
+
+ if 1 != C.EVP_SignInit_not_a_macro(&ctx, method) {
+ return nil, errors.New("signpkcs1v15: failed to init signature")
+ }
+ if len(data) > 0 {
+ if 1 != C.EVP_SignUpdate_not_a_macro(
+ &ctx, unsafe.Pointer(&data[0]), C.uint(len(data))) {
+ return nil, errors.New("signpkcs1v15: failed to update signature")
+ }
+ }
+ sig := make([]byte, C.EVP_PKEY_size(key.key))
+ var sigblen C.uint
+ if 1 != C.EVP_SignFinal(&ctx,
+ ((*C.uchar)(unsafe.Pointer(&sig[0]))), &sigblen, key.key) {
+ return nil, errors.New("signpkcs1v15: failed to finalize signature")
+ }
+ return sig[:sigblen], nil
+}
+
+func (key *pKey) VerifyPKCS1v15(method Method, data, sig []byte) error {
+ var ctx C.EVP_MD_CTX
+ C.EVP_MD_CTX_init(&ctx)
+ defer C.EVP_MD_CTX_cleanup(&ctx)
+
+ if 1 != C.EVP_VerifyInit_not_a_macro(&ctx, method) {
+ return errors.New("verifypkcs1v15: failed to init verify")
+ }
+ if len(data) > 0 {
+ if 1 != C.EVP_VerifyUpdate_not_a_macro(
+ &ctx, unsafe.Pointer(&data[0]), C.uint(len(data))) {
+ return errors.New("verifypkcs1v15: failed to update verify")
+ }
+ }
+ if 1 != C.EVP_VerifyFinal(&ctx,
+ ((*C.uchar)(unsafe.Pointer(&sig[0]))), C.uint(len(sig)), key.key) {
+ return errors.New("verifypkcs1v15: failed to finalize verify")
+ }
+ return nil
+}
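+
+// A sign/verify round trip might look like the following sketch (the PEM
+// bytes and the message are assumptions):
+//
+//	key, err := LoadPrivateKeyFromPEM(pemBytes)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	sig, err := key.SignPKCS1v15(SHA256_Method, []byte("message"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := key.VerifyPKCS1v15(SHA256_Method, []byte("message"), sig); err != nil {
+//		log.Fatal(err)
+//	}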
+
+func (key *pKey) MarshalPKCS1PrivateKeyPEM() (pem_block []byte,
+ err error) {
+ bio := C.BIO_new(C.BIO_s_mem())
+ if bio == nil {
+ return nil, errors.New("failed to allocate memory BIO")
+ }
+ defer C.BIO_free(bio)
+ rsa := (*C.RSA)(C.EVP_PKEY_get1_RSA(key.key))
+ if rsa == nil {
+ return nil, errors.New("failed getting rsa key")
+ }
+ defer C.RSA_free(rsa)
+ if int(C.PEM_write_bio_RSAPrivateKey(bio, rsa, nil, nil, C.int(0), nil,
+ nil)) != 1 {
+ return nil, errors.New("failed dumping private key")
+ }
+ return ioutil.ReadAll(asAnyBio(bio))
+}
+
+func (key *pKey) MarshalPKCS1PrivateKeyDER() (der_block []byte,
+ err error) {
+ bio := C.BIO_new(C.BIO_s_mem())
+ if bio == nil {
+ return nil, errors.New("failed to allocate memory BIO")
+ }
+ defer C.BIO_free(bio)
+ rsa := (*C.RSA)(C.EVP_PKEY_get1_RSA(key.key))
+ if rsa == nil {
+ return nil, errors.New("failed getting rsa key")
+ }
+ defer C.RSA_free(rsa)
+ if int(C.i2d_RSAPrivateKey_bio(bio, rsa)) != 1 {
+ return nil, errors.New("failed dumping private key der")
+ }
+ return ioutil.ReadAll(asAnyBio(bio))
+}
+
+func (key *pKey) MarshalPKIXPublicKeyPEM() (pem_block []byte,
+ err error) {
+ bio := C.BIO_new(C.BIO_s_mem())
+ if bio == nil {
+ return nil, errors.New("failed to allocate memory BIO")
+ }
+ defer C.BIO_free(bio)
+ rsa := (*C.RSA)(C.EVP_PKEY_get1_RSA(key.key))
+ if rsa == nil {
+ return nil, errors.New("failed getting rsa key")
+ }
+ defer C.RSA_free(rsa)
+ if int(C.PEM_write_bio_RSA_PUBKEY(bio, rsa)) != 1 {
+ return nil, errors.New("failed dumping public key pem")
+ }
+ return ioutil.ReadAll(asAnyBio(bio))
+}
+
+func (key *pKey) MarshalPKIXPublicKeyDER() (der_block []byte,
+ err error) {
+ bio := C.BIO_new(C.BIO_s_mem())
+ if bio == nil {
+ return nil, errors.New("failed to allocate memory BIO")
+ }
+ defer C.BIO_free(bio)
+ rsa := (*C.RSA)(C.EVP_PKEY_get1_RSA(key.key))
+ if rsa == nil {
+ return nil, errors.New("failed getting rsa key")
+ }
+ defer C.RSA_free(rsa)
+ if int(C.i2d_RSA_PUBKEY_bio(bio, rsa)) != 1 {
+ return nil, errors.New("failed dumping public key der")
+ }
+ return ioutil.ReadAll(asAnyBio(bio))
+}
+
+// LoadPrivateKeyFromPEM loads a private key from a PEM-encoded block.
+func LoadPrivateKeyFromPEM(pem_block []byte) (PrivateKey, error) {
+ if len(pem_block) == 0 {
+ return nil, errors.New("empty pem block")
+ }
+ bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]),
+ C.int(len(pem_block)))
+ if bio == nil {
+ return nil, errors.New("failed creating bio")
+ }
+ defer C.BIO_free(bio)
+
+ rsakey := C.PEM_read_bio_RSAPrivateKey(bio, nil, nil, nil)
+ if rsakey == nil {
+ return nil, errors.New("failed reading rsa key")
+ }
+ defer C.RSA_free(rsakey)
+
+ // convert to PKEY
+ key := C.EVP_PKEY_new()
+ if key == nil {
+ return nil, errors.New("failed converting to evp_pkey")
+ }
+ if C.EVP_PKEY_set1_RSA(key, (*C.struct_rsa_st)(rsakey)) != 1 {
+ C.EVP_PKEY_free(key)
+ return nil, errors.New("failed converting to evp_pkey")
+ }
+
+ p := &pKey{key: key}
+ runtime.SetFinalizer(p, func(p *pKey) {
+ C.EVP_PKEY_free(p.key)
+ })
+ return p, nil
+}
+
+// LoadPrivateKeyFromPEMWidthPassword loads an encrypted private key from a
+// PEM-encoded block, using the supplied password to decrypt it.
+func LoadPrivateKeyFromPEMWidthPassword(pem_block []byte, password string) (
+ PrivateKey, error) {
+ if len(pem_block) == 0 {
+ return nil, errors.New("empty pem block")
+ }
+ bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]),
+ C.int(len(pem_block)))
+ if bio == nil {
+ return nil, errors.New("failed creating bio")
+ }
+ defer C.BIO_free(bio)
+ cs := C.CString(password)
+ defer C.free(unsafe.Pointer(cs))
+ rsakey := C.PEM_read_bio_RSAPrivateKey(bio, nil, nil, unsafe.Pointer(cs))
+ if rsakey == nil {
+ return nil, errors.New("failed reading rsa key")
+ }
+ defer C.RSA_free(rsakey)
+
+ // convert to PKEY
+ key := C.EVP_PKEY_new()
+ if key == nil {
+ return nil, errors.New("failed converting to evp_pkey")
+ }
+ if C.EVP_PKEY_set1_RSA(key, (*C.struct_rsa_st)(rsakey)) != 1 {
+ C.EVP_PKEY_free(key)
+ return nil, errors.New("failed converting to evp_pkey")
+ }
+
+ p := &pKey{key: key}
+ runtime.SetFinalizer(p, func(p *pKey) {
+ C.EVP_PKEY_free(p.key)
+ })
+ return p, nil
+}
+
+// LoadPublicKeyFromPEM loads a public key from a PEM-encoded block.
+func LoadPublicKeyFromPEM(pem_block []byte) (PublicKey, error) {
+ if len(pem_block) == 0 {
+ return nil, errors.New("empty pem block")
+ }
+ bio := C.BIO_new_mem_buf(unsafe.Pointer(&pem_block[0]),
+ C.int(len(pem_block)))
+ if bio == nil {
+ return nil, errors.New("failed creating bio")
+ }
+ defer C.BIO_free(bio)
+
+ rsakey := C.PEM_read_bio_RSA_PUBKEY(bio, nil, nil, nil)
+ if rsakey == nil {
+ return nil, errors.New("failed reading rsa key")
+ }
+ defer C.RSA_free(rsakey)
+
+ // convert to PKEY
+ key := C.EVP_PKEY_new()
+ if key == nil {
+ return nil, errors.New("failed converting to evp_pkey")
+ }
+ if C.EVP_PKEY_set1_RSA(key, (*C.struct_rsa_st)(rsakey)) != 1 {
+ C.EVP_PKEY_free(key)
+ return nil, errors.New("failed converting to evp_pkey")
+ }
+
+ p := &pKey{key: key}
+ runtime.SetFinalizer(p, func(p *pKey) {
+ C.EVP_PKEY_free(p.key)
+ })
+ return p, nil
+}
+
+// LoadPublicKeyFromDER loads a public key from a DER-encoded block.
+func LoadPublicKeyFromDER(der_block []byte) (PublicKey, error) {
+ if len(der_block) == 0 {
+ return nil, errors.New("empty der block")
+ }
+ bio := C.BIO_new_mem_buf(unsafe.Pointer(&der_block[0]),
+ C.int(len(der_block)))
+ if bio == nil {
+ return nil, errors.New("failed creating bio")
+ }
+ defer C.BIO_free(bio)
+
+ rsakey := C.d2i_RSA_PUBKEY_bio(bio, nil)
+ if rsakey == nil {
+ return nil, errors.New("failed reading rsa key")
+ }
+ defer C.RSA_free(rsakey)
+
+ // convert to PKEY
+ key := C.EVP_PKEY_new()
+ if key == nil {
+ return nil, errors.New("failed converting to evp_pkey")
+ }
+ if C.EVP_PKEY_set1_RSA(key, (*C.struct_rsa_st)(rsakey)) != 1 {
+ C.EVP_PKEY_free(key)
+ return nil, errors.New("failed converting to evp_pkey")
+ }
+
+ p := &pKey{key: key}
+ runtime.SetFinalizer(p, func(p *pKey) {
+ C.EVP_PKEY_free(p.key)
+ })
+ return p, nil
+}
+
+// GenerateRSAKey generates a new RSA private key with an exponent of 3.
+func GenerateRSAKey(bits int) (PrivateKey, error) {
+ exponent := 3
+ rsa := C.RSA_generate_key(C.int(bits), C.ulong(exponent), nil, nil)
+ if rsa == nil {
+ return nil, errors.New("failed to generate RSA key")
+ }
+ key := C.EVP_PKEY_new()
+ if key == nil {
+ return nil, errors.New("failed to allocate EVP_PKEY")
+ }
+ if C.EVP_PKEY_assign_charp(key, C.EVP_PKEY_RSA, (*C.char)(unsafe.Pointer(rsa))) != 1 {
+ C.EVP_PKEY_free(key)
+ return nil, errors.New("failed to assign RSA key")
+ }
+ p := &pKey{key: key}
+ runtime.SetFinalizer(p, func(p *pKey) {
+ C.EVP_PKEY_free(p.key)
+ })
+ return p, nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/key_test.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/key_test.go
new file mode 100644
index 00000000000..54752d381bf
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/key_test.go
@@ -0,0 +1,149 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openssl
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/hex"
+ pem_pkg "encoding/pem"
+ "io/ioutil"
+ "testing"
+)
+
+func TestMarshal(t *testing.T) {
+ key, err := LoadPrivateKeyFromPEM(keyBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cert, err := LoadCertificateFromPEM(certBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ pem, err := cert.MarshalPEM()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(pem, certBytes) {
+ ioutil.WriteFile("generated", pem, 0644)
+ ioutil.WriteFile("hardcoded", certBytes, 0644)
+ t.Fatal("invalid cert pem bytes")
+ }
+
+ pem, err = key.MarshalPKCS1PrivateKeyPEM()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(pem, keyBytes) {
+ ioutil.WriteFile("generated", pem, 0644)
+ ioutil.WriteFile("hardcoded", keyBytes, 0644)
+ t.Fatal("invalid private key pem bytes")
+ }
+ tls_cert, err := tls.X509KeyPair(certBytes, keyBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tls_key, ok := tls_cert.PrivateKey.(*rsa.PrivateKey)
+ if !ok {
+		t.Fatal("tls_cert.PrivateKey is not an RSA private key")
+ }
+ _ = tls_key
+
+ der, err := key.MarshalPKCS1PrivateKeyDER()
+ if err != nil {
+ t.Fatal(err)
+ }
+ tls_der := x509.MarshalPKCS1PrivateKey(tls_key)
+ if !bytes.Equal(der, tls_der) {
+		t.Fatalf("invalid private key der bytes: %s\n vs. %s\n",
+			hex.Dump(der), hex.Dump(tls_der))
+ }
+
+ der, err = key.MarshalPKIXPublicKeyDER()
+ if err != nil {
+ t.Fatal(err)
+ }
+ tls_der, err = x509.MarshalPKIXPublicKey(&tls_key.PublicKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(der, tls_der) {
+ ioutil.WriteFile("generated", []byte(hex.Dump(der)), 0644)
+ ioutil.WriteFile("hardcoded", []byte(hex.Dump(tls_der)), 0644)
+ t.Fatal("invalid public key der bytes")
+ }
+
+ pem, err = key.MarshalPKIXPublicKeyPEM()
+ if err != nil {
+ t.Fatal(err)
+ }
+ tls_pem := pem_pkg.EncodeToMemory(&pem_pkg.Block{
+ Type: "PUBLIC KEY", Bytes: tls_der})
+ if !bytes.Equal(pem, tls_pem) {
+ ioutil.WriteFile("generated", pem, 0644)
+ ioutil.WriteFile("hardcoded", tls_pem, 0644)
+ t.Fatal("invalid public key pem bytes")
+ }
+
+ loaded_pubkey_from_pem, err := LoadPublicKeyFromPEM(pem)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ loaded_pubkey_from_der, err := LoadPublicKeyFromDER(der)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ new_der_from_pem, err := loaded_pubkey_from_pem.MarshalPKIXPublicKeyDER()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ new_der_from_der, err := loaded_pubkey_from_der.MarshalPKIXPublicKeyDER()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(new_der_from_der, tls_der) {
+ ioutil.WriteFile("generated", []byte(hex.Dump(new_der_from_der)), 0644)
+ ioutil.WriteFile("hardcoded", []byte(hex.Dump(tls_der)), 0644)
+ t.Fatal("invalid public key der bytes")
+ }
+
+ if !bytes.Equal(new_der_from_pem, tls_der) {
+ ioutil.WriteFile("generated", []byte(hex.Dump(new_der_from_pem)), 0644)
+ ioutil.WriteFile("hardcoded", []byte(hex.Dump(tls_der)), 0644)
+ t.Fatal("invalid public key der bytes")
+ }
+}
+
+func TestGenerate(t *testing.T) {
+ key, err := GenerateRSAKey(2048)
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = key.MarshalPKIXPublicKeyPEM()
+ if err != nil {
+ t.Fatal(err)
+ }
+ _, err = key.MarshalPKCS1PrivateKeyPEM()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/net.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/net.go
new file mode 100644
index 00000000000..3cdd040d4d4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/net.go
@@ -0,0 +1,134 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openssl
+
+import (
+ "errors"
+ "net"
+)
+
+type listener struct {
+ net.Listener
+ ctx *Ctx
+}
+
+func (l *listener) Accept() (c net.Conn, err error) {
+ c, err = l.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+ ssl_c, err := Server(c, l.ctx)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ return ssl_c, nil
+}
+
+// NewListener wraps an existing net.Listener such that all accepted
+// connections are wrapped as OpenSSL server connections using the provided
+// context ctx.
+func NewListener(inner net.Listener, ctx *Ctx) net.Listener {
+ return &listener{
+ Listener: inner,
+ ctx: ctx}
+}
+
+// Listen is a wrapper around net.Listen that wraps incoming connections with
+// an OpenSSL server connection using the provided context ctx.
+func Listen(network, laddr string, ctx *Ctx) (net.Listener, error) {
+ if ctx == nil {
+ return nil, errors.New("no ssl context provided")
+ }
+ l, err := net.Listen(network, laddr)
+ if err != nil {
+ return nil, err
+ }
+ return NewListener(l, ctx), nil
+}
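+
+// A typical accept loop over such a listener (sketch; the context setup
+// and handleConn are assumed):
+//
+//	l, err := Listen("tcp", ":7777", ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for {
+//		c, err := l.Accept()
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		go handleConn(c)
+//	}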
+
+type DialFlags int
+
+const (
+ InsecureSkipHostVerification DialFlags = 1 << iota
+ DisableSNI
+)
+
+// Dial will connect to network/address and then wrap the corresponding
+// underlying connection with an OpenSSL client connection using context ctx.
+// If flags includes InsecureSkipHostVerification, the server certificate's
+// hostname will not be checked to match the hostname in addr. Otherwise, flags
+// should be 0.
+//
+// Dial probably won't work for you unless you set a verify location or add
+// some certs to the certificate store of the client context you're using.
+// This library is not nice enough to use the system certificate store by
+// default for you yet.
+func Dial(network, addr string, ctx *Ctx, flags DialFlags) (*Conn, error) {
+ return DialSession(network, addr, ctx, flags, nil)
+}
+
+// DialSession will connect to network/address and then wrap the corresponding
+// underlying connection with an OpenSSL client connection using context ctx.
+// If flags includes InsecureSkipHostVerification, the server certificate's
+// hostname will not be checked to match the hostname in addr. Otherwise, flags
+// should be 0.
+//
+// Dial probably won't work for you unless you set a verify location or add
+// some certs to the certificate store of the client context you're using.
+// This library is not nice enough to use the system certificate store by
+// default for you yet.
+//
+// If session is not nil it will be used to resume the tls state. The session
+// can be retrieved from the GetSession method on the Conn.
+func DialSession(network, addr string, ctx *Ctx, flags DialFlags,
+ session []byte) (*Conn, error) {
+
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ if ctx == nil {
+ var err error
+ ctx, err = NewCtx()
+ if err != nil {
+ return nil, err
+ }
+ // TODO: use operating system default certificate chain?
+ }
+ c, err := net.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := Client(c, ctx)
+ if err != nil {
+ c.Close()
+ return nil, err
+ }
+ // XXX removed SNI
+ err = conn.Handshake()
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ if flags&InsecureSkipHostVerification == 0 {
+ err = conn.VerifyHostname(host)
+ if err != nil {
+ conn.Close()
+ return nil, err
+ }
+ }
+ return conn, nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/nid.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/nid.go
new file mode 100644
index 00000000000..c80f237b605
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/nid.go
@@ -0,0 +1,199 @@
+// Copyright (C) 2014 Ryan Hileman
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openssl
+
+type NID int
+
+const (
+ NID_rsadsi NID = 1
+ NID_pkcs NID = 2
+ NID_md2 NID = 3
+ NID_md5 NID = 4
+ NID_rc4 NID = 5
+ NID_rsaEncryption NID = 6
+ NID_md2WithRSAEncryption NID = 7
+ NID_md5WithRSAEncryption NID = 8
+ NID_pbeWithMD2AndDES_CBC NID = 9
+ NID_pbeWithMD5AndDES_CBC NID = 10
+ NID_X500 NID = 11
+ NID_X509 NID = 12
+ NID_commonName NID = 13
+ NID_countryName NID = 14
+ NID_localityName NID = 15
+ NID_stateOrProvinceName NID = 16
+ NID_organizationName NID = 17
+ NID_organizationalUnitName NID = 18
+ NID_rsa NID = 19
+ NID_pkcs7 NID = 20
+ NID_pkcs7_data NID = 21
+ NID_pkcs7_signed NID = 22
+ NID_pkcs7_enveloped NID = 23
+ NID_pkcs7_signedAndEnveloped NID = 24
+ NID_pkcs7_digest NID = 25
+ NID_pkcs7_encrypted NID = 26
+ NID_pkcs3 NID = 27
+ NID_dhKeyAgreement NID = 28
+ NID_des_ecb NID = 29
+ NID_des_cfb64 NID = 30
+ NID_des_cbc NID = 31
+ NID_des_ede NID = 32
+ NID_des_ede3 NID = 33
+ NID_idea_cbc NID = 34
+ NID_idea_cfb64 NID = 35
+ NID_idea_ecb NID = 36
+ NID_rc2_cbc NID = 37
+ NID_rc2_ecb NID = 38
+ NID_rc2_cfb64 NID = 39
+ NID_rc2_ofb64 NID = 40
+ NID_sha NID = 41
+ NID_shaWithRSAEncryption NID = 42
+ NID_des_ede_cbc NID = 43
+ NID_des_ede3_cbc NID = 44
+ NID_des_ofb64 NID = 45
+ NID_idea_ofb64 NID = 46
+ NID_pkcs9 NID = 47
+ NID_pkcs9_emailAddress NID = 48
+ NID_pkcs9_unstructuredName NID = 49
+ NID_pkcs9_contentType NID = 50
+ NID_pkcs9_messageDigest NID = 51
+ NID_pkcs9_signingTime NID = 52
+ NID_pkcs9_countersignature NID = 53
+ NID_pkcs9_challengePassword NID = 54
+ NID_pkcs9_unstructuredAddress NID = 55
+ NID_pkcs9_extCertAttributes NID = 56
+ NID_netscape NID = 57
+ NID_netscape_cert_extension NID = 58
+ NID_netscape_data_type NID = 59
+ NID_des_ede_cfb64 NID = 60
+ NID_des_ede3_cfb64 NID = 61
+ NID_des_ede_ofb64 NID = 62
+ NID_des_ede3_ofb64 NID = 63
+ NID_sha1 NID = 64
+ NID_sha1WithRSAEncryption NID = 65
+ NID_dsaWithSHA NID = 66
+ NID_dsa_2 NID = 67
+ NID_pbeWithSHA1AndRC2_CBC NID = 68
+ NID_id_pbkdf2 NID = 69
+ NID_dsaWithSHA1_2 NID = 70
+ NID_netscape_cert_type NID = 71
+ NID_netscape_base_url NID = 72
+ NID_netscape_revocation_url NID = 73
+ NID_netscape_ca_revocation_url NID = 74
+ NID_netscape_renewal_url NID = 75
+ NID_netscape_ca_policy_url NID = 76
+ NID_netscape_ssl_server_name NID = 77
+ NID_netscape_comment NID = 78
+ NID_netscape_cert_sequence NID = 79
+ NID_desx_cbc NID = 80
+ NID_id_ce NID = 81
+ NID_subject_key_identifier NID = 82
+ NID_key_usage NID = 83
+ NID_private_key_usage_period NID = 84
+ NID_subject_alt_name NID = 85
+ NID_issuer_alt_name NID = 86
+ NID_basic_constraints NID = 87
+ NID_crl_number NID = 88
+ NID_certificate_policies NID = 89
+ NID_authority_key_identifier NID = 90
+ NID_bf_cbc NID = 91
+ NID_bf_ecb NID = 92
+ NID_bf_cfb64 NID = 93
+ NID_bf_ofb64 NID = 94
+ NID_mdc2 NID = 95
+ NID_mdc2WithRSA NID = 96
+ NID_rc4_40 NID = 97
+ NID_rc2_40_cbc NID = 98
+ NID_givenName NID = 99
+ NID_surname NID = 100
+ NID_initials NID = 101
+ NID_uniqueIdentifier NID = 102
+ NID_crl_distribution_points NID = 103
+ NID_md5WithRSA NID = 104
+ NID_serialNumber NID = 105
+ NID_title NID = 106
+ NID_description NID = 107
+ NID_cast5_cbc NID = 108
+ NID_cast5_ecb NID = 109
+ NID_cast5_cfb64 NID = 110
+ NID_cast5_ofb64 NID = 111
+ NID_pbeWithMD5AndCast5_CBC NID = 112
+ NID_dsaWithSHA1 NID = 113
+ NID_md5_sha1 NID = 114
+ NID_sha1WithRSA NID = 115
+ NID_dsa NID = 116
+ NID_ripemd160 NID = 117
+ NID_ripemd160WithRSA NID = 119
+ NID_rc5_cbc NID = 120
+ NID_rc5_ecb NID = 121
+ NID_rc5_cfb64 NID = 122
+ NID_rc5_ofb64 NID = 123
+ NID_rle_compression NID = 124
+ NID_zlib_compression NID = 125
+ NID_ext_key_usage NID = 126
+ NID_id_pkix NID = 127
+ NID_id_kp NID = 128
+ NID_server_auth NID = 129
+ NID_client_auth NID = 130
+ NID_code_sign NID = 131
+ NID_email_protect NID = 132
+ NID_time_stamp NID = 133
+ NID_ms_code_ind NID = 134
+ NID_ms_code_com NID = 135
+ NID_ms_ctl_sign NID = 136
+ NID_ms_sgc NID = 137
+ NID_ms_efs NID = 138
+ NID_ns_sgc NID = 139
+ NID_delta_crl NID = 140
+ NID_crl_reason NID = 141
+ NID_invalidity_date NID = 142
+ NID_sxnet NID = 143
+ NID_pbe_WithSHA1And128BitRC4 NID = 144
+ NID_pbe_WithSHA1And40BitRC4 NID = 145
+ NID_pbe_WithSHA1And3_Key_TripleDES_CBC NID = 146
+ NID_pbe_WithSHA1And2_Key_TripleDES_CBC NID = 147
+ NID_pbe_WithSHA1And128BitRC2_CBC NID = 148
+ NID_pbe_WithSHA1And40BitRC2_CBC NID = 149
+ NID_keyBag NID = 150
+ NID_pkcs8ShroudedKeyBag NID = 151
+ NID_certBag NID = 152
+ NID_crlBag NID = 153
+ NID_secretBag NID = 154
+ NID_safeContentsBag NID = 155
+ NID_friendlyName NID = 156
+ NID_localKeyID NID = 157
+ NID_x509Certificate NID = 158
+ NID_sdsiCertificate NID = 159
+ NID_x509Crl NID = 160
+ NID_pbes2 NID = 161
+ NID_pbmac1 NID = 162
+ NID_hmacWithSHA1 NID = 163
+ NID_id_qt_cps NID = 164
+ NID_id_qt_unotice NID = 165
+ NID_rc2_64_cbc NID = 166
+ NID_SMIMECapabilities NID = 167
+ NID_pbeWithMD2AndRC2_CBC NID = 168
+ NID_pbeWithMD5AndRC2_CBC NID = 169
+ NID_pbeWithSHA1AndDES_CBC NID = 170
+ NID_ms_ext_req NID = 171
+ NID_ext_req NID = 172
+ NID_name NID = 173
+ NID_dnQualifier NID = 174
+ NID_id_pe NID = 175
+ NID_id_ad NID = 176
+ NID_info_access NID = 177
+ NID_ad_OCSP NID = 178
+ NID_ad_ca_issuers NID = 179
+ NID_OCSP_sign NID = 180
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/oracle_stubs.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/oracle_stubs.go
new file mode 100644
index 00000000000..30492f3b9d8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/oracle_stubs.go
@@ -0,0 +1,162 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !cgo
+
+package openssl
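+
+// This file mirrors the cgo-backed API with bodyless stub declarations,
+// presumably so the package still type-checks (e.g. for documentation
+// tools) when cgo is disabled; none of these functions are usable at
+// runtime.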
+
+import (
+ "errors"
+ "net"
+ "time"
+)
+
+const (
+ SSLRecordSize = 16 * 1024
+)
+
+type Conn struct{}
+
+func Client(conn net.Conn, ctx *Ctx) (*Conn, error)
+func Server(conn net.Conn, ctx *Ctx) (*Conn, error)
+
+func (c *Conn) Handshake() error
+func (c *Conn) PeerCertificate() (*Certificate, error)
+func (c *Conn) Close() error
+func (c *Conn) Read(b []byte) (n int, err error)
+func (c *Conn) Write(b []byte) (written int, err error)
+
+func (c *Conn) VerifyHostname(host string) error
+
+func (c *Conn) LocalAddr() net.Addr
+func (c *Conn) RemoteAddr() net.Addr
+func (c *Conn) SetDeadline(t time.Time) error
+func (c *Conn) SetReadDeadline(t time.Time) error
+func (c *Conn) SetWriteDeadline(t time.Time) error
+
+type Ctx struct{}
+
+type SSLVersion int
+
+const (
+ SSLv3 SSLVersion = 0x02
+ TLSv1 SSLVersion = 0x03
+ TLSv1_1 SSLVersion = 0x04
+ TLSv1_2 SSLVersion = 0x05
+ AnyVersion SSLVersion = 0x06
+)
+
+func NewCtxWithVersion(version SSLVersion) (*Ctx, error)
+func NewCtx() (*Ctx, error)
+func NewCtxFromFiles(cert_file string, key_file string) (*Ctx, error)
+func (c *Ctx) UseCertificate(cert *Certificate) error
+func (c *Ctx) UsePrivateKey(key PrivateKey) error
+
+type CertificateStore struct{}
+
+func (c *Ctx) GetCertificateStore() *CertificateStore
+
+func (s *CertificateStore) AddCertificate(cert *Certificate) error
+
+func (c *Ctx) LoadVerifyLocations(ca_file string, ca_path string) error
+
+type Options int
+
+const (
+ NoCompression Options = 0
+ NoSSLv2 Options = 0
+ NoSSLv3 Options = 0
+ NoTLSv1 Options = 0
+ CipherServerPreference Options = 0
+ NoSessionResumptionOrRenegotiation Options = 0
+ NoTicket Options = 0
+)
+
+func (c *Ctx) SetOptions(options Options) Options
+
+type Modes int
+
+const (
+ ReleaseBuffers Modes = 0
+)
+
+func (c *Ctx) SetMode(modes Modes) Modes
+
+type VerifyOptions int
+
+const (
+ VerifyNone VerifyOptions = 0
+ VerifyPeer VerifyOptions = 0
+ VerifyFailIfNoPeerCert VerifyOptions = 0
+ VerifyClientOnce VerifyOptions = 0
+)
+
+func (c *Ctx) SetVerify(options VerifyOptions)
+func (c *Ctx) SetVerifyDepth(depth int)
+func (c *Ctx) SetSessionId(session_id []byte) error
+
+func (c *Ctx) SetCipherList(list string) error
+
+type SessionCacheModes int
+
+const (
+ SessionCacheOff SessionCacheModes = 0
+ SessionCacheClient SessionCacheModes = 0
+ SessionCacheServer SessionCacheModes = 0
+ SessionCacheBoth SessionCacheModes = 0
+ NoAutoClear SessionCacheModes = 0
+ NoInternalLookup SessionCacheModes = 0
+ NoInternalStore SessionCacheModes = 0
+ NoInternal SessionCacheModes = 0
+)
+
+func (c *Ctx) SetSessionCacheMode(modes SessionCacheModes) SessionCacheModes
+
+var (
+ ValidationError = errors.New("Host validation error")
+)
+
+type CheckFlags int
+
+const (
+ AlwaysCheckSubject CheckFlags = 0
+ NoWildcards CheckFlags = 0
+)
+
+func (c *Certificate) CheckHost(host string, flags CheckFlags) error
+func (c *Certificate) CheckEmail(email string, flags CheckFlags) error
+func (c *Certificate) CheckIP(ip net.IP, flags CheckFlags) error
+func (c *Certificate) VerifyHostname(host string) error
+
+type PublicKey interface {
+ MarshalPKIXPublicKeyPEM() (pem_block []byte, err error)
+ MarshalPKIXPublicKeyDER() (der_block []byte, err error)
+ evpPKey() struct{}
+}
+
+type PrivateKey interface {
+ PublicKey
+ MarshalPKCS1PrivateKeyPEM() (pem_block []byte, err error)
+ MarshalPKCS1PrivateKeyDER() (der_block []byte, err error)
+}
+
+func LoadPrivateKeyFromPEM(pem_block []byte) (PrivateKey, error)
+
+type Certificate struct{}
+
+func LoadCertificateFromPEM(pem_block []byte) (*Certificate, error)
+
+func (c *Certificate) MarshalPEM() (pem_block []byte, err error)
+
+func (c *Certificate) PublicKey() (PublicKey, error)
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/password.c b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/password.c
new file mode 100644
index 00000000000..db9582ca726
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/password.c
@@ -0,0 +1,10 @@
+#include <openssl/ssl.h>
+#include "_cgo_export.h"
+
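+/* pem_password_cb implementation: userdata is expected to be a
+ * NUL-terminated password string, which is copied into buf; returns the
+ * password length, or 0 if it does not fit in buf. */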
+int password_cb(char *buf, int buf_len, int rwflag, void *userdata) {
+    char *pw = (char *)userdata;
+    int l = strlen(pw);
+    if (l + 1 > buf_len) return 0;
+    strcpy(buf, pw);
+    return l;
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/pem.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/pem.go
new file mode 100644
index 00000000000..6dad5972dbd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/pem.go
@@ -0,0 +1,32 @@
+// Copyright (C) 2014 Ryan Hileman
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openssl
+
+import (
+ "regexp"
+)
+
+var pemSplit *regexp.Regexp = regexp.MustCompile(`(?sm)` +
+ `(^-----[\s-]*?BEGIN.*?-----$` +
+ `.*?` +
+ `^-----[\s-]*?END.*?-----$)`)
+
+func SplitPEM(data []byte) [][]byte {
+ var results [][]byte
+ for _, block := range pemSplit.FindAll(data, -1) {
+ results = append(results, block)
+ }
+ return results
+}
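+
+// For example (sketch; the bundle path is an assumption), splitting a CA
+// bundle that concatenates several certificates:
+//
+//	bundle, err := ioutil.ReadFile("ca-bundle.pem")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, block := range SplitPEM(bundle) {
+//		cert, err := LoadCertificateFromPEM(block)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		_ = cert
+//	}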
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha1.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha1.go
new file mode 100644
index 00000000000..2592b6627d1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha1.go
@@ -0,0 +1,99 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+/*
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "openssl/evp.h"
+*/
+import "C"
+
+import (
+ "errors"
+ "runtime"
+ "unsafe"
+)
+
+type SHA1Hash struct {
+ ctx C.EVP_MD_CTX
+ engine *Engine
+}
+
+func NewSHA1Hash() (*SHA1Hash, error) { return NewSHA1HashWithEngine(nil) }
+
+func NewSHA1HashWithEngine(e *Engine) (*SHA1Hash, error) {
+ hash := &SHA1Hash{engine: e}
+ C.EVP_MD_CTX_init(&hash.ctx)
+ runtime.SetFinalizer(hash, func(hash *SHA1Hash) { hash.Close() })
+ if err := hash.Reset(); err != nil {
+ return nil, err
+ }
+ return hash, nil
+}
+
+func (s *SHA1Hash) Close() {
+ C.EVP_MD_CTX_cleanup(&s.ctx)
+}
+
+func engineRef(e *Engine) *C.ENGINE {
+ if e == nil {
+ return nil
+ }
+ return e.e
+}
+
+func (s *SHA1Hash) Reset() error {
+ if 1 != C.EVP_DigestInit_ex(&s.ctx, C.EVP_sha1(), engineRef(s.engine)) {
+ return errors.New("openssl: sha1: cannot init digest ctx")
+ }
+ return nil
+}
+
+func (s *SHA1Hash) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ if 1 != C.EVP_DigestUpdate(&s.ctx, unsafe.Pointer(&p[0]),
+ C.size_t(len(p))) {
+ return 0, errors.New("openssl: sha1: cannot update digest")
+ }
+ return len(p), nil
+}
+
+func (s *SHA1Hash) Sum() (result [20]byte, err error) {
+ if 1 != C.EVP_DigestFinal_ex(&s.ctx,
+ (*C.uchar)(unsafe.Pointer(&result[0])), nil) {
+ return result, errors.New("openssl: sha1: cannot finalize ctx")
+ }
+ return result, s.Reset()
+}
+
+func SHA1(data []byte) (result [20]byte, err error) {
+ hash, err := NewSHA1Hash()
+ if err != nil {
+ return result, err
+ }
+ defer hash.Close()
+ if _, err := hash.Write(data); err != nil {
+ return result, err
+ }
+ return hash.Sum()
+}
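+
+// One-shot usage (illustrative sketch):
+//
+//	digest, err := SHA1([]byte("hello world"))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%x\n", digest)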
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha1_test.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha1_test.go
new file mode 100644
index 00000000000..37037e4468b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha1_test.go
@@ -0,0 +1,111 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+import (
+ "crypto/rand"
+ "crypto/sha1"
+ "io"
+ "testing"
+)
+
+func TestSHA1(t *testing.T) {
+ for i := 0; i < 100; i++ {
+ buf := make([]byte, 10*1024-i)
+ if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+ t.Fatal(err)
+ }
+
+ expected := sha1.Sum(buf)
+ got, err := SHA1(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if expected != got {
+			t.Fatalf("exp:%x got:%x", expected, got)
+ }
+ }
+}
+
+func TestSHA1Writer(t *testing.T) {
+ ohash, err := NewSHA1Hash()
+ if err != nil {
+ t.Fatal(err)
+ }
+ hash := sha1.New()
+
+ for i := 0; i < 100; i++ {
+ if err := ohash.Reset(); err != nil {
+ t.Fatal(err)
+ }
+ hash.Reset()
+ buf := make([]byte, 10*1024-i)
+ if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := ohash.Write(buf); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := hash.Write(buf); err != nil {
+ t.Fatal(err)
+ }
+
+ var got, exp [20]byte
+
+ hash.Sum(exp[:0])
+ got, err := ohash.Sum()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if got != exp {
+			t.Fatalf("exp:%x got:%x", exp, got)
+ }
+ }
+}
+
+type shafunc func([]byte)
+
+func benchmarkSHA1(b *testing.B, length int64, fn shafunc) {
+ buf := make([]byte, length)
+ if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(length)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fn(buf)
+ }
+}
+
+func BenchmarkSHA1Large_openssl(b *testing.B) {
+ benchmarkSHA1(b, 1024*1024, func(buf []byte) { SHA1(buf) })
+}
+
+func BenchmarkSHA1Large_stdlib(b *testing.B) {
+ benchmarkSHA1(b, 1024*1024, func(buf []byte) { sha1.Sum(buf) })
+}
+
+func BenchmarkSHA1Small_openssl(b *testing.B) {
+ benchmarkSHA1(b, 1, func(buf []byte) { SHA1(buf) })
+}
+
+func BenchmarkSHA1Small_stdlib(b *testing.B) {
+ benchmarkSHA1(b, 1, func(buf []byte) { sha1.Sum(buf) })
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha256.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha256.go
new file mode 100644
index 00000000000..6785b32f881
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha256.go
@@ -0,0 +1,92 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+/*
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "openssl/evp.h"
+*/
+import "C"
+
+import (
+ "errors"
+ "runtime"
+ "unsafe"
+)
+
+type SHA256Hash struct {
+ ctx C.EVP_MD_CTX
+ engine *Engine
+}
+
+func NewSHA256Hash() (*SHA256Hash, error) { return NewSHA256HashWithEngine(nil) }
+
+func NewSHA256HashWithEngine(e *Engine) (*SHA256Hash, error) {
+ hash := &SHA256Hash{engine: e}
+ C.EVP_MD_CTX_init(&hash.ctx)
+ runtime.SetFinalizer(hash, func(hash *SHA256Hash) { hash.Close() })
+ if err := hash.Reset(); err != nil {
+ return nil, err
+ }
+ return hash, nil
+}
+
+func (s *SHA256Hash) Close() {
+ C.EVP_MD_CTX_cleanup(&s.ctx)
+}
+
+func (s *SHA256Hash) Reset() error {
+ if 1 != C.EVP_DigestInit_ex(&s.ctx, C.EVP_sha256(), engineRef(s.engine)) {
+ return errors.New("openssl: sha256: cannot init digest ctx")
+ }
+ return nil
+}
+
+func (s *SHA256Hash) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ return 0, nil
+ }
+ if 1 != C.EVP_DigestUpdate(&s.ctx, unsafe.Pointer(&p[0]),
+ C.size_t(len(p))) {
+ return 0, errors.New("openssl: sha256: cannot update digest")
+ }
+ return len(p), nil
+}
+
+func (s *SHA256Hash) Sum() (result [32]byte, err error) {
+ if 1 != C.EVP_DigestFinal_ex(&s.ctx,
+ (*C.uchar)(unsafe.Pointer(&result[0])), nil) {
+ return result, errors.New("openssl: sha256: cannot finalize ctx")
+ }
+ return result, s.Reset()
+}
+
+func SHA256(data []byte) (result [32]byte, err error) {
+ hash, err := NewSHA256Hash()
+ if err != nil {
+ return result, err
+ }
+ defer hash.Close()
+ if _, err := hash.Write(data); err != nil {
+ return result, err
+ }
+ return hash.Sum()
+}
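
A minimal usage sketch of the streaming digest above (assuming the package imports as github.com/spacemonkeygo/openssl, per the vendor path): since (*SHA256Hash).Write satisfies io.Writer, io.Copy can stream data straight into the digest.

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/spacemonkeygo/openssl"
)

func main() {
	hash, err := openssl.NewSHA256Hash()
	if err != nil {
		panic(err)
	}
	defer hash.Close()
	// (*SHA256Hash).Write matches io.Writer, so stdin streams straight in.
	if _, err := io.Copy(hash, os.Stdin); err != nil {
		panic(err)
	}
	sum, err := hash.Sum() // finalizes, then resets the context for reuse
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", sum)
}
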
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha256_test.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha256_test.go
new file mode 100644
index 00000000000..89df88afd44
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sha256_test.go
@@ -0,0 +1,109 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+import (
+ "crypto/rand"
+ "crypto/sha256"
+ "io"
+ "testing"
+)
+
+func TestSHA256(t *testing.T) {
+ for i := 0; i < 100; i++ {
+ buf := make([]byte, 10*1024-i)
+ if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+ t.Fatal(err)
+ }
+
+ expected := sha256.Sum256(buf)
+ got, err := SHA256(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if expected != got {
+			t.Fatalf("exp:%x got:%x", expected, got)
+ }
+ }
+}
+
+func TestSHA256Writer(t *testing.T) {
+ ohash, err := NewSHA256Hash()
+ if err != nil {
+ t.Fatal(err)
+ }
+ hash := sha256.New()
+
+ for i := 0; i < 100; i++ {
+ if err := ohash.Reset(); err != nil {
+ t.Fatal(err)
+ }
+ hash.Reset()
+ buf := make([]byte, 10*1024-i)
+ if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := ohash.Write(buf); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := hash.Write(buf); err != nil {
+ t.Fatal(err)
+ }
+
+ var got, exp [32]byte
+
+ hash.Sum(exp[:0])
+ got, err := ohash.Sum()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if got != exp {
+			t.Fatalf("exp:%x got:%x", exp, got)
+ }
+ }
+}
+
+func benchmarkSHA256(b *testing.B, length int64, fn shafunc) {
+ buf := make([]byte, length)
+ if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(length)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fn(buf)
+ }
+}
+
+func BenchmarkSHA256Large_openssl(b *testing.B) {
+ benchmarkSHA256(b, 1024*1024, func(buf []byte) { SHA256(buf) })
+}
+
+func BenchmarkSHA256Large_stdlib(b *testing.B) {
+ benchmarkSHA256(b, 1024*1024, func(buf []byte) { sha256.Sum256(buf) })
+}
+
+func BenchmarkSHA256Small_openssl(b *testing.B) {
+ benchmarkSHA256(b, 1, func(buf []byte) { SHA256(buf) })
+}
+
+func BenchmarkSHA256Small_stdlib(b *testing.B) {
+ benchmarkSHA256(b, 1, func(buf []byte) { sha256.Sum256(buf) })
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sni.c b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sni.c
new file mode 100644
index 00000000000..5398da869b8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sni.c
@@ -0,0 +1,23 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <openssl/ssl.h>
+#include "_cgo_export.h"
+#include <stdio.h>
+
+int sni_cb(SSL *con, int *ad, void *arg) {
+	SSL_CTX* ssl_ctx = SSL_get_SSL_CTX(con);
+ void* p = SSL_CTX_get_ex_data(ssl_ctx, get_ssl_ctx_idx());
+ return sni_cb_thunk(p, con, ad, arg);
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sni_test.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sni_test.go
new file mode 100644
index 00000000000..ee3b1a8bbaf
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/sni_test.go
@@ -0,0 +1,23 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openssl
+
+import "fmt"
+
+// We can implement SNI (RFC 6066, http://tools.ietf.org/html/rfc6066) on the
+// server side using the following callback. You should implement the context
+// storage (tlsCtxStorage) yourself.
+func ExampleSetTLSExtServernameCallback() {
+ fmt.Println("Hello")
+}
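
A hedged sketch of what the stub above gestures at: the callback shape func(*SSL) SSLTLSExtErr is inferred from sni_cb_thunk in ssl.go, and SetTLSExtServernameCallback is only named by the example function, so its exact signature, the tlsCtxStorage map, and the return value are assumptions.

package openssl

// tlsCtxStorage is the hypothetical per-hostname Ctx store that the comment
// above asks callers to provide.
var tlsCtxStorage map[string]*Ctx

func registerSNI(defaultCtx *Ctx) {
	defaultCtx.SetTLSExtServernameCallback(func(ssl *SSL) SSLTLSExtErr {
		// Look up a Ctx for the requested hostname in tlsCtxStorage and
		// switch the connection over to it (hostname accessor elided).
		return 0 // SSL_TLSEXT_ERR_OK in OpenSSL
	})
}
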
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ssl.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ssl.go
new file mode 100644
index 00000000000..d6120e15d99
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ssl.go
@@ -0,0 +1,167 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+/*
+#include <openssl/crypto.h>
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+#include <openssl/conf.h>
+
+static long SSL_set_options_not_a_macro(SSL* ssl, long options) {
+ return SSL_set_options(ssl, options);
+}
+
+static long SSL_get_options_not_a_macro(SSL* ssl) {
+ return SSL_get_options(ssl);
+}
+
+static long SSL_clear_options_not_a_macro(SSL* ssl, long options) {
+ return SSL_clear_options(ssl, options);
+}
+
+extern int verify_ssl_cb(int ok, X509_STORE_CTX* store);
+*/
+import "C"
+
+import (
+ "os"
+ "unsafe"
+)
+
+type SSLTLSExtErr int
+
+var (
+ ssl_idx = C.SSL_get_ex_new_index(0, nil, nil, nil, nil)
+)
+
+//export get_ssl_idx
+func get_ssl_idx() C.int {
+ return ssl_idx
+}
+
+type SSL struct {
+ ssl *C.SSL
+ verify_cb VerifyCallback
+}
+
+//export verify_ssl_cb_thunk
+func verify_ssl_cb_thunk(p unsafe.Pointer, ok C.int, ctx *C.X509_STORE_CTX) C.int {
+ defer func() {
+ if err := recover(); err != nil {
+			logger.Critf("openssl: verify callback panicked: %v", err)
+ os.Exit(1)
+ }
+ }()
+ verify_cb := (*SSL)(p).verify_cb
+	// if no callback is registered, fall through and return OpenSSL's verdict as-is
+ if verify_cb != nil {
+ store := &CertificateStoreCtx{ctx: ctx}
+ if verify_cb(ok == 1, store) {
+ ok = 1
+ } else {
+ ok = 0
+ }
+ }
+ return ok
+}
+
+// GetOptions returns SSL options. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
+func (s *SSL) GetOptions() Options {
+ return Options(C.SSL_get_options_not_a_macro(s.ssl))
+}
+
+// SetOptions sets SSL options. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
+func (s *SSL) SetOptions(options Options) Options {
+ return Options(C.SSL_set_options_not_a_macro(s.ssl, C.long(options)))
+}
+
+// ClearOptions clears SSL options. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_set_options.html
+func (s *SSL) ClearOptions(options Options) Options {
+ return Options(C.SSL_clear_options_not_a_macro(s.ssl, C.long(options)))
+}
+
+// SetVerify controls peer verification settings. See
+// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
+func (s *SSL) SetVerify(options VerifyOptions, verify_cb VerifyCallback) {
+ s.verify_cb = verify_cb
+ if verify_cb != nil {
+ C.SSL_set_verify(s.ssl, C.int(options), (*[0]byte)(C.verify_ssl_cb))
+ } else {
+ C.SSL_set_verify(s.ssl, C.int(options), nil)
+ }
+}
+
+// SetVerifyMode controls peer verification setting. See
+// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
+func (s *SSL) SetVerifyMode(options VerifyOptions) {
+ s.SetVerify(options, s.verify_cb)
+}
+
+// SetVerifyCallback controls peer verification setting. See
+// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
+func (s *SSL) SetVerifyCallback(verify_cb VerifyCallback) {
+	s.SetVerify(s.VerifyMode(), verify_cb)
+}
+
+// GetVerifyCallback returns callback function. See
+// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
+func (s *SSL) GetVerifyCallback() VerifyCallback {
+ return s.verify_cb
+}
+
+// VerifyMode returns peer verification setting. See
+// http://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
+func (s *SSL) VerifyMode() VerifyOptions {
+ return VerifyOptions(C.SSL_get_verify_mode(s.ssl))
+}
+
+// SetVerifyDepth controls how many certificates deep the certificate
+// verification logic is willing to follow a certificate chain. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
+func (s *SSL) SetVerifyDepth(depth int) {
+ C.SSL_set_verify_depth(s.ssl, C.int(depth))
+}
+
+// GetVerifyDepth controls how many certificates deep the certificate
+// verification logic is willing to follow a certificate chain. See
+// https://www.openssl.org/docs/ssl/SSL_CTX_set_verify.html
+func (s *SSL) GetVerifyDepth() int {
+ return int(C.SSL_get_verify_depth(s.ssl))
+}
+
+//export sni_cb_thunk
+func sni_cb_thunk(p unsafe.Pointer, con *C.SSL, ad unsafe.Pointer, arg unsafe.Pointer) C.int {
+ defer func() {
+ if err := recover(); err != nil {
+			logger.Critf("openssl: sni callback panicked: %v", err)
+ os.Exit(1)
+ }
+ }()
+
+ sni_cb := (*Ctx)(p).sni_cb
+
+ s := &SSL{ssl: con}
+ // This attaches a pointer to our SSL struct into the SNI callback.
+ C.SSL_set_ex_data(s.ssl, get_ssl_idx(), unsafe.Pointer(s))
+
+ // Note: this is ctx.sni_cb, not C.sni_cb
+ return C.int(sni_cb(s))
+}
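
To see how these hooks fit together, a sketch of a client that observes verification (assuming, as in this vendoring, that the connection returned by Client embeds *SSL and so exposes SetVerify; the callback shape matches passThruVerify in the tests below, and VerifyNone is the only VerifyOptions constant visible here):

package main

import (
	"net"

	"github.com/spacemonkeygo/openssl"
)

func main() {
	ctx, err := openssl.NewCtx()
	if err != nil {
		panic(err)
	}
	tcp, err := net.Dial("tcp", "example.com:443")
	if err != nil {
		panic(err)
	}
	conn, err := openssl.Client(tcp, ctx)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// Observe each certificate as it is checked, but pass OpenSSL's own
	// verdict through unchanged.
	conn.SetVerify(openssl.VerifyNone, func(ok bool, store *openssl.CertificateStoreCtx) bool {
		_ = store.GetCurrentCert()
		return ok
	})
	if err := conn.Handshake(); err != nil {
		panic(err)
	}
}
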
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ssl_test.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ssl_test.go
new file mode 100644
index 00000000000..f83225dec97
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/ssl_test.go
@@ -0,0 +1,633 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openssl
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/tls"
+ "io"
+ "io/ioutil"
+ "net"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/spacemonkeygo/openssl/utils"
+)
+
+var (
+ certBytes = []byte(`-----BEGIN CERTIFICATE-----
+MIIDxDCCAqygAwIBAgIVAMcK/0VWQr2O3MNfJCydqR7oVELcMA0GCSqGSIb3DQEB
+BQUAMIGQMUkwRwYDVQQDE0A1NjdjZGRmYzRjOWZiNTYwZTk1M2ZlZjA1N2M0NGFm
+MDdiYjc4MDIzODIxYTA5NThiY2RmMGMwNzJhOTdiMThhMQswCQYDVQQGEwJVUzEN
+MAsGA1UECBMEVXRhaDEQMA4GA1UEBxMHTWlkdmFsZTEVMBMGA1UEChMMU3BhY2Ug
+TW9ua2V5MB4XDTEzMTIxNzE4MzgyMloXDTIzMTIxNTE4MzgyMlowgZAxSTBHBgNV
+BAMTQDM4NTg3ODRkMjU1NTdiNTM1MWZmNjRmMmQzMTQ1ZjkwYTJlMTIzMDM4Y2Yz
+Mjc1Yzg1OTM1MjcxYWIzMmNiMDkxCzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRVdGFo
+MRAwDgYDVQQHEwdNaWR2YWxlMRUwEwYDVQQKEwxTcGFjZSBNb25rZXkwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDdf3icNvFsrlrnNLi8SocscqlSbFq+
+pEvmhcSoqgDLqebnqu8Ld73HJJ74MGXEgRX8xZT5FinOML31CR6t9E/j3dqV6p+G
+fdlFLe3IqtC0/bPVnCDBirBygBI4uCrMq+1VhAxPWclrDo7l9QRYbsExH9lfn+Ry
+vxeNMZiOASasvVZNncY8E9usBGRdH17EfDL/TPwXqWOLyxSN5o54GTztjjy9w9CG
+QP7jcCueKYyQJQCtEmnwc6P/q6/EPv5R6drBkX6loAPtmCUAkHqxkWOJrRq/v7Pw
+zRYhfY+ZpVHGc7WEkDnLzRiUypr1C9oxvLKS10etZEIwEdKyOkSg2fdPAgMBAAGj
+EzARMA8GA1UdEwEB/wQFMAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEcz0RTTJ99l
+HTK/zTyfV5VZEhtwqu6bwre/hD7lhI+1ji0DZYGIgCbJLKuZhj+cHn2h5nPhN7zE
+M9tc4pn0TgeVS0SVFSe6TGnIFipNogvP17E+vXpDZcW/xn9kPKeVCZc1hlDt1W4Z
+5q+ub3aUwuMwYs7bcArtDrumCmciJ3LFyNhebPi4mntb5ooeLFLaujEmVYyrQnpo
+tWKC9sMlJmLm4yAso64Sv9KLS2T9ivJBNn0ZtougozBCCTqrqgZVjha+B2yjHe9f
+sRkg/uxcJf7wC5Y0BLlp1+aPwdmZD87T3a1uQ1Ij93jmHG+2T9U20MklHAePOl0q
+yTqdSPnSH1c=
+-----END CERTIFICATE-----
+`)
+ keyBytes = []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA3X94nDbxbK5a5zS4vEqHLHKpUmxavqRL5oXEqKoAy6nm56rv
+C3e9xySe+DBlxIEV/MWU+RYpzjC99QkerfRP493aleqfhn3ZRS3tyKrQtP2z1Zwg
+wYqwcoASOLgqzKvtVYQMT1nJaw6O5fUEWG7BMR/ZX5/kcr8XjTGYjgEmrL1WTZ3G
+PBPbrARkXR9exHwy/0z8F6lji8sUjeaOeBk87Y48vcPQhkD+43ArnimMkCUArRJp
+8HOj/6uvxD7+UenawZF+paAD7ZglAJB6sZFjia0av7+z8M0WIX2PmaVRxnO1hJA5
+y80YlMqa9QvaMbyyktdHrWRCMBHSsjpEoNn3TwIDAQABAoIBAQCwgp6YzmgCFce3
+LBpzYmjqEM3CMzr1ZXRe1gbr6d4Mbu7leyBX4SpJAnP0kIzo1X2yG7ol7XWPLOST
+2pqqQWFQ00EX6wsJYEy+hmVRXl5HfU3MUkkAMwd9l3Xt4UWqKPBPD5XHvmN2fvl9
+Y4388vXdseXGAGNK1eFs0TMjJuOtDxDyrmJcnxpJ7y/77y/Hb5rUa9DCvj8tkKHg
+HmeIwQE0HhIFofj+qCYbqeVyjbPAaYZMrISXb2HmcyULKEOGRbMH24IzInKA0NxV
+kdP9qmV8Y2bJ609Fft/y8Vpj31iEdq/OFXyobdVvnXMnaVyAetoaWy7AOTIQ2Cnw
+wGbJ/F8BAoGBAN/pCnLQrWREeVMuFjf+MgYgCtRRaQ8EOVvjYcXXi0PhtOMFTAb7
+djqhlgmBOFsmeXcb8YRZsF+pNtu1xk5RJOquyKfK8j1rUdAJfoxGHiaUFI2/1i9E
+zuXX/Ao0xNRkWMxMKuwYBmmt1fMuVo+1M8UEwFMdHRtgxe+/+eOV1J2PAoGBAP09
+7GLOYSYAI1OO3BN/bEVNau6tAxP5YShGmX2Qxy0+ooxHZ1V3D8yo6C0hSg+H+fPT
+mjMgGcvaW6K+QyCdHDjgbk2hfdZ+Beq92JApPrH9gMV7MPhwHzgwjzDDio9OFxYY
+3vjBQ2yX+9jvz9lkvq2NM3fqFqbsG6Et+5mCc6pBAoGBAI62bxVtEgbladrtdfXs
+S6ABzkUzOl362EBL9iZuUnJKqstDtgiBQALwuLuIJA5cwHB9W/t6WuMt7CwveJy0
+NW5rRrNDtBAXlgad9o2bp135ZfxO+EoadjCi8B7lMUsaRkq4hWcDjRrQVJxxvXRN
+DxkVBSw0Uzf+/0nnN3OqLODbAoGACCY+/isAC1YDzQOS53m5RT2pjEa7C6CB1Ob4
+t4a6MiWK25LMq35qXr6swg8JMBjDHWqY0r5ctievvTv8Mwd7SgVG526j+wwRKq2z
+U2hQYS/0Peap+8S37Hn7kakpQ1VS/t4MBttJTSxS6XdGLAvG6xTZLCm3UuXUOcqe
+ByGgkUECgYEAmop45kRi974g4MPvyLplcE4syb19ifrHj76gPRBi94Cp8jZosY1T
+ucCCa4lOGgPtXJ0Qf1c8yq5vh4yqkQjrgUTkr+CFDGR6y4CxmNDQxEMYIajaIiSY
+qmgvgyRayemfO2zR0CPgC6wSoGBth+xW6g+WA8y0z76ZSaWpFi8lVM4=
+-----END RSA PRIVATE KEY-----
+`)
+)
+
+func NetPipe(t testing.TB) (net.Conn, net.Conn) {
+ l, err := net.Listen("tcp", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer l.Close()
+ client_future := utils.NewFuture()
+ go func() {
+ client_future.Set(net.Dial(l.Addr().Network(), l.Addr().String()))
+ }()
+ var errs utils.ErrorGroup
+ server_conn, err := l.Accept()
+ errs.Add(err)
+ client_conn, err := client_future.Get()
+ errs.Add(err)
+ err = errs.Finalize()
+ if err != nil {
+ if server_conn != nil {
+ server_conn.Close()
+ }
+ if client_conn != nil {
+ client_conn.(net.Conn).Close()
+ }
+ t.Fatal(err)
+ }
+ return server_conn, client_conn.(net.Conn)
+}
+
+type HandshakingConn interface {
+ net.Conn
+ Handshake() error
+}
+
+func SimpleConnTest(t testing.TB, constructor func(
+ t testing.TB, conn1, conn2 net.Conn) (sslconn1, sslconn2 HandshakingConn)) {
+ server_conn, client_conn := NetPipe(t)
+ defer server_conn.Close()
+ defer client_conn.Close()
+
+ data := "first test string\n"
+
+ server, client := constructor(t, server_conn, client_conn)
+ defer close_both(server, client)
+
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+
+ err := client.Handshake()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ _, err = io.Copy(client, bytes.NewReader([]byte(data)))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = client.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+ go func() {
+ defer wg.Done()
+
+ err := server.Handshake()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, len(data)))
+ _, err = io.CopyN(buf, server, int64(len(data)))
+ if err != nil {
+ t.Fatal(err)
+ }
+		if buf.String() != data {
+ t.Fatal("mismatched data")
+ }
+
+ err = server.Close()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+ wg.Wait()
+}
+
+func close_both(closer1, closer2 io.Closer) {
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ closer1.Close()
+ }()
+ go func() {
+ defer wg.Done()
+ closer2.Close()
+ }()
+ wg.Wait()
+}
+
+func ClosingTest(t testing.TB, constructor func(
+ t testing.TB, conn1, conn2 net.Conn) (sslconn1, sslconn2 HandshakingConn)) {
+
+ run_test := func(close_tcp bool, server_writes bool) {
+ server_conn, client_conn := NetPipe(t)
+ defer server_conn.Close()
+ defer client_conn.Close()
+ server, client := constructor(t, server_conn, client_conn)
+ defer close_both(server, client)
+
+ var sslconn1, sslconn2 HandshakingConn
+ var conn1 net.Conn
+ if server_writes {
+ sslconn1 = server
+ conn1 = server_conn
+ sslconn2 = client
+ } else {
+ sslconn1 = client
+ conn1 = client_conn
+ sslconn2 = server
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ _, err := sslconn1.Write([]byte("hello"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if close_tcp {
+ err = conn1.Close()
+ } else {
+ err = sslconn1.Close()
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+ data, err := ioutil.ReadAll(sslconn2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(data, []byte("hello")) {
+ t.Fatal("bytes don't match")
+ }
+ }()
+
+ wg.Wait()
+ }
+
+ run_test(true, false)
+ run_test(false, false)
+ run_test(true, true)
+ run_test(false, true)
+}
+
+func ThroughputBenchmark(b *testing.B, constructor func(
+ t testing.TB, conn1, conn2 net.Conn) (sslconn1, sslconn2 HandshakingConn)) {
+ server_conn, client_conn := NetPipe(b)
+ defer server_conn.Close()
+ defer client_conn.Close()
+
+ server, client := constructor(b, server_conn, client_conn)
+ defer close_both(server, client)
+
+ b.SetBytes(1024)
+ data := make([]byte, b.N*1024)
+ _, err := io.ReadFull(rand.Reader, data[:])
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+		_, err = io.Copy(client, bytes.NewReader(data))
+ if err != nil {
+ b.Fatal(err)
+ }
+ }()
+ go func() {
+ defer wg.Done()
+
+ buf := &bytes.Buffer{}
+ _, err = io.CopyN(buf, server, int64(len(data)))
+ if err != nil {
+ b.Fatal(err)
+ }
+ if !bytes.Equal(buf.Bytes(), data) {
+ b.Fatal("mismatched data")
+ }
+ }()
+ wg.Wait()
+ b.StopTimer()
+}
+
+func StdlibConstructor(t testing.TB, server_conn, client_conn net.Conn) (
+ server, client HandshakingConn) {
+ cert, err := tls.X509KeyPair(certBytes, keyBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ config := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ InsecureSkipVerify: true,
+ CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}}
+ server = tls.Server(server_conn, config)
+ client = tls.Client(client_conn, config)
+ return server, client
+}
+
+func passThruVerify(t testing.TB) func(bool, *CertificateStoreCtx) bool {
+ x := func(ok bool, store *CertificateStoreCtx) bool {
+ cert := store.GetCurrentCert()
+ if cert == nil {
+			t.Fatal("Could not obtain cert from store")
+ }
+ sn := cert.GetSerialNumberHex()
+ if len(sn) == 0 {
+			t.Fatal("Could not obtain serial number from cert")
+ }
+ return ok
+ }
+ return x
+}
+
+func OpenSSLConstructor(t testing.TB, server_conn, client_conn net.Conn) (
+ server, client HandshakingConn) {
+ ctx, err := NewCtx()
+ if err != nil {
+ t.Fatal(err)
+ }
+ ctx.SetVerify(VerifyNone, passThruVerify(t))
+ key, err := LoadPrivateKeyFromPEM(keyBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ctx.UsePrivateKey(key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cert, err := LoadCertificateFromPEM(certBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ctx.UseCertificate(cert)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ctx.SetCipherList("AES128-SHA")
+ if err != nil {
+ t.Fatal(err)
+ }
+ server, err = Server(server_conn, ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ client, err = Client(client_conn, ctx)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return server, client
+}
+
+func StdlibOpenSSLConstructor(t testing.TB, server_conn, client_conn net.Conn) (
+ server, client HandshakingConn) {
+ server_std, _ := StdlibConstructor(t, server_conn, client_conn)
+ _, client_ssl := OpenSSLConstructor(t, server_conn, client_conn)
+ return server_std, client_ssl
+}
+
+func OpenSSLStdlibConstructor(t testing.TB, server_conn, client_conn net.Conn) (
+ server, client HandshakingConn) {
+ _, client_std := StdlibConstructor(t, server_conn, client_conn)
+ server_ssl, _ := OpenSSLConstructor(t, server_conn, client_conn)
+ return server_ssl, client_std
+}
+
+func TestStdlibSimple(t *testing.T) {
+ SimpleConnTest(t, StdlibConstructor)
+}
+
+func TestOpenSSLSimple(t *testing.T) {
+ SimpleConnTest(t, OpenSSLConstructor)
+}
+
+func TestStdlibClosing(t *testing.T) {
+ ClosingTest(t, StdlibConstructor)
+}
+
+func TestOpenSSLClosing(t *testing.T) {
+ ClosingTest(t, OpenSSLConstructor)
+}
+
+func BenchmarkStdlibThroughput(b *testing.B) {
+ ThroughputBenchmark(b, StdlibConstructor)
+}
+
+func BenchmarkOpenSSLThroughput(b *testing.B) {
+ ThroughputBenchmark(b, OpenSSLConstructor)
+}
+
+func TestStdlibOpenSSLSimple(t *testing.T) {
+ SimpleConnTest(t, StdlibOpenSSLConstructor)
+}
+
+func TestOpenSSLStdlibSimple(t *testing.T) {
+ SimpleConnTest(t, OpenSSLStdlibConstructor)
+}
+
+func TestStdlibOpenSSLClosing(t *testing.T) {
+ ClosingTest(t, StdlibOpenSSLConstructor)
+}
+
+func TestOpenSSLStdlibClosing(t *testing.T) {
+ ClosingTest(t, OpenSSLStdlibConstructor)
+}
+
+func BenchmarkStdlibOpenSSLThroughput(b *testing.B) {
+ ThroughputBenchmark(b, StdlibOpenSSLConstructor)
+}
+
+func BenchmarkOpenSSLStdlibThroughput(b *testing.B) {
+ ThroughputBenchmark(b, OpenSSLStdlibConstructor)
+}
+
+func FullDuplexRenegotiationTest(t testing.TB, constructor func(
+ t testing.TB, conn1, conn2 net.Conn) (sslconn1, sslconn2 HandshakingConn)) {
+
+ server_conn, client_conn := NetPipe(t)
+ defer server_conn.Close()
+ defer client_conn.Close()
+
+ times := 256
+ data_len := 4 * SSLRecordSize
+ data1 := make([]byte, data_len)
+ _, err := io.ReadFull(rand.Reader, data1[:])
+ if err != nil {
+ t.Fatal(err)
+ }
+ data2 := make([]byte, data_len)
+	_, err = io.ReadFull(rand.Reader, data2[:])
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ server, client := constructor(t, server_conn, client_conn)
+ defer close_both(server, client)
+
+ var wg sync.WaitGroup
+
+ send_func := func(sender HandshakingConn, data []byte) {
+ defer wg.Done()
+ for i := 0; i < times; i++ {
+ if i == times/2 {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ err := sender.Handshake()
+ if err != nil {
+ t.Fatal(err)
+ }
+ }()
+ }
+ _, err := sender.Write(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ }
+
+ recv_func := func(receiver net.Conn, data []byte) {
+ defer wg.Done()
+
+ buf := make([]byte, len(data))
+ for i := 0; i < times; i++ {
+ n, err := io.ReadFull(receiver, buf[:])
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(buf[:n], data) {
+				t.Fatal("mismatched data")
+ }
+ }
+ }
+
+ wg.Add(4)
+ go recv_func(server, data1)
+ go send_func(client, data1)
+ go send_func(server, data2)
+ go recv_func(client, data2)
+ wg.Wait()
+}
+
+func TestStdlibFullDuplexRenegotiation(t *testing.T) {
+ FullDuplexRenegotiationTest(t, StdlibConstructor)
+}
+
+func TestOpenSSLFullDuplexRenegotiation(t *testing.T) {
+ FullDuplexRenegotiationTest(t, OpenSSLConstructor)
+}
+
+func TestOpenSSLStdlibFullDuplexRenegotiation(t *testing.T) {
+ FullDuplexRenegotiationTest(t, OpenSSLStdlibConstructor)
+}
+
+func TestStdlibOpenSSLFullDuplexRenegotiation(t *testing.T) {
+ FullDuplexRenegotiationTest(t, StdlibOpenSSLConstructor)
+}
+
+func LotsOfConns(t *testing.T, payload_size int64, loops, clients int,
+ sleep time.Duration, newListener func(net.Listener) net.Listener,
+ newClient func(net.Conn) (net.Conn, error)) {
+ tcp_listener, err := net.Listen("tcp", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ ssl_listener := newListener(tcp_listener)
+ go func() {
+ for {
+ conn, err := ssl_listener.Accept()
+ if err != nil {
+				t.Fatalf("failed accept: %s", err)
+ }
+ go func() {
+ defer func() {
+ err = conn.Close()
+ if err != nil {
+ t.Fatalf("failed closing: %s", err)
+ }
+ }()
+ for i := 0; i < loops; i++ {
+ _, err := io.Copy(ioutil.Discard,
+ io.LimitReader(conn, payload_size))
+ if err != nil {
+ t.Fatalf("failed reading: %s", err)
+ return
+ }
+ _, err = io.Copy(conn, io.LimitReader(rand.Reader,
+ payload_size))
+ if err != nil {
+ t.Fatalf("failed writing: %s", err)
+ return
+ }
+ }
+ time.Sleep(sleep)
+ }()
+ }
+ }()
+ var wg sync.WaitGroup
+ for i := 0; i < clients; i++ {
+ tcp_client, err := net.Dial(tcp_listener.Addr().Network(),
+ tcp_listener.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ ssl_client, err := newClient(tcp_client)
+ if err != nil {
+ t.Fatal(err)
+ }
+ wg.Add(1)
+ go func(i int) {
+ defer func() {
+ err = ssl_client.Close()
+ if err != nil {
+ t.Fatalf("failed closing: %s", err)
+ }
+ wg.Done()
+ }()
+ for i := 0; i < loops; i++ {
+ _, err := io.Copy(ssl_client, io.LimitReader(rand.Reader,
+ payload_size))
+ if err != nil {
+ t.Fatalf("failed writing: %s", err)
+ return
+ }
+ _, err = io.Copy(ioutil.Discard,
+ io.LimitReader(ssl_client, payload_size))
+ if err != nil {
+ t.Fatalf("failed reading: %s", err)
+ return
+ }
+ }
+ time.Sleep(sleep)
+ }(i)
+ }
+ wg.Wait()
+}
+
+func TestStdlibLotsOfConns(t *testing.T) {
+ tls_cert, err := tls.X509KeyPair(certBytes, keyBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ tls_config := &tls.Config{
+ Certificates: []tls.Certificate{tls_cert},
+ InsecureSkipVerify: true,
+ CipherSuites: []uint16{tls.TLS_RSA_WITH_AES_128_CBC_SHA}}
+ LotsOfConns(t, 1024*64, 10, 100, 0*time.Second,
+ func(l net.Listener) net.Listener {
+ return tls.NewListener(l, tls_config)
+ }, func(c net.Conn) (net.Conn, error) {
+ return tls.Client(c, tls_config), nil
+ })
+}
+
+func TestOpenSSLLotsOfConns(t *testing.T) {
+ ctx, err := NewCtx()
+ if err != nil {
+ t.Fatal(err)
+ }
+ key, err := LoadPrivateKeyFromPEM(keyBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ctx.UsePrivateKey(key)
+ if err != nil {
+ t.Fatal(err)
+ }
+ cert, err := LoadCertificateFromPEM(certBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ctx.UseCertificate(cert)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = ctx.SetCipherList("AES128-SHA")
+ if err != nil {
+ t.Fatal(err)
+ }
+ LotsOfConns(t, 1024*64, 10, 100, 0*time.Second,
+ func(l net.Listener) net.Listener {
+ return NewListener(l, ctx)
+ }, func(c net.Conn) (net.Conn, error) {
+ return Client(c, ctx)
+ })
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/utils/errors.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/utils/errors.go
new file mode 100644
index 00000000000..bab314c95d7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/utils/errors.go
@@ -0,0 +1,50 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "errors"
+ "strings"
+)
+
+// ErrorGroup collates errors
+type ErrorGroup struct {
+ Errors []error
+}
+
+// Add adds an error to an existing error group
+func (e *ErrorGroup) Add(err error) {
+ if err != nil {
+ e.Errors = append(e.Errors, err)
+ }
+}
+
+// Finalize returns an error corresponding to the ErrorGroup state. If there
+// are no errors in the group, Finalize returns nil. If there's only one
+// error, Finalize returns that error. Otherwise, Finalize returns a new
+// error consisting of the messages from the constituent errors.
+func (e *ErrorGroup) Finalize() error {
+ if len(e.Errors) == 0 {
+ return nil
+ }
+ if len(e.Errors) == 1 {
+ return e.Errors[0]
+ }
+ msgs := make([]string, 0, len(e.Errors))
+ for _, err := range e.Errors {
+ msgs = append(msgs, err.Error())
+ }
+ return errors.New(strings.Join(msgs, "\n"))
+}
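
A short sketch of ErrorGroup in use, mirroring NetPipe in ssl_test.go above: Add swallows nils, and Finalize collapses whatever accumulated into a single error value.

package main

import (
	"fmt"
	"os"

	"github.com/spacemonkeygo/openssl/utils"
)

func closeAll(files ...*os.File) error {
	var errs utils.ErrorGroup
	for _, f := range files {
		errs.Add(f.Close()) // Add ignores nil errors
	}
	// nil, the single error, or one error joining all the messages
	return errs.Finalize()
}

func main() {
	f, _ := os.Open(os.DevNull)
	fmt.Println(closeAll(f, f)) // the second Close fails: "file already closed"
}
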
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/utils/future.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/utils/future.go
new file mode 100644
index 00000000000..fa1bbbfb861
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/utils/future.go
@@ -0,0 +1,79 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "sync"
+)
+
+// Future is a type that is essentially the inverse of a channel. With a
+// channel, you have multiple senders and one receiver. With a future, you can
+// have multiple receivers and one sender. Additionally, a future protects
+// against double-sends. Since this is usually used for returning function
+// results, it also captures and returns an error value. Use NewFuture to
+// initialize.
+type Future struct {
+ mutex *sync.Mutex
+ cond *sync.Cond
+ received bool
+ val interface{}
+ err error
+}
+
+// NewFuture returns an initialized and ready Future.
+func NewFuture() *Future {
+ mutex := &sync.Mutex{}
+ return &Future{
+ mutex: mutex,
+ cond: sync.NewCond(mutex),
+ received: false,
+ val: nil,
+ err: nil,
+ }
+}
+
+// Get blocks until the Future has a value set.
+func (self *Future) Get() (interface{}, error) {
+ self.mutex.Lock()
+ defer self.mutex.Unlock()
+ for {
+ if self.received {
+ return self.val, self.err
+ }
+ self.cond.Wait()
+ }
+}
+
+// Fired returns whether or not a value has been set. If Fired is true, Get
+// won't block.
+func (self *Future) Fired() bool {
+ self.mutex.Lock()
+ defer self.mutex.Unlock()
+ return self.received
+}
+
+// Set provides the value to present and future Get calls. If Set has already
+// been called, this is a no-op.
+func (self *Future) Set(val interface{}, err error) {
+ self.mutex.Lock()
+ defer self.mutex.Unlock()
+ if self.received {
+ return
+ }
+ self.received = true
+ self.val = val
+ self.err = err
+ self.cond.Broadcast()
+}
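
A sketch of the one-sender/many-receivers pattern described above, in the same shape NetPipe uses in ssl_test.go: because Set takes (interface{}, error), a two-valued call like net.Dial can feed it directly.

package main

import (
	"fmt"
	"net"

	"github.com/spacemonkeygo/openssl/utils"
)

func main() {
	f := utils.NewFuture()
	go func() {
		// Dial's (net.Conn, error) pair maps onto Set's parameters.
		f.Set(net.Dial("tcp", "localhost:80"))
	}()
	val, err := f.Get() // blocks until Set fires; repeat Sets are no-ops
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	conn := val.(net.Conn) // receivers type-assert the stored value
	conn.Close()
}
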
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/verify.c b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/verify.c
new file mode 100644
index 00000000000..d55866c4cf0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/verify.c
@@ -0,0 +1,31 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <openssl/ssl.h>
+#include "_cgo_export.h"
+
+int verify_cb(int ok, X509_STORE_CTX* store) {
+ SSL* ssl = (SSL *)X509_STORE_CTX_get_app_data(store);
+ SSL_CTX* ssl_ctx = SSL_get_SSL_CTX(ssl);
+ void* p = SSL_CTX_get_ex_data(ssl_ctx, get_ssl_ctx_idx());
+ // get the pointer to the go Ctx object and pass it back into the thunk
+ return verify_cb_thunk(p, ok, store);
+}
+
+int verify_ssl_cb(int ok, X509_STORE_CTX* store) {
+ SSL* ssl = (SSL *)X509_STORE_CTX_get_app_data(store);
+ void* p = SSL_get_ex_data(ssl, get_ssl_idx());
+ // get the pointer to the go Ctx object and pass it back into the thunk
+ return verify_ssl_cb_thunk(p, ok, store);
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/version.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/version.go
new file mode 100644
index 00000000000..8f3d392cde8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/openssl/version.go
@@ -0,0 +1,22 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build cgo
+
+package openssl
+
+// #include <openssl/opensslv.h>
+import "C"
+
+const Version string = C.OPENSSL_VERSION_TEXT
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/.travis.yml b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/.travis.yml
new file mode 100644
index 00000000000..d87fbdcf39c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/.travis.yml
@@ -0,0 +1,6 @@
+language: go
+
+go:
+ - 1.2
+ - release
+ - tip
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/LICENSE b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/LICENSE
new file mode 100644
index 00000000000..37ec93a14fd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/README.md b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/README.md
new file mode 100644
index 00000000000..28033f68d9c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/README.md
@@ -0,0 +1,19 @@
+# spacelog [![Build Status](https://api.travis-ci.org/spacemonkeygo/spacelog.svg?branch=master)](https://travis-ci.org/spacemonkeygo/spacelog)
+
+Please see http://godoc.org/github.com/spacemonkeygo/spacelog for info
+
+### License
+
+Copyright (C) 2014 Space Monkey, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture.go
new file mode 100644
index 00000000000..d7ea1ca31a6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture.go
@@ -0,0 +1,67 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+)
+
+// CaptureOutputToFile opens a filehandle using the given path, then calls
+// CaptureOutputToFd on the associated filehandle.
+func CaptureOutputToFile(path string) error {
+ fh, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0644)
+ if err != nil {
+ return err
+ }
+ defer fh.Close()
+ return CaptureOutputToFd(int(fh.Fd()))
+}
+
+// CaptureOutputToProcess starts a subprocess and, using CaptureOutputToFd,
+// redirects stdout and stderr to the subprocess' stdin. The subprocess is
+// expected to last the lifetime of the current process; if the subprocess
+// dies, CaptureOutputToProcess panics.
+func CaptureOutputToProcess(command string, args ...string) error {
+ cmd := exec.Command(command, args...)
+ out, err := cmd.StdinPipe()
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+ type fder interface {
+ Fd() uintptr
+ }
+ out_fder, ok := out.(fder)
+ if !ok {
+ return fmt.Errorf("unable to get underlying pipe")
+ }
+ err = CaptureOutputToFd(int(out_fder.Fd()))
+ if err != nil {
+ return err
+ }
+ err = cmd.Start()
+ if err != nil {
+ return err
+ }
+ go func() {
+ err := cmd.Wait()
+ if err != nil {
+ panic(fmt.Errorf("captured output process died! %s", err))
+ }
+ }()
+ return nil
+}
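
A sketch of CaptureOutputToProcess in use; the logger(1) command and its flags are illustrative stand-ins, not something this package prescribes.

package main

import "github.com/spacemonkeygo/spacelog"

func main() {
	// Tee this process' stdout/stderr into logger(1), which forwards to
	// syslog; if the subprocess ever exits, the watchdog goroutine panics.
	if err := spacelog.CaptureOutputToProcess("logger", "-t", "myapp"); err != nil {
		panic(err)
	}
	println("this line now lands in syslog")
}
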
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture_other.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture_other.go
new file mode 100644
index 00000000000..5a62a2accaf
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture_other.go
@@ -0,0 +1,35 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package spacelog
+
+import (
+ "syscall"
+)
+
+// CaptureOutputToFd redirects the current process' stdout and stderr file
+// descriptors to the given file descriptor, using the dup2 syscall.
+func CaptureOutputToFd(fd int) error {
+ err := syscall.Dup2(fd, syscall.Stdout)
+ if err != nil {
+ return err
+ }
+ err = syscall.Dup2(fd, syscall.Stderr)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture_windows.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture_windows.go
new file mode 100644
index 00000000000..e9f061dcf47
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/capture_windows.go
@@ -0,0 +1,23 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "fmt"
+)
+
+// CaptureOutputToFd is not supported on Windows, which has no dup2 syscall;
+// it always returns an error.
+func CaptureOutputToFd(fd int) error {
+ return fmt.Errorf("CaptureOutputToFd not supported on Windows")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/collection.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/collection.go
new file mode 100644
index 00000000000..fd612db6ebd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/collection.go
@@ -0,0 +1,229 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "text/template"
+)
+
+var (
+ // If set, these prefixes will be stripped out of automatic logger names.
+ IgnoredPrefixes []string
+
+ badChars = regexp.MustCompile("[^a-zA-Z0-9_.-]")
+ slashes = regexp.MustCompile("[/]")
+)
+
+func callerName() string {
+ pc, _, _, ok := runtime.Caller(2)
+ if !ok {
+ return "unknown.unknown"
+ }
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ return "unknown.unknown"
+ }
+ name := f.Name()
+ for _, prefix := range IgnoredPrefixes {
+ name = strings.TrimPrefix(name, prefix)
+ }
+ return badChars.ReplaceAllLiteralString(
+ slashes.ReplaceAllLiteralString(name, "."), "_")
+}
+
+// LoggerCollection contains all of the loggers a program might use. Typically
+// a codebase will just use the default logger collection.
+type LoggerCollection struct {
+ mtx sync.Mutex
+ loggers map[string]*Logger
+ level LogLevel
+ handler Handler
+}
+
+// NewLoggerCollection creates a new logger collection. It's unlikely you will
+// ever need this function directly; use the DefaultLoggerCollection instead.
+func NewLoggerCollection() *LoggerCollection {
+ return &LoggerCollection{
+ loggers: make(map[string]*Logger),
+ level: DefaultLevel,
+ handler: defaultHandler}
+}
+
+// GetLogger returns a new Logger with a name automatically generated from
+// the call stack. If you want to avoid automatic name generation, see
+// GetLoggerNamed.
+func (c *LoggerCollection) GetLogger() *Logger {
+	return c.GetLoggerNamed(callerName())
+}
+
+func (c *LoggerCollection) getLogger(name string, level LogLevel,
+ handler Handler) *Logger {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ logger, exists := c.loggers[name]
+ if !exists {
+ logger = &Logger{level: level,
+ collection: c,
+ name: name,
+ handler: handler}
+ c.loggers[name] = logger
+ }
+ return logger
+}
+
+// GetLoggerNamed returns a new Logger with the provided name. GetLogger is
+// more frequently used.
+func (c *LoggerCollection) GetLoggerNamed(name string) *Logger {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ logger, exists := c.loggers[name]
+ if !exists {
+ logger = &Logger{level: c.level,
+ collection: c,
+ name: name,
+ handler: c.handler}
+ c.loggers[name] = logger
+ }
+ return logger
+}
+
+// SetLevel will set the current log level for all loggers with names that
+// match a provided regular expression. If the regular expression is nil, then
+// all loggers match.
+func (c *LoggerCollection) SetLevel(re *regexp.Regexp, level LogLevel) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ if re == nil {
+ c.level = level
+ }
+ for name, logger := range c.loggers {
+ if re == nil || re.MatchString(name) {
+ logger.setLevel(level)
+ }
+ }
+}
+
+// SetHandler will set the current log handler for all loggers with names that
+// match a provided regular expression. If the regular expression is nil, then
+// all loggers match.
+func (c *LoggerCollection) SetHandler(re *regexp.Regexp, handler Handler) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ if re == nil {
+ c.handler = handler
+ }
+ for name, logger := range c.loggers {
+ if re == nil || re.MatchString(name) {
+ logger.setHandler(handler)
+ }
+ }
+}
+
+// SetTextTemplate will set the current text template for all loggers with
+// names that match a provided regular expression. If the regular expression
+// is nil, then all loggers match. Note that not every handler is guaranteed
+// to support text templates and a text template will only apply to
+// text-oriented and unstructured handlers.
+func (c *LoggerCollection) SetTextTemplate(re *regexp.Regexp,
+ t *template.Template) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ if re == nil {
+ c.handler.SetTextTemplate(t)
+ }
+ for name, logger := range c.loggers {
+ if re == nil || re.MatchString(name) {
+ logger.getHandler().SetTextTemplate(t)
+ }
+ }
+}
+
+// SetTextOutput will set the current output interface for all loggers with
+// names that match a provided regular expression. If the regular expression
+// is nil, then all loggers match. Note that not every handler is guaranteed
+// to support text output and a text output interface will only apply to
+// text-oriented and unstructured handlers.
+func (c *LoggerCollection) SetTextOutput(re *regexp.Regexp,
+ output TextOutput) {
+ c.mtx.Lock()
+ defer c.mtx.Unlock()
+
+ if re == nil {
+ c.handler.SetTextOutput(output)
+ }
+ for name, logger := range c.loggers {
+ if re == nil || re.MatchString(name) {
+ logger.getHandler().SetTextOutput(output)
+ }
+ }
+}
+
+var (
+ // It's unlikely you'll need to use this directly
+ DefaultLoggerCollection = NewLoggerCollection()
+)
+
+// GetLogger returns an automatically-named logger on the default logger
+// collection.
+func GetLogger() *Logger {
+ return DefaultLoggerCollection.GetLoggerNamed(callerName())
+}
+
+// GetLoggerNamed returns a new Logger with the provided name on the default
+// logger collection. GetLogger is more frequently used.
+func GetLoggerNamed(name string) *Logger {
+ return DefaultLoggerCollection.GetLoggerNamed(name)
+}
+
+// SetLevel will set the current log level for all loggers on the default
+// collection with names that match a provided regular expression. If the
+// regular expression is nil, then all loggers match.
+func SetLevel(re *regexp.Regexp, level LogLevel) {
+ DefaultLoggerCollection.SetLevel(re, level)
+}
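+
+// As a sketch of typical use, the following drops every logger whose name
+// starts with "mongo" (an assumed prefix) to debug level while leaving all
+// other loggers alone:
+//
+//	spacelog.SetLevel(regexp.MustCompile(`^mongo`), spacelog.Debug)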
+
+// SetHandler will set the current log handler for all loggers on the default
+// collection with names that match a provided regular expression. If the
+// regular expression is nil, then all loggers match.
+func SetHandler(re *regexp.Regexp, handler Handler) {
+ DefaultLoggerCollection.SetHandler(re, handler)
+}
+
+// SetTextTemplate will set the current text template for all loggers on the
+// default collection with names that match a provided regular expression. If
+// the regular expression is nil, then all loggers match. Note that not every
+// handler is guaranteed to support text templates, and a text template will
+// only apply to text-oriented and unstructured handlers.
+func SetTextTemplate(re *regexp.Regexp, t *template.Template) {
+ DefaultLoggerCollection.SetTextTemplate(re, t)
+}
+
+// SetTextOutput will set the current output interface for all loggers on the
+// default collection with names that match a provided regular expression. If
+// the regular expression is nil, then all loggers match. Note that not every
+// handler is guaranteed to support text output and a text output interface
+// will only apply to text-oriented and unstructured handlers.
+func SetTextOutput(re *regexp.Regexp, output TextOutput) {
+ DefaultLoggerCollection.SetTextOutput(re, output)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/convenience.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/convenience.go
new file mode 100644
index 00000000000..4b4efd22389
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/convenience.go
@@ -0,0 +1,266 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "fmt"
+ "io"
+)
+
+// Debug logs a collection of values if the logger's level is debug or even
+// more permissive.
+func (l *Logger) Debug(v ...interface{}) {
+ if l.getLevel() <= Debug {
+ l.getHandler().Log(l.name, Debug, fmt.Sprint(v...), 1)
+ }
+}
+
+// Debugf logs a format string with values if the logger's level is debug or
+// even more permissive.
+func (l *Logger) Debugf(format string, v ...interface{}) {
+ if l.getLevel() <= Debug {
+ l.getHandler().Log(l.name, Debug, fmt.Sprintf(format, v...), 1)
+ }
+}
+
+// Debuge logs an error value if the error is not nil and the logger's level
+// is debug or even more permissive.
+func (l *Logger) Debuge(err error) {
+ if l.getLevel() <= Debug && err != nil {
+ l.getHandler().Log(l.name, Debug, err.Error(), 1)
+ }
+}
+
+// DebugEnabled returns true if the logger's level is debug or even more
+// permissive.
+func (l *Logger) DebugEnabled() bool {
+ return l.getLevel() <= Debug
+}
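+
+// A sketch of the intended use: guard expensive message construction behind
+// DebugEnabled. Here expensiveDump is a hypothetical helper, not part of
+// this package.
+//
+//	if logger.DebugEnabled() {
+//		logger.Debug("state: ", expensiveDump())
+//	}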
+
+// Info logs a collection of values if the logger's level is info or even
+// more permissive.
+func (l *Logger) Info(v ...interface{}) {
+ if l.getLevel() <= Info {
+ l.getHandler().Log(l.name, Info, fmt.Sprint(v...), 1)
+ }
+}
+
+// Infof logs a format string with values if the logger's level is info or
+// even more permissive.
+func (l *Logger) Infof(format string, v ...interface{}) {
+ if l.getLevel() <= Info {
+ l.getHandler().Log(l.name, Info, fmt.Sprintf(format, v...), 1)
+ }
+}
+
+// Infoe logs an error value if the error is not nil and the logger's level
+// is info or even more permissive.
+func (l *Logger) Infoe(err error) {
+ if l.getLevel() <= Info && err != nil {
+ l.getHandler().Log(l.name, Info, err.Error(), 1)
+ }
+}
+
+// InfoEnabled returns true if the logger's level is info or even more
+// permissive.
+func (l *Logger) InfoEnabled() bool {
+ return l.getLevel() <= Info
+}
+
+// Notice logs a collection of values if the logger's level is notice or even
+// more permissive.
+func (l *Logger) Notice(v ...interface{}) {
+ if l.getLevel() <= Notice {
+ l.getHandler().Log(l.name, Notice, fmt.Sprint(v...), 1)
+ }
+}
+
+// Noticef logs a format string with values if the logger's level is notice or
+// even more permissive.
+func (l *Logger) Noticef(format string, v ...interface{}) {
+ if l.getLevel() <= Notice {
+ l.getHandler().Log(l.name, Notice, fmt.Sprintf(format, v...), 1)
+ }
+}
+
+// Noticee logs an error value if the error is not nil and the logger's level
+// is notice or even more permissive.
+func (l *Logger) Noticee(err error) {
+ if l.getLevel() <= Notice && err != nil {
+ l.getHandler().Log(l.name, Notice, err.Error(), 1)
+ }
+}
+
+// NoticeEnabled returns true if the logger's level is notice or even more
+// permissive.
+func (l *Logger) NoticeEnabled() bool {
+ return l.getLevel() <= Notice
+}
+
+// Warn logs a collection of values if the logger's level is warning or even
+// more permissive.
+func (l *Logger) Warn(v ...interface{}) {
+ if l.getLevel() <= Warning {
+ l.getHandler().Log(l.name, Warning, fmt.Sprint(v...), 1)
+ }
+}
+
+// Warnf logs a format string with values if the logger's level is warning or
+// even more permissive.
+func (l *Logger) Warnf(format string, v ...interface{}) {
+ if l.getLevel() <= Warning {
+ l.getHandler().Log(l.name, Warning, fmt.Sprintf(format, v...), 1)
+ }
+}
+
+// Warne logs an error value if the error is not nil and the logger's level
+// is warning or even more permissive.
+func (l *Logger) Warne(err error) {
+ if l.getLevel() <= Warning && err != nil {
+ l.getHandler().Log(l.name, Warning, err.Error(), 1)
+ }
+}
+
+// WarnEnabled returns true if the logger's level is warning or even more
+// permissive.
+func (l *Logger) WarnEnabled() bool {
+ return l.getLevel() <= Warning
+}
+
+// Error logs a collection of values if the logger's level is error or even
+// more permissive.
+func (l *Logger) Error(v ...interface{}) {
+ if l.getLevel() <= Error {
+ l.getHandler().Log(l.name, Error, fmt.Sprint(v...), 1)
+ }
+}
+
+// Errorf logs a format string with values if the logger's level is error or
+// even more permissive.
+func (l *Logger) Errorf(format string, v ...interface{}) {
+ if l.getLevel() <= Error {
+ l.getHandler().Log(l.name, Error, fmt.Sprintf(format, v...), 1)
+ }
+}
+
+// Errore logs an error value if the error is not nil and the logger's level
+// is error or even more permissive.
+func (l *Logger) Errore(err error) {
+ if l.getLevel() <= Error && err != nil {
+ l.getHandler().Log(l.name, Error, err.Error(), 1)
+ }
+}
+
+// ErrorEnabled returns true if the logger's level is error or even more
+// permissive.
+func (l *Logger) ErrorEnabled() bool {
+ return l.getLevel() <= Error
+}
+
+// Crit logs a collection of values if the logger's level is critical or even
+// more permissive.
+func (l *Logger) Crit(v ...interface{}) {
+ if l.getLevel() <= Critical {
+ l.getHandler().Log(l.name, Critical, fmt.Sprint(v...), 1)
+ }
+}
+
+// Critf logs a format string with values if the logger's level is critical or
+// even more permissive.
+func (l *Logger) Critf(format string, v ...interface{}) {
+ if l.getLevel() <= Critical {
+ l.getHandler().Log(l.name, Critical, fmt.Sprintf(format, v...), 1)
+ }
+}
+
+// Crite logs an error value if the error is not nil and the logger's level
+// is critical or even more permissive.
+func (l *Logger) Crite(err error) {
+ if l.getLevel() <= Critical && err != nil {
+ l.getHandler().Log(l.name, Critical, err.Error(), 1)
+ }
+}
+
+// CritEnabled returns true if the logger's level is critical or even more
+// permissive.
+func (l *Logger) CritEnabled() bool {
+ return l.getLevel() <= Critical
+}
+
+// Log logs a collection of values if the logger's level is the provided level
+// or even more permissive.
+func (l *Logger) Log(level LogLevel, v ...interface{}) {
+ if l.getLevel() <= level {
+ l.getHandler().Log(l.name, level, fmt.Sprint(v...), 1)
+ }
+}
+
+// Logf logs a format string with values if the logger's level is the provided
+// level or even more permissive.
+func (l *Logger) Logf(level LogLevel, format string, v ...interface{}) {
+ if l.getLevel() <= level {
+ l.getHandler().Log(l.name, level, fmt.Sprintf(format, v...), 1)
+ }
+}
+
+// Loge logs an error value if the error is not nil and the logger's level
+// is the provided level or even more permissive.
+func (l *Logger) Loge(level LogLevel, err error) {
+ if l.getLevel() <= level && err != nil {
+ l.getHandler().Log(l.name, level, err.Error(), 1)
+ }
+}
+
+// LevelEnabled returns true if the logger's level is the provided level or
+// even more permissive.
+func (l *Logger) LevelEnabled(level LogLevel) bool {
+ return l.getLevel() <= level
+}
+
+type writer struct {
+ l *Logger
+ level LogLevel
+}
+
+func (w *writer) Write(data []byte) (int, error) {
+ if w.l.getLevel() <= w.level {
+ w.l.getHandler().Log(w.l.name, w.level, string(data), 1)
+ }
+ return len(data), nil
+}
+
+// Writer returns an io.Writer that writes messages at the given log level.
+func (l *Logger) Writer(level LogLevel) io.Writer {
+ return &writer{l: l, level: level}
+}
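+
+// As a sketch, the standard library logger can be routed through a Logger
+// at a chosen level (Setup does something similar with WriterWithoutCaller):
+//
+//	log.SetOutput(logger.Writer(spacelog.Info))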
+
+type writerNoCaller struct {
+ l *Logger
+ level LogLevel
+}
+
+func (w *writerNoCaller) Write(data []byte) (int, error) {
+ if w.l.getLevel() <= w.level {
+ w.l.getHandler().Log(w.l.name, w.level, string(data), -1)
+ }
+ return len(data), nil
+}
+
+// WriterWithoutCaller returns an io.Writer that writes messages at the given
+// log level, but does not attempt to collect the Write caller, and provides
+// no caller information to the log event.
+func (l *Logger) WriterWithoutCaller(level LogLevel) io.Writer {
+ return &writerNoCaller{l: l, level: level}
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/doc.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/doc.go
new file mode 100644
index 00000000000..28c25b4db64
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/doc.go
@@ -0,0 +1,39 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package spacelog is a collection of interface lego bricks designed to help you
+build a flexible logging system.
+
+spacelog is loosely inspired by the Python logging library.
+
+The basic interaction is between a Logger and a Handler. A Logger is
+what the programmer typically interacts with for creating log messages. A
+Logger will be at a given log level, and if log messages can clear that
+specific logger's log level filter, they will be passed off to the Handler.
+
+Loggers are instantiated from GetLogger and GetLoggerNamed.
+
+A Handler is a very generic interface for handling log events. You can provide
+your own Handler for doing structured JSON output or colorized output or
+countless other things.
+
+Provided is a simple TextHandler, along with a variety of log event templates
+and TextOutput sinks, such as io.Writer, Syslog, and so forth.
+
+Make sure to see the source of the setup subpackage for an example of easy and
+configurable logging setup at process start:
+ http://godoc.org/github.com/spacemonkeygo/spacelog/setup
+*/
+package spacelog
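+
+// A minimal end-to-end sketch; the process name "myproc" and the message are
+// illustrative assumptions, not part of this package:
+//
+//	var logger = spacelog.GetLogger()
+//
+//	func main() {
+//		spacelog.MustSetup("myproc", spacelog.SetupConfig{Level: "info"})
+//		logger.Infof("%d things done", 3)
+//	}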
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/event.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/event.go
new file mode 100644
index 00000000000..da863cbf2c2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/event.go
@@ -0,0 +1,75 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "path/filepath"
+ "strings"
+ "time"
+)
+
+// TermColors is a type that knows how to output terminal colors and formatting
+type TermColors struct{}
+
+// LogEvent is a type made by the default text handler for feeding to log
+// templates. It has as much contextual data about the log event as possible.
+type LogEvent struct {
+ LoggerName string
+ Level LogLevel
+ Message string
+ Filepath string
+ Line int
+ Timestamp time.Time
+
+ TermColors
+}
+
+// Reset resets the color palette for terminals that support color
+func (TermColors) Reset() string { return "\x1b[0m" }
+func (TermColors) Bold() string { return "\x1b[1m" }
+func (TermColors) Underline() string { return "\x1b[4m" }
+func (TermColors) Black() string { return "\x1b[30m" }
+func (TermColors) Red() string { return "\x1b[31m" }
+func (TermColors) Green() string { return "\x1b[32m" }
+func (TermColors) Yellow() string { return "\x1b[33m" }
+func (TermColors) Blue() string { return "\x1b[34m" }
+func (TermColors) Magenta() string { return "\x1b[35m" }
+func (TermColors) Cyan() string { return "\x1b[36m" }
+func (TermColors) White() string { return "\x1b[37m" }
+
+func (l *LogEvent) Filename() string {
+ if l.Filepath == "" {
+ return ""
+ }
+ return filepath.Base(l.Filepath)
+}
+
+func (l *LogEvent) Time() string {
+ return l.Timestamp.Format("15:04:05")
+}
+
+func (l *LogEvent) Date() string {
+ return l.Timestamp.Format("2006/01/02")
+}
+
+// LevelJustified returns the log level in string form justified so that all
+// log levels take the same text width.
+func (l *LogEvent) LevelJustified() (rv string) {
+ rv = l.Level.String()
+ if len(rv) < 5 {
+ rv += strings.Repeat(" ", 5-len(rv))
+ }
+ return rv
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/handler.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/handler.go
new file mode 100644
index 00000000000..e3db0865479
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/handler.go
@@ -0,0 +1,53 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "text/template"
+)
+
+// Handler is an interface that knows how to process log events. This is the
+// basic interface type for building a logging system. If you want to route
+// structured log data somewhere, you would implement this interface.
+type Handler interface {
+ // Log is called for every message. If calldepth is negative, caller
+ // information is missing.
+ Log(logger_name string, level LogLevel, msg string, calldepth int)
+
+ // These two calls are expected to be no-ops on non-text-output handlers
+ SetTextTemplate(t *template.Template)
+ SetTextOutput(output TextOutput)
+}
+
+// HandlerFunc is a type that makes implementing the Handler interface easier.
+type HandlerFunc func(logger_name string, level LogLevel, msg string,
+ calldepth int)
+
+// Log simply calls f(logger_name, level, msg, calldepth)
+func (f HandlerFunc) Log(logger_name string, level LogLevel, msg string,
+ calldepth int) {
+ f(logger_name, level, msg, calldepth)
+}
+
+// SetTextTemplate is a no-op
+func (HandlerFunc) SetTextTemplate(t *template.Template) {}
+
+// SetTextOutput is a no-op
+func (HandlerFunc) SetTextOutput(output TextOutput) {}
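+
+// As a sketch, HandlerFunc lets an ordinary function act as a Handler; here
+// a hypothetical collector (an assumption, not part of this package)
+// receives every event:
+//
+//	h := spacelog.HandlerFunc(func(name string, level spacelog.LogLevel,
+//		msg string, calldepth int) {
+//		collector.Record(name, level.Name(), msg) // collector is assumed
+//	})
+//	spacelog.SetHandler(nil, h)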
+
+var (
+ defaultHandler = NewTextHandler(StdlibTemplate,
+ &StdlibOutput{})
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/level.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/level.go
new file mode 100644
index 00000000000..1797be04041
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/level.go
@@ -0,0 +1,126 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type LogLevel int32
+
+const (
+ Debug LogLevel = 10
+ Info LogLevel = 20
+ Notice LogLevel = 30
+ Warning LogLevel = 40
+ Error LogLevel = 50
+ Critical LogLevel = 60
+ // syslog has Alert
+ // syslog has Emerg
+
+ DefaultLevel = Notice
+)
+
+// String returns the log level name in short form
+func (l LogLevel) String() string {
+ switch l.Match() {
+ case Critical:
+ return "CRIT"
+ case Error:
+ return "ERR"
+ case Warning:
+ return "WARN"
+ case Notice:
+ return "NOTE"
+ case Info:
+ return "INFO"
+ case Debug:
+ return "DEBUG"
+ default:
+ return "UNSET"
+ }
+}
+
+// Name returns the log level name in long, human-readable form.
+func (l LogLevel) Name() string {
+ switch l.Match() {
+ case Critical:
+ return "critical"
+ case Error:
+ return "error"
+ case Warning:
+ return "warning"
+ case Notice:
+ return "notice"
+ case Info:
+ return "info"
+ case Debug:
+ return "debug"
+ default:
+ return "unset"
+ }
+}
+
+// Match returns the greatest named log level that is less than or equal to
+// the receiver log level. For example, if the log level is 43, Match() will
+// return 40 (Warning).
+func (l LogLevel) Match() LogLevel {
+ if l >= Critical {
+ return Critical
+ }
+ if l >= Error {
+ return Error
+ }
+ if l >= Warning {
+ return Warning
+ }
+ if l >= Notice {
+ return Notice
+ }
+ if l >= Info {
+ return Info
+ }
+ if l >= Debug {
+ return Debug
+ }
+ return 0
+}
+
+// LevelFromString converts a named log level to its corresponding value, or
+// returns an error if the name is unknown and the string cannot be parsed as
+// an integer.
+func LevelFromString(str string) (LogLevel, error) {
+ switch strings.ToLower(str) {
+ case "crit", "critical":
+ return Critical, nil
+ case "err", "error":
+ return Error, nil
+ case "warn", "warning":
+ return Warning, nil
+ case "note", "notice":
+ return Notice, nil
+ case "info":
+ return Info, nil
+ case "debug":
+ return Debug, nil
+ }
+ val, err := strconv.ParseInt(str, 10, 32)
+ if err == nil {
+ return LogLevel(val), nil
+ }
+ return 0, fmt.Errorf("Invalid log level: %s", str)
+}
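+
+// Both symbolic names and raw integers parse, so a flag value of "warn" and
+// one of "40" are equivalent. A sketch:
+//
+//	level, err := spacelog.LevelFromString("warn") // level == Warning, err == nil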
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/logger.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/logger.go
new file mode 100644
index 00000000000..ae1734b2780
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/logger.go
@@ -0,0 +1,61 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// Logger is the basic type that allows for logging. A logger has an associated
+// name, given to it during construction, either through a logger collection,
+// GetLogger, GetLoggerNamed, or another Logger's Scope method. A logger also
+// has an associated level and handler, typically configured through the logger
+// collection to which it belongs.
+type Logger struct {
+ level LogLevel
+ name string
+ collection *LoggerCollection
+
+ handler_mtx sync.RWMutex
+ handler Handler
+}
+
+// Scope returns a new Logger with the same level and handler, using the
+// receiver Logger's name as a prefix.
+func (l *Logger) Scope(name string) *Logger {
+ return l.collection.getLogger(l.name+"."+name, l.getLevel(),
+ l.getHandler())
+}
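+
+// A sketch: a logger named "db" (an assumed name) yields hierarchically
+// named children.
+//
+//	dbLog := spacelog.GetLoggerNamed("db")
+//	queryLog := dbLog.Scope("query") // named "db.query"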
+
+func (l *Logger) setLevel(level LogLevel) {
+ atomic.StoreInt32((*int32)(&l.level), int32(level))
+}
+
+func (l *Logger) getLevel() LogLevel {
+ return LogLevel(atomic.LoadInt32((*int32)(&l.level)))
+}
+
+func (l *Logger) setHandler(handler Handler) {
+ l.handler_mtx.Lock()
+ defer l.handler_mtx.Unlock()
+ l.handler = handler
+}
+
+func (l *Logger) getHandler() Handler {
+ l.handler_mtx.RLock()
+ defer l.handler_mtx.RUnlock()
+ return l.handler
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output.go
new file mode 100644
index 00000000000..8751268fbe6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output.go
@@ -0,0 +1,178 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "sync"
+)
+
+type TextOutput interface {
+ Output(LogLevel, []byte)
+}
+
+// WriterOutput is an io.Writer wrapper that matches the TextOutput interface
+type WriterOutput struct {
+ w io.Writer
+}
+
+// NewWriterOutput returns a TextOutput that writes messages to an io.Writer
+func NewWriterOutput(w io.Writer) *WriterOutput {
+ return &WriterOutput{w: w}
+}
+
+func (o *WriterOutput) Output(_ LogLevel, message []byte) {
+ o.w.Write(append(bytes.TrimRight(message, "\r\n"), platformNewline...))
+}
+
+// StdlibOutput is a TextOutput that simply writes to the default Go stdlib
+// logging system. It is the default. If you configure the Go stdlib to write
+// to spacelog, make sure to provide a new TextOutput to your logging
+// collection.
+type StdlibOutput struct{}
+
+func (*StdlibOutput) Output(_ LogLevel, message []byte) {
+ log.Print(string(message))
+}
+
+type bufferMsg struct {
+ level LogLevel
+ message []byte
+}
+
+// BufferedOutput uses a channel to synchronize writes to a wrapped TextOutput
+// and allows for buffering a limited amount of log events.
+type BufferedOutput struct {
+ o TextOutput
+ c chan bufferMsg
+ running sync.Mutex
+ close_once sync.Once
+}
+
+// NewBufferedOutput returns a BufferedOutput wrapping output with a buffer
+// size of buffer.
+func NewBufferedOutput(output TextOutput, buffer int) *BufferedOutput {
+ if buffer < 0 {
+ buffer = 0
+ }
+ b := &BufferedOutput{
+ o: output,
+ c: make(chan bufferMsg, buffer)}
+ go b.process()
+ return b
+}
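+
+// A usage sketch; the buffer size is arbitrary, and Close should be called
+// on shutdown so the draining goroutine exits:
+//
+//	out := spacelog.NewBufferedOutput(spacelog.NewWriterOutput(os.Stderr), 1024)
+//	defer out.Close()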
+
+// Close shuts down the BufferedOutput's processing and blocks until the
+// draining goroutine has exited.
+func (b *BufferedOutput) Close() {
+ b.close_once.Do(func() {
+ close(b.c)
+ })
+ // process() holds the running mutex until it returns, so acquiring it
+ // here waits for the goroutine to finish draining.
+ b.running.Lock()
+ b.running.Unlock()
+}
+
+func (b *BufferedOutput) Output(level LogLevel, message []byte) {
+ b.c <- bufferMsg{level: level, message: message}
+}
+
+func (b *BufferedOutput) process() {
+ b.running.Lock()
+ defer b.running.Unlock()
+ for {
+ msg, open := <-b.c
+ if !open {
+ break
+ }
+ b.o.Output(msg.level, msg.message)
+ }
+}
+
+// HupHandlingTextOutput is a TextOutput whose OnHup() method may be called
+// when an administrative signal is sent to this process.
+type HupHandlingTextOutput interface {
+ TextOutput
+ OnHup()
+}
+
+// FileWriterOutput is like WriterOutput with a plain file handle, but it
+// knows how to reopen the file (or try to reopen it) if it hasn't been able
+// to open the file previously, or if an appropriate signal has been received.
+type FileWriterOutput struct {
+ *WriterOutput
+ path string
+}
+
+// NewFileWriterOutput creates a new FileWriterOutput object. This is the
+// only case where an error opening the file is reported to the caller; if a
+// later reopen fails, we'll just keep trying until it works.
+func NewFileWriterOutput(path string) (*FileWriterOutput, error) {
+ fo := &FileWriterOutput{path: path}
+ fh, err := fo.openFile()
+ if err != nil {
+ return nil, err
+ }
+ fo.WriterOutput = NewWriterOutput(fh)
+ return fo, nil
+}
+
+// openFile tries to open the file at the path associated with this object.
+func (fo *FileWriterOutput) openFile() (*os.File, error) {
+ return os.OpenFile(fo.path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
+}
+
+// fallbackLog tries to communicate a message without using our log file. In
+// all likelihood, stderr is closed or redirected to /dev/null, but at least
+// we can try writing there. In the very worst case, if an admin attaches to
+// this process with ptrace, it will be clearer what the problem is.
+func (fo *FileWriterOutput) fallbackLog(tmpl string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, tmpl, args...)
+}
+
+// Output a log line by writing it to the file. If the file has been
+// released, try to open it again. If that fails, cry for a little
+// while, then throw away the message and carry on.
+func (fo *FileWriterOutput) Output(ll LogLevel, message []byte) {
+ if fo.WriterOutput == nil {
+ fh, err := fo.openFile()
+ if err != nil {
+ fo.fallbackLog("Could not open %#v: %s", fo.path, err)
+ return
+ }
+ fo.WriterOutput = NewWriterOutput(fh)
+ }
+ fo.WriterOutput.Output(ll, message)
+}
+
+// OnHup throws away any references/handles to the output file. This probably
+// means the admin wants to rotate the file out and have this process
+// open a new one. Close the underlying io.Writer if that is a thing
+// that it knows how to do.
+func (fo *FileWriterOutput) OnHup() {
+ if fo.WriterOutput != nil {
+ wc, ok := fo.WriterOutput.w.(io.Closer)
+ if ok {
+ err := wc.Close()
+ if err != nil {
+ fo.fallbackLog("Closing %#v failed: %s", fo.path, err)
+ }
+ }
+ fo.WriterOutput = nil
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output_other.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output_other.go
new file mode 100644
index 00000000000..2be240a1781
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output_other.go
@@ -0,0 +1,19 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package spacelog
+
+var platformNewline = []byte("\n")
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output_windows.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output_windows.go
new file mode 100644
index 00000000000..58b71daba69
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/output_windows.go
@@ -0,0 +1,17 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+var platformNewline = []byte("\r\n")
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/setup.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/setup.go
new file mode 100644
index 00000000000..26ad00572c9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/setup.go
@@ -0,0 +1,183 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "math"
+ "os"
+ "os/signal"
+ "regexp"
+ "strings"
+ "syscall"
+ "text/template"
+)
+
+// SetupConfig is a configuration struct meant to be used with
+// github.com/spacemonkeygo/flagfile/utils.Setup
+// but can be used independently.
+type SetupConfig struct {
+ Output string `default:"stderr" usage:"log output. can be stdout, stderr, syslog, or a path"`
+ Level string `default:"" usage:"base logger level"`
+ Filter string `default:"" usage:"sets loggers matching this regular expression to the lowest level"`
+ Format string `default:"" usage:"format string to use"`
+ Stdlevel string `default:"warn" usage:"logger level for stdlib log integration"`
+ Subproc string `default:"" usage:"process to run for stdout/stderr-captured logging. The command is first processed as a Go template that supports {{.Facility}}, {{.Level}}, and {{.Name}} fields, and then passed to sh. If set, will redirect stdout and stderr to the given process. A good default is 'setsid logger --priority {{.Facility}}.{{.Level}} --tag {{.Name}}'"`
+ Buffer int `default:"0" usage:"the number of messages to buffer. 0 for no buffer"`
+ // Facility defaults to syslog.LOG_USER (which is 8)
+ Facility int `default:"8" usage:"the syslog facility to use if syslog output is configured"`
+ HupRotate bool `default:"false" usage:"if true, sending a HUP signal will reopen log files"`
+}
+
+var (
+ stdlog = GetLoggerNamed("stdlog")
+ funcmap = template.FuncMap{"ColorizeLevel": ColorizeLevel}
+)
+
+// SetFormatMethod adds functions to the template function map, such that
+// command-line and Setup-provided templates can call methods added to the map
+// via this method. The map comes prepopulated with ColorizeLevel, but can be
+// overridden. SetFormatMethod should be called (if at all) before one of
+// this package's Setup methods.
+func SetFormatMethod(name string, fn interface{}) {
+ funcmap[name] = fn
+}
+
+// MustSetup is the same as Setup, but panics instead of returning an error
+func MustSetup(procname string, config SetupConfig) {
+ err := Setup(procname, config)
+ if err != nil {
+ panic(err)
+ }
+}
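+
+// A sketch of file output with HUP-triggered reopening for log rotation;
+// the process name and path are assumptions:
+//
+//	err := spacelog.Setup("myproc", spacelog.SetupConfig{
+//		Output:    "/var/log/myproc.log",
+//		HupRotate: true,
+//	})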
+
+type subprocInfo struct {
+ Facility string
+ Level string
+ Name string
+}
+
+// Setup takes a given procname and sets spacelog up with the given
+// configuration. Setup supports:
+// * capturing stdout and stderr to a subprocess
+// * configuring the default level
+// * configuring log filters (enabling only some loggers)
+// * configuring the logging template
+// * configuring the output (a file, syslog, stdout, stderr)
+// * configuring log event buffering
+// * capturing all standard library logging with configurable log level
+// It is expected that this method will be called once at process start.
+func Setup(procname string, config SetupConfig) error {
+ if config.Subproc != "" {
+ t, err := template.New("subproc").Parse(config.Subproc)
+ if err != nil {
+ return err
+ }
+ var buf bytes.Buffer
+ err = t.Execute(&buf, &subprocInfo{
+ Facility: fmt.Sprintf("%d", config.Facility),
+ Level: fmt.Sprintf("%d", 2), // syslog.LOG_CRIT
+ Name: procname})
+ if err != nil {
+ return err
+ }
+ err = CaptureOutputToProcess("sh", "-c", buf.String())
+ if err != nil {
+ return err
+ }
+ }
+ if config.Level != "" {
+ level_val, err := LevelFromString(config.Level)
+ if err != nil {
+ return err
+ }
+ if level_val != DefaultLevel {
+ SetLevel(nil, level_val)
+ }
+ }
+ if config.Filter != "" {
+ re, err := regexp.Compile(config.Filter)
+ if err != nil {
+ return err
+ }
+ SetLevel(re, LogLevel(math.MinInt32))
+ }
+ var t *template.Template
+ if config.Format != "" {
+ var err error
+ t, err = template.New("user").Funcs(funcmap).Parse(config.Format)
+ if err != nil {
+ return err
+ }
+ }
+ var textout TextOutput
+ switch strings.ToLower(config.Output) {
+ case "syslog":
+ w, err := NewSyslogOutput(SyslogPriority(config.Facility), procname)
+ if err != nil {
+ return err
+ }
+ if t == nil {
+ t = SyslogTemplate
+ }
+ textout = w
+ case "stdout":
+ if t == nil {
+ t = DefaultTemplate
+ }
+ textout = NewWriterOutput(os.Stdout)
+ case "stderr", "":
+ if t == nil {
+ t = DefaultTemplate
+ }
+ textout = NewWriterOutput(os.Stderr)
+ default:
+ if t == nil {
+ t = StandardTemplate
+ }
+ var err error
+ textout, err = NewFileWriterOutput(config.Output)
+ if err != nil {
+ return err
+ }
+ }
+ if config.HupRotate {
+ if hh, ok := textout.(HupHandlingTextOutput); ok {
+ // signal.Notify requires a buffered channel so that a signal arriving
+ // while the receiver is busy is not dropped.
+ sigchan := make(chan os.Signal, 1)
+ signal.Notify(sigchan, syscall.SIGHUP)
+ go func() {
+ for range sigchan {
+ hh.OnHup()
+ }
+ }()
+ }
+ }
+ if config.Buffer > 0 {
+ textout = NewBufferedOutput(textout, config.Buffer)
+ }
+ SetHandler(nil, NewTextHandler(t, textout))
+ log.SetFlags(log.Lshortfile)
+ if config.Stdlevel == "" {
+ config.Stdlevel = "warn"
+ }
+ stdlog_level_val, err := LevelFromString(config.Stdlevel)
+ if err != nil {
+ return err
+ }
+ log.SetOutput(stdlog.WriterWithoutCaller(stdlog_level_val))
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/setup/setup.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/setup/setup.go
new file mode 100644
index 00000000000..22186888afd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/setup/setup.go
@@ -0,0 +1,80 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package setup provides simple helpers for configuring spacelog from flags.
+
+This package adds the following flags:
+ --log.output - can either be stdout, stderr, syslog, or a file path
+ --log.level - the base logger level
+ --log.filter - loggers that match this regular expression get set to the
+ lowest level
+ --log.format - a go text template for log lines
+ --log.stdlevel - the logger level to assume the standard library logger is
+ using
+ --log.subproc - a process to run for stdout/stderr capturing
+ --log.buffer - the number of messages to buffer
+*/
+package setup
+
+import (
+ "github.com/spacemonkeygo/flagfile/utils"
+ "github.com/spacemonkeygo/spacelog"
+)
+
+var (
+ config spacelog.SetupConfig
+)
+
+func init() {
+ utils.Setup("log", &config)
+}
+
+// SetFormatMethod in this subpackage is deprecated and will be removed soon.
+// Please see spacelog.SetFormatMethod instead.
+func SetFormatMethod(name string, fn interface{}) {
+ spacelog.SetFormatMethod(name, fn)
+}
+
+// MustSetup calls spacelog.MustSetup with a flag-configured config struct.
+// It's pretty useless to call this method without parsing flags first, via
+// flagfile.Load().
+func MustSetup(procname string) {
+ spacelog.MustSetup(procname, config)
+}
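+
+// A sketch of typical process startup, assuming flags are parsed with
+// flagfile as described in the package documentation above:
+//
+//	func main() {
+//		flagfile.Load()
+//		setup.MustSetup("myproc")
+//	}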
+
+// Setup calls spacelog.Setup with a flag-configured config struct. It's
+// pretty useless to call this method without parsing flags first, via
+// flagfile.Load().
+func Setup(procname string) error {
+ return spacelog.Setup(procname, config)
+}
+
+// MustSetupWithFacility is deprecated and will be removed soon. Please
+// configure facility through the facility flag option.
+func MustSetupWithFacility(procname string, facility spacelog.SyslogPriority) {
+ err := SetupWithFacility(procname, facility)
+ if err != nil {
+ panic(err)
+ }
+}
+
+// SetupWithFacility is deprecated and will be removed soon. Please
+// configure facility through the facility flag option.
+func SetupWithFacility(procname string,
+ facility spacelog.SyslogPriority) error {
+ config_copy := config
+ config_copy.Facility = int(facility)
+ return spacelog.Setup(procname, config_copy)
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/syslog.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/syslog.go
new file mode 100644
index 00000000000..0408a5a553b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/syslog.go
@@ -0,0 +1,63 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package spacelog
+
+import (
+ "bytes"
+ "log/syslog"
+)
+
+type SyslogPriority syslog.Priority
+
+// SyslogOutput is a syslog client that matches the TextOutput interface
+type SyslogOutput struct {
+ w *syslog.Writer
+}
+
+// NewSyslogOutput returns a TextOutput object that writes to syslog using
+// the given facility and tag. The log level will be determined by the log
+// event.
+func NewSyslogOutput(facility SyslogPriority, tag string) (
+ TextOutput, error) {
+ w, err := syslog.New(syslog.Priority(facility), tag)
+ if err != nil {
+ return nil, err
+ }
+ return &SyslogOutput{w: w}, nil
+}
+
+func (o *SyslogOutput) Output(level LogLevel, message []byte) {
+ level = level.Match()
+ for _, msg := range bytes.Split(message, []byte{'\n'}) {
+ switch level {
+ case Critical:
+ o.w.Crit(string(msg))
+ case Error:
+ o.w.Err(string(msg))
+ case Warning:
+ o.w.Warning(string(msg))
+ case Notice:
+ o.w.Notice(string(msg))
+ case Info:
+ o.w.Info(string(msg))
+ case Debug:
+ fallthrough
+ default:
+ o.w.Debug(string(msg))
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/syslog_windows.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/syslog_windows.go
new file mode 100644
index 00000000000..edba3c2a56b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/syslog_windows.go
@@ -0,0 +1,26 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "fmt"
+)
+
+type SyslogPriority int
+
+func NewSyslogOutput(facility SyslogPriority, tag string) (
+ TextOutput, error) {
+ return nil, fmt.Errorf("SyslogOutput not supported on Windows")
+}
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates.go
new file mode 100644
index 00000000000..4ac0fdc0f23
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates.go
@@ -0,0 +1,69 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "text/template"
+)
+
+// ColorizeLevel returns a TermColor byte sequence for the appropriate color
+// for the level. If you'd like to configure your own color choices, you can
+// make your own template with its own function map to your own colorize
+// function.
+func ColorizeLevel(level LogLevel) string {
+ switch level.Match() {
+ case Critical, Error:
+ return TermColors{}.Red()
+ case Warning:
+ return TermColors{}.Magenta()
+ case Notice:
+ return TermColors{}.Yellow()
+ case Info, Debug:
+ return TermColors{}.Green()
+ }
+ return ""
+}
+
+var (
+ // ColorTemplate uses the default ColorizeLevel method for color choices.
+ ColorTemplate = template.Must(template.New("color").Funcs(template.FuncMap{
+ "ColorizeLevel": ColorizeLevel}).Parse(
+ `{{.Blue}}{{.Date}} {{.Time}}{{.Reset}} ` +
+ `{{.Bold}}{{ColorizeLevel .Level}}{{.LevelJustified}}{{.Reset}} ` +
+ `{{.Underline}}{{.LoggerName}}{{.Reset}} ` +
+ `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}- ` +
+ `{{ColorizeLevel .Level}}{{.Message}}{{.Reset}}`))
+
+ // StandardTemplate is like ColorTemplate with no color.
+ StandardTemplate = template.Must(template.New("standard").Parse(
+ `{{.Date}} {{.Time}} ` +
+ `{{.Level}} {{.LoggerName}} ` +
+ `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` +
+ `- {{.Message}}`))
+
+ // SyslogTemplate is missing the date and time as syslog adds those
+ // things.
+ SyslogTemplate = template.Must(template.New("syslog").Parse(
+ `{{.Level}} {{.LoggerName}} ` +
+ `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` +
+ `- {{.Message}}`))
+
+ // StdlibTemplate is missing the date and time as the stdlib logger often
+ // adds those things.
+ StdlibTemplate = template.Must(template.New("stdlib").Parse(
+ `{{.Level}} {{.LoggerName}} ` +
+ `{{if .Filename}}{{.Filename}}:{{.Line}} {{end}}` +
+ `- {{.Message}}`))
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates_others.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates_others.go
new file mode 100644
index 00000000000..114e2e14312
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates_others.go
@@ -0,0 +1,22 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package spacelog
+
+var (
+ // DefaultTemplate is the default template for stdout/stderr on this platform.
+ DefaultTemplate = ColorTemplate
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates_windows.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates_windows.go
new file mode 100644
index 00000000000..512b600481e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/templates_windows.go
@@ -0,0 +1,20 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+var (
+ // DefaultTemplate is the default template for stdout/stderr on this platform.
+ DefaultTemplate = StandardTemplate
+)
diff --git a/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/text.go b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/text.go
new file mode 100644
index 00000000000..8b36ce99f50
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/github.com/spacemonkeygo/spacelog/text.go
@@ -0,0 +1,80 @@
+// Copyright (C) 2014 Space Monkey, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spacelog
+
+import (
+ "bytes"
+ "fmt"
+ "runtime"
+ "strings"
+ "sync"
+ "text/template"
+ "time"
+)
+
+// TextHandler is the default implementation of the Handler interface. A
+// TextHandler, on log events, makes LogEvent structures, passes them to the
+// configured template, and then passes that output to a configured TextOutput
+// interface.
+type TextHandler struct {
+ mtx sync.RWMutex
+ template *template.Template
+ output TextOutput
+}
+
+// NewTextHandler returns a Handler that builds LogEvents, formats them with
+// the given template, and passes the result to output.
+func NewTextHandler(t *template.Template, output TextOutput) *TextHandler {
+ return &TextHandler{template: t, output: output}
+}
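+
+// A sketch installing a plain-text handler that writes to stderr for all
+// loggers:
+//
+//	h := spacelog.NewTextHandler(spacelog.StandardTemplate,
+//		spacelog.NewWriterOutput(os.Stderr))
+//	spacelog.SetHandler(nil, h)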
+
+// Log makes a LogEvent, formats it with the configured template, then passes
+// the output to the configured output sink.
+func (h *TextHandler) Log(logger_name string, level LogLevel, msg string,
+ calldepth int) {
+ h.mtx.RLock()
+ output, template := h.output, h.template
+ h.mtx.RUnlock()
+ event := LogEvent{
+ LoggerName: logger_name,
+ Level: level,
+ Message: strings.TrimRight(msg, "\n\r"),
+ Timestamp: time.Now()}
+ if calldepth >= 0 {
+ _, event.Filepath, event.Line, _ = runtime.Caller(calldepth + 1)
+ }
+ var buf bytes.Buffer
+ err := template.Execute(&buf, &event)
+ if err != nil {
+ output.Output(level, []byte(
+ fmt.Sprintf("log format template failed: %s", err)))
+ return
+ }
+ output.Output(level, buf.Bytes())
+}
+
+// SetTextTemplate changes the TextHandler's text formatting template
+func (h *TextHandler) SetTextTemplate(t *template.Template) {
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
+ h.template = t
+}
+
+// SetTextOutput changes the TextHandler's TextOutput sink
+func (h *TextHandler) SetTextOutput(output TextOutput) {
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
+ h.output = output
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/.gitattributes b/src/mongo/gotools/vendor/src/golang.org/x/crypto/.gitattributes
new file mode 100644
index 00000000000..d2f212e5da8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/.gitattributes
@@ -0,0 +1,10 @@
+# Treat all files in this repo as binary, with no git magic updating
+# line endings. Windows users contributing to Go will need to use a
+# modern version of git and editors capable of LF line endings.
+#
+# We'll prevent accidental CRLF line endings from entering the repo
+# via the git-review gofmt checks.
+#
+# See golang.org/issue/9281
+
+* -text
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/.gitignore b/src/mongo/gotools/vendor/src/golang.org/x/crypto/.gitignore
new file mode 100644
index 00000000000..8339fd61d3f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/.gitignore
@@ -0,0 +1,2 @@
+# Add no patterns to .hgignore except for files generated by the build.
+last-change
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/AUTHORS b/src/mongo/gotools/vendor/src/golang.org/x/crypto/AUTHORS
new file mode 100644
index 00000000000..15167cd746c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/CONTRIBUTING.md b/src/mongo/gotools/vendor/src/golang.org/x/crypto/CONTRIBUTING.md
new file mode 100644
index 00000000000..88dff59bc7d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/CONTRIBUTORS b/src/mongo/gotools/vendor/src/golang.org/x/crypto/CONTRIBUTORS
new file mode 100644
index 00000000000..1c4577e9680
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/LICENSE b/src/mongo/gotools/vendor/src/golang.org/x/crypto/LICENSE
new file mode 100644
index 00000000000..6a66aea5eaf
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/PATENTS b/src/mongo/gotools/vendor/src/golang.org/x/crypto/PATENTS
new file mode 100644
index 00000000000..733099041f8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/README b/src/mongo/gotools/vendor/src/golang.org/x/crypto/README
new file mode 100644
index 00000000000..f1e0cbf94e0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/README
@@ -0,0 +1,3 @@
+This repository holds supplementary Go cryptography libraries.
+
+To submit changes to this repository, see http://golang.org/doc/contribute.html.
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/base64.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/base64.go
new file mode 100644
index 00000000000..fc311609081
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/base64.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcrypt
+
+import "encoding/base64"
+
+const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+
+var bcEncoding = base64.NewEncoding(alphabet)
+
+func base64Encode(src []byte) []byte {
+ n := bcEncoding.EncodedLen(len(src))
+ dst := make([]byte, n)
+ bcEncoding.Encode(dst, src)
+ for dst[n-1] == '=' {
+ n--
+ }
+ return dst[:n]
+}
+
+func base64Decode(src []byte) ([]byte, error) {
+ numOfEquals := 4 - (len(src) % 4)
+ for i := 0; i < numOfEquals; i++ {
+ src = append(src, '=')
+ }
+
+ dst := make([]byte, bcEncoding.DecodedLen(len(src)))
+ n, err := bcEncoding.Decode(dst, src)
+ if err != nil {
+ return nil, err
+ }
+ return dst[:n], nil
+}
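
bcrypt uses its own radix-64 alphabet (starting "./" rather than "AB") and stores salts and digests without '=' padding; the two helpers above get that from encoding/base64 by trimming and re-adding padding by hand. Note that base64Decode assumes its input length is never a multiple of four, which holds for the 22-character salts and 31-character digests bcrypt feeds it. A minimal standalone sketch of the same encoding, assuming nothing beyond the standard library:

    package main

    import (
        "encoding/base64"
        "fmt"
    )

    // bcrypt's radix-64 alphabet, as declared in base64.go above.
    const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

    func main() {
        // WithPadding(NoPadding) yields the same unpadded form that
        // base64Encode produces by trimming trailing '=' by hand.
        enc := base64.NewEncoding(alphabet).WithPadding(base64.NoPadding)

        salt := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30}
        s := enc.EncodeToString(salt)
        fmt.Println(s) // XajjQvNhvvRt5GSeFk1xFe, matching TestUnpaddedBase64Encoding below

        raw, err := enc.DecodeString(s)
        fmt.Println(raw, err)
    }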
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/bcrypt.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/bcrypt.go
new file mode 100644
index 00000000000..f8b807f9c3a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/bcrypt.go
@@ -0,0 +1,294 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing
+// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf
+package bcrypt // import "golang.org/x/crypto/bcrypt"
+
+// The code is a port of Provos and Mazières's C implementation.
+import (
+ "crypto/rand"
+ "crypto/subtle"
+ "errors"
+ "fmt"
+ "golang.org/x/crypto/blowfish"
+ "io"
+ "strconv"
+)
+
+const (
+ MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword
+ MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword
+ DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword
+)
+
+// The error returned from CompareHashAndPassword when a password and hash do
+// not match.
+var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password")
+
+// The error returned from CompareHashAndPassword when a hash is too short to
+// be a bcrypt hash.
+var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password")
+
+// The error returned from CompareHashAndPassword when a hash was created with
+// a bcrypt algorithm newer than this implementation.
+type HashVersionTooNewError byte
+
+func (hv HashVersionTooNewError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion)
+}
+
+// The error returned from CompareHashAndPassword when a hash starts with something other than '$'
+type InvalidHashPrefixError byte
+
+func (ih InvalidHashPrefixError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih))
+}
+
+type InvalidCostError int
+
+func (ic InvalidCostError) Error() string {
+ return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost))
+}
+
+const (
+ majorVersion = '2'
+ minorVersion = 'a'
+ maxSaltSize = 16
+ maxCryptedHashSize = 23
+ encodedSaltSize = 22
+ encodedHashSize = 31
+ minHashSize = 59
+)
+
+// magicCipherData is an IV for the 64 Blowfish encryption calls in
+// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes.
+var magicCipherData = []byte{
+ 0x4f, 0x72, 0x70, 0x68,
+ 0x65, 0x61, 0x6e, 0x42,
+ 0x65, 0x68, 0x6f, 0x6c,
+ 0x64, 0x65, 0x72, 0x53,
+ 0x63, 0x72, 0x79, 0x44,
+ 0x6f, 0x75, 0x62, 0x74,
+}
+
+type hashed struct {
+ hash []byte
+ salt []byte
+ cost int // allowed range is MinCost to MaxCost
+ major byte
+ minor byte
+}
+
+// GenerateFromPassword returns the bcrypt hash of the password at the given
+// cost. If the cost given is less than MinCost, the cost will be set to
+// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package,
+// to compare the returned hashed password with its cleartext version.
+func GenerateFromPassword(password []byte, cost int) ([]byte, error) {
+ p, err := newFromPassword(password, cost)
+ if err != nil {
+ return nil, err
+ }
+ return p.Hash(), nil
+}
+
+// CompareHashAndPassword compares a bcrypt hashed password with its possible
+// plaintext equivalent. Returns nil on success, or an error on failure.
+func CompareHashAndPassword(hashedPassword, password []byte) error {
+ p, err := newFromHash(hashedPassword)
+ if err != nil {
+ return err
+ }
+
+ otherHash, err := bcrypt(password, p.cost, p.salt)
+ if err != nil {
+ return err
+ }
+
+ otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor}
+ if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 {
+ return nil
+ }
+
+ return ErrMismatchedHashAndPassword
+}
+
+// Cost returns the hashing cost used to create the given hashed
+// password. When, in the future, the hashing cost of a password system needs
+// to be increased in order to adjust for greater computational power, this
+// function allows one to establish which passwords need to be updated.
+func Cost(hashedPassword []byte) (int, error) {
+ p, err := newFromHash(hashedPassword)
+ if err != nil {
+ return 0, err
+ }
+ return p.cost, nil
+}
+
+func newFromPassword(password []byte, cost int) (*hashed, error) {
+ if cost < MinCost {
+ cost = DefaultCost
+ }
+ p := new(hashed)
+ p.major = majorVersion
+ p.minor = minorVersion
+
+ err := checkCost(cost)
+ if err != nil {
+ return nil, err
+ }
+ p.cost = cost
+
+ unencodedSalt := make([]byte, maxSaltSize)
+ _, err = io.ReadFull(rand.Reader, unencodedSalt)
+ if err != nil {
+ return nil, err
+ }
+
+ p.salt = base64Encode(unencodedSalt)
+ hash, err := bcrypt(password, p.cost, p.salt)
+ if err != nil {
+ return nil, err
+ }
+ p.hash = hash
+ return p, err
+}
+
+func newFromHash(hashedSecret []byte) (*hashed, error) {
+ if len(hashedSecret) < minHashSize {
+ return nil, ErrHashTooShort
+ }
+ p := new(hashed)
+ n, err := p.decodeVersion(hashedSecret)
+ if err != nil {
+ return nil, err
+ }
+ hashedSecret = hashedSecret[n:]
+ n, err = p.decodeCost(hashedSecret)
+ if err != nil {
+ return nil, err
+ }
+ hashedSecret = hashedSecret[n:]
+
+ // The "+2" is here because we'll have to append at most 2 '=' to the salt
+ // when base64 decoding it in expensiveBlowfishSetup().
+ p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2)
+ copy(p.salt, hashedSecret[:encodedSaltSize])
+
+ hashedSecret = hashedSecret[encodedSaltSize:]
+ p.hash = make([]byte, len(hashedSecret))
+ copy(p.hash, hashedSecret)
+
+ return p, nil
+}
+
+func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
+ cipherData := make([]byte, len(magicCipherData))
+ copy(cipherData, magicCipherData)
+
+ c, err := expensiveBlowfishSetup(password, uint32(cost), salt)
+ if err != nil {
+ return nil, err
+ }
+
+ for i := 0; i < 24; i += 8 {
+ for j := 0; j < 64; j++ {
+ c.Encrypt(cipherData[i:i+8], cipherData[i:i+8])
+ }
+ }
+
+ // Bug compatibility with C bcrypt implementations. We only encode 23 of
+ // the 24 bytes encrypted.
+ hsh := base64Encode(cipherData[:maxCryptedHashSize])
+ return hsh, nil
+}
+
+func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {
+
+ csalt, err := base64Decode(salt)
+ if err != nil {
+ return nil, err
+ }
+
+ // Bug compatibility with C bcrypt implementations. They use the trailing
+ // NULL in the key string during expansion.
+ ckey := append(key, 0)
+
+ c, err := blowfish.NewSaltedCipher(ckey, csalt)
+ if err != nil {
+ return nil, err
+ }
+
+ var i, rounds uint64
+ rounds = 1 << cost
+ for i = 0; i < rounds; i++ {
+ blowfish.ExpandKey(ckey, c)
+ blowfish.ExpandKey(csalt, c)
+ }
+
+ return c, nil
+}
+
+func (p *hashed) Hash() []byte {
+ arr := make([]byte, 60)
+ arr[0] = '$'
+ arr[1] = p.major
+ n := 2
+ if p.minor != 0 {
+ arr[2] = p.minor
+ n = 3
+ }
+ arr[n] = '$'
+ n += 1
+ copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost)))
+ n += 2
+ arr[n] = '$'
+ n += 1
+ copy(arr[n:], p.salt)
+ n += encodedSaltSize
+ copy(arr[n:], p.hash)
+ n += encodedHashSize
+ return arr[:n]
+}
+
+func (p *hashed) decodeVersion(sbytes []byte) (int, error) {
+ if sbytes[0] != '$' {
+ return -1, InvalidHashPrefixError(sbytes[0])
+ }
+ if sbytes[1] > majorVersion {
+ return -1, HashVersionTooNewError(sbytes[1])
+ }
+ p.major = sbytes[1]
+ n := 3
+ if sbytes[2] != '$' {
+ p.minor = sbytes[2]
+ n++
+ }
+ return n, nil
+}
+
+// sbytes should begin where decodeVersion left off.
+func (p *hashed) decodeCost(sbytes []byte) (int, error) {
+ cost, err := strconv.Atoi(string(sbytes[0:2]))
+ if err != nil {
+ return -1, err
+ }
+ err = checkCost(cost)
+ if err != nil {
+ return -1, err
+ }
+ p.cost = cost
+ return 3, nil
+}
+
+func (p *hashed) String() string {
+ return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor)
+}
+
+func checkCost(cost int) error {
+ if cost < MinCost || cost > MaxCost {
+ return InvalidCostError(cost)
+ }
+ return nil
+}
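
Taken together, the exported surface of this package is GenerateFromPassword, CompareHashAndPassword, and Cost, and Hash() above shows the stored layout: '$', version, '$', two-digit cost, '$', then the 22-character salt immediately followed by the 31-character digest. A minimal usage sketch:

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/crypto/bcrypt"
    )

    func main() {
        // Hash at the default cost (10); output looks like
        // $2a$10$<22-char salt><31-char digest>.
        hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
        if err != nil {
            log.Fatal(err)
        }

        // Constant-time verification; nil means the password matches.
        if err := bcrypt.CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
            log.Fatal("mismatch: ", err)
        }

        // Cost recovers the work factor from a stored hash, e.g. to decide
        // whether it should be regenerated at a higher cost later on.
        cost, _ := bcrypt.Cost(hash)
        fmt.Println("cost:", cost) // 10
    }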
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/bcrypt_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/bcrypt_test.go
new file mode 100644
index 00000000000..f08a6f5b229
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bcrypt/bcrypt_test.go
@@ -0,0 +1,226 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bcrypt
+
+import (
+ "bytes"
+ "fmt"
+ "testing"
+)
+
+func TestBcryptingIsEasy(t *testing.T) {
+ pass := []byte("mypassword")
+ hp, err := GenerateFromPassword(pass, 0)
+ if err != nil {
+ t.Fatalf("GenerateFromPassword error: %s", err)
+ }
+
+ if CompareHashAndPassword(hp, pass) != nil {
+ t.Errorf("%v should hash %s correctly", hp, pass)
+ }
+
+ notPass := "notthepass"
+ err = CompareHashAndPassword(hp, []byte(notPass))
+ if err != ErrMismatchedHashAndPassword {
+ t.Errorf("%v and %s should be mismatched", hp, notPass)
+ }
+}
+
+func TestBcryptingIsCorrect(t *testing.T) {
+ pass := []byte("allmine")
+ salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
+ expectedHash := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
+
+ hash, err := bcrypt(pass, 10, salt)
+ if err != nil {
+ t.Fatalf("bcrypt blew up: %v", err)
+ }
+ if !bytes.HasSuffix(expectedHash, hash) {
+ t.Errorf("%v should be the suffix of %v", hash, expectedHash)
+ }
+
+ h, err := newFromHash(expectedHash)
+ if err != nil {
+ t.Errorf("Unable to parse %s: %v", string(expectedHash), err)
+ }
+
+ // This is not the safe way to compare these hashes. We do this only for
+ // testing clarity. Use bcrypt.CompareHashAndPassword()
+ if err == nil && !bytes.Equal(expectedHash, h.Hash()) {
+ t.Errorf("Parsed hash %v should equal %v", h.Hash(), expectedHash)
+ }
+}
+
+func TestVeryShortPasswords(t *testing.T) {
+ key := []byte("k")
+ salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
+ _, err := bcrypt(key, 10, salt)
+ if err != nil {
+ t.Errorf("One byte key resulted in error: %s", err)
+ }
+}
+
+func TestTooLongPasswordsWork(t *testing.T) {
+ salt := []byte("XajjQvNhvvRt5GSeFk1xFe")
+ // One byte over the usual 56 byte limit that blowfish has
+ tooLongPass := []byte("012345678901234567890123456789012345678901234567890123456")
+ tooLongExpected := []byte("$2a$10$XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C")
+ hash, err := bcrypt(tooLongPass, 10, salt)
+ if err != nil {
+ t.Fatalf("bcrypt blew up on long password: %v", err)
+ }
+ if !bytes.HasSuffix(tooLongExpected, hash) {
+ t.Errorf("%v should be the suffix of %v", hash, tooLongExpected)
+ }
+}
+
+type InvalidHashTest struct {
+ err error
+ hash []byte
+}
+
+var invalidTests = []InvalidHashTest{
+ {ErrHashTooShort, []byte("$2a$10$fooo")},
+ {ErrHashTooShort, []byte("$2a")},
+ {HashVersionTooNewError('3'), []byte("$3a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
+ {InvalidHashPrefixError('%'), []byte("%2a$10$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
+ {InvalidCostError(32), []byte("$2a$32$sssssssssssssssssssssshhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh")},
+}
+
+func TestInvalidHashErrors(t *testing.T) {
+ check := func(name string, expected, err error) {
+ if err == nil {
+ t.Errorf("%s: Should have returned an error", name)
+ }
+ if err != nil && err != expected {
+ t.Errorf("%s gave err %v but should have given %v", name, err, expected)
+ }
+ }
+ for _, iht := range invalidTests {
+ _, err := newFromHash(iht.hash)
+ check("newFromHash", iht.err, err)
+ err = CompareHashAndPassword(iht.hash, []byte("anything"))
+ check("CompareHashAndPassword", iht.err, err)
+ }
+}
+
+func TestUnpaddedBase64Encoding(t *testing.T) {
+ original := []byte{101, 201, 101, 75, 19, 227, 199, 20, 239, 236, 133, 32, 30, 109, 243, 30}
+ encodedOriginal := []byte("XajjQvNhvvRt5GSeFk1xFe")
+
+ encoded := base64Encode(original)
+
+ if !bytes.Equal(encodedOriginal, encoded) {
+ t.Errorf("Encoded %v should have equaled %v", encoded, encodedOriginal)
+ }
+
+ decoded, err := base64Decode(encodedOriginal)
+ if err != nil {
+ t.Fatalf("base64Decode blew up: %s", err)
+ }
+
+ if !bytes.Equal(decoded, original) {
+ t.Errorf("Decoded %v should have equaled %v", decoded, original)
+ }
+}
+
+func TestCost(t *testing.T) {
+ suffix := "XajjQvNhvvRt5GSeFk1xFe5l47dONXg781AmZtd869sO8zfsHuw7C"
+ for _, vers := range []string{"2a", "2"} {
+ for _, cost := range []int{4, 10} {
+ s := fmt.Sprintf("$%s$%02d$%s", vers, cost, suffix)
+ h := []byte(s)
+ actual, err := Cost(h)
+ if err != nil {
+ t.Errorf("Cost, error: %s", err)
+ continue
+ }
+ if actual != cost {
+ t.Errorf("Cost, expected: %d, actual: %d", cost, actual)
+ }
+ }
+ }
+ _, err := Cost([]byte("$a$a$" + suffix))
+ if err == nil {
+ t.Errorf("Cost, malformed but no error returned")
+ }
+}
+
+func TestCostValidationInHash(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+
+ pass := []byte("mypassword")
+
+ for c := 0; c < MinCost; c++ {
+ p, _ := newFromPassword(pass, c)
+ if p.cost != DefaultCost {
+ t.Errorf("newFromPassword should default costs below %d to %d, but was %d", MinCost, DefaultCost, p.cost)
+ }
+ }
+
+ p, _ := newFromPassword(pass, 14)
+ if p.cost != 14 {
+ t.Errorf("newFromPassword should default cost to 14, but was %d", p.cost)
+ }
+
+ hp, _ := newFromHash(p.Hash())
+ if p.cost != hp.cost {
+ t.Errorf("newFromHash should maintain the cost at %d, but was %d", p.cost, hp.cost)
+ }
+
+ _, err := newFromPassword(pass, 32)
+ if err == nil {
+ t.Fatalf("newFromPassword: should return a cost error")
+ }
+ if err != InvalidCostError(32) {
+ t.Errorf("newFromPassword: should return cost error, got %#v", err)
+ }
+}
+
+func TestCostReturnsWithLeadingZeroes(t *testing.T) {
+ hp, _ := newFromPassword([]byte("abcdefgh"), 7)
+ cost := hp.Hash()[4:7]
+ expected := []byte("07$")
+
+ if !bytes.Equal(expected, cost) {
+ t.Errorf("single digit costs in hash should have leading zeros: was %v instead of %v", cost, expected)
+ }
+}
+
+func TestMinorNotRequired(t *testing.T) {
+ noMinorHash := []byte("$2$10$XajjQvNhvvRt5GSeFk1xFeyqRrsxkhBkUiQeg0dt.wU1qD4aFDcga")
+ h, err := newFromHash(noMinorHash)
+ if err != nil {
+ t.Fatalf("No minor hash blew up: %s", err)
+ }
+ if h.minor != 0 {
+ t.Errorf("Should leave minor version at 0, but was %d", h.minor)
+ }
+
+ if !bytes.Equal(noMinorHash, h.Hash()) {
+ t.Errorf("Should generate hash %v, but created %v", noMinorHash, h.Hash())
+ }
+}
+
+func BenchmarkEqual(b *testing.B) {
+ b.StopTimer()
+ passwd := []byte("somepasswordyoulike")
+ hash, _ := GenerateFromPassword(passwd, 10)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ CompareHashAndPassword(hash, passwd)
+ }
+}
+
+func BenchmarkGeneration(b *testing.B) {
+ b.StopTimer()
+ passwd := []byte("mylongpassword1234")
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ GenerateFromPassword(passwd, 10)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/block.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/block.go
new file mode 100644
index 00000000000..9d80f19521b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/block.go
@@ -0,0 +1,159 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blowfish
+
+// getNextWord returns the next big-endian uint32 value from the byte slice
+// at the given position in a circular manner, updating the position.
+func getNextWord(b []byte, pos *int) uint32 {
+ var w uint32
+ j := *pos
+ for i := 0; i < 4; i++ {
+ w = w<<8 | uint32(b[j])
+ j++
+ if j >= len(b) {
+ j = 0
+ }
+ }
+ *pos = j
+ return w
+}
+
+// ExpandKey performs a key expansion on the given *Cipher. Specifically, it
+// performs the Blowfish algorithm's key schedule which sets up the *Cipher's
+// pi and substitution tables for calls to Encrypt. This is used, primarily,
+// by the bcrypt package to reuse the Blowfish key schedule during its
+// setup. It's unlikely that you need to use this directly.
+func ExpandKey(key []byte, c *Cipher) {
+ j := 0
+ for i := 0; i < 18; i++ {
+ // Using inlined getNextWord for performance.
+ var d uint32
+ for k := 0; k < 4; k++ {
+ d = d<<8 | uint32(key[j])
+ j++
+ if j >= len(key) {
+ j = 0
+ }
+ }
+ c.p[i] ^= d
+ }
+
+ var l, r uint32
+ for i := 0; i < 18; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.p[i], c.p[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s0[i], c.s0[i+1] = l, r
+ }
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s1[i], c.s1[i+1] = l, r
+ }
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s2[i], c.s2[i+1] = l, r
+ }
+ for i := 0; i < 256; i += 2 {
+ l, r = encryptBlock(l, r, c)
+ c.s3[i], c.s3[i+1] = l, r
+ }
+}
+
+// This is similar to ExpandKey, but folds the salt during the key
+// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero
+// salt passed in, reusing ExpandKey turns out to be a place of inefficiency
+// and specializing it here is useful.
+func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) {
+ j := 0
+ for i := 0; i < 18; i++ {
+ c.p[i] ^= getNextWord(key, &j)
+ }
+
+ j = 0
+ var l, r uint32
+ for i := 0; i < 18; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.p[i], c.p[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s0[i], c.s0[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s1[i], c.s1[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s2[i], c.s2[i+1] = l, r
+ }
+
+ for i := 0; i < 256; i += 2 {
+ l ^= getNextWord(salt, &j)
+ r ^= getNextWord(salt, &j)
+ l, r = encryptBlock(l, r, c)
+ c.s3[i], c.s3[i+1] = l, r
+ }
+}
+
+func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
+ xl, xr := l, r
+ xl ^= c.p[0]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16]
+ xr ^= c.p[17]
+ return xr, xl
+}
+
+func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) {
+ xl, xr := l, r
+ xl ^= c.p[17]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3]
+ xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2]
+ xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1]
+ xr ^= c.p[0]
+ return xr, xl
+}
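
The sixteen unrolled statements in encryptBlock (and their mirror in decryptBlock) inline the Blowfish round function F(x) = ((S0[a] + S1[b]) ^ S2[c]) + S3[d], where a..d are the four bytes of x; since Go gives + and ^ equal precedence, the trailing "+ c.s3[...] ^ c.p[n]" groups as "(... + s3[...]) ^ p[n]". A rolled-up sketch of the same Feistel network, using a hypothetical in-package helper f:

    // f is the Blowfish round function that encryptBlock inlines above.
    func f(c *Cipher, x uint32) uint32 {
        return ((c.s0[byte(x>>24)] + c.s1[byte(x>>16)]) ^ c.s2[byte(x>>8)]) + c.s3[byte(x)]
    }

    // encryptBlockLoop computes the same result as encryptBlock, rolled up:
    // 16 rounds consuming p[1]..p[16], bracketed by p[0] and p[17], with the
    // final half-swap expressed by returning (xr, xl).
    func encryptBlockLoop(l, r uint32, c *Cipher) (uint32, uint32) {
        xl, xr := l, r
        xl ^= c.p[0]
        for i := 1; i < 16; i += 2 {
            xr ^= f(c, xl) ^ c.p[i]
            xl ^= f(c, xr) ^ c.p[i+1]
        }
        xr ^= c.p[17]
        return xr, xl
    }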
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/blowfish_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/blowfish_test.go
new file mode 100644
index 00000000000..7afa1fdf3d5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/blowfish_test.go
@@ -0,0 +1,274 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package blowfish
+
+import "testing"
+
+type CryptTest struct {
+ key []byte
+ in []byte
+ out []byte
+}
+
+// Test vector values are from http://www.schneier.com/code/vectors.txt.
+var encryptTests = []CryptTest{
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
+ {
+ []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ []byte{0x51, 0x86, 0x6F, 0xD5, 0xB8, 0x5E, 0xCB, 0x8A}},
+ {
+ []byte{0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ []byte{0x7D, 0x85, 0x6F, 0x9A, 0x61, 0x30, 0x63, 0xF2}},
+ {
+ []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
+ []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
+ []byte{0x24, 0x66, 0xDD, 0x87, 0x8B, 0x96, 0x3C, 0x9D}},
+
+ {
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
+ []byte{0x61, 0xF9, 0xC3, 0x80, 0x22, 0x81, 0xB0, 0x96}},
+ {
+ []byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11},
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0x7D, 0x0C, 0xC6, 0x30, 0xAF, 0xDA, 0x1E, 0xC7}},
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x4E, 0xF9, 0x97, 0x45, 0x61, 0x98, 0xDD, 0x78}},
+ {
+ []byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0x0A, 0xCE, 0xAB, 0x0F, 0xC6, 0xA0, 0xA2, 0x8D}},
+ {
+ []byte{0x7C, 0xA1, 0x10, 0x45, 0x4A, 0x1A, 0x6E, 0x57},
+ []byte{0x01, 0xA1, 0xD6, 0xD0, 0x39, 0x77, 0x67, 0x42},
+ []byte{0x59, 0xC6, 0x82, 0x45, 0xEB, 0x05, 0x28, 0x2B}},
+ {
+ []byte{0x01, 0x31, 0xD9, 0x61, 0x9D, 0xC1, 0x37, 0x6E},
+ []byte{0x5C, 0xD5, 0x4C, 0xA8, 0x3D, 0xEF, 0x57, 0xDA},
+ []byte{0xB1, 0xB8, 0xCC, 0x0B, 0x25, 0x0F, 0x09, 0xA0}},
+ {
+ []byte{0x07, 0xA1, 0x13, 0x3E, 0x4A, 0x0B, 0x26, 0x86},
+ []byte{0x02, 0x48, 0xD4, 0x38, 0x06, 0xF6, 0x71, 0x72},
+ []byte{0x17, 0x30, 0xE5, 0x77, 0x8B, 0xEA, 0x1D, 0xA4}},
+ {
+ []byte{0x38, 0x49, 0x67, 0x4C, 0x26, 0x02, 0x31, 0x9E},
+ []byte{0x51, 0x45, 0x4B, 0x58, 0x2D, 0xDF, 0x44, 0x0A},
+ []byte{0xA2, 0x5E, 0x78, 0x56, 0xCF, 0x26, 0x51, 0xEB}},
+ {
+ []byte{0x04, 0xB9, 0x15, 0xBA, 0x43, 0xFE, 0xB5, 0xB6},
+ []byte{0x42, 0xFD, 0x44, 0x30, 0x59, 0x57, 0x7F, 0xA2},
+ []byte{0x35, 0x38, 0x82, 0xB1, 0x09, 0xCE, 0x8F, 0x1A}},
+ {
+ []byte{0x01, 0x13, 0xB9, 0x70, 0xFD, 0x34, 0xF2, 0xCE},
+ []byte{0x05, 0x9B, 0x5E, 0x08, 0x51, 0xCF, 0x14, 0x3A},
+ []byte{0x48, 0xF4, 0xD0, 0x88, 0x4C, 0x37, 0x99, 0x18}},
+ {
+ []byte{0x01, 0x70, 0xF1, 0x75, 0x46, 0x8F, 0xB5, 0xE6},
+ []byte{0x07, 0x56, 0xD8, 0xE0, 0x77, 0x47, 0x61, 0xD2},
+ []byte{0x43, 0x21, 0x93, 0xB7, 0x89, 0x51, 0xFC, 0x98}},
+ {
+ []byte{0x43, 0x29, 0x7F, 0xAD, 0x38, 0xE3, 0x73, 0xFE},
+ []byte{0x76, 0x25, 0x14, 0xB8, 0x29, 0xBF, 0x48, 0x6A},
+ []byte{0x13, 0xF0, 0x41, 0x54, 0xD6, 0x9D, 0x1A, 0xE5}},
+ {
+ []byte{0x07, 0xA7, 0x13, 0x70, 0x45, 0xDA, 0x2A, 0x16},
+ []byte{0x3B, 0xDD, 0x11, 0x90, 0x49, 0x37, 0x28, 0x02},
+ []byte{0x2E, 0xED, 0xDA, 0x93, 0xFF, 0xD3, 0x9C, 0x79}},
+ {
+ []byte{0x04, 0x68, 0x91, 0x04, 0xC2, 0xFD, 0x3B, 0x2F},
+ []byte{0x26, 0x95, 0x5F, 0x68, 0x35, 0xAF, 0x60, 0x9A},
+ []byte{0xD8, 0x87, 0xE0, 0x39, 0x3C, 0x2D, 0xA6, 0xE3}},
+ {
+ []byte{0x37, 0xD0, 0x6B, 0xB5, 0x16, 0xCB, 0x75, 0x46},
+ []byte{0x16, 0x4D, 0x5E, 0x40, 0x4F, 0x27, 0x52, 0x32},
+ []byte{0x5F, 0x99, 0xD0, 0x4F, 0x5B, 0x16, 0x39, 0x69}},
+ {
+ []byte{0x1F, 0x08, 0x26, 0x0D, 0x1A, 0xC2, 0x46, 0x5E},
+ []byte{0x6B, 0x05, 0x6E, 0x18, 0x75, 0x9F, 0x5C, 0xCA},
+ []byte{0x4A, 0x05, 0x7A, 0x3B, 0x24, 0xD3, 0x97, 0x7B}},
+ {
+ []byte{0x58, 0x40, 0x23, 0x64, 0x1A, 0xBA, 0x61, 0x76},
+ []byte{0x00, 0x4B, 0xD6, 0xEF, 0x09, 0x17, 0x60, 0x62},
+ []byte{0x45, 0x20, 0x31, 0xC1, 0xE4, 0xFA, 0xDA, 0x8E}},
+ {
+ []byte{0x02, 0x58, 0x16, 0x16, 0x46, 0x29, 0xB0, 0x07},
+ []byte{0x48, 0x0D, 0x39, 0x00, 0x6E, 0xE7, 0x62, 0xF2},
+ []byte{0x75, 0x55, 0xAE, 0x39, 0xF5, 0x9B, 0x87, 0xBD}},
+ {
+ []byte{0x49, 0x79, 0x3E, 0xBC, 0x79, 0xB3, 0x25, 0x8F},
+ []byte{0x43, 0x75, 0x40, 0xC8, 0x69, 0x8F, 0x3C, 0xFA},
+ []byte{0x53, 0xC5, 0x5F, 0x9C, 0xB4, 0x9F, 0xC0, 0x19}},
+ {
+ []byte{0x4F, 0xB0, 0x5E, 0x15, 0x15, 0xAB, 0x73, 0xA7},
+ []byte{0x07, 0x2D, 0x43, 0xA0, 0x77, 0x07, 0x52, 0x92},
+ []byte{0x7A, 0x8E, 0x7B, 0xFA, 0x93, 0x7E, 0x89, 0xA3}},
+ {
+ []byte{0x49, 0xE9, 0x5D, 0x6D, 0x4C, 0xA2, 0x29, 0xBF},
+ []byte{0x02, 0xFE, 0x55, 0x77, 0x81, 0x17, 0xF1, 0x2A},
+ []byte{0xCF, 0x9C, 0x5D, 0x7A, 0x49, 0x86, 0xAD, 0xB5}},
+ {
+ []byte{0x01, 0x83, 0x10, 0xDC, 0x40, 0x9B, 0x26, 0xD6},
+ []byte{0x1D, 0x9D, 0x5C, 0x50, 0x18, 0xF7, 0x28, 0xC2},
+ []byte{0xD1, 0xAB, 0xB2, 0x90, 0x65, 0x8B, 0xC7, 0x78}},
+ {
+ []byte{0x1C, 0x58, 0x7F, 0x1C, 0x13, 0x92, 0x4F, 0xEF},
+ []byte{0x30, 0x55, 0x32, 0x28, 0x6D, 0x6F, 0x29, 0x5A},
+ []byte{0x55, 0xCB, 0x37, 0x74, 0xD1, 0x3E, 0xF2, 0x01}},
+ {
+ []byte{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01},
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0xFA, 0x34, 0xEC, 0x48, 0x47, 0xB2, 0x68, 0xB2}},
+ {
+ []byte{0x1F, 0x1F, 0x1F, 0x1F, 0x0E, 0x0E, 0x0E, 0x0E},
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0xA7, 0x90, 0x79, 0x51, 0x08, 0xEA, 0x3C, 0xAE}},
+ {
+ []byte{0xE0, 0xFE, 0xE0, 0xFE, 0xF1, 0xFE, 0xF1, 0xFE},
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0xC3, 0x9E, 0x07, 0x2D, 0x9F, 0xAC, 0x63, 0x1D}},
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ []byte{0x01, 0x49, 0x33, 0xE0, 0xCD, 0xAF, 0xF6, 0xE4}},
+ {
+ []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0xF2, 0x1E, 0x9A, 0x77, 0xB7, 0x1C, 0x49, 0xBC}},
+ {
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x24, 0x59, 0x46, 0x88, 0x57, 0x54, 0x36, 0x9A}},
+ {
+ []byte{0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10},
+ []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF},
+ []byte{0x6B, 0x5C, 0x5A, 0x9C, 0x5D, 0x9E, 0x0A, 0x5A}},
+}
+
+func TestCipherEncrypt(t *testing.T) {
+ for i, tt := range encryptTests {
+ c, err := NewCipher(tt.key)
+ if err != nil {
+ t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
+ continue
+ }
+ ct := make([]byte, len(tt.out))
+ c.Encrypt(ct, tt.in)
+ for j, v := range ct {
+ if v != tt.out[j] {
+ t.Errorf("Cipher.Encrypt, test vector #%d: cipher-text[%d] = %#x, expected %#x", i, j, v, tt.out[j])
+ break
+ }
+ }
+ }
+}
+
+func TestCipherDecrypt(t *testing.T) {
+ for i, tt := range encryptTests {
+ c, err := NewCipher(tt.key)
+ if err != nil {
+ t.Errorf("NewCipher(%d bytes) = %s", len(tt.key), err)
+ continue
+ }
+ pt := make([]byte, len(tt.in))
+ c.Decrypt(pt, tt.out)
+ for j, v := range pt {
+ if v != tt.in[j] {
+ t.Errorf("Cipher.Decrypt, test vector #%d: plain-text[%d] = %#x, expected %#x", i, j, v, tt.in[j])
+ break
+ }
+ }
+ }
+}
+
+func TestSaltedCipherKeyLength(t *testing.T) {
+ if _, err := NewSaltedCipher(nil, []byte{'a'}); err != KeySizeError(0) {
+ t.Errorf("NewSaltedCipher with short key, gave error %#v, expected %#v", err, KeySizeError(0))
+ }
+
+ // A 57-byte key. One over the typical blowfish restriction.
+ key := []byte("012345678901234567890123456789012345678901234567890123456")
+ if _, err := NewSaltedCipher(key, []byte{'a'}); err != nil {
+ t.Errorf("NewSaltedCipher with long key, gave error %#v", err)
+ }
+}
+
+// Test vectors generated with Blowfish from OpenSSH.
+var saltedVectors = [][8]byte{
+ {0x0c, 0x82, 0x3b, 0x7b, 0x8d, 0x01, 0x4b, 0x7e},
+ {0xd1, 0xe1, 0x93, 0xf0, 0x70, 0xa6, 0xdb, 0x12},
+ {0xfc, 0x5e, 0xba, 0xde, 0xcb, 0xf8, 0x59, 0xad},
+ {0x8a, 0x0c, 0x76, 0xe7, 0xdd, 0x2c, 0xd3, 0xa8},
+ {0x2c, 0xcb, 0x7b, 0xee, 0xac, 0x7b, 0x7f, 0xf8},
+ {0xbb, 0xf6, 0x30, 0x6f, 0xe1, 0x5d, 0x62, 0xbf},
+ {0x97, 0x1e, 0xc1, 0x3d, 0x3d, 0xe0, 0x11, 0xe9},
+ {0x06, 0xd7, 0x4d, 0xb1, 0x80, 0xa3, 0xb1, 0x38},
+ {0x67, 0xa1, 0xa9, 0x75, 0x0e, 0x5b, 0xc6, 0xb4},
+ {0x51, 0x0f, 0x33, 0x0e, 0x4f, 0x67, 0xd2, 0x0c},
+ {0xf1, 0x73, 0x7e, 0xd8, 0x44, 0xea, 0xdb, 0xe5},
+ {0x14, 0x0e, 0x16, 0xce, 0x7f, 0x4a, 0x9c, 0x7b},
+ {0x4b, 0xfe, 0x43, 0xfd, 0xbf, 0x36, 0x04, 0x47},
+ {0xb1, 0xeb, 0x3e, 0x15, 0x36, 0xa7, 0xbb, 0xe2},
+ {0x6d, 0x0b, 0x41, 0xdd, 0x00, 0x98, 0x0b, 0x19},
+ {0xd3, 0xce, 0x45, 0xce, 0x1d, 0x56, 0xb7, 0xfc},
+ {0xd9, 0xf0, 0xfd, 0xda, 0xc0, 0x23, 0xb7, 0x93},
+ {0x4c, 0x6f, 0xa1, 0xe4, 0x0c, 0xa8, 0xca, 0x57},
+ {0xe6, 0x2f, 0x28, 0xa7, 0x0c, 0x94, 0x0d, 0x08},
+ {0x8f, 0xe3, 0xf0, 0xb6, 0x29, 0xe3, 0x44, 0x03},
+ {0xff, 0x98, 0xdd, 0x04, 0x45, 0xb4, 0x6d, 0x1f},
+ {0x9e, 0x45, 0x4d, 0x18, 0x40, 0x53, 0xdb, 0xef},
+ {0xb7, 0x3b, 0xef, 0x29, 0xbe, 0xa8, 0x13, 0x71},
+ {0x02, 0x54, 0x55, 0x41, 0x8e, 0x04, 0xfc, 0xad},
+ {0x6a, 0x0a, 0xee, 0x7c, 0x10, 0xd9, 0x19, 0xfe},
+ {0x0a, 0x22, 0xd9, 0x41, 0xcc, 0x23, 0x87, 0x13},
+ {0x6e, 0xff, 0x1f, 0xff, 0x36, 0x17, 0x9c, 0xbe},
+ {0x79, 0xad, 0xb7, 0x40, 0xf4, 0x9f, 0x51, 0xa6},
+ {0x97, 0x81, 0x99, 0xa4, 0xde, 0x9e, 0x9f, 0xb6},
+ {0x12, 0x19, 0x7a, 0x28, 0xd0, 0xdc, 0xcc, 0x92},
+ {0x81, 0xda, 0x60, 0x1e, 0x0e, 0xdd, 0x65, 0x56},
+ {0x7d, 0x76, 0x20, 0xb2, 0x73, 0xc9, 0x9e, 0xee},
+}
+
+func TestSaltedCipher(t *testing.T) {
+ var key, salt [32]byte
+ for i := range key {
+ key[i] = byte(i)
+ salt[i] = byte(i + 32)
+ }
+ for i, v := range saltedVectors {
+ c, err := NewSaltedCipher(key[:], salt[:i])
+ if err != nil {
+ t.Fatal(err)
+ }
+ var buf [8]byte
+ c.Encrypt(buf[:], buf[:])
+ if v != buf {
+ t.Errorf("%d: expected %x, got %x", i, v, buf)
+ }
+ }
+}
+
+func BenchmarkExpandKeyWithSalt(b *testing.B) {
+ key := make([]byte, 32)
+ salt := make([]byte, 16)
+ c, _ := NewCipher(key)
+ for i := 0; i < b.N; i++ {
+ expandKeyWithSalt(key, salt, c)
+ }
+}
+
+func BenchmarkExpandKey(b *testing.B) {
+ key := make([]byte, 32)
+ c, _ := NewCipher(key)
+ for i := 0; i < b.N; i++ {
+ ExpandKey(key, c)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/cipher.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/cipher.go
new file mode 100644
index 00000000000..542984aa8da
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/cipher.go
@@ -0,0 +1,91 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm.
+package blowfish // import "golang.org/x/crypto/blowfish"
+
+// The code is a port of Bruce Schneier's C implementation.
+// See http://www.schneier.com/blowfish.html.
+
+import "strconv"
+
+// The Blowfish block size in bytes.
+const BlockSize = 8
+
+// A Cipher is an instance of Blowfish encryption using a particular key.
+type Cipher struct {
+ p [18]uint32
+ s0, s1, s2, s3 [256]uint32
+}
+
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+ return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a Cipher.
+// The key argument should be the Blowfish key, from 1 to 56 bytes.
+func NewCipher(key []byte) (*Cipher, error) {
+ var result Cipher
+ if k := len(key); k < 1 || k > 56 {
+ return nil, KeySizeError(k)
+ }
+ initCipher(&result)
+ ExpandKey(key, &result)
+ return &result, nil
+}
+
+// NewSaltedCipher creates and returns a Cipher that folds a salt into its key
+// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
+// sufficient and desirable. For bcrypt compatibility, the key can be over 56
+// bytes.
+func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
+ if len(salt) == 0 {
+ return NewCipher(key)
+ }
+ var result Cipher
+ if k := len(key); k < 1 {
+ return nil, KeySizeError(k)
+ }
+ initCipher(&result)
+ expandKeyWithSalt(key, salt, &result)
+ return &result, nil
+}
+
+// BlockSize returns the Blowfish block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8-byte buffer src using the cipher's key
+// and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
+func (c *Cipher) Encrypt(dst, src []byte) {
+ l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+ r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+ l, r = encryptBlock(l, r, c)
+ dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+ dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+// Decrypt decrypts the 8-byte buffer src using the cipher's key
+// and stores the result in dst.
+func (c *Cipher) Decrypt(dst, src []byte) {
+ l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+ r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+ l, r = decryptBlock(l, r, c)
+ dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l)
+ dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r)
+}
+
+func initCipher(c *Cipher) {
+ copy(c.p[0:], p[0:])
+ copy(c.s0[0:], s0[0:])
+ copy(c.s1[0:], s1[0:])
+ copy(c.s2[0:], s2[0:])
+ copy(c.s3[0:], s3[0:])
+}
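
Because Encrypt and Decrypt each handle exactly one 8-byte block, *Cipher satisfies the crypto/cipher Block interface, and longer messages should go through a chaining mode such as CBC, as the Encrypt comment advises. A minimal sketch, with a placeholder key and no padding scheme (real code must pad plaintext to the 8-byte block size, e.g. with PKCS#7):

    package main

    import (
        "crypto/cipher"
        "crypto/rand"
        "fmt"
        "io"
        "log"

        "golang.org/x/crypto/blowfish"
    )

    func main() {
        block, err := blowfish.NewCipher([]byte("example key")) // 1 to 56 bytes
        if err != nil {
            log.Fatal(err)
        }

        // Plaintext length must be a multiple of the 8-byte block size.
        plaintext := []byte("16 byte message!")

        // A fresh random IV per message.
        iv := make([]byte, blowfish.BlockSize)
        if _, err := io.ReadFull(rand.Reader, iv); err != nil {
            log.Fatal(err)
        }

        ciphertext := make([]byte, len(plaintext))
        cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, plaintext)
        fmt.Printf("%x\n", ciphertext)
    }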
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/const.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/const.go
new file mode 100644
index 00000000000..8c5ee4cb08a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/blowfish/const.go
@@ -0,0 +1,199 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The startup permutation array and substitution boxes.
+// They are the hexadecimal digits of PI; see:
+// http://www.schneier.com/code/constants.txt.
+
+package blowfish
+
+var s0 = [256]uint32{
+ 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96,
+ 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16,
+ 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658,
+ 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013,
+ 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e,
+ 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60,
+ 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6,
+ 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a,
+ 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c,
+ 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193,
+ 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1,
+ 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239,
+ 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a,
+ 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3,
+ 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176,
+ 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe,
+ 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706,
+ 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b,
+ 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b,
+ 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463,
+ 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c,
+ 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3,
+ 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a,
+ 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8,
+ 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760,
+ 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db,
+ 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8,
+ 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b,
+ 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33,
+ 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4,
+ 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0,
+ 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c,
+ 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777,
+ 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299,
+ 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705,
+ 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf,
+ 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e,
+ 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa,
+ 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9,
+ 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915,
+ 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f,
+ 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664,
+ 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a,
+}
+
+var s1 = [256]uint32{
+ 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d,
+ 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1,
+ 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65,
+ 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1,
+ 0x4cdd2086, 0x8470eb26, 0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9,
+ 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737,
+ 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d,
+ 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd,
+ 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc,
+ 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41,
+ 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908,
+ 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af,
+ 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124,
+ 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c,
+ 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908,
+ 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd,
+ 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b,
+ 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e,
+ 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa,
+ 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a,
+ 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d,
+ 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66,
+ 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5,
+ 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84,
+ 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96,
+ 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14,
+ 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca,
+ 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7,
+ 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77,
+ 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99,
+ 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054,
+ 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73,
+ 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea,
+ 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105,
+ 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646,
+ 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285,
+ 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea,
+ 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb,
+ 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e,
+ 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc,
+ 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd,
+ 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20,
+ 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7,
+}
+
+var s2 = [256]uint32{
+ 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7,
+ 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af,
+ 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af,
+ 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504,
+ 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4,
+ 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee,
+ 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec,
+ 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b,
+ 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332,
+ 0x6841e7f7, 0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527,
+ 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58,
+ 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c,
+ 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22,
+ 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17,
+ 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60,
+ 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115,
+ 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99,
+ 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0,
+ 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74,
+ 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d,
+ 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3,
+ 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3,
+ 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979,
+ 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c,
+ 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa,
+ 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a,
+ 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086,
+ 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc,
+ 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24,
+ 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2,
+ 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84,
+ 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c,
+ 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09,
+ 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10,
+ 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe,
+ 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027,
+ 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0,
+ 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634,
+ 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188,
+ 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc,
+ 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8,
+ 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837,
+ 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0,
+}
+
+var s3 = [256]uint32{
+ 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742,
+ 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b,
+ 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79,
+ 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6,
+ 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a,
+ 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4,
+ 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1,
+ 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59,
+ 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797,
+ 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28,
+ 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6,
+ 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28,
+ 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba,
+ 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a,
+ 0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5,
+ 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f,
+ 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce,
+ 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680,
+ 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd,
+ 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb,
+ 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb,
+ 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370,
+ 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc,
+ 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048,
+ 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc,
+ 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9,
+ 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a,
+ 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f,
+ 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a,
+ 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1,
+ 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b,
+ 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e,
+ 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e,
+ 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f,
+ 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623,
+ 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc,
+ 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a,
+ 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6,
+ 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3,
+ 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060,
+ 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c,
+ 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f,
+ 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6,
+}
+
+var p = [18]uint32{
+ 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0,
+ 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c,
+ 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b,
+}
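
As the header comment says, these arrays are the fractional hexadecimal digits of pi; a quick sanity check of the first P-array word, accurate within float64 precision (pi = 3.243F6A88... in hex):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // The first 32 fractional bits of pi should be the first P-array word.
        frac := math.Pi - 3
        word := uint32(frac * (1 << 32))
        fmt.Printf("%#x\n", word) // 0x243f6a88
    }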
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/bn256.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/bn256.go
new file mode 100644
index 00000000000..014f8b3557c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/bn256.go
@@ -0,0 +1,404 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bn256 implements a particular bilinear group at the 128-bit security level.
+//
+// Bilinear groups are the basis of many of the new cryptographic protocols
+// that have been proposed over the past decade. They consist of a triplet of
+// groups (G₁, G₂ and GT) such that there exists a function e(g₁ˣ,g₂ʸ)=gTˣʸ
+// (where gₓ is a generator of the respective group). That function is called
+// a pairing function.
+//
+// This package specifically implements the Optimal Ate pairing over a 256-bit
+// Barreto-Naehrig curve as described in
+// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible
+// with the implementation described in that paper.
+package bn256 // import "golang.org/x/crypto/bn256"
+
+import (
+ "crypto/rand"
+ "io"
+ "math/big"
+)
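
The pairing described above is exposed by this package's Pair function (declared later in this file, beyond this excerpt). A sketch of the bilinearity property e(g₁ᵃ, g₂ᵇ) = e(g₁, g₂ᵇ)ᵃ, assuming Pair's signature is Pair(*G1, *G2) *GT:

    package main

    import (
        "bytes"
        "crypto/rand"
        "fmt"
        "log"
        "math/big"

        "golang.org/x/crypto/bn256"
    )

    func main() {
        a, ga, err := bn256.RandomG1(rand.Reader) // a and g₁ᵃ
        if err != nil {
            log.Fatal(err)
        }
        _, gb, err := bn256.RandomG2(rand.Reader) // b and g₂ᵇ
        if err != nil {
            log.Fatal(err)
        }

        // e(g₁ᵃ, g₂ᵇ) should equal e(g₁, g₂ᵇ)ᵃ.
        k1 := bn256.Pair(ga, gb)
        g1 := new(bn256.G1).ScalarBaseMult(big.NewInt(1))
        k2 := new(bn256.GT).ScalarMult(bn256.Pair(g1, gb), a)

        // Marshal minimalizes the internal representation, so the byte
        // forms compare reliably.
        fmt.Println(bytes.Equal(k1.Marshal(), k2.Marshal())) // true
    }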
+
+// BUG(agl): this implementation is not constant time.
+// TODO(agl): keep GF(p²) elements in Montgomery form.
+
+// G1 is an abstract cyclic group. The zero value is suitable for use as the
+// output of an operation, but cannot be used as an input.
+type G1 struct {
+ p *curvePoint
+}
+
+// RandomG1 returns x and g₁ˣ where x is a random, non-zero number read from r.
+func RandomG1(r io.Reader) (*big.Int, *G1, error) {
+ var k *big.Int
+ var err error
+
+ for {
+ k, err = rand.Int(r, Order)
+ if err != nil {
+ return nil, nil, err
+ }
+ if k.Sign() > 0 {
+ break
+ }
+ }
+
+ return k, new(G1).ScalarBaseMult(k), nil
+}
+
+func (g *G1) String() string {
+ return "bn256.G1" + g.p.String()
+}
+
+// ScalarBaseMult sets e to g*k where g is the generator of the group and
+// then returns e.
+func (e *G1) ScalarBaseMult(k *big.Int) *G1 {
+ if e.p == nil {
+ e.p = newCurvePoint(nil)
+ }
+ e.p.Mul(curveGen, k, new(bnPool))
+ return e
+}
+
+// ScalarMult sets e to a*k and then returns e.
+func (e *G1) ScalarMult(a *G1, k *big.Int) *G1 {
+ if e.p == nil {
+ e.p = newCurvePoint(nil)
+ }
+ e.p.Mul(a.p, k, new(bnPool))
+ return e
+}
+
+// Add sets e to a+b and then returns e.
+// BUG(agl): this function is not complete: a==b fails.
+func (e *G1) Add(a, b *G1) *G1 {
+ if e.p == nil {
+ e.p = newCurvePoint(nil)
+ }
+ e.p.Add(a.p, b.p, new(bnPool))
+ return e
+}
+
+// Neg sets e to -a and then returns e.
+func (e *G1) Neg(a *G1) *G1 {
+ if e.p == nil {
+ e.p = newCurvePoint(nil)
+ }
+ e.p.Negative(a.p)
+ return e
+}
+
+// Marshal converts n to a byte slice.
+func (n *G1) Marshal() []byte {
+ n.p.MakeAffine(nil)
+
+ xBytes := new(big.Int).Mod(n.p.x, p).Bytes()
+ yBytes := new(big.Int).Mod(n.p.y, p).Bytes()
+
+ // Each value is a 256-bit number.
+ const numBytes = 256 / 8
+
+ ret := make([]byte, numBytes*2)
+ copy(ret[1*numBytes-len(xBytes):], xBytes)
+ copy(ret[2*numBytes-len(yBytes):], yBytes)
+
+ return ret
+}
+
+// Unmarshal sets e to the result of converting the output of Marshal back into
+// a group element and then returns e.
+func (e *G1) Unmarshal(m []byte) (*G1, bool) {
+ // Each value is a 256-bit number.
+ const numBytes = 256 / 8
+
+ if len(m) != 2*numBytes {
+ return nil, false
+ }
+
+ if e.p == nil {
+ e.p = newCurvePoint(nil)
+ }
+
+ e.p.x.SetBytes(m[0*numBytes : 1*numBytes])
+ e.p.y.SetBytes(m[1*numBytes : 2*numBytes])
+
+ if e.p.x.Sign() == 0 && e.p.y.Sign() == 0 {
+ // This is the point at infinity.
+ e.p.y.SetInt64(1)
+ e.p.z.SetInt64(0)
+ e.p.t.SetInt64(0)
+ } else {
+ e.p.z.SetInt64(1)
+ e.p.t.SetInt64(1)
+
+ if !e.p.IsOnCurve() {
+ return nil, false
+ }
+ }
+
+ return e, true
+}
+
+// G2 is an abstract cyclic group. The zero value is suitable for use as the
+// output of an operation, but cannot be used as an input.
+type G2 struct {
+ p *twistPoint
+}
+
+// RandomG2 returns x and g₂ˣ where x is a random, non-zero number read from r.
+func RandomG2(r io.Reader) (*big.Int, *G2, error) {
+ var k *big.Int
+ var err error
+
+ for {
+ k, err = rand.Int(r, Order)
+ if err != nil {
+ return nil, nil, err
+ }
+ if k.Sign() > 0 {
+ break
+ }
+ }
+
+ return k, new(G2).ScalarBaseMult(k), nil
+}
+
+func (g *G2) String() string {
+ return "bn256.G2" + g.p.String()
+}
+
+// ScalarBaseMult sets e to g*k where g is the generator of the group and
+// then returns e.
+func (e *G2) ScalarBaseMult(k *big.Int) *G2 {
+ if e.p == nil {
+ e.p = newTwistPoint(nil)
+ }
+ e.p.Mul(twistGen, k, new(bnPool))
+ return e
+}
+
+// ScalarMult sets e to a*k and then returns e.
+func (e *G2) ScalarMult(a *G2, k *big.Int) *G2 {
+ if e.p == nil {
+ e.p = newTwistPoint(nil)
+ }
+ e.p.Mul(a.p, k, new(bnPool))
+ return e
+}
+
+// Add sets e to a+b and then returns e.
+// BUG(agl): this function is not complete: a==b fails.
+func (e *G2) Add(a, b *G2) *G2 {
+ if e.p == nil {
+ e.p = newTwistPoint(nil)
+ }
+ e.p.Add(a.p, b.p, new(bnPool))
+ return e
+}
+
+// Marshal converts n into a byte slice.
+func (n *G2) Marshal() []byte {
+ n.p.MakeAffine(nil)
+
+ xxBytes := new(big.Int).Mod(n.p.x.x, p).Bytes()
+ xyBytes := new(big.Int).Mod(n.p.x.y, p).Bytes()
+ yxBytes := new(big.Int).Mod(n.p.y.x, p).Bytes()
+ yyBytes := new(big.Int).Mod(n.p.y.y, p).Bytes()
+
+ // Each value is a 256-bit number.
+ const numBytes = 256 / 8
+
+ ret := make([]byte, numBytes*4)
+ copy(ret[1*numBytes-len(xxBytes):], xxBytes)
+ copy(ret[2*numBytes-len(xyBytes):], xyBytes)
+ copy(ret[3*numBytes-len(yxBytes):], yxBytes)
+ copy(ret[4*numBytes-len(yyBytes):], yyBytes)
+
+ return ret
+}
+
+// Unmarshal sets e to the result of converting the output of Marshal back into
+// a group element and then returns e.
+func (e *G2) Unmarshal(m []byte) (*G2, bool) {
+ // Each value is a 256-bit number.
+ const numBytes = 256 / 8
+
+ if len(m) != 4*numBytes {
+ return nil, false
+ }
+
+ if e.p == nil {
+ e.p = newTwistPoint(nil)
+ }
+
+ e.p.x.x.SetBytes(m[0*numBytes : 1*numBytes])
+ e.p.x.y.SetBytes(m[1*numBytes : 2*numBytes])
+ e.p.y.x.SetBytes(m[2*numBytes : 3*numBytes])
+ e.p.y.y.SetBytes(m[3*numBytes : 4*numBytes])
+
+ if e.p.x.x.Sign() == 0 &&
+ e.p.x.y.Sign() == 0 &&
+ e.p.y.x.Sign() == 0 &&
+ e.p.y.y.Sign() == 0 {
+ // This is the point at infinity.
+ e.p.y.SetOne()
+ e.p.z.SetZero()
+ e.p.t.SetZero()
+ } else {
+ e.p.z.SetOne()
+ e.p.t.SetOne()
+
+ if !e.p.IsOnCurve() {
+ return nil, false
+ }
+ }
+
+ return e, true
+}
+
+// GT is an abstract cyclic group. The zero value is suitable for use as the
+// output of an operation, but cannot be used as an input.
+type GT struct {
+ p *gfP12
+}
+
+func (g *GT) String() string {
+ return "bn256.GT" + g.p.String()
+}
+
+// ScalarMult sets e to a*k and then returns e.
+func (e *GT) ScalarMult(a *GT, k *big.Int) *GT {
+ if e.p == nil {
+ e.p = newGFp12(nil)
+ }
+ e.p.Exp(a.p, k, new(bnPool))
+ return e
+}
+
+// Add sets e to a+b and then returns e.
+func (e *GT) Add(a, b *GT) *GT {
+ if e.p == nil {
+ e.p = newGFp12(nil)
+ }
+ e.p.Mul(a.p, b.p, new(bnPool))
+ return e
+}
+
+// Neg sets e to -a and then returns e.
+func (e *GT) Neg(a *GT) *GT {
+ if e.p == nil {
+ e.p = newGFp12(nil)
+ }
+ e.p.Invert(a.p, new(bnPool))
+ return e
+}
+
+// Marshal converts n into a byte slice.
+func (n *GT) Marshal() []byte {
+ n.p.Minimal()
+
+ xxxBytes := n.p.x.x.x.Bytes()
+ xxyBytes := n.p.x.x.y.Bytes()
+ xyxBytes := n.p.x.y.x.Bytes()
+ xyyBytes := n.p.x.y.y.Bytes()
+ xzxBytes := n.p.x.z.x.Bytes()
+ xzyBytes := n.p.x.z.y.Bytes()
+ yxxBytes := n.p.y.x.x.Bytes()
+ yxyBytes := n.p.y.x.y.Bytes()
+ yyxBytes := n.p.y.y.x.Bytes()
+ yyyBytes := n.p.y.y.y.Bytes()
+ yzxBytes := n.p.y.z.x.Bytes()
+ yzyBytes := n.p.y.z.y.Bytes()
+
+ // Each value is a 256-bit number.
+ const numBytes = 256 / 8
+
+ ret := make([]byte, numBytes*12)
+ copy(ret[1*numBytes-len(xxxBytes):], xxxBytes)
+ copy(ret[2*numBytes-len(xxyBytes):], xxyBytes)
+ copy(ret[3*numBytes-len(xyxBytes):], xyxBytes)
+ copy(ret[4*numBytes-len(xyyBytes):], xyyBytes)
+ copy(ret[5*numBytes-len(xzxBytes):], xzxBytes)
+ copy(ret[6*numBytes-len(xzyBytes):], xzyBytes)
+ copy(ret[7*numBytes-len(yxxBytes):], yxxBytes)
+ copy(ret[8*numBytes-len(yxyBytes):], yxyBytes)
+ copy(ret[9*numBytes-len(yyxBytes):], yyxBytes)
+ copy(ret[10*numBytes-len(yyyBytes):], yyyBytes)
+ copy(ret[11*numBytes-len(yzxBytes):], yzxBytes)
+ copy(ret[12*numBytes-len(yzyBytes):], yzyBytes)
+
+ return ret
+}
+
+// Unmarshal sets e to the result of converting the output of Marshal back into
+// a group element and then returns e.
+func (e *GT) Unmarshal(m []byte) (*GT, bool) {
+ // Each value is a 256-bit number.
+ const numBytes = 256 / 8
+
+ if len(m) != 12*numBytes {
+ return nil, false
+ }
+
+ if e.p == nil {
+ e.p = newGFp12(nil)
+ }
+
+ e.p.x.x.x.SetBytes(m[0*numBytes : 1*numBytes])
+ e.p.x.x.y.SetBytes(m[1*numBytes : 2*numBytes])
+ e.p.x.y.x.SetBytes(m[2*numBytes : 3*numBytes])
+ e.p.x.y.y.SetBytes(m[3*numBytes : 4*numBytes])
+ e.p.x.z.x.SetBytes(m[4*numBytes : 5*numBytes])
+ e.p.x.z.y.SetBytes(m[5*numBytes : 6*numBytes])
+ e.p.y.x.x.SetBytes(m[6*numBytes : 7*numBytes])
+ e.p.y.x.y.SetBytes(m[7*numBytes : 8*numBytes])
+ e.p.y.y.x.SetBytes(m[8*numBytes : 9*numBytes])
+ e.p.y.y.y.SetBytes(m[9*numBytes : 10*numBytes])
+ e.p.y.z.x.SetBytes(m[10*numBytes : 11*numBytes])
+ e.p.y.z.y.SetBytes(m[11*numBytes : 12*numBytes])
+
+ return e, true
+}
+
+// Pair calculates an Optimal Ate pairing.
+func Pair(g1 *G1, g2 *G2) *GT {
+ return &GT{optimalAte(g2.p, g1.p, new(bnPool))}
+}
+
+// bnPool implements a tiny cache of *big.Int objects that's used to reduce the
+// number of allocations made during processing.
+type bnPool struct {
+ bns []*big.Int
+ count int
+}
+
+func (pool *bnPool) Get() *big.Int {
+ if pool == nil {
+ return new(big.Int)
+ }
+
+ pool.count++
+ l := len(pool.bns)
+ if l == 0 {
+ return new(big.Int)
+ }
+
+ bn := pool.bns[l-1]
+ pool.bns = pool.bns[:l-1]
+ return bn
+}
+
+func (pool *bnPool) Put(bn *big.Int) {
+ if pool == nil {
+ return
+ }
+ pool.bns = append(pool.bns, bn)
+ pool.count--
+}
+
+func (pool *bnPool) Count() int {
+ return pool.count
+}
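Taken together, the exported names above (G1, G2, GT, Pair, the Random functions and Order) are enough to exercise the pairing's defining bilinearity, e(a·G₁, b·G₂) = e(G₁, G₂)^(ab). A minimal standalone sketch, assuming this vendored copy is importable under its upstream path golang.org/x/crypto/bn256:

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"
	"math/big"

	"golang.org/x/crypto/bn256"
)

func main() {
	a, pa, _ := bn256.RandomG1(rand.Reader) // a and a·G₁
	b, qb, _ := bn256.RandomG2(rand.Reader) // b and b·G₂

	// Left side: e(a·G₁, b·G₂).
	lhs := bn256.Pair(pa, qb)

	// Right side: e(G₁, G₂)^(ab), built from the generators.
	one := big.NewInt(1)
	rhs := bn256.Pair(new(bn256.G1).ScalarBaseMult(one), new(bn256.G2).ScalarBaseMult(one))
	rhs.ScalarMult(rhs, a)
	rhs.ScalarMult(rhs, b)

	fmt.Println("bilinear:", bytes.Equal(lhs.Marshal(), rhs.Marshal()))
}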
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/bn256_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/bn256_test.go
new file mode 100644
index 00000000000..1cec3884ecd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/bn256_test.go
@@ -0,0 +1,304 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bn256
+
+import (
+ "bytes"
+ "crypto/rand"
+ "math/big"
+ "testing"
+)
+
+func TestGFp2Invert(t *testing.T) {
+ pool := new(bnPool)
+
+ a := newGFp2(pool)
+ a.x.SetString("23423492374", 10)
+ a.y.SetString("12934872398472394827398470", 10)
+
+ inv := newGFp2(pool)
+ inv.Invert(a, pool)
+
+ b := newGFp2(pool).Mul(inv, a, pool)
+ if b.x.Int64() != 0 || b.y.Int64() != 1 {
+ t.Fatalf("bad result for a^-1*a: %s %s", b.x, b.y)
+ }
+
+ a.Put(pool)
+ b.Put(pool)
+ inv.Put(pool)
+
+ if c := pool.Count(); c > 0 {
+ t.Errorf("Pool count non-zero: %d\n", c)
+ }
+}
+
+func isZero(n *big.Int) bool {
+ return new(big.Int).Mod(n, p).Int64() == 0
+}
+
+func isOne(n *big.Int) bool {
+ return new(big.Int).Mod(n, p).Int64() == 1
+}
+
+func TestGFp6Invert(t *testing.T) {
+ pool := new(bnPool)
+
+ a := newGFp6(pool)
+ a.x.x.SetString("239487238491", 10)
+ a.x.y.SetString("2356249827341", 10)
+ a.y.x.SetString("082659782", 10)
+ a.y.y.SetString("182703523765", 10)
+ a.z.x.SetString("978236549263", 10)
+ a.z.y.SetString("64893242", 10)
+
+ inv := newGFp6(pool)
+ inv.Invert(a, pool)
+
+ b := newGFp6(pool).Mul(inv, a, pool)
+ if !isZero(b.x.x) ||
+ !isZero(b.x.y) ||
+ !isZero(b.y.x) ||
+ !isZero(b.y.y) ||
+ !isZero(b.z.x) ||
+ !isOne(b.z.y) {
+ t.Fatalf("bad result for a^-1*a: %s", b)
+ }
+
+ a.Put(pool)
+ b.Put(pool)
+ inv.Put(pool)
+
+ if c := pool.Count(); c > 0 {
+ t.Errorf("Pool count non-zero: %d\n", c)
+ }
+}
+
+func TestGFp12Invert(t *testing.T) {
+ pool := new(bnPool)
+
+ a := newGFp12(pool)
+ a.x.x.x.SetString("239846234862342323958623", 10)
+ a.x.x.y.SetString("2359862352529835623", 10)
+ a.x.y.x.SetString("928836523", 10)
+ a.x.y.y.SetString("9856234", 10)
+ a.x.z.x.SetString("235635286", 10)
+ a.x.z.y.SetString("5628392833", 10)
+ a.y.x.x.SetString("252936598265329856238956532167968", 10)
+ a.y.x.y.SetString("23596239865236954178968", 10)
+ a.y.y.x.SetString("95421692834", 10)
+ a.y.y.y.SetString("236548", 10)
+ a.y.z.x.SetString("924523", 10)
+ a.y.z.y.SetString("12954623", 10)
+
+ inv := newGFp12(pool)
+ inv.Invert(a, pool)
+
+ b := newGFp12(pool).Mul(inv, a, pool)
+ if !isZero(b.x.x.x) ||
+ !isZero(b.x.x.y) ||
+ !isZero(b.x.y.x) ||
+ !isZero(b.x.y.y) ||
+ !isZero(b.x.z.x) ||
+ !isZero(b.x.z.y) ||
+ !isZero(b.y.x.x) ||
+ !isZero(b.y.x.y) ||
+ !isZero(b.y.y.x) ||
+ !isZero(b.y.y.y) ||
+ !isZero(b.y.z.x) ||
+ !isOne(b.y.z.y) {
+ t.Fatalf("bad result for a^-1*a: %s", b)
+ }
+
+ a.Put(pool)
+ b.Put(pool)
+ inv.Put(pool)
+
+ if c := pool.Count(); c > 0 {
+ t.Errorf("Pool count non-zero: %d\n", c)
+ }
+}
+
+func TestCurveImpl(t *testing.T) {
+ pool := new(bnPool)
+
+ g := &curvePoint{
+ pool.Get().SetInt64(1),
+ pool.Get().SetInt64(-2),
+ pool.Get().SetInt64(1),
+ pool.Get().SetInt64(0),
+ }
+
+ x := pool.Get().SetInt64(32498273234)
+ X := newCurvePoint(pool).Mul(g, x, pool)
+
+ y := pool.Get().SetInt64(98732423523)
+ Y := newCurvePoint(pool).Mul(g, y, pool)
+
+ s1 := newCurvePoint(pool).Mul(X, y, pool).MakeAffine(pool)
+ s2 := newCurvePoint(pool).Mul(Y, x, pool).MakeAffine(pool)
+
+	if s1.x.Cmp(s2.x) != 0 ||
+		s1.y.Cmp(s2.y) != 0 {
+ t.Errorf("DH points don't match: (%s, %s) (%s, %s)", s1.x, s1.y, s2.x, s2.y)
+ }
+
+ pool.Put(x)
+ X.Put(pool)
+ pool.Put(y)
+ Y.Put(pool)
+ s1.Put(pool)
+ s2.Put(pool)
+ g.Put(pool)
+
+ if c := pool.Count(); c > 0 {
+ t.Errorf("Pool count non-zero: %d\n", c)
+ }
+}
+
+func TestOrderG1(t *testing.T) {
+ g := new(G1).ScalarBaseMult(Order)
+ if !g.p.IsInfinity() {
+ t.Error("G1 has incorrect order")
+ }
+
+ one := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
+ g.Add(g, one)
+ g.p.MakeAffine(nil)
+ if g.p.x.Cmp(one.p.x) != 0 || g.p.y.Cmp(one.p.y) != 0 {
+ t.Errorf("1+0 != 1 in G1")
+ }
+}
+
+func TestOrderG2(t *testing.T) {
+ g := new(G2).ScalarBaseMult(Order)
+ if !g.p.IsInfinity() {
+ t.Error("G2 has incorrect order")
+ }
+
+ one := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
+ g.Add(g, one)
+ g.p.MakeAffine(nil)
+ if g.p.x.x.Cmp(one.p.x.x) != 0 ||
+ g.p.x.y.Cmp(one.p.x.y) != 0 ||
+ g.p.y.x.Cmp(one.p.y.x) != 0 ||
+ g.p.y.y.Cmp(one.p.y.y) != 0 {
+ t.Errorf("1+0 != 1 in G2")
+ }
+}
+
+func TestOrderGT(t *testing.T) {
+ gt := Pair(&G1{curveGen}, &G2{twistGen})
+ g := new(GT).ScalarMult(gt, Order)
+ if !g.p.IsOne() {
+ t.Error("GT has incorrect order")
+ }
+}
+
+func TestBilinearity(t *testing.T) {
+ for i := 0; i < 2; i++ {
+ a, p1, _ := RandomG1(rand.Reader)
+ b, p2, _ := RandomG2(rand.Reader)
+ e1 := Pair(p1, p2)
+
+ e2 := Pair(&G1{curveGen}, &G2{twistGen})
+ e2.ScalarMult(e2, a)
+ e2.ScalarMult(e2, b)
+
+ minusE2 := new(GT).Neg(e2)
+ e1.Add(e1, minusE2)
+
+ if !e1.p.IsOne() {
+ t.Fatalf("bad pairing result: %s", e1)
+ }
+ }
+}
+
+func TestG1Marshal(t *testing.T) {
+ g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
+ form := g.Marshal()
+ _, ok := new(G1).Unmarshal(form)
+ if !ok {
+ t.Fatalf("failed to unmarshal")
+ }
+
+ g.ScalarBaseMult(Order)
+ form = g.Marshal()
+ g2, ok := new(G1).Unmarshal(form)
+ if !ok {
+ t.Fatalf("failed to unmarshal ∞")
+ }
+ if !g2.p.IsInfinity() {
+ t.Fatalf("∞ unmarshaled incorrectly")
+ }
+}
+
+func TestG2Marshal(t *testing.T) {
+ g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
+ form := g.Marshal()
+ _, ok := new(G2).Unmarshal(form)
+ if !ok {
+ t.Fatalf("failed to unmarshal")
+ }
+
+ g.ScalarBaseMult(Order)
+ form = g.Marshal()
+ g2, ok := new(G2).Unmarshal(form)
+ if !ok {
+ t.Fatalf("failed to unmarshal ∞")
+ }
+ if !g2.p.IsInfinity() {
+ t.Fatalf("∞ unmarshaled incorrectly")
+ }
+}
+
+func TestG1Identity(t *testing.T) {
+ g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(0))
+ if !g.p.IsInfinity() {
+ t.Error("failure")
+ }
+}
+
+func TestG2Identity(t *testing.T) {
+ g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(0))
+ if !g.p.IsInfinity() {
+ t.Error("failure")
+ }
+}
+
+func TestTripartiteDiffieHellman(t *testing.T) {
+ a, _ := rand.Int(rand.Reader, Order)
+ b, _ := rand.Int(rand.Reader, Order)
+ c, _ := rand.Int(rand.Reader, Order)
+
+ pa, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
+ qa, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
+ pb, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
+ qb, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
+ pc, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
+ qc, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(c).Marshal())
+
+ k1 := Pair(pb, qc)
+ k1.ScalarMult(k1, a)
+ k1Bytes := k1.Marshal()
+
+ k2 := Pair(pc, qa)
+ k2.ScalarMult(k2, b)
+ k2Bytes := k2.Marshal()
+
+ k3 := Pair(pa, qb)
+ k3.ScalarMult(k3, c)
+ k3Bytes := k3.Marshal()
+
+ if !bytes.Equal(k1Bytes, k2Bytes) || !bytes.Equal(k2Bytes, k3Bytes) {
+ t.Errorf("keys didn't agree")
+ }
+}
+
+func BenchmarkPairing(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Pair(&G1{curveGen}, &G2{twistGen})
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/constants.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/constants.go
new file mode 100644
index 00000000000..08ccfdf3d68
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/constants.go
@@ -0,0 +1,44 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bn256
+
+import (
+ "math/big"
+)
+
+func bigFromBase10(s string) *big.Int {
+ n, _ := new(big.Int).SetString(s, 10)
+ return n
+}
+
+// u is the BN parameter that determines the prime: 1868033³.
+var u = bigFromBase10("6518589491078791937")
+
+// p is a prime over which we form a basic field: 36u⁴+36u³+24u²+6u+1.
+var p = bigFromBase10("65000549695646603732796438742359905742825358107623003571877145026864184071783")
+
+// Order is the number of elements in both G₁ and G₂: 36u⁴+36u³+18u²+6u+1.
+var Order = bigFromBase10("65000549695646603732796438742359905742570406053903786389881062969044166799969")
+
+// xiToPMinus1Over6 is ξ^((p-1)/6) where ξ = i+3.
+var xiToPMinus1Over6 = &gfP2{bigFromBase10("8669379979083712429711189836753509758585994370025260553045152614783263110636"), bigFromBase10("19998038925833620163537568958541907098007303196759855091367510456613536016040")}
+
+// xiToPMinus1Over3 is ξ^((p-1)/3) where ξ = i+3.
+var xiToPMinus1Over3 = &gfP2{bigFromBase10("26098034838977895781559542626833399156321265654106457577426020397262786167059"), bigFromBase10("15931493369629630809226283458085260090334794394361662678240713231519278691715")}
+
+// xiToPMinus1Over2 is ξ^((p-1)/2) where ξ = i+3.
+var xiToPMinus1Over2 = &gfP2{bigFromBase10("50997318142241922852281555961173165965672272825141804376761836765206060036244"), bigFromBase10("38665955945962842195025998234511023902832543644254935982879660597356748036009")}
+
+// xiToPSquaredMinus1Over3 is ξ^((p²-1)/3) where ξ = i+3.
+var xiToPSquaredMinus1Over3 = bigFromBase10("65000549695646603727810655408050771481677621702948236658134783353303381437752")
+
+// xiTo2PSquaredMinus2Over3 is ξ^((2p²-2)/3) where ξ = i+3 (a cubic root of unity, mod p).
+var xiTo2PSquaredMinus2Over3 = bigFromBase10("4985783334309134261147736404674766913742361673560802634030")
+
+// xiToPSquaredMinus1Over6 is ξ^((p²-1)/6) where ξ = i+3 (a cubic root of -1, mod p).
+var xiToPSquaredMinus1Over6 = bigFromBase10("65000549695646603727810655408050771481677621702948236658134783353303381437753")
+
+// xiTo2PMinus2Over3 is ξ^((2p-2)/3) where ξ = i+3.
+var xiTo2PMinus2Over3 = &gfP2{bigFromBase10("19885131339612776214803633203834694332692106372356013117629940868870585019582"), bigFromBase10("21645619881471562101905880913352894726728173167203616652430647841922248593627")}
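The polynomial identities recorded in the comments above are mechanical to verify. A sketch, written as a hypothetical test inside package bn256 (so it can see the unexported u and p); it is not part of the vendored sources:

package bn256

import (
	"math/big"
	"testing"
)

// TestBNPolynomials checks that p and Order match their defining BN
// polynomials 36u⁴+36u³+24u²+6u+1 and 36u⁴+36u³+18u²+6u+1.
func TestBNPolynomials(t *testing.T) {
	eval := func(coeffs ...int64) *big.Int {
		acc := new(big.Int) // Horner's rule: acc = acc·u + c
		for _, c := range coeffs {
			acc.Mul(acc, u)
			acc.Add(acc, big.NewInt(c))
		}
		return acc
	}
	if eval(36, 36, 24, 6, 1).Cmp(p) != 0 {
		t.Error("p does not match its defining polynomial")
	}
	if eval(36, 36, 18, 6, 1).Cmp(Order) != 0 {
		t.Error("Order does not match its defining polynomial")
	}
}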
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/curve.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/curve.go
new file mode 100644
index 00000000000..55b7063f163
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/curve.go
@@ -0,0 +1,278 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bn256
+
+import (
+ "math/big"
+)
+
+// curvePoint implements the elliptic curve y²=x³+3. Points are kept in
+// Jacobian form and t=z² when valid. G₁ is the set of points of this curve on
+// GF(p).
+type curvePoint struct {
+ x, y, z, t *big.Int
+}
+
+var curveB = new(big.Int).SetInt64(3)
+
+// curveGen is the generator of G₁.
+var curveGen = &curvePoint{
+ new(big.Int).SetInt64(1),
+ new(big.Int).SetInt64(-2),
+ new(big.Int).SetInt64(1),
+ new(big.Int).SetInt64(1),
+}
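curveGen can be checked by hand: with x = 1 and y = −2 we have y² = 4 and x³ + 3 = 4, so the generator satisfies y² = x³ + 3 (mod p). The same check as a standalone sketch, with p copied from constants.go:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p, _ := new(big.Int).SetString("65000549695646603732796438742359905742825358107623003571877145026864184071783", 10)
	x, y := big.NewInt(1), big.NewInt(-2)

	yy := new(big.Int).Mul(y, y) // y² = 4
	rhs := new(big.Int).Mul(x, x)
	rhs.Mul(rhs, x)
	rhs.Add(rhs, big.NewInt(3)) // x³ + 3 = 4

	d := new(big.Int).Sub(yy, rhs)
	fmt.Println("on curve:", d.Mod(d, p).Sign() == 0)
}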
+
+func newCurvePoint(pool *bnPool) *curvePoint {
+ return &curvePoint{
+ pool.Get(),
+ pool.Get(),
+ pool.Get(),
+ pool.Get(),
+ }
+}
+
+func (c *curvePoint) String() string {
+ c.MakeAffine(new(bnPool))
+ return "(" + c.x.String() + ", " + c.y.String() + ")"
+}
+
+func (c *curvePoint) Put(pool *bnPool) {
+ pool.Put(c.x)
+ pool.Put(c.y)
+ pool.Put(c.z)
+ pool.Put(c.t)
+}
+
+func (c *curvePoint) Set(a *curvePoint) {
+ c.x.Set(a.x)
+ c.y.Set(a.y)
+ c.z.Set(a.z)
+ c.t.Set(a.t)
+}
+
+// IsOnCurve returns true iff c is on the curve; c must be in affine form.
+func (c *curvePoint) IsOnCurve() bool {
+ yy := new(big.Int).Mul(c.y, c.y)
+ xxx := new(big.Int).Mul(c.x, c.x)
+ xxx.Mul(xxx, c.x)
+ yy.Sub(yy, xxx)
+ yy.Sub(yy, curveB)
+ if yy.Sign() < 0 || yy.Cmp(p) >= 0 {
+ yy.Mod(yy, p)
+ }
+ return yy.Sign() == 0
+}
+
+func (c *curvePoint) SetInfinity() {
+ c.z.SetInt64(0)
+}
+
+func (c *curvePoint) IsInfinity() bool {
+ return c.z.Sign() == 0
+}
+
+func (c *curvePoint) Add(a, b *curvePoint, pool *bnPool) {
+ if a.IsInfinity() {
+ c.Set(b)
+ return
+ }
+ if b.IsInfinity() {
+ c.Set(a)
+ return
+ }
+
+ // See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
+
+ // Normalize the points by replacing a = [x1:y1:z1] and b = [x2:y2:z2]
+ // by [u1:s1:z1·z2] and [u2:s2:z1·z2]
+// where u1 = x1·z2², s1 = y1·z2³ and u2 = x2·z1², s2 = y2·z1³
+ z1z1 := pool.Get().Mul(a.z, a.z)
+ z1z1.Mod(z1z1, p)
+ z2z2 := pool.Get().Mul(b.z, b.z)
+ z2z2.Mod(z2z2, p)
+ u1 := pool.Get().Mul(a.x, z2z2)
+ u1.Mod(u1, p)
+ u2 := pool.Get().Mul(b.x, z1z1)
+ u2.Mod(u2, p)
+
+ t := pool.Get().Mul(b.z, z2z2)
+ t.Mod(t, p)
+ s1 := pool.Get().Mul(a.y, t)
+ s1.Mod(s1, p)
+
+ t.Mul(a.z, z1z1)
+ t.Mod(t, p)
+ s2 := pool.Get().Mul(b.y, t)
+ s2.Mod(s2, p)
+
+ // Compute x = (2h)²(s²-u1-u2)
+ // where s = (s2-s1)/(u2-u1) is the slope of the line through
+ // (u1,s1) and (u2,s2). The extra factor 2h = 2(u2-u1) comes from the value of z below.
+ // This is also:
+ // 4(s2-s1)² - 4h²(u1+u2) = 4(s2-s1)² - 4h³ - 4h²(2u1)
+ // = r² - j - 2v
+ // with the notations below.
+ h := pool.Get().Sub(u2, u1)
+ xEqual := h.Sign() == 0
+
+ t.Add(h, h)
+ // i = 4h²
+ i := pool.Get().Mul(t, t)
+ i.Mod(i, p)
+ // j = 4h³
+ j := pool.Get().Mul(h, i)
+ j.Mod(j, p)
+
+ t.Sub(s2, s1)
+ yEqual := t.Sign() == 0
+ if xEqual && yEqual {
+ c.Double(a, pool)
+ return
+ }
+ r := pool.Get().Add(t, t)
+
+ v := pool.Get().Mul(u1, i)
+ v.Mod(v, p)
+
+ // t4 = 4(s2-s1)²
+ t4 := pool.Get().Mul(r, r)
+ t4.Mod(t4, p)
+ t.Add(v, v)
+ t6 := pool.Get().Sub(t4, j)
+ c.x.Sub(t6, t)
+
+ // Set y = -(2h)³(s1 + s*(x/4h²-u1))
+ // This is also
+ // y = - 2·s1·j - (s2-s1)(2x - 2i·u1) = r(v-x) - 2·s1·j
+ t.Sub(v, c.x) // t7
+ t4.Mul(s1, j) // t8
+ t4.Mod(t4, p)
+ t6.Add(t4, t4) // t9
+ t4.Mul(r, t) // t10
+ t4.Mod(t4, p)
+ c.y.Sub(t4, t6)
+
+ // Set z = 2(u2-u1)·z1·z2 = 2h·z1·z2
+ t.Add(a.z, b.z) // t11
+ t4.Mul(t, t) // t12
+ t4.Mod(t4, p)
+ t.Sub(t4, z1z1) // t13
+ t4.Sub(t, z2z2) // t14
+ c.z.Mul(t4, h)
+ c.z.Mod(c.z, p)
+
+ pool.Put(z1z1)
+ pool.Put(z2z2)
+ pool.Put(u1)
+ pool.Put(u2)
+ pool.Put(t)
+ pool.Put(s1)
+ pool.Put(s2)
+ pool.Put(h)
+ pool.Put(i)
+ pool.Put(j)
+ pool.Put(r)
+ pool.Put(v)
+ pool.Put(t4)
+ pool.Put(t6)
+}
+
+func (c *curvePoint) Double(a *curvePoint, pool *bnPool) {
+ // See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
+ A := pool.Get().Mul(a.x, a.x)
+ A.Mod(A, p)
+ B := pool.Get().Mul(a.y, a.y)
+ B.Mod(B, p)
+ C := pool.Get().Mul(B, B)
+ C.Mod(C, p)
+
+ t := pool.Get().Add(a.x, B)
+ t2 := pool.Get().Mul(t, t)
+ t2.Mod(t2, p)
+ t.Sub(t2, A)
+ t2.Sub(t, C)
+ d := pool.Get().Add(t2, t2)
+ t.Add(A, A)
+ e := pool.Get().Add(t, A)
+ f := pool.Get().Mul(e, e)
+ f.Mod(f, p)
+
+ t.Add(d, d)
+ c.x.Sub(f, t)
+
+ t.Add(C, C)
+ t2.Add(t, t)
+ t.Add(t2, t2)
+ c.y.Sub(d, c.x)
+ t2.Mul(e, c.y)
+ t2.Mod(t2, p)
+ c.y.Sub(t2, t)
+
+ t.Mul(a.y, a.z)
+ t.Mod(t, p)
+ c.z.Add(t, t)
+
+ pool.Put(A)
+ pool.Put(B)
+ pool.Put(C)
+ pool.Put(t)
+ pool.Put(t2)
+ pool.Put(d)
+ pool.Put(e)
+ pool.Put(f)
+}
+
+func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int, pool *bnPool) *curvePoint {
+ sum := newCurvePoint(pool)
+ sum.SetInfinity()
+ t := newCurvePoint(pool)
+
+ for i := scalar.BitLen(); i >= 0; i-- {
+ t.Double(sum, pool)
+ if scalar.Bit(i) != 0 {
+ sum.Add(t, a, pool)
+ } else {
+ sum.Set(t)
+ }
+ }
+
+ c.Set(sum)
+ sum.Put(pool)
+ t.Put(pool)
+ return c
+}
+
+func (c *curvePoint) MakeAffine(pool *bnPool) *curvePoint {
+ if words := c.z.Bits(); len(words) == 1 && words[0] == 1 {
+ return c
+ }
+
+ zInv := pool.Get().ModInverse(c.z, p)
+ t := pool.Get().Mul(c.y, zInv)
+ t.Mod(t, p)
+ zInv2 := pool.Get().Mul(zInv, zInv)
+ zInv2.Mod(zInv2, p)
+ c.y.Mul(t, zInv2)
+ c.y.Mod(c.y, p)
+ t.Mul(c.x, zInv2)
+ t.Mod(t, p)
+ c.x.Set(t)
+ c.z.SetInt64(1)
+ c.t.SetInt64(1)
+
+ pool.Put(zInv)
+ pool.Put(t)
+ pool.Put(zInv2)
+
+ return c
+}
+
+func (c *curvePoint) Negative(a *curvePoint) {
+ c.x.Set(a.x)
+ c.y.Neg(a.y)
+ c.z.Set(a.z)
+ c.t.SetInt64(0)
+}
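Throughout this file the Jacobian triple (X, Y, Z) stands for the affine point (X/Z², Y/Z³); MakeAffine is the direct implementation, multiplying by (Z⁻¹)² and (Z⁻¹)³ mod p. A toy illustration of that conversion with math/big (arbitrary small values, not points on the real curve):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := big.NewInt(97) // a toy prime standing in for the field modulus
	X, Y, Z := big.NewInt(27), big.NewInt(8), big.NewInt(3)

	zInv := new(big.Int).ModInverse(Z, p)
	zInv2 := new(big.Int).Mul(zInv, zInv)
	zInv3 := new(big.Int).Mul(zInv2, zInv)

	x := new(big.Int).Mul(X, zInv2)
	x.Mod(x, p) // affine x = X/Z² mod p
	y := new(big.Int).Mul(Y, zInv3)
	y.Mod(y, p) // affine y = Y/Z³ mod p

	fmt.Println(x, y)
}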
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/example_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/example_test.go
new file mode 100644
index 00000000000..b2d19807a25
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/example_test.go
@@ -0,0 +1,43 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bn256
+
+import (
+ "crypto/rand"
+)
+
+func ExamplePair() {
+ // This implements the tripartite Diffie-Hellman algorithm from "A One
+ // Round Protocol for Tripartite Diffie-Hellman", A. Joux.
+ // http://www.springerlink.com/content/cddc57yyva0hburb/fulltext.pdf
+
+ // Each of three parties, a, b and c, generate a private value.
+ a, _ := rand.Int(rand.Reader, Order)
+ b, _ := rand.Int(rand.Reader, Order)
+ c, _ := rand.Int(rand.Reader, Order)
+
+ // Then each party calculates g₁ and g₂ times their private value.
+ pa := new(G1).ScalarBaseMult(a)
+ qa := new(G2).ScalarBaseMult(a)
+
+ pb := new(G1).ScalarBaseMult(b)
+ qb := new(G2).ScalarBaseMult(b)
+
+ pc := new(G1).ScalarBaseMult(c)
+ qc := new(G2).ScalarBaseMult(c)
+
+ // Now each party exchanges its public values with the other two and
+ // all parties can calculate the shared key.
+ k1 := Pair(pb, qc)
+ k1.ScalarMult(k1, a)
+
+ k2 := Pair(pc, qa)
+ k2.ScalarMult(k2, b)
+
+ k3 := Pair(pa, qb)
+ k3.ScalarMult(k3, c)
+
+ // k1, k2 and k3 will all be equal.
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp12.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp12.go
new file mode 100644
index 00000000000..f084eddf212
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp12.go
@@ -0,0 +1,200 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bn256
+
+// For details of the algorithms used, see "Multiplication and Squaring on
+// Pairing-Friendly Fields", Devegili et al.
+// http://eprint.iacr.org/2006/471.pdf.
+
+import (
+ "math/big"
+)
+
+// gfP12 implements the field of size p¹² as a quadratic extension of gfP6
+// where ω²=τ.
+type gfP12 struct {
+ x, y *gfP6 // value is xω + y
+}
+
+func newGFp12(pool *bnPool) *gfP12 {
+ return &gfP12{newGFp6(pool), newGFp6(pool)}
+}
+
+func (e *gfP12) String() string {
+ return "(" + e.x.String() + "," + e.y.String() + ")"
+}
+
+func (e *gfP12) Put(pool *bnPool) {
+ e.x.Put(pool)
+ e.y.Put(pool)
+}
+
+func (e *gfP12) Set(a *gfP12) *gfP12 {
+ e.x.Set(a.x)
+ e.y.Set(a.y)
+ return e
+}
+
+func (e *gfP12) SetZero() *gfP12 {
+ e.x.SetZero()
+ e.y.SetZero()
+ return e
+}
+
+func (e *gfP12) SetOne() *gfP12 {
+ e.x.SetZero()
+ e.y.SetOne()
+ return e
+}
+
+func (e *gfP12) Minimal() {
+ e.x.Minimal()
+ e.y.Minimal()
+}
+
+func (e *gfP12) IsZero() bool {
+ e.Minimal()
+ return e.x.IsZero() && e.y.IsZero()
+}
+
+func (e *gfP12) IsOne() bool {
+ e.Minimal()
+ return e.x.IsZero() && e.y.IsOne()
+}
+
+func (e *gfP12) Conjugate(a *gfP12) *gfP12 {
+ e.x.Negative(a.x)
+ e.y.Set(a.y)
+	return e
+}
+
+func (e *gfP12) Negative(a *gfP12) *gfP12 {
+ e.x.Negative(a.x)
+ e.y.Negative(a.y)
+ return e
+}
+
+// Frobenius computes (xω+y)^p = x^p ω·ξ^((p-1)/6) + y^p
+func (e *gfP12) Frobenius(a *gfP12, pool *bnPool) *gfP12 {
+ e.x.Frobenius(a.x, pool)
+ e.y.Frobenius(a.y, pool)
+ e.x.MulScalar(e.x, xiToPMinus1Over6, pool)
+ return e
+}
+
+// FrobeniusP2 computes (xω+y)^p² = x^p² ω·ξ^((p²-1)/6) + y^p²
+func (e *gfP12) FrobeniusP2(a *gfP12, pool *bnPool) *gfP12 {
+ e.x.FrobeniusP2(a.x)
+ e.x.MulGFP(e.x, xiToPSquaredMinus1Over6)
+ e.y.FrobeniusP2(a.y)
+ return e
+}
+
+func (e *gfP12) Add(a, b *gfP12) *gfP12 {
+ e.x.Add(a.x, b.x)
+ e.y.Add(a.y, b.y)
+ return e
+}
+
+func (e *gfP12) Sub(a, b *gfP12) *gfP12 {
+ e.x.Sub(a.x, b.x)
+ e.y.Sub(a.y, b.y)
+ return e
+}
+
+func (e *gfP12) Mul(a, b *gfP12, pool *bnPool) *gfP12 {
+ tx := newGFp6(pool)
+ tx.Mul(a.x, b.y, pool)
+ t := newGFp6(pool)
+ t.Mul(b.x, a.y, pool)
+ tx.Add(tx, t)
+
+ ty := newGFp6(pool)
+ ty.Mul(a.y, b.y, pool)
+ t.Mul(a.x, b.x, pool)
+ t.MulTau(t, pool)
+ e.y.Add(ty, t)
+ e.x.Set(tx)
+
+ tx.Put(pool)
+ ty.Put(pool)
+ t.Put(pool)
+ return e
+}
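Mul above is plain schoolbook multiplication in the quadratic extension, using ω² = τ: (x₁ω + y₁)(x₂ω + y₂) = (x₁y₂ + x₂y₁)ω + (y₁y₂ + x₁x₂τ), which is exactly the tx and ty the code accumulates.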
+
+func (e *gfP12) MulScalar(a *gfP12, b *gfP6, pool *bnPool) *gfP12 {
+	e.x.Mul(a.x, b, pool)
+	e.y.Mul(a.y, b, pool)
+ return e
+}
+
+func (c *gfP12) Exp(a *gfP12, power *big.Int, pool *bnPool) *gfP12 {
+ sum := newGFp12(pool)
+ sum.SetOne()
+ t := newGFp12(pool)
+
+ for i := power.BitLen() - 1; i >= 0; i-- {
+ t.Square(sum, pool)
+ if power.Bit(i) != 0 {
+ sum.Mul(t, a, pool)
+ } else {
+ sum.Set(t)
+ }
+ }
+
+ c.Set(sum)
+
+ sum.Put(pool)
+ t.Put(pool)
+
+ return c
+}
+
+func (e *gfP12) Square(a *gfP12, pool *bnPool) *gfP12 {
+ // Complex squaring algorithm
+ v0 := newGFp6(pool)
+ v0.Mul(a.x, a.y, pool)
+
+ t := newGFp6(pool)
+ t.MulTau(a.x, pool)
+ t.Add(a.y, t)
+ ty := newGFp6(pool)
+ ty.Add(a.x, a.y)
+ ty.Mul(ty, t, pool)
+ ty.Sub(ty, v0)
+ t.MulTau(v0, pool)
+ ty.Sub(ty, t)
+
+ e.y.Set(ty)
+ e.x.Double(v0)
+
+ v0.Put(pool)
+ t.Put(pool)
+ ty.Put(pool)
+
+ return e
+}
+
+func (e *gfP12) Invert(a *gfP12, pool *bnPool) *gfP12 {
+ // See "Implementing cryptographic pairings", M. Scott, section 3.2.
+ // ftp://136.206.11.249/pub/crypto/pairings.pdf
+ t1 := newGFp6(pool)
+ t2 := newGFp6(pool)
+
+ t1.Square(a.x, pool)
+ t2.Square(a.y, pool)
+ t1.MulTau(t1, pool)
+ t1.Sub(t2, t1)
+ t2.Invert(t1, pool)
+
+ e.x.Negative(a.x)
+ e.y.Set(a.y)
+ e.MulScalar(e, t2, pool)
+
+ t1.Put(pool)
+ t2.Put(pool)
+
+ return e
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp2.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp2.go
new file mode 100644
index 00000000000..97f3f1f3fa1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp2.go
@@ -0,0 +1,219 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bn256
+
+// For details of the algorithms used, see "Multiplication and Squaring on
+// Pairing-Friendly Fields", Devegili et al.
+// http://eprint.iacr.org/2006/471.pdf.
+
+import (
+ "math/big"
+)
+
+// gfP2 implements a field of size p² as a quadratic extension of the base
+// field where i²=-1.
+type gfP2 struct {
+ x, y *big.Int // value is xi+y.
+}
+
+func newGFp2(pool *bnPool) *gfP2 {
+ return &gfP2{pool.Get(), pool.Get()}
+}
+
+func (e *gfP2) String() string {
+ x := new(big.Int).Mod(e.x, p)
+ y := new(big.Int).Mod(e.y, p)
+ return "(" + x.String() + "," + y.String() + ")"
+}
+
+func (e *gfP2) Put(pool *bnPool) {
+ pool.Put(e.x)
+ pool.Put(e.y)
+}
+
+func (e *gfP2) Set(a *gfP2) *gfP2 {
+ e.x.Set(a.x)
+ e.y.Set(a.y)
+ return e
+}
+
+func (e *gfP2) SetZero() *gfP2 {
+ e.x.SetInt64(0)
+ e.y.SetInt64(0)
+ return e
+}
+
+func (e *gfP2) SetOne() *gfP2 {
+ e.x.SetInt64(0)
+ e.y.SetInt64(1)
+ return e
+}
+
+func (e *gfP2) Minimal() {
+ if e.x.Sign() < 0 || e.x.Cmp(p) >= 0 {
+ e.x.Mod(e.x, p)
+ }
+ if e.y.Sign() < 0 || e.y.Cmp(p) >= 0 {
+ e.y.Mod(e.y, p)
+ }
+}
+
+func (e *gfP2) IsZero() bool {
+ return e.x.Sign() == 0 && e.y.Sign() == 0
+}
+
+func (e *gfP2) IsOne() bool {
+ if e.x.Sign() != 0 {
+ return false
+ }
+ words := e.y.Bits()
+ return len(words) == 1 && words[0] == 1
+}
+
+func (e *gfP2) Conjugate(a *gfP2) *gfP2 {
+ e.y.Set(a.y)
+ e.x.Neg(a.x)
+ return e
+}
+
+func (e *gfP2) Negative(a *gfP2) *gfP2 {
+ e.x.Neg(a.x)
+ e.y.Neg(a.y)
+ return e
+}
+
+func (e *gfP2) Add(a, b *gfP2) *gfP2 {
+ e.x.Add(a.x, b.x)
+ e.y.Add(a.y, b.y)
+ return e
+}
+
+func (e *gfP2) Sub(a, b *gfP2) *gfP2 {
+ e.x.Sub(a.x, b.x)
+ e.y.Sub(a.y, b.y)
+ return e
+}
+
+func (e *gfP2) Double(a *gfP2) *gfP2 {
+ e.x.Lsh(a.x, 1)
+ e.y.Lsh(a.y, 1)
+ return e
+}
+
+func (c *gfP2) Exp(a *gfP2, power *big.Int, pool *bnPool) *gfP2 {
+ sum := newGFp2(pool)
+ sum.SetOne()
+ t := newGFp2(pool)
+
+ for i := power.BitLen() - 1; i >= 0; i-- {
+ t.Square(sum, pool)
+ if power.Bit(i) != 0 {
+ sum.Mul(t, a, pool)
+ } else {
+ sum.Set(t)
+ }
+ }
+
+ c.Set(sum)
+
+ sum.Put(pool)
+ t.Put(pool)
+
+ return c
+}
+
+// See "Multiplication and Squaring in Pairing-Friendly Fields",
+// http://eprint.iacr.org/2006/471.pdf
+func (e *gfP2) Mul(a, b *gfP2, pool *bnPool) *gfP2 {
+ tx := pool.Get().Mul(a.x, b.y)
+ t := pool.Get().Mul(b.x, a.y)
+ tx.Add(tx, t)
+ tx.Mod(tx, p)
+
+ ty := pool.Get().Mul(a.y, b.y)
+ t.Mul(a.x, b.x)
+ ty.Sub(ty, t)
+ e.y.Mod(ty, p)
+ e.x.Set(tx)
+
+ pool.Put(tx)
+ pool.Put(ty)
+ pool.Put(t)
+
+ return e
+}
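The same pattern in GF(p²), with i² = −1: (x₁i + y₁)(x₂i + y₂) = (x₁y₂ + x₂y₁)i + (y₁y₂ − x₁x₂); the subtraction in ty is where the −1 enters.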
+
+func (e *gfP2) MulScalar(a *gfP2, b *big.Int) *gfP2 {
+ e.x.Mul(a.x, b)
+ e.y.Mul(a.y, b)
+ return e
+}
+
+// MulXi sets e=ξa where ξ=i+3 and then returns e.
+func (e *gfP2) MulXi(a *gfP2, pool *bnPool) *gfP2 {
+ // (xi+y)(i+3) = (3x+y)i+(3y-x)
+ tx := pool.Get().Lsh(a.x, 1)
+ tx.Add(tx, a.x)
+ tx.Add(tx, a.y)
+
+ ty := pool.Get().Lsh(a.y, 1)
+ ty.Add(ty, a.y)
+ ty.Sub(ty, a.x)
+
+ e.x.Set(tx)
+ e.y.Set(ty)
+
+ pool.Put(tx)
+ pool.Put(ty)
+
+ return e
+}
+
+func (e *gfP2) Square(a *gfP2, pool *bnPool) *gfP2 {
+ // Complex squaring algorithm:
+	// (xi+y)² = (x+y)(y-x) + 2*i*x*y
+ t1 := pool.Get().Sub(a.y, a.x)
+ t2 := pool.Get().Add(a.x, a.y)
+ ty := pool.Get().Mul(t1, t2)
+ ty.Mod(ty, p)
+
+ t1.Mul(a.x, a.y)
+ t1.Lsh(t1, 1)
+
+ e.x.Mod(t1, p)
+ e.y.Set(ty)
+
+ pool.Put(t1)
+ pool.Put(t2)
+ pool.Put(ty)
+
+ return e
+}
+
+func (e *gfP2) Invert(a *gfP2, pool *bnPool) *gfP2 {
+ // See "Implementing cryptographic pairings", M. Scott, section 3.2.
+ // ftp://136.206.11.249/pub/crypto/pairings.pdf
+ t := pool.Get()
+ t.Mul(a.y, a.y)
+ t2 := pool.Get()
+ t2.Mul(a.x, a.x)
+ t.Add(t, t2)
+
+ inv := pool.Get()
+ inv.ModInverse(t, p)
+
+ e.x.Neg(a.x)
+ e.x.Mul(e.x, inv)
+ e.x.Mod(e.x, p)
+
+ e.y.Mul(a.y, inv)
+ e.y.Mod(e.y, p)
+
+ pool.Put(t)
+ pool.Put(t2)
+ pool.Put(inv)
+
+ return e
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp6.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp6.go
new file mode 100644
index 00000000000..f98ae782cc1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/gfp6.go
@@ -0,0 +1,296 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bn256
+
+// For details of the algorithms used, see "Multiplication and Squaring on
+// Pairing-Friendly Fields", Devegili et al.
+// http://eprint.iacr.org/2006/471.pdf.
+
+import (
+ "math/big"
+)
+
+// gfP6 implements the field of size p⁶ as a cubic extension of gfP2 where τ³=ξ
+// and ξ=i+3.
+type gfP6 struct {
+ x, y, z *gfP2 // value is xτ² + yτ + z
+}
+
+func newGFp6(pool *bnPool) *gfP6 {
+ return &gfP6{newGFp2(pool), newGFp2(pool), newGFp2(pool)}
+}
+
+func (e *gfP6) String() string {
+ return "(" + e.x.String() + "," + e.y.String() + "," + e.z.String() + ")"
+}
+
+func (e *gfP6) Put(pool *bnPool) {
+ e.x.Put(pool)
+ e.y.Put(pool)
+ e.z.Put(pool)
+}
+
+func (e *gfP6) Set(a *gfP6) *gfP6 {
+ e.x.Set(a.x)
+ e.y.Set(a.y)
+ e.z.Set(a.z)
+ return e
+}
+
+func (e *gfP6) SetZero() *gfP6 {
+ e.x.SetZero()
+ e.y.SetZero()
+ e.z.SetZero()
+ return e
+}
+
+func (e *gfP6) SetOne() *gfP6 {
+ e.x.SetZero()
+ e.y.SetZero()
+ e.z.SetOne()
+ return e
+}
+
+func (e *gfP6) Minimal() {
+ e.x.Minimal()
+ e.y.Minimal()
+ e.z.Minimal()
+}
+
+func (e *gfP6) IsZero() bool {
+ return e.x.IsZero() && e.y.IsZero() && e.z.IsZero()
+}
+
+func (e *gfP6) IsOne() bool {
+ return e.x.IsZero() && e.y.IsZero() && e.z.IsOne()
+}
+
+func (e *gfP6) Negative(a *gfP6) *gfP6 {
+ e.x.Negative(a.x)
+ e.y.Negative(a.y)
+ e.z.Negative(a.z)
+ return e
+}
+
+func (e *gfP6) Frobenius(a *gfP6, pool *bnPool) *gfP6 {
+ e.x.Conjugate(a.x)
+ e.y.Conjugate(a.y)
+ e.z.Conjugate(a.z)
+
+ e.x.Mul(e.x, xiTo2PMinus2Over3, pool)
+ e.y.Mul(e.y, xiToPMinus1Over3, pool)
+ return e
+}
+
+// FrobeniusP2 computes (xτ²+yτ+z)^(p²) = xτ^(2p²) + yτ^(p²) + z
+func (e *gfP6) FrobeniusP2(a *gfP6) *gfP6 {
+ // τ^(2p²) = τ²τ^(2p²-2) = τ²ξ^((2p²-2)/3)
+ e.x.MulScalar(a.x, xiTo2PSquaredMinus2Over3)
+ // τ^(p²) = ττ^(p²-1) = τξ^((p²-1)/3)
+ e.y.MulScalar(a.y, xiToPSquaredMinus1Over3)
+ e.z.Set(a.z)
+ return e
+}
+
+func (e *gfP6) Add(a, b *gfP6) *gfP6 {
+ e.x.Add(a.x, b.x)
+ e.y.Add(a.y, b.y)
+ e.z.Add(a.z, b.z)
+ return e
+}
+
+func (e *gfP6) Sub(a, b *gfP6) *gfP6 {
+ e.x.Sub(a.x, b.x)
+ e.y.Sub(a.y, b.y)
+ e.z.Sub(a.z, b.z)
+ return e
+}
+
+func (e *gfP6) Double(a *gfP6) *gfP6 {
+ e.x.Double(a.x)
+ e.y.Double(a.y)
+ e.z.Double(a.z)
+ return e
+}
+
+func (e *gfP6) Mul(a, b *gfP6, pool *bnPool) *gfP6 {
+ // "Multiplication and Squaring on Pairing-Friendly Fields"
+ // Section 4, Karatsuba method.
+ // http://eprint.iacr.org/2006/471.pdf
+
+ v0 := newGFp2(pool)
+ v0.Mul(a.z, b.z, pool)
+ v1 := newGFp2(pool)
+ v1.Mul(a.y, b.y, pool)
+ v2 := newGFp2(pool)
+ v2.Mul(a.x, b.x, pool)
+
+ t0 := newGFp2(pool)
+ t0.Add(a.x, a.y)
+ t1 := newGFp2(pool)
+ t1.Add(b.x, b.y)
+ tz := newGFp2(pool)
+ tz.Mul(t0, t1, pool)
+
+ tz.Sub(tz, v1)
+ tz.Sub(tz, v2)
+ tz.MulXi(tz, pool)
+ tz.Add(tz, v0)
+
+ t0.Add(a.y, a.z)
+ t1.Add(b.y, b.z)
+ ty := newGFp2(pool)
+ ty.Mul(t0, t1, pool)
+ ty.Sub(ty, v0)
+ ty.Sub(ty, v1)
+ t0.MulXi(v2, pool)
+ ty.Add(ty, t0)
+
+ t0.Add(a.x, a.z)
+ t1.Add(b.x, b.z)
+ tx := newGFp2(pool)
+ tx.Mul(t0, t1, pool)
+ tx.Sub(tx, v0)
+ tx.Add(tx, v1)
+ tx.Sub(tx, v2)
+
+ e.x.Set(tx)
+ e.y.Set(ty)
+ e.z.Set(tz)
+
+ t0.Put(pool)
+ t1.Put(pool)
+ tx.Put(pool)
+ ty.Put(pool)
+ tz.Put(pool)
+ v0.Put(pool)
+ v1.Put(pool)
+ v2.Put(pool)
+ return e
+}
+
+func (e *gfP6) MulScalar(a *gfP6, b *gfP2, pool *bnPool) *gfP6 {
+ e.x.Mul(a.x, b, pool)
+ e.y.Mul(a.y, b, pool)
+ e.z.Mul(a.z, b, pool)
+ return e
+}
+
+func (e *gfP6) MulGFP(a *gfP6, b *big.Int) *gfP6 {
+ e.x.MulScalar(a.x, b)
+ e.y.MulScalar(a.y, b)
+ e.z.MulScalar(a.z, b)
+ return e
+}
+
+// MulTau computes τ·(aτ²+bτ+c) = bτ²+cτ+aξ
+func (e *gfP6) MulTau(a *gfP6, pool *bnPool) {
+ tz := newGFp2(pool)
+ tz.MulXi(a.x, pool)
+ ty := newGFp2(pool)
+ ty.Set(a.y)
+ e.y.Set(a.z)
+ e.x.Set(ty)
+ e.z.Set(tz)
+ tz.Put(pool)
+ ty.Put(pool)
+}
+
+func (e *gfP6) Square(a *gfP6, pool *bnPool) *gfP6 {
+ v0 := newGFp2(pool).Square(a.z, pool)
+ v1 := newGFp2(pool).Square(a.y, pool)
+ v2 := newGFp2(pool).Square(a.x, pool)
+
+ c0 := newGFp2(pool).Add(a.x, a.y)
+ c0.Square(c0, pool)
+ c0.Sub(c0, v1)
+ c0.Sub(c0, v2)
+ c0.MulXi(c0, pool)
+ c0.Add(c0, v0)
+
+ c1 := newGFp2(pool).Add(a.y, a.z)
+ c1.Square(c1, pool)
+ c1.Sub(c1, v0)
+ c1.Sub(c1, v1)
+ xiV2 := newGFp2(pool).MulXi(v2, pool)
+ c1.Add(c1, xiV2)
+
+ c2 := newGFp2(pool).Add(a.x, a.z)
+ c2.Square(c2, pool)
+ c2.Sub(c2, v0)
+ c2.Add(c2, v1)
+ c2.Sub(c2, v2)
+
+ e.x.Set(c2)
+ e.y.Set(c1)
+ e.z.Set(c0)
+
+ v0.Put(pool)
+ v1.Put(pool)
+ v2.Put(pool)
+ c0.Put(pool)
+ c1.Put(pool)
+ c2.Put(pool)
+ xiV2.Put(pool)
+
+ return e
+}
+
+func (e *gfP6) Invert(a *gfP6, pool *bnPool) *gfP6 {
+ // See "Implementing cryptographic pairings", M. Scott, section 3.2.
+ // ftp://136.206.11.249/pub/crypto/pairings.pdf
+
+ // Here we can give a short explanation of how it works: let j be a cubic root of
+ // unity in GF(p²) so that 1+j+j²=0.
+ // Then (xτ² + yτ + z)(xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
+ // = (xτ² + yτ + z)(Cτ²+Bτ+A)
+ // = (x³ξ²+y³ξ+z³-3ξxyz) = F is an element of the base field (the norm).
+ //
+ // On the other hand (xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
+ // = τ²(y²-ξxz) + τ(ξx²-yz) + (z²-ξxy)
+ //
+ // So that's why A = (z²-ξxy), B = (ξx²-yz), C = (y²-ξxz)
+ t1 := newGFp2(pool)
+
+ A := newGFp2(pool)
+ A.Square(a.z, pool)
+ t1.Mul(a.x, a.y, pool)
+ t1.MulXi(t1, pool)
+ A.Sub(A, t1)
+
+ B := newGFp2(pool)
+ B.Square(a.x, pool)
+ B.MulXi(B, pool)
+ t1.Mul(a.y, a.z, pool)
+ B.Sub(B, t1)
+
+ C := newGFp2(pool)
+ C.Square(a.y, pool)
+ t1.Mul(a.x, a.z, pool)
+ C.Sub(C, t1)
+
+ F := newGFp2(pool)
+ F.Mul(C, a.y, pool)
+ F.MulXi(F, pool)
+ t1.Mul(A, a.z, pool)
+ F.Add(F, t1)
+ t1.Mul(B, a.x, pool)
+ t1.MulXi(t1, pool)
+ F.Add(F, t1)
+
+ F.Invert(F, pool)
+
+ e.x.Mul(C, F, pool)
+ e.y.Mul(B, F, pool)
+ e.z.Mul(A, F, pool)
+
+ t1.Put(pool)
+ A.Put(pool)
+ B.Put(pool)
+ C.Put(pool)
+ F.Put(pool)
+
+ return e
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/optate.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/optate.go
new file mode 100644
index 00000000000..7ae0746eb10
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/optate.go
@@ -0,0 +1,395 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bn256
+
+func lineFunctionAdd(r, p *twistPoint, q *curvePoint, r2 *gfP2, pool *bnPool) (a, b, c *gfP2, rOut *twistPoint) {
+ // See the mixed addition algorithm from "Faster Computation of the
+ // Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
+
+ B := newGFp2(pool).Mul(p.x, r.t, pool)
+
+ D := newGFp2(pool).Add(p.y, r.z)
+ D.Square(D, pool)
+ D.Sub(D, r2)
+ D.Sub(D, r.t)
+ D.Mul(D, r.t, pool)
+
+ H := newGFp2(pool).Sub(B, r.x)
+ I := newGFp2(pool).Square(H, pool)
+
+ E := newGFp2(pool).Add(I, I)
+ E.Add(E, E)
+
+ J := newGFp2(pool).Mul(H, E, pool)
+
+ L1 := newGFp2(pool).Sub(D, r.y)
+ L1.Sub(L1, r.y)
+
+ V := newGFp2(pool).Mul(r.x, E, pool)
+
+ rOut = newTwistPoint(pool)
+ rOut.x.Square(L1, pool)
+ rOut.x.Sub(rOut.x, J)
+ rOut.x.Sub(rOut.x, V)
+ rOut.x.Sub(rOut.x, V)
+
+ rOut.z.Add(r.z, H)
+ rOut.z.Square(rOut.z, pool)
+ rOut.z.Sub(rOut.z, r.t)
+ rOut.z.Sub(rOut.z, I)
+
+ t := newGFp2(pool).Sub(V, rOut.x)
+ t.Mul(t, L1, pool)
+ t2 := newGFp2(pool).Mul(r.y, J, pool)
+ t2.Add(t2, t2)
+ rOut.y.Sub(t, t2)
+
+ rOut.t.Square(rOut.z, pool)
+
+ t.Add(p.y, rOut.z)
+ t.Square(t, pool)
+ t.Sub(t, r2)
+ t.Sub(t, rOut.t)
+
+ t2.Mul(L1, p.x, pool)
+ t2.Add(t2, t2)
+ a = newGFp2(pool)
+ a.Sub(t2, t)
+
+ c = newGFp2(pool)
+ c.MulScalar(rOut.z, q.y)
+ c.Add(c, c)
+
+ b = newGFp2(pool)
+ b.SetZero()
+ b.Sub(b, L1)
+ b.MulScalar(b, q.x)
+ b.Add(b, b)
+
+ B.Put(pool)
+ D.Put(pool)
+ H.Put(pool)
+ I.Put(pool)
+ E.Put(pool)
+ J.Put(pool)
+ L1.Put(pool)
+ V.Put(pool)
+ t.Put(pool)
+ t2.Put(pool)
+
+ return
+}
+
+func lineFunctionDouble(r *twistPoint, q *curvePoint, pool *bnPool) (a, b, c *gfP2, rOut *twistPoint) {
+ // See the doubling algorithm for a=0 from "Faster Computation of the
+ // Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
+
+ A := newGFp2(pool).Square(r.x, pool)
+ B := newGFp2(pool).Square(r.y, pool)
+ C := newGFp2(pool).Square(B, pool)
+
+ D := newGFp2(pool).Add(r.x, B)
+ D.Square(D, pool)
+ D.Sub(D, A)
+ D.Sub(D, C)
+ D.Add(D, D)
+
+ E := newGFp2(pool).Add(A, A)
+ E.Add(E, A)
+
+ G := newGFp2(pool).Square(E, pool)
+
+ rOut = newTwistPoint(pool)
+ rOut.x.Sub(G, D)
+ rOut.x.Sub(rOut.x, D)
+
+ rOut.z.Add(r.y, r.z)
+ rOut.z.Square(rOut.z, pool)
+ rOut.z.Sub(rOut.z, B)
+ rOut.z.Sub(rOut.z, r.t)
+
+ rOut.y.Sub(D, rOut.x)
+ rOut.y.Mul(rOut.y, E, pool)
+ t := newGFp2(pool).Add(C, C)
+ t.Add(t, t)
+ t.Add(t, t)
+ rOut.y.Sub(rOut.y, t)
+
+ rOut.t.Square(rOut.z, pool)
+
+ t.Mul(E, r.t, pool)
+ t.Add(t, t)
+ b = newGFp2(pool)
+ b.SetZero()
+ b.Sub(b, t)
+ b.MulScalar(b, q.x)
+
+ a = newGFp2(pool)
+ a.Add(r.x, E)
+ a.Square(a, pool)
+ a.Sub(a, A)
+ a.Sub(a, G)
+ t.Add(B, B)
+ t.Add(t, t)
+ a.Sub(a, t)
+
+ c = newGFp2(pool)
+ c.Mul(rOut.z, r.t, pool)
+ c.Add(c, c)
+ c.MulScalar(c, q.y)
+
+ A.Put(pool)
+ B.Put(pool)
+ C.Put(pool)
+ D.Put(pool)
+ E.Put(pool)
+ G.Put(pool)
+ t.Put(pool)
+
+ return
+}
+
+func mulLine(ret *gfP12, a, b, c *gfP2, pool *bnPool) {
+ a2 := newGFp6(pool)
+ a2.x.SetZero()
+ a2.y.Set(a)
+ a2.z.Set(b)
+ a2.Mul(a2, ret.x, pool)
+ t3 := newGFp6(pool).MulScalar(ret.y, c, pool)
+
+ t := newGFp2(pool)
+ t.Add(b, c)
+ t2 := newGFp6(pool)
+ t2.x.SetZero()
+ t2.y.Set(a)
+ t2.z.Set(t)
+ ret.x.Add(ret.x, ret.y)
+
+ ret.y.Set(t3)
+
+ ret.x.Mul(ret.x, t2, pool)
+ ret.x.Sub(ret.x, a2)
+ ret.x.Sub(ret.x, ret.y)
+ a2.MulTau(a2, pool)
+ ret.y.Add(ret.y, a2)
+
+ a2.Put(pool)
+ t3.Put(pool)
+ t2.Put(pool)
+ t.Put(pool)
+}
+
+// sixuPlus2NAF is 6u+2 in non-adjacent form.
+var sixuPlus2NAF = []int8{0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, -1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 1, 0, 0, 0, 1, 0, -1, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 1}
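A non-adjacent form is simply a signed binary expansion, so the digits above (stored least-significant first) should satisfy Σᵢ sixuPlus2NAF[i]·2ⁱ = 6u+2. A hypothetical in-package check, not part of the vendored sources:

package bn256

import (
	"math/big"
	"testing"
)

func TestSixUPlus2NAF(t *testing.T) {
	sum := new(big.Int)
	for i := len(sixuPlus2NAF) - 1; i >= 0; i-- {
		sum.Lsh(sum, 1) // evaluate the signed digits MSB-first
		sum.Add(sum, big.NewInt(int64(sixuPlus2NAF[i])))
	}
	want := new(big.Int).Mul(big.NewInt(6), u)
	want.Add(want, big.NewInt(2))
	if sum.Cmp(want) != 0 {
		t.Errorf("sixuPlus2NAF encodes %v, want %v", sum, want)
	}
}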
+
+// miller implements the Miller loop for calculating the Optimal Ate pairing.
+// See algorithm 1 from http://cryptojedi.org/papers/dclxvi-20100714.pdf
+func miller(q *twistPoint, p *curvePoint, pool *bnPool) *gfP12 {
+ ret := newGFp12(pool)
+ ret.SetOne()
+
+ aAffine := newTwistPoint(pool)
+ aAffine.Set(q)
+ aAffine.MakeAffine(pool)
+
+ bAffine := newCurvePoint(pool)
+ bAffine.Set(p)
+ bAffine.MakeAffine(pool)
+
+ minusA := newTwistPoint(pool)
+ minusA.Negative(aAffine, pool)
+
+ r := newTwistPoint(pool)
+ r.Set(aAffine)
+
+ r2 := newGFp2(pool)
+ r2.Square(aAffine.y, pool)
+
+ for i := len(sixuPlus2NAF) - 1; i > 0; i-- {
+ a, b, c, newR := lineFunctionDouble(r, bAffine, pool)
+ if i != len(sixuPlus2NAF)-1 {
+ ret.Square(ret, pool)
+ }
+
+ mulLine(ret, a, b, c, pool)
+ a.Put(pool)
+ b.Put(pool)
+ c.Put(pool)
+ r.Put(pool)
+ r = newR
+
+ switch sixuPlus2NAF[i-1] {
+ case 1:
+ a, b, c, newR = lineFunctionAdd(r, aAffine, bAffine, r2, pool)
+ case -1:
+ a, b, c, newR = lineFunctionAdd(r, minusA, bAffine, r2, pool)
+ default:
+ continue
+ }
+
+ mulLine(ret, a, b, c, pool)
+ a.Put(pool)
+ b.Put(pool)
+ c.Put(pool)
+ r.Put(pool)
+ r = newR
+ }
+
+ // In order to calculate Q1 we have to convert q from the sextic twist
+ // to the full GF(p^12) group, apply the Frobenius there, and convert
+ // back.
+ //
+ // The twist isomorphism is (x', y') -> (xω², yω³). If we consider just
+ // x for a moment, then after applying the Frobenius, we have x̄ω^(2p)
+ // where x̄ is the conjugate of x. If we are going to apply the inverse
+ // isomorphism we need a value with a single coefficient of ω² so we
+	// rewrite this as x̄ω^(2p-2)ω². ω⁶ = ξ and, due to the construction of
+ // p, 2p-2 is a multiple of six. Therefore we can rewrite as
+ // x̄ξ^((p-1)/3)ω² and applying the inverse isomorphism eliminates the
+ // ω².
+ //
+ // A similar argument can be made for the y value.
+
+ q1 := newTwistPoint(pool)
+ q1.x.Conjugate(aAffine.x)
+ q1.x.Mul(q1.x, xiToPMinus1Over3, pool)
+ q1.y.Conjugate(aAffine.y)
+ q1.y.Mul(q1.y, xiToPMinus1Over2, pool)
+ q1.z.SetOne()
+ q1.t.SetOne()
+
+ // For Q2 we are applying the p² Frobenius. The two conjugations cancel
+ // out and we are left only with the factors from the isomorphism. In
+ // the case of x, we end up with a pure number which is why
+	// xiToPSquaredMinus1Over3 lies in GF(p). With y we get a factor of -1. We
+ // ignore this to end up with -Q2.
+
+ minusQ2 := newTwistPoint(pool)
+ minusQ2.x.MulScalar(aAffine.x, xiToPSquaredMinus1Over3)
+ minusQ2.y.Set(aAffine.y)
+ minusQ2.z.SetOne()
+ minusQ2.t.SetOne()
+
+ r2.Square(q1.y, pool)
+ a, b, c, newR := lineFunctionAdd(r, q1, bAffine, r2, pool)
+ mulLine(ret, a, b, c, pool)
+ a.Put(pool)
+ b.Put(pool)
+ c.Put(pool)
+ r.Put(pool)
+ r = newR
+
+ r2.Square(minusQ2.y, pool)
+ a, b, c, newR = lineFunctionAdd(r, minusQ2, bAffine, r2, pool)
+ mulLine(ret, a, b, c, pool)
+ a.Put(pool)
+ b.Put(pool)
+ c.Put(pool)
+ r.Put(pool)
+ r = newR
+
+ aAffine.Put(pool)
+ bAffine.Put(pool)
+ minusA.Put(pool)
+ r.Put(pool)
+ r2.Put(pool)
+
+ return ret
+}
+
+// finalExponentiation computes the (p¹²-1)/Order-th power of an element of
+// GF(p¹²) to obtain an element of GT (steps 13-15 of algorithm 1 from
+// http://cryptojedi.org/papers/dclxvi-20100714.pdf)
+func finalExponentiation(in *gfP12, pool *bnPool) *gfP12 {
+ t1 := newGFp12(pool)
+
+ // This is the p^6-Frobenius
+ t1.x.Negative(in.x)
+ t1.y.Set(in.y)
+
+ inv := newGFp12(pool)
+ inv.Invert(in, pool)
+ t1.Mul(t1, inv, pool)
+
+ t2 := newGFp12(pool).FrobeniusP2(t1, pool)
+ t1.Mul(t1, t2, pool)
+
+ fp := newGFp12(pool).Frobenius(t1, pool)
+ fp2 := newGFp12(pool).FrobeniusP2(t1, pool)
+ fp3 := newGFp12(pool).Frobenius(fp2, pool)
+
+ fu, fu2, fu3 := newGFp12(pool), newGFp12(pool), newGFp12(pool)
+ fu.Exp(t1, u, pool)
+ fu2.Exp(fu, u, pool)
+ fu3.Exp(fu2, u, pool)
+
+ y3 := newGFp12(pool).Frobenius(fu, pool)
+ fu2p := newGFp12(pool).Frobenius(fu2, pool)
+ fu3p := newGFp12(pool).Frobenius(fu3, pool)
+ y2 := newGFp12(pool).FrobeniusP2(fu2, pool)
+
+ y0 := newGFp12(pool)
+ y0.Mul(fp, fp2, pool)
+ y0.Mul(y0, fp3, pool)
+
+ y1, y4, y5 := newGFp12(pool), newGFp12(pool), newGFp12(pool)
+ y1.Conjugate(t1)
+ y5.Conjugate(fu2)
+ y3.Conjugate(y3)
+ y4.Mul(fu, fu2p, pool)
+ y4.Conjugate(y4)
+
+ y6 := newGFp12(pool)
+ y6.Mul(fu3, fu3p, pool)
+ y6.Conjugate(y6)
+
+ t0 := newGFp12(pool)
+ t0.Square(y6, pool)
+ t0.Mul(t0, y4, pool)
+ t0.Mul(t0, y5, pool)
+ t1.Mul(y3, y5, pool)
+ t1.Mul(t1, t0, pool)
+ t0.Mul(t0, y2, pool)
+ t1.Square(t1, pool)
+ t1.Mul(t1, t0, pool)
+ t1.Square(t1, pool)
+ t0.Mul(t1, y1, pool)
+ t1.Mul(t1, y0, pool)
+ t0.Square(t0, pool)
+ t0.Mul(t0, t1, pool)
+
+ inv.Put(pool)
+ t1.Put(pool)
+ t2.Put(pool)
+ fp.Put(pool)
+ fp2.Put(pool)
+ fp3.Put(pool)
+ fu.Put(pool)
+ fu2.Put(pool)
+ fu3.Put(pool)
+ fu2p.Put(pool)
+ fu3p.Put(pool)
+ y0.Put(pool)
+ y1.Put(pool)
+ y2.Put(pool)
+ y3.Put(pool)
+ y4.Put(pool)
+ y5.Put(pool)
+ y6.Put(pool)
+
+ return t0
+}
+
+func optimalAte(a *twistPoint, b *curvePoint, pool *bnPool) *gfP12 {
+ e := miller(a, b, pool)
+ ret := finalExponentiation(e, pool)
+ e.Put(pool)
+
+ if a.IsInfinity() || b.IsInfinity() {
+ ret.SetOne()
+ }
+
+ return ret
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/twist.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/twist.go
new file mode 100644
index 00000000000..4f8b3fede42
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/bn256/twist.go
@@ -0,0 +1,249 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bn256
+
+import (
+ "math/big"
+)
+
+// twistPoint implements the elliptic curve y²=x³+3/ξ over GF(p²). Points are
+// kept in Jacobian form and t=z² when valid. The group G₂ is the set of
+// n-torsion points of this curve over GF(p²) (where n = Order).
+type twistPoint struct {
+ x, y, z, t *gfP2
+}
+
+var twistB = &gfP2{
+ bigFromBase10("6500054969564660373279643874235990574282535810762300357187714502686418407178"),
+ bigFromBase10("45500384786952622612957507119651934019977750675336102500314001518804928850249"),
+}
+
+// twistGen is the generator of group G₂.
+var twistGen = &twistPoint{
+ &gfP2{
+ bigFromBase10("21167961636542580255011770066570541300993051739349375019639421053990175267184"),
+ bigFromBase10("64746500191241794695844075326670126197795977525365406531717464316923369116492"),
+ },
+ &gfP2{
+ bigFromBase10("20666913350058776956210519119118544732556678129809273996262322366050359951122"),
+ bigFromBase10("17778617556404439934652658462602675281523610326338642107814333856843981424549"),
+ },
+ &gfP2{
+ bigFromBase10("0"),
+ bigFromBase10("1"),
+ },
+ &gfP2{
+ bigFromBase10("0"),
+ bigFromBase10("1"),
+ },
+}
+
+func newTwistPoint(pool *bnPool) *twistPoint {
+ return &twistPoint{
+ newGFp2(pool),
+ newGFp2(pool),
+ newGFp2(pool),
+ newGFp2(pool),
+ }
+}
+
+func (c *twistPoint) String() string {
+ return "(" + c.x.String() + ", " + c.y.String() + ", " + c.z.String() + ")"
+}
+
+func (c *twistPoint) Put(pool *bnPool) {
+ c.x.Put(pool)
+ c.y.Put(pool)
+ c.z.Put(pool)
+ c.t.Put(pool)
+}
+
+func (c *twistPoint) Set(a *twistPoint) {
+ c.x.Set(a.x)
+ c.y.Set(a.y)
+ c.z.Set(a.z)
+ c.t.Set(a.t)
+}
+
+// IsOnCurve returns true iff c is on the curve; c must be in affine form.
+func (c *twistPoint) IsOnCurve() bool {
+ pool := new(bnPool)
+ yy := newGFp2(pool).Square(c.y, pool)
+ xxx := newGFp2(pool).Square(c.x, pool)
+ xxx.Mul(xxx, c.x, pool)
+ yy.Sub(yy, xxx)
+ yy.Sub(yy, twistB)
+ yy.Minimal()
+ return yy.x.Sign() == 0 && yy.y.Sign() == 0
+}
+
+func (c *twistPoint) SetInfinity() {
+ c.z.SetZero()
+}
+
+func (c *twistPoint) IsInfinity() bool {
+ return c.z.IsZero()
+}
+
+func (c *twistPoint) Add(a, b *twistPoint, pool *bnPool) {
+ // For additional comments, see the same function in curve.go.
+
+ if a.IsInfinity() {
+ c.Set(b)
+ return
+ }
+ if b.IsInfinity() {
+ c.Set(a)
+ return
+ }
+
+ // See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
+ z1z1 := newGFp2(pool).Square(a.z, pool)
+ z2z2 := newGFp2(pool).Square(b.z, pool)
+ u1 := newGFp2(pool).Mul(a.x, z2z2, pool)
+ u2 := newGFp2(pool).Mul(b.x, z1z1, pool)
+
+ t := newGFp2(pool).Mul(b.z, z2z2, pool)
+ s1 := newGFp2(pool).Mul(a.y, t, pool)
+
+ t.Mul(a.z, z1z1, pool)
+ s2 := newGFp2(pool).Mul(b.y, t, pool)
+
+ h := newGFp2(pool).Sub(u2, u1)
+ xEqual := h.IsZero()
+
+ t.Add(h, h)
+ i := newGFp2(pool).Square(t, pool)
+ j := newGFp2(pool).Mul(h, i, pool)
+
+ t.Sub(s2, s1)
+ yEqual := t.IsZero()
+ if xEqual && yEqual {
+ c.Double(a, pool)
+ return
+ }
+ r := newGFp2(pool).Add(t, t)
+
+ v := newGFp2(pool).Mul(u1, i, pool)
+
+ t4 := newGFp2(pool).Square(r, pool)
+ t.Add(v, v)
+ t6 := newGFp2(pool).Sub(t4, j)
+ c.x.Sub(t6, t)
+
+ t.Sub(v, c.x) // t7
+ t4.Mul(s1, j, pool) // t8
+ t6.Add(t4, t4) // t9
+ t4.Mul(r, t, pool) // t10
+ c.y.Sub(t4, t6)
+
+ t.Add(a.z, b.z) // t11
+ t4.Square(t, pool) // t12
+ t.Sub(t4, z1z1) // t13
+ t4.Sub(t, z2z2) // t14
+ c.z.Mul(t4, h, pool)
+
+ z1z1.Put(pool)
+ z2z2.Put(pool)
+ u1.Put(pool)
+ u2.Put(pool)
+ t.Put(pool)
+ s1.Put(pool)
+ s2.Put(pool)
+ h.Put(pool)
+ i.Put(pool)
+ j.Put(pool)
+ r.Put(pool)
+ v.Put(pool)
+ t4.Put(pool)
+ t6.Put(pool)
+}
+
+func (c *twistPoint) Double(a *twistPoint, pool *bnPool) {
+ // See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
+ A := newGFp2(pool).Square(a.x, pool)
+ B := newGFp2(pool).Square(a.y, pool)
+ C := newGFp2(pool).Square(B, pool)
+
+ t := newGFp2(pool).Add(a.x, B)
+ t2 := newGFp2(pool).Square(t, pool)
+ t.Sub(t2, A)
+ t2.Sub(t, C)
+ d := newGFp2(pool).Add(t2, t2)
+ t.Add(A, A)
+ e := newGFp2(pool).Add(t, A)
+ f := newGFp2(pool).Square(e, pool)
+
+ t.Add(d, d)
+ c.x.Sub(f, t)
+
+ t.Add(C, C)
+ t2.Add(t, t)
+ t.Add(t2, t2)
+ c.y.Sub(d, c.x)
+ t2.Mul(e, c.y, pool)
+ c.y.Sub(t2, t)
+
+ t.Mul(a.y, a.z, pool)
+ c.z.Add(t, t)
+
+ A.Put(pool)
+ B.Put(pool)
+ C.Put(pool)
+ t.Put(pool)
+ t2.Put(pool)
+ d.Put(pool)
+ e.Put(pool)
+ f.Put(pool)
+}
+
+func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int, pool *bnPool) *twistPoint {
+ sum := newTwistPoint(pool)
+ sum.SetInfinity()
+ t := newTwistPoint(pool)
+
+ for i := scalar.BitLen(); i >= 0; i-- {
+ t.Double(sum, pool)
+ if scalar.Bit(i) != 0 {
+ sum.Add(t, a, pool)
+ } else {
+ sum.Set(t)
+ }
+ }
+
+ c.Set(sum)
+ sum.Put(pool)
+ t.Put(pool)
+ return c
+}
+
+func (c *twistPoint) MakeAffine(pool *bnPool) *twistPoint {
+ if c.z.IsOne() {
+ return c
+ }
+
+ zInv := newGFp2(pool).Invert(c.z, pool)
+ t := newGFp2(pool).Mul(c.y, zInv, pool)
+ zInv2 := newGFp2(pool).Square(zInv, pool)
+ c.y.Mul(t, zInv2, pool)
+ t.Mul(c.x, zInv2, pool)
+ c.x.Set(t)
+ c.z.SetOne()
+ c.t.SetOne()
+
+ zInv.Put(pool)
+ t.Put(pool)
+ zInv2.Put(pool)
+
+ return c
+}
+
+func (c *twistPoint) Negative(a *twistPoint, pool *bnPool) {
+ c.x.Set(a.x)
+ c.y.SetZero()
+ c.y.Sub(c.y, a.y)
+ c.z.Set(a.z)
+ c.t.SetZero()
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/cast5/cast5.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/cast5/cast5.go
new file mode 100644
index 00000000000..0b4af37bdc2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/cast5/cast5.go
@@ -0,0 +1,526 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
+// OpenPGP cipher.
+package cast5 // import "golang.org/x/crypto/cast5"
+
+import "errors"
+
+const BlockSize = 8
+const KeySize = 16
+
+type Cipher struct {
+ masking [16]uint32
+ rotate [16]uint8
+}
+
+func NewCipher(key []byte) (c *Cipher, err error) {
+ if len(key) != KeySize {
+ return nil, errors.New("CAST5: keys must be 16 bytes")
+ }
+
+ c = new(Cipher)
+ c.keySchedule(key)
+ return
+}
+
+func (c *Cipher) BlockSize() int {
+ return BlockSize
+}
+
+func (c *Cipher) Encrypt(dst, src []byte) {
+ l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+ r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+
+ l, r = r, l^f1(r, c.masking[0], c.rotate[0])
+ l, r = r, l^f2(r, c.masking[1], c.rotate[1])
+ l, r = r, l^f3(r, c.masking[2], c.rotate[2])
+ l, r = r, l^f1(r, c.masking[3], c.rotate[3])
+
+ l, r = r, l^f2(r, c.masking[4], c.rotate[4])
+ l, r = r, l^f3(r, c.masking[5], c.rotate[5])
+ l, r = r, l^f1(r, c.masking[6], c.rotate[6])
+ l, r = r, l^f2(r, c.masking[7], c.rotate[7])
+
+ l, r = r, l^f3(r, c.masking[8], c.rotate[8])
+ l, r = r, l^f1(r, c.masking[9], c.rotate[9])
+ l, r = r, l^f2(r, c.masking[10], c.rotate[10])
+ l, r = r, l^f3(r, c.masking[11], c.rotate[11])
+
+ l, r = r, l^f1(r, c.masking[12], c.rotate[12])
+ l, r = r, l^f2(r, c.masking[13], c.rotate[13])
+ l, r = r, l^f3(r, c.masking[14], c.rotate[14])
+ l, r = r, l^f1(r, c.masking[15], c.rotate[15])
+
+ dst[0] = uint8(r >> 24)
+ dst[1] = uint8(r >> 16)
+ dst[2] = uint8(r >> 8)
+ dst[3] = uint8(r)
+ dst[4] = uint8(l >> 24)
+ dst[5] = uint8(l >> 16)
+ dst[6] = uint8(l >> 8)
+ dst[7] = uint8(l)
+}
+
+func (c *Cipher) Decrypt(dst, src []byte) {
+ l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+ r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+
+ l, r = r, l^f1(r, c.masking[15], c.rotate[15])
+ l, r = r, l^f3(r, c.masking[14], c.rotate[14])
+ l, r = r, l^f2(r, c.masking[13], c.rotate[13])
+ l, r = r, l^f1(r, c.masking[12], c.rotate[12])
+
+ l, r = r, l^f3(r, c.masking[11], c.rotate[11])
+ l, r = r, l^f2(r, c.masking[10], c.rotate[10])
+ l, r = r, l^f1(r, c.masking[9], c.rotate[9])
+ l, r = r, l^f3(r, c.masking[8], c.rotate[8])
+
+ l, r = r, l^f2(r, c.masking[7], c.rotate[7])
+ l, r = r, l^f1(r, c.masking[6], c.rotate[6])
+ l, r = r, l^f3(r, c.masking[5], c.rotate[5])
+ l, r = r, l^f2(r, c.masking[4], c.rotate[4])
+
+ l, r = r, l^f1(r, c.masking[3], c.rotate[3])
+ l, r = r, l^f3(r, c.masking[2], c.rotate[2])
+ l, r = r, l^f2(r, c.masking[1], c.rotate[1])
+ l, r = r, l^f1(r, c.masking[0], c.rotate[0])
+
+ dst[0] = uint8(r >> 24)
+ dst[1] = uint8(r >> 16)
+ dst[2] = uint8(r >> 8)
+ dst[3] = uint8(r)
+ dst[4] = uint8(l >> 24)
+ dst[5] = uint8(l >> 16)
+ dst[6] = uint8(l >> 8)
+ dst[7] = uint8(l)
+}
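A minimal encrypt/decrypt round trip over a single 8-byte block, assuming the vendored package is importable under its upstream path golang.org/x/crypto/cast5 (all-zero key for illustration only):

package main

import (
	"fmt"

	"golang.org/x/crypto/cast5"
)

func main() {
	key := make([]byte, cast5.KeySize) // 16 zero bytes, demo only
	c, err := cast5.NewCipher(key)
	if err != nil {
		panic(err)
	}

	src := []byte("8 bytes!") // exactly one cast5.BlockSize block
	dst := make([]byte, cast5.BlockSize)
	c.Encrypt(dst, src)

	back := make([]byte, cast5.BlockSize)
	c.Decrypt(back, dst)
	fmt.Printf("%s\n", back) // prints: 8 bytes!
}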
+
+type keyScheduleA [4][7]uint8
+type keyScheduleB [4][5]uint8
+
+// keyScheduleRound contains the magic values for a round of the key schedule.
+// keyScheduleA deals with lines like:
+// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8]
+// Conceptually, both x and z are in the same array, x first. The first
+// element describes which word of this array gets written to and the
+// second, which word gets read. So, for the line above, it's "4, 0", because
+// it's writing to the first word of z, which, being after x, is word 4, and
+// reading from the first word of x: word 0.
+//
+// Next are the indexes into the S-boxes. Now the array is treated as bytes. So
+// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear
+// that it's z that we're indexing.
+//
+// keyScheduleB deals with lines like:
+// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2]
+// "K1" is ignored because key words are always written in order. So the five
+// elements are the S-box indexes. They use the same form as in keyScheduleA,
+// above.
+
+type keyScheduleRound struct{}
+type keySchedule []keyScheduleRound
+
+var schedule = []struct {
+ a keyScheduleA
+ b keyScheduleB
+}{
+ {
+ keyScheduleA{
+ {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8},
+ {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
+ {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
+ {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
+ },
+ keyScheduleB{
+ {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2},
+ {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6},
+ {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9},
+ {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc},
+ },
+ },
+ {
+ keyScheduleA{
+ {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
+ {1, 4, 0, 2, 1, 3, 16 + 2},
+ {2, 5, 7, 6, 5, 4, 16 + 1},
+ {3, 7, 0xa, 9, 0xb, 8, 16 + 3},
+ },
+ keyScheduleB{
+ {3, 2, 0xc, 0xd, 8},
+ {1, 0, 0xe, 0xf, 0xd},
+ {7, 6, 8, 9, 3},
+ {5, 4, 0xa, 0xb, 7},
+ },
+ },
+ {
+ keyScheduleA{
+ {4, 0, 0xd, 0xf, 0xc, 0xe, 8},
+ {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa},
+ {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9},
+ {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb},
+ },
+ keyScheduleB{
+ {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9},
+ {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc},
+ {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2},
+ {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6},
+ },
+ },
+ {
+ keyScheduleA{
+ {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0},
+ {1, 4, 0, 2, 1, 3, 16 + 2},
+ {2, 5, 7, 6, 5, 4, 16 + 1},
+ {3, 7, 0xa, 9, 0xb, 8, 16 + 3},
+ },
+ keyScheduleB{
+ {8, 9, 7, 6, 3},
+ {0xa, 0xb, 5, 4, 7},
+ {0xc, 0xd, 3, 2, 8},
+ {0xe, 0xf, 1, 0, 0xd},
+ },
+ },
+}
+
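+// keySchedule expands the 16-byte key in in into the per-round masking words
+// and rotation amounts stored in c.masking and c.rotate.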
+func (c *Cipher) keySchedule(in []byte) {
+ var t [8]uint32
+ var k [32]uint32
+
+ for i := 0; i < 4; i++ {
+ j := i * 4
+ t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3])
+ }
+
+ x := []byte{6, 7, 4, 5}
+ ki := 0
+
+ for half := 0; half < 2; half++ {
+ for _, round := range schedule {
+ for j := 0; j < 4; j++ {
+ var a [7]uint8
+ copy(a[:], round.a[j][:])
+ w := t[a[1]]
+ w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff]
+ w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff]
+ w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff]
+ w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff]
+ w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff]
+ t[a[0]] = w
+ }
+
+ for j := 0; j < 4; j++ {
+ var b [5]uint8
+ copy(b[:], round.b[j][:])
+ w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff]
+ w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff]
+ w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff]
+ w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff]
+ w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff]
+ k[ki] = w
+ ki++
+ }
+ }
+ }
+
+ for i := 0; i < 16; i++ {
+ c.masking[i] = k[i]
+ c.rotate[i] = uint8(k[16+i] & 0x1f)
+ }
+}
+
+// These are the three 'f' functions. See RFC 2144, section 2.2.
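+// Each combines the data with a masking key word, rotates the result, and
+// then mixes four S-box lookups; the three functions differ only in which of
+// +, ^ and - is used at each step.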
+func f1(d, m uint32, r uint8) uint32 {
+ t := m + d
+ I := (t << r) | (t >> (32 - r))
+ return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff]
+}
+
+func f2(d, m uint32, r uint8) uint32 {
+ t := m ^ d
+ I := (t << r) | (t >> (32 - r))
+ return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff]
+}
+
+func f3(d, m uint32, r uint8) uint32 {
+ t := m - d
+ I := (t << r) | (t >> (32 - r))
+ return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff]
+}
+
+var sBox = [8][256]uint32{
+ {
+ 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949,
+ 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e,
+ 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d,
+ 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0,
+ 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7,
+ 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935,
+ 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d,
+ 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50,
+ 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe,
+ 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3,
+ 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167,
+ 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291,
+ 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779,
+ 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2,
+ 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511,
+ 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d,
+ 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5,
+ 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324,
+ 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c,
+ 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc,
+ 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d,
+ 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96,
+ 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a,
+ 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d,
+ 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd,
+ 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6,
+ 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9,
+ 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872,
+ 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c,
+ 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e,
+ 0xbd91e046, 0x9a56456e, 0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9,
+ 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf,
+ },
+ {
+ 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651,
+ 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3,
+ 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb,
+ 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806,
+ 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b,
+ 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359,
+ 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b,
+ 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c,
+ 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34,
+ 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb,
+ 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd,
+ 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860,
+ 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b,
+ 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304,
+ 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b,
+ 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf,
+ 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c,
+ 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13,
+ 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f,
+ 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6,
+ 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6,
+ 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58,
+ 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906,
+ 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d,
+ 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6,
+ 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4,
+ 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6,
+ 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f,
+ 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249,
+ 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa,
+ 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9,
+ 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1,
+ },
+ {
+ 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90,
+ 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5,
+ 0x11107d9f, 0x07647db9, 0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e,
+ 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240,
+ 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5,
+ 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b,
+ 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71,
+ 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04,
+ 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82,
+ 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15,
+ 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2,
+ 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176,
+ 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148,
+ 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc,
+ 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341,
+ 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e,
+ 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51,
+ 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f,
+ 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a,
+ 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b,
+ 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b,
+ 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5,
+ 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45,
+ 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536,
+ 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc,
+ 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0,
+ 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69,
+ 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2,
+ 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49,
+ 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d,
+ 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a,
+ 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783,
+ },
+ {
+ 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1,
+ 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf,
+ 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15,
+ 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121,
+ 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25,
+ 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5,
+ 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb,
+ 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5,
+ 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d,
+ 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6,
+ 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23,
+ 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003,
+ 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6,
+ 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119,
+ 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24,
+ 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a,
+ 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79,
+ 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df,
+ 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26,
+ 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab,
+ 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7,
+ 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417,
+ 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2,
+ 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2,
+ 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a,
+ 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919,
+ 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef,
+ 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876,
+ 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab,
+ 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04,
+ 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282,
+ 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2,
+ },
+ {
+ 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f,
+ 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a,
+ 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff,
+ 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02,
+ 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a,
+ 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7,
+ 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9,
+ 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981,
+ 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774,
+ 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655,
+ 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2,
+ 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910,
+ 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1,
+ 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da,
+ 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049,
+ 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f,
+ 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba,
+ 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be,
+ 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3,
+ 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840,
+ 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4,
+ 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2,
+ 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7,
+ 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5,
+ 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e,
+ 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e,
+ 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801,
+ 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad,
+ 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0,
+ 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20,
+ 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8,
+ 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4,
+ },
+ {
+ 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac,
+ 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138,
+ 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367,
+ 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98,
+ 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072,
+ 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3,
+ 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd,
+ 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8,
+ 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9,
+ 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54,
+ 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387,
+ 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc,
+ 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf,
+ 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf,
+ 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 0xa8dc8af0, 0x7345c106, 0xf41e232f,
+ 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289,
+ 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950,
+ 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f,
+ 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b,
+ 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be,
+ 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13,
+ 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976,
+ 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0,
+ 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891,
+ 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da,
+ 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc,
+ 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084,
+ 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25,
+ 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121,
+ 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5,
+ 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd,
+ 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f,
+ },
+ {
+ 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f,
+ 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de,
+ 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43,
+ 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19,
+ 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2,
+ 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516,
+ 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88,
+ 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816,
+ 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756,
+ 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a,
+ 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264,
+ 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688,
+ 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28,
+ 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3,
+ 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7,
+ 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06,
+ 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033,
+ 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a,
+ 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 0x488dcf25, 0x36c9d566,
+ 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509,
+ 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962,
+ 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e,
+ 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c,
+ 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c,
+ 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285,
+ 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301,
+ 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be,
+ 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767,
+ 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647,
+ 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914,
+ 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c,
+ 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3,
+ },
+ {
+ 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5,
+ 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc,
+ 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd,
+ 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d,
+ 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2,
+ 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862,
+ 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc,
+ 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c,
+ 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e,
+ 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039,
+ 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8,
+ 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42,
+ 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5,
+ 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472,
+ 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225,
+ 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c,
+ 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb,
+ 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054,
+ 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70,
+ 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc,
+ 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c,
+ 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3,
+ 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 0x2eda7fa4,
+ 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101,
+ 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f,
+ 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e,
+ 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a,
+ 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c,
+ 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384,
+ 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c,
+ 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82,
+ 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e,
+ },
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/cast5/cast5_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/cast5/cast5_test.go
new file mode 100644
index 00000000000..778b272a638
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/cast5/cast5_test.go
@@ -0,0 +1,106 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cast5
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+)
+
+// This test vector is taken from RFC 2144, App B.1.
+// Since the other two test vectors are for reduced-round variants, we can't
+// use them.
+var basicTests = []struct {
+ key, plainText, cipherText string
+}{
+ {
+ "0123456712345678234567893456789a",
+ "0123456789abcdef",
+ "238b4fe5847e44b2",
+ },
+}
+
+func TestBasic(t *testing.T) {
+ for i, test := range basicTests {
+ key, _ := hex.DecodeString(test.key)
+ plainText, _ := hex.DecodeString(test.plainText)
+ expected, _ := hex.DecodeString(test.cipherText)
+
+ c, err := NewCipher(key)
+ if err != nil {
+ t.Errorf("#%d: failed to create Cipher: %s", i, err)
+ continue
+ }
+ var cipherText [BlockSize]byte
+ c.Encrypt(cipherText[:], plainText)
+ if !bytes.Equal(cipherText[:], expected) {
+ t.Errorf("#%d: got:%x want:%x", i, cipherText, expected)
+ }
+
+ var plainTextAgain [BlockSize]byte
+ c.Decrypt(plainTextAgain[:], cipherText[:])
+ if !bytes.Equal(plainTextAgain[:], plainText) {
+ t.Errorf("#%d: got:%x want:%x", i, plainTextAgain, plainText)
+ }
+ }
+}
+
+// TestFull performs the test specified in RFC 2144, App B.2.
+// However, due to the length of time taken, it's disabled here and a more
+// limited version is included, below.
+func TestFull(t *testing.T) {
+ if testing.Short() {
+ // This is too slow for normal testing
+ return
+ }
+
+ a, b := iterate(1000000)
+
+ const expectedA = "eea9d0a249fd3ba6b3436fb89d6dca92"
+ const expectedB = "b2c95eb00c31ad7180ac05b8e83d696e"
+
+ if hex.EncodeToString(a) != expectedA {
+ t.Errorf("a: got:%x want:%s", a, expectedA)
+ }
+ if hex.EncodeToString(b) != expectedB {
+ t.Errorf("b: got:%x want:%s", b, expectedB)
+ }
+}
+
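+// iterate runs the RFC 2144 maintenance test: in each iteration the two
+// halves of a are encrypted under the key b, then the two halves of b under
+// the key a, starting from the shared initial value.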
+func iterate(iterations int) ([]byte, []byte) {
+ const initValueHex = "0123456712345678234567893456789a"
+
+ initValue, _ := hex.DecodeString(initValueHex)
+
+ var a, b [16]byte
+ copy(a[:], initValue)
+ copy(b[:], initValue)
+
+ for i := 0; i < iterations; i++ {
+ c, _ := NewCipher(b[:])
+ c.Encrypt(a[:8], a[:8])
+ c.Encrypt(a[8:], a[8:])
+ c, _ = NewCipher(a[:])
+ c.Encrypt(b[:8], b[:8])
+ c.Encrypt(b[8:], b[8:])
+ }
+
+ return a[:], b[:]
+}
+
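+// TestLimited runs the maintenance test for 1000 iterations rather than the
+// full million used by TestFull.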
+func TestLimited(t *testing.T) {
+ a, b := iterate(1000)
+
+ const expectedA = "23f73b14b02a2ad7dfb9f2c35644798d"
+ const expectedB = "e5bf37eff14c456a40b21ce369370a9f"
+
+ if hex.EncodeToString(a) != expectedA {
+ t.Errorf("a: got:%x want:%s", a, expectedA)
+ }
+ if hex.EncodeToString(b) != expectedB {
+ t.Errorf("b: got:%x want:%s", b, expectedB)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/codereview.cfg b/src/mongo/gotools/vendor/src/golang.org/x/crypto/codereview.cfg
new file mode 100644
index 00000000000..3f8b14b64e8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/codereview.cfg
@@ -0,0 +1 @@
+issuerepo: golang/go
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/const_amd64.s b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/const_amd64.s
new file mode 100644
index 00000000000..797f9b051df
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/const_amd64.s
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
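+// REDMASK51 is 2^51 - 1, the mask used to reduce a radix-2^51 limb to 51 bits.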
+DATA ·REDMASK51(SB)/8, $0x0007FFFFFFFFFFFF
+GLOBL ·REDMASK51(SB), 8, $8
+
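+// _121666_213 is 121666 << 13 == 996687872: the curve constant 121666,
+// presumably pre-scaled by 2^13 for the assembly multiplication.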
+DATA ·_121666_213(SB)/8, $996687872
+GLOBL ·_121666_213(SB), 8, $8
+
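+// _2P0 is 2*(2^51 - 19), the low limb of 2p for p = 2^255 - 19.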
+DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
+GLOBL ·_2P0(SB), 8, $8
+
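+// _2P1234 is 2*(2^51 - 1), the value of limbs 1 through 4 of 2p.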
+DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
+GLOBL ·_2P1234(SB), 8, $8
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/cswap_amd64.s b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/cswap_amd64.s
new file mode 100644
index 00000000000..45484d1b596
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/cswap_amd64.s
@@ -0,0 +1,88 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// func cswap(inout *[5]uint64, v uint64)
+TEXT ·cswap(SB),7,$0
+ MOVQ inout+0(FP),DI
+ MOVQ v+8(FP),SI
+
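+ // CMPQ sets the zero flag iff v == 1; each CMOVQEQ below then exchanges a
+ // pair of limbs only when that flag is set, so the swap is branch-free.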
+ CMPQ SI,$1
+ MOVQ 0(DI),SI
+ MOVQ 80(DI),DX
+ MOVQ 8(DI),CX
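+ // Save SP, align the scratch area to a 32-byte boundary, and spill the
+ // callee-saved registers before loading the five input limbs.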
+ MOVQ 88(DI),R8
+ MOVQ SI,R9
+ CMOVQEQ DX,SI
+ CMOVQEQ R9,DX
+ MOVQ CX,R9
+ CMOVQEQ R8,CX
+ CMOVQEQ R9,R8
+ MOVQ SI,0(DI)
+ MOVQ DX,80(DI)
+ MOVQ CX,8(DI)
+ MOVQ R8,88(DI)
+ MOVQ 16(DI),SI
+ MOVQ 96(DI),DX
+ MOVQ 24(DI),CX
+ MOVQ 104(DI),R8
+ MOVQ SI,R9
+ CMOVQEQ DX,SI
+ CMOVQEQ R9,DX
+ MOVQ CX,R9
+ CMOVQEQ R8,CX
+ CMOVQEQ R9,R8
+ MOVQ SI,16(DI)
+ MOVQ DX,96(DI)
+ MOVQ CX,24(DI)
+ MOVQ R8,104(DI)
+ MOVQ 32(DI),SI
+ MOVQ 112(DI),DX
+ MOVQ 40(DI),CX
+ MOVQ 120(DI),R8
+ MOVQ SI,R9
+ CMOVQEQ DX,SI
+ CMOVQEQ R9,DX
+ MOVQ CX,R9
+ CMOVQEQ R8,CX
+ CMOVQEQ R9,R8
+ MOVQ SI,32(DI)
+ MOVQ DX,112(DI)
+ MOVQ CX,40(DI)
+ MOVQ R8,120(DI)
+ MOVQ 48(DI),SI
+ MOVQ 128(DI),DX
+ MOVQ 56(DI),CX
+ MOVQ 136(DI),R8
+ MOVQ SI,R9
+ CMOVQEQ DX,SI
+ CMOVQEQ R9,DX
+ MOVQ CX,R9
+ CMOVQEQ R8,CX
+ CMOVQEQ R9,R8
+ MOVQ SI,48(DI)
+ MOVQ DX,128(DI)
+ MOVQ CX,56(DI)
+ MOVQ R8,136(DI)
+ MOVQ 64(DI),SI
+ MOVQ 144(DI),DX
+ MOVQ 72(DI),CX
+ MOVQ 152(DI),R8
+ MOVQ SI,R9
+ CMOVQEQ DX,SI
+ CMOVQEQ R9,DX
+ MOVQ CX,R9
+ CMOVQEQ R8,CX
+ CMOVQEQ R9,R8
+ MOVQ SI,64(DI)
+ MOVQ DX,144(DI)
+ MOVQ CX,72(DI)
+ MOVQ R8,152(DI)
+ MOVQ DI,AX
+ MOVQ SI,DX
+ RET
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/curve25519.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/curve25519.go
new file mode 100644
index 00000000000..6918c47fc2e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/curve25519.go
@@ -0,0 +1,841 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// We have an implementation in amd64 assembly, so this code is only run on
+// non-amd64 platforms. The amd64 assembly does not support gccgo.
+// +build !amd64 gccgo appengine
+
+package curve25519
+
+// This code is a port of the public-domain "ref10" implementation of
+// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
+
+// fieldElement represents an element of the field GF(2^255 - 19). An element
+// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
+// context.
+type fieldElement [10]int32
+
+func feZero(fe *fieldElement) {
+ for i := range fe {
+ fe[i] = 0
+ }
+}
+
+func feOne(fe *fieldElement) {
+ feZero(fe)
+ fe[0] = 1
+}
+
+func feAdd(dst, a, b *fieldElement) {
+ for i := range dst {
+ dst[i] = a[i] + b[i]
+ }
+}
+
+func feSub(dst, a, b *fieldElement) {
+ for i := range dst {
+ dst[i] = a[i] - b[i]
+ }
+}
+
+func feCopy(dst, src *fieldElement) {
+ for i := range dst {
+ dst[i] = src[i]
+ }
+}
+
+// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
+//
+// Preconditions: b in {0,1}.
+func feCSwap(f, g *fieldElement, b int32) {
+ var x fieldElement
+ b = -b
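+ // b is 0 or 1, so -b is either 0 or an all-ones mask; the XOR trick below
+ // swaps f and g without any data-dependent branch.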
+ for i := range x {
+ x[i] = b & (f[i] ^ g[i])
+ }
+
+ for i := range f {
+ f[i] ^= x[i]
+ }
+ for i := range g {
+ g[i] ^= x[i]
+ }
+}
+
+// load3 reads a 24-bit, little-endian value from in.
+func load3(in []byte) int64 {
+ var r int64
+ r = int64(in[0])
+ r |= int64(in[1]) << 8
+ r |= int64(in[2]) << 16
+ return r
+}
+
+// load4 reads a 32-bit, little-endian value from in.
+func load4(in []byte) int64 {
+ var r int64
+ r = int64(in[0])
+ r |= int64(in[1]) << 8
+ r |= int64(in[2]) << 16
+ r |= int64(in[3]) << 24
+ return r
+}
+
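+// feFromBytes unpacks the 32-byte little-endian value in src into the ten
+// limbs of dst and performs an initial carry reduction.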
+func feFromBytes(dst *fieldElement, src *[32]byte) {
+ h0 := load4(src[:])
+ h1 := load3(src[4:]) << 6
+ h2 := load3(src[7:]) << 5
+ h3 := load3(src[10:]) << 3
+ h4 := load3(src[13:]) << 2
+ h5 := load4(src[16:])
+ h6 := load3(src[20:]) << 7
+ h7 := load3(src[23:]) << 5
+ h8 := load3(src[26:]) << 4
+ h9 := load3(src[29:]) << 2
+
+ var carry [10]int64
+ carry[9] = (h9 + 1<<24) >> 25
+ h0 += carry[9] * 19
+ h9 -= carry[9] << 25
+ carry[1] = (h1 + 1<<24) >> 25
+ h2 += carry[1]
+ h1 -= carry[1] << 25
+ carry[3] = (h3 + 1<<24) >> 25
+ h4 += carry[3]
+ h3 -= carry[3] << 25
+ carry[5] = (h5 + 1<<24) >> 25
+ h6 += carry[5]
+ h5 -= carry[5] << 25
+ carry[7] = (h7 + 1<<24) >> 25
+ h8 += carry[7]
+ h7 -= carry[7] << 25
+
+ carry[0] = (h0 + 1<<25) >> 26
+ h1 += carry[0]
+ h0 -= carry[0] << 26
+ carry[2] = (h2 + 1<<25) >> 26
+ h3 += carry[2]
+ h2 -= carry[2] << 26
+ carry[4] = (h4 + 1<<25) >> 26
+ h5 += carry[4]
+ h4 -= carry[4] << 26
+ carry[6] = (h6 + 1<<25) >> 26
+ h7 += carry[6]
+ h6 -= carry[6] << 26
+ carry[8] = (h8 + 1<<25) >> 26
+ h9 += carry[8]
+ h8 -= carry[8] << 26
+
+ dst[0] = int32(h0)
+ dst[1] = int32(h1)
+ dst[2] = int32(h2)
+ dst[3] = int32(h3)
+ dst[4] = int32(h4)
+ dst[5] = int32(h5)
+ dst[6] = int32(h6)
+ dst[7] = int32(h7)
+ dst[8] = int32(h8)
+ dst[9] = int32(h9)
+}
+
+// feToBytes marshals h to s.
+// Preconditions:
+// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+// Then 0<y<1.
+//
+// Write r=h-pq.
+// Have 0<=r<=p-1=2^255-20.
+// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+// Write x=r+19(2^-255)r+y.
+// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func feToBytes(s *[32]byte, h *fieldElement) {
+ var carry [10]int32
+
+ q := (19*h[9] + (1 << 24)) >> 25
+ q = (h[0] + q) >> 26
+ q = (h[1] + q) >> 25
+ q = (h[2] + q) >> 26
+ q = (h[3] + q) >> 25
+ q = (h[4] + q) >> 26
+ q = (h[5] + q) >> 25
+ q = (h[6] + q) >> 26
+ q = (h[7] + q) >> 25
+ q = (h[8] + q) >> 26
+ q = (h[9] + q) >> 25
+
+ // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+ h[0] += 19 * q
+ // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+ carry[0] = h[0] >> 26
+ h[1] += carry[0]
+ h[0] -= carry[0] << 26
+ carry[1] = h[1] >> 25
+ h[2] += carry[1]
+ h[1] -= carry[1] << 25
+ carry[2] = h[2] >> 26
+ h[3] += carry[2]
+ h[2] -= carry[2] << 26
+ carry[3] = h[3] >> 25
+ h[4] += carry[3]
+ h[3] -= carry[3] << 25
+ carry[4] = h[4] >> 26
+ h[5] += carry[4]
+ h[4] -= carry[4] << 26
+ carry[5] = h[5] >> 25
+ h[6] += carry[5]
+ h[5] -= carry[5] << 25
+ carry[6] = h[6] >> 26
+ h[7] += carry[6]
+ h[6] -= carry[6] << 26
+ carry[7] = h[7] >> 25
+ h[8] += carry[7]
+ h[7] -= carry[7] << 25
+ carry[8] = h[8] >> 26
+ h[9] += carry[8]
+ h[8] -= carry[8] << 26
+ carry[9] = h[9] >> 25
+ h[9] -= carry[9] << 25
+ // h10 = carry9
+
+ // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+ // Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+ // evidently 2^255 h10-2^255 q = 0.
+ // Goal: Output h[0]+...+2^230 h[9].
+
+ s[0] = byte(h[0] >> 0)
+ s[1] = byte(h[0] >> 8)
+ s[2] = byte(h[0] >> 16)
+ s[3] = byte((h[0] >> 24) | (h[1] << 2))
+ s[4] = byte(h[1] >> 6)
+ s[5] = byte(h[1] >> 14)
+ s[6] = byte((h[1] >> 22) | (h[2] << 3))
+ s[7] = byte(h[2] >> 5)
+ s[8] = byte(h[2] >> 13)
+ s[9] = byte((h[2] >> 21) | (h[3] << 5))
+ s[10] = byte(h[3] >> 3)
+ s[11] = byte(h[3] >> 11)
+ s[12] = byte((h[3] >> 19) | (h[4] << 6))
+ s[13] = byte(h[4] >> 2)
+ s[14] = byte(h[4] >> 10)
+ s[15] = byte(h[4] >> 18)
+ s[16] = byte(h[5] >> 0)
+ s[17] = byte(h[5] >> 8)
+ s[18] = byte(h[5] >> 16)
+ s[19] = byte((h[5] >> 24) | (h[6] << 1))
+ s[20] = byte(h[6] >> 7)
+ s[21] = byte(h[6] >> 15)
+ s[22] = byte((h[6] >> 23) | (h[7] << 3))
+ s[23] = byte(h[7] >> 5)
+ s[24] = byte(h[7] >> 13)
+ s[25] = byte((h[7] >> 21) | (h[8] << 4))
+ s[26] = byte(h[8] >> 4)
+ s[27] = byte(h[8] >> 12)
+ s[28] = byte((h[8] >> 20) | (h[9] << 6))
+ s[29] = byte(h[9] >> 2)
+ s[30] = byte(h[9] >> 10)
+ s[31] = byte(h[9] >> 18)
+}
+
+// feMul calculates h = f * g
+// Can overlap h with f or g.
+//
+// Preconditions:
+// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Notes on implementation strategy:
+//
+// Using schoolbook multiplication.
+// Karatsuba would save a little in some cost models.
+//
+// Most multiplications by 2 and 19 are 32-bit precomputations;
+// cheaper than 64-bit postcomputations.
+//
+// There is one remaining multiplication by 19 in the carry chain;
+// one *19 precomputation can be merged into this,
+// but the resulting data flow is considerably less clean.
+//
+// There are 12 carries below.
+// 10 of them are 2-way parallelizable and vectorizable.
+// Can get away with 11 carries, but then data flow is much deeper.
+//
+// With tighter constraints on inputs can squeeze carries into int32.
+func feMul(h, f, g *fieldElement) {
+ f0 := f[0]
+ f1 := f[1]
+ f2 := f[2]
+ f3 := f[3]
+ f4 := f[4]
+ f5 := f[5]
+ f6 := f[6]
+ f7 := f[7]
+ f8 := f[8]
+ f9 := f[9]
+ g0 := g[0]
+ g1 := g[1]
+ g2 := g[2]
+ g3 := g[3]
+ g4 := g[4]
+ g5 := g[5]
+ g6 := g[6]
+ g7 := g[7]
+ g8 := g[8]
+ g9 := g[9]
+ g1_19 := 19 * g1 // 1.4*2^29
+ g2_19 := 19 * g2 // 1.4*2^30; still ok
+ g3_19 := 19 * g3
+ g4_19 := 19 * g4
+ g5_19 := 19 * g5
+ g6_19 := 19 * g6
+ g7_19 := 19 * g7
+ g8_19 := 19 * g8
+ g9_19 := 19 * g9
+ f1_2 := 2 * f1
+ f3_2 := 2 * f3
+ f5_2 := 2 * f5
+ f7_2 := 2 * f7
+ f9_2 := 2 * f9
+ f0g0 := int64(f0) * int64(g0)
+ f0g1 := int64(f0) * int64(g1)
+ f0g2 := int64(f0) * int64(g2)
+ f0g3 := int64(f0) * int64(g3)
+ f0g4 := int64(f0) * int64(g4)
+ f0g5 := int64(f0) * int64(g5)
+ f0g6 := int64(f0) * int64(g6)
+ f0g7 := int64(f0) * int64(g7)
+ f0g8 := int64(f0) * int64(g8)
+ f0g9 := int64(f0) * int64(g9)
+ f1g0 := int64(f1) * int64(g0)
+ f1g1_2 := int64(f1_2) * int64(g1)
+ f1g2 := int64(f1) * int64(g2)
+ f1g3_2 := int64(f1_2) * int64(g3)
+ f1g4 := int64(f1) * int64(g4)
+ f1g5_2 := int64(f1_2) * int64(g5)
+ f1g6 := int64(f1) * int64(g6)
+ f1g7_2 := int64(f1_2) * int64(g7)
+ f1g8 := int64(f1) * int64(g8)
+ f1g9_38 := int64(f1_2) * int64(g9_19)
+ f2g0 := int64(f2) * int64(g0)
+ f2g1 := int64(f2) * int64(g1)
+ f2g2 := int64(f2) * int64(g2)
+ f2g3 := int64(f2) * int64(g3)
+ f2g4 := int64(f2) * int64(g4)
+ f2g5 := int64(f2) * int64(g5)
+ f2g6 := int64(f2) * int64(g6)
+ f2g7 := int64(f2) * int64(g7)
+ f2g8_19 := int64(f2) * int64(g8_19)
+ f2g9_19 := int64(f2) * int64(g9_19)
+ f3g0 := int64(f3) * int64(g0)
+ f3g1_2 := int64(f3_2) * int64(g1)
+ f3g2 := int64(f3) * int64(g2)
+ f3g3_2 := int64(f3_2) * int64(g3)
+ f3g4 := int64(f3) * int64(g4)
+ f3g5_2 := int64(f3_2) * int64(g5)
+ f3g6 := int64(f3) * int64(g6)
+ f3g7_38 := int64(f3_2) * int64(g7_19)
+ f3g8_19 := int64(f3) * int64(g8_19)
+ f3g9_38 := int64(f3_2) * int64(g9_19)
+ f4g0 := int64(f4) * int64(g0)
+ f4g1 := int64(f4) * int64(g1)
+ f4g2 := int64(f4) * int64(g2)
+ f4g3 := int64(f4) * int64(g3)
+ f4g4 := int64(f4) * int64(g4)
+ f4g5 := int64(f4) * int64(g5)
+ f4g6_19 := int64(f4) * int64(g6_19)
+ f4g7_19 := int64(f4) * int64(g7_19)
+ f4g8_19 := int64(f4) * int64(g8_19)
+ f4g9_19 := int64(f4) * int64(g9_19)
+ f5g0 := int64(f5) * int64(g0)
+ f5g1_2 := int64(f5_2) * int64(g1)
+ f5g2 := int64(f5) * int64(g2)
+ f5g3_2 := int64(f5_2) * int64(g3)
+ f5g4 := int64(f5) * int64(g4)
+ f5g5_38 := int64(f5_2) * int64(g5_19)
+ f5g6_19 := int64(f5) * int64(g6_19)
+ f5g7_38 := int64(f5_2) * int64(g7_19)
+ f5g8_19 := int64(f5) * int64(g8_19)
+ f5g9_38 := int64(f5_2) * int64(g9_19)
+ f6g0 := int64(f6) * int64(g0)
+ f6g1 := int64(f6) * int64(g1)
+ f6g2 := int64(f6) * int64(g2)
+ f6g3 := int64(f6) * int64(g3)
+ f6g4_19 := int64(f6) * int64(g4_19)
+ f6g5_19 := int64(f6) * int64(g5_19)
+ f6g6_19 := int64(f6) * int64(g6_19)
+ f6g7_19 := int64(f6) * int64(g7_19)
+ f6g8_19 := int64(f6) * int64(g8_19)
+ f6g9_19 := int64(f6) * int64(g9_19)
+ f7g0 := int64(f7) * int64(g0)
+ f7g1_2 := int64(f7_2) * int64(g1)
+ f7g2 := int64(f7) * int64(g2)
+ f7g3_38 := int64(f7_2) * int64(g3_19)
+ f7g4_19 := int64(f7) * int64(g4_19)
+ f7g5_38 := int64(f7_2) * int64(g5_19)
+ f7g6_19 := int64(f7) * int64(g6_19)
+ f7g7_38 := int64(f7_2) * int64(g7_19)
+ f7g8_19 := int64(f7) * int64(g8_19)
+ f7g9_38 := int64(f7_2) * int64(g9_19)
+ f8g0 := int64(f8) * int64(g0)
+ f8g1 := int64(f8) * int64(g1)
+ f8g2_19 := int64(f8) * int64(g2_19)
+ f8g3_19 := int64(f8) * int64(g3_19)
+ f8g4_19 := int64(f8) * int64(g4_19)
+ f8g5_19 := int64(f8) * int64(g5_19)
+ f8g6_19 := int64(f8) * int64(g6_19)
+ f8g7_19 := int64(f8) * int64(g7_19)
+ f8g8_19 := int64(f8) * int64(g8_19)
+ f8g9_19 := int64(f8) * int64(g9_19)
+ f9g0 := int64(f9) * int64(g0)
+ f9g1_38 := int64(f9_2) * int64(g1_19)
+ f9g2_19 := int64(f9) * int64(g2_19)
+ f9g3_38 := int64(f9_2) * int64(g3_19)
+ f9g4_19 := int64(f9) * int64(g4_19)
+ f9g5_38 := int64(f9_2) * int64(g5_19)
+ f9g6_19 := int64(f9) * int64(g6_19)
+ f9g7_38 := int64(f9_2) * int64(g7_19)
+ f9g8_19 := int64(f9) * int64(g8_19)
+ f9g9_38 := int64(f9_2) * int64(g9_19)
+ h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
+ h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
+ h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
+ h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
+ h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
+ h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
+ h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
+ h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
+ h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
+ h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
+ var carry [10]int64
+
+ // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
+ // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
+ // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
+ // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
+
+ carry[0] = (h0 + (1 << 25)) >> 26
+ h1 += carry[0]
+ h0 -= carry[0] << 26
+ carry[4] = (h4 + (1 << 25)) >> 26
+ h5 += carry[4]
+ h4 -= carry[4] << 26
+ // |h0| <= 2^25
+ // |h4| <= 2^25
+ // |h1| <= 1.51*2^58
+ // |h5| <= 1.51*2^58
+
+ carry[1] = (h1 + (1 << 24)) >> 25
+ h2 += carry[1]
+ h1 -= carry[1] << 25
+ carry[5] = (h5 + (1 << 24)) >> 25
+ h6 += carry[5]
+ h5 -= carry[5] << 25
+ // |h1| <= 2^24; from now on fits into int32
+ // |h5| <= 2^24; from now on fits into int32
+ // |h2| <= 1.21*2^59
+ // |h6| <= 1.21*2^59
+
+ carry[2] = (h2 + (1 << 25)) >> 26
+ h3 += carry[2]
+ h2 -= carry[2] << 26
+ carry[6] = (h6 + (1 << 25)) >> 26
+ h7 += carry[6]
+ h6 -= carry[6] << 26
+ // |h2| <= 2^25; from now on fits into int32 unchanged
+ // |h6| <= 2^25; from now on fits into int32 unchanged
+ // |h3| <= 1.51*2^58
+ // |h7| <= 1.51*2^58
+
+ carry[3] = (h3 + (1 << 24)) >> 25
+ h4 += carry[3]
+ h3 -= carry[3] << 25
+ carry[7] = (h7 + (1 << 24)) >> 25
+ h8 += carry[7]
+ h7 -= carry[7] << 25
+ // |h3| <= 2^24; from now on fits into int32 unchanged
+ // |h7| <= 2^24; from now on fits into int32 unchanged
+ // |h4| <= 1.52*2^33
+ // |h8| <= 1.52*2^33
+
+ carry[4] = (h4 + (1 << 25)) >> 26
+ h5 += carry[4]
+ h4 -= carry[4] << 26
+ carry[8] = (h8 + (1 << 25)) >> 26
+ h9 += carry[8]
+ h8 -= carry[8] << 26
+ // |h4| <= 2^25; from now on fits into int32 unchanged
+ // |h8| <= 2^25; from now on fits into int32 unchanged
+ // |h5| <= 1.01*2^24
+ // |h9| <= 1.51*2^58
+
+ carry[9] = (h9 + (1 << 24)) >> 25
+ h0 += carry[9] * 19
+ h9 -= carry[9] << 25
+ // |h9| <= 2^24; from now on fits into int32 unchanged
+ // |h0| <= 1.8*2^37
+
+ carry[0] = (h0 + (1 << 25)) >> 26
+ h1 += carry[0]
+ h0 -= carry[0] << 26
+ // |h0| <= 2^25; from now on fits into int32 unchanged
+ // |h1| <= 1.01*2^24
+
+ h[0] = int32(h0)
+ h[1] = int32(h1)
+ h[2] = int32(h2)
+ h[3] = int32(h3)
+ h[4] = int32(h4)
+ h[5] = int32(h5)
+ h[6] = int32(h6)
+ h[7] = int32(h7)
+ h[8] = int32(h8)
+ h[9] = int32(h9)
+}
+
+// feSquare calculates h = f*f. Can overlap h with f.
+//
+// Preconditions:
+// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func feSquare(h, f *fieldElement) {
+ f0 := f[0]
+ f1 := f[1]
+ f2 := f[2]
+ f3 := f[3]
+ f4 := f[4]
+ f5 := f[5]
+ f6 := f[6]
+ f7 := f[7]
+ f8 := f[8]
+ f9 := f[9]
+ f0_2 := 2 * f0
+ f1_2 := 2 * f1
+ f2_2 := 2 * f2
+ f3_2 := 2 * f3
+ f4_2 := 2 * f4
+ f5_2 := 2 * f5
+ f6_2 := 2 * f6
+ f7_2 := 2 * f7
+ f5_38 := 38 * f5 // 1.31*2^30
+ f6_19 := 19 * f6 // 1.31*2^30
+ f7_38 := 38 * f7 // 1.31*2^30
+ f8_19 := 19 * f8 // 1.31*2^30
+ f9_38 := 38 * f9 // 1.31*2^30
+ f0f0 := int64(f0) * int64(f0)
+ f0f1_2 := int64(f0_2) * int64(f1)
+ f0f2_2 := int64(f0_2) * int64(f2)
+ f0f3_2 := int64(f0_2) * int64(f3)
+ f0f4_2 := int64(f0_2) * int64(f4)
+ f0f5_2 := int64(f0_2) * int64(f5)
+ f0f6_2 := int64(f0_2) * int64(f6)
+ f0f7_2 := int64(f0_2) * int64(f7)
+ f0f8_2 := int64(f0_2) * int64(f8)
+ f0f9_2 := int64(f0_2) * int64(f9)
+ f1f1_2 := int64(f1_2) * int64(f1)
+ f1f2_2 := int64(f1_2) * int64(f2)
+ f1f3_4 := int64(f1_2) * int64(f3_2)
+ f1f4_2 := int64(f1_2) * int64(f4)
+ f1f5_4 := int64(f1_2) * int64(f5_2)
+ f1f6_2 := int64(f1_2) * int64(f6)
+ f1f7_4 := int64(f1_2) * int64(f7_2)
+ f1f8_2 := int64(f1_2) * int64(f8)
+ f1f9_76 := int64(f1_2) * int64(f9_38)
+ f2f2 := int64(f2) * int64(f2)
+ f2f3_2 := int64(f2_2) * int64(f3)
+ f2f4_2 := int64(f2_2) * int64(f4)
+ f2f5_2 := int64(f2_2) * int64(f5)
+ f2f6_2 := int64(f2_2) * int64(f6)
+ f2f7_2 := int64(f2_2) * int64(f7)
+ f2f8_38 := int64(f2_2) * int64(f8_19)
+ f2f9_38 := int64(f2) * int64(f9_38)
+ f3f3_2 := int64(f3_2) * int64(f3)
+ f3f4_2 := int64(f3_2) * int64(f4)
+ f3f5_4 := int64(f3_2) * int64(f5_2)
+ f3f6_2 := int64(f3_2) * int64(f6)
+ f3f7_76 := int64(f3_2) * int64(f7_38)
+ f3f8_38 := int64(f3_2) * int64(f8_19)
+ f3f9_76 := int64(f3_2) * int64(f9_38)
+ f4f4 := int64(f4) * int64(f4)
+ f4f5_2 := int64(f4_2) * int64(f5)
+ f4f6_38 := int64(f4_2) * int64(f6_19)
+ f4f7_38 := int64(f4) * int64(f7_38)
+ f4f8_38 := int64(f4_2) * int64(f8_19)
+ f4f9_38 := int64(f4) * int64(f9_38)
+ f5f5_38 := int64(f5) * int64(f5_38)
+ f5f6_38 := int64(f5_2) * int64(f6_19)
+ f5f7_76 := int64(f5_2) * int64(f7_38)
+ f5f8_38 := int64(f5_2) * int64(f8_19)
+ f5f9_76 := int64(f5_2) * int64(f9_38)
+ f6f6_19 := int64(f6) * int64(f6_19)
+ f6f7_38 := int64(f6) * int64(f7_38)
+ f6f8_38 := int64(f6_2) * int64(f8_19)
+ f6f9_38 := int64(f6) * int64(f9_38)
+ f7f7_38 := int64(f7) * int64(f7_38)
+ f7f8_38 := int64(f7_2) * int64(f8_19)
+ f7f9_76 := int64(f7_2) * int64(f9_38)
+ f8f8_19 := int64(f8) * int64(f8_19)
+ f8f9_38 := int64(f8) * int64(f9_38)
+ f9f9_38 := int64(f9) * int64(f9_38)
+ h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
+ h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
+ h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
+ h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
+ h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
+ h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
+ h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
+ h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
+ h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
+ h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
+ var carry [10]int64
+
+ carry[0] = (h0 + (1 << 25)) >> 26
+ h1 += carry[0]
+ h0 -= carry[0] << 26
+ carry[4] = (h4 + (1 << 25)) >> 26
+ h5 += carry[4]
+ h4 -= carry[4] << 26
+
+ carry[1] = (h1 + (1 << 24)) >> 25
+ h2 += carry[1]
+ h1 -= carry[1] << 25
+ carry[5] = (h5 + (1 << 24)) >> 25
+ h6 += carry[5]
+ h5 -= carry[5] << 25
+
+ carry[2] = (h2 + (1 << 25)) >> 26
+ h3 += carry[2]
+ h2 -= carry[2] << 26
+ carry[6] = (h6 + (1 << 25)) >> 26
+ h7 += carry[6]
+ h6 -= carry[6] << 26
+
+ carry[3] = (h3 + (1 << 24)) >> 25
+ h4 += carry[3]
+ h3 -= carry[3] << 25
+ carry[7] = (h7 + (1 << 24)) >> 25
+ h8 += carry[7]
+ h7 -= carry[7] << 25
+
+ carry[4] = (h4 + (1 << 25)) >> 26
+ h5 += carry[4]
+ h4 -= carry[4] << 26
+ carry[8] = (h8 + (1 << 25)) >> 26
+ h9 += carry[8]
+ h8 -= carry[8] << 26
+
+ carry[9] = (h9 + (1 << 24)) >> 25
+ h0 += carry[9] * 19
+ h9 -= carry[9] << 25
+
+ carry[0] = (h0 + (1 << 25)) >> 26
+ h1 += carry[0]
+ h0 -= carry[0] << 26
+
+ h[0] = int32(h0)
+ h[1] = int32(h1)
+ h[2] = int32(h2)
+ h[3] = int32(h3)
+ h[4] = int32(h4)
+ h[5] = int32(h5)
+ h[6] = int32(h6)
+ h[7] = int32(h7)
+ h[8] = int32(h8)
+ h[9] = int32(h9)
+}
+
+// feMul121666 calculates h = f * 121666. Can overlap h with f.
+//
+// Preconditions:
+// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func feMul121666(h, f *fieldElement) {
+ h0 := int64(f[0]) * 121666
+ h1 := int64(f[1]) * 121666
+ h2 := int64(f[2]) * 121666
+ h3 := int64(f[3]) * 121666
+ h4 := int64(f[4]) * 121666
+ h5 := int64(f[5]) * 121666
+ h6 := int64(f[6]) * 121666
+ h7 := int64(f[7]) * 121666
+ h8 := int64(f[8]) * 121666
+ h9 := int64(f[9]) * 121666
+ var carry [10]int64
+
+ carry[9] = (h9 + (1 << 24)) >> 25
+ h0 += carry[9] * 19
+ h9 -= carry[9] << 25
+ carry[1] = (h1 + (1 << 24)) >> 25
+ h2 += carry[1]
+ h1 -= carry[1] << 25
+ carry[3] = (h3 + (1 << 24)) >> 25
+ h4 += carry[3]
+ h3 -= carry[3] << 25
+ carry[5] = (h5 + (1 << 24)) >> 25
+ h6 += carry[5]
+ h5 -= carry[5] << 25
+ carry[7] = (h7 + (1 << 24)) >> 25
+ h8 += carry[7]
+ h7 -= carry[7] << 25
+
+ carry[0] = (h0 + (1 << 25)) >> 26
+ h1 += carry[0]
+ h0 -= carry[0] << 26
+ carry[2] = (h2 + (1 << 25)) >> 26
+ h3 += carry[2]
+ h2 -= carry[2] << 26
+ carry[4] = (h4 + (1 << 25)) >> 26
+ h5 += carry[4]
+ h4 -= carry[4] << 26
+ carry[6] = (h6 + (1 << 25)) >> 26
+ h7 += carry[6]
+ h6 -= carry[6] << 26
+ carry[8] = (h8 + (1 << 25)) >> 26
+ h9 += carry[8]
+ h8 -= carry[8] << 26
+
+ h[0] = int32(h0)
+ h[1] = int32(h1)
+ h[2] = int32(h2)
+ h[3] = int32(h3)
+ h[4] = int32(h4)
+ h[5] = int32(h5)
+ h[6] = int32(h6)
+ h[7] = int32(h7)
+ h[8] = int32(h8)
+ h[9] = int32(h9)
+}
+
+// feInvert sets out = z^-1.
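+// It computes z^(p-2) for p = 2^255 - 19 with a fixed square-and-multiply
+// chain; by Fermat's little theorem, z^(p-2) = z^-1 in GF(p).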
+func feInvert(out, z *fieldElement) {
+ var t0, t1, t2, t3 fieldElement
+ var i int
+
+ feSquare(&t0, z)
+ for i = 1; i < 1; i++ {
+ feSquare(&t0, &t0)
+ }
+ feSquare(&t1, &t0)
+ for i = 1; i < 2; i++ {
+ feSquare(&t1, &t1)
+ }
+ feMul(&t1, z, &t1)
+ feMul(&t0, &t0, &t1)
+ feSquare(&t2, &t0)
+ for i = 1; i < 1; i++ {
+ feSquare(&t2, &t2)
+ }
+ feMul(&t1, &t1, &t2)
+ feSquare(&t2, &t1)
+ for i = 1; i < 5; i++ {
+ feSquare(&t2, &t2)
+ }
+ feMul(&t1, &t2, &t1)
+ feSquare(&t2, &t1)
+ for i = 1; i < 10; i++ {
+ feSquare(&t2, &t2)
+ }
+ feMul(&t2, &t2, &t1)
+ feSquare(&t3, &t2)
+ for i = 1; i < 20; i++ {
+ feSquare(&t3, &t3)
+ }
+ feMul(&t2, &t3, &t2)
+ feSquare(&t2, &t2)
+ for i = 1; i < 10; i++ {
+ feSquare(&t2, &t2)
+ }
+ feMul(&t1, &t2, &t1)
+ feSquare(&t2, &t1)
+ for i = 1; i < 50; i++ {
+ feSquare(&t2, &t2)
+ }
+ feMul(&t2, &t2, &t1)
+ feSquare(&t3, &t2)
+ for i = 1; i < 100; i++ {
+ feSquare(&t3, &t3)
+ }
+ feMul(&t2, &t3, &t2)
+ feSquare(&t2, &t2)
+ for i = 1; i < 50; i++ {
+ feSquare(&t2, &t2)
+ }
+ feMul(&t1, &t2, &t1)
+ feSquare(&t1, &t1)
+ for i = 1; i < 5; i++ {
+ feSquare(&t1, &t1)
+ }
+ feMul(out, &t1, &t0)
+}
+
+func scalarMult(out, in, base *[32]byte) {
+ var e [32]byte
+
+ copy(e[:], in[:])
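+ // Clamp the scalar as specified for curve25519: clear the low three bits
+ // and the top bit, and set bit 254.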
+ e[0] &= 248
+ e[31] &= 127
+ e[31] |= 64
+
+ var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
+ feFromBytes(&x1, base)
+ feOne(&x2)
+ feCopy(&x3, &x1)
+ feOne(&z3)
+
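+ // Montgomery ladder: one differential addition and doubling per scalar
+ // bit, with conditional swaps via feCSwap so that the sequence of field
+ // operations is independent of the secret scalar.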
+ swap := int32(0)
+ for pos := 254; pos >= 0; pos-- {
+ b := e[pos/8] >> uint(pos&7)
+ b &= 1
+ swap ^= int32(b)
+ feCSwap(&x2, &x3, swap)
+ feCSwap(&z2, &z3, swap)
+ swap = int32(b)
+
+ feSub(&tmp0, &x3, &z3)
+ feSub(&tmp1, &x2, &z2)
+ feAdd(&x2, &x2, &z2)
+ feAdd(&z2, &x3, &z3)
+ feMul(&z3, &tmp0, &x2)
+ feMul(&z2, &z2, &tmp1)
+ feSquare(&tmp0, &tmp1)
+ feSquare(&tmp1, &x2)
+ feAdd(&x3, &z3, &z2)
+ feSub(&z2, &z3, &z2)
+ feMul(&x2, &tmp1, &tmp0)
+ feSub(&tmp1, &tmp1, &tmp0)
+ feSquare(&z2, &z2)
+ feMul121666(&z3, &tmp1)
+ feSquare(&x3, &x3)
+ feAdd(&tmp0, &tmp0, &z3)
+ feMul(&z3, &x1, &z2)
+ feMul(&z2, &tmp1, &tmp0)
+ }
+
+ feCSwap(&x2, &x3, swap)
+ feCSwap(&z2, &z3, swap)
+
+ feInvert(&z2, &z2)
+ feMul(&x2, &x2, &z2)
+ feToBytes(out, &x2)
+}
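
The ladder loop above never branches on secret scalar bits: feCSwap (defined
earlier in this file) exchanges the two working points under a mask computed
from the current bit. A single-word sketch of that branch-free swap pattern
(cswap32 is an illustrative helper, not part of the package):

// cswap32 swaps a and b when choice is 1 and leaves them alone when it is 0,
// using a mask instead of a branch so timing does not depend on choice.
func cswap32(a, b *uint32, choice uint32) {
	mask := -choice       // 0x00000000 or 0xffffffff
	t := mask & (*a ^ *b) // the difference, or zero
	*a ^= t
	*b ^= t
}

feCSwap applies the same XOR-and-mask idea limb by limb across the two field
elements.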
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/curve25519_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/curve25519_test.go
new file mode 100644
index 00000000000..14b0ee87cdd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/curve25519_test.go
@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package curve25519
+
+import (
+ "fmt"
+ "testing"
+)
+
+const expectedHex = "89161fde887b2b53de549af483940106ecc114d6982daa98256de23bdf77661a"
+
+func TestBaseScalarMult(t *testing.T) {
+ var a, b [32]byte
+ in := &a
+ out := &b
+ a[0] = 1
+
+ for i := 0; i < 200; i++ {
+ ScalarBaseMult(out, in)
+ in, out = out, in
+ }
+
+ result := fmt.Sprintf("%x", in[:])
+ if result != expectedHex {
+ t.Errorf("incorrect result: got %s, want %s", result, expectedHex)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/doc.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/doc.go
new file mode 100644
index 00000000000..ebeea3c2d6a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/doc.go
@@ -0,0 +1,23 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package curve25519 provides an implementation of scalar multiplication on
+// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html
+package curve25519 // import "golang.org/x/crypto/curve25519"
+
+// basePoint is the x coordinate of the generator of the curve.
+var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+// ScalarMult sets dst to the product in*base where dst and base are the x
+// coordinates of group points and all values are in little-endian form.
+func ScalarMult(dst, in, base *[32]byte) {
+ scalarMult(dst, in, base)
+}
+
+// ScalarBaseMult sets dst to the product in*base where dst and base are the x
+// coordinates of group points, base is the standard generator and all values
+// are in little-endian form.
+func ScalarBaseMult(dst, in *[32]byte) {
+ ScalarMult(dst, in, &basePoint)
+}
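
Together these two entry points form the X25519-style Diffie-Hellman
primitive. A minimal usage sketch, assuming the vendored package is imported
at its upstream path (illustrative, not part of this change):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var aPriv, bPriv, aPub, bPub, s1, s2 [32]byte
	if _, err := rand.Read(aPriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bPriv[:]); err != nil {
		panic(err)
	}

	// Each party publishes a scalar multiple of the generator (x = 9);
	// clamping of the raw random scalars happens inside scalarMult.
	curve25519.ScalarBaseMult(&aPub, &aPriv)
	curve25519.ScalarBaseMult(&bPub, &bPriv)

	// Mixing a private scalar with the peer's public point commutes.
	curve25519.ScalarMult(&s1, &aPriv, &bPub)
	curve25519.ScalarMult(&s2, &bPriv, &aPub)

	fmt.Println(s1 == s2) // true: both sides derive the same shared secret
}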
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/freeze_amd64.s b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/freeze_amd64.s
new file mode 100644
index 00000000000..37599fac043
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/freeze_amd64.s
@@ -0,0 +1,94 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// func freeze(inout *[5]uint64)
+TEXT ·freeze(SB),7,$96-8
+ MOVQ inout+0(FP), DI
+
+ MOVQ SP,R11
+ MOVQ $31,CX
+ NOTQ CX
+ ANDQ CX,SP
+ ADDQ $32,SP
+
+ MOVQ R11,0(SP)
+ MOVQ R12,8(SP)
+ MOVQ R13,16(SP)
+ MOVQ R14,24(SP)
+ MOVQ R15,32(SP)
+ MOVQ BX,40(SP)
+ MOVQ BP,48(SP)
+ MOVQ 0(DI),SI
+ MOVQ 8(DI),DX
+ MOVQ 16(DI),CX
+ MOVQ 24(DI),R8
+ MOVQ 32(DI),R9
+ MOVQ ·REDMASK51(SB),AX
+ MOVQ AX,R10
+ SUBQ $18,R10
+ MOVQ $3,R11
+REDUCELOOP:
+ MOVQ SI,R12
+ SHRQ $51,R12
+ ANDQ AX,SI
+ ADDQ R12,DX
+ MOVQ DX,R12
+ SHRQ $51,R12
+ ANDQ AX,DX
+ ADDQ R12,CX
+ MOVQ CX,R12
+ SHRQ $51,R12
+ ANDQ AX,CX
+ ADDQ R12,R8
+ MOVQ R8,R12
+ SHRQ $51,R12
+ ANDQ AX,R8
+ ADDQ R12,R9
+ MOVQ R9,R12
+ SHRQ $51,R12
+ ANDQ AX,R9
+ IMUL3Q $19,R12,R12
+ ADDQ R12,SI
+ SUBQ $1,R11
+ JA REDUCELOOP
+ MOVQ $1,R12
+ CMPQ R10,SI
+ CMOVQLT R11,R12
+ CMPQ AX,DX
+ CMOVQNE R11,R12
+ CMPQ AX,CX
+ CMOVQNE R11,R12
+ CMPQ AX,R8
+ CMOVQNE R11,R12
+ CMPQ AX,R9
+ CMOVQNE R11,R12
+ NEGQ R12
+ ANDQ R12,AX
+ ANDQ R12,R10
+ SUBQ R10,SI
+ SUBQ AX,DX
+ SUBQ AX,CX
+ SUBQ AX,R8
+ SUBQ AX,R9
+ MOVQ SI,0(DI)
+ MOVQ DX,8(DI)
+ MOVQ CX,16(DI)
+ MOVQ R8,24(DI)
+ MOVQ R9,32(DI)
+ MOVQ 0(SP),R11
+ MOVQ 8(SP),R12
+ MOVQ 16(SP),R13
+ MOVQ 24(SP),R14
+ MOVQ 32(SP),R15
+ MOVQ 40(SP),BX
+ MOVQ 48(SP),BP
+ MOVQ R11,SP
+ MOVQ DI,AX
+ MOVQ SI,DX
+ RET
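
freeze reduces a field element, held as five 51-bit limbs, to its unique
representative mod p = 2^255 - 19: REDMASK51 is 2^51 - 1, the shifted-out
high bits fold back in multiplied by 19 (since 2^255 ≡ 19 mod p), and the
CMOV block at the end subtracts p exactly when the value is already >= p. A
rough plain-Go rendering of the same steps, as a reading aid only (freezeRef
is hypothetical and, unlike the assembly, not constant-time):

// freezeRef reduces t, five 51-bit limbs, to canonical form mod 2^255 - 19.
func freezeRef(t *[5]uint64) {
	const mask = (1 << 51) - 1 // REDMASK51

	// Carry passes; the top carry folds back via 2^255 ≡ 19 (mod p).
	// Three passes settle inputs whose limbs only slightly exceed 51 bits.
	for pass := 0; pass < 3; pass++ {
		c := uint64(0)
		for i := 0; i < 5; i++ {
			t[i] += c
			c = t[i] >> 51
			t[i] &= mask
		}
		t[0] += 19 * c
	}

	// Subtract p exactly when t >= p; in limb form p is
	// (2^51-19, 2^51-1, 2^51-1, 2^51-1, 2^51-1).
	ge := t[0] >= mask-18
	for i := 1; i < 5; i++ {
		ge = ge && t[i] == mask
	}
	if ge {
		t[0] -= mask - 18
		for i := 1; i < 5; i++ {
			t[i] = 0
		}
	}
}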
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/ladderstep_amd64.s
new file mode 100644
index 00000000000..3949f9cfaf4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/ladderstep_amd64.s
@@ -0,0 +1,1398 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// func ladderstep(inout *[5][5]uint64)
+TEXT ·ladderstep(SB),0,$384-8
+ MOVQ inout+0(FP),DI
+
+ MOVQ SP,R11
+ MOVQ $31,CX
+ NOTQ CX
+ ANDQ CX,SP
+ ADDQ $32,SP
+
+ MOVQ R11,0(SP)
+ MOVQ R12,8(SP)
+ MOVQ R13,16(SP)
+ MOVQ R14,24(SP)
+ MOVQ R15,32(SP)
+ MOVQ BX,40(SP)
+ MOVQ BP,48(SP)
+ MOVQ 40(DI),SI
+ MOVQ 48(DI),DX
+ MOVQ 56(DI),CX
+ MOVQ 64(DI),R8
+ MOVQ 72(DI),R9
+ MOVQ SI,AX
+ MOVQ DX,R10
+ MOVQ CX,R11
+ MOVQ R8,R12
+ MOVQ R9,R13
+ ADDQ ·_2P0(SB),AX
+ ADDQ ·_2P1234(SB),R10
+ ADDQ ·_2P1234(SB),R11
+ ADDQ ·_2P1234(SB),R12
+ ADDQ ·_2P1234(SB),R13
+ ADDQ 80(DI),SI
+ ADDQ 88(DI),DX
+ ADDQ 96(DI),CX
+ ADDQ 104(DI),R8
+ ADDQ 112(DI),R9
+ SUBQ 80(DI),AX
+ SUBQ 88(DI),R10
+ SUBQ 96(DI),R11
+ SUBQ 104(DI),R12
+ SUBQ 112(DI),R13
+ MOVQ SI,56(SP)
+ MOVQ DX,64(SP)
+ MOVQ CX,72(SP)
+ MOVQ R8,80(SP)
+ MOVQ R9,88(SP)
+ MOVQ AX,96(SP)
+ MOVQ R10,104(SP)
+ MOVQ R11,112(SP)
+ MOVQ R12,120(SP)
+ MOVQ R13,128(SP)
+ MOVQ 96(SP),AX
+ MULQ 96(SP)
+ MOVQ AX,SI
+ MOVQ DX,CX
+ MOVQ 96(SP),AX
+ SHLQ $1,AX
+ MULQ 104(SP)
+ MOVQ AX,R8
+ MOVQ DX,R9
+ MOVQ 96(SP),AX
+ SHLQ $1,AX
+ MULQ 112(SP)
+ MOVQ AX,R10
+ MOVQ DX,R11
+ MOVQ 96(SP),AX
+ SHLQ $1,AX
+ MULQ 120(SP)
+ MOVQ AX,R12
+ MOVQ DX,R13
+ MOVQ 96(SP),AX
+ SHLQ $1,AX
+ MULQ 128(SP)
+ MOVQ AX,R14
+ MOVQ DX,R15
+ MOVQ 104(SP),AX
+ MULQ 104(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 104(SP),AX
+ SHLQ $1,AX
+ MULQ 112(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 104(SP),AX
+ SHLQ $1,AX
+ MULQ 120(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 104(SP),DX
+ IMUL3Q $38,DX,AX
+ MULQ 128(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 112(SP),AX
+ MULQ 112(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 112(SP),DX
+ IMUL3Q $38,DX,AX
+ MULQ 120(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 112(SP),DX
+ IMUL3Q $38,DX,AX
+ MULQ 128(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 120(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 120(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 120(SP),DX
+ IMUL3Q $38,DX,AX
+ MULQ 128(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 128(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 128(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ ·REDMASK51(SB),DX
+ SHLQ $13,CX:SI
+ ANDQ DX,SI
+ SHLQ $13,R9:R8
+ ANDQ DX,R8
+ ADDQ CX,R8
+ SHLQ $13,R11:R10
+ ANDQ DX,R10
+ ADDQ R9,R10
+ SHLQ $13,R13:R12
+ ANDQ DX,R12
+ ADDQ R11,R12
+ SHLQ $13,R15:R14
+ ANDQ DX,R14
+ ADDQ R13,R14
+ IMUL3Q $19,R15,CX
+ ADDQ CX,SI
+ MOVQ SI,CX
+ SHRQ $51,CX
+ ADDQ R8,CX
+ ANDQ DX,SI
+ MOVQ CX,R8
+ SHRQ $51,CX
+ ADDQ R10,CX
+ ANDQ DX,R8
+ MOVQ CX,R9
+ SHRQ $51,CX
+ ADDQ R12,CX
+ ANDQ DX,R9
+ MOVQ CX,AX
+ SHRQ $51,CX
+ ADDQ R14,CX
+ ANDQ DX,AX
+ MOVQ CX,R10
+ SHRQ $51,CX
+ IMUL3Q $19,CX,CX
+ ADDQ CX,SI
+ ANDQ DX,R10
+ MOVQ SI,136(SP)
+ MOVQ R8,144(SP)
+ MOVQ R9,152(SP)
+ MOVQ AX,160(SP)
+ MOVQ R10,168(SP)
+ MOVQ 56(SP),AX
+ MULQ 56(SP)
+ MOVQ AX,SI
+ MOVQ DX,CX
+ MOVQ 56(SP),AX
+ SHLQ $1,AX
+ MULQ 64(SP)
+ MOVQ AX,R8
+ MOVQ DX,R9
+ MOVQ 56(SP),AX
+ SHLQ $1,AX
+ MULQ 72(SP)
+ MOVQ AX,R10
+ MOVQ DX,R11
+ MOVQ 56(SP),AX
+ SHLQ $1,AX
+ MULQ 80(SP)
+ MOVQ AX,R12
+ MOVQ DX,R13
+ MOVQ 56(SP),AX
+ SHLQ $1,AX
+ MULQ 88(SP)
+ MOVQ AX,R14
+ MOVQ DX,R15
+ MOVQ 64(SP),AX
+ MULQ 64(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 64(SP),AX
+ SHLQ $1,AX
+ MULQ 72(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 64(SP),AX
+ SHLQ $1,AX
+ MULQ 80(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 64(SP),DX
+ IMUL3Q $38,DX,AX
+ MULQ 88(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 72(SP),AX
+ MULQ 72(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 72(SP),DX
+ IMUL3Q $38,DX,AX
+ MULQ 80(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 72(SP),DX
+ IMUL3Q $38,DX,AX
+ MULQ 88(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 80(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 80(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 80(SP),DX
+ IMUL3Q $38,DX,AX
+ MULQ 88(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 88(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 88(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ ·REDMASK51(SB),DX
+ SHLQ $13,CX:SI
+ ANDQ DX,SI
+ SHLQ $13,R9:R8
+ ANDQ DX,R8
+ ADDQ CX,R8
+ SHLQ $13,R11:R10
+ ANDQ DX,R10
+ ADDQ R9,R10
+ SHLQ $13,R13:R12
+ ANDQ DX,R12
+ ADDQ R11,R12
+ SHLQ $13,R15:R14
+ ANDQ DX,R14
+ ADDQ R13,R14
+ IMUL3Q $19,R15,CX
+ ADDQ CX,SI
+ MOVQ SI,CX
+ SHRQ $51,CX
+ ADDQ R8,CX
+ ANDQ DX,SI
+ MOVQ CX,R8
+ SHRQ $51,CX
+ ADDQ R10,CX
+ ANDQ DX,R8
+ MOVQ CX,R9
+ SHRQ $51,CX
+ ADDQ R12,CX
+ ANDQ DX,R9
+ MOVQ CX,AX
+ SHRQ $51,CX
+ ADDQ R14,CX
+ ANDQ DX,AX
+ MOVQ CX,R10
+ SHRQ $51,CX
+ IMUL3Q $19,CX,CX
+ ADDQ CX,SI
+ ANDQ DX,R10
+ MOVQ SI,176(SP)
+ MOVQ R8,184(SP)
+ MOVQ R9,192(SP)
+ MOVQ AX,200(SP)
+ MOVQ R10,208(SP)
+ MOVQ SI,SI
+ MOVQ R8,DX
+ MOVQ R9,CX
+ MOVQ AX,R8
+ MOVQ R10,R9
+ ADDQ ·_2P0(SB),SI
+ ADDQ ·_2P1234(SB),DX
+ ADDQ ·_2P1234(SB),CX
+ ADDQ ·_2P1234(SB),R8
+ ADDQ ·_2P1234(SB),R9
+ SUBQ 136(SP),SI
+ SUBQ 144(SP),DX
+ SUBQ 152(SP),CX
+ SUBQ 160(SP),R8
+ SUBQ 168(SP),R9
+ MOVQ SI,216(SP)
+ MOVQ DX,224(SP)
+ MOVQ CX,232(SP)
+ MOVQ R8,240(SP)
+ MOVQ R9,248(SP)
+ MOVQ 120(DI),SI
+ MOVQ 128(DI),DX
+ MOVQ 136(DI),CX
+ MOVQ 144(DI),R8
+ MOVQ 152(DI),R9
+ MOVQ SI,AX
+ MOVQ DX,R10
+ MOVQ CX,R11
+ MOVQ R8,R12
+ MOVQ R9,R13
+ ADDQ ·_2P0(SB),AX
+ ADDQ ·_2P1234(SB),R10
+ ADDQ ·_2P1234(SB),R11
+ ADDQ ·_2P1234(SB),R12
+ ADDQ ·_2P1234(SB),R13
+ ADDQ 160(DI),SI
+ ADDQ 168(DI),DX
+ ADDQ 176(DI),CX
+ ADDQ 184(DI),R8
+ ADDQ 192(DI),R9
+ SUBQ 160(DI),AX
+ SUBQ 168(DI),R10
+ SUBQ 176(DI),R11
+ SUBQ 184(DI),R12
+ SUBQ 192(DI),R13
+ MOVQ SI,256(SP)
+ MOVQ DX,264(SP)
+ MOVQ CX,272(SP)
+ MOVQ R8,280(SP)
+ MOVQ R9,288(SP)
+ MOVQ AX,296(SP)
+ MOVQ R10,304(SP)
+ MOVQ R11,312(SP)
+ MOVQ R12,320(SP)
+ MOVQ R13,328(SP)
+ MOVQ 280(SP),SI
+ IMUL3Q $19,SI,AX
+ MOVQ AX,336(SP)
+ MULQ 112(SP)
+ MOVQ AX,SI
+ MOVQ DX,CX
+ MOVQ 288(SP),DX
+ IMUL3Q $19,DX,AX
+ MOVQ AX,344(SP)
+ MULQ 104(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 256(SP),AX
+ MULQ 96(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 256(SP),AX
+ MULQ 104(SP)
+ MOVQ AX,R8
+ MOVQ DX,R9
+ MOVQ 256(SP),AX
+ MULQ 112(SP)
+ MOVQ AX,R10
+ MOVQ DX,R11
+ MOVQ 256(SP),AX
+ MULQ 120(SP)
+ MOVQ AX,R12
+ MOVQ DX,R13
+ MOVQ 256(SP),AX
+ MULQ 128(SP)
+ MOVQ AX,R14
+ MOVQ DX,R15
+ MOVQ 264(SP),AX
+ MULQ 96(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 264(SP),AX
+ MULQ 104(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 264(SP),AX
+ MULQ 112(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 264(SP),AX
+ MULQ 120(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 264(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 128(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 272(SP),AX
+ MULQ 96(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 272(SP),AX
+ MULQ 104(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 272(SP),AX
+ MULQ 112(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 272(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 120(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 272(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 128(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 280(SP),AX
+ MULQ 96(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 280(SP),AX
+ MULQ 104(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 336(SP),AX
+ MULQ 120(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 336(SP),AX
+ MULQ 128(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 288(SP),AX
+ MULQ 96(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 344(SP),AX
+ MULQ 112(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 344(SP),AX
+ MULQ 120(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 344(SP),AX
+ MULQ 128(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ ·REDMASK51(SB),DX
+ SHLQ $13,CX:SI
+ ANDQ DX,SI
+ SHLQ $13,R9:R8
+ ANDQ DX,R8
+ ADDQ CX,R8
+ SHLQ $13,R11:R10
+ ANDQ DX,R10
+ ADDQ R9,R10
+ SHLQ $13,R13:R12
+ ANDQ DX,R12
+ ADDQ R11,R12
+ SHLQ $13,R15:R14
+ ANDQ DX,R14
+ ADDQ R13,R14
+ IMUL3Q $19,R15,CX
+ ADDQ CX,SI
+ MOVQ SI,CX
+ SHRQ $51,CX
+ ADDQ R8,CX
+ MOVQ CX,R8
+ SHRQ $51,CX
+ ANDQ DX,SI
+ ADDQ R10,CX
+ MOVQ CX,R9
+ SHRQ $51,CX
+ ANDQ DX,R8
+ ADDQ R12,CX
+ MOVQ CX,AX
+ SHRQ $51,CX
+ ANDQ DX,R9
+ ADDQ R14,CX
+ MOVQ CX,R10
+ SHRQ $51,CX
+ ANDQ DX,AX
+ IMUL3Q $19,CX,CX
+ ADDQ CX,SI
+ ANDQ DX,R10
+ MOVQ SI,96(SP)
+ MOVQ R8,104(SP)
+ MOVQ R9,112(SP)
+ MOVQ AX,120(SP)
+ MOVQ R10,128(SP)
+ MOVQ 320(SP),SI
+ IMUL3Q $19,SI,AX
+ MOVQ AX,256(SP)
+ MULQ 72(SP)
+ MOVQ AX,SI
+ MOVQ DX,CX
+ MOVQ 328(SP),DX
+ IMUL3Q $19,DX,AX
+ MOVQ AX,264(SP)
+ MULQ 64(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 296(SP),AX
+ MULQ 56(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 296(SP),AX
+ MULQ 64(SP)
+ MOVQ AX,R8
+ MOVQ DX,R9
+ MOVQ 296(SP),AX
+ MULQ 72(SP)
+ MOVQ AX,R10
+ MOVQ DX,R11
+ MOVQ 296(SP),AX
+ MULQ 80(SP)
+ MOVQ AX,R12
+ MOVQ DX,R13
+ MOVQ 296(SP),AX
+ MULQ 88(SP)
+ MOVQ AX,R14
+ MOVQ DX,R15
+ MOVQ 304(SP),AX
+ MULQ 56(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 304(SP),AX
+ MULQ 64(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 304(SP),AX
+ MULQ 72(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 304(SP),AX
+ MULQ 80(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 304(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 88(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 312(SP),AX
+ MULQ 56(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 312(SP),AX
+ MULQ 64(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 312(SP),AX
+ MULQ 72(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 312(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 80(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 312(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 88(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 320(SP),AX
+ MULQ 56(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 320(SP),AX
+ MULQ 64(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 256(SP),AX
+ MULQ 80(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 256(SP),AX
+ MULQ 88(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 328(SP),AX
+ MULQ 56(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 264(SP),AX
+ MULQ 72(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 264(SP),AX
+ MULQ 80(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 264(SP),AX
+ MULQ 88(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ ·REDMASK51(SB),DX
+ SHLQ $13,CX:SI
+ ANDQ DX,SI
+ SHLQ $13,R9:R8
+ ANDQ DX,R8
+ ADDQ CX,R8
+ SHLQ $13,R11:R10
+ ANDQ DX,R10
+ ADDQ R9,R10
+ SHLQ $13,R13:R12
+ ANDQ DX,R12
+ ADDQ R11,R12
+ SHLQ $13,R15:R14
+ ANDQ DX,R14
+ ADDQ R13,R14
+ IMUL3Q $19,R15,CX
+ ADDQ CX,SI
+ MOVQ SI,CX
+ SHRQ $51,CX
+ ADDQ R8,CX
+ MOVQ CX,R8
+ SHRQ $51,CX
+ ANDQ DX,SI
+ ADDQ R10,CX
+ MOVQ CX,R9
+ SHRQ $51,CX
+ ANDQ DX,R8
+ ADDQ R12,CX
+ MOVQ CX,AX
+ SHRQ $51,CX
+ ANDQ DX,R9
+ ADDQ R14,CX
+ MOVQ CX,R10
+ SHRQ $51,CX
+ ANDQ DX,AX
+ IMUL3Q $19,CX,CX
+ ADDQ CX,SI
+ ANDQ DX,R10
+ MOVQ SI,DX
+ MOVQ R8,CX
+ MOVQ R9,R11
+ MOVQ AX,R12
+ MOVQ R10,R13
+ ADDQ ·_2P0(SB),DX
+ ADDQ ·_2P1234(SB),CX
+ ADDQ ·_2P1234(SB),R11
+ ADDQ ·_2P1234(SB),R12
+ ADDQ ·_2P1234(SB),R13
+ ADDQ 96(SP),SI
+ ADDQ 104(SP),R8
+ ADDQ 112(SP),R9
+ ADDQ 120(SP),AX
+ ADDQ 128(SP),R10
+ SUBQ 96(SP),DX
+ SUBQ 104(SP),CX
+ SUBQ 112(SP),R11
+ SUBQ 120(SP),R12
+ SUBQ 128(SP),R13
+ MOVQ SI,120(DI)
+ MOVQ R8,128(DI)
+ MOVQ R9,136(DI)
+ MOVQ AX,144(DI)
+ MOVQ R10,152(DI)
+ MOVQ DX,160(DI)
+ MOVQ CX,168(DI)
+ MOVQ R11,176(DI)
+ MOVQ R12,184(DI)
+ MOVQ R13,192(DI)
+ MOVQ 120(DI),AX
+ MULQ 120(DI)
+ MOVQ AX,SI
+ MOVQ DX,CX
+ MOVQ 120(DI),AX
+ SHLQ $1,AX
+ MULQ 128(DI)
+ MOVQ AX,R8
+ MOVQ DX,R9
+ MOVQ 120(DI),AX
+ SHLQ $1,AX
+ MULQ 136(DI)
+ MOVQ AX,R10
+ MOVQ DX,R11
+ MOVQ 120(DI),AX
+ SHLQ $1,AX
+ MULQ 144(DI)
+ MOVQ AX,R12
+ MOVQ DX,R13
+ MOVQ 120(DI),AX
+ SHLQ $1,AX
+ MULQ 152(DI)
+ MOVQ AX,R14
+ MOVQ DX,R15
+ MOVQ 128(DI),AX
+ MULQ 128(DI)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 128(DI),AX
+ SHLQ $1,AX
+ MULQ 136(DI)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 128(DI),AX
+ SHLQ $1,AX
+ MULQ 144(DI)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 128(DI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 152(DI)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 136(DI),AX
+ MULQ 136(DI)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 136(DI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 144(DI)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 136(DI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 152(DI)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 144(DI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 144(DI)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 144(DI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 152(DI)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 152(DI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 152(DI)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ ·REDMASK51(SB),DX
+ SHLQ $13,CX:SI
+ ANDQ DX,SI
+ SHLQ $13,R9:R8
+ ANDQ DX,R8
+ ADDQ CX,R8
+ SHLQ $13,R11:R10
+ ANDQ DX,R10
+ ADDQ R9,R10
+ SHLQ $13,R13:R12
+ ANDQ DX,R12
+ ADDQ R11,R12
+ SHLQ $13,R15:R14
+ ANDQ DX,R14
+ ADDQ R13,R14
+ IMUL3Q $19,R15,CX
+ ADDQ CX,SI
+ MOVQ SI,CX
+ SHRQ $51,CX
+ ADDQ R8,CX
+ ANDQ DX,SI
+ MOVQ CX,R8
+ SHRQ $51,CX
+ ADDQ R10,CX
+ ANDQ DX,R8
+ MOVQ CX,R9
+ SHRQ $51,CX
+ ADDQ R12,CX
+ ANDQ DX,R9
+ MOVQ CX,AX
+ SHRQ $51,CX
+ ADDQ R14,CX
+ ANDQ DX,AX
+ MOVQ CX,R10
+ SHRQ $51,CX
+ IMUL3Q $19,CX,CX
+ ADDQ CX,SI
+ ANDQ DX,R10
+ MOVQ SI,120(DI)
+ MOVQ R8,128(DI)
+ MOVQ R9,136(DI)
+ MOVQ AX,144(DI)
+ MOVQ R10,152(DI)
+ MOVQ 160(DI),AX
+ MULQ 160(DI)
+ MOVQ AX,SI
+ MOVQ DX,CX
+ MOVQ 160(DI),AX
+ SHLQ $1,AX
+ MULQ 168(DI)
+ MOVQ AX,R8
+ MOVQ DX,R9
+ MOVQ 160(DI),AX
+ SHLQ $1,AX
+ MULQ 176(DI)
+ MOVQ AX,R10
+ MOVQ DX,R11
+ MOVQ 160(DI),AX
+ SHLQ $1,AX
+ MULQ 184(DI)
+ MOVQ AX,R12
+ MOVQ DX,R13
+ MOVQ 160(DI),AX
+ SHLQ $1,AX
+ MULQ 192(DI)
+ MOVQ AX,R14
+ MOVQ DX,R15
+ MOVQ 168(DI),AX
+ MULQ 168(DI)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 168(DI),AX
+ SHLQ $1,AX
+ MULQ 176(DI)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 168(DI),AX
+ SHLQ $1,AX
+ MULQ 184(DI)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 168(DI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 192(DI)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 176(DI),AX
+ MULQ 176(DI)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 176(DI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 184(DI)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 176(DI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 192(DI)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 184(DI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 184(DI)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 184(DI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 192(DI)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 192(DI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 192(DI)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ ·REDMASK51(SB),DX
+ SHLQ $13,CX:SI
+ ANDQ DX,SI
+ SHLQ $13,R9:R8
+ ANDQ DX,R8
+ ADDQ CX,R8
+ SHLQ $13,R11:R10
+ ANDQ DX,R10
+ ADDQ R9,R10
+ SHLQ $13,R13:R12
+ ANDQ DX,R12
+ ADDQ R11,R12
+ SHLQ $13,R15:R14
+ ANDQ DX,R14
+ ADDQ R13,R14
+ IMUL3Q $19,R15,CX
+ ADDQ CX,SI
+ MOVQ SI,CX
+ SHRQ $51,CX
+ ADDQ R8,CX
+ ANDQ DX,SI
+ MOVQ CX,R8
+ SHRQ $51,CX
+ ADDQ R10,CX
+ ANDQ DX,R8
+ MOVQ CX,R9
+ SHRQ $51,CX
+ ADDQ R12,CX
+ ANDQ DX,R9
+ MOVQ CX,AX
+ SHRQ $51,CX
+ ADDQ R14,CX
+ ANDQ DX,AX
+ MOVQ CX,R10
+ SHRQ $51,CX
+ IMUL3Q $19,CX,CX
+ ADDQ CX,SI
+ ANDQ DX,R10
+ MOVQ SI,160(DI)
+ MOVQ R8,168(DI)
+ MOVQ R9,176(DI)
+ MOVQ AX,184(DI)
+ MOVQ R10,192(DI)
+ MOVQ 184(DI),SI
+ IMUL3Q $19,SI,AX
+ MOVQ AX,56(SP)
+ MULQ 16(DI)
+ MOVQ AX,SI
+ MOVQ DX,CX
+ MOVQ 192(DI),DX
+ IMUL3Q $19,DX,AX
+ MOVQ AX,64(SP)
+ MULQ 8(DI)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 160(DI),AX
+ MULQ 0(DI)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 160(DI),AX
+ MULQ 8(DI)
+ MOVQ AX,R8
+ MOVQ DX,R9
+ MOVQ 160(DI),AX
+ MULQ 16(DI)
+ MOVQ AX,R10
+ MOVQ DX,R11
+ MOVQ 160(DI),AX
+ MULQ 24(DI)
+ MOVQ AX,R12
+ MOVQ DX,R13
+ MOVQ 160(DI),AX
+ MULQ 32(DI)
+ MOVQ AX,R14
+ MOVQ DX,R15
+ MOVQ 168(DI),AX
+ MULQ 0(DI)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 168(DI),AX
+ MULQ 8(DI)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 168(DI),AX
+ MULQ 16(DI)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 168(DI),AX
+ MULQ 24(DI)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 168(DI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 32(DI)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 176(DI),AX
+ MULQ 0(DI)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 176(DI),AX
+ MULQ 8(DI)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 176(DI),AX
+ MULQ 16(DI)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 176(DI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 24(DI)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 176(DI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 32(DI)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 184(DI),AX
+ MULQ 0(DI)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 184(DI),AX
+ MULQ 8(DI)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 56(SP),AX
+ MULQ 24(DI)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 56(SP),AX
+ MULQ 32(DI)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 192(DI),AX
+ MULQ 0(DI)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 64(SP),AX
+ MULQ 16(DI)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 64(SP),AX
+ MULQ 24(DI)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 64(SP),AX
+ MULQ 32(DI)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ ·REDMASK51(SB),DX
+ SHLQ $13,CX:SI
+ ANDQ DX,SI
+ SHLQ $13,R9:R8
+ ANDQ DX,R8
+ ADDQ CX,R8
+ SHLQ $13,R11:R10
+ ANDQ DX,R10
+ ADDQ R9,R10
+ SHLQ $13,R13:R12
+ ANDQ DX,R12
+ ADDQ R11,R12
+ SHLQ $13,R15:R14
+ ANDQ DX,R14
+ ADDQ R13,R14
+ IMUL3Q $19,R15,CX
+ ADDQ CX,SI
+ MOVQ SI,CX
+ SHRQ $51,CX
+ ADDQ R8,CX
+ MOVQ CX,R8
+ SHRQ $51,CX
+ ANDQ DX,SI
+ ADDQ R10,CX
+ MOVQ CX,R9
+ SHRQ $51,CX
+ ANDQ DX,R8
+ ADDQ R12,CX
+ MOVQ CX,AX
+ SHRQ $51,CX
+ ANDQ DX,R9
+ ADDQ R14,CX
+ MOVQ CX,R10
+ SHRQ $51,CX
+ ANDQ DX,AX
+ IMUL3Q $19,CX,CX
+ ADDQ CX,SI
+ ANDQ DX,R10
+ MOVQ SI,160(DI)
+ MOVQ R8,168(DI)
+ MOVQ R9,176(DI)
+ MOVQ AX,184(DI)
+ MOVQ R10,192(DI)
+ MOVQ 200(SP),SI
+ IMUL3Q $19,SI,AX
+ MOVQ AX,56(SP)
+ MULQ 152(SP)
+ MOVQ AX,SI
+ MOVQ DX,CX
+ MOVQ 208(SP),DX
+ IMUL3Q $19,DX,AX
+ MOVQ AX,64(SP)
+ MULQ 144(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 176(SP),AX
+ MULQ 136(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 176(SP),AX
+ MULQ 144(SP)
+ MOVQ AX,R8
+ MOVQ DX,R9
+ MOVQ 176(SP),AX
+ MULQ 152(SP)
+ MOVQ AX,R10
+ MOVQ DX,R11
+ MOVQ 176(SP),AX
+ MULQ 160(SP)
+ MOVQ AX,R12
+ MOVQ DX,R13
+ MOVQ 176(SP),AX
+ MULQ 168(SP)
+ MOVQ AX,R14
+ MOVQ DX,R15
+ MOVQ 184(SP),AX
+ MULQ 136(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 184(SP),AX
+ MULQ 144(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 184(SP),AX
+ MULQ 152(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 184(SP),AX
+ MULQ 160(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 184(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 168(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 192(SP),AX
+ MULQ 136(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 192(SP),AX
+ MULQ 144(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 192(SP),AX
+ MULQ 152(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 192(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 160(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 192(SP),DX
+ IMUL3Q $19,DX,AX
+ MULQ 168(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 200(SP),AX
+ MULQ 136(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 200(SP),AX
+ MULQ 144(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 56(SP),AX
+ MULQ 160(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 56(SP),AX
+ MULQ 168(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 208(SP),AX
+ MULQ 136(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 64(SP),AX
+ MULQ 152(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 64(SP),AX
+ MULQ 160(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 64(SP),AX
+ MULQ 168(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ ·REDMASK51(SB),DX
+ SHLQ $13,CX:SI
+ ANDQ DX,SI
+ SHLQ $13,R9:R8
+ ANDQ DX,R8
+ ADDQ CX,R8
+ SHLQ $13,R11:R10
+ ANDQ DX,R10
+ ADDQ R9,R10
+ SHLQ $13,R13:R12
+ ANDQ DX,R12
+ ADDQ R11,R12
+ SHLQ $13,R15:R14
+ ANDQ DX,R14
+ ADDQ R13,R14
+ IMUL3Q $19,R15,CX
+ ADDQ CX,SI
+ MOVQ SI,CX
+ SHRQ $51,CX
+ ADDQ R8,CX
+ MOVQ CX,R8
+ SHRQ $51,CX
+ ANDQ DX,SI
+ ADDQ R10,CX
+ MOVQ CX,R9
+ SHRQ $51,CX
+ ANDQ DX,R8
+ ADDQ R12,CX
+ MOVQ CX,AX
+ SHRQ $51,CX
+ ANDQ DX,R9
+ ADDQ R14,CX
+ MOVQ CX,R10
+ SHRQ $51,CX
+ ANDQ DX,AX
+ IMUL3Q $19,CX,CX
+ ADDQ CX,SI
+ ANDQ DX,R10
+ MOVQ SI,40(DI)
+ MOVQ R8,48(DI)
+ MOVQ R9,56(DI)
+ MOVQ AX,64(DI)
+ MOVQ R10,72(DI)
+ MOVQ 216(SP),AX
+ MULQ ·_121666_213(SB)
+ SHRQ $13,AX
+ MOVQ AX,SI
+ MOVQ DX,CX
+ MOVQ 224(SP),AX
+ MULQ ·_121666_213(SB)
+ SHRQ $13,AX
+ ADDQ AX,CX
+ MOVQ DX,R8
+ MOVQ 232(SP),AX
+ MULQ ·_121666_213(SB)
+ SHRQ $13,AX
+ ADDQ AX,R8
+ MOVQ DX,R9
+ MOVQ 240(SP),AX
+ MULQ ·_121666_213(SB)
+ SHRQ $13,AX
+ ADDQ AX,R9
+ MOVQ DX,R10
+ MOVQ 248(SP),AX
+ MULQ ·_121666_213(SB)
+ SHRQ $13,AX
+ ADDQ AX,R10
+ IMUL3Q $19,DX,DX
+ ADDQ DX,SI
+ ADDQ 136(SP),SI
+ ADDQ 144(SP),CX
+ ADDQ 152(SP),R8
+ ADDQ 160(SP),R9
+ ADDQ 168(SP),R10
+ MOVQ SI,80(DI)
+ MOVQ CX,88(DI)
+ MOVQ R8,96(DI)
+ MOVQ R9,104(DI)
+ MOVQ R10,112(DI)
+ MOVQ 104(DI),SI
+ IMUL3Q $19,SI,AX
+ MOVQ AX,56(SP)
+ MULQ 232(SP)
+ MOVQ AX,SI
+ MOVQ DX,CX
+ MOVQ 112(DI),DX
+ IMUL3Q $19,DX,AX
+ MOVQ AX,64(SP)
+ MULQ 224(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 80(DI),AX
+ MULQ 216(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 80(DI),AX
+ MULQ 224(SP)
+ MOVQ AX,R8
+ MOVQ DX,R9
+ MOVQ 80(DI),AX
+ MULQ 232(SP)
+ MOVQ AX,R10
+ MOVQ DX,R11
+ MOVQ 80(DI),AX
+ MULQ 240(SP)
+ MOVQ AX,R12
+ MOVQ DX,R13
+ MOVQ 80(DI),AX
+ MULQ 248(SP)
+ MOVQ AX,R14
+ MOVQ DX,R15
+ MOVQ 88(DI),AX
+ MULQ 216(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 88(DI),AX
+ MULQ 224(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 88(DI),AX
+ MULQ 232(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 88(DI),AX
+ MULQ 240(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 88(DI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 248(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 96(DI),AX
+ MULQ 216(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 96(DI),AX
+ MULQ 224(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 96(DI),AX
+ MULQ 232(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 96(DI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 240(SP)
+ ADDQ AX,SI
+ ADCQ DX,CX
+ MOVQ 96(DI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 248(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 104(DI),AX
+ MULQ 216(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 104(DI),AX
+ MULQ 224(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 56(SP),AX
+ MULQ 240(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 56(SP),AX
+ MULQ 248(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 112(DI),AX
+ MULQ 216(SP)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 64(SP),AX
+ MULQ 232(SP)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 64(SP),AX
+ MULQ 240(SP)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 64(SP),AX
+ MULQ 248(SP)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ ·REDMASK51(SB),DX
+ SHLQ $13,CX:SI
+ ANDQ DX,SI
+ SHLQ $13,R9:R8
+ ANDQ DX,R8
+ ADDQ CX,R8
+ SHLQ $13,R11:R10
+ ANDQ DX,R10
+ ADDQ R9,R10
+ SHLQ $13,R13:R12
+ ANDQ DX,R12
+ ADDQ R11,R12
+ SHLQ $13,R15:R14
+ ANDQ DX,R14
+ ADDQ R13,R14
+ IMUL3Q $19,R15,CX
+ ADDQ CX,SI
+ MOVQ SI,CX
+ SHRQ $51,CX
+ ADDQ R8,CX
+ MOVQ CX,R8
+ SHRQ $51,CX
+ ANDQ DX,SI
+ ADDQ R10,CX
+ MOVQ CX,R9
+ SHRQ $51,CX
+ ANDQ DX,R8
+ ADDQ R12,CX
+ MOVQ CX,AX
+ SHRQ $51,CX
+ ANDQ DX,R9
+ ADDQ R14,CX
+ MOVQ CX,R10
+ SHRQ $51,CX
+ ANDQ DX,AX
+ IMUL3Q $19,CX,CX
+ ADDQ CX,SI
+ ANDQ DX,R10
+ MOVQ SI,80(DI)
+ MOVQ R8,88(DI)
+ MOVQ R9,96(DI)
+ MOVQ AX,104(DI)
+ MOVQ R10,112(DI)
+ MOVQ 0(SP),R11
+ MOVQ 8(SP),R12
+ MOVQ 16(SP),R13
+ MOVQ 24(SP),R14
+ MOVQ 32(SP),R15
+ MOVQ 40(SP),BX
+ MOVQ 48(SP),BP
+ MOVQ R11,SP
+ MOVQ DI,AX
+ MOVQ SI,DX
+ RET
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/mont25519_amd64.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/mont25519_amd64.go
new file mode 100644
index 00000000000..5822bd53383
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/mont25519_amd64.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+package curve25519
+
+// These functions are implemented in the .s files. The names of the functions
+// in the rest of the file are also taken from the SUPERCOP sources to help
+// people following along.
+
+//go:noescape
+func cswap(inout *[5]uint64, v uint64)
+
+//go:noescape
+func ladderstep(inout *[5][5]uint64)
+
+//go:noescape
+func freeze(inout *[5]uint64)
+
+//go:noescape
+func mul(dest, a, b *[5]uint64)
+
+//go:noescape
+func square(out, in *[5]uint64)
+
+// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
+func mladder(xr, zr *[5]uint64, s *[32]byte) {
+ var work [5][5]uint64
+
+ work[0] = *xr
+ setint(&work[1], 1)
+ setint(&work[2], 0)
+ work[3] = *xr
+ setint(&work[4], 1)
+
+ j := uint(6)
+ var prevbit byte
+
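+	// Walk bits 254..0 of the clamped scalar from high to low (bit 255 is
+	// always clear after clamping). j counts down as a uint, so j-- past
+	// zero wraps above 7 and ends the inner loop for each byte.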
+ for i := 31; i >= 0; i-- {
+ for j < 8 {
+ bit := ((*s)[i] >> j) & 1
+ swap := bit ^ prevbit
+ prevbit = bit
+ cswap(&work[1], uint64(swap))
+ ladderstep(&work)
+ j--
+ }
+ j = 7
+ }
+
+ *xr = work[1]
+ *zr = work[2]
+}
+
+func scalarMult(out, in, base *[32]byte) {
+ var e [32]byte
+ copy(e[:], (*in)[:])
+ e[0] &= 248
+ e[31] &= 127
+ e[31] |= 64
+
+ var t, z [5]uint64
+ unpack(&t, base)
+ mladder(&t, &z, &e)
+ invert(&z, &z)
+ mul(&t, &t, &z)
+ pack(out, &t)
+}
+
+func setint(r *[5]uint64, v uint64) {
+ r[0] = v
+ r[1] = 0
+ r[2] = 0
+ r[3] = 0
+ r[4] = 0
+}
+
+// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
+// order.
+func unpack(r *[5]uint64, x *[32]byte) {
+ r[0] = uint64(x[0]) |
+ uint64(x[1])<<8 |
+ uint64(x[2])<<16 |
+ uint64(x[3])<<24 |
+ uint64(x[4])<<32 |
+ uint64(x[5])<<40 |
+ uint64(x[6]&7)<<48
+
+ r[1] = uint64(x[6])>>3 |
+ uint64(x[7])<<5 |
+ uint64(x[8])<<13 |
+ uint64(x[9])<<21 |
+ uint64(x[10])<<29 |
+ uint64(x[11])<<37 |
+ uint64(x[12]&63)<<45
+
+ r[2] = uint64(x[12])>>6 |
+ uint64(x[13])<<2 |
+ uint64(x[14])<<10 |
+ uint64(x[15])<<18 |
+ uint64(x[16])<<26 |
+ uint64(x[17])<<34 |
+ uint64(x[18])<<42 |
+ uint64(x[19]&1)<<50
+
+ r[3] = uint64(x[19])>>1 |
+ uint64(x[20])<<7 |
+ uint64(x[21])<<15 |
+ uint64(x[22])<<23 |
+ uint64(x[23])<<31 |
+ uint64(x[24])<<39 |
+ uint64(x[25]&15)<<47
+
+ r[4] = uint64(x[25])>>4 |
+ uint64(x[26])<<4 |
+ uint64(x[27])<<12 |
+ uint64(x[28])<<20 |
+ uint64(x[29])<<28 |
+ uint64(x[30])<<36 |
+ uint64(x[31]&127)<<44
+}
+
+// pack sets out = x where out is the usual, little-endian form of the 5,
+// 51-bit limbs in x.
+func pack(out *[32]byte, x *[5]uint64) {
+ t := *x
+ freeze(&t)
+
+ out[0] = byte(t[0])
+ out[1] = byte(t[0] >> 8)
+ out[2] = byte(t[0] >> 16)
+ out[3] = byte(t[0] >> 24)
+ out[4] = byte(t[0] >> 32)
+ out[5] = byte(t[0] >> 40)
+ out[6] = byte(t[0] >> 48)
+
+ out[6] ^= byte(t[1]<<3) & 0xf8
+ out[7] = byte(t[1] >> 5)
+ out[8] = byte(t[1] >> 13)
+ out[9] = byte(t[1] >> 21)
+ out[10] = byte(t[1] >> 29)
+ out[11] = byte(t[1] >> 37)
+ out[12] = byte(t[1] >> 45)
+
+ out[12] ^= byte(t[2]<<6) & 0xc0
+ out[13] = byte(t[2] >> 2)
+ out[14] = byte(t[2] >> 10)
+ out[15] = byte(t[2] >> 18)
+ out[16] = byte(t[2] >> 26)
+ out[17] = byte(t[2] >> 34)
+ out[18] = byte(t[2] >> 42)
+ out[19] = byte(t[2] >> 50)
+
+ out[19] ^= byte(t[3]<<1) & 0xfe
+ out[20] = byte(t[3] >> 7)
+ out[21] = byte(t[3] >> 15)
+ out[22] = byte(t[3] >> 23)
+ out[23] = byte(t[3] >> 31)
+ out[24] = byte(t[3] >> 39)
+ out[25] = byte(t[3] >> 47)
+
+ out[25] ^= byte(t[4]<<4) & 0xf0
+ out[26] = byte(t[4] >> 4)
+ out[27] = byte(t[4] >> 12)
+ out[28] = byte(t[4] >> 20)
+ out[29] = byte(t[4] >> 28)
+ out[30] = byte(t[4] >> 36)
+ out[31] = byte(t[4] >> 44)
+}
+
+// invert calculates r = x^-1 mod p using Fermat's little theorem.
+func invert(r *[5]uint64, x *[5]uint64) {
+ var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
+
+ square(&z2, x) /* 2 */
+ square(&t, &z2) /* 4 */
+ square(&t, &t) /* 8 */
+ mul(&z9, &t, x) /* 9 */
+ mul(&z11, &z9, &z2) /* 11 */
+ square(&t, &z11) /* 22 */
+ mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
+
+ square(&t, &z2_5_0) /* 2^6 - 2^1 */
+	for i := 1; i < 5; i++ { /* 2^10 - 2^5 */
+ square(&t, &t)
+ }
+ mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
+
+ square(&t, &z2_10_0) /* 2^11 - 2^1 */
+ for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
+ square(&t, &t)
+ }
+ mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
+
+ square(&t, &z2_20_0) /* 2^21 - 2^1 */
+ for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
+ square(&t, &t)
+ }
+ mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
+
+ square(&t, &t) /* 2^41 - 2^1 */
+ for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
+ square(&t, &t)
+ }
+ mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
+
+ square(&t, &z2_50_0) /* 2^51 - 2^1 */
+ for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
+ square(&t, &t)
+ }
+ mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
+
+ square(&t, &z2_100_0) /* 2^101 - 2^1 */
+ for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
+ square(&t, &t)
+ }
+ mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
+
+ square(&t, &t) /* 2^201 - 2^1 */
+ for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
+ square(&t, &t)
+ }
+ mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
+
+ square(&t, &t) /* 2^251 - 2^1 */
+ square(&t, &t) /* 2^252 - 2^2 */
+ square(&t, &t) /* 2^253 - 2^3 */
+
+ square(&t, &t) /* 2^254 - 2^4 */
+
+ square(&t, &t) /* 2^255 - 2^5 */
+ mul(r, &t, &z11) /* 2^255 - 21 */
+}
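
The comments in invert track an addition chain whose final exponent,
2^255 - 21, is exactly p - 2 for p = 2^255 - 19, so by Fermat's little
theorem the result is x^-1 mod p. A quick math/big check of that identity
(standalone, independent of the vendored code):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19)) // 2^255 - 19
	e := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(21)) // 2^255 - 21

	// The chain's final exponent is p - 2.
	fmt.Println(new(big.Int).Sub(p, big.NewInt(2)).Cmp(e) == 0) // true

	// Fermat: x * x^(p-2) ≡ 1 (mod p) for x not a multiple of p.
	x := big.NewInt(123456789)
	inv := new(big.Int).Exp(x, e, p)
	prod := new(big.Int).Mul(x, inv)
	fmt.Println(prod.Mod(prod, p)) // 1
}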
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/mul_amd64.s b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/mul_amd64.s
new file mode 100644
index 00000000000..e48d183ee56
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/mul_amd64.s
@@ -0,0 +1,191 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// func mul(dest, a, b *[5]uint64)
+TEXT ·mul(SB),0,$128-24
+ MOVQ dest+0(FP), DI
+ MOVQ a+8(FP), SI
+ MOVQ b+16(FP), DX
+
+ MOVQ SP,R11
+ MOVQ $31,CX
+ NOTQ CX
+ ANDQ CX,SP
+ ADDQ $32,SP
+
+ MOVQ R11,0(SP)
+ MOVQ R12,8(SP)
+ MOVQ R13,16(SP)
+ MOVQ R14,24(SP)
+ MOVQ R15,32(SP)
+ MOVQ BX,40(SP)
+ MOVQ BP,48(SP)
+ MOVQ DI,56(SP)
+ MOVQ DX,CX
+ MOVQ 24(SI),DX
+ IMUL3Q $19,DX,AX
+ MOVQ AX,64(SP)
+ MULQ 16(CX)
+ MOVQ AX,R8
+ MOVQ DX,R9
+ MOVQ 32(SI),DX
+ IMUL3Q $19,DX,AX
+ MOVQ AX,72(SP)
+ MULQ 8(CX)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 0(SI),AX
+ MULQ 0(CX)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 0(SI),AX
+ MULQ 8(CX)
+ MOVQ AX,R10
+ MOVQ DX,R11
+ MOVQ 0(SI),AX
+ MULQ 16(CX)
+ MOVQ AX,R12
+ MOVQ DX,R13
+ MOVQ 0(SI),AX
+ MULQ 24(CX)
+ MOVQ AX,R14
+ MOVQ DX,R15
+ MOVQ 0(SI),AX
+ MULQ 32(CX)
+ MOVQ AX,BX
+ MOVQ DX,BP
+ MOVQ 8(SI),AX
+ MULQ 0(CX)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 8(SI),AX
+ MULQ 8(CX)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 8(SI),AX
+ MULQ 16(CX)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 8(SI),AX
+ MULQ 24(CX)
+ ADDQ AX,BX
+ ADCQ DX,BP
+ MOVQ 8(SI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 32(CX)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 16(SI),AX
+ MULQ 0(CX)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 16(SI),AX
+ MULQ 8(CX)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 16(SI),AX
+ MULQ 16(CX)
+ ADDQ AX,BX
+ ADCQ DX,BP
+ MOVQ 16(SI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 24(CX)
+ ADDQ AX,R8
+ ADCQ DX,R9
+ MOVQ 16(SI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 32(CX)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 24(SI),AX
+ MULQ 0(CX)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ 24(SI),AX
+ MULQ 8(CX)
+ ADDQ AX,BX
+ ADCQ DX,BP
+ MOVQ 64(SP),AX
+ MULQ 24(CX)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 64(SP),AX
+ MULQ 32(CX)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 32(SI),AX
+ MULQ 0(CX)
+ ADDQ AX,BX
+ ADCQ DX,BP
+ MOVQ 72(SP),AX
+ MULQ 16(CX)
+ ADDQ AX,R10
+ ADCQ DX,R11
+ MOVQ 72(SP),AX
+ MULQ 24(CX)
+ ADDQ AX,R12
+ ADCQ DX,R13
+ MOVQ 72(SP),AX
+ MULQ 32(CX)
+ ADDQ AX,R14
+ ADCQ DX,R15
+ MOVQ ·REDMASK51(SB),SI
+ SHLQ $13,R9:R8
+ ANDQ SI,R8
+ SHLQ $13,R11:R10
+ ANDQ SI,R10
+ ADDQ R9,R10
+ SHLQ $13,R13:R12
+ ANDQ SI,R12
+ ADDQ R11,R12
+ SHLQ $13,R15:R14
+ ANDQ SI,R14
+ ADDQ R13,R14
+ SHLQ $13,BP:BX
+ ANDQ SI,BX
+ ADDQ R15,BX
+ IMUL3Q $19,BP,DX
+ ADDQ DX,R8
+ MOVQ R8,DX
+ SHRQ $51,DX
+ ADDQ R10,DX
+ MOVQ DX,CX
+ SHRQ $51,DX
+ ANDQ SI,R8
+ ADDQ R12,DX
+ MOVQ DX,R9
+ SHRQ $51,DX
+ ANDQ SI,CX
+ ADDQ R14,DX
+ MOVQ DX,AX
+ SHRQ $51,DX
+ ANDQ SI,R9
+ ADDQ BX,DX
+ MOVQ DX,R10
+ SHRQ $51,DX
+ ANDQ SI,AX
+ IMUL3Q $19,DX,DX
+ ADDQ DX,R8
+ ANDQ SI,R10
+ MOVQ R8,0(DI)
+ MOVQ CX,8(DI)
+ MOVQ R9,16(DI)
+ MOVQ AX,24(DI)
+ MOVQ R10,32(DI)
+ MOVQ 0(SP),R11
+ MOVQ 8(SP),R12
+ MOVQ 16(SP),R13
+ MOVQ 24(SP),R14
+ MOVQ 32(SP),R15
+ MOVQ 40(SP),BX
+ MOVQ 48(SP),BP
+ MOVQ R11,SP
+ MOVQ DI,AX
+ MOVQ SI,DX
+ RET
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/square_amd64.s b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/square_amd64.s
new file mode 100644
index 00000000000..78d1a50ddca
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/curve25519/square_amd64.s
@@ -0,0 +1,153 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// func square(out, in *[5]uint64)
+TEXT ·square(SB),7,$96-16
+ MOVQ out+0(FP), DI
+ MOVQ in+8(FP), SI
+
+ MOVQ SP,R11
+ MOVQ $31,CX
+ NOTQ CX
+ ANDQ CX,SP
+ ADDQ $32, SP
+
+ MOVQ R11,0(SP)
+ MOVQ R12,8(SP)
+ MOVQ R13,16(SP)
+ MOVQ R14,24(SP)
+ MOVQ R15,32(SP)
+ MOVQ BX,40(SP)
+ MOVQ BP,48(SP)
+ MOVQ 0(SI),AX
+ MULQ 0(SI)
+ MOVQ AX,CX
+ MOVQ DX,R8
+ MOVQ 0(SI),AX
+ SHLQ $1,AX
+ MULQ 8(SI)
+ MOVQ AX,R9
+ MOVQ DX,R10
+ MOVQ 0(SI),AX
+ SHLQ $1,AX
+ MULQ 16(SI)
+ MOVQ AX,R11
+ MOVQ DX,R12
+ MOVQ 0(SI),AX
+ SHLQ $1,AX
+ MULQ 24(SI)
+ MOVQ AX,R13
+ MOVQ DX,R14
+ MOVQ 0(SI),AX
+ SHLQ $1,AX
+ MULQ 32(SI)
+ MOVQ AX,R15
+ MOVQ DX,BX
+ MOVQ 8(SI),AX
+ MULQ 8(SI)
+ ADDQ AX,R11
+ ADCQ DX,R12
+ MOVQ 8(SI),AX
+ SHLQ $1,AX
+ MULQ 16(SI)
+ ADDQ AX,R13
+ ADCQ DX,R14
+ MOVQ 8(SI),AX
+ SHLQ $1,AX
+ MULQ 24(SI)
+ ADDQ AX,R15
+ ADCQ DX,BX
+ MOVQ 8(SI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 32(SI)
+ ADDQ AX,CX
+ ADCQ DX,R8
+ MOVQ 16(SI),AX
+ MULQ 16(SI)
+ ADDQ AX,R15
+ ADCQ DX,BX
+ MOVQ 16(SI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 24(SI)
+ ADDQ AX,CX
+ ADCQ DX,R8
+ MOVQ 16(SI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 32(SI)
+ ADDQ AX,R9
+ ADCQ DX,R10
+ MOVQ 24(SI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 24(SI)
+ ADDQ AX,R9
+ ADCQ DX,R10
+ MOVQ 24(SI),DX
+ IMUL3Q $38,DX,AX
+ MULQ 32(SI)
+ ADDQ AX,R11
+ ADCQ DX,R12
+ MOVQ 32(SI),DX
+ IMUL3Q $19,DX,AX
+ MULQ 32(SI)
+ ADDQ AX,R13
+ ADCQ DX,R14
+ MOVQ ·REDMASK51(SB),SI
+ SHLQ $13,R8:CX
+ ANDQ SI,CX
+ SHLQ $13,R10:R9
+ ANDQ SI,R9
+ ADDQ R8,R9
+ SHLQ $13,R12:R11
+ ANDQ SI,R11
+ ADDQ R10,R11
+ SHLQ $13,R14:R13
+ ANDQ SI,R13
+ ADDQ R12,R13
+ SHLQ $13,BX:R15
+ ANDQ SI,R15
+ ADDQ R14,R15
+ IMUL3Q $19,BX,DX
+ ADDQ DX,CX
+ MOVQ CX,DX
+ SHRQ $51,DX
+ ADDQ R9,DX
+ ANDQ SI,CX
+ MOVQ DX,R8
+ SHRQ $51,DX
+ ADDQ R11,DX
+ ANDQ SI,R8
+ MOVQ DX,R9
+ SHRQ $51,DX
+ ADDQ R13,DX
+ ANDQ SI,R9
+ MOVQ DX,AX
+ SHRQ $51,DX
+ ADDQ R15,DX
+ ANDQ SI,AX
+ MOVQ DX,R10
+ SHRQ $51,DX
+ IMUL3Q $19,DX,DX
+ ADDQ DX,CX
+ ANDQ SI,R10
+ MOVQ CX,0(DI)
+ MOVQ R8,8(DI)
+ MOVQ R9,16(DI)
+ MOVQ AX,24(DI)
+ MOVQ R10,32(DI)
+ MOVQ 0(SP),R11
+ MOVQ 8(SP),R12
+ MOVQ 16(SP),R13
+ MOVQ 24(SP),R14
+ MOVQ 32(SP),R15
+ MOVQ 40(SP),BX
+ MOVQ 48(SP),BP
+ MOVQ R11,SP
+ MOVQ DI,AX
+ MOVQ SI,DX
+ RET
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/example_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/example_test.go
new file mode 100644
index 00000000000..df843951291
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/example_test.go
@@ -0,0 +1,61 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hkdf_test
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/sha256"
+ "fmt"
+ "golang.org/x/crypto/hkdf"
+ "io"
+)
+
+// Usage example that expands one master key into three other cryptographically
+// secure keys.
+func Example_usage() {
+ // Underlying hash function to use
+ hash := sha256.New
+
+ // Cryptographically secure master key.
+ master := []byte{0x00, 0x01, 0x02, 0x03} // i.e. NOT this.
+
+	// Non-secret salt, optional (can be nil).
+	// Recommended: a hash-length random value.
+ salt := make([]byte, hash().Size())
+ n, err := io.ReadFull(rand.Reader, salt)
+ if n != len(salt) || err != nil {
+ fmt.Println("error:", err)
+ return
+ }
+
+	// Non-secret, context-specific info, optional (can be nil).
+	// Note that it is independent of the master key.
+ info := []byte{0x03, 0x14, 0x15, 0x92, 0x65}
+
+ // Create the key derivation function
+ hkdf := hkdf.New(hash, master, salt, info)
+
+ // Generate the required keys
+ keys := make([][]byte, 3)
+ for i := 0; i < len(keys); i++ {
+ keys[i] = make([]byte, 24)
+ n, err := io.ReadFull(hkdf, keys[i])
+ if n != len(keys[i]) || err != nil {
+ fmt.Println("error:", err)
+ return
+ }
+ }
+
+	// Keys should contain 192-bit random keys.
+ for i := 1; i <= len(keys); i++ {
+ fmt.Printf("Key #%d: %v\n", i, !bytes.Equal(keys[i-1], make([]byte, 24)))
+ }
+
+ // Output:
+ // Key #1: true
+ // Key #2: true
+ // Key #3: true
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/hkdf.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/hkdf.go
new file mode 100644
index 00000000000..5bc246355a2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/hkdf.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation
+// Function (HKDF) as defined in RFC 5869.
+//
+// HKDF is a cryptographic key derivation function (KDF) with the goal of
+// expanding limited input keying material into one or more cryptographically
+// strong secret keys.
+//
+// RFC 5869: https://tools.ietf.org/html/rfc5869
+package hkdf // import "golang.org/x/crypto/hkdf"
+
+import (
+ "crypto/hmac"
+ "errors"
+ "hash"
+ "io"
+)
+
+type hkdf struct {
+ expander hash.Hash
+ size int
+
+ info []byte
+ counter byte
+
+ prev []byte
+ cache []byte
+}
+
+func (f *hkdf) Read(p []byte) (int, error) {
+ // Check whether enough data can be generated
+ need := len(p)
+ remains := len(f.cache) + int(255-f.counter+1)*f.size
+ if remains < need {
+ return 0, errors.New("hkdf: entropy limit reached")
+ }
+ // Read from the cache, if enough data is present
+ n := copy(p, f.cache)
+ p = p[n:]
+
+ // Fill the buffer
+ for len(p) > 0 {
+ f.expander.Reset()
+ f.expander.Write(f.prev)
+ f.expander.Write(f.info)
+ f.expander.Write([]byte{f.counter})
+ f.prev = f.expander.Sum(f.prev[:0])
+ f.counter++
+
+ // Copy the new batch into p
+ f.cache = f.prev
+ n = copy(p, f.cache)
+ p = p[n:]
+ }
+ // Save leftovers for next run
+ f.cache = f.cache[n:]
+
+ return need, nil
+}
+
+// New returns a new HKDF using the given hash, the secret keying material to expand
+// and optional salt and info fields.
+func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader {
+ if salt == nil {
+ salt = make([]byte, hash().Size())
+ }
+ extractor := hmac.New(hash, salt)
+ extractor.Write(secret)
+ prk := extractor.Sum(nil)
+
+ return &hkdf{hmac.New(hash, prk), extractor.Size(), info, 1, nil, nil}
+}
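
New performs the RFC 5869 extract step eagerly and Read performs expand
lazily, one HMAC block at a time. The same bytes can be produced directly
with crypto/hmac; a self-contained sketch with SHA-256 (hkdfManual is
illustrative and mirrors, rather than replaces, the package logic):

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"fmt"
)

// hkdfManual runs both RFC 5869 phases: extract (PRK = HMAC(salt, secret))
// and expand (T(n) = HMAC(PRK, T(n-1) || info || n)).
func hkdfManual(secret, salt, info []byte, length int) []byte {
	if salt == nil {
		salt = make([]byte, sha256.Size)
	}
	ext := hmac.New(sha256.New, salt)
	ext.Write(secret)
	prk := ext.Sum(nil)

	var out, t []byte
	for counter := byte(1); len(out) < length; counter++ {
		exp := hmac.New(sha256.New, prk)
		exp.Write(t)
		exp.Write(info)
		exp.Write([]byte{counter})
		t = exp.Sum(nil)
		out = append(out, t...)
	}
	return out[:length]
}

func main() {
	fmt.Printf("%x\n", hkdfManual([]byte("secret"), nil, []byte("ctx"), 42))
}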
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/hkdf_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/hkdf_test.go
new file mode 100644
index 00000000000..cee659bcdb9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/hkdf/hkdf_test.go
@@ -0,0 +1,370 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hkdf
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "hash"
+ "io"
+ "testing"
+)
+
+type hkdfTest struct {
+ hash func() hash.Hash
+ master []byte
+ salt []byte
+ info []byte
+ out []byte
+}
+
+var hkdfTests = []hkdfTest{
+ // Tests from RFC 5869
+ {
+ sha256.New,
+ []byte{
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ },
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c,
+ },
+ []byte{
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9,
+ },
+ []byte{
+ 0x3c, 0xb2, 0x5f, 0x25, 0xfa, 0xac, 0xd5, 0x7a,
+ 0x90, 0x43, 0x4f, 0x64, 0xd0, 0x36, 0x2f, 0x2a,
+ 0x2d, 0x2d, 0x0a, 0x90, 0xcf, 0x1a, 0x5a, 0x4c,
+ 0x5d, 0xb0, 0x2d, 0x56, 0xec, 0xc4, 0xc5, 0xbf,
+ 0x34, 0x00, 0x72, 0x08, 0xd5, 0xb8, 0x87, 0x18,
+ 0x58, 0x65,
+ },
+ },
+ {
+ sha256.New,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ },
+ []byte{
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ },
+ []byte{
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+ },
+ []byte{
+ 0xb1, 0x1e, 0x39, 0x8d, 0xc8, 0x03, 0x27, 0xa1,
+ 0xc8, 0xe7, 0xf7, 0x8c, 0x59, 0x6a, 0x49, 0x34,
+ 0x4f, 0x01, 0x2e, 0xda, 0x2d, 0x4e, 0xfa, 0xd8,
+ 0xa0, 0x50, 0xcc, 0x4c, 0x19, 0xaf, 0xa9, 0x7c,
+ 0x59, 0x04, 0x5a, 0x99, 0xca, 0xc7, 0x82, 0x72,
+ 0x71, 0xcb, 0x41, 0xc6, 0x5e, 0x59, 0x0e, 0x09,
+ 0xda, 0x32, 0x75, 0x60, 0x0c, 0x2f, 0x09, 0xb8,
+ 0x36, 0x77, 0x93, 0xa9, 0xac, 0xa3, 0xdb, 0x71,
+ 0xcc, 0x30, 0xc5, 0x81, 0x79, 0xec, 0x3e, 0x87,
+ 0xc1, 0x4c, 0x01, 0xd5, 0xc1, 0xf3, 0x43, 0x4f,
+ 0x1d, 0x87,
+ },
+ },
+ {
+ sha256.New,
+ []byte{
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ },
+ []byte{},
+ []byte{},
+ []byte{
+ 0x8d, 0xa4, 0xe7, 0x75, 0xa5, 0x63, 0xc1, 0x8f,
+ 0x71, 0x5f, 0x80, 0x2a, 0x06, 0x3c, 0x5a, 0x31,
+ 0xb8, 0xa1, 0x1f, 0x5c, 0x5e, 0xe1, 0x87, 0x9e,
+ 0xc3, 0x45, 0x4e, 0x5f, 0x3c, 0x73, 0x8d, 0x2d,
+ 0x9d, 0x20, 0x13, 0x95, 0xfa, 0xa4, 0xb6, 0x1a,
+ 0x96, 0xc8,
+ },
+ },
+ {
+ sha1.New,
+ []byte{
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b,
+ },
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c,
+ },
+ []byte{
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9,
+ },
+ []byte{
+ 0x08, 0x5a, 0x01, 0xea, 0x1b, 0x10, 0xf3, 0x69,
+ 0x33, 0x06, 0x8b, 0x56, 0xef, 0xa5, 0xad, 0x81,
+ 0xa4, 0xf1, 0x4b, 0x82, 0x2f, 0x5b, 0x09, 0x15,
+ 0x68, 0xa9, 0xcd, 0xd4, 0xf1, 0x55, 0xfd, 0xa2,
+ 0xc2, 0x2e, 0x42, 0x24, 0x78, 0xd3, 0x05, 0xf3,
+ 0xf8, 0x96,
+ },
+ },
+ {
+ sha1.New,
+ []byte{
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ },
+ []byte{
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ },
+ []byte{
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+ },
+ []byte{
+ 0x0b, 0xd7, 0x70, 0xa7, 0x4d, 0x11, 0x60, 0xf7,
+ 0xc9, 0xf1, 0x2c, 0xd5, 0x91, 0x2a, 0x06, 0xeb,
+ 0xff, 0x6a, 0xdc, 0xae, 0x89, 0x9d, 0x92, 0x19,
+ 0x1f, 0xe4, 0x30, 0x56, 0x73, 0xba, 0x2f, 0xfe,
+ 0x8f, 0xa3, 0xf1, 0xa4, 0xe5, 0xad, 0x79, 0xf3,
+ 0xf3, 0x34, 0xb3, 0xb2, 0x02, 0xb2, 0x17, 0x3c,
+ 0x48, 0x6e, 0xa3, 0x7c, 0xe3, 0xd3, 0x97, 0xed,
+ 0x03, 0x4c, 0x7f, 0x9d, 0xfe, 0xb1, 0x5c, 0x5e,
+ 0x92, 0x73, 0x36, 0xd0, 0x44, 0x1f, 0x4c, 0x43,
+ 0x00, 0xe2, 0xcf, 0xf0, 0xd0, 0x90, 0x0b, 0x52,
+ 0xd3, 0xb4,
+ },
+ },
+ {
+ sha1.New,
+ []byte{
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ },
+ []byte{},
+ []byte{},
+ []byte{
+ 0x0a, 0xc1, 0xaf, 0x70, 0x02, 0xb3, 0xd7, 0x61,
+ 0xd1, 0xe5, 0x52, 0x98, 0xda, 0x9d, 0x05, 0x06,
+ 0xb9, 0xae, 0x52, 0x05, 0x72, 0x20, 0xa3, 0x06,
+ 0xe0, 0x7b, 0x6b, 0x87, 0xe8, 0xdf, 0x21, 0xd0,
+ 0xea, 0x00, 0x03, 0x3d, 0xe0, 0x39, 0x84, 0xd3,
+ 0x49, 0x18,
+ },
+ },
+ {
+ sha1.New,
+ []byte{
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
+ },
+ nil,
+ []byte{},
+ []byte{
+ 0x2c, 0x91, 0x11, 0x72, 0x04, 0xd7, 0x45, 0xf3,
+ 0x50, 0x0d, 0x63, 0x6a, 0x62, 0xf6, 0x4f, 0x0a,
+ 0xb3, 0xba, 0xe5, 0x48, 0xaa, 0x53, 0xd4, 0x23,
+ 0xb0, 0xd1, 0xf2, 0x7e, 0xbb, 0xa6, 0xf5, 0xe5,
+ 0x67, 0x3a, 0x08, 0x1d, 0x70, 0xcc, 0xe7, 0xac,
+ 0xfc, 0x48,
+ },
+ },
+}
+
+func TestHKDF(t *testing.T) {
+ for i, tt := range hkdfTests {
+ hkdf := New(tt.hash, tt.master, tt.salt, tt.info)
+ out := make([]byte, len(tt.out))
+
+ n, err := io.ReadFull(hkdf, out)
+ if n != len(tt.out) || err != nil {
+ t.Errorf("test %d: not enough output bytes: %d.", i, n)
+ }
+
+ if !bytes.Equal(out, tt.out) {
+ t.Errorf("test %d: incorrect output: have %v, need %v.", i, out, tt.out)
+ }
+ }
+}
+
+func TestHKDFMultiRead(t *testing.T) {
+ for i, tt := range hkdfTests {
+ hkdf := New(tt.hash, tt.master, tt.salt, tt.info)
+ out := make([]byte, len(tt.out))
+
+ for b := 0; b < len(tt.out); b++ {
+ n, err := io.ReadFull(hkdf, out[b:b+1])
+ if n != 1 || err != nil {
+ t.Errorf("test %d.%d: not enough output bytes: have %d, need %d .", i, b, n, len(tt.out))
+ }
+ }
+
+ if !bytes.Equal(out, tt.out) {
+ t.Errorf("test %d: incorrect output: have %v, need %v.", i, out, tt.out)
+ }
+ }
+}
+
+func TestHKDFLimit(t *testing.T) {
+ hash := sha1.New
+ master := []byte{0x00, 0x01, 0x02, 0x03}
+ info := []byte{}
+
+ hkdf := New(hash, master, nil, info)
+ limit := hash().Size() * 255
+ out := make([]byte, limit)
+
+ // The maximum output bytes should be extractable
+ n, err := io.ReadFull(hkdf, out)
+ if n != limit || err != nil {
+ t.Errorf("not enough output bytes: %d, %v.", n, err)
+ }
+
+ // Reading one more should fail
+ n, err = io.ReadFull(hkdf, make([]byte, 1))
+ if n > 0 || err == nil {
+ t.Errorf("key expansion overflowed: n = %d, err = %v", n, err)
+ }
+}
+
+func Benchmark16ByteMD5Single(b *testing.B) {
+ benchmarkHKDFSingle(md5.New, 16, b)
+}
+
+func Benchmark20ByteSHA1Single(b *testing.B) {
+ benchmarkHKDFSingle(sha1.New, 20, b)
+}
+
+func Benchmark32ByteSHA256Single(b *testing.B) {
+ benchmarkHKDFSingle(sha256.New, 32, b)
+}
+
+func Benchmark64ByteSHA512Single(b *testing.B) {
+ benchmarkHKDFSingle(sha512.New, 64, b)
+}
+
+func Benchmark8ByteMD5Stream(b *testing.B) {
+ benchmarkHKDFStream(md5.New, 8, b)
+}
+
+func Benchmark16ByteMD5Stream(b *testing.B) {
+ benchmarkHKDFStream(md5.New, 16, b)
+}
+
+func Benchmark8ByteSHA1Stream(b *testing.B) {
+ benchmarkHKDFStream(sha1.New, 8, b)
+}
+
+func Benchmark20ByteSHA1Stream(b *testing.B) {
+ benchmarkHKDFStream(sha1.New, 20, b)
+}
+
+func Benchmark8ByteSHA256Stream(b *testing.B) {
+ benchmarkHKDFStream(sha256.New, 8, b)
+}
+
+func Benchmark32ByteSHA256Stream(b *testing.B) {
+ benchmarkHKDFStream(sha256.New, 32, b)
+}
+
+func Benchmark8ByteSHA512Stream(b *testing.B) {
+ benchmarkHKDFStream(sha512.New, 8, b)
+}
+
+func Benchmark64ByteSHA512Stream(b *testing.B) {
+ benchmarkHKDFStream(sha512.New, 64, b)
+}
+
+func benchmarkHKDFSingle(hasher func() hash.Hash, block int, b *testing.B) {
+ master := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}
+ salt := []byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17}
+ info := []byte{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27}
+ out := make([]byte, block)
+
+ b.SetBytes(int64(block))
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ hkdf := New(hasher, master, salt, info)
+ io.ReadFull(hkdf, out)
+ }
+}
+
+func benchmarkHKDFStream(hasher func() hash.Hash, block int, b *testing.B) {
+ master := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}
+ salt := []byte{0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17}
+ info := []byte{0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27}
+ out := make([]byte, block)
+
+ b.SetBytes(int64(block))
+ b.ResetTimer()
+
+ hkdf := New(hasher, master, salt, info)
+ for i := 0; i < b.N; i++ {
+ _, err := io.ReadFull(hkdf, out)
+ if err != nil {
+ hkdf = New(hasher, master, salt, info)
+ i--
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4.go
new file mode 100644
index 00000000000..6d9ba9e5f33
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4.go
@@ -0,0 +1,118 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package md4 implements the MD4 hash algorithm as defined in RFC 1320.
+package md4 // import "golang.org/x/crypto/md4"
+
+import (
+ "crypto"
+ "hash"
+)
+
+func init() {
+ crypto.RegisterHash(crypto.MD4, New)
+}
+
+// The size of an MD4 checksum in bytes.
+const Size = 16
+
+// The blocksize of MD4 in bytes.
+const BlockSize = 64
+
+const (
+ _Chunk = 64
+ _Init0 = 0x67452301
+ _Init1 = 0xEFCDAB89
+ _Init2 = 0x98BADCFE
+ _Init3 = 0x10325476
+)
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+ s [4]uint32
+ x [_Chunk]byte
+ nx int
+ len uint64
+}
+
+func (d *digest) Reset() {
+ d.s[0] = _Init0
+ d.s[1] = _Init1
+ d.s[2] = _Init2
+ d.s[3] = _Init3
+ d.nx = 0
+ d.len = 0
+}
+
+// New returns a new hash.Hash computing the MD4 checksum.
+func New() hash.Hash {
+ d := new(digest)
+ d.Reset()
+ return d
+}
+
+func (d *digest) Size() int { return Size }
+
+func (d *digest) BlockSize() int { return BlockSize }
+
+func (d *digest) Write(p []byte) (nn int, err error) {
+ nn = len(p)
+ d.len += uint64(nn)
+ if d.nx > 0 {
+ n := len(p)
+ if n > _Chunk-d.nx {
+ n = _Chunk - d.nx
+ }
+ for i := 0; i < n; i++ {
+ d.x[d.nx+i] = p[i]
+ }
+ d.nx += n
+ if d.nx == _Chunk {
+ _Block(d, d.x[0:])
+ d.nx = 0
+ }
+ p = p[n:]
+ }
+ n := _Block(d, p)
+ p = p[n:]
+ if len(p) > 0 {
+ d.nx = copy(d.x[:], p)
+ }
+ return
+}
+
+func (d0 *digest) Sum(in []byte) []byte {
+ // Make a copy of d0, so that caller can keep writing and summing.
+ d := new(digest)
+ *d = *d0
+
+ // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
+ len := d.len
+ var tmp [64]byte
+ tmp[0] = 0x80
+ if len%64 < 56 {
+ d.Write(tmp[0 : 56-len%64])
+ } else {
+ d.Write(tmp[0 : 64+56-len%64])
+ }
+
+ // Length in bits.
+ len <<= 3
+ for i := uint(0); i < 8; i++ {
+ tmp[i] = byte(len >> (8 * i))
+ }
+ d.Write(tmp[0:8])
+
+ if d.nx != 0 {
+ panic("d.nx != 0")
+ }
+
+ for _, s := range d.s {
+ in = append(in, byte(s>>0))
+ in = append(in, byte(s>>8))
+ in = append(in, byte(s>>16))
+ in = append(in, byte(s>>24))
+ }
+ return in
+}
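
Typical use goes through the hash.Hash interface. A short sketch; the
expected digest is the "abc" vector from the golden tests in the next file:

package main

import (
	"fmt"
	"io"

	"golang.org/x/crypto/md4"
)

func main() {
	h := md4.New()
	io.WriteString(h, "abc")
	fmt.Printf("%x\n", h.Sum(nil)) // a448017aaf21d8525fc10ae87aa6729d
}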
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4_test.go
new file mode 100644
index 00000000000..b56edd7875d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4_test.go
@@ -0,0 +1,71 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package md4
+
+import (
+ "fmt"
+ "io"
+ "testing"
+)
+
+type md4Test struct {
+ out string
+ in string
+}
+
+var golden = []md4Test{
+ {"31d6cfe0d16ae931b73c59d7e0c089c0", ""},
+ {"bde52cb31de33e46245e05fbdbd6fb24", "a"},
+ {"ec388dd78999dfc7cf4632465693b6bf", "ab"},
+ {"a448017aaf21d8525fc10ae87aa6729d", "abc"},
+ {"41decd8f579255c5200f86a4bb3ba740", "abcd"},
+ {"9803f4a34e8eb14f96adba49064a0c41", "abcde"},
+ {"804e7f1c2586e50b49ac65db5b645131", "abcdef"},
+ {"752f4adfe53d1da0241b5bc216d098fc", "abcdefg"},
+ {"ad9daf8d49d81988590a6f0e745d15dd", "abcdefgh"},
+ {"1e4e28b05464316b56402b3815ed2dfd", "abcdefghi"},
+ {"dc959c6f5d6f9e04e4380777cc964b3d", "abcdefghij"},
+ {"1b5701e265778898ef7de5623bbe7cc0", "Discard medicine more than two years old."},
+ {"d7f087e090fe7ad4a01cb59dacc9a572", "He who has a shady past knows that nice guys finish last."},
+ {"a6f8fd6df617c72837592fc3570595c9", "I wouldn't marry him with a ten foot pole."},
+ {"c92a84a9526da8abc240c05d6b1a1ce0", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"},
+ {"f6013160c4dcb00847069fee3bb09803", "The days of the digital watch are numbered. -Tom Stoppard"},
+ {"2c3bb64f50b9107ed57640fe94bec09f", "Nepal premier won't resign."},
+ {"45b7d8a32c7806f2f7f897332774d6e4", "For every action there is an equal and opposite government program."},
+ {"b5b4f9026b175c62d7654bdc3a1cd438", "His money is twice tainted: 'taint yours and 'taint mine."},
+ {"caf44e80f2c20ce19b5ba1cab766e7bd", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"},
+ {"191fae6707f496aa54a6bce9f2ecf74d", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"},
+ {"9ddc753e7a4ccee6081cd1b45b23a834", "size: a.out: bad magic"},
+ {"8d050f55b1cadb9323474564be08a521", "The major problem is with sendmail. -Mark Horton"},
+ {"ad6e2587f74c3e3cc19146f6127fa2e3", "Give me a rock, paper and scissors and I will move the world. CCFestoon"},
+ {"1d616d60a5fabe85589c3f1566ca7fca", "If the enemy is within range, then so are you."},
+ {"aec3326a4f496a2ced65a1963f84577f", "It's well we cannot hear the screams/That we create in others' dreams."},
+ {"77b4fd762d6b9245e61c50bf6ebf118b", "You remind me of a TV show, but that's all right: I watch it anyway."},
+ {"e8f48c726bae5e516f6ddb1a4fe62438", "C is as portable as Stonehedge!!"},
+ {"a3a84366e7219e887423b01f9be7166e", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"},
+ {"a6b7aa35157e984ef5d9b7f32e5fbb52", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"},
+ {"75661f0545955f8f9abeeb17845f3fd6", "How can you write a big system without C++? -Paul Glick"},
+}
+
+func TestGolden(t *testing.T) {
+ for i := 0; i < len(golden); i++ {
+ g := golden[i]
+ c := New()
+ for j := 0; j < 3; j++ {
+ if j < 2 {
+ io.WriteString(c, g.in)
+ } else {
+ io.WriteString(c, g.in[0:len(g.in)/2])
+ c.Sum(nil)
+ io.WriteString(c, g.in[len(g.in)/2:])
+ }
+ s := fmt.Sprintf("%x", c.Sum(nil))
+ if s != g.out {
+ t.Fatalf("md4[%d](%s) = %s want %s", j, g.in, s, g.out)
+ }
+ c.Reset()
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4block.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4block.go
new file mode 100644
index 00000000000..3fed475f3f6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/md4/md4block.go
@@ -0,0 +1,89 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// MD4 block step.
+// In its own file so that a faster assembly or C version
+// can be substituted easily.
+
+package md4
+
+var shift1 = []uint{3, 7, 11, 19}
+var shift2 = []uint{3, 5, 9, 13}
+var shift3 = []uint{3, 9, 11, 15}
+
+var xIndex2 = []uint{0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15}
+var xIndex3 = []uint{0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15}
+
+func _Block(dig *digest, p []byte) int {
+ a := dig.s[0]
+ b := dig.s[1]
+ c := dig.s[2]
+ d := dig.s[3]
+ n := 0
+ var X [16]uint32
+ for len(p) >= _Chunk {
+ aa, bb, cc, dd := a, b, c, d
+
+ j := 0
+ for i := 0; i < 16; i++ {
+ X[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
+ j += 4
+ }
+
+ // If this needs to be made faster in the future,
+ // the usual trick is to unroll each of these
+ // loops by a factor of 4; that lets you replace
+ // the shift[] lookups with constants and,
+ // with suitable variable renaming in each
+ // unrolled body, delete the a, b, c, d = d, a, b, c
+ // (or you can let the optimizer do the renaming).
+ //
+ // The index variables are uint so that % by a power
+ // of two can be optimized easily by a compiler.
+
+ // Round 1.
+ for i := uint(0); i < 16; i++ {
+ x := i
+ s := shift1[i%4]
+ f := ((c ^ d) & b) ^ d
+ a += f + X[x]
+ a = a<<s | a>>(32-s)
+ a, b, c, d = d, a, b, c
+ }
+
+ // Round 2.
+ for i := uint(0); i < 16; i++ {
+ x := xIndex2[i]
+ s := shift2[i%4]
+ g := (b & c) | (b & d) | (c & d)
+ a += g + X[x] + 0x5a827999
+ a = a<<s | a>>(32-s)
+ a, b, c, d = d, a, b, c
+ }
+
+ // Round 3.
+ for i := uint(0); i < 16; i++ {
+ x := xIndex3[i]
+ s := shift3[i%4]
+ h := b ^ c ^ d
+ a += h + X[x] + 0x6ed9eba1
+ a = a<<s | a>>(32-s)
+ a, b, c, d = d, a, b, c
+ }
+
+ a += aa
+ b += bb
+ c += cc
+ d += dd
+
+ p = p[_Chunk:]
+ n += _Chunk
+ }
+
+ dig.s[0] = a
+ dig.s[1] = b
+ dig.s[2] = c
+ dig.s[3] = d
+ return n
+}
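
The three rounds above rely on a<<s | a>>(32-s) being a 32-bit left rotation. A tiny check of that identity (illustrative only; assumes a Go toolchain new enough to ship math/bits, which this vendored snapshot predates):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	a, s := uint32(0x12345678), uint(7)
	manual := a<<s | a>>(32-s) // the form used in _Block
	if manual != bits.RotateLeft32(a, int(s)) {
		panic("rotation identity does not hold")
	}
	fmt.Printf("%08x\n", manual)
}
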
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/box/box.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/box/box.go
new file mode 100644
index 00000000000..ca48a6dbff9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/box/box.go
@@ -0,0 +1,85 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package box authenticates and encrypts messages using public-key cryptography.
+
+Box uses Curve25519, XSalsa20 and Poly1305 to encrypt and authenticate
+messages. The length of messages is not hidden.
+
+It is the caller's responsibility to ensure the uniqueness of nonces—for
+example, by using nonce 1 for the first message, nonce 2 for the second
+message, etc. Nonces are long enough that randomly generated nonces have
+negligible risk of collision.
+
+This package is interoperable with NaCl: http://nacl.cr.yp.to/box.html.
+*/
+package box // import "golang.org/x/crypto/nacl/box"
+
+import (
+ "golang.org/x/crypto/curve25519"
+ "golang.org/x/crypto/nacl/secretbox"
+ "golang.org/x/crypto/salsa20/salsa"
+ "io"
+)
+
+// Overhead is the number of bytes of overhead when boxing a message.
+const Overhead = secretbox.Overhead
+
+// GenerateKey generates a new public/private key pair suitable for use with
+// Seal and Open.
+func GenerateKey(rand io.Reader) (publicKey, privateKey *[32]byte, err error) {
+ publicKey = new([32]byte)
+ privateKey = new([32]byte)
+ _, err = io.ReadFull(rand, privateKey[:])
+ if err != nil {
+ publicKey = nil
+ privateKey = nil
+ return
+ }
+
+ curve25519.ScalarBaseMult(publicKey, privateKey)
+ return
+}
+
+var zeros [16]byte
+
+// Precompute calculates the shared key between peersPublicKey and privateKey
+// and writes it to sharedKey. The shared key can be used with
+// OpenAfterPrecomputation and SealAfterPrecomputation to speed up processing
+// when using the same pair of keys repeatedly.
+func Precompute(sharedKey, peersPublicKey, privateKey *[32]byte) {
+ curve25519.ScalarMult(sharedKey, privateKey, peersPublicKey)
+ salsa.HSalsa20(sharedKey, &zeros, sharedKey, &salsa.Sigma)
+}
+
+// Seal appends an encrypted and authenticated copy of message to out, which
+// will be Overhead bytes longer than the original and must not overlap. The
+// nonce must be unique for each distinct message for a given pair of keys.
+func Seal(out, message []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) []byte {
+ var sharedKey [32]byte
+ Precompute(&sharedKey, peersPublicKey, privateKey)
+ return secretbox.Seal(out, message, nonce, &sharedKey)
+}
+
+// SealAfterPrecomputation performs the same actions as Seal, but takes a
+// shared key as generated by Precompute.
+func SealAfterPrecomputation(out, message []byte, nonce *[24]byte, sharedKey *[32]byte) []byte {
+ return secretbox.Seal(out, message, nonce, sharedKey)
+}
+
+// Open authenticates and decrypts a box produced by Seal and appends the
+// message to out, which must not overlap box. The output will be Overhead
+// bytes smaller than box.
+func Open(out, box []byte, nonce *[24]byte, peersPublicKey, privateKey *[32]byte) ([]byte, bool) {
+ var sharedKey [32]byte
+ Precompute(&sharedKey, peersPublicKey, privateKey)
+ return secretbox.Open(out, box, nonce, &sharedKey)
+}
+
+// OpenAfterPrecomputation performs the same actions as Open, but takes a
+// shared key as generated by Precompute.
+func OpenAfterPrecomputation(out, box []byte, nonce *[24]byte, sharedKey *[32]byte) ([]byte, bool) {
+ return secretbox.Open(out, box, nonce, sharedKey)
+}
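
A round-trip sketch of the API above (illustrative, not part of this commit). Prepending the nonce to the sealed box is a common convention assumed by this example, not something the package mandates; real protocols must still guarantee nonce uniqueness per key pair:

package main

import (
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/crypto/nacl/box"
)

func main() {
	senderPub, senderPriv, err := box.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	recipientPub, recipientPriv, err := box.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	var nonce [24]byte
	if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
		panic(err)
	}

	// Seal appends to its first argument, so sealed = nonce || ciphertext.
	sealed := box.Seal(nonce[:], []byte("hello"), &nonce, recipientPub, senderPriv)

	// The recipient splits the nonce back off the front before opening.
	var n [24]byte
	copy(n[:], sealed[:24])
	opened, ok := box.Open(nil, sealed[24:], &n, senderPub, recipientPriv)
	if !ok {
		panic("open failed")
	}
	fmt.Println(string(opened))
}
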
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/box/box_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/box/box_test.go
new file mode 100644
index 00000000000..481ade28aec
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/box/box_test.go
@@ -0,0 +1,78 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package box
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "testing"
+
+ "golang.org/x/crypto/curve25519"
+)
+
+func TestSealOpen(t *testing.T) {
+ publicKey1, privateKey1, _ := GenerateKey(rand.Reader)
+ publicKey2, privateKey2, _ := GenerateKey(rand.Reader)
+
+ if *privateKey1 == *privateKey2 {
+ t.Fatalf("private keys are equal!")
+ }
+ if *publicKey1 == *publicKey2 {
+ t.Fatalf("public keys are equal!")
+ }
+ message := []byte("test message")
+ var nonce [24]byte
+
+ box := Seal(nil, message, &nonce, publicKey1, privateKey2)
+ opened, ok := Open(nil, box, &nonce, publicKey2, privateKey1)
+ if !ok {
+ t.Fatalf("failed to open box")
+ }
+
+ if !bytes.Equal(opened, message) {
+ t.Fatalf("got %x, want %x", opened, message)
+ }
+
+ for i := range box {
+ box[i] ^= 0x40
+ _, ok := Open(nil, box, &nonce, publicKey2, privateKey1)
+ if ok {
+ t.Fatalf("opened box with byte %d corrupted", i)
+ }
+ box[i] ^= 0x40
+ }
+}
+
+func TestBox(t *testing.T) {
+ var privateKey1, privateKey2 [32]byte
+ for i := range privateKey1[:] {
+ privateKey1[i] = 1
+ }
+ for i := range privateKey2[:] {
+ privateKey2[i] = 2
+ }
+
+ var publicKey1 [32]byte
+ curve25519.ScalarBaseMult(&publicKey1, &privateKey1)
+ var message [64]byte
+ for i := range message[:] {
+ message[i] = 3
+ }
+
+ var nonce [24]byte
+ for i := range nonce[:] {
+ nonce[i] = 4
+ }
+
+ box := Seal(nil, message[:], &nonce, &publicKey1, &privateKey2)
+
+ // expected was generated using the C implementation of NaCl.
+ expected, _ := hex.DecodeString("78ea30b19d2341ebbdba54180f821eec265cf86312549bea8a37652a8bb94f07b78a73ed1708085e6ddd0e943bbdeb8755079a37eb31d86163ce241164a47629c0539f330b4914cd135b3855bc2a2dfc")
+
+ if !bytes.Equal(box, expected) {
+ t.Fatalf("box didn't match, got\n%x\n, expected\n%x", box, expected)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/secretbox/secretbox.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/secretbox/secretbox.go
new file mode 100644
index 00000000000..dbf31bbf40f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/secretbox/secretbox.go
@@ -0,0 +1,149 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package secretbox encrypts and authenticates small messages.
+
+Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with
+secret-key cryptography. The length of messages is not hidden.
+
+It is the caller's responsibility to ensure the uniqueness of nonces—for
+example, by using nonce 1 for the first message, nonce 2 for the second
+message, etc. Nonces are long enough that randomly generated nonces have
+negligible risk of collision.
+
+This package is interoperable with NaCl: http://nacl.cr.yp.to/secretbox.html.
+*/
+package secretbox // import "golang.org/x/crypto/nacl/secretbox"
+
+import (
+ "golang.org/x/crypto/poly1305"
+ "golang.org/x/crypto/salsa20/salsa"
+)
+
+// Overhead is the number of bytes of overhead when boxing a message.
+const Overhead = poly1305.TagSize
+
+// setup produces a sub-key and Salsa20 counter given a nonce and key.
+func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
+ // We use XSalsa20 for encryption so first we need to generate a
+ // key and nonce with HSalsa20.
+ var hNonce [16]byte
+ copy(hNonce[:], nonce[:])
+ salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
+
+ // The final 8 bytes of the original nonce form the new nonce.
+ copy(counter[:], nonce[16:])
+}
+
+// sliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+ if total := len(in) + n; cap(in) >= total {
+ head = in[:total]
+ } else {
+ head = make([]byte, total)
+ copy(head, in)
+ }
+ tail = head[len(in):]
+ return
+}
+
+// Seal appends an encrypted and authenticated copy of message to out, which
+// must not overlap message. The key and nonce pair must be unique for each
+// distinct message and the output will be Overhead bytes longer than message.
+func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte {
+ var subKey [32]byte
+ var counter [16]byte
+ setup(&subKey, &counter, nonce, key)
+
+ // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
+ // Salsa20 works with 64-byte blocks, we also generate 32 bytes of
+ // keystream as a side effect.
+ var firstBlock [64]byte
+ salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
+
+ var poly1305Key [32]byte
+ copy(poly1305Key[:], firstBlock[:])
+
+ ret, out := sliceForAppend(out, len(message)+poly1305.TagSize)
+
+ // We XOR up to 32 bytes of message with the keystream generated from
+ // the first block.
+ firstMessageBlock := message
+ if len(firstMessageBlock) > 32 {
+ firstMessageBlock = firstMessageBlock[:32]
+ }
+
+ tagOut := out
+ out = out[poly1305.TagSize:]
+ for i, x := range firstMessageBlock {
+ out[i] = firstBlock[32+i] ^ x
+ }
+ message = message[len(firstMessageBlock):]
+ ciphertext := out
+ out = out[len(firstMessageBlock):]
+
+ // Now encrypt the rest.
+ counter[8] = 1
+ salsa.XORKeyStream(out, message, &counter, &subKey)
+
+ var tag [poly1305.TagSize]byte
+ poly1305.Sum(&tag, ciphertext, &poly1305Key)
+ copy(tagOut, tag[:])
+
+ return ret
+}
+
+// Open authenticates and decrypts a box produced by Seal and appends the
+// message to out, which must not overlap box. The output will be Overhead
+// bytes smaller than box.
+func Open(out []byte, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
+ if len(box) < Overhead {
+ return nil, false
+ }
+
+ var subKey [32]byte
+ var counter [16]byte
+ setup(&subKey, &counter, nonce, key)
+
+ // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
+ // Salsa20 works with 64-byte blocks, we also generate 32 bytes of
+ // keystream as a side effect.
+ var firstBlock [64]byte
+ salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
+
+ var poly1305Key [32]byte
+ copy(poly1305Key[:], firstBlock[:])
+ var tag [poly1305.TagSize]byte
+ copy(tag[:], box)
+
+ if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
+ return nil, false
+ }
+
+ ret, out := sliceForAppend(out, len(box)-Overhead)
+
+ // We XOR up to 32 bytes of box with the keystream generated from
+ // the first block.
+ box = box[Overhead:]
+ firstMessageBlock := box
+ if len(firstMessageBlock) > 32 {
+ firstMessageBlock = firstMessageBlock[:32]
+ }
+ for i, x := range firstMessageBlock {
+ out[i] = firstBlock[32+i] ^ x
+ }
+
+ box = box[len(firstMessageBlock):]
+ out = out[len(firstMessageBlock):]
+
+ // Now decrypt the rest.
+ counter[8] = 1
+ salsa.XORKeyStream(out, box, &counter, &subKey)
+
+ return ret, true
+}
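
A symmetric round-trip sketch for the API above (illustrative, not part of this commit). As with box, prepending the nonce to the output is a convention of this example, not a package requirement:

package main

import (
	"crypto/rand"
	"fmt"
	"io"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	if _, err := io.ReadFull(rand.Reader, key[:]); err != nil {
		panic(err)
	}

	var nonce [24]byte
	if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
		panic(err)
	}

	// Prepend the nonce so the recipient can recover it.
	sealed := secretbox.Seal(nonce[:], []byte("hello"), &nonce, &key)

	var n [24]byte
	copy(n[:], sealed[:24])
	opened, ok := secretbox.Open(nil, sealed[24:], &n, &key)
	if !ok {
		panic("decryption failed")
	}
	fmt.Println(string(opened)) // hello
}
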
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/secretbox/secretbox_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/secretbox/secretbox_test.go
new file mode 100644
index 00000000000..664dc1521d3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/nacl/secretbox/secretbox_test.go
@@ -0,0 +1,91 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package secretbox
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "testing"
+)
+
+func TestSealOpen(t *testing.T) {
+ var key [32]byte
+ var nonce [24]byte
+
+ rand.Reader.Read(key[:])
+ rand.Reader.Read(nonce[:])
+
+ var box, opened []byte
+
+ for msgLen := 0; msgLen < 128; msgLen += 17 {
+ message := make([]byte, msgLen)
+ rand.Reader.Read(message)
+
+ box = Seal(box[:0], message, &nonce, &key)
+ var ok bool
+ opened, ok = Open(opened[:0], box, &nonce, &key)
+ if !ok {
+ t.Errorf("%d: failed to open box", msgLen)
+ continue
+ }
+
+ if !bytes.Equal(opened, message) {
+ t.Errorf("%d: got %x, expected %x", msgLen, opened, message)
+ continue
+ }
+ }
+
+ for i := range box {
+ box[i] ^= 0x20
+ _, ok := Open(opened[:0], box, &nonce, &key)
+ if ok {
+ t.Errorf("box was opened after corrupting byte %d", i)
+ }
+ box[i] ^= 0x20
+ }
+}
+
+func TestSecretBox(t *testing.T) {
+ var key [32]byte
+ var nonce [24]byte
+ var message [64]byte
+
+ for i := range key[:] {
+ key[i] = 1
+ }
+ for i := range nonce[:] {
+ nonce[i] = 2
+ }
+ for i := range message[:] {
+ message[i] = 3
+ }
+
+ box := Seal(nil, message[:], &nonce, &key)
+ // expected was generated using the C implementation of NaCl.
+ expected, _ := hex.DecodeString("8442bc313f4626f1359e3b50122b6ce6fe66ddfe7d39d14e637eb4fd5b45beadab55198df6ab5368439792a23c87db70acb6156dc5ef957ac04f6276cf6093b84be77ff0849cc33e34b7254d5a8f65ad")
+
+ if !bytes.Equal(box, expected) {
+ t.Fatalf("box didn't match, got\n%x\n, expected\n%x", box, expected)
+ }
+}
+
+func TestAppend(t *testing.T) {
+ var key [32]byte
+ var nonce [24]byte
+ var message [8]byte
+
+ out := make([]byte, 4)
+ box := Seal(out, message[:], &nonce, &key)
+ if !bytes.Equal(box[:4], out[:4]) {
+ t.Fatalf("Seal didn't correctly append")
+ }
+
+ out = make([]byte, 4, 100)
+ box = Seal(out, message[:], &nonce, &key)
+ if !bytes.Equal(box[:4], out[:4]) {
+ t.Fatalf("Seal didn't correctly append with sufficient capacity.")
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ocsp/ocsp.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ocsp/ocsp.go
new file mode 100644
index 00000000000..ea61cf49852
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ocsp/ocsp.go
@@ -0,0 +1,673 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses
+// are signed messages attesting to the validity of a certificate for a small
+// period of time. This is used to manage revocation for X.509 certificates.
+package ocsp // import "golang.org/x/crypto/ocsp"
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+ "math/big"
+ "strconv"
+ "time"
+)
+
+var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1})
+
+// ResponseStatus contains the result of an OCSP request. See
+// https://tools.ietf.org/html/rfc6960#section-2.3
+type ResponseStatus int
+
+const (
+ Success ResponseStatus = 0
+ Malformed ResponseStatus = 1
+ InternalError ResponseStatus = 2
+ TryLater ResponseStatus = 3
+ // Status code four is unused in OCSP. See
+ // https://tools.ietf.org/html/rfc6960#section-4.2.1
+ SignatureRequired ResponseStatus = 5
+ Unauthorized ResponseStatus = 6
+)
+
+func (r ResponseStatus) String() string {
+ switch r {
+ case Success:
+ return "success"
+ case Malformed:
+ return "malformed"
+ case InternalError:
+ return "internal error"
+ case TryLater:
+ return "try later"
+ case SignatureRequired:
+ return "signature required"
+ case Unauthorized:
+ return "unauthorized"
+ default:
+ return "unknown OCSP status: " + strconv.Itoa(int(r))
+ }
+}
+
+// ResponseError is an error that may be returned by ParseResponse to indicate
+ // that the response itself is an error, not just that it is reporting that a
+// certificate is revoked, unknown, etc.
+type ResponseError struct {
+ Status ResponseStatus
+}
+
+func (r ResponseError) Error() string {
+ return "ocsp: error from server: " + r.Status.String()
+}
+
+// These are internal structures that reflect the ASN.1 structure of an OCSP
+// response. See RFC 2560, section 4.2.
+
+type certID struct {
+ HashAlgorithm pkix.AlgorithmIdentifier
+ NameHash []byte
+ IssuerKeyHash []byte
+ SerialNumber *big.Int
+}
+
+// https://tools.ietf.org/html/rfc2560#section-4.1.1
+type ocspRequest struct {
+ TBSRequest tbsRequest
+}
+
+type tbsRequest struct {
+ Version int `asn1:"explicit,tag:0,default:0,optional"`
+ RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"`
+ RequestList []request
+}
+
+type request struct {
+ Cert certID
+}
+
+type responseASN1 struct {
+ Status asn1.Enumerated
+ Response responseBytes `asn1:"explicit,tag:0,optional"`
+}
+
+type responseBytes struct {
+ ResponseType asn1.ObjectIdentifier
+ Response []byte
+}
+
+type basicResponse struct {
+ TBSResponseData responseData
+ SignatureAlgorithm pkix.AlgorithmIdentifier
+ Signature asn1.BitString
+ Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"`
+}
+
+type responseData struct {
+ Raw asn1.RawContent
+ Version int `asn1:"optional,default:1,explicit,tag:0"`
+ RawResponderName asn1.RawValue `asn1:"optional,explicit,tag:1"`
+ KeyHash []byte `asn1:"optional,explicit,tag:2"`
+ ProducedAt time.Time `asn1:"generalized"`
+ Responses []singleResponse
+}
+
+type singleResponse struct {
+ CertID certID
+ Good asn1.Flag `asn1:"tag:0,optional"`
+ Revoked revokedInfo `asn1:"tag:1,optional"`
+ Unknown asn1.Flag `asn1:"tag:2,optional"`
+ ThisUpdate time.Time `asn1:"generalized"`
+ NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"`
+ SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"`
+}
+
+type revokedInfo struct {
+ RevocationTime time.Time `asn1:"generalized"`
+ Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"`
+}
+
+var (
+ oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
+ oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
+ oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+ oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+ oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+ oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+ oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+ oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 4, 3, 2}
+ oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+ oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+ oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+ oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+)
+
+var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{
+ crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}),
+ crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}),
+ crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}),
+ crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}),
+}
+
+// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
+var signatureAlgorithmDetails = []struct {
+ algo x509.SignatureAlgorithm
+ oid asn1.ObjectIdentifier
+ pubKeyAlgo x509.PublicKeyAlgorithm
+ hash crypto.Hash
+}{
+ {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */},
+ {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5},
+ {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
+ {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256},
+ {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384},
+ {x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512},
+ {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1},
+ {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256},
+ {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1},
+ {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256},
+ {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384},
+ {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512},
+}
+
+// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below
+func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
+ var pubType x509.PublicKeyAlgorithm
+
+ switch pub := pub.(type) {
+ case *rsa.PublicKey:
+ pubType = x509.RSA
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureSHA256WithRSA
+ sigAlgo.Parameters = asn1.RawValue{
+ Tag: 5,
+ }
+
+ case *ecdsa.PublicKey:
+ pubType = x509.ECDSA
+
+ switch pub.Curve {
+ case elliptic.P224(), elliptic.P256():
+ hashFunc = crypto.SHA256
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
+ case elliptic.P384():
+ hashFunc = crypto.SHA384
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA384
+ case elliptic.P521():
+ hashFunc = crypto.SHA512
+ sigAlgo.Algorithm = oidSignatureECDSAWithSHA512
+ default:
+ err = errors.New("x509: unknown elliptic curve")
+ }
+
+ default:
+ err = errors.New("x509: only RSA and ECDSA keys supported")
+ }
+
+ if err != nil {
+ return
+ }
+
+ if requestedSigAlgo == 0 {
+ return
+ }
+
+ found := false
+ for _, details := range signatureAlgorithmDetails {
+ if details.algo == requestedSigAlgo {
+ if details.pubKeyAlgo != pubType {
+ err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
+ return
+ }
+ sigAlgo.Algorithm, hashFunc = details.oid, details.hash
+ if hashFunc == 0 {
+ err = errors.New("x509: cannot sign with hash function requested")
+ return
+ }
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ err = errors.New("x509: unknown SignatureAlgorithm")
+ }
+
+ return
+}
+
+// TODO(agl): this is taken from crypto/x509 and so should probably be exported
+// from crypto/x509 or crypto/x509/pkix.
+func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm {
+ for _, details := range signatureAlgorithmDetails {
+ if oid.Equal(details.oid) {
+ return details.algo
+ }
+ }
+ return x509.UnknownSignatureAlgorithm
+}
+
+// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form.
+func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash {
+ for hash, oid := range hashOIDs {
+ if oid.Equal(target) {
+ return hash
+ }
+ }
+ return crypto.Hash(0)
+}
+
+// This is the exposed reflection of the internal OCSP structures.
+
+// The status values that can be expressed in OCSP. See RFC 6960.
+const (
+ // Good means that the certificate is valid.
+ Good = iota
+ // Revoked means that the certificate has been deliberately revoked.
+ Revoked
+ // Unknown means that the OCSP responder doesn't know about the certificate.
+ Unknown
+ // ServerFailed is unused and was never used (see
+ // https://go-review.googlesource.com/#/c/18944). ParseResponse will
+ // return a ResponseError when an error response is parsed.
+ ServerFailed
+)
+
+// The enumerated reasons for revoking a certificate. See RFC 5280.
+const (
+ Unspecified = iota
+ KeyCompromise = iota
+ CACompromise = iota
+ AffiliationChanged = iota
+ Superseded = iota
+ CessationOfOperation = iota
+ CertificateHold = iota
+ _ = iota
+ RemoveFromCRL = iota
+ PrivilegeWithdrawn = iota
+ AACompromise = iota
+)
+
+// Request represents an OCSP request. See RFC 6960.
+type Request struct {
+ HashAlgorithm crypto.Hash
+ IssuerNameHash []byte
+ IssuerKeyHash []byte
+ SerialNumber *big.Int
+}
+
+// Response represents an OCSP response containing a single SingleResponse. See
+// RFC 6960.
+type Response struct {
+ // Status is one of {Good, Revoked, Unknown}
+ Status int
+ SerialNumber *big.Int
+ ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time
+ RevocationReason int
+ Certificate *x509.Certificate
+ // TBSResponseData contains the raw bytes of the signed response. If
+ // Certificate is nil then this can be used to verify Signature.
+ TBSResponseData []byte
+ Signature []byte
+ SignatureAlgorithm x509.SignatureAlgorithm
+
+ // Extensions contains raw X.509 extensions from the singleExtensions field
+ // of the OCSP response. When parsing certificates, this can be used to
+ // extract non-critical extensions that are not parsed by this package. When
+ // marshaling OCSP responses, the Extensions field is ignored, see
+ // ExtraExtensions.
+ Extensions []pkix.Extension
+
+ // ExtraExtensions contains extensions to be copied, raw, into any marshaled
+ // OCSP response (in the singleExtensions field). Values override any
+ // extensions that would otherwise be produced based on the other fields. The
+ // ExtraExtensions field is not populated when parsing certificates, see
+ // Extensions.
+ ExtraExtensions []pkix.Extension
+}
+
+// These are pre-serialized error responses for the various non-success codes
+// defined by OCSP. The Unauthorized code in particular can be used by an OCSP
+// responder that supports only pre-signed responses as a response to requests
+// for certificates with unknown status. See RFC 5019.
+var (
+ MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01}
+ InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02}
+ TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03}
+ SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05}
+ UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06}
+)
+
+// CheckSignatureFrom checks that the signature in resp is a valid signature
+// from issuer. This should only be used if resp.Certificate is nil. Otherwise,
+// the OCSP response contained an intermediate certificate that created the
+// signature. That signature is checked by ParseResponse and only
+// resp.Certificate remains to be validated.
+func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error {
+ return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature)
+}
+
+// ParseError results from an invalid OCSP response.
+type ParseError string
+
+func (p ParseError) Error() string {
+ return string(p)
+}
+
+// ParseRequest parses an OCSP request in DER form. It only supports
+// requests for a single certificate. Signed requests are not supported.
+// If a request includes a signature, it will result in a ParseError.
+func ParseRequest(bytes []byte) (*Request, error) {
+ var req ocspRequest
+ rest, err := asn1.Unmarshal(bytes, &req)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, ParseError("trailing data in OCSP request")
+ }
+
+ if len(req.TBSRequest.RequestList) == 0 {
+ return nil, ParseError("OCSP request contains no request body")
+ }
+ innerRequest := req.TBSRequest.RequestList[0]
+
+ hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm)
+ if hashFunc == crypto.Hash(0) {
+ return nil, ParseError("OCSP request uses unknown hash function")
+ }
+
+ return &Request{
+ HashAlgorithm: hashFunc,
+ IssuerNameHash: innerRequest.Cert.NameHash,
+ IssuerKeyHash: innerRequest.Cert.IssuerKeyHash,
+ SerialNumber: innerRequest.Cert.SerialNumber,
+ }, nil
+}
+
+// ParseResponse parses an OCSP response in DER form. It only supports
+// responses for a single certificate. If the response contains a certificate
+// then the signature over the response is checked. If issuer is not nil then
+// it will be used to validate the signature or embedded certificate.
+//
+// Invalid signatures or parse failures will result in a ParseError. Error
+// responses will result in a ResponseError.
+func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) {
+ var resp responseASN1
+ rest, err := asn1.Unmarshal(bytes, &resp)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) > 0 {
+ return nil, ParseError("trailing data in OCSP response")
+ }
+
+ if status := ResponseStatus(resp.Status); status != Success {
+ return nil, ResponseError{status}
+ }
+
+ if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) {
+ return nil, ParseError("bad OCSP response type")
+ }
+
+ var basicResp basicResponse
+ rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(basicResp.Certificates) > 1 {
+ return nil, ParseError("OCSP response contains bad number of certificates")
+ }
+
+ if len(basicResp.TBSResponseData.Responses) != 1 {
+ return nil, ParseError("OCSP response contains bad number of responses")
+ }
+
+ ret := &Response{
+ TBSResponseData: basicResp.TBSResponseData.Raw,
+ Signature: basicResp.Signature.RightAlign(),
+ SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm),
+ }
+
+ if len(basicResp.Certificates) > 0 {
+ ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := ret.CheckSignatureFrom(ret.Certificate); err != nil {
+ return nil, ParseError("bad OCSP signature")
+ }
+
+ if issuer != nil {
+ if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil {
+ return nil, ParseError("bad signature on embedded certificate")
+ }
+ }
+ } else if issuer != nil {
+ if err := ret.CheckSignatureFrom(issuer); err != nil {
+ return nil, ParseError("bad OCSP signature")
+ }
+ }
+
+ r := basicResp.TBSResponseData.Responses[0]
+
+ for _, ext := range r.SingleExtensions {
+ if ext.Critical {
+ return nil, ParseError("unsupported critical extension")
+ }
+ }
+ ret.Extensions = r.SingleExtensions
+
+ ret.SerialNumber = r.CertID.SerialNumber
+
+ switch {
+ case bool(r.Good):
+ ret.Status = Good
+ case bool(r.Unknown):
+ ret.Status = Unknown
+ default:
+ ret.Status = Revoked
+ ret.RevokedAt = r.Revoked.RevocationTime
+ ret.RevocationReason = int(r.Revoked.Reason)
+ }
+
+ ret.ProducedAt = basicResp.TBSResponseData.ProducedAt
+ ret.ThisUpdate = r.ThisUpdate
+ ret.NextUpdate = r.NextUpdate
+
+ return ret, nil
+}
+
+// RequestOptions contains options for constructing OCSP requests.
+type RequestOptions struct {
+ // Hash contains the hash function that should be used when
+ // constructing the OCSP request. If zero, SHA-1 will be used.
+ Hash crypto.Hash
+}
+
+func (opts *RequestOptions) hash() crypto.Hash {
+ if opts == nil || opts.Hash == 0 {
+ // SHA-1 is nearly universally used in OCSP.
+ return crypto.SHA1
+ }
+ return opts.Hash
+}
+
+ // CreateRequest returns a DER-encoded OCSP request for the status of cert. If
+// opts is nil then sensible defaults are used.
+func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) {
+ hashFunc := opts.hash()
+
+ // OCSP seems to be the only place where these raw hash identifiers are
+ // used. I took the following from
+ // http://msdn.microsoft.com/en-us/library/ff635603.aspx
+ var hashOID asn1.ObjectIdentifier
+ hashOID, ok := hashOIDs[hashFunc]
+ if !ok {
+ return nil, x509.ErrUnsupportedAlgorithm
+ }
+
+ if !hashFunc.Available() {
+ return nil, x509.ErrUnsupportedAlgorithm
+ }
+ h := opts.hash().New()
+
+ var publicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+ }
+ if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
+ return nil, err
+ }
+
+ h.Write(publicKeyInfo.PublicKey.RightAlign())
+ issuerKeyHash := h.Sum(nil)
+
+ h.Reset()
+ h.Write(issuer.RawSubject)
+ issuerNameHash := h.Sum(nil)
+
+ return asn1.Marshal(ocspRequest{
+ tbsRequest{
+ Version: 0,
+ RequestList: []request{
+ {
+ Cert: certID{
+ pkix.AlgorithmIdentifier{
+ Algorithm: hashOID,
+ Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
+ },
+ issuerNameHash,
+ issuerKeyHash,
+ cert.SerialNumber,
+ },
+ },
+ },
+ },
+ })
+}
+
+// CreateResponse returns a DER-encoded OCSP response with the specified contents.
+// The fields in the response are populated as follows:
+//
+// The responder cert is used to populate the ResponderName field, and the certificate
+// itself is provided alongside the OCSP response signature.
+//
+ // The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields.
+// (SHA-1 is used for the hash function; this is not configurable.)
+//
+// The template is used to populate the SerialNumber, RevocationStatus, RevokedAt,
+// RevocationReason, ThisUpdate, and NextUpdate fields.
+//
+// The ProducedAt date is automatically set to the current date, to the nearest minute.
+func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) {
+ var publicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+ }
+ if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
+ return nil, err
+ }
+
+ h := sha1.New()
+ h.Write(publicKeyInfo.PublicKey.RightAlign())
+ issuerKeyHash := h.Sum(nil)
+
+ h.Reset()
+ h.Write(issuer.RawSubject)
+ issuerNameHash := h.Sum(nil)
+
+ innerResponse := singleResponse{
+ CertID: certID{
+ HashAlgorithm: pkix.AlgorithmIdentifier{
+ Algorithm: hashOIDs[crypto.SHA1],
+ Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */},
+ },
+ NameHash: issuerNameHash,
+ IssuerKeyHash: issuerKeyHash,
+ SerialNumber: template.SerialNumber,
+ },
+ ThisUpdate: template.ThisUpdate.UTC(),
+ NextUpdate: template.NextUpdate.UTC(),
+ SingleExtensions: template.ExtraExtensions,
+ }
+
+ switch template.Status {
+ case Good:
+ innerResponse.Good = true
+ case Unknown:
+ innerResponse.Unknown = true
+ case Revoked:
+ innerResponse.Revoked = revokedInfo{
+ RevocationTime: template.RevokedAt.UTC(),
+ Reason: asn1.Enumerated(template.RevocationReason),
+ }
+ }
+
+ responderName := asn1.RawValue{
+ Class: 2, // context-specific
+ Tag: 1, // explicit tag
+ IsCompound: true,
+ Bytes: responderCert.RawSubject,
+ }
+ tbsResponseData := responseData{
+ Version: 0,
+ RawResponderName: responderName,
+ ProducedAt: time.Now().Truncate(time.Minute).UTC(),
+ Responses: []singleResponse{innerResponse},
+ }
+
+ tbsResponseDataDER, err := asn1.Marshal(tbsResponseData)
+ if err != nil {
+ return nil, err
+ }
+
+ hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ responseHash := hashFunc.New()
+ responseHash.Write(tbsResponseDataDER)
+ signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc)
+ if err != nil {
+ return nil, err
+ }
+
+ response := basicResponse{
+ TBSResponseData: tbsResponseData,
+ SignatureAlgorithm: signatureAlgorithm,
+ Signature: asn1.BitString{
+ Bytes: signature,
+ BitLength: 8 * len(signature),
+ },
+ }
+ if template.Certificate != nil {
+ response.Certificates = []asn1.RawValue{
+ asn1.RawValue{FullBytes: template.Certificate.Raw},
+ }
+ }
+ responseDER, err := asn1.Marshal(response)
+ if err != nil {
+ return nil, err
+ }
+
+ return asn1.Marshal(responseASN1{
+ Status: asn1.Enumerated(Success),
+ Response: responseBytes{
+ ResponseType: idPKIXOCSPBasic,
+ Response: responseDER,
+ },
+ })
+}
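
A client-side sketch tying CreateRequest and ParseResponse together (illustrative, not part of this commit; it assumes cert and issuer are already-parsed *x509.Certificate values and that the certificate names an OCSP responder, and it omits response caching and nextUpdate handling):

package main

import (
	"bytes"
	"crypto/x509"
	"errors"
	"io/ioutil"
	"net/http"

	"golang.org/x/crypto/ocsp"
)

func checkRevocation(cert, issuer *x509.Certificate) (*ocsp.Response, error) {
	if len(cert.OCSPServer) == 0 {
		return nil, errors.New("certificate names no OCSP responder")
	}

	// nil opts means SHA-1 hashes, per the package default.
	der, err := ocsp.CreateRequest(cert, issuer, nil)
	if err != nil {
		return nil, err
	}

	httpResp, err := http.Post(cert.OCSPServer[0], "application/ocsp-request", bytes.NewReader(der))
	if err != nil {
		return nil, err
	}
	defer httpResp.Body.Close()

	body, err := ioutil.ReadAll(httpResp.Body)
	if err != nil {
		return nil, err
	}

	// issuer lets ParseResponse verify the responder's signature, or the
	// signature on any embedded responder certificate.
	return ocsp.ParseResponse(body, issuer)
}
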
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ocsp/ocsp_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ocsp/ocsp_test.go
new file mode 100644
index 00000000000..33868497381
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ocsp/ocsp_test.go
@@ -0,0 +1,584 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ocsp
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/sha1"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/hex"
+ "math/big"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestOCSPDecode(t *testing.T) {
+ responseBytes, _ := hex.DecodeString(ocspResponseHex)
+ resp, err := ParseResponse(responseBytes, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ expected := Response{
+ Status: Good,
+ SerialNumber: big.NewInt(0x1d0fa),
+ RevocationReason: Unspecified,
+ ThisUpdate: time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC),
+ NextUpdate: time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC),
+ }
+
+ if !reflect.DeepEqual(resp.ThisUpdate, expected.ThisUpdate) {
+ t.Errorf("resp.ThisUpdate: got %d, want %d", resp.ThisUpdate, expected.ThisUpdate)
+ }
+
+ if !reflect.DeepEqual(resp.NextUpdate, expected.NextUpdate) {
+ t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, expected.NextUpdate)
+ }
+
+ if resp.Status != expected.Status {
+ t.Errorf("resp.Status: got %d, want %d", resp.Status, expected.Status)
+ }
+
+ if resp.SerialNumber.Cmp(expected.SerialNumber) != 0 {
+ t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, expected.SerialNumber)
+ }
+
+ if resp.RevocationReason != expected.RevocationReason {
+ t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, expected.RevocationReason)
+ }
+}
+
+func TestOCSPDecodeWithoutCert(t *testing.T) {
+ responseBytes, _ := hex.DecodeString(ocspResponseWithoutCertHex)
+ _, err := ParseResponse(responseBytes, nil)
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestOCSPDecodeWithExtensions(t *testing.T) {
+ responseBytes, _ := hex.DecodeString(ocspResponseWithCriticalExtensionHex)
+ _, err := ParseResponse(responseBytes, nil)
+ if err == nil {
+ t.Error("ParseResponse should have failed on a response with a critical extension")
+ }
+
+ responseBytes, _ = hex.DecodeString(ocspResponseWithExtensionHex)
+ response, err := ParseResponse(responseBytes, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(response.Extensions) != 1 {
+ t.Errorf("len(response.Extensions): got %v, want %v", len(response.Extensions), 1)
+ }
+
+ extensionBytes := response.Extensions[0].Value
+ expectedBytes, _ := hex.DecodeString(ocspExtensionValueHex)
+ if !bytes.Equal(extensionBytes, expectedBytes) {
+ t.Errorf("response.Extensions[0]: got %x, want %x", extensionBytes, expectedBytes)
+ }
+}
+
+func TestOCSPSignature(t *testing.T) {
+ issuerCert, _ := hex.DecodeString(startComHex)
+ issuer, err := x509.ParseCertificate(issuerCert)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ response, _ := hex.DecodeString(ocspResponseHex)
+ if _, err := ParseResponse(response, issuer); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestOCSPRequest(t *testing.T) {
+ leafCert, _ := hex.DecodeString(leafCertHex)
+ cert, err := x509.ParseCertificate(leafCert)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ issuerCert, _ := hex.DecodeString(issuerCertHex)
+ issuer, err := x509.ParseCertificate(issuerCert)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ request, err := CreateRequest(cert, issuer, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ expectedBytes, _ := hex.DecodeString(ocspRequestHex)
+ if !bytes.Equal(request, expectedBytes) {
+ t.Errorf("request: got %x, wanted %x", request, expectedBytes)
+ }
+
+ decodedRequest, err := ParseRequest(expectedBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if decodedRequest.HashAlgorithm != crypto.SHA1 {
+ t.Errorf("request.HashAlgorithm: got %v, want %v", decodedRequest.HashAlgorithm, crypto.SHA1)
+ }
+
+ var publicKeyInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ PublicKey asn1.BitString
+ }
+ _, err = asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ h := sha1.New()
+ h.Write(publicKeyInfo.PublicKey.RightAlign())
+ issuerKeyHash := h.Sum(nil)
+
+ h.Reset()
+ h.Write(issuer.RawSubject)
+ issuerNameHash := h.Sum(nil)
+
+ if got := decodedRequest.IssuerKeyHash; !bytes.Equal(got, issuerKeyHash) {
+ t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerKeyHash)
+ }
+
+ if got := decodedRequest.IssuerNameHash; !bytes.Equal(got, issuerNameHash) {
+ t.Errorf("request.IssuerKeyHash: got %x, want %x", got, issuerNameHash)
+ }
+
+ if got := decodedRequest.SerialNumber; got.Cmp(cert.SerialNumber) != 0 {
+ t.Errorf("request.SerialNumber: got %x, want %x", got, cert.SerialNumber)
+ }
+}
+
+func TestOCSPResponse(t *testing.T) {
+ leafCert, _ := hex.DecodeString(leafCertHex)
+ leaf, err := x509.ParseCertificate(leafCert)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ issuerCert, _ := hex.DecodeString(issuerCertHex)
+ issuer, err := x509.ParseCertificate(issuerCert)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ responderCert, _ := hex.DecodeString(responderCertHex)
+ responder, err := x509.ParseCertificate(responderCert)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ responderPrivateKeyDER, _ := hex.DecodeString(responderPrivateKeyHex)
+ responderPrivateKey, err := x509.ParsePKCS1PrivateKey(responderPrivateKeyDER)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ extensionBytes, _ := hex.DecodeString(ocspExtensionValueHex)
+ extensions := []pkix.Extension{
+ pkix.Extension{
+ Id: ocspExtensionOID,
+ Critical: false,
+ Value: extensionBytes,
+ },
+ }
+
+ producedAt := time.Now().Truncate(time.Minute)
+ thisUpdate := time.Date(2010, 7, 7, 15, 1, 5, 0, time.UTC)
+ nextUpdate := time.Date(2010, 7, 7, 18, 35, 17, 0, time.UTC)
+ template := Response{
+ Status: Revoked,
+ SerialNumber: leaf.SerialNumber,
+ ThisUpdate: thisUpdate,
+ NextUpdate: nextUpdate,
+ RevokedAt: thisUpdate,
+ RevocationReason: KeyCompromise,
+ Certificate: responder,
+ ExtraExtensions: extensions,
+ }
+
+ responseBytes, err := CreateResponse(issuer, responder, template, responderPrivateKey)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ resp, err := ParseResponse(responseBytes, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(resp.ThisUpdate, template.ThisUpdate) {
+ t.Errorf("resp.ThisUpdate: got %d, want %d", resp.ThisUpdate, template.ThisUpdate)
+ }
+
+ if !reflect.DeepEqual(resp.NextUpdate, template.NextUpdate) {
+ t.Errorf("resp.NextUpdate: got %d, want %d", resp.NextUpdate, template.NextUpdate)
+ }
+
+ if !reflect.DeepEqual(resp.RevokedAt, template.RevokedAt) {
+ t.Errorf("resp.RevokedAt: got %d, want %d", resp.RevokedAt, template.RevokedAt)
+ }
+
+ if !reflect.DeepEqual(resp.Extensions, template.ExtraExtensions) {
+ t.Errorf("resp.Extensions: got %v, want %v", resp.Extensions, template.ExtraExtensions)
+ }
+
+ if !resp.ProducedAt.Equal(producedAt) {
+ t.Errorf("resp.ProducedAt: got %d, want %d", resp.ProducedAt, producedAt)
+ }
+
+ if resp.Status != template.Status {
+ t.Errorf("resp.Status: got %d, want %d", resp.Status, template.Status)
+ }
+
+ if resp.SerialNumber.Cmp(template.SerialNumber) != 0 {
+ t.Errorf("resp.SerialNumber: got %x, want %x", resp.SerialNumber, template.SerialNumber)
+ }
+
+ if resp.RevocationReason != template.RevocationReason {
+ t.Errorf("resp.RevocationReason: got %d, want %d", resp.RevocationReason, template.RevocationReason)
+ }
+}
+
+func TestErrorResponse(t *testing.T) {
+ responseBytes, _ := hex.DecodeString(errorResponseHex)
+ _, err := ParseResponse(responseBytes, nil)
+
+ respErr, ok := err.(ResponseError)
+ if !ok {
+ t.Fatalf("expected ResponseError from ParseResponse but got %#v", err)
+ }
+ if respErr.Status != Malformed {
+ t.Fatalf("expected Malformed status from ParseResponse but got %d", respErr.Status)
+ }
+}
+
+// This OCSP response was taken from Thawte's public OCSP responder.
+// To recreate:
+// $ openssl s_client -tls1 -showcerts -servername www.google.com -connect www.google.com:443
+// Copy and paste the first certificate into /tmp/cert.crt and the second into
+// /tmp/intermediate.crt
+// $ openssl ocsp -issuer /tmp/intermediate.crt -cert /tmp/cert.crt -url http://ocsp.thawte.com -resp_text -respout /tmp/ocsp.der
+// Then hex encode the result:
+// $ python -c 'print file("/tmp/ocsp.der", "r").read().encode("hex")'
+
+const ocspResponseHex = "308206bc0a0100a08206b5308206b106092b0601050507300101048206a23082069e3081" +
+ "c9a14e304c310b300906035504061302494c31163014060355040a130d5374617274436f" +
+ "6d204c74642e312530230603550403131c5374617274436f6d20436c6173732031204f43" +
+ "5350205369676e6572180f32303130303730373137333531375a30663064303c30090605" +
+ "2b0e03021a050004146568874f40750f016a3475625e1f5c93e5a26d580414eb4234d098" +
+ "b0ab9ff41b6b08f7cc642eef0e2c45020301d0fa8000180f323031303037303731353031" +
+ "30355aa011180f32303130303730373138333531375a300d06092a864886f70d01010505" +
+ "000382010100ab557ff070d1d7cebbb5f0ec91a15c3fed22eb2e1b8244f1b84545f013a4" +
+ "fb46214c5e3fbfbebb8a56acc2b9db19f68fd3c3201046b3824d5ba689f99864328710cb" +
+ "467195eb37d84f539e49f859316b32964dc3e47e36814ce94d6c56dd02733b1d0802f7ff" +
+ "4eebdbbd2927dcf580f16cbc290f91e81b53cb365e7223f1d6e20a88ea064104875e0145" +
+ "672b20fc14829d51ca122f5f5d77d3ad6c83889c55c7dc43680ba2fe3cef8b05dbcabdc0" +
+ "d3e09aaf9725597f8c858c2fa38c0d6aed2e6318194420dd1a1137445d13e1c97ab47896" +
+ "17a4e08925f46f867b72e3a4dc1f08cb870b2b0717f7207faa0ac512e628a029aba7457a" +
+ "e63dcf3281e2162d9349a08204ba308204b6308204b23082039aa003020102020101300d" +
+ "06092a864886f70d010105050030818c310b300906035504061302494c31163014060355" +
+ "040a130d5374617274436f6d204c74642e312b3029060355040b13225365637572652044" +
+ "69676974616c204365727469666963617465205369676e696e6731383036060355040313" +
+ "2f5374617274436f6d20436c6173732031205072696d61727920496e7465726d65646961" +
+ "746520536572766572204341301e170d3037313032353030323330365a170d3132313032" +
+ "333030323330365a304c310b300906035504061302494c31163014060355040a130d5374" +
+ "617274436f6d204c74642e312530230603550403131c5374617274436f6d20436c617373" +
+ "2031204f435350205369676e657230820122300d06092a864886f70d0101010500038201" +
+ "0f003082010a0282010100b9561b4c45318717178084e96e178df2255e18ed8d8ecc7c2b" +
+ "7b51a6c1c2e6bf0aa3603066f132fe10ae97b50e99fa24b83fc53dd2777496387d14e1c3" +
+ "a9b6a4933e2ac12413d085570a95b8147414a0bc007c7bcf222446ef7f1a156d7ea1c577" +
+ "fc5f0facdfd42eb0f5974990cb2f5cefebceef4d1bdc7ae5c1075c5a99a93171f2b0845b" +
+ "4ff0864e973fcfe32f9d7511ff87a3e943410c90a4493a306b6944359340a9ca96f02b66" +
+ "ce67f028df2980a6aaee8d5d5d452b8b0eb93f923cc1e23fcccbdbe7ffcb114d08fa7a6a" +
+ "3c404f825d1a0e715935cf623a8c7b59670014ed0622f6089a9447a7a19010f7fe58f841" +
+ "29a2765ea367824d1c3bb2fda308530203010001a382015c30820158300c0603551d1301" +
+ "01ff04023000300b0603551d0f0404030203a8301e0603551d250417301506082b060105" +
+ "0507030906092b0601050507300105301d0603551d0e0416041445e0a36695414c5dd449" +
+ "bc00e33cdcdbd2343e173081a80603551d230481a030819d8014eb4234d098b0ab9ff41b" +
+ "6b08f7cc642eef0e2c45a18181a47f307d310b300906035504061302494c311630140603" +
+ "55040a130d5374617274436f6d204c74642e312b3029060355040b132253656375726520" +
+ "4469676974616c204365727469666963617465205369676e696e67312930270603550403" +
+ "13205374617274436f6d2043657274696669636174696f6e20417574686f726974798201" +
+ "0a30230603551d12041c301a8618687474703a2f2f7777772e737461727473736c2e636f" +
+ "6d2f302c06096086480186f842010d041f161d5374617274436f6d205265766f63617469" +
+ "6f6e20417574686f72697479300d06092a864886f70d01010505000382010100182d2215" +
+ "8f0fc0291324fa8574c49bb8ff2835085adcbf7b7fc4191c397ab6951328253fffe1e5ec" +
+ "2a7da0d50fca1a404e6968481366939e666c0a6209073eca57973e2fefa9ed1718e8176f" +
+ "1d85527ff522c08db702e3b2b180f1cbff05d98128252cf0f450f7dd2772f4188047f19d" +
+ "c85317366f94bc52d60f453a550af58e308aaab00ced33040b62bf37f5b1ab2a4f7f0f80" +
+ "f763bf4d707bc8841d7ad9385ee2a4244469260b6f2bf085977af9074796048ecc2f9d48" +
+ "a1d24ce16e41a9941568fec5b42771e118f16c106a54ccc339a4b02166445a167902e75e" +
+ "6d8620b0825dcd18a069b90fd851d10fa8effd409deec02860d26d8d833f304b10669b42"
+
+const startComHex = "308206343082041ca003020102020118300d06092a864886f70d0101050500307d310b30" +
+ "0906035504061302494c31163014060355040a130d5374617274436f6d204c74642e312b" +
+ "3029060355040b1322536563757265204469676974616c20436572746966696361746520" +
+ "5369676e696e6731293027060355040313205374617274436f6d20436572746966696361" +
+ "74696f6e20417574686f72697479301e170d3037313032343230353431375a170d313731" +
+ "3032343230353431375a30818c310b300906035504061302494c31163014060355040a13" +
+ "0d5374617274436f6d204c74642e312b3029060355040b13225365637572652044696769" +
+ "74616c204365727469666963617465205369676e696e67313830360603550403132f5374" +
+ "617274436f6d20436c6173732031205072696d61727920496e7465726d65646961746520" +
+ "53657276657220434130820122300d06092a864886f70d01010105000382010f00308201" +
+ "0a0282010100b689c6acef09527807ac9263d0f44418188480561f91aee187fa3250b4d3" +
+ "4706f0e6075f700e10f71dc0ce103634855a0f92ac83c6ac58523fba38e8fce7a724e240" +
+ "a60876c0926e9e2a6d4d3f6e61200adb59ded27d63b33e46fefa215118d7cd30a6ed076e" +
+ "3b7087b4f9faebee823c056f92f7a4dc0a301e9373fe07cad75f809d225852ae06da8b87" +
+ "2369b0e42ad8ea83d2bdf371db705a280faf5a387045123f304dcd3baf17e50fcba0a95d" +
+ "48aab16150cb34cd3c5cc30be810c08c9bf0030362feb26c3e720eee1c432ac9480e5739" +
+ "c43121c810c12c87fe5495521f523c31129b7fe7c0a0a559d5e28f3ef0d5a8e1d77031a9" +
+ "c4b3cfaf6d532f06f4a70203010001a38201ad308201a9300f0603551d130101ff040530" +
+ "030101ff300e0603551d0f0101ff040403020106301d0603551d0e04160414eb4234d098" +
+ "b0ab9ff41b6b08f7cc642eef0e2c45301f0603551d230418301680144e0bef1aa4405ba5" +
+ "17698730ca346843d041aef2306606082b06010505070101045a3058302706082b060105" +
+ "05073001861b687474703a2f2f6f6373702e737461727473736c2e636f6d2f6361302d06" +
+ "082b060105050730028621687474703a2f2f7777772e737461727473736c2e636f6d2f73" +
+ "667363612e637274305b0603551d1f045430523027a025a0238621687474703a2f2f7777" +
+ "772e737461727473736c2e636f6d2f73667363612e63726c3027a025a023862168747470" +
+ "3a2f2f63726c2e737461727473736c2e636f6d2f73667363612e63726c3081800603551d" +
+ "20047930773075060b2b0601040181b5370102013066302e06082b060105050702011622" +
+ "687474703a2f2f7777772e737461727473736c2e636f6d2f706f6c6963792e7064663034" +
+ "06082b060105050702011628687474703a2f2f7777772e737461727473736c2e636f6d2f" +
+ "696e7465726d6564696174652e706466300d06092a864886f70d01010505000382020100" +
+ "2109493ea5886ee00b8b48da314d8ff75657a2e1d36257e9b556f38545753be5501f048b" +
+ "e6a05a3ee700ae85d0fbff200364cbad02e1c69172f8a34dd6dee8cc3fa18aa2e37c37a7" +
+ "c64f8f35d6f4d66e067bdd21d9cf56ffcb302249fe8904f385e5aaf1e71fe875904dddf9" +
+ "46f74234f745580c110d84b0c6da5d3ef9019ee7e1da5595be741c7bfc4d144fac7e5547" +
+ "7d7bf4a50d491e95e8f712c1ccff76a62547d0f37535be97b75816ebaa5c786fec5330af" +
+ "ea044dcca902e3f0b60412f630b1113d904e5664d7dc3c435f7339ef4baf87ebf6fe6888" +
+ "4472ead207c669b0c1a18bef1749d761b145485f3b2021e95bb2ccf4d7e931f50b15613b" +
+ "7a94e3ebd9bc7f94ae6ae3626296a8647cb887f399327e92a252bebbf865cfc9f230fc8b" +
+ "c1c2a696d75f89e15c3480f58f47072fb491bfb1a27e5f4b5ad05b9f248605515a690365" +
+ "434971c5e06f94346bf61bd8a9b04c7e53eb8f48dfca33b548fa364a1a53a6330cd089cd" +
+ "4915cd89313c90c072d7654b52358a461144b93d8e2865a63e799e5c084429adb035112e" +
+ "214eb8d2e7103e5d8483b3c3c2e4d2c6fd094b7409ddf1b3d3193e800da20b19f038e7c5" +
+ "c2afe223db61e29d5c6e2089492e236ab262c145b49faf8ba7f1223bf87de290d07a19fb" +
+ "4a4ce3d27d5f4a8303ed27d6239e6b8db459a2d9ef6c8229dd75193c3f4c108defbb7527" +
+ "d2ae83a7a8ce5ba7"
+
+const ocspResponseWithoutCertHex = "308201d40a0100a08201cd308201c906092b0601050507300101048201ba3082" +
+ "01b630819fa2160414884451ff502a695e2d88f421bad90cf2cecbea7c180f3230313330" +
+ "3631383037323434335a30743072304a300906052b0e03021a0500041448b60d38238df8" +
+ "456e4ee5843ea394111802979f0414884451ff502a695e2d88f421bad90cf2cecbea7c02" +
+ "1100f78b13b946fc9635d8ab49de9d2148218000180f3230313330363138303732343433" +
+ "5aa011180f32303133303632323037323434335a300d06092a864886f70d010105050003" +
+ "82010100103e18b3d297a5e7a6c07a4fc52ac46a15c0eba96f3be17f0ffe84de5b8c8e05" +
+ "5a8f577586a849dc4abd6440eb6fedde4622451e2823c1cbf3558b4e8184959c9fe96eff" +
+ "8bc5f95866c58c6d087519faabfdae37e11d9874f1bc0db292208f645dd848185e4dd38b" +
+ "6a8547dfa7b74d514a8470015719064d35476b95bebb03d4d2845c5ca15202d2784878f2" +
+ "0f904c24f09736f044609e9c271381713400e563023d212db422236440c6f377bbf24b2b" +
+ "9e7dec8698e36a8df68b7592ad3489fb2937afb90eb85d2aa96b81c94c25057dbd4759d9" +
+ "20a1a65c7f0b6427a224b3c98edd96b9b61f706099951188b0289555ad30a216fb774651" +
+ "5a35fca2e054dfa8"
+
+// PKIX nonce extension
+var ocspExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 2}
+var ocspExtensionValueHex = "0403000000"
+
+const ocspResponseWithCriticalExtensionHex = "308204fe0a0100a08204f7308204f306092b0601050507300101048204e4308204e03081" +
+ "dba003020100a11b3019311730150603550403130e4f43535020526573706f6e64657218" +
+ "0f32303136303130343137303130305a3081a53081a23049300906052b0e03021a050004" +
+ "14c0fe0278fc99188891b3f212e9c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b21317" +
+ "7e6f8d157cd4f60210017f77deb3bcbb235d44ccc7dba62e72a116180f32303130303730" +
+ "373135303130355aa0030a0101180f32303130303730373135303130355aa011180f3230" +
+ "3130303730373138333531375aa1193017301506092b06010505073001020101ff040504" +
+ "03000000300d06092a864886f70d01010b0500038201010031c730ca60a7a0d92d8e4010" +
+ "911b469de95b4d27e89de6537552436237967694f76f701cf6b45c932bd308bca4a8d092" +
+ "5c604ba94796903091d9e6c000178e72c1f0a24a277dd262835af5d17d3f9d7869606c9f" +
+ "e7c8e708a41645699895beee38bfa63bb46296683761c5d1d65439b8ab868dc3017c9eeb" +
+ "b70b82dbf3a31c55b457d48bb9e82b335ed49f445042eaf606b06a3e0639824924c89c63" +
+ "eccddfe85e6694314138b2536f5e15e07085d0f6e26d4b2f8244bab0d70de07283ac6384" +
+ "a0501fc3dea7cf0adfd4c7f34871080900e252ddc403e3f0265f2a704af905d3727504ed" +
+ "28f3214a219d898a022463c78439799ca81c8cbafdbcec34ea937cd6a08202ea308202e6" +
+ "308202e2308201caa003020102020101300d06092a864886f70d01010b05003019311730" +
+ "150603550403130e4f43535020526573706f6e646572301e170d31353031333031353530" +
+ "33335a170d3136303133303135353033335a3019311730150603550403130e4f43535020" +
+ "526573706f6e64657230820122300d06092a864886f70d01010105000382010f00308201" +
+ "0a0282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616e" +
+ "c5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbc" +
+ "bec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b72" +
+ "3350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b898" +
+ "9ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d" +
+ "285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e6" +
+ "55b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31" +
+ "a77dcf920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030" +
+ "130603551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d" +
+ "06092a864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab8612" +
+ "31c15fd5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d2288" +
+ "9064f4aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f3267" +
+ "09dce52c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156" +
+ "d67156e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff5" +
+ "9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" +
+ "66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" +
+ "3a25439a94299a65a709756c7a3e568be049d5c38839"
+
+const ocspResponseWithExtensionHex = "308204fb0a0100a08204f4308204f006092b0601050507300101048204e1308204dd3081" +
+ "d8a003020100a11b3019311730150603550403130e4f43535020526573706f6e64657218" +
+ "0f32303136303130343136353930305a3081a230819f3049300906052b0e03021a050004" +
+ "14c0fe0278fc99188891b3f212e9c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b21317" +
+ "7e6f8d157cd4f60210017f77deb3bcbb235d44ccc7dba62e72a116180f32303130303730" +
+ "373135303130355aa0030a0101180f32303130303730373135303130355aa011180f3230" +
+ "3130303730373138333531375aa1163014301206092b0601050507300102040504030000" +
+ "00300d06092a864886f70d01010b05000382010100c09a33e0b2324c852421bb83f85ac9" +
+ "9113f5426012bd2d2279a8166e9241d18a33c870894250622ffc7ed0c4601b16d624f90b" +
+ "779265442cdb6868cf40ab304ab4b66e7315ed02cf663b1601d1d4751772b31bc299db23" +
+ "9aebac78ed6797c06ed815a7a8d18d63cfbb609cafb47ec2e89e37db255216eb09307848" +
+ "d01be0a3e943653c78212b96ff524b74c9ec456b17cdfb950cc97645c577b2e09ff41dde" +
+ "b03afb3adaa381cc0f7c1d95663ef22a0f72f2c45613ae8e2b2d1efc96e8463c7d1d8a1d" +
+ "7e3b35df8fe73a301fc3f804b942b2b3afa337ff105fc1462b7b1c1d75eb4566c8665e59" +
+ "f80393b0adbf8004ff6c3327ed34f007cb4a3348a7d55e06e3a08202ea308202e6308202" +
+ "e2308201caa003020102020101300d06092a864886f70d01010b05003019311730150603" +
+ "550403130e4f43535020526573706f6e646572301e170d3135303133303135353033335a" +
+ "170d3136303133303135353033335a3019311730150603550403130e4f43535020526573" +
+ "706f6e64657230820122300d06092a864886f70d01010105000382010f003082010a0282" +
+ "010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616ec5265b" +
+ "56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbcbec75a" +
+ "70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b723350f0" +
+ "a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b8989ad0f6" +
+ "3aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d285b6a" +
+ "04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e655b104" +
+ "9a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31a77dcf" +
+ "920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030130603" +
+ "551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d06092a" +
+ "864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab861231c15f" +
+ "d5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d22889064f4" +
+ "aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f326709dce5" +
+ "2c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156d67156" +
+ "e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff59e2005" +
+ "d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf966705d" +
+ "e17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d93a2543" +
+ "9a94299a65a709756c7a3e568be049d5c38839"
+
+const ocspRequestHex = "3051304f304d304b3049300906052b0e03021a05000414c0fe0278fc99188891b3f212e9" +
+ "c7e1b21ab7bfc004140dfc1df0a9e0f01ce7f2b213177e6f8d157cd4f60210017f77deb3" +
+ "bcbb235d44ccc7dba62e72"
+
+const leafCertHex = "308203c830820331a0030201020210017f77deb3bcbb235d44ccc7dba62e72300d06092a" +
+ "864886f70d01010505003081ba311f301d060355040a1316566572695369676e20547275" +
+ "7374204e6574776f726b31173015060355040b130e566572695369676e2c20496e632e31" +
+ "333031060355040b132a566572695369676e20496e7465726e6174696f6e616c20536572" +
+ "766572204341202d20436c617373203331493047060355040b13407777772e7665726973" +
+ "69676e2e636f6d2f43505320496e636f72702e6279205265662e204c494142494c495459" +
+ "204c54442e286329393720566572695369676e301e170d3132303632313030303030305a" +
+ "170d3133313233313233353935395a3068310b3009060355040613025553311330110603" +
+ "550408130a43616c69666f726e6961311230100603550407130950616c6f20416c746f31" +
+ "173015060355040a130e46616365626f6f6b2c20496e632e311730150603550403140e2a" +
+ "2e66616365626f6f6b2e636f6d30819f300d06092a864886f70d010101050003818d0030" +
+ "818902818100ae94b171e2deccc1693e051063240102e0689ae83c39b6b3e74b97d48d7b" +
+ "23689100b0b496ee62f0e6d356bcf4aa0f50643402f5d1766aa972835a7564723f39bbef" +
+ "5290ded9bcdbf9d3d55dfad23aa03dc604c54d29cf1d4b3bdbd1a809cfae47b44c7eae17" +
+ "c5109bee24a9cf4a8d911bb0fd0415ae4c3f430aa12a557e2ae10203010001a382011e30" +
+ "82011a30090603551d130402300030440603551d20043d303b3039060b6086480186f845" +
+ "01071703302a302806082b06010505070201161c68747470733a2f2f7777772e76657269" +
+ "7369676e2e636f6d2f727061303c0603551d1f043530333031a02fa02d862b687474703a" +
+ "2f2f535652496e746c2d63726c2e766572697369676e2e636f6d2f535652496e746c2e63" +
+ "726c301d0603551d250416301406082b0601050507030106082b06010505070302300b06" +
+ "03551d0f0404030205a0303406082b0601050507010104283026302406082b0601050507" +
+ "30018618687474703a2f2f6f6373702e766572697369676e2e636f6d30270603551d1104" +
+ "20301e820e2a2e66616365626f6f6b2e636f6d820c66616365626f6f6b2e636f6d300d06" +
+ "092a864886f70d0101050500038181005b6c2b75f8ed30aa51aad36aba595e555141951f" +
+ "81a53b447910ac1f76ff78fc2781616b58f3122afc1c87010425e9ed43df1a7ba6498060" +
+ "67e2688af03db58c7df4ee03309a6afc247ccb134dc33e54c6bc1d5133a532a73273b1d7" +
+ "9cadc08e7e1a83116d34523340b0305427a21742827c98916698ee7eaf8c3bdd71700817"
+
+const issuerCertHex = "30820383308202eca003020102021046fcebbab4d02f0f926098233f93078f300d06092a" +
+ "864886f70d0101050500305f310b300906035504061302555331173015060355040a130e" +
+ "566572695369676e2c20496e632e31373035060355040b132e436c617373203320507562" +
+ "6c6963205072696d6172792043657274696669636174696f6e20417574686f7269747930" +
+ "1e170d3937303431373030303030305a170d3136313032343233353935395a3081ba311f" +
+ "301d060355040a1316566572695369676e205472757374204e6574776f726b3117301506" +
+ "0355040b130e566572695369676e2c20496e632e31333031060355040b132a5665726953" +
+ "69676e20496e7465726e6174696f6e616c20536572766572204341202d20436c61737320" +
+ "3331493047060355040b13407777772e766572697369676e2e636f6d2f43505320496e63" +
+ "6f72702e6279205265662e204c494142494c495459204c54442e28632939372056657269" +
+ "5369676e30819f300d06092a864886f70d010101050003818d0030818902818100d88280" +
+ "e8d619027d1f85183925a2652be1bfd405d3bce6363baaf04c6c5bb6e7aa3c734555b2f1" +
+ "bdea9742ed9a340a15d4a95cf54025ddd907c132b2756cc4cabba3fe56277143aa63f530" +
+ "3e9328e5faf1093bf3b74d4e39f75c495ab8c11dd3b28afe70309542cbfe2b518b5a3c3a" +
+ "f9224f90b202a7539c4f34e7ab04b27b6f0203010001a381e33081e0300f0603551d1304" +
+ "0830060101ff02010030440603551d20043d303b3039060b6086480186f8450107010130" +
+ "2a302806082b06010505070201161c68747470733a2f2f7777772e766572697369676e2e" +
+ "636f6d2f43505330340603551d25042d302b06082b0601050507030106082b0601050507" +
+ "030206096086480186f8420401060a6086480186f845010801300b0603551d0f04040302" +
+ "0106301106096086480186f842010104040302010630310603551d1f042a30283026a024" +
+ "a0228620687474703a2f2f63726c2e766572697369676e2e636f6d2f706361332e63726c" +
+ "300d06092a864886f70d010105050003818100408e4997968a73dd8e4def3e61b7caa062" +
+ "adf40e0abb753de26ed82cc7bff4b98c369bcaa2d09c724639f6a682036511c4bcbf2da6" +
+ "f5d93b0ab598fab378b91ef22b4c62d5fdb27a1ddf33fd73f9a5d82d8c2aead1fcb028b6" +
+ "e94948134b838a1b487b24f738de6f4154b8ab576b06dfc7a2d4a9f6f136628088f28b75" +
+ "d68071"
+
+// Key and certificate for the OCSP responder were not taken from the Thawte
+// responder, since CreateResponse requires that we have the private key.
+// Instead, they were generated randomly.
+const responderPrivateKeyHex = "308204a40201000282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef" +
+ "1099f0f6616ec5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df" +
+ "1701dc6ccfbcbec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074f" +
+ "fde8a99d5b723350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14" +
+ "c9fc0f27b8989ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa7" +
+ "7e7332971c7d285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f" +
+ "1290bafd97e655b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb9" +
+ "6222b12ace31a77dcf920334dc94581b02030100010282010100bcf0b93d7238bda329a8" +
+ "72e7149f61bcb37c154330ccb3f42a85c9002c2e2bdea039d77d8581cd19bed94078794e" +
+ "56293d601547fc4bf6a2f9002fe5772b92b21b254403b403585e3130cc99ccf08f0ef81a" +
+ "575b38f597ba4660448b54f44bfbb97072b5a2bf043bfeca828cf7741d13698e3f38162b" +
+ "679faa646b82abd9a72c5c7d722c5fc577a76d2c2daac588accad18516d1bbad10b0dfa2" +
+ "05cfe246b59e28608a43942e1b71b0c80498075121de5b900d727c31c42c78cf1db5c0aa" +
+ "5b491e10ea4ed5c0962aaf2ae025dd81fa4ce490d9d6b4a4465411d8e542fc88617e5695" +
+ "1aa4fc8ea166f2b4d0eb89ef17f2b206bd5f1014bf8fe0e71fe62f2cccf102818100f2dc" +
+ "ddf878d553286daad68bac4070a82ffec3dc4666a2750f47879eec913f91836f1d976b60" +
+ "daf9356e078446dafab5bd2e489e5d64f8572ba24a4ba4f3729b5e106c4dd831cc2497a7" +
+ "e6c7507df05cb64aeb1bbc81c1e340d58b5964cf39cff84ea30c29ec5d3f005ee1362698" +
+ "07395037955955655292c3e85f6187fa1f9502818100f4a33c102630840705f8c778a47b" +
+ "87e8da31e68809af981ac5e5999cf1551685d761cdf0d6520361b99aebd5777a940fa64d" +
+ "327c09fa63746fbb3247ec73a86edf115f1fe5c83598db803881ade71c33c6e956118345" +
+ "497b98b5e07bb5be75971465ec78f2f9467e1b74956ca9d4c7c3e314e742a72d8b33889c" +
+ "6c093a466cef0281801d3df0d02124766dd0be98349b19eb36a508c4e679e793ba0a8bef" +
+ "4d786888c1e9947078b1ea28938716677b4ad8c5052af12eb73ac194915264a913709a0b" +
+ "7b9f98d4a18edd781a13d49899f91c20dbd8eb2e61d991ba19b5cdc08893f5cb9d39e5a6" +
+ "0629ea16d426244673b1b3ee72bd30e41fac8395acac40077403de5efd028180050731dd" +
+ "d71b1a2b96c8d538ba90bb6b62c8b1c74c03aae9a9f59d21a7a82b0d572ef06fa9c807bf" +
+ "c373d6b30d809c7871df96510c577421d9860c7383fda0919ece19996b3ca13562159193" +
+ "c0c246471e287f975e8e57034e5136aaf44254e2650def3d51292474c515b1588969112e" +
+ "0a85cc77073e9d64d2c2fc497844284b02818100d71d63eabf416cf677401ebf965f8314" +
+ "120b568a57dd3bd9116c629c40dc0c6948bab3a13cc544c31c7da40e76132ef5dd3f7534" +
+ "45a635930c74326ae3df0edd1bfb1523e3aa259873ac7cf1ac31151ec8f37b528c275622" +
+ "48f99b8bed59fd4da2576aa6ee20d93a684900bf907e80c66d6e2261ae15e55284b4ed9d" +
+ "6bdaa059"
+
+const responderCertHex = "308202e2308201caa003020102020101300d06092a864886f70d01010b05003019311730" +
+ "150603550403130e4f43535020526573706f6e646572301e170d31353031333031353530" +
+ "33335a170d3136303133303135353033335a3019311730150603550403130e4f43535020" +
+ "526573706f6e64657230820122300d06092a864886f70d01010105000382010f00308201" +
+ "0a0282010100e8155f2d3e6f2e8d14c62a788bd462f9f844e7a6977c83ef1099f0f6616e" +
+ "c5265b56f356e62c5400f0b06a2e7945a82752c636df32a895152d6074df1701dc6ccfbc" +
+ "bec75a70bd2b55ae2be7e6cad3b5fd4cd5b7790ab401a436d3f5f346074ffde8a99d5b72" +
+ "3350f0a112076614b12ef79c78991b119453445acf2416ab0046b540db14c9fc0f27b898" +
+ "9ad0f63aa4b8aefc91aa8a72160c36307c60fec78a93d3fddf4259902aa77e7332971c7d" +
+ "285b6a04f648993c6922a3e9da9adf5f81508c3228791843e5d49f24db2f1290bafd97e6" +
+ "55b1049a199f652cd603c4fafa330c390b0da78fbbc67e8fa021cbd74eb96222b12ace31" +
+ "a77dcf920334dc94581b0203010001a3353033300e0603551d0f0101ff04040302078030" +
+ "130603551d25040c300a06082b06010505070309300c0603551d130101ff04023000300d" +
+ "06092a864886f70d01010b05000382010100718012761b5063e18f0dc44644d8e6ab8612" +
+ "31c15fd5357805425d82aec1de85bf6d3e30fce205e3e3b8b795bbe52e40a439286d2288" +
+ "9064f4aeeb150359b9425f1da51b3a5c939018555d13ac42c565a0603786a919328f3267" +
+ "09dce52c22ad958ecb7873b9771d1148b1c4be2efe80ba868919fc9f68b6090c2f33c156" +
+ "d67156e42766a50b5d51e79637b7e58af74c2a951b1e642fa7741fec982cc937de37eff5" +
+ "9e2005d5939bfc031589ca143e6e8ab83f40ee08cc20a6b4a95a318352c28d18528dcaf9" +
+ "66705de17afa19d6e8ae91ddf33179d16ebb6ac2c69cae8373d408ebf8c55308be6c04d9" +
+ "3a25439a94299a65a709756c7a3e568be049d5c38839"
+
+const errorResponseHex = "30030a0101"
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/armor.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/armor.go
new file mode 100644
index 00000000000..592d1864361
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/armor.go
@@ -0,0 +1,219 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package armor implements OpenPGP ASCII Armor; see RFC 4880. OpenPGP Armor is
+// very similar to PEM except that it has an additional CRC checksum.
+package armor // import "golang.org/x/crypto/openpgp/armor"
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/base64"
+ "golang.org/x/crypto/openpgp/errors"
+ "io"
+)
+
+// A Block represents an OpenPGP armored structure.
+//
+// The encoded form is:
+// -----BEGIN Type-----
+// Headers
+//
+// base64-encoded Bytes
+// '=' base64 encoded checksum
+// -----END Type-----
+// where Headers is a possibly empty sequence of Key: Value lines.
+//
+// Since the armored data can be very large, this package presents a streaming
+// interface.
+type Block struct {
+	Type string // The type, taken from the preamble (e.g. "PGP SIGNATURE").
+ Header map[string]string // Optional headers.
+ Body io.Reader // A Reader from which the contents can be read
+ lReader lineReader
+ oReader openpgpReader
+}
+
+var ArmorCorrupt error = errors.StructuralError("armor invalid")
+
+const crc24Init = 0xb704ce
+const crc24Poly = 0x1864cfb
+const crc24Mask = 0xffffff
+
+// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1.
+func crc24(crc uint32, d []byte) uint32 {
+ for _, b := range d {
+ crc ^= uint32(b) << 16
+ for i := 0; i < 8; i++ {
+ crc <<= 1
+ if crc&0x1000000 != 0 {
+ crc ^= crc24Poly
+ }
+ }
+ }
+ return crc
+}
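+
+// crc24Sum is a small convenience sketch added for exposition (it is not part
+// of the upstream file): it folds crc24 over a whole buffer starting from the
+// standard initial value and masks the result to 24 bits, giving the value
+// that is base64-encoded on the "=" checksum line of an armor block.
+func crc24Sum(d []byte) uint32 {
+	return crc24(crc24Init, d) & crc24Mask
+}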
+
+var armorStart = []byte("-----BEGIN ")
+var armorEnd = []byte("-----END ")
+var armorEndOfLine = []byte("-----")
+
+// lineReader wraps a line-based reader. It watches for the end of an armor
+// block and records the expected CRC value.
+type lineReader struct {
+ in *bufio.Reader
+ buf []byte
+ eof bool
+ crc uint32
+}
+
+func (l *lineReader) Read(p []byte) (n int, err error) {
+ if l.eof {
+ return 0, io.EOF
+ }
+
+ if len(l.buf) > 0 {
+ n = copy(p, l.buf)
+ l.buf = l.buf[n:]
+ return
+ }
+
+ line, isPrefix, err := l.in.ReadLine()
+ if err != nil {
+ return
+ }
+ if isPrefix {
+ return 0, ArmorCorrupt
+ }
+
+ if len(line) == 5 && line[0] == '=' {
+ // This is the checksum line
+ var expectedBytes [3]byte
+ var m int
+ m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:])
+ if m != 3 || err != nil {
+ return
+ }
+ l.crc = uint32(expectedBytes[0])<<16 |
+ uint32(expectedBytes[1])<<8 |
+ uint32(expectedBytes[2])
+
+ line, _, err = l.in.ReadLine()
+ if err != nil && err != io.EOF {
+ return
+ }
+ if !bytes.HasPrefix(line, armorEnd) {
+ return 0, ArmorCorrupt
+ }
+
+ l.eof = true
+ return 0, io.EOF
+ }
+
+ if len(line) > 96 {
+ return 0, ArmorCorrupt
+ }
+
+ n = copy(p, line)
+ bytesToSave := len(line) - n
+ if bytesToSave > 0 {
+ if cap(l.buf) < bytesToSave {
+ l.buf = make([]byte, 0, bytesToSave)
+ }
+ l.buf = l.buf[0:bytesToSave]
+ copy(l.buf, line[n:])
+ }
+
+ return
+}
+
+// openpgpReader passes Read calls to the underlying base64 decoder, but keeps
+// a running CRC of the resulting data and checks the CRC against the value
+// found by the lineReader at EOF.
+type openpgpReader struct {
+ lReader *lineReader
+ b64Reader io.Reader
+ currentCRC uint32
+}
+
+func (r *openpgpReader) Read(p []byte) (n int, err error) {
+ n, err = r.b64Reader.Read(p)
+ r.currentCRC = crc24(r.currentCRC, p[:n])
+
+ if err == io.EOF {
+ if r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
+ return 0, ArmorCorrupt
+ }
+ }
+
+ return
+}
+
+// Decode reads a PGP armored block from the given Reader. It will ignore
+// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
+// given Reader is not usable after calling this function: an arbitrary amount
+// of data may have been read past the end of the block.
+func Decode(in io.Reader) (p *Block, err error) {
+ r := bufio.NewReaderSize(in, 100)
+ var line []byte
+ ignoreNext := false
+
+TryNextBlock:
+ p = nil
+
+ // Skip leading garbage
+ for {
+ ignoreThis := ignoreNext
+ line, ignoreNext, err = r.ReadLine()
+ if err != nil {
+ return
+ }
+ if ignoreNext || ignoreThis {
+ continue
+ }
+ line = bytes.TrimSpace(line)
+ if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
+ break
+ }
+ }
+
+ p = new(Block)
+ p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
+ p.Header = make(map[string]string)
+ nextIsContinuation := false
+ var lastKey string
+
+ // Read headers
+ for {
+ isContinuation := nextIsContinuation
+ line, nextIsContinuation, err = r.ReadLine()
+ if err != nil {
+ p = nil
+ return
+ }
+ if isContinuation {
+ p.Header[lastKey] += string(line)
+ continue
+ }
+ line = bytes.TrimSpace(line)
+ if len(line) == 0 {
+ break
+ }
+
+ i := bytes.Index(line, []byte(": "))
+ if i == -1 {
+ goto TryNextBlock
+ }
+ lastKey = string(line[:i])
+ p.Header[lastKey] = string(line[i+2:])
+ }
+
+ p.lReader.in = r
+ p.oReader.currentCRC = crc24Init
+ p.oReader.lReader = &p.lReader
+ p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
+ p.Body = &p.oReader
+
+ return
+}
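+
+// readArmored is an illustrative sketch, not part of the upstream file: it
+// decodes the first armored block from in and drains Body into memory. Note
+// that the CRC24 trailer is only verified once Body reaches EOF, so a corrupt
+// checksum surfaces as ArmorCorrupt from io.Copy, not from Decode itself.
+func readArmored(in io.Reader) (string, []byte, error) {
+	p, err := Decode(in)
+	if err != nil {
+		return "", nil, err // io.EOF if no armored block was found
+	}
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, p.Body); err != nil {
+		return p.Type, nil, err
+	}
+	return p.Type, buf.Bytes(), nil
+}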
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/armor_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/armor_test.go
new file mode 100644
index 00000000000..9334e94e96c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/armor_test.go
@@ -0,0 +1,95 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package armor
+
+import (
+ "bytes"
+ "hash/adler32"
+ "io/ioutil"
+ "testing"
+)
+
+func TestDecodeEncode(t *testing.T) {
+ buf := bytes.NewBuffer([]byte(armorExample1))
+ result, err := Decode(buf)
+ if err != nil {
+ t.Error(err)
+ }
+ expectedType := "PGP SIGNATURE"
+ if result.Type != expectedType {
+ t.Errorf("result.Type: got:%s want:%s", result.Type, expectedType)
+ }
+ if len(result.Header) != 1 {
+ t.Errorf("len(result.Header): got:%d want:1", len(result.Header))
+ }
+ v, ok := result.Header["Version"]
+ if !ok || v != "GnuPG v1.4.10 (GNU/Linux)" {
+ t.Errorf("result.Header: got:%#v", result.Header)
+ }
+
+ contents, err := ioutil.ReadAll(result.Body)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if adler32.Checksum(contents) != 0x27b144be {
+ t.Errorf("contents: got: %x", contents)
+ }
+
+ buf = bytes.NewBuffer(nil)
+ w, err := Encode(buf, result.Type, result.Header)
+ if err != nil {
+ t.Error(err)
+ }
+ _, err = w.Write(contents)
+ if err != nil {
+ t.Error(err)
+ }
+ w.Close()
+
+ if !bytes.Equal(buf.Bytes(), []byte(armorExample1)) {
+ t.Errorf("got: %s\nwant: %s", string(buf.Bytes()), armorExample1)
+ }
+}
+
+func TestLongHeader(t *testing.T) {
+ buf := bytes.NewBuffer([]byte(armorLongLine))
+ result, err := Decode(buf)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ value, ok := result.Header["Version"]
+ if !ok {
+ t.Errorf("missing Version header")
+ }
+ if value != longValueExpected {
+ t.Errorf("got: %s want: %s", value, longValueExpected)
+ }
+}
+
+const armorExample1 = `-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.10 (GNU/Linux)
+
+iJwEAAECAAYFAk1Fv/0ACgkQo01+GMIMMbsYTwQAiAw+QAaNfY6WBdplZ/uMAccm
+4g+81QPmTSGHnetSb6WBiY13kVzK4HQiZH8JSkmmroMLuGeJwsRTEL4wbjRyUKEt
+p1xwUZDECs234F1xiG5enc5SGlRtP7foLBz9lOsjx+LEcA4sTl5/2eZR9zyFZqWW
+TxRjs+fJCIFuo71xb1g=
+=/teI
+-----END PGP SIGNATURE-----`
+
+const armorLongLine = `-----BEGIN PGP SIGNATURE-----
+Version: 0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz
+
+iQEcBAABAgAGBQJMtFESAAoJEKsQXJGvOPsVj40H/1WW6jaMXv4BW+1ueDSMDwM8
+kx1fLOXbVM5/Kn5LStZNt1jWWnpxdz7eq3uiqeCQjmqUoRde3YbB2EMnnwRbAhpp
+cacnAvy9ZQ78OTxUdNW1mhX5bS6q1MTEJnl+DcyigD70HG/yNNQD7sOPMdYQw0TA
+byQBwmLwmTsuZsrYqB68QyLHI+DUugn+kX6Hd2WDB62DKa2suoIUIHQQCd/ofwB3
+WfCYInXQKKOSxu2YOg2Eb4kLNhSMc1i9uKUWAH+sdgJh7NBgdoE4MaNtBFkHXRvv
+okWuf3+xA9ksp1npSY/mDvgHijmjvtpRDe6iUeqfCn8N9u9CBg8geANgaG8+QA4=
+=wfQG
+-----END PGP SIGNATURE-----`
+
+const longValueExpected = "0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz"
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/encode.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/encode.go
new file mode 100644
index 00000000000..6f07582c37c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/armor/encode.go
@@ -0,0 +1,160 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package armor
+
+import (
+ "encoding/base64"
+ "io"
+)
+
+var armorHeaderSep = []byte(": ")
+var blockEnd = []byte("\n=")
+var newline = []byte("\n")
+var armorEndOfLineOut = []byte("-----\n")
+
+// writeSlices writes its arguments to the given Writer.
+func writeSlices(out io.Writer, slices ...[]byte) (err error) {
+ for _, s := range slices {
+ _, err = out.Write(s)
+ if err != nil {
+ return err
+ }
+ }
+ return
+}
+
+// lineBreaker breaks data across several lines, all of the same byte length
+// (except possibly the last). Lines are broken with a single '\n'.
+type lineBreaker struct {
+ lineLength int
+ line []byte
+ used int
+ out io.Writer
+ haveWritten bool
+}
+
+func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
+ return &lineBreaker{
+ lineLength: lineLength,
+ line: make([]byte, lineLength),
+ used: 0,
+ out: out,
+ }
+}
+
+func (l *lineBreaker) Write(b []byte) (n int, err error) {
+ n = len(b)
+
+ if n == 0 {
+ return
+ }
+
+ if l.used == 0 && l.haveWritten {
+ _, err = l.out.Write([]byte{'\n'})
+ if err != nil {
+ return
+ }
+ }
+
+ if l.used+len(b) < l.lineLength {
+ l.used += copy(l.line[l.used:], b)
+ return
+ }
+
+ l.haveWritten = true
+ _, err = l.out.Write(l.line[0:l.used])
+ if err != nil {
+ return
+ }
+ excess := l.lineLength - l.used
+ l.used = 0
+
+ _, err = l.out.Write(b[0:excess])
+ if err != nil {
+ return
+ }
+
+ _, err = l.Write(b[excess:])
+ return
+}
+
+func (l *lineBreaker) Close() (err error) {
+ if l.used > 0 {
+ _, err = l.out.Write(l.line[0:l.used])
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
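+
+// exampleWrappedBase64 is a sketch of how these pieces compose (the helper is
+// not in the upstream file): Encode below builds exactly this writer stack,
+// so base64 output is wrapped at 64 columns on its way to out. Closing the
+// returned encoder flushes pending base64 state but not the lineBreaker,
+// which the encoding type below closes separately.
+func exampleWrappedBase64(out io.Writer) io.WriteCloser {
+	return base64.NewEncoder(base64.StdEncoding, newLineBreaker(out, 64))
+}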
+
+// encoding keeps track of a running CRC24 over the data which has been written
+// to it and outputs an OpenPGP checksum when closed, followed by an armor
+// trailer.
+//
+// It's built into a stack of io.Writers:
+// encoding -> base64 encoder -> lineBreaker -> out
+type encoding struct {
+ out io.Writer
+ breaker *lineBreaker
+ b64 io.WriteCloser
+ crc uint32
+ blockType []byte
+}
+
+func (e *encoding) Write(data []byte) (n int, err error) {
+ e.crc = crc24(e.crc, data)
+ return e.b64.Write(data)
+}
+
+func (e *encoding) Close() (err error) {
+ err = e.b64.Close()
+ if err != nil {
+ return
+ }
+ e.breaker.Close()
+
+ var checksumBytes [3]byte
+ checksumBytes[0] = byte(e.crc >> 16)
+ checksumBytes[1] = byte(e.crc >> 8)
+ checksumBytes[2] = byte(e.crc)
+
+ var b64ChecksumBytes [4]byte
+ base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
+
+ return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine)
+}
+
+// Encode returns a WriteCloser which will encode the data written to it in
+// OpenPGP armor.
+func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
+ bType := []byte(blockType)
+ err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
+ if err != nil {
+ return
+ }
+
+ for k, v := range headers {
+ err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
+ if err != nil {
+ return
+ }
+ }
+
+ _, err = out.Write(newline)
+ if err != nil {
+ return
+ }
+
+ e := &encoding{
+ out: out,
+ breaker: newLineBreaker(out, 64),
+ crc: crc24Init,
+ blockType: bType,
+ }
+ e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
+ return e, nil
+}
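+
+// armorBytes is an illustrative sketch, not part of the upstream file: the
+// usual Encode calling sequence. Close must be called to emit the "=" CRC24
+// checksum line and the END trailer, so its error is the one that matters
+// most.
+func armorBytes(out io.Writer, blockType string, payload []byte) error {
+	w, err := Encode(out, blockType, nil) // nil: no armor headers
+	if err != nil {
+		return err
+	}
+	if _, err := w.Write(payload); err != nil {
+		w.Close()
+		return err
+	}
+	return w.Close()
+}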
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/canonical_text.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/canonical_text.go
new file mode 100644
index 00000000000..e601e389f12
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/canonical_text.go
@@ -0,0 +1,59 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import "hash"
+
+// NewCanonicalTextHash reformats text written to it into the canonical
+// form and then applies the hash h. See RFC 4880, section 5.2.1.
+func NewCanonicalTextHash(h hash.Hash) hash.Hash {
+ return &canonicalTextHash{h, 0}
+}
+
+type canonicalTextHash struct {
+ h hash.Hash
+ s int
+}
+
+var newline = []byte{'\r', '\n'}
+
+func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
+ start := 0
+
+ for i, c := range buf {
+ switch cth.s {
+ case 0:
+ if c == '\r' {
+ cth.s = 1
+ } else if c == '\n' {
+ cth.h.Write(buf[start:i])
+ cth.h.Write(newline)
+ start = i + 1
+ }
+ case 1:
+ cth.s = 0
+ }
+ }
+
+ cth.h.Write(buf[start:])
+ return len(buf), nil
+}
+
+func (cth *canonicalTextHash) Sum(in []byte) []byte {
+ return cth.h.Sum(in)
+}
+
+func (cth *canonicalTextHash) Reset() {
+ cth.h.Reset()
+ cth.s = 0
+}
+
+func (cth *canonicalTextHash) Size() int {
+ return cth.h.Size()
+}
+
+func (cth *canonicalTextHash) BlockSize() int {
+ return cth.h.BlockSize()
+}
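+
+// canonicalDigest is an illustrative sketch, not part of the upstream file,
+// showing the intended use of NewCanonicalTextHash: writing "hello\nworld\n"
+// here yields the same digest as writing "hello\r\nworld\r\n" directly to h.
+func canonicalDigest(h hash.Hash, text []byte) []byte {
+	cth := NewCanonicalTextHash(h)
+	cth.Write(text)
+	return cth.Sum(nil)
+}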
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/canonical_text_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/canonical_text_test.go
new file mode 100644
index 00000000000..8f3ba2a8814
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/canonical_text_test.go
@@ -0,0 +1,52 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+ "bytes"
+ "testing"
+)
+
+type recordingHash struct {
+ buf *bytes.Buffer
+}
+
+func (r recordingHash) Write(b []byte) (n int, err error) {
+ return r.buf.Write(b)
+}
+
+func (r recordingHash) Sum(in []byte) []byte {
+ return append(in, r.buf.Bytes()...)
+}
+
+func (r recordingHash) Reset() {
+ panic("shouldn't be called")
+}
+
+func (r recordingHash) Size() int {
+ panic("shouldn't be called")
+}
+
+func (r recordingHash) BlockSize() int {
+ panic("shouldn't be called")
+}
+
+func testCanonicalText(t *testing.T, input, expected string) {
+ r := recordingHash{bytes.NewBuffer(nil)}
+ c := NewCanonicalTextHash(r)
+ c.Write([]byte(input))
+ result := c.Sum(nil)
+ if expected != string(result) {
+ t.Errorf("input: %x got: %x want: %x", input, result, expected)
+ }
+}
+
+func TestCanonicalText(t *testing.T) {
+ testCanonicalText(t, "foo\n", "foo\r\n")
+ testCanonicalText(t, "foo", "foo")
+ testCanonicalText(t, "foo\r\n", "foo\r\n")
+ testCanonicalText(t, "foo\r\nbar", "foo\r\nbar")
+ testCanonicalText(t, "foo\r\nbar\n\n", "foo\r\nbar\r\n\r\n")
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/clearsign/clearsign.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/clearsign/clearsign.go
new file mode 100644
index 00000000000..6454d22c7e3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/clearsign/clearsign.go
@@ -0,0 +1,372 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package clearsign generates and processes OpenPGP clear-signed data. See
+// RFC 4880, section 7.
+//
+// Clearsigned messages are cryptographically signed, but the contents of the
+// message are kept in plaintext so that they can be read without special tools.
+package clearsign // import "golang.org/x/crypto/openpgp/clearsign"
+
+import (
+ "bufio"
+ "bytes"
+ "crypto"
+ "hash"
+ "io"
+ "net/textproto"
+ "strconv"
+
+ "golang.org/x/crypto/openpgp/armor"
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+// A Block represents a clearsigned message. A signature on a Block can
+// be checked by passing Bytes into openpgp.CheckDetachedSignature.
+type Block struct {
+ Headers textproto.MIMEHeader // Optional message headers
+ Plaintext []byte // The original message text
+ Bytes []byte // The signed message
+ ArmoredSignature *armor.Block // The signature block
+}
+
+// start is the marker which denotes the beginning of a clearsigned message.
+var start = []byte("\n-----BEGIN PGP SIGNED MESSAGE-----")
+
+// dashEscape is prefixed to any lines that begin with a hyphen so that they
+// can't be confused with endText.
+var dashEscape = []byte("- ")
+
+// endText is a marker which denotes the end of the message and the start of
+// an armored signature.
+var endText = []byte("-----BEGIN PGP SIGNATURE-----")
+
+// end is a marker which denotes the end of the armored signature.
+var end = []byte("\n-----END PGP SIGNATURE-----")
+
+var crlf = []byte("\r\n")
+var lf = byte('\n')
+
+// getLine returns the first \r\n or \n delimited line from the given byte
+// array. The line does not include the \r\n or \n. The remainder of the byte
+// array (also not including the newline bytes) is also returned, and this will
+// always be smaller than the original argument.
+func getLine(data []byte) (line, rest []byte) {
+ i := bytes.Index(data, []byte{'\n'})
+ var j int
+ if i < 0 {
+ i = len(data)
+ j = i
+ } else {
+ j = i + 1
+ if i > 0 && data[i-1] == '\r' {
+ i--
+ }
+ }
+ return data[0:i], data[j:]
+}
+
+// Decode finds the first clearsigned message in data and returns it, as well
+// as the suffix of data which remains after the message.
+func Decode(data []byte) (b *Block, rest []byte) {
+ // start begins with a newline. However, at the very beginning of
+ // the byte array, we'll accept the start string without it.
+ rest = data
+ if bytes.HasPrefix(data, start[1:]) {
+ rest = rest[len(start)-1:]
+ } else if i := bytes.Index(data, start); i >= 0 {
+ rest = rest[i+len(start):]
+ } else {
+ return nil, data
+ }
+
+ // Consume the start line.
+ _, rest = getLine(rest)
+
+ var line []byte
+ b = &Block{
+ Headers: make(textproto.MIMEHeader),
+ }
+
+ // Next come a series of header lines.
+ for {
+ // This loop terminates because getLine's second result is
+ // always smaller than its argument.
+ if len(rest) == 0 {
+ return nil, data
+ }
+ // An empty line marks the end of the headers.
+ if line, rest = getLine(rest); len(line) == 0 {
+ break
+ }
+
+ i := bytes.Index(line, []byte{':'})
+ if i == -1 {
+ return nil, data
+ }
+
+ key, val := line[0:i], line[i+1:]
+ key = bytes.TrimSpace(key)
+ val = bytes.TrimSpace(val)
+ b.Headers.Add(string(key), string(val))
+ }
+
+ firstLine := true
+ for {
+ start := rest
+
+ line, rest = getLine(rest)
+ if bytes.Equal(line, endText) {
+ // Back up to the start of the line because armor expects to see the
+ // header line.
+ rest = start
+ break
+ }
+
+ // The final CRLF isn't included in the hash so we don't write it until
+ // we've seen the next line.
+ if firstLine {
+ firstLine = false
+ } else {
+ b.Bytes = append(b.Bytes, crlf...)
+ }
+
+ if bytes.HasPrefix(line, dashEscape) {
+ line = line[2:]
+ }
+ line = bytes.TrimRight(line, " \t")
+ b.Bytes = append(b.Bytes, line...)
+
+ b.Plaintext = append(b.Plaintext, line...)
+ b.Plaintext = append(b.Plaintext, lf)
+ }
+
+ // We want to find the extent of the armored data (including any newlines at
+ // the end).
+ i := bytes.Index(rest, end)
+ if i == -1 {
+ return nil, data
+ }
+ i += len(end)
+ for i < len(rest) && (rest[i] == '\r' || rest[i] == '\n') {
+ i++
+ }
+ armored := rest[:i]
+ rest = rest[i:]
+
+ var err error
+ b.ArmoredSignature, err = armor.Decode(bytes.NewBuffer(armored))
+ if err != nil {
+ return nil, data
+ }
+
+ return b, rest
+}
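+
+// decodeAllClearsigned is an illustrative sketch, not part of the upstream
+// file: since Decode returns the unconsumed suffix, scanning a buffer for
+// every clearsigned message is a simple loop. It terminates because a nil
+// Block is returned once no further message is found.
+func decodeAllClearsigned(data []byte) []*Block {
+	var blocks []*Block
+	for {
+		b, rest := Decode(data)
+		if b == nil {
+			return blocks
+		}
+		blocks = append(blocks, b)
+		data = rest
+	}
+}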
+
+// A dashEscaper is an io.WriteCloser which processes the body of a clear-signed
+// message. The clear-signed message is written to buffered, and a hash, suitable
+// for signing, is maintained in h.
+//
+// When closed, an armored signature is created and written to complete the
+// message.
+type dashEscaper struct {
+ buffered *bufio.Writer
+ h hash.Hash
+ hashType crypto.Hash
+
+ atBeginningOfLine bool
+ isFirstLine bool
+
+ whitespace []byte
+ byteBuf []byte // a one byte buffer to save allocations
+
+ privateKey *packet.PrivateKey
+ config *packet.Config
+}
+
+func (d *dashEscaper) Write(data []byte) (n int, err error) {
+ for _, b := range data {
+ d.byteBuf[0] = b
+
+ if d.atBeginningOfLine {
+ // The final CRLF isn't included in the hash so we have to wait
+ // until this point (the start of the next line) before writing it.
+ if !d.isFirstLine {
+ d.h.Write(crlf)
+ }
+ d.isFirstLine = false
+ }
+
+ // Any whitespace at the end of the line has to be removed so we
+ // buffer it until we find out whether there's more on this line.
+ if b == ' ' || b == '\t' || b == '\r' {
+ d.whitespace = append(d.whitespace, b)
+ d.atBeginningOfLine = false
+ continue
+ }
+
+ if d.atBeginningOfLine {
+ // At the beginning of a line, hyphens have to be escaped.
+ if b == '-' {
+ // The signature isn't calculated over the dash-escaped text so
+ // the escape is only written to buffered.
+ if _, err = d.buffered.Write(dashEscape); err != nil {
+ return
+ }
+ d.h.Write(d.byteBuf)
+ d.atBeginningOfLine = false
+ } else if b == '\n' {
+ // Nothing to do because we delay writing CRLF to the hash.
+ } else {
+ d.h.Write(d.byteBuf)
+ d.atBeginningOfLine = false
+ }
+ if err = d.buffered.WriteByte(b); err != nil {
+ return
+ }
+ } else {
+ if b == '\n' {
+ // We got a raw \n. Drop any trailing whitespace and write a
+ // CRLF.
+ d.whitespace = d.whitespace[:0]
+ // We delay writing CRLF to the hash until the start of the
+ // next line.
+ if err = d.buffered.WriteByte(b); err != nil {
+ return
+ }
+ d.atBeginningOfLine = true
+ } else {
+ // Any buffered whitespace wasn't at the end of the line so
+ // we need to write it out.
+ if len(d.whitespace) > 0 {
+ d.h.Write(d.whitespace)
+ if _, err = d.buffered.Write(d.whitespace); err != nil {
+ return
+ }
+ d.whitespace = d.whitespace[:0]
+ }
+ d.h.Write(d.byteBuf)
+ if err = d.buffered.WriteByte(b); err != nil {
+ return
+ }
+ }
+ }
+ }
+
+ n = len(data)
+ return
+}
+
+func (d *dashEscaper) Close() (err error) {
+ if !d.atBeginningOfLine {
+ if err = d.buffered.WriteByte(lf); err != nil {
+ return
+ }
+ }
+ sig := new(packet.Signature)
+ sig.SigType = packet.SigTypeText
+ sig.PubKeyAlgo = d.privateKey.PubKeyAlgo
+ sig.Hash = d.hashType
+ sig.CreationTime = d.config.Now()
+ sig.IssuerKeyId = &d.privateKey.KeyId
+
+ if err = sig.Sign(d.h, d.privateKey, d.config); err != nil {
+ return
+ }
+
+ out, err := armor.Encode(d.buffered, "PGP SIGNATURE", nil)
+ if err != nil {
+ return
+ }
+
+ if err = sig.Serialize(out); err != nil {
+ return
+ }
+ if err = out.Close(); err != nil {
+ return
+ }
+ if err = d.buffered.Flush(); err != nil {
+ return
+ }
+ return
+}
+
+// Encode returns a WriteCloser which will clear-sign a message with privateKey
+// and write it to w. If config is nil, sensible defaults are used.
+func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) {
+ if privateKey.Encrypted {
+ return nil, errors.InvalidArgumentError("signing key is encrypted")
+ }
+
+ hashType := config.Hash()
+ name := nameOfHash(hashType)
+ if len(name) == 0 {
+ return nil, errors.UnsupportedError("unknown hash type: " + strconv.Itoa(int(hashType)))
+ }
+
+ if !hashType.Available() {
+ return nil, errors.UnsupportedError("unsupported hash type: " + strconv.Itoa(int(hashType)))
+ }
+ h := hashType.New()
+
+ buffered := bufio.NewWriter(w)
+ // start has a \n at the beginning that we don't want here.
+ if _, err = buffered.Write(start[1:]); err != nil {
+ return
+ }
+ if err = buffered.WriteByte(lf); err != nil {
+ return
+ }
+ if _, err = buffered.WriteString("Hash: "); err != nil {
+ return
+ }
+ if _, err = buffered.WriteString(name); err != nil {
+ return
+ }
+ if err = buffered.WriteByte(lf); err != nil {
+ return
+ }
+ if err = buffered.WriteByte(lf); err != nil {
+ return
+ }
+
+ plaintext = &dashEscaper{
+ buffered: buffered,
+ h: h,
+ hashType: hashType,
+
+ atBeginningOfLine: true,
+ isFirstLine: true,
+
+ byteBuf: make([]byte, 1),
+
+ privateKey: privateKey,
+ config: config,
+ }
+
+ return
+}
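+
+// clearsignMessage is an illustrative sketch, not part of the upstream file,
+// showing the intended Encode/Write/Close sequence. Close both finalizes the
+// signature hash and appends the armored signature block, so its error must
+// be checked.
+func clearsignMessage(w io.Writer, key *packet.PrivateKey, msg []byte) error {
+	plaintext, err := Encode(w, key, nil) // nil config selects the default hash
+	if err != nil {
+		return err
+	}
+	if _, err := plaintext.Write(msg); err != nil {
+		plaintext.Close()
+		return err
+	}
+	return plaintext.Close()
+}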
+
+// nameOfHash returns the OpenPGP name for the given hash, or the empty string
+// if the name isn't known. See RFC 4880, section 9.4.
+func nameOfHash(h crypto.Hash) string {
+ switch h {
+ case crypto.MD5:
+ return "MD5"
+ case crypto.SHA1:
+ return "SHA1"
+ case crypto.RIPEMD160:
+ return "RIPEMD160"
+ case crypto.SHA224:
+ return "SHA224"
+ case crypto.SHA256:
+ return "SHA256"
+ case crypto.SHA384:
+ return "SHA384"
+ case crypto.SHA512:
+ return "SHA512"
+ }
+ return ""
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go
new file mode 100644
index 00000000000..406377c6712
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/clearsign/clearsign_test.go
@@ -0,0 +1,197 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package clearsign
+
+import (
+ "bytes"
+ "golang.org/x/crypto/openpgp"
+ "testing"
+)
+
+func testParse(t *testing.T, input []byte, expected, expectedPlaintext string) {
+ b, rest := Decode(input)
+ if b == nil {
+ t.Fatal("failed to decode clearsign message")
+ }
+ if !bytes.Equal(rest, []byte("trailing")) {
+ t.Errorf("unexpected remaining bytes returned: %s", string(rest))
+ }
+ if b.ArmoredSignature.Type != "PGP SIGNATURE" {
+ t.Errorf("bad armor type, got:%s, want:PGP SIGNATURE", b.ArmoredSignature.Type)
+ }
+ if !bytes.Equal(b.Bytes, []byte(expected)) {
+ t.Errorf("bad body, got:%x want:%x", b.Bytes, expected)
+ }
+
+ if !bytes.Equal(b.Plaintext, []byte(expectedPlaintext)) {
+ t.Errorf("bad plaintext, got:%x want:%x", b.Plaintext, expectedPlaintext)
+ }
+
+ keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(signingKey))
+ if err != nil {
+ t.Errorf("failed to parse public key: %s", err)
+ }
+
+ if _, err := openpgp.CheckDetachedSignature(keyring, bytes.NewBuffer(b.Bytes), b.ArmoredSignature.Body); err != nil {
+ t.Errorf("failed to check signature: %s", err)
+ }
+}
+
+func TestParse(t *testing.T) {
+ testParse(t, clearsignInput, "Hello world\r\nline 2", "Hello world\nline 2\n")
+ testParse(t, clearsignInput2, "\r\n\r\n(This message has a couple of blank lines at the start and end.)\r\n\r\n", "\n\n(This message has a couple of blank lines at the start and end.)\n\n\n")
+}
+
+func TestParseWithNoNewlineAtEnd(t *testing.T) {
+ input := clearsignInput
+ input = input[:len(input)-len("trailing")-1]
+ b, rest := Decode(input)
+ if b == nil {
+ t.Fatal("failed to decode clearsign message")
+ }
+ if len(rest) > 0 {
+ t.Errorf("unexpected remaining bytes returned: %s", string(rest))
+ }
+}
+
+var signingTests = []struct {
+ in, signed, plaintext string
+}{
+ {"", "", ""},
+ {"a", "a", "a\n"},
+ {"a\n", "a", "a\n"},
+ {"-a\n", "-a", "-a\n"},
+ {"--a\nb", "--a\r\nb", "--a\nb\n"},
+ // leading whitespace
+ {" a\n", " a", " a\n"},
+ {" a\n", " a", " a\n"},
+ // trailing whitespace (should be stripped)
+ {"a \n", "a", "a\n"},
+ {"a ", "a", "a\n"},
+ // whitespace-only lines (should be stripped)
+ {" \n", "", "\n"},
+ {" ", "", "\n"},
+ {"a\n \n \nb\n", "a\r\n\r\n\r\nb", "a\n\n\nb\n"},
+}
+
+func TestSigning(t *testing.T) {
+ keyring, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(signingKey))
+ if err != nil {
+ t.Errorf("failed to parse public key: %s", err)
+ }
+
+ for i, test := range signingTests {
+ var buf bytes.Buffer
+
+ plaintext, err := Encode(&buf, keyring[0].PrivateKey, nil)
+ if err != nil {
+ t.Errorf("#%d: error from Encode: %s", i, err)
+ continue
+ }
+ if _, err := plaintext.Write([]byte(test.in)); err != nil {
+ t.Errorf("#%d: error from Write: %s", i, err)
+ continue
+ }
+ if err := plaintext.Close(); err != nil {
+ t.Fatalf("#%d: error from Close: %s", i, err)
+ continue
+ }
+
+ b, _ := Decode(buf.Bytes())
+ if b == nil {
+ t.Errorf("#%d: failed to decode clearsign message", i)
+ continue
+ }
+ if !bytes.Equal(b.Bytes, []byte(test.signed)) {
+ t.Errorf("#%d: bad result, got:%x, want:%x", i, b.Bytes, test.signed)
+ continue
+ }
+ if !bytes.Equal(b.Plaintext, []byte(test.plaintext)) {
+ t.Errorf("#%d: bad result, got:%x, want:%x", i, b.Plaintext, test.plaintext)
+ continue
+ }
+
+ if _, err := openpgp.CheckDetachedSignature(keyring, bytes.NewBuffer(b.Bytes), b.ArmoredSignature.Body); err != nil {
+ t.Errorf("#%d: failed to check signature: %s", i, err)
+ }
+ }
+}
+
+var clearsignInput = []byte(`
+;lasjlkfdsa
+
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA1
+
+Hello world
+line 2
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.10 (GNU/Linux)
+
+iJwEAQECAAYFAk8kMuEACgkQO9o98PRieSpMsAQAhmY/vwmNpflrPgmfWsYhk5O8
+pjnBUzZwqTDoDeINjZEoPDSpQAHGhjFjgaDx/Gj4fAl0dM4D0wuUEBb6QOrwflog
+2A2k9kfSOMOtk0IH/H5VuFN1Mie9L/erYXjTQIptv9t9J7NoRBMU0QOOaFU0JaO9
+MyTpno24AjIAGb+mH1U=
+=hIJ6
+-----END PGP SIGNATURE-----
+trailing`)
+
+var clearsignInput2 = []byte(`
+asdlfkjasdlkfjsadf
+
+-----BEGIN PGP SIGNED MESSAGE-----
+Hash: SHA256
+
+
+
+(This message has a couple of blank lines at the start and end.)
+
+
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+iJwEAQEIAAYFAlPpSREACgkQO9o98PRieSpZTAP+M8QUoCt/7Rf3YbXPcdzIL32v
+pt1I+cMNeopzfLy0u4ioEFi8s5VkwpL1AFmirvgViCwlf82inoRxzZRiW05JQ5LI
+ESEzeCoy2LIdRCQ2hcrG8pIUPzUO4TqO5D/dMbdHwNH4h5nNmGJUAEG6FpURlPm+
+qZg6BaTvOxepqOxnhVU=
+=e+C6
+-----END PGP SIGNATURE-----
+
+trailing`)
+
+var signingKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.10 (GNU/Linux)
+
+lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp
+idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn
+vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB
+AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X
+0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL
+IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk
+VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn
+gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9
+TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx
+q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz
+dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA
+CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1
+ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+
+eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid
+AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV
+bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK
+/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA
+A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX
+TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc
+lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6
+rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN
+oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8
+QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU
+nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC
+AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp
+BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad
+AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL
+VrM0m72/jnpKo04=
+=zNCn
+-----END PGP PRIVATE KEY BLOCK-----
+`
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/elgamal/elgamal.go
new file mode 100644
index 00000000000..73f4fe37859
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/elgamal/elgamal.go
@@ -0,0 +1,122 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
+// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
+// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
+// n. 4, 1985, pp. 469-472.
+//
+// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
+// unsuitable for other protocols. RSA should be used in preference in any
+// case.
+package elgamal // import "golang.org/x/crypto/openpgp/elgamal"
+
+import (
+ "crypto/rand"
+ "crypto/subtle"
+ "errors"
+ "io"
+ "math/big"
+)
+
+// PublicKey represents an ElGamal public key.
+type PublicKey struct {
+ G, P, Y *big.Int
+}
+
+// PrivateKey represents an ElGamal private key.
+type PrivateKey struct {
+ PublicKey
+ X *big.Int
+}
+
+// Encrypt encrypts the given message to the given public key. The result is a
+// pair of integers. Errors can result from reading from the random source, or
+// because msg is too large to be encrypted to the public key.
+func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
+ pLen := (pub.P.BitLen() + 7) / 8
+ if len(msg) > pLen-11 {
+ err = errors.New("elgamal: message too long")
+ return
+ }
+
+ // EM = 0x02 || PS || 0x00 || M
+ em := make([]byte, pLen-1)
+ em[0] = 2
+ ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
+ err = nonZeroRandomBytes(ps, random)
+ if err != nil {
+ return
+ }
+ em[len(em)-len(msg)-1] = 0
+ copy(mm, msg)
+
+ m := new(big.Int).SetBytes(em)
+
+ k, err := rand.Int(random, pub.P)
+ if err != nil {
+ return
+ }
+
+ c1 = new(big.Int).Exp(pub.G, k, pub.P)
+ s := new(big.Int).Exp(pub.Y, k, pub.P)
+ c2 = s.Mul(s, m)
+ c2.Mod(c2, pub.P)
+
+ return
+}
+
+// Decrypt takes two integers, resulting from an ElGamal encryption, and
+// returns the plaintext of the message. An error can result only if the
+// ciphertext is invalid. Users should keep in mind that this is a padding
+// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
+// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks
+// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel
+// Bleichenbacher, Advances in Cryptology (Crypto '98).
+func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
+ s := new(big.Int).Exp(c1, priv.X, priv.P)
+ s.ModInverse(s, priv.P)
+ s.Mul(s, c2)
+ s.Mod(s, priv.P)
+ em := s.Bytes()
+
+ firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
+
+ // The remainder of the plaintext must be a string of non-zero random
+ // octets, followed by a 0, followed by the message.
+ // lookingForIndex: 1 iff we are still looking for the zero.
+ // index: the offset of the first zero byte.
+ var lookingForIndex, index int
+ lookingForIndex = 1
+
+ for i := 1; i < len(em); i++ {
+ equals0 := subtle.ConstantTimeByteEq(em[i], 0)
+ index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
+ lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
+ }
+
+ if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
+ return nil, errors.New("elgamal: decryption error")
+ }
+ return em[index+1:], nil
+}
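+
+// roundTrip is an illustrative sketch, not part of the upstream file: an
+// encrypt-then-decrypt pass as exercised by the package test. random would
+// typically be crypto/rand.Reader, and msg must fit within P's byte length
+// minus the eleven bytes of PKCS#1 v1.5 padding overhead.
+func roundTrip(random io.Reader, priv *PrivateKey, msg []byte) ([]byte, error) {
+	c1, c2, err := Encrypt(random, &priv.PublicKey, msg)
+	if err != nil {
+		return nil, err
+	}
+	return Decrypt(priv, c1, c2)
+}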
+
+// nonZeroRandomBytes fills the given slice with non-zero random octets.
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
+ _, err = io.ReadFull(rand, s)
+ if err != nil {
+ return
+ }
+
+ for i := 0; i < len(s); i++ {
+ for s[i] == 0 {
+ _, err = io.ReadFull(rand, s[i:i+1])
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go
new file mode 100644
index 00000000000..c4f99f5c48c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/elgamal/elgamal_test.go
@@ -0,0 +1,49 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elgamal
+
+import (
+ "bytes"
+ "crypto/rand"
+ "math/big"
+ "testing"
+)
+
+// This is the 1024-bit MODP group from RFC 5114, section 2.1:
+const primeHex = "B10B8F96A080E01DDE92DE5EAE5D54EC52C99FBCFB06A3C69A6A9DCA52D23B616073E28675A23D189838EF1E2EE652C013ECB4AEA906112324975C3CD49B83BFACCBDD7D90C4BD7098488E9C219A73724EFFD6FAE5644738FAA31A4FF55BCCC0A151AF5F0DC8B4BD45BF37DF365C1A65E68CFDA76D4DA708DF1FB2BC2E4A4371"
+
+const generatorHex = "A4D1CBD5C3FD34126765A442EFB99905F8104DD258AC507FD6406CFF14266D31266FEA1E5C41564B777E690F5504F213160217B4B01B886A5E91547F9E2749F4D7FBD7D3B9A92EE1909D0D2263F80A76A6A24C087A091F531DBF0A0169B6A28AD662A4D18E73AFA32D779D5918D08BC8858F4DCEF97C2A24855E6EEB22B3B2E5"
+
+func fromHex(hex string) *big.Int {
+ n, ok := new(big.Int).SetString(hex, 16)
+ if !ok {
+ panic("failed to parse hex number")
+ }
+ return n
+}
+
+func TestEncryptDecrypt(t *testing.T) {
+ priv := &PrivateKey{
+ PublicKey: PublicKey{
+ G: fromHex(generatorHex),
+ P: fromHex(primeHex),
+ },
+ X: fromHex("42"),
+ }
+ priv.Y = new(big.Int).Exp(priv.G, priv.X, priv.P)
+
+ message := []byte("hello world")
+ c1, c2, err := Encrypt(rand.Reader, &priv.PublicKey, message)
+ if err != nil {
+ t.Errorf("error encrypting: %s", err)
+ }
+ message2, err := Decrypt(priv, c1, c2)
+ if err != nil {
+ t.Errorf("error decrypting: %s", err)
+ }
+ if !bytes.Equal(message2, message) {
+ t.Errorf("decryption failed, got: %x, want: %x", message2, message)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/errors/errors.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/errors/errors.go
new file mode 100644
index 00000000000..eb0550b2d04
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/errors/errors.go
@@ -0,0 +1,72 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package errors contains common error types for the OpenPGP packages.
+package errors // import "golang.org/x/crypto/openpgp/errors"
+
+import (
+ "strconv"
+)
+
+// A StructuralError is returned when OpenPGP data is found to be syntactically
+// invalid.
+type StructuralError string
+
+func (s StructuralError) Error() string {
+ return "openpgp: invalid data: " + string(s)
+}
+
+// UnsupportedError indicates that, although the OpenPGP data is valid, it
+// makes use of currently unimplemented features.
+type UnsupportedError string
+
+func (s UnsupportedError) Error() string {
+ return "openpgp: unsupported feature: " + string(s)
+}
+
+// InvalidArgumentError indicates that the caller is in error and passed an
+// incorrect value.
+type InvalidArgumentError string
+
+func (i InvalidArgumentError) Error() string {
+ return "openpgp: invalid argument: " + string(i)
+}
+
+// SignatureError indicates that a syntactically valid signature failed to
+// validate.
+type SignatureError string
+
+func (b SignatureError) Error() string {
+ return "openpgp: invalid signature: " + string(b)
+}
+
+type keyIncorrectError int
+
+func (ki keyIncorrectError) Error() string {
+ return "openpgp: incorrect key"
+}
+
+var ErrKeyIncorrect error = keyIncorrectError(0)
+
+type unknownIssuerError int
+
+func (unknownIssuerError) Error() string {
+ return "openpgp: signature made by unknown entity"
+}
+
+var ErrUnknownIssuer error = unknownIssuerError(0)
+
+type keyRevokedError int
+
+func (keyRevokedError) Error() string {
+ return "openpgp: signature made by revoked key"
+}
+
+var ErrKeyRevoked error = keyRevokedError(0)
+
+type UnknownPacketTypeError uint8
+
+func (upte UnknownPacketTypeError) Error() string {
+ return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/keys.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/keys.go
new file mode 100644
index 00000000000..bfe32603155
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/keys.go
@@ -0,0 +1,633 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+ "crypto/rsa"
+ "io"
+ "time"
+
+ "golang.org/x/crypto/openpgp/armor"
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+// PublicKeyType is the armor type for a PGP public key.
+var PublicKeyType = "PGP PUBLIC KEY BLOCK"
+
+// PrivateKeyType is the armor type for a PGP private key.
+var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
+
+// An Entity represents the components of an OpenPGP key: a primary public key
+// (which must be a signing key), one or more identities claimed by that key,
+// and zero or more subkeys, which may be encryption keys.
+type Entity struct {
+ PrimaryKey *packet.PublicKey
+ PrivateKey *packet.PrivateKey
+ Identities map[string]*Identity // indexed by Identity.Name
+ Revocations []*packet.Signature
+ Subkeys []Subkey
+}
+
+// An Identity represents an identity claimed by an Entity and zero or more
+// assertions by other entities about that claim.
+type Identity struct {
+ Name string // by convention, has the form "Full Name (comment) <email@example.com>"
+ UserId *packet.UserId
+ SelfSignature *packet.Signature
+ Signatures []*packet.Signature
+}
+
+// A Subkey is an additional public key in an Entity. Subkeys can be used for
+// encryption.
+type Subkey struct {
+ PublicKey *packet.PublicKey
+ PrivateKey *packet.PrivateKey
+ Sig *packet.Signature
+}
+
+// A Key identifies a specific public key in an Entity. This is either the
+// Entity's primary key or a subkey.
+type Key struct {
+ Entity *Entity
+ PublicKey *packet.PublicKey
+ PrivateKey *packet.PrivateKey
+ SelfSignature *packet.Signature
+}
+
+// A KeyRing provides access to public and private keys.
+type KeyRing interface {
+ // KeysById returns the set of keys that have the given key id.
+ KeysById(id uint64) []Key
+ // KeysByIdUsage returns the set of keys with the given id
+ // that also meet the key usage given by requiredUsage.
+ // The requiredUsage is expressed as the bitwise-OR of
+ // packet.KeyFlag* values.
+ KeysByIdUsage(id uint64, requiredUsage byte) []Key
+ // DecryptionKeys returns all private keys that are valid for
+ // decryption.
+ DecryptionKeys() []Key
+}
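// A minimal usage sketch of this interface as implemented by EntityList
// below; armoredPub is a hypothetical armored public key string, and the
// "strings" and "fmt" imports are assumed:
//
//	el, err := ReadArmoredKeyRing(strings.NewReader(armoredPub))
//	if err != nil {
//		// handle the error
//	}
//	// Find signing-capable keys for a known 64-bit key id.
//	for _, k := range el.KeysByIdUsage(0xA42704B92866382A, packet.KeyFlagSign) {
//		fmt.Println(k.PublicKey.KeyIdString())
//	}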
+
+// primaryIdentity returns the Identity marked as primary or the first identity
+// if none are so marked.
+func (e *Entity) primaryIdentity() *Identity {
+ var firstIdentity *Identity
+ for _, ident := range e.Identities {
+ if firstIdentity == nil {
+ firstIdentity = ident
+ }
+ if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
+ return ident
+ }
+ }
+ return firstIdentity
+}
+
+// encryptionKey returns the best candidate Key for encrypting a message to the
+// given Entity.
+func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
+ candidateSubkey := -1
+
+ // Iterate the keys to find the newest key
+ var maxTime time.Time
+ for i, subkey := range e.Subkeys {
+ if subkey.Sig.FlagsValid &&
+ subkey.Sig.FlagEncryptCommunications &&
+ subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
+ !subkey.Sig.KeyExpired(now) &&
+ (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
+ candidateSubkey = i
+ maxTime = subkey.Sig.CreationTime
+ }
+ }
+
+ if candidateSubkey != -1 {
+ subkey := e.Subkeys[candidateSubkey]
+ return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
+ }
+
+ // If we don't have any candidate subkeys for encryption and
+ // the primary key doesn't have any usage metadata then we
+ // assume that the primary key is ok. Or, if the primary key is
+ // marked as ok to encrypt to, then we can obviously use it.
+ i := e.primaryIdentity()
+ if !i.SelfSignature.FlagsValid || (i.SelfSignature.FlagEncryptCommunications &&
+ e.PrimaryKey.PubKeyAlgo.CanEncrypt() &&
+ !i.SelfSignature.KeyExpired(now)) {
+ return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
+ }
+
+ // This Entity appears to be signing only.
+ return Key{}, false
+}
+
+// signingKey return the best candidate Key for signing a message with this
+// Entity.
+func (e *Entity) signingKey(now time.Time) (Key, bool) {
+ candidateSubkey := -1
+
+ for i, subkey := range e.Subkeys {
+ if subkey.Sig.FlagsValid &&
+ subkey.Sig.FlagSign &&
+ subkey.PublicKey.PubKeyAlgo.CanSign() &&
+ !subkey.Sig.KeyExpired(now) {
+ candidateSubkey = i
+ break
+ }
+ }
+
+ if candidateSubkey != -1 {
+ subkey := e.Subkeys[candidateSubkey]
+ return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
+ }
+
+ // If we have no candidate subkey then we assume that it's ok to sign
+ // with the primary key.
+ i := e.primaryIdentity()
+ if !i.SelfSignature.FlagsValid || (i.SelfSignature.FlagSign &&
+ !i.SelfSignature.KeyExpired(now)) {
+ return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
+ }
+
+ return Key{}, false
+}
+
+// An EntityList contains one or more Entities.
+type EntityList []*Entity
+
+// KeysById returns the set of keys that have the given key id.
+func (el EntityList) KeysById(id uint64) (keys []Key) {
+ for _, e := range el {
+ if e.PrimaryKey.KeyId == id {
+ var selfSig *packet.Signature
+ for _, ident := range e.Identities {
+ if selfSig == nil {
+ selfSig = ident.SelfSignature
+ } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
+ selfSig = ident.SelfSignature
+ break
+ }
+ }
+ keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
+ }
+
+ for _, subKey := range e.Subkeys {
+ if subKey.PublicKey.KeyId == id {
+ keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
+ }
+ }
+ }
+ return
+}
+
+// KeysByIdUsage returns the set of keys with the given id that also meet
+// the key usage given by requiredUsage. The requiredUsage is expressed as
+// the bitwise-OR of packet.KeyFlag* values.
+func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
+ for _, key := range el.KeysById(id) {
+ if len(key.Entity.Revocations) > 0 {
+ continue
+ }
+
+ if key.SelfSignature.RevocationReason != nil {
+ continue
+ }
+
+ if key.SelfSignature.FlagsValid && requiredUsage != 0 {
+ var usage byte
+ if key.SelfSignature.FlagCertify {
+ usage |= packet.KeyFlagCertify
+ }
+ if key.SelfSignature.FlagSign {
+ usage |= packet.KeyFlagSign
+ }
+ if key.SelfSignature.FlagEncryptCommunications {
+ usage |= packet.KeyFlagEncryptCommunications
+ }
+ if key.SelfSignature.FlagEncryptStorage {
+ usage |= packet.KeyFlagEncryptStorage
+ }
+ if usage&requiredUsage != requiredUsage {
+ continue
+ }
+ }
+
+ keys = append(keys, key)
+ }
+ return
+}
+
+// DecryptionKeys returns all private keys that are valid for decryption.
+func (el EntityList) DecryptionKeys() (keys []Key) {
+ for _, e := range el {
+ for _, subKey := range e.Subkeys {
+ if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
+ keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
+ }
+ }
+ }
+ return
+}
+
+// ReadArmoredKeyRing reads one or more public/private keys from an armored keyring file.
+func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
+ block, err := armor.Decode(r)
+ if err == io.EOF {
+ return nil, errors.InvalidArgumentError("no armored data found")
+ }
+ if err != nil {
+ return nil, err
+ }
+ if block.Type != PublicKeyType && block.Type != PrivateKeyType {
+ return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type)
+ }
+
+ return ReadKeyRing(block.Body)
+}
+
+// ReadKeyRing reads one or more public/private keys. Unsupported keys are
+// ignored as long as at least a single valid key is found.
+func ReadKeyRing(r io.Reader) (el EntityList, err error) {
+ packets := packet.NewReader(r)
+ var lastUnsupportedError error
+
+ for {
+ var e *Entity
+ e, err = ReadEntity(packets)
+ if err != nil {
+ // TODO: warn about skipped unsupported/unreadable keys
+ if _, ok := err.(errors.UnsupportedError); ok {
+ lastUnsupportedError = err
+ err = readToNextPublicKey(packets)
+ } else if _, ok := err.(errors.StructuralError); ok {
+ // Skip unreadable, badly-formatted keys
+ lastUnsupportedError = err
+ err = readToNextPublicKey(packets)
+ }
+ if err == io.EOF {
+ err = nil
+ break
+ }
+ if err != nil {
+ el = nil
+ break
+ }
+ } else {
+ el = append(el, e)
+ }
+ }
+
+ if len(el) == 0 && err == nil {
+ err = lastUnsupportedError
+ }
+ return
+}
+
+// readToNextPublicKey reads packets until the start of the next entity and
+// leaves the first packet of that entity in the Reader.
+func readToNextPublicKey(packets *packet.Reader) (err error) {
+ var p packet.Packet
+ for {
+ p, err = packets.Next()
+ if err == io.EOF {
+ return
+ } else if err != nil {
+ if _, ok := err.(errors.UnsupportedError); ok {
+ err = nil
+ continue
+ }
+ return
+ }
+
+ if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey {
+ packets.Unread(p)
+ return
+ }
+ }
+
+ panic("unreachable")
+}
+
+// ReadEntity reads an entity (public key, identities, subkeys, etc.) from the
+// given Reader.
+func ReadEntity(packets *packet.Reader) (*Entity, error) {
+ e := new(Entity)
+ e.Identities = make(map[string]*Identity)
+
+ p, err := packets.Next()
+ if err != nil {
+ return nil, err
+ }
+
+ var ok bool
+ if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
+ if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
+ packets.Unread(p)
+ return nil, errors.StructuralError("first packet was not a public/private key")
+ } else {
+ e.PrimaryKey = &e.PrivateKey.PublicKey
+ }
+ }
+
+ if !e.PrimaryKey.PubKeyAlgo.CanSign() {
+ return nil, errors.StructuralError("primary key cannot be used for signatures")
+ }
+
+ var current *Identity
+ var revocations []*packet.Signature
+EachPacket:
+ for {
+ p, err := packets.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return nil, err
+ }
+
+ switch pkt := p.(type) {
+ case *packet.UserId:
+ current = new(Identity)
+ current.Name = pkt.Id
+ current.UserId = pkt
+ e.Identities[pkt.Id] = current
+
+ for {
+ p, err = packets.Next()
+ if err == io.EOF {
+ return nil, io.ErrUnexpectedEOF
+ } else if err != nil {
+ return nil, err
+ }
+
+ sig, ok := p.(*packet.Signature)
+ if !ok {
+ return nil, errors.StructuralError("user ID packet not followed by self-signature")
+ }
+
+ if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
+ if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
+ return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error())
+ }
+ current.SelfSignature = sig
+ break
+ }
+ current.Signatures = append(current.Signatures, sig)
+ }
+ case *packet.Signature:
+ if pkt.SigType == packet.SigTypeKeyRevocation {
+ revocations = append(revocations, pkt)
+ } else if pkt.SigType == packet.SigTypeDirectSignature {
+ // TODO: RFC4880 5.2.1 permits signatures
+ // directly on keys (eg. to bind additional
+ // revocation keys).
+ } else if current == nil {
+ return nil, errors.StructuralError("signature packet found before user id packet")
+ } else {
+ current.Signatures = append(current.Signatures, pkt)
+ }
+ case *packet.PrivateKey:
+ if !pkt.IsSubkey {
+ packets.Unread(p)
+ break EachPacket
+ }
+ err = addSubkey(e, packets, &pkt.PublicKey, pkt)
+ if err != nil {
+ return nil, err
+ }
+ case *packet.PublicKey:
+ if !pkt.IsSubkey {
+ packets.Unread(p)
+ break EachPacket
+ }
+ err = addSubkey(e, packets, pkt, nil)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ // we ignore unknown packets
+ }
+ }
+
+ if len(e.Identities) == 0 {
+ return nil, errors.StructuralError("entity without any identities")
+ }
+
+ for _, revocation := range revocations {
+ err = e.PrimaryKey.VerifyRevocationSignature(revocation)
+ if err == nil {
+ e.Revocations = append(e.Revocations, revocation)
+ } else {
+ // TODO: RFC 4880 5.2.3.15 defines revocation keys.
+ return nil, errors.StructuralError("revocation signature signed by alternate key")
+ }
+ }
+
+ return e, nil
+}
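// A short sketch of reading a single entity from a binary stream r (any
// io.Reader holding raw, unarmored OpenPGP packets):
//
//	e, err := ReadEntity(packet.NewReader(r))
//	if err != nil {
//		// handle the error
//	}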
+
+func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
+ var subKey Subkey
+ subKey.PublicKey = pub
+ subKey.PrivateKey = priv
+ p, err := packets.Next()
+ if err == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ if err != nil {
+ return errors.StructuralError("subkey signature invalid: " + err.Error())
+ }
+ var ok bool
+ subKey.Sig, ok = p.(*packet.Signature)
+ if !ok {
+ return errors.StructuralError("subkey packet not followed by signature")
+ }
+ if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation {
+ return errors.StructuralError("subkey signature with wrong type")
+ }
+ err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig)
+ if err != nil {
+ return errors.StructuralError("subkey signature invalid: " + err.Error())
+ }
+ e.Subkeys = append(e.Subkeys, subKey)
+ return nil
+}
+
+const defaultRSAKeyBits = 2048
+
+// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
+// single identity composed of the given full name, comment and email, any of
+// which may be empty but must not contain any of "()<>\x00".
+// If config is nil, sensible defaults will be used.
+func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
+ currentTime := config.Now()
+
+ bits := defaultRSAKeyBits
+ if config != nil && config.RSABits != 0 {
+ bits = config.RSABits
+ }
+
+ uid := packet.NewUserId(name, comment, email)
+ if uid == nil {
+ return nil, errors.InvalidArgumentError("user id field contained invalid characters")
+ }
+ signingPriv, err := rsa.GenerateKey(config.Random(), bits)
+ if err != nil {
+ return nil, err
+ }
+ encryptingPriv, err := rsa.GenerateKey(config.Random(), bits)
+ if err != nil {
+ return nil, err
+ }
+
+ e := &Entity{
+ PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey),
+ PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv),
+ Identities: make(map[string]*Identity),
+ }
+ isPrimaryId := true
+ e.Identities[uid.Id] = &Identity{
+ Name: uid.Name,
+ UserId: uid,
+ SelfSignature: &packet.Signature{
+ CreationTime: currentTime,
+ SigType: packet.SigTypePositiveCert,
+ PubKeyAlgo: packet.PubKeyAlgoRSA,
+ Hash: config.Hash(),
+ IsPrimaryId: &isPrimaryId,
+ FlagsValid: true,
+ FlagSign: true,
+ FlagCertify: true,
+ IssuerKeyId: &e.PrimaryKey.KeyId,
+ },
+ }
+
+ e.Subkeys = make([]Subkey, 1)
+ e.Subkeys[0] = Subkey{
+ PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
+ PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv),
+ Sig: &packet.Signature{
+ CreationTime: currentTime,
+ SigType: packet.SigTypeSubkeyBinding,
+ PubKeyAlgo: packet.PubKeyAlgoRSA,
+ Hash: config.Hash(),
+ FlagsValid: true,
+ FlagEncryptStorage: true,
+ FlagEncryptCommunications: true,
+ IssuerKeyId: &e.PrimaryKey.KeyId,
+ },
+ }
+ e.Subkeys[0].PublicKey.IsSubkey = true
+ e.Subkeys[0].PrivateKey.IsSubkey = true
+
+ return e, nil
+}
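// A minimal sketch of NewEntity with default settings (nil config); the
// identity fields are placeholder values, and "bytes" is assumed to be
// imported. Per SerializePrivate below, a fresh entity should be written
// out with that method so its self-signatures are computed:
//
//	e, err := NewEntity("Alice", "demo", "alice@example.com", nil)
//	if err != nil {
//		// handle the error
//	}
//	var buf bytes.Buffer
//	if err := e.SerializePrivate(&buf, nil); err != nil {
//		// handle the error
//	}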
+
+// SerializePrivate serializes an Entity, including private key material, to
+// the given Writer. For now, it must only be used on an Entity returned from
+// NewEntity.
+// If config is nil, sensible defaults will be used.
+func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
+ err = e.PrivateKey.Serialize(w)
+ if err != nil {
+ return
+ }
+ for _, ident := range e.Identities {
+ err = ident.UserId.Serialize(w)
+ if err != nil {
+ return
+ }
+ err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
+ if err != nil {
+ return
+ }
+ err = ident.SelfSignature.Serialize(w)
+ if err != nil {
+ return
+ }
+ }
+ for _, subkey := range e.Subkeys {
+ err = subkey.PrivateKey.Serialize(w)
+ if err != nil {
+ return
+ }
+ err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
+ if err != nil {
+ return
+ }
+ err = subkey.Sig.Serialize(w)
+ if err != nil {
+ return
+ }
+ }
+ return nil
+}
+
+// Serialize writes the public part of the given Entity to w. (No private
+// key material will be output).
+func (e *Entity) Serialize(w io.Writer) error {
+ err := e.PrimaryKey.Serialize(w)
+ if err != nil {
+ return err
+ }
+ for _, ident := range e.Identities {
+ err = ident.UserId.Serialize(w)
+ if err != nil {
+ return err
+ }
+ err = ident.SelfSignature.Serialize(w)
+ if err != nil {
+ return err
+ }
+ for _, sig := range ident.Signatures {
+ err = sig.Serialize(w)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ for _, subkey := range e.Subkeys {
+ err = subkey.PublicKey.Serialize(w)
+ if err != nil {
+ return err
+ }
+ err = subkey.Sig.Serialize(w)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SignIdentity adds a signature to e, from signer, attesting that identity is
+// associated with e. The provided identity must already be an element of
+// e.Identities and the private key of signer must have been decrypted if
+// necessary.
+// If config is nil, sensible defaults will be used.
+func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
+ if signer.PrivateKey == nil {
+ return errors.InvalidArgumentError("signing Entity must have a private key")
+ }
+ if signer.PrivateKey.Encrypted {
+ return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
+ }
+ ident, ok := e.Identities[identity]
+ if !ok {
+ return errors.InvalidArgumentError("given identity string not found in Entity")
+ }
+
+ sig := &packet.Signature{
+ SigType: packet.SigTypeGenericCert,
+ PubKeyAlgo: signer.PrivateKey.PubKeyAlgo,
+ Hash: config.Hash(),
+ CreationTime: config.Now(),
+ IssuerKeyId: &signer.PrivateKey.KeyId,
+ }
+ if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil {
+ return err
+ }
+ ident.Signatures = append(ident.Signatures, sig)
+ return nil
+}
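// Sketch of SignIdentity: signer attests to one of e's identities. Both
// entities are hypothetical here, and signer's private key must already
// be decrypted as required above:
//
//	const id = "Alice (demo) <alice@example.com>"
//	if err := e.SignIdentity(id, signer, nil); err != nil {
//		// handle the error
//	}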
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/keys_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/keys_test.go
new file mode 100644
index 00000000000..d5e2056bb82
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/keys_test.go
@@ -0,0 +1,370 @@
+package openpgp
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+ "time"
+
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+func TestKeyExpiry(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(expiringKeyHex))
+ entity := kring[0]
+
+ const timeFormat = "2006-01-02"
+ time1, _ := time.Parse(timeFormat, "2013-07-01")
+
+ // The expiringKeyHex key is structured as:
+ //
+ // pub 1024R/5E237D8C created: 2013-07-01 expires: 2013-07-31 usage: SC
+ // sub 1024R/1ABB25A0 created: 2013-07-01 23:11:07 +0200 CEST expires: 2013-07-08 usage: E
+ // sub 1024R/96A672F5 created: 2013-07-01 23:11:23 +0200 CEST expires: 2013-07-31 usage: E
+ //
+ // So this should select the newest, non-expired encryption key.
+ key, _ := entity.encryptionKey(time1)
+ if id := key.PublicKey.KeyIdShortString(); id != "96A672F5" {
+ t.Errorf("Expected key 1ABB25A0 at time %s, but got key %s", time1.Format(timeFormat), id)
+ }
+
+ // Once the first encryption subkey has expired, the second should be
+ // selected.
+ time2, _ := time.Parse(timeFormat, "2013-07-09")
+ key, _ = entity.encryptionKey(time2)
+ if id := key.PublicKey.KeyIdShortString(); id != "96A672F5" {
+ t.Errorf("Expected key 96A672F5 at time %s, but got key %s", time2.Format(timeFormat), id)
+ }
+
+ // Once all the keys have expired, nothing should be returned.
+ time3, _ := time.Parse(timeFormat, "2013-08-01")
+ if key, ok := entity.encryptionKey(time3); ok {
+ t.Errorf("Expected no key at time %s, but got key %s", time3.Format(timeFormat), key.PublicKey.KeyIdShortString())
+ }
+}
+
+func TestMissingCrossSignature(t *testing.T) {
+ // This public key has a signing subkey, but the subkey does not
+ // contain a cross-signature.
+ keys, err := ReadArmoredKeyRing(bytes.NewBufferString(missingCrossSignatureKey))
+ if len(keys) != 0 {
+ t.Errorf("Accepted key with missing cross signature")
+ }
+ if err == nil {
+ t.Fatal("Failed to detect error in keyring with missing cross signature")
+ }
+ structural, ok := err.(errors.StructuralError)
+ if !ok {
+ t.Fatalf("Unexpected class of error: %T. Wanted StructuralError", err)
+ }
+ const expectedMsg = "signing subkey is missing cross-signature"
+ if !strings.Contains(string(structural), expectedMsg) {
+ t.Fatalf("Unexpected error: %q. Expected it to contain %q", err, expectedMsg)
+ }
+}
+
+func TestInvalidCrossSignature(t *testing.T) {
+ // This public key has a signing subkey, and the subkey has an
+ // embedded cross-signature. However, the cross-signature does
+ // not correctly validate over the primary and subkey.
+ keys, err := ReadArmoredKeyRing(bytes.NewBufferString(invalidCrossSignatureKey))
+ if len(keys) != 0 {
+ t.Errorf("Accepted key with invalid cross signature")
+ }
+ if err == nil {
+ t.Fatal("Failed to detect error in keyring with an invalid cross signature")
+ }
+ structural, ok := err.(errors.StructuralError)
+ if !ok {
+ t.Fatalf("Unexpected class of error: %T. Wanted StructuralError", err)
+ }
+ const expectedMsg = "subkey signature invalid"
+ if !strings.Contains(string(structural), expectedMsg) {
+ t.Fatalf("Unexpected error: %q. Expected it to contain %q", err, expectedMsg)
+ }
+}
+
+func TestGoodCrossSignature(t *testing.T) {
+ // This public key has a signing subkey, and the subkey has an
+ // embedded cross-signature which correctly validates over the
+ // primary and subkey.
+ keys, err := ReadArmoredKeyRing(bytes.NewBufferString(goodCrossSignatureKey))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(keys) != 1 {
+ t.Errorf("Failed to accept key with good cross signature, %d", len(keys))
+ }
+ if len(keys[0].Subkeys) != 1 {
+ t.Errorf("Failed to accept good subkey, %d", len(keys[0].Subkeys))
+ }
+}
+
+// TestExternallyRevokableKey attempts to load and parse a key with a third party revocation permission.
+func TestExternallyRevocableKey(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(subkeyUsageHex))
+
+ // The 0xA42704B92866382A key can be revoked by 0xBE3893CB843D0FE70C
+ // according to this signature that appears within the key:
+ // :signature packet: algo 1, keyid A42704B92866382A
+ // version 4, created 1396409682, md5len 0, sigclass 0x1f
+ // digest algo 2, begin of digest a9 84
+ // hashed subpkt 2 len 4 (sig created 2014-04-02)
+ // hashed subpkt 12 len 22 (revocation key: c=80 a=1 f=CE094AA433F7040BB2DDF0BE3893CB843D0FE70C)
+ // hashed subpkt 7 len 1 (not revocable)
+ // subpkt 16 len 8 (issuer key ID A42704B92866382A)
+ // data: [1024 bits]
+
+ id := uint64(0xA42704B92866382A)
+ keys := kring.KeysById(id)
+ if len(keys) != 1 {
+ t.Errorf("Expected to find key id %X, but got %d matches", id, len(keys))
+ }
+}
+
+func TestKeyRevocation(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(revokedKeyHex))
+
+ // revokedKeyHex contains these keys:
+ // pub 1024R/9A34F7C0 2014-03-25 [revoked: 2014-03-25]
+ // sub 1024R/1BA3CD60 2014-03-25 [revoked: 2014-03-25]
+ ids := []uint64{0xA401D9F09A34F7C0, 0x5CD3BE0A1BA3CD60}
+
+ for _, id := range ids {
+ keys := kring.KeysById(id)
+ if len(keys) != 1 {
+ t.Errorf("Expected KeysById to find revoked key %X, but got %d matches", id, len(keys))
+ }
+ keys = kring.KeysByIdUsage(id, 0)
+ if len(keys) != 0 {
+ t.Errorf("Expected KeysByIdUsage to filter out revoked key %X, but got %d matches", id, len(keys))
+ }
+ }
+}
+
+func TestSubkeyRevocation(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(revokedSubkeyHex))
+
+ // revokedSubkeyHex contains these keys:
+ // pub 1024R/4EF7E4BECCDE97F0 2014-03-25
+ // sub 1024R/D63636E2B96AE423 2014-03-25
+ // sub 1024D/DBCE4EE19529437F 2014-03-25
+ // sub 1024R/677815E371C2FD23 2014-03-25 [revoked: 2014-03-25]
+ validKeys := []uint64{0x4EF7E4BECCDE97F0, 0xD63636E2B96AE423, 0xDBCE4EE19529437F}
+ revokedKey := uint64(0x677815E371C2FD23)
+
+ for _, id := range validKeys {
+ keys := kring.KeysById(id)
+ if len(keys) != 1 {
+ t.Errorf("Expected KeysById to find key %X, but got %d matches", id, len(keys))
+ }
+ keys = kring.KeysByIdUsage(id, 0)
+ if len(keys) != 1 {
+ t.Errorf("Expected KeysByIdUsage to find key %X, but got %d matches", id, len(keys))
+ }
+ }
+
+ keys := kring.KeysById(revokedKey)
+ if len(keys) != 1 {
+ t.Errorf("Expected KeysById to find key %X, but got %d matches", revokedKey, len(keys))
+ }
+
+ keys = kring.KeysByIdUsage(revokedKey, 0)
+ if len(keys) != 0 {
+ t.Errorf("Expected KeysByIdUsage to filter out revoked key %X, but got %d matches", revokedKey, len(keys))
+ }
+}
+
+func TestKeyUsage(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(subkeyUsageHex))
+
+ // subkeyUsageHex contains these keys:
+ // pub 1024R/2866382A created: 2014-04-01 expires: never usage: SC
+ // sub 1024R/936C9153 created: 2014-04-01 expires: never usage: E
+ // sub 1024R/64D5F5BB created: 2014-04-02 expires: never usage: E
+ // sub 1024D/BC0BA992 created: 2014-04-02 expires: never usage: S
+ certifiers := []uint64{0xA42704B92866382A}
+ signers := []uint64{0xA42704B92866382A, 0x42CE2C64BC0BA992}
+ encrypters := []uint64{0x09C0C7D9936C9153, 0xC104E98664D5F5BB}
+
+ for _, id := range certifiers {
+ keys := kring.KeysByIdUsage(id, packet.KeyFlagCertify)
+ if len(keys) == 1 {
+ if keys[0].PublicKey.KeyId != id {
+ t.Errorf("Expected to find certifier key id %X, but got %X", id, keys[0].PublicKey.KeyId)
+ }
+ } else {
+ t.Errorf("Expected one match for certifier key id %X, but got %d matches", id, len(keys))
+ }
+ }
+
+ for _, id := range signers {
+ keys := kring.KeysByIdUsage(id, packet.KeyFlagSign)
+ if len(keys) == 1 {
+ if keys[0].PublicKey.KeyId != id {
+ t.Errorf("Expected to find signing key id %X, but got %X", id, keys[0].PublicKey.KeyId)
+ }
+ } else {
+ t.Errorf("Expected one match for signing key id %X, but got %d matches", id, len(keys))
+ }
+
+ // This keyring contains no signing keys that are also good for encryption.
+ keys = kring.KeysByIdUsage(id, packet.KeyFlagEncryptStorage|packet.KeyFlagEncryptCommunications)
+ if len(keys) != 0 {
+ t.Errorf("Unexpected match for encryption key id %X", id)
+ }
+ }
+
+ for _, id := range encrypters {
+ keys := kring.KeysByIdUsage(id, packet.KeyFlagEncryptStorage|packet.KeyFlagEncryptCommunications)
+ if len(keys) == 1 {
+ if keys[0].PublicKey.KeyId != id {
+ t.Errorf("Expected to find encryption key id %X, but got %X", id, keys[0].PublicKey.KeyId)
+ }
+ } else {
+ t.Errorf("Expected one match for encryption key id %X, but got %d matches", id, len(keys))
+ }
+
+ // This keyring contains no encryption keys that are also good for signing.
+ keys = kring.KeysByIdUsage(id, packet.KeyFlagSign)
+ if len(keys) != 0 {
+ t.Errorf("Unexpected match for signing key id %X", id)
+ }
+ }
+}
+
+func TestIdVerification(t *testing.T) {
+ kring, err := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := kring[1].PrivateKey.Decrypt([]byte("passphrase")); err != nil {
+ t.Fatal(err)
+ }
+
+ const identity = "Test Key 1 (RSA)"
+ if err := kring[0].SignIdentity(identity, kring[1], nil); err != nil {
+ t.Fatal(err)
+ }
+
+ ident, ok := kring[0].Identities[identity]
+ if !ok {
+ t.Fatal("identity missing from key after signing")
+ }
+
+ checked := false
+ for _, sig := range ident.Signatures {
+ if sig.IssuerKeyId == nil || *sig.IssuerKeyId != kring[1].PrimaryKey.KeyId {
+ continue
+ }
+
+ if err := kring[1].PrimaryKey.VerifyUserIdSignature(identity, kring[0].PrimaryKey, sig); err != nil {
+ t.Fatalf("error verifying new identity signature: %s", err)
+ }
+ checked = true
+ break
+ }
+
+ if !checked {
+ t.Fatal("didn't find identity signature in Entity")
+ }
+}
+
+const expiringKeyHex = "988d0451d1ec5d010400ba3385721f2dc3f4ab096b2ee867ab77213f0a27a8538441c35d2fa225b08798a1439a66a5150e6bdc3f40f5d28d588c712394c632b6299f77db8c0d48d37903fb72ebd794d61be6aa774688839e5fdecfe06b2684cc115d240c98c66cb1ef22ae84e3aa0c2b0c28665c1e7d4d044e7f270706193f5223c8d44e0d70b7b8da830011010001b40f4578706972792074657374206b657988be041301020028050251d1ec5d021b03050900278d00060b090807030206150802090a0b0416020301021e01021780000a091072589ad75e237d8c033503fd10506d72837834eb7f994117740723adc39227104b0d326a1161871c0b415d25b4aedef946ca77ea4c05af9c22b32cf98be86ab890111fced1ee3f75e87b7cc3c00dc63bbc85dfab91c0dc2ad9de2c4d13a34659333a85c6acc1a669c5e1d6cecb0cf1e56c10e72d855ae177ddc9e766f9b2dda57ccbb75f57156438bbdb4e42b88d0451d1ec5d0104009c64906559866c5cb61578f5846a94fcee142a489c9b41e67b12bb54cfe86eb9bc8566460f9a720cb00d6526fbccfd4f552071a8e3f7744b1882d01036d811ee5a3fb91a1c568055758f43ba5d2c6a9676b012f3a1a89e47bbf624f1ad571b208f3cc6224eb378f1645dd3d47584463f9eadeacfd1ce6f813064fbfdcc4b5a53001101000188a504180102000f021b0c050251d1f06b050900093e89000a091072589ad75e237d8c20e00400ab8310a41461425b37889c4da28129b5fae6084fafbc0a47dd1adc74a264c6e9c9cc125f40462ee1433072a58384daef88c961c390ed06426a81b464a53194c4e291ddd7e2e2ba3efced01537d713bd111f48437bde2363446200995e8e0d4e528dda377fd1e8f8ede9c8e2198b393bd86852ce7457a7e3daf74d510461a5b77b88d0451d1ece8010400b3a519f83ab0010307e83bca895170acce8964a044190a2b368892f7a244758d9fc193482648acb1fb9780d28cc22d171931f38bb40279389fc9bf2110876d4f3db4fcfb13f22f7083877fe56592b3b65251312c36f83ffcb6d313c6a17f197dd471f0712aad15a8537b435a92471ba2e5b0c72a6c72536c3b567c558d7b6051001101000188a504180102000f021b0c050251d1f07b050900279091000a091072589ad75e237d8ce69e03fe286026afacf7c97ee20673864d4459a2240b5655219950643c7dba0ac384b1d4359c67805b21d98211f7b09c2a0ccf6410c8c04d4ff4a51293725d8d6570d9d8bb0e10c07d22357caeb49626df99c180be02d77d1fe8ed25e7a54481237646083a9f89a11566cd20b9e995b1487c5f9e02aeb434f3a1897cd416dd0a87861838da3e9e"
+const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98"
+const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f"
+const revokedSubkeyHex = "988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a0021b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011"
+const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Charset: UTF-8
+
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
+ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
+zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
+QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
+QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
+9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
+Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
+dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
+JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
+ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
+RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
+/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
+yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv
+2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR
+bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL
+C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP
+WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y
+MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA
+EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ
+MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N
+1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm
++ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N
+lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW
+CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF
+4artDmrG
+=7FfJ
+-----END PGP PUBLIC KEY BLOCK-----`
+
+const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
+ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
+zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
+QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
+QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
+9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
+Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
+dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
+JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
+ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
+RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
+/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
+yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ
+UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe
+iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK
+FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8
+R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh
++SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA
+EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO
+52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb
+u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl
+w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep
+54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+
+YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL
+bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E
+i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB
+DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1
+8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY
+s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745
+U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL
+6LCg2mg=
+=Dhm4
+-----END PGP PUBLIC KEY BLOCK-----`
+
+const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v1
+
+mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo
+7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom
+lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0
+E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC
+CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw
+6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH
+7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv
+X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7
+GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl
+y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw
+R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW
+CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+
+LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO
+aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx
+yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl
+BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr
+Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK
+CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp
+C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ
+SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/
+MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70=
+=vtbN
+-----END PGP PUBLIC KEY BLOCK-----`
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/compressed.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/compressed.go
new file mode 100644
index 00000000000..e8f0b5caa7d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/compressed.go
@@ -0,0 +1,123 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "compress/bzip2"
+ "compress/flate"
+ "compress/zlib"
+ "golang.org/x/crypto/openpgp/errors"
+ "io"
+ "strconv"
+)
+
+// Compressed represents a compressed OpenPGP packet. The decompressed contents
+// will contain more OpenPGP packets. See RFC 4880, section 5.6.
+type Compressed struct {
+ Body io.Reader
+}
+
+const (
+ NoCompression = flate.NoCompression
+ BestSpeed = flate.BestSpeed
+ BestCompression = flate.BestCompression
+ DefaultCompression = flate.DefaultCompression
+)
+
+// CompressionConfig contains compressor configuration settings.
+type CompressionConfig struct {
+ // Level is the compression level to use. It must be set to
+ // between -1 and 9, with -1 causing the compressor to use the
+ // default compression level, 0 causing the compressor to use
+ // no compression and 1 to 9 representing increasing (better,
+ // slower) compression levels. If Level is less than -1 or
+ // more than 9, a non-nil error will be returned during
+ // encryption. See the constants above for convenient common
+ // settings for Level.
+ Level int
+}
+
+func (c *Compressed) parse(r io.Reader) error {
+ var buf [1]byte
+ _, err := readFull(r, buf[:])
+ if err != nil {
+ return err
+ }
+
+ switch buf[0] {
+ case 1:
+ c.Body = flate.NewReader(r)
+ case 2:
+ c.Body, err = zlib.NewReader(r)
+ case 3:
+ c.Body = bzip2.NewReader(r)
+ default:
+ err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
+ }
+
+ return err
+}
+
+// compressedWriteCloser represents the serialized compression stream
+// header and the compressor. Its Close() method ensures that both the
+// compressor and serialized stream header are closed. Its Write()
+// method writes to the compressor.
+type compressedWriteCloser struct {
+ sh io.Closer // Stream Header
+ c io.WriteCloser // Compressor
+}
+
+func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
+ return cwc.c.Write(p)
+}
+
+func (cwc compressedWriteCloser) Close() (err error) {
+ err = cwc.c.Close()
+ if err != nil {
+ return err
+ }
+
+ return cwc.sh.Close()
+}
+
+// SerializeCompressed serializes a compressed data packet to w and
+// returns a WriteCloser to which the literal data packets themselves
+// can be written and which MUST be closed on completion. If cc is
+// nil, sensible defaults will be used to configure the compression
+// algorithm.
+func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
+ compressed, err := serializeStreamHeader(w, packetTypeCompressed)
+ if err != nil {
+ return
+ }
+
+ _, err = compressed.Write([]byte{uint8(algo)})
+ if err != nil {
+ return
+ }
+
+ level := DefaultCompression
+ if cc != nil {
+ level = cc.Level
+ }
+
+ var compressor io.WriteCloser
+ switch algo {
+ case CompressionZIP:
+ compressor, err = flate.NewWriter(compressed, level)
+ case CompressionZLIB:
+ compressor, err = zlib.NewWriterLevel(compressed, level)
+ default:
+ s := strconv.Itoa(int(algo))
+ err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
+ }
+ if err != nil {
+ return
+ }
+
+ literaldata = compressedWriteCloser{compressed, compressor}
+
+ return
+}
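// A minimal sketch, assuming out is an io.WriteCloser and literalPackets
// holds already-serialized literal data packets:
//
//	w, err := SerializeCompressed(out, CompressionZLIB, nil)
//	if err != nil {
//		// handle the error
//	}
//	if _, err := w.Write(literalPackets); err != nil {
//		// handle the error
//	}
//	// Close flushes the compressor, then the stream header.
//	if err := w.Close(); err != nil {
//		// handle the error
//	}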
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/compressed_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/compressed_test.go
new file mode 100644
index 00000000000..cb2d70bd411
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/compressed_test.go
@@ -0,0 +1,41 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestCompressed(t *testing.T) {
+ packet, err := Read(readerFromHex(compressedHex))
+ if err != nil {
+ t.Errorf("failed to read Compressed: %s", err)
+ return
+ }
+
+ c, ok := packet.(*Compressed)
+ if !ok {
+ t.Error("didn't find Compressed packet")
+ return
+ }
+
+ contents, err := ioutil.ReadAll(c.Body)
+ if err != nil && err != io.EOF {
+ t.Error(err)
+ return
+ }
+
+ expected, _ := hex.DecodeString(compressedExpectedHex)
+ if !bytes.Equal(expected, contents) {
+ t.Errorf("got:%x want:%x", contents, expected)
+ }
+}
+
+const compressedHex = "a3013b2d90c4e02b72e25f727e5e496a5e49b11e1700"
+const compressedExpectedHex = "cb1062004d14c8fe636f6e74656e74732e0a"
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/config.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/config.go
new file mode 100644
index 00000000000..c76eecc963a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/config.go
@@ -0,0 +1,91 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "crypto"
+ "crypto/rand"
+ "io"
+ "time"
+)
+
+// Config collects a number of parameters along with sensible defaults.
+// A nil *Config is valid and results in all default values.
+type Config struct {
+ // Rand provides the source of entropy.
+ // If nil, the crypto/rand Reader is used.
+ Rand io.Reader
+ // DefaultHash is the default hash function to be used.
+ // If zero, SHA-256 is used.
+ DefaultHash crypto.Hash
+ // DefaultCipher is the cipher to be used.
+ // If zero, AES-128 is used.
+ DefaultCipher CipherFunction
+ // Time returns the current time.
+ // If Time is nil, time.Now is used.
+ Time func() time.Time
+ // DefaultCompressionAlgo is the compression algorithm to be
+ // applied to the plaintext before encryption. If zero, no
+ // compression is done.
+ DefaultCompressionAlgo CompressionAlgo
+ // CompressionConfig configures the compression settings.
+ CompressionConfig *CompressionConfig
+ // S2KCount is only used for symmetric encryption. It
+ // determines the strength of the passphrase stretching when
+ // the said passphrase is hashed to produce a key. S2KCount
+ // should be between 1024 and 65011712, inclusive. If Config
+ // is nil or S2KCount is 0, the value 65536 is used. Not all
+ // values in the above range can be represented. S2KCount will
+ // be rounded up to the next representable value if it cannot
+ // be encoded exactly. When set, it is strongly encouraged to
+ // use a value that is at least 65536. See RFC 4880 Section
+ // 3.7.1.3.
+ S2KCount int
+ // RSABits is the number of bits in new RSA keys made with NewEntity.
+ // If zero, then 2048 bit keys are created.
+ RSABits int
+}
+
+func (c *Config) Random() io.Reader {
+ if c == nil || c.Rand == nil {
+ return rand.Reader
+ }
+ return c.Rand
+}
+
+func (c *Config) Hash() crypto.Hash {
+ if c == nil || uint(c.DefaultHash) == 0 {
+ return crypto.SHA256
+ }
+ return c.DefaultHash
+}
+
+func (c *Config) Cipher() CipherFunction {
+ if c == nil || uint8(c.DefaultCipher) == 0 {
+ return CipherAES128
+ }
+ return c.DefaultCipher
+}
+
+func (c *Config) Now() time.Time {
+ if c == nil || c.Time == nil {
+ return time.Now()
+ }
+ return c.Time()
+}
+
+func (c *Config) Compression() CompressionAlgo {
+ if c == nil {
+ return CompressionNone
+ }
+ return c.DefaultCompressionAlgo
+}
+
+func (c *Config) PasswordHashIterations() int {
+ if c == nil || c.S2KCount == 0 {
+ return 0
+ }
+ return c.S2KCount
+}
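// Because each accessor checks for a nil receiver, a nil *Config is safe
// to use directly; a short sketch:
//
//	var cfg *Config
//	_ = cfg.Hash()   // crypto.SHA256
//	_ = cfg.Cipher() // CipherAES128
//	_ = cfg.Now()    // time.Now()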
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/encrypted_key.go
new file mode 100644
index 00000000000..266840d05a3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/encrypted_key.go
@@ -0,0 +1,199 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "crypto/rsa"
+ "encoding/binary"
+ "io"
+ "math/big"
+ "strconv"
+
+ "golang.org/x/crypto/openpgp/elgamal"
+ "golang.org/x/crypto/openpgp/errors"
+)
+
+const encryptedKeyVersion = 3
+
+// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
+// section 5.1.
+type EncryptedKey struct {
+ KeyId uint64
+ Algo PublicKeyAlgorithm
+ CipherFunc CipherFunction // only valid after a successful Decrypt
+ Key []byte // only valid after a successful Decrypt
+
+ encryptedMPI1, encryptedMPI2 parsedMPI
+}
+
+func (e *EncryptedKey) parse(r io.Reader) (err error) {
+ var buf [10]byte
+ _, err = readFull(r, buf[:])
+ if err != nil {
+ return
+ }
+ if buf[0] != encryptedKeyVersion {
+ return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
+ }
+ e.KeyId = binary.BigEndian.Uint64(buf[1:9])
+ e.Algo = PublicKeyAlgorithm(buf[9])
+ switch e.Algo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+ e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+ case PubKeyAlgoElGamal:
+ e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
+ e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
+ }
+ _, err = consumeAll(r)
+ return
+}
+
+func checksumKeyMaterial(key []byte) uint16 {
+ var checksum uint16
+ for _, v := range key {
+ checksum += uint16(v)
+ }
+ return checksum
+}
+
+// Decrypt decrypts an encrypted session key with the given private key. The
+// private key must have been decrypted first.
+// If config is nil, sensible defaults will be used.
+func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
+ var err error
+ var b []byte
+
+ // TODO(agl): use session key decryption routines here to avoid
+ // padding oracle attacks.
+ switch priv.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+ b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1.bytes)
+ case PubKeyAlgoElGamal:
+ c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
+ c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
+ b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
+ default:
+ err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
+ }
+
+ if err != nil {
+ return err
+ }
+
+ e.CipherFunc = CipherFunction(b[0])
+ e.Key = b[1 : len(b)-2]
+ expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1])
+ checksum := checksumKeyMaterial(e.Key)
+ if checksum != expectedChecksum {
+ return errors.StructuralError("EncryptedKey checksum incorrect")
+ }
+
+ return nil
+}
+
+// Serialize writes the encrypted key packet, e, to w.
+func (e *EncryptedKey) Serialize(w io.Writer) error {
+ var mpiLen int
+ switch e.Algo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+ mpiLen = 2 + len(e.encryptedMPI1.bytes)
+ case PubKeyAlgoElGamal:
+ mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes)
+ default:
+ return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
+ }
+
+ serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen)
+
+ w.Write([]byte{encryptedKeyVersion})
+ binary.Write(w, binary.BigEndian, e.KeyId)
+ w.Write([]byte{byte(e.Algo)})
+
+ switch e.Algo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+ writeMPIs(w, e.encryptedMPI1)
+ case PubKeyAlgoElGamal:
+ writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2)
+ default:
+ panic("internal error")
+ }
+
+ return nil
+}
+
+// SerializeEncryptedKey serializes an encrypted key packet to w that contains
+// key, encrypted to pub.
+// If config is nil, sensible defaults will be used.
+func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
+ var buf [10]byte
+ buf[0] = encryptedKeyVersion
+ binary.BigEndian.PutUint64(buf[1:9], pub.KeyId)
+ buf[9] = byte(pub.PubKeyAlgo)
+
+ keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */)
+ keyBlock[0] = byte(cipherFunc)
+ copy(keyBlock[1:], key)
+ checksum := checksumKeyMaterial(key)
+ keyBlock[1+len(key)] = byte(checksum >> 8)
+ keyBlock[1+len(key)+1] = byte(checksum)
+
+ switch pub.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
+ return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock)
+ case PubKeyAlgoElGamal:
+ return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock)
+ case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly:
+ return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
+ }
+
+ return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
+}
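+
+// A minimal sketch of the sending side, assuming pub is an RSA or ElGamal
+// *PublicKey and sessionKey is 16 fresh random bytes (the AES-128 key size):
+//
+// var out bytes.Buffer
+// err := SerializeEncryptedKey(&out, pub, CipherAES128, sessionKey, nil)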
+
+func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error {
+ cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
+ if err != nil {
+ return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
+ }
+
+ packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText)
+
+ err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(header[:])
+ if err != nil {
+ return err
+ }
+ return writeMPI(w, 8*uint16(len(cipherText)), cipherText)
+}
+
+func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error {
+ c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
+ if err != nil {
+ return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
+ }
+
+ packetLen := 10 /* header length */
+ packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
+ packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
+
+ err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(header[:])
+ if err != nil {
+ return err
+ }
+ err = writeBig(w, c1)
+ if err != nil {
+ return err
+ }
+ return writeBig(w, c2)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go
new file mode 100644
index 00000000000..fee14cf3cf0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/encrypted_key_test.go
@@ -0,0 +1,146 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "encoding/hex"
+ "fmt"
+ "math/big"
+ "testing"
+)
+
+func bigFromBase10(s string) *big.Int {
+ b, ok := new(big.Int).SetString(s, 10)
+ if !ok {
+ panic("bigFromBase10 failed")
+ }
+ return b
+}
+
+var encryptedKeyPub = rsa.PublicKey{
+ E: 65537,
+ N: bigFromBase10("115804063926007623305902631768113868327816898845124614648849934718568541074358183759250136204762053879858102352159854352727097033322663029387610959884180306668628526686121021235757016368038585212410610742029286439607686208110250133174279811431933746643015923132833417396844716207301518956640020862630546868823"),
+}
+
+var encryptedKeyRSAPriv = &rsa.PrivateKey{
+ PublicKey: encryptedKeyPub,
+ D: bigFromBase10("32355588668219869544751561565313228297765464314098552250409557267371233892496951383426602439009993875125222579159850054973310859166139474359774543943714622292329487391199285040721944491839695981199720170366763547754915493640685849961780092241140181198779299712578774460837139360803883139311171713302987058393"),
+}
+
+var encryptedKeyPriv = &PrivateKey{
+ PublicKey: PublicKey{
+ PubKeyAlgo: PubKeyAlgoRSA,
+ },
+ PrivateKey: encryptedKeyRSAPriv,
+}
+
+func TestDecryptingEncryptedKey(t *testing.T) {
+ const encryptedKeyHex = "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8"
+ const expectedKeyHex = "d930363f7e0308c333b9618617ea728963d8df993665ae7be1092d4926fd864b"
+
+ p, err := Read(readerFromHex(encryptedKeyHex))
+ if err != nil {
+ t.Errorf("error from Read: %s", err)
+ return
+ }
+ ek, ok := p.(*EncryptedKey)
+ if !ok {
+ t.Errorf("didn't parse an EncryptedKey, got %#v", p)
+ return
+ }
+
+ if ek.KeyId != 0x2a67d68660df41c7 || ek.Algo != PubKeyAlgoRSA {
+ t.Errorf("unexpected EncryptedKey contents: %#v", ek)
+ return
+ }
+
+ err = ek.Decrypt(encryptedKeyPriv, nil)
+ if err != nil {
+ t.Errorf("error from Decrypt: %s", err)
+ return
+ }
+
+ if ek.CipherFunc != CipherAES256 {
+ t.Errorf("unexpected EncryptedKey contents: %#v", ek)
+ return
+ }
+
+ keyHex := fmt.Sprintf("%x", ek.Key)
+ if keyHex != expectedKeyHex {
+ t.Errorf("bad key, got %s want %x", keyHex, expectedKeyHex)
+ }
+}
+
+func TestEncryptingEncryptedKey(t *testing.T) {
+ key := []byte{1, 2, 3, 4}
+ const expectedKeyHex = "01020304"
+ const keyId = 42
+
+ pub := &PublicKey{
+ PublicKey: &encryptedKeyPub,
+ KeyId: keyId,
+ PubKeyAlgo: PubKeyAlgoRSAEncryptOnly,
+ }
+
+ buf := new(bytes.Buffer)
+ err := SerializeEncryptedKey(buf, pub, CipherAES128, key, nil)
+ if err != nil {
+ t.Errorf("error writing encrypted key packet: %s", err)
+ }
+
+ p, err := Read(buf)
+ if err != nil {
+ t.Errorf("error from Read: %s", err)
+ return
+ }
+ ek, ok := p.(*EncryptedKey)
+ if !ok {
+ t.Errorf("didn't parse an EncryptedKey, got %#v", p)
+ return
+ }
+
+ if ek.KeyId != keyId || ek.Algo != PubKeyAlgoRSAEncryptOnly {
+ t.Errorf("unexpected EncryptedKey contents: %#v", ek)
+ return
+ }
+
+ err = ek.Decrypt(encryptedKeyPriv, nil)
+ if err != nil {
+ t.Errorf("error from Decrypt: %s", err)
+ return
+ }
+
+ if ek.CipherFunc != CipherAES128 {
+ t.Errorf("unexpected EncryptedKey contents: %#v", ek)
+ return
+ }
+
+ keyHex := fmt.Sprintf("%x", ek.Key)
+ if keyHex != expectedKeyHex {
+ t.Errorf("bad key, got %s want %x", keyHex, expectedKeyHex)
+ }
+}
+
+func TestSerializingEncryptedKey(t *testing.T) {
+ const encryptedKeyHex = "c18c032a67d68660df41c70104005789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8"
+
+ p, err := Read(readerFromHex(encryptedKeyHex))
+ if err != nil {
+ t.Fatalf("error from Read: %s", err)
+ }
+ ek, ok := p.(*EncryptedKey)
+ if !ok {
+ t.Fatalf("didn't parse an EncryptedKey, got %#v", p)
+ }
+
+ var buf bytes.Buffer
+ ek.Serialize(&buf)
+
+ if bufHex := hex.EncodeToString(buf.Bytes()); bufHex != encryptedKeyHex {
+ t.Fatalf("serialization of encrypted key differed from original. Original was %s, but reserialized as %s", encryptedKeyHex, bufHex)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/literal.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/literal.go
new file mode 100644
index 00000000000..1a9ec6e51e8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/literal.go
@@ -0,0 +1,89 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "encoding/binary"
+ "io"
+)
+
+// LiteralData represents a literal data packet: the raw, uninterpreted body
+// of a file or message. See RFC 4880, section 5.9.
+type LiteralData struct {
+ IsBinary bool
+ FileName string
+ Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
+ Body io.Reader
+}
+
+// ForEyesOnly returns whether the contents of the LiteralData have been marked
+// as especially sensitive.
+func (l *LiteralData) ForEyesOnly() bool {
+ return l.FileName == "_CONSOLE"
+}
+
+func (l *LiteralData) parse(r io.Reader) (err error) {
+ var buf [256]byte
+
+ _, err = readFull(r, buf[:2])
+ if err != nil {
+ return
+ }
+
+ l.IsBinary = buf[0] == 'b'
+ fileNameLen := int(buf[1])
+
+ _, err = readFull(r, buf[:fileNameLen])
+ if err != nil {
+ return
+ }
+
+ l.FileName = string(buf[:fileNameLen])
+
+ _, err = readFull(r, buf[:4])
+ if err != nil {
+ return
+ }
+
+ l.Time = binary.BigEndian.Uint32(buf[:4])
+ l.Body = r
+ return
+}
+
+// SerializeLiteral serializes a literal data packet to w and returns a
+// WriteCloser to which the data itself can be written and which MUST be closed
+// on completion. The fileName is truncated to 255 bytes.
+func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
+ var buf [4]byte
+ buf[0] = 't'
+ if isBinary {
+ buf[0] = 'b'
+ }
+ if len(fileName) > 255 {
+ fileName = fileName[:255]
+ }
+ buf[1] = byte(len(fileName))
+
+ inner, err := serializeStreamHeader(w, packetTypeLiteralData)
+ if err != nil {
+ return
+ }
+
+ _, err = inner.Write(buf[:2])
+ if err != nil {
+ return
+ }
+ _, err = inner.Write([]byte(fileName))
+ if err != nil {
+ return
+ }
+ binary.BigEndian.PutUint32(buf[:], time)
+ _, err = inner.Write(buf[:])
+ if err != nil {
+ return
+ }
+
+ plaintext = inner
+ return
+}
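+
+// A minimal sketch, assuming w is an io.WriteCloser for the enclosing
+// message and fileContents holds the data to be written; the returned
+// WriteCloser must be closed to terminate the partial-length stream:
+//
+// plaintext, err := SerializeLiteral(w, true, "data.bin", uint32(time.Now().Unix()))
+// if err == nil {
+// plaintext.Write(fileContents)
+// plaintext.Close()
+// }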
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/ocfb.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/ocfb.go
new file mode 100644
index 00000000000..ce2a33a547c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/ocfb.go
@@ -0,0 +1,143 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
+
+package packet
+
+import (
+ "crypto/cipher"
+)
+
+type ocfbEncrypter struct {
+ b cipher.Block
+ fre []byte
+ outUsed int
+}
+
+// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
+// performed.
+type OCFBResyncOption bool
+
+const (
+ OCFBResync OCFBResyncOption = true
+ OCFBNoResync OCFBResyncOption = false
+)
+
+// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
+// cipher feedback mode using the given cipher.Block, and an initial amount of
+// ciphertext. randData must be random bytes and be the same length as the
+// cipher.Block's block size. Resync determines if the "resynchronization step"
+// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
+// this point.
+func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) {
+ blockSize := block.BlockSize()
+ if len(randData) != blockSize {
+ return nil, nil
+ }
+
+ x := &ocfbEncrypter{
+ b: block,
+ fre: make([]byte, blockSize),
+ outUsed: 0,
+ }
+ prefix := make([]byte, blockSize+2)
+
+ block.Encrypt(x.fre, x.fre)
+ for i := 0; i < blockSize; i++ {
+ prefix[i] = randData[i] ^ x.fre[i]
+ }
+
+ block.Encrypt(x.fre, prefix[:blockSize])
+ prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
+ prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
+
+ if resync {
+ block.Encrypt(x.fre, prefix[2:])
+ } else {
+ x.fre[0] = prefix[blockSize]
+ x.fre[1] = prefix[blockSize+1]
+ x.outUsed = 2
+ }
+ return x, prefix
+}
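+
+// A minimal sketch, assuming key is an AES-128 key and plaintext is the
+// message; prefix must be transmitted ahead of the ciphertext so that the
+// decrypter can verify it:
+//
+// block, _ := aes.NewCipher(key)
+// randData := make([]byte, block.BlockSize())
+// io.ReadFull(rand.Reader, randData)
+// stream, prefix := NewOCFBEncrypter(block, randData, OCFBResync)
+// ciphertext := make([]byte, len(plaintext))
+// stream.XORKeyStream(ciphertext, plaintext)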
+
+func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
+ for i := 0; i < len(src); i++ {
+ if x.outUsed == len(x.fre) {
+ x.b.Encrypt(x.fre, x.fre)
+ x.outUsed = 0
+ }
+
+ x.fre[x.outUsed] ^= src[i]
+ dst[i] = x.fre[x.outUsed]
+ x.outUsed++
+ }
+}
+
+type ocfbDecrypter struct {
+ b cipher.Block
+ fre []byte
+ outUsed int
+}
+
+// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's
+// cipher feedback mode using the given cipher.Block. Prefix must be the first
+// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's
+// block size. If an incorrect key is detected then nil is returned. On
+// successful exit, blockSize+2 bytes of decrypted data are written into
+// prefix. Resync determines if the "resynchronization step" from RFC 4880,
+// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point.
+func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream {
+ blockSize := block.BlockSize()
+ if len(prefix) != blockSize+2 {
+ return nil
+ }
+
+ x := &ocfbDecrypter{
+ b: block,
+ fre: make([]byte, blockSize),
+ outUsed: 0,
+ }
+ prefixCopy := make([]byte, len(prefix))
+ copy(prefixCopy, prefix)
+
+ block.Encrypt(x.fre, x.fre)
+ for i := 0; i < blockSize; i++ {
+ prefixCopy[i] ^= x.fre[i]
+ }
+
+ block.Encrypt(x.fre, prefix[:blockSize])
+ prefixCopy[blockSize] ^= x.fre[0]
+ prefixCopy[blockSize+1] ^= x.fre[1]
+
+ if prefixCopy[blockSize-2] != prefixCopy[blockSize] ||
+ prefixCopy[blockSize-1] != prefixCopy[blockSize+1] {
+ return nil
+ }
+
+ if resync {
+ block.Encrypt(x.fre, prefix[2:])
+ } else {
+ x.fre[0] = prefix[blockSize]
+ x.fre[1] = prefix[blockSize+1]
+ x.outUsed = 2
+ }
+ copy(prefix, prefixCopy)
+ return x
+}
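+
+// The matching receive side, assuming prefix holds the first blockSize+2
+// bytes of the ciphertext and the same resync option is used as during
+// encryption; a nil return means the quick check detected a wrong key:
+//
+// stream := NewOCFBDecrypter(block, prefix, OCFBResync)
+// if stream != nil {
+// stream.XORKeyStream(plaintext, ciphertext)
+// }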
+
+func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
+ for i := 0; i < len(src); i++ {
+ if x.outUsed == len(x.fre) {
+ x.b.Encrypt(x.fre, x.fre)
+ x.outUsed = 0
+ }
+
+ c := src[i]
+ dst[i] = x.fre[x.outUsed] ^ src[i]
+ x.fre[x.outUsed] = c
+ x.outUsed++
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/ocfb_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/ocfb_test.go
new file mode 100644
index 00000000000..91022c042d4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/ocfb_test.go
@@ -0,0 +1,46 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/rand"
+ "testing"
+)
+
+var commonKey128 = []byte{0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c}
+
+func testOCFB(t *testing.T, resync OCFBResyncOption) {
+ block, err := aes.NewCipher(commonKey128)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ plaintext := []byte("this is the plaintext, which is long enough to span several blocks.")
+ randData := make([]byte, block.BlockSize())
+ rand.Reader.Read(randData)
+ ocfb, prefix := NewOCFBEncrypter(block, randData, resync)
+ ciphertext := make([]byte, len(plaintext))
+ ocfb.XORKeyStream(ciphertext, plaintext)
+
+ ocfbdec := NewOCFBDecrypter(block, prefix, resync)
+ if ocfbdec == nil {
+ t.Errorf("NewOCFBDecrypter failed (resync: %t)", resync)
+ return
+ }
+ plaintextCopy := make([]byte, len(plaintext))
+ ocfbdec.XORKeyStream(plaintextCopy, ciphertext)
+
+ if !bytes.Equal(plaintextCopy, plaintext) {
+ t.Errorf("got: %x, want: %x (resync: %t)", plaintextCopy, plaintext, resync)
+ }
+}
+
+func TestOCFB(t *testing.T) {
+ testOCFB(t, OCFBNoResync)
+ testOCFB(t, OCFBResync)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/one_pass_signature.go
new file mode 100644
index 00000000000..1713503395e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/one_pass_signature.go
@@ -0,0 +1,73 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "crypto"
+ "encoding/binary"
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/s2k"
+ "io"
+ "strconv"
+)
+
+// OnePassSignature represents a one-pass signature packet. See RFC 4880,
+// section 5.4.
+type OnePassSignature struct {
+ SigType SignatureType
+ Hash crypto.Hash
+ PubKeyAlgo PublicKeyAlgorithm
+ KeyId uint64
+ IsLast bool
+}
+
+const onePassSignatureVersion = 3
+
+func (ops *OnePassSignature) parse(r io.Reader) (err error) {
+ var buf [13]byte
+
+ _, err = readFull(r, buf[:])
+ if err != nil {
+ return
+ }
+ if buf[0] != onePassSignatureVersion {
+ return errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
+ }
+
+ var ok bool
+ ops.Hash, ok = s2k.HashIdToHash(buf[2])
+ if !ok {
+ return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
+ }
+
+ ops.SigType = SignatureType(buf[1])
+ ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
+ ops.KeyId = binary.BigEndian.Uint64(buf[4:12])
+ ops.IsLast = buf[12] != 0
+ return
+}
+
+// Serialize marshals the given OnePassSignature to w.
+func (ops *OnePassSignature) Serialize(w io.Writer) error {
+ var buf [13]byte
+ buf[0] = onePassSignatureVersion
+ buf[1] = uint8(ops.SigType)
+ var ok bool
+ buf[2], ok = s2k.HashToHashId(ops.Hash)
+ if !ok {
+ return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
+ }
+ buf[3] = uint8(ops.PubKeyAlgo)
+ binary.BigEndian.PutUint64(buf[4:12], ops.KeyId)
+ if ops.IsLast {
+ buf[12] = 1
+ }
+
+ if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil {
+ return err
+ }
+ _, err := w.Write(buf[:])
+ return err
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/opaque.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/opaque.go
new file mode 100644
index 00000000000..456d807f255
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/opaque.go
@@ -0,0 +1,162 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+
+ "golang.org/x/crypto/openpgp/errors"
+)
+
+// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is
+// useful for splitting and storing the original packet contents separately,
+// handling unsupported packet types or accessing parts of the packet not yet
+// implemented by this package.
+type OpaquePacket struct {
+ // Packet type
+ Tag uint8
+ // Reason why the packet was parsed opaquely
+ Reason error
+ // Binary contents of the packet data
+ Contents []byte
+}
+
+func (op *OpaquePacket) parse(r io.Reader) (err error) {
+ op.Contents, err = ioutil.ReadAll(r)
+ return
+}
+
+// Serialize marshals the packet to a writer in its original form, including
+// the packet header.
+func (op *OpaquePacket) Serialize(w io.Writer) (err error) {
+ err = serializeHeader(w, packetType(op.Tag), len(op.Contents))
+ if err == nil {
+ _, err = w.Write(op.Contents)
+ }
+ return
+}
+
+// Parse attempts to parse the opaque contents into a structure supported by
+// this package. If the packet is not known then the result will be another
+// OpaquePacket.
+func (op *OpaquePacket) Parse() (p Packet, err error) {
+ hdr := bytes.NewBuffer(nil)
+ err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents))
+ if err != nil {
+ op.Reason = err
+ return op, err
+ }
+ p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents)))
+ if err != nil {
+ op.Reason = err
+ p = op
+ }
+ return
+}
+
+// OpaqueReader reads OpaquePackets from an io.Reader.
+type OpaqueReader struct {
+ r io.Reader
+}
+
+func NewOpaqueReader(r io.Reader) *OpaqueReader {
+ return &OpaqueReader{r: r}
+}
+
+// Next reads the next OpaquePacket from the underlying reader.
+func (or *OpaqueReader) Next() (op *OpaquePacket, err error) {
+ tag, _, contents, err := readHeader(or.r)
+ if err != nil {
+ return
+ }
+ op = &OpaquePacket{Tag: uint8(tag), Reason: err}
+ err = op.parse(contents)
+ if err != nil {
+ consumeAll(contents)
+ }
+ return
+}
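+
+// A minimal sketch of walking raw packet data, assuming r is an io.Reader
+// over a keyring or message:
+//
+// or := NewOpaqueReader(r)
+// for {
+// op, err := or.Next()
+// if err == io.EOF {
+// break
+// } else if err != nil {
+// return err
+// }
+// // inspect op.Tag and op.Contents, or call op.Parse()
+// }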
+
+// OpaqueSubpacket represents an unparsed OpenPGP subpacket,
+// as found in signature and user attribute packets.
+type OpaqueSubpacket struct {
+ SubType uint8
+ Contents []byte
+}
+
+// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from
+// their byte representation.
+func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) {
+ var (
+ subHeaderLen int
+ subPacket *OpaqueSubpacket
+ )
+ for len(contents) > 0 {
+ subHeaderLen, subPacket, err = nextSubpacket(contents)
+ if err != nil {
+ break
+ }
+ result = append(result, subPacket)
+ contents = contents[subHeaderLen+len(subPacket.Contents):]
+ }
+ return
+}
+
+func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) {
+ // RFC 4880, section 5.2.3.1
+ var subLen uint32
+ if len(contents) < 1 {
+ goto Truncated
+ }
+ subPacket = &OpaqueSubpacket{}
+ switch {
+ case contents[0] < 192:
+ subHeaderLen = 2 // 1 length byte, 1 subtype byte
+ if len(contents) < subHeaderLen {
+ goto Truncated
+ }
+ subLen = uint32(contents[0])
+ contents = contents[1:]
+ case contents[0] < 255:
+ subHeaderLen = 3 // 2 length bytes, 1 subtype
+ if len(contents) < subHeaderLen {
+ goto Truncated
+ }
+ subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192
+ contents = contents[2:]
+ default:
+ subHeaderLen = 6 // 5 length bytes, 1 subtype
+ if len(contents) < subHeaderLen {
+ goto Truncated
+ }
+ subLen = uint32(contents[1])<<24 |
+ uint32(contents[2])<<16 |
+ uint32(contents[3])<<8 |
+ uint32(contents[4])
+ contents = contents[5:]
+ }
+ if subLen > uint32(len(contents)) || subLen == 0 {
+ goto Truncated
+ }
+ subPacket.SubType = contents[0]
+ subPacket.Contents = contents[1:subLen]
+ return
+Truncated:
+ err = errors.StructuralError("subpacket truncated")
+ return
+}
+
+func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) {
+ buf := make([]byte, 6)
+ n := serializeSubpacketLength(buf, len(osp.Contents)+1)
+ buf[n] = osp.SubType
+ if _, err = w.Write(buf[:n+1]); err != nil {
+ return
+ }
+ _, err = w.Write(osp.Contents)
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/opaque_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/opaque_test.go
new file mode 100644
index 00000000000..f27bbfe090b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/opaque_test.go
@@ -0,0 +1,67 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "io"
+ "testing"
+)
+
+// Test packet.Read error handling in OpaquePacket.Parse,
+// which attempts to re-read an OpaquePacket as a supported
+// Packet type.
+func TestOpaqueParseReason(t *testing.T) {
+ buf, err := hex.DecodeString(UnsupportedKeyHex)
+ if err != nil {
+ t.Fatal(err)
+ }
+ or := NewOpaqueReader(bytes.NewBuffer(buf))
+ count := 0
+ badPackets := 0
+ var uid *UserId
+ for {
+ op, err := or.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ t.Errorf("#%d: opaque read error: %v", count, err)
+ break
+ }
+ // try to parse opaque packet
+ p, err := op.Parse()
+ switch pkt := p.(type) {
+ case *UserId:
+ uid = pkt
+ case *OpaquePacket:
+ // If an OpaquePacket can't re-parse, packet.Read
+ // certainly had its reasons.
+ if pkt.Reason == nil {
+ t.Errorf("#%d: opaque packet, no reason", count)
+ } else {
+ badPackets++
+ }
+ }
+ count++
+ }
+
+ const expectedBad = 3
+ // Test post-conditions, make sure we actually parsed packets as expected.
+ if badPackets != expectedBad {
+ t.Errorf("unexpected # unparseable packets: %d (want %d)", badPackets, expectedBad)
+ }
+ if uid == nil {
+ t.Errorf("failed to find expected UID in unsupported keyring")
+ } else if uid.Id != "Armin M. Warda <warda@nephilim.ruhr.de>" {
+ t.Errorf("unexpected UID: %v", uid.Id)
+ }
+}
+
+// This key material has public key and signature packet versions modified to
+// an unsupported value (1), so that trying to parse the OpaquePacket into
+// a typed packet fails with an error. It also contains a GnuPG trust packet.
+// (Created with: od -An -t x1 pubring.gpg | xargs | sed 's/ //g')
+const UnsupportedKeyHex = `988d012e7a18a20000010400d6ac00d92b89c1f4396c243abb9b76d2e9673ad63483291fed88e22b82e255e441c078c6abbbf7d2d195e50b62eeaa915b85b0ec20c225ce2c64c167cacb6e711daf2e45da4a8356a059b8160e3b3628ac0dd8437b31f06d53d6e8ea4214d4a26406a6b63e1001406ef23e0bb3069fac9a99a91f77dfafd5de0f188a5da5e3c9000511b42741726d696e204d2e205761726461203c7761726461406e657068696c696d2e727568722e64653e8900950105102e8936c705d1eb399e58489901013f0e03ff5a0c4f421e34fcfa388129166420c08cd76987bcdec6f01bd0271459a85cc22048820dd4e44ac2c7d23908d540f54facf1b36b0d9c20488781ce9dca856531e76e2e846826e9951338020a03a09b57aa5faa82e9267458bd76105399885ac35af7dc1cbb6aaed7c39e1039f3b5beda2c0e916bd38560509bab81235d1a0ead83b0020000`
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/packet.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/packet.go
new file mode 100644
index 00000000000..e2bde1111e2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/packet.go
@@ -0,0 +1,539 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package packet implements parsing and serialization of OpenPGP packets, as
+// specified in RFC 4880.
+package packet // import "golang.org/x/crypto/openpgp/packet"
+
+import (
+ "bufio"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/des"
+ "golang.org/x/crypto/cast5"
+ "golang.org/x/crypto/openpgp/errors"
+ "io"
+ "math/big"
+)
+
+// readFull is the same as io.ReadFull except that reading zero bytes returns
+// ErrUnexpectedEOF rather than EOF.
+func readFull(r io.Reader, buf []byte) (n int, err error) {
+ n, err = io.ReadFull(r, buf)
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
+func readLength(r io.Reader) (length int64, isPartial bool, err error) {
+ var buf [4]byte
+ _, err = readFull(r, buf[:1])
+ if err != nil {
+ return
+ }
+ switch {
+ case buf[0] < 192:
+ length = int64(buf[0])
+ case buf[0] < 224:
+ length = int64(buf[0]-192) << 8
+ _, err = readFull(r, buf[0:1])
+ if err != nil {
+ return
+ }
+ length += int64(buf[0]) + 192
+ case buf[0] < 255:
+ length = int64(1) << (buf[0] & 0x1f)
+ isPartial = true
+ default:
+ _, err = readFull(r, buf[0:4])
+ if err != nil {
+ return
+ }
+ length = int64(buf[0])<<24 |
+ int64(buf[1])<<16 |
+ int64(buf[2])<<8 |
+ int64(buf[3])
+ }
+ return
+}
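+
+// For reference, the encodings handled above (RFC 4880, section 4.2.2): one
+// byte below 192 is the length itself; bytes 192..223 start a two-byte form
+// decoding to (b0-192)<<8 + b1 + 192 (so c1 01 means 449); bytes 224..254
+// give a partial length of 1<<(b0&0x1f); and 255 is followed by a four-byte
+// big-endian length.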
+
+// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths.
+// The continuation lengths are parsed and removed from the stream and EOF is
+// returned at the end of the packet. See RFC 4880, section 4.2.2.4.
+type partialLengthReader struct {
+ r io.Reader
+ remaining int64
+ isPartial bool
+}
+
+func (r *partialLengthReader) Read(p []byte) (n int, err error) {
+ for r.remaining == 0 {
+ if !r.isPartial {
+ return 0, io.EOF
+ }
+ r.remaining, r.isPartial, err = readLength(r.r)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ toRead := int64(len(p))
+ if toRead > r.remaining {
+ toRead = r.remaining
+ }
+
+ n, err = r.r.Read(p[:int(toRead)])
+ r.remaining -= int64(n)
+ if n < int(toRead) && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+// partialLengthWriter writes a stream of data using OpenPGP partial lengths.
+// See RFC 4880, section 4.2.2.4.
+type partialLengthWriter struct {
+ w io.WriteCloser
+ lengthByte [1]byte
+}
+
+func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
+ for len(p) > 0 {
+ for power := uint(14); power < 32; power-- {
+ l := 1 << power
+ if len(p) >= l {
+ w.lengthByte[0] = 224 + uint8(power)
+ _, err = w.w.Write(w.lengthByte[:])
+ if err != nil {
+ return
+ }
+ var m int
+ m, err = w.w.Write(p[:l])
+ n += m
+ if err != nil {
+ return
+ }
+ p = p[l:]
+ break
+ }
+ }
+ }
+ return
+}
+
+func (w *partialLengthWriter) Close() error {
+ w.lengthByte[0] = 0
+ _, err := w.w.Write(w.lengthByte[:])
+ if err != nil {
+ return err
+ }
+ return w.w.Close()
+}
+
+// A spanReader is like an io.LimitedReader, but it returns ErrUnexpectedEOF
+// if the underlying Reader returns EOF before the limit has been reached.
+type spanReader struct {
+ r io.Reader
+ n int64
+}
+
+func (l *spanReader) Read(p []byte) (n int, err error) {
+ if l.n <= 0 {
+ return 0, io.EOF
+ }
+ if int64(len(p)) > l.n {
+ p = p[0:l.n]
+ }
+ n, err = l.r.Read(p)
+ l.n -= int64(n)
+ if l.n > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+// readHeader parses a packet header and returns an io.Reader which will return
+// the contents of the packet. See RFC 4880, section 4.2.
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
+ var buf [4]byte
+ _, err = io.ReadFull(r, buf[:1])
+ if err != nil {
+ return
+ }
+ if buf[0]&0x80 == 0 {
+ err = errors.StructuralError("tag byte does not have MSB set")
+ return
+ }
+ if buf[0]&0x40 == 0 {
+ // Old format packet
+ tag = packetType((buf[0] & 0x3f) >> 2)
+ lengthType := buf[0] & 3
+ if lengthType == 3 {
+ length = -1
+ contents = r
+ return
+ }
+ lengthBytes := 1 << lengthType
+ _, err = readFull(r, buf[0:lengthBytes])
+ if err != nil {
+ return
+ }
+ for i := 0; i < lengthBytes; i++ {
+ length <<= 8
+ length |= int64(buf[i])
+ }
+ contents = &spanReader{r, length}
+ return
+ }
+
+ // New format packet
+ tag = packetType(buf[0] & 0x3f)
+ length, isPartial, err := readLength(r)
+ if err != nil {
+ return
+ }
+ if isPartial {
+ contents = &partialLengthReader{
+ remaining: length,
+ isPartial: true,
+ r: r,
+ }
+ length = -1
+ } else {
+ contents = &spanReader{r, length}
+ }
+ return
+}
+
+// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
+// 4.2.
+func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
+ var buf [6]byte
+ var n int
+
+ buf[0] = 0x80 | 0x40 | byte(ptype)
+ if length < 192 {
+ buf[1] = byte(length)
+ n = 2
+ } else if length < 8384 {
+ length -= 192
+ buf[1] = 192 + byte(length>>8)
+ buf[2] = byte(length)
+ n = 3
+ } else {
+ buf[1] = 255
+ buf[2] = byte(length >> 24)
+ buf[3] = byte(length >> 16)
+ buf[4] = byte(length >> 8)
+ buf[5] = byte(length)
+ n = 6
+ }
+
+ _, err = w.Write(buf[:n])
+ return
+}
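+
+// For example, a length of 8000 falls in the two-byte range and is encoded
+// as de 80, since (0xde-192)<<8 + 0x80 + 192 == 8000; readLength above is
+// the exact inverse.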
+
+// serializeStreamHeader writes an OpenPGP packet header to w where the
+// length of the packet is unknown. It returns an io.WriteCloser which can be
+// used to write the contents of the packet. See RFC 4880, section 4.2.
+func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
+ var buf [1]byte
+ buf[0] = 0x80 | 0x40 | byte(ptype)
+ _, err = w.Write(buf[:])
+ if err != nil {
+ return
+ }
+ out = &partialLengthWriter{w: w}
+ return
+}
+
+// Packet represents an OpenPGP packet. Users are expected to type-assert
+// instances of this interface to the specific packet types.
+type Packet interface {
+ parse(io.Reader) error
+}
+
+// consumeAll reads from the given Reader until error, returning the number of
+// bytes read.
+func consumeAll(r io.Reader) (n int64, err error) {
+ var m int
+ var buf [1024]byte
+
+ for {
+ m, err = r.Read(buf[:])
+ n += int64(m)
+ if err == io.EOF {
+ err = nil
+ return
+ }
+ if err != nil {
+ return
+ }
+ }
+
+ panic("unreachable")
+}
+
+// packetType represents the numeric ids of the different OpenPGP packet types. See
+// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
+type packetType uint8
+
+const (
+ packetTypeEncryptedKey packetType = 1
+ packetTypeSignature packetType = 2
+ packetTypeSymmetricKeyEncrypted packetType = 3
+ packetTypeOnePassSignature packetType = 4
+ packetTypePrivateKey packetType = 5
+ packetTypePublicKey packetType = 6
+ packetTypePrivateSubkey packetType = 7
+ packetTypeCompressed packetType = 8
+ packetTypeSymmetricallyEncrypted packetType = 9
+ packetTypeLiteralData packetType = 11
+ packetTypeUserId packetType = 13
+ packetTypePublicSubkey packetType = 14
+ packetTypeUserAttribute packetType = 17
+ packetTypeSymmetricallyEncryptedMDC packetType = 18
+)
+
+// peekVersion detects the version of the packet about to be read (used for
+// public key and signature packets). A bufio.Reader positioned at the
+// original offset of the io.Reader is returned.
+func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) {
+ bufr = bufio.NewReader(r)
+ var verBuf []byte
+ if verBuf, err = bufr.Peek(1); err != nil {
+ return
+ }
+ ver = verBuf[0]
+ return
+}
+
+// Read reads a single OpenPGP packet from the given io.Reader. If there is an
+// error parsing a packet, the whole packet is consumed from the input.
+func Read(r io.Reader) (p Packet, err error) {
+ tag, _, contents, err := readHeader(r)
+ if err != nil {
+ return
+ }
+
+ switch tag {
+ case packetTypeEncryptedKey:
+ p = new(EncryptedKey)
+ case packetTypeSignature:
+ var version byte
+ // Detect signature version
+ if contents, version, err = peekVersion(contents); err != nil {
+ return
+ }
+ if version < 4 {
+ p = new(SignatureV3)
+ } else {
+ p = new(Signature)
+ }
+ case packetTypeSymmetricKeyEncrypted:
+ p = new(SymmetricKeyEncrypted)
+ case packetTypeOnePassSignature:
+ p = new(OnePassSignature)
+ case packetTypePrivateKey, packetTypePrivateSubkey:
+ pk := new(PrivateKey)
+ if tag == packetTypePrivateSubkey {
+ pk.IsSubkey = true
+ }
+ p = pk
+ case packetTypePublicKey, packetTypePublicSubkey:
+ var version byte
+ if contents, version, err = peekVersion(contents); err != nil {
+ return
+ }
+ isSubkey := tag == packetTypePublicSubkey
+ if version < 4 {
+ p = &PublicKeyV3{IsSubkey: isSubkey}
+ } else {
+ p = &PublicKey{IsSubkey: isSubkey}
+ }
+ case packetTypeCompressed:
+ p = new(Compressed)
+ case packetTypeSymmetricallyEncrypted:
+ p = new(SymmetricallyEncrypted)
+ case packetTypeLiteralData:
+ p = new(LiteralData)
+ case packetTypeUserId:
+ p = new(UserId)
+ case packetTypeUserAttribute:
+ p = new(UserAttribute)
+ case packetTypeSymmetricallyEncryptedMDC:
+ se := new(SymmetricallyEncrypted)
+ se.MDC = true
+ p = se
+ default:
+ err = errors.UnknownPacketTypeError(tag)
+ }
+ if p != nil {
+ err = p.parse(contents)
+ }
+ if err != nil {
+ consumeAll(contents)
+ }
+ return
+}
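+
+// A minimal sketch of consuming a packet stream, assuming r is an io.Reader
+// over raw packet data; callers type-assert the returned Packet:
+//
+// for {
+// p, err := Read(r)
+// if err == io.EOF {
+// break
+// }
+// if _, ok := err.(errors.UnknownPacketTypeError); ok {
+// continue // unsupported packet, already consumed from r
+// }
+// if err != nil {
+// return err
+// }
+// switch pkt := p.(type) {
+// case *EncryptedKey:
+// // try pkt.Decrypt with each available private key
+// case *LiteralData:
+// // pkt.Body streams the message contents
+// }
+// }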
+
+// SignatureType represents the different semantic meanings of an OpenPGP
+// signature. See RFC 4880, section 5.2.1.
+type SignatureType uint8
+
+const (
+ SigTypeBinary SignatureType = 0
+ SigTypeText = 1
+ SigTypeGenericCert = 0x10
+ SigTypePersonaCert = 0x11
+ SigTypeCasualCert = 0x12
+ SigTypePositiveCert = 0x13
+ SigTypeSubkeyBinding = 0x18
+ SigTypePrimaryKeyBinding = 0x19
+ SigTypeDirectSignature = 0x1F
+ SigTypeKeyRevocation = 0x20
+ SigTypeSubkeyRevocation = 0x28
+)
+
+// PublicKeyAlgorithm represents the different public-key systems specified for
+// OpenPGP. See
+// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
+type PublicKeyAlgorithm uint8
+
+const (
+ PubKeyAlgoRSA PublicKeyAlgorithm = 1
+ PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
+ PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
+ PubKeyAlgoElGamal PublicKeyAlgorithm = 16
+ PubKeyAlgoDSA PublicKeyAlgorithm = 17
+ // RFC 6637, Section 5.
+ PubKeyAlgoECDH PublicKeyAlgorithm = 18
+ PubKeyAlgoECDSA PublicKeyAlgorithm = 19
+)
+
+// CanEncrypt returns true if it's possible to encrypt a message to a public
+// key of the given type.
+func (pka PublicKeyAlgorithm) CanEncrypt() bool {
+ switch pka {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal:
+ return true
+ }
+ return false
+}
+
+// CanSign returns true if it's possible for a public key of the given type to
+// sign a message.
+func (pka PublicKeyAlgorithm) CanSign() bool {
+ switch pka {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
+ return true
+ }
+ return false
+}
+
+// CipherFunction represents the different block ciphers specified for OpenPGP. See
+// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
+type CipherFunction uint8
+
+const (
+ Cipher3DES CipherFunction = 2
+ CipherCAST5 CipherFunction = 3
+ CipherAES128 CipherFunction = 7
+ CipherAES192 CipherFunction = 8
+ CipherAES256 CipherFunction = 9
+)
+
+// KeySize returns the key size, in bytes, of cipher.
+func (cipher CipherFunction) KeySize() int {
+ switch cipher {
+ case Cipher3DES:
+ return 24
+ case CipherCAST5:
+ return cast5.KeySize
+ case CipherAES128:
+ return 16
+ case CipherAES192:
+ return 24
+ case CipherAES256:
+ return 32
+ }
+ return 0
+}
+
+// blockSize returns the block size, in bytes, of cipher.
+func (cipher CipherFunction) blockSize() int {
+ switch cipher {
+ case Cipher3DES:
+ return des.BlockSize
+ case CipherCAST5:
+ return 8
+ case CipherAES128, CipherAES192, CipherAES256:
+ return 16
+ }
+ return 0
+}
+
+// new returns a fresh instance of the given cipher.
+func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
+ switch cipher {
+ case Cipher3DES:
+ block, _ = des.NewTripleDESCipher(key)
+ case CipherCAST5:
+ block, _ = cast5.NewCipher(key)
+ case CipherAES128, CipherAES192, CipherAES256:
+ block, _ = aes.NewCipher(key)
+ }
+ return
+}
+
+// readMPI reads a big integer from r. The bit length returned is the bit
+// length that was specified in r. This is preserved so that the integer can be
+// reserialized exactly.
+func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
+ var buf [2]byte
+ _, err = readFull(r, buf[0:])
+ if err != nil {
+ return
+ }
+ bitLength = uint16(buf[0])<<8 | uint16(buf[1])
+ numBytes := (int(bitLength) + 7) / 8
+ mpi = make([]byte, numBytes)
+ _, err = readFull(r, mpi)
+ return
+}
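+
+// For example, the bytes 00 09 01 ff decode to a 9-bit MPI with value 511:
+// the two-byte big-endian prefix gives the bit length, and (9+7)/8 == 2
+// value bytes follow.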
+
+// mpiLength returns the length of the given *big.Int when serialized as an
+// MPI.
+func mpiLength(n *big.Int) (mpiLengthInBytes int) {
+ mpiLengthInBytes = 2 /* MPI length */
+ mpiLengthInBytes += (n.BitLen() + 7) / 8
+ return
+}
+
+// writeMPI serializes a big integer to w.
+func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
+ _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
+ if err == nil {
+ _, err = w.Write(mpiBytes)
+ }
+ return
+}
+
+// writeBig serializes a *big.Int to w.
+func writeBig(w io.Writer, i *big.Int) error {
+ return writeMPI(w, uint16(i.BitLen()), i.Bytes())
+}
+
+// CompressionAlgo represents the different compression algorithms
+// supported by OpenPGP (except for BZIP2, which is not currently
+// supported). See Section 9.3 of RFC 4880.
+type CompressionAlgo uint8
+
+const (
+ CompressionNone CompressionAlgo = 0
+ CompressionZIP CompressionAlgo = 1
+ CompressionZLIB CompressionAlgo = 2
+)
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/packet_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/packet_test.go
new file mode 100644
index 00000000000..1dab5c3d588
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/packet_test.go
@@ -0,0 +1,255 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "golang.org/x/crypto/openpgp/errors"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestReadFull(t *testing.T) {
+ var out [4]byte
+
+ b := bytes.NewBufferString("foo")
+ n, err := readFull(b, out[:3])
+ if n != 3 || err != nil {
+ t.Errorf("full read failed n:%d err:%s", n, err)
+ }
+
+ b = bytes.NewBufferString("foo")
+ n, err = readFull(b, out[:4])
+ if n != 3 || err != io.ErrUnexpectedEOF {
+ t.Errorf("partial read failed n:%d err:%s", n, err)
+ }
+
+ b = bytes.NewBuffer(nil)
+ n, err = readFull(b, out[:3])
+ if n != 0 || err != io.ErrUnexpectedEOF {
+ t.Errorf("empty read failed n:%d err:%s", n, err)
+ }
+}
+
+func readerFromHex(s string) io.Reader {
+ data, err := hex.DecodeString(s)
+ if err != nil {
+ panic("readerFromHex: bad input")
+ }
+ return bytes.NewBuffer(data)
+}
+
+var readLengthTests = []struct {
+ hexInput string
+ length int64
+ isPartial bool
+ err error
+}{
+ {"", 0, false, io.ErrUnexpectedEOF},
+ {"1f", 31, false, nil},
+ {"c0", 0, false, io.ErrUnexpectedEOF},
+ {"c101", 256 + 1 + 192, false, nil},
+ {"e0", 1, true, nil},
+ {"e1", 2, true, nil},
+ {"e2", 4, true, nil},
+ {"ff", 0, false, io.ErrUnexpectedEOF},
+ {"ff00", 0, false, io.ErrUnexpectedEOF},
+ {"ff0000", 0, false, io.ErrUnexpectedEOF},
+ {"ff000000", 0, false, io.ErrUnexpectedEOF},
+ {"ff00000000", 0, false, nil},
+ {"ff01020304", 16909060, false, nil},
+}
+
+func TestReadLength(t *testing.T) {
+ for i, test := range readLengthTests {
+ length, isPartial, err := readLength(readerFromHex(test.hexInput))
+ if test.err != nil {
+ if err != test.err {
+ t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("%d: unexpected error: %s", i, err)
+ continue
+ }
+ if length != test.length || isPartial != test.isPartial {
+ t.Errorf("%d: bad result got:(%d,%t) want:(%d,%t)", i, length, isPartial, test.length, test.isPartial)
+ }
+ }
+}
+
+var partialLengthReaderTests = []struct {
+ hexInput string
+ err error
+ hexOutput string
+}{
+ {"e0", io.ErrUnexpectedEOF, ""},
+ {"e001", io.ErrUnexpectedEOF, ""},
+ {"e0010102", nil, "0102"},
+ {"ff00000000", nil, ""},
+ {"e10102e1030400", nil, "01020304"},
+ {"e101", io.ErrUnexpectedEOF, ""},
+}
+
+func TestPartialLengthReader(t *testing.T) {
+ for i, test := range partialLengthReaderTests {
+ r := &partialLengthReader{readerFromHex(test.hexInput), 0, true}
+ out, err := ioutil.ReadAll(r)
+ if test.err != nil {
+ if err != test.err {
+ t.Errorf("%d: expected different error got:%s want:%s", i, err, test.err)
+ }
+ continue
+ }
+ if err != nil {
+ t.Errorf("%d: unexpected error: %s", i, err)
+ continue
+ }
+
+ got := fmt.Sprintf("%x", out)
+ if got != test.hexOutput {
+ t.Errorf("%d: got:%s want:%s", i, test.hexOutput, got)
+ }
+ }
+}
+
+var readHeaderTests = []struct {
+ hexInput string
+ structuralError bool
+ unexpectedEOF bool
+ tag int
+ length int64
+ hexOutput string
+}{
+ {"", false, false, 0, 0, ""},
+ {"7f", true, false, 0, 0, ""},
+
+ // Old format headers
+ {"80", false, true, 0, 0, ""},
+ {"8001", false, true, 0, 1, ""},
+ {"800102", false, false, 0, 1, "02"},
+ {"81000102", false, false, 0, 1, "02"},
+ {"820000000102", false, false, 0, 1, "02"},
+ {"860000000102", false, false, 1, 1, "02"},
+ {"83010203", false, false, 0, -1, "010203"},
+
+ // New format headers
+ {"c0", false, true, 0, 0, ""},
+ {"c000", false, false, 0, 0, ""},
+ {"c00102", false, false, 0, 1, "02"},
+ {"c0020203", false, false, 0, 2, "0203"},
+ {"c00202", false, true, 0, 2, ""},
+ {"c3020203", false, false, 3, 2, "0203"},
+}
+
+func TestReadHeader(t *testing.T) {
+ for i, test := range readHeaderTests {
+ tag, length, contents, err := readHeader(readerFromHex(test.hexInput))
+ if test.structuralError {
+ if _, ok := err.(errors.StructuralError); ok {
+ continue
+ }
+ t.Errorf("%d: expected StructuralError, got:%s", i, err)
+ continue
+ }
+ if err != nil {
+ if len(test.hexInput) == 0 && err == io.EOF {
+ continue
+ }
+ if !test.unexpectedEOF || err != io.ErrUnexpectedEOF {
+ t.Errorf("%d: unexpected error from readHeader: %s", i, err)
+ }
+ continue
+ }
+ if int(tag) != test.tag || length != test.length {
+ t.Errorf("%d: got:(%d,%d) want:(%d,%d)", i, int(tag), length, test.tag, test.length)
+ continue
+ }
+
+ body, err := ioutil.ReadAll(contents)
+ if err != nil {
+ if !test.unexpectedEOF || err != io.ErrUnexpectedEOF {
+ t.Errorf("%d: unexpected error from contents: %s", i, err)
+ }
+ continue
+ }
+ if test.unexpectedEOF {
+ t.Errorf("%d: expected ErrUnexpectedEOF from contents but got no error", i)
+ continue
+ }
+ got := fmt.Sprintf("%x", body)
+ if got != test.hexOutput {
+ t.Errorf("%d: got:%s want:%s", i, got, test.hexOutput)
+ }
+ }
+}
+
+func TestSerializeHeader(t *testing.T) {
+ tag := packetTypePublicKey
+ lengths := []int{0, 1, 2, 64, 192, 193, 8000, 8384, 8385, 10000}
+
+ for _, length := range lengths {
+ buf := bytes.NewBuffer(nil)
+ serializeHeader(buf, tag, length)
+ tag2, length2, _, err := readHeader(buf)
+ if err != nil {
+ t.Errorf("length %d, err: %s", length, err)
+ }
+ if tag2 != tag {
+ t.Errorf("length %d, tag incorrect (got %d, want %d)", length, tag2, tag)
+ }
+ if int(length2) != length {
+ t.Errorf("length %d, length incorrect (got %d)", length, length2)
+ }
+ }
+}
+
+func TestPartialLengths(t *testing.T) {
+ buf := bytes.NewBuffer(nil)
+ w := new(partialLengthWriter)
+ w.w = noOpCloser{buf}
+
+ const maxChunkSize = 64
+
+ var b [maxChunkSize]byte
+ var n uint8
+ for l := 1; l <= maxChunkSize; l++ {
+ for i := 0; i < l; i++ {
+ b[i] = n
+ n++
+ }
+ m, err := w.Write(b[:l])
+ if m != l {
+ t.Errorf("short write got: %d want: %d", m, l)
+ }
+ if err != nil {
+ t.Errorf("error from write: %s", err)
+ }
+ }
+ w.Close()
+
+ want := (maxChunkSize * (maxChunkSize + 1)) / 2
+ copyBuf := bytes.NewBuffer(nil)
+ r := &partialLengthReader{buf, 0, true}
+ m, err := io.Copy(copyBuf, r)
+ if m != int64(want) {
+ t.Errorf("short copy got: %d want: %d", m, want)
+ }
+ if err != nil {
+ t.Errorf("error from copy: %s", err)
+ }
+
+ copyBytes := copyBuf.Bytes()
+ for i := 0; i < want; i++ {
+ if copyBytes[i] != uint8(i) {
+ t.Errorf("bad pattern in copy at %d", i)
+ break
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/private_key.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/private_key.go
new file mode 100644
index 00000000000..740a27deacb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/private_key.go
@@ -0,0 +1,326 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/dsa"
+ "crypto/rsa"
+ "crypto/sha1"
+ "golang.org/x/crypto/openpgp/elgamal"
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/s2k"
+ "io"
+ "io/ioutil"
+ "math/big"
+ "strconv"
+ "time"
+)
+
+// PrivateKey represents a possibly encrypted private key. See RFC 4880,
+// section 5.5.3.
+type PrivateKey struct {
+ PublicKey
+ Encrypted bool // if true then the private key is unavailable until Decrypt has been called.
+ encryptedData []byte
+ cipher CipherFunction
+ s2k func(out, in []byte)
+ PrivateKey interface{} // An *rsa.PrivateKey, *dsa.PrivateKey or *elgamal.PrivateKey.
+ sha1Checksum bool
+ iv []byte
+}
+
+func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
+ pk := new(PrivateKey)
+ pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey)
+ pk.PrivateKey = priv
+ return pk
+}
+
+func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
+ pk := new(PrivateKey)
+ pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey)
+ pk.PrivateKey = priv
+ return pk
+}
+
+func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
+ pk := new(PrivateKey)
+ pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey)
+ pk.PrivateKey = priv
+ return pk
+}
+
+func (pk *PrivateKey) parse(r io.Reader) (err error) {
+ err = (&pk.PublicKey).parse(r)
+ if err != nil {
+ return
+ }
+ var buf [1]byte
+ _, err = readFull(r, buf[:])
+ if err != nil {
+ return
+ }
+
+ s2kType := buf[0]
+
+ switch s2kType {
+ case 0:
+ pk.s2k = nil
+ pk.Encrypted = false
+ case 254, 255:
+ _, err = readFull(r, buf[:])
+ if err != nil {
+ return
+ }
+ pk.cipher = CipherFunction(buf[0])
+ pk.Encrypted = true
+ pk.s2k, err = s2k.Parse(r)
+ if err != nil {
+ return
+ }
+ if s2kType == 254 {
+ pk.sha1Checksum = true
+ }
+ default:
+ return errors.UnsupportedError("deprecated s2k function in private key")
+ }
+
+ if pk.Encrypted {
+ blockSize := pk.cipher.blockSize()
+ if blockSize == 0 {
+ return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
+ }
+ pk.iv = make([]byte, blockSize)
+ _, err = readFull(r, pk.iv)
+ if err != nil {
+ return
+ }
+ }
+
+ pk.encryptedData, err = ioutil.ReadAll(r)
+ if err != nil {
+ return
+ }
+
+ if !pk.Encrypted {
+ return pk.parsePrivateKey(pk.encryptedData)
+ }
+
+ return
+}
+
+func mod64kHash(d []byte) uint16 {
+ var h uint16
+ for _, b := range d {
+ h += uint16(b)
+ }
+ return h
+}
+
+func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
+ // TODO(agl): support encrypted private keys
+ buf := bytes.NewBuffer(nil)
+ err = pk.PublicKey.serializeWithoutHeaders(buf)
+ if err != nil {
+ return
+ }
+ buf.WriteByte(0 /* no encryption */)
+
+ privateKeyBuf := bytes.NewBuffer(nil)
+
+ switch priv := pk.PrivateKey.(type) {
+ case *rsa.PrivateKey:
+ err = serializeRSAPrivateKey(privateKeyBuf, priv)
+ case *dsa.PrivateKey:
+ err = serializeDSAPrivateKey(privateKeyBuf, priv)
+ case *elgamal.PrivateKey:
+ err = serializeElGamalPrivateKey(privateKeyBuf, priv)
+ default:
+ err = errors.InvalidArgumentError("unknown private key type")
+ }
+ if err != nil {
+ return
+ }
+
+ ptype := packetTypePrivateKey
+ contents := buf.Bytes()
+ privateKeyBytes := privateKeyBuf.Bytes()
+ if pk.IsSubkey {
+ ptype = packetTypePrivateSubkey
+ }
+ err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2)
+ if err != nil {
+ return
+ }
+ _, err = w.Write(contents)
+ if err != nil {
+ return
+ }
+ _, err = w.Write(privateKeyBytes)
+ if err != nil {
+ return
+ }
+
+ checksum := mod64kHash(privateKeyBytes)
+ var checksumBytes [2]byte
+ checksumBytes[0] = byte(checksum >> 8)
+ checksumBytes[1] = byte(checksum)
+ _, err = w.Write(checksumBytes[:])
+
+ return
+}
+
+func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
+ err := writeBig(w, priv.D)
+ if err != nil {
+ return err
+ }
+ err = writeBig(w, priv.Primes[1])
+ if err != nil {
+ return err
+ }
+ err = writeBig(w, priv.Primes[0])
+ if err != nil {
+ return err
+ }
+ return writeBig(w, priv.Precomputed.Qinv)
+}
+
+func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error {
+ return writeBig(w, priv.X)
+}
+
+func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error {
+ return writeBig(w, priv.X)
+}
+
+// Decrypt decrypts an encrypted private key using a passphrase.
+func (pk *PrivateKey) Decrypt(passphrase []byte) error {
+ if !pk.Encrypted {
+ return nil
+ }
+
+ key := make([]byte, pk.cipher.KeySize())
+ pk.s2k(key, passphrase)
+ block := pk.cipher.new(key)
+ cfb := cipher.NewCFBDecrypter(block, pk.iv)
+
+ data := make([]byte, len(pk.encryptedData))
+ cfb.XORKeyStream(data, pk.encryptedData)
+
+ if pk.sha1Checksum {
+ if len(data) < sha1.Size {
+ return errors.StructuralError("truncated private key data")
+ }
+ h := sha1.New()
+ h.Write(data[:len(data)-sha1.Size])
+ sum := h.Sum(nil)
+ if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
+ return errors.StructuralError("private key checksum failure")
+ }
+ data = data[:len(data)-sha1.Size]
+ } else {
+ if len(data) < 2 {
+ return errors.StructuralError("truncated private key data")
+ }
+ var sum uint16
+ for i := 0; i < len(data)-2; i++ {
+ sum += uint16(data[i])
+ }
+ if data[len(data)-2] != uint8(sum>>8) ||
+ data[len(data)-1] != uint8(sum) {
+ return errors.StructuralError("private key checksum failure")
+ }
+ data = data[:len(data)-2]
+ }
+
+ return pk.parsePrivateKey(data)
+}
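+
+// A minimal sketch, assuming pk came from Read and was protected with a
+// passphrase:
+//
+// if pk.Encrypted {
+// if err := pk.Decrypt([]byte("passphrase")); err != nil {
+// // wrong passphrase: the checksum over the key material failed
+// }
+// }
+// rsaPriv, ok := pk.PrivateKey.(*rsa.PrivateKey)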
+
+func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
+ switch pk.PublicKey.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
+ return pk.parseRSAPrivateKey(data)
+ case PubKeyAlgoDSA:
+ return pk.parseDSAPrivateKey(data)
+ case PubKeyAlgoElGamal:
+ return pk.parseElGamalPrivateKey(data)
+ }
+ panic("impossible")
+}
+
+func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
+ rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
+ rsaPriv := new(rsa.PrivateKey)
+ rsaPriv.PublicKey = *rsaPub
+
+ buf := bytes.NewBuffer(data)
+ d, _, err := readMPI(buf)
+ if err != nil {
+ return
+ }
+ p, _, err := readMPI(buf)
+ if err != nil {
+ return
+ }
+ q, _, err := readMPI(buf)
+ if err != nil {
+ return
+ }
+
+ rsaPriv.D = new(big.Int).SetBytes(d)
+ rsaPriv.Primes = make([]*big.Int, 2)
+ rsaPriv.Primes[0] = new(big.Int).SetBytes(p)
+ rsaPriv.Primes[1] = new(big.Int).SetBytes(q)
+ if err := rsaPriv.Validate(); err != nil {
+ return err
+ }
+ rsaPriv.Precompute()
+ pk.PrivateKey = rsaPriv
+ pk.Encrypted = false
+ pk.encryptedData = nil
+
+ return nil
+}
+
+func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
+ dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
+ dsaPriv := new(dsa.PrivateKey)
+ dsaPriv.PublicKey = *dsaPub
+
+ buf := bytes.NewBuffer(data)
+ x, _, err := readMPI(buf)
+ if err != nil {
+ return
+ }
+
+ dsaPriv.X = new(big.Int).SetBytes(x)
+ pk.PrivateKey = dsaPriv
+ pk.Encrypted = false
+ pk.encryptedData = nil
+
+ return nil
+}
+
+func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
+ pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
+ priv := new(elgamal.PrivateKey)
+ priv.PublicKey = *pub
+
+ buf := bytes.NewBuffer(data)
+ x, _, err := readMPI(buf)
+ if err != nil {
+ return
+ }
+
+ priv.X = new(big.Int).SetBytes(x)
+ pk.PrivateKey = priv
+ pk.Encrypted = false
+ pk.encryptedData = nil
+
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/private_key_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/private_key_test.go
new file mode 100644
index 00000000000..25c8931f2ca
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/private_key_test.go
@@ -0,0 +1,69 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "testing"
+ "time"
+)
+
+var privateKeyTests = []struct {
+ privateKeyHex string
+ creationTime time.Time
+}{
+ {
+ privKeyRSAHex,
+ time.Unix(0x4cc349a8, 0),
+ },
+ {
+ privKeyElGamalHex,
+ time.Unix(0x4df9ee1a, 0),
+ },
+}
+
+func TestPrivateKeyRead(t *testing.T) {
+ for i, test := range privateKeyTests {
+ packet, err := Read(readerFromHex(test.privateKeyHex))
+ if err != nil {
+ t.Errorf("#%d: failed to parse: %s", i, err)
+ continue
+ }
+
+ privKey := packet.(*PrivateKey)
+
+ if !privKey.Encrypted {
+ t.Errorf("#%d: private key isn't encrypted", i)
+ continue
+ }
+
+ err = privKey.Decrypt([]byte("wrong password"))
+ if err == nil {
+ t.Errorf("#%d: decrypted with incorrect key", i)
+ continue
+ }
+
+ err = privKey.Decrypt([]byte("testing"))
+ if err != nil {
+ t.Errorf("#%d: failed to decrypt: %s", i, err)
+ continue
+ }
+
+ if !privKey.CreationTime.Equal(test.creationTime) || privKey.Encrypted {
+ t.Errorf("#%d: bad result, got: %#v", i, privKey)
+ }
+ }
+}
+
+func TestIssue11505(t *testing.T) {
+ // Parsing an RSA private key with p or q == 1 used to panic due to a divide by zero.
+ _, _ = Read(readerFromHex("9c3004303030300100000011303030000000000000010130303030303030303030303030303030303030303030303030303030303030303030303030303030303030"))
+}
+
+// Generated with `gpg --export-secret-keys "Test Key 2"`
+const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec"
+
+// Generated by `gpg --export-secret-keys` followed by a manual extraction of
+// the ElGamal subkey from the packets.
+const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc"
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key.go
new file mode 100644
index 00000000000..37a6472e561
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key.go
@@ -0,0 +1,724 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/sha1"
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "io"
+ "math/big"
+ "strconv"
+ "time"
+
+ "golang.org/x/crypto/openpgp/elgamal"
+ "golang.org/x/crypto/openpgp/errors"
+)
+
+var (
+ // NIST curve P-256
+ oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}
+ // NIST curve P-384
+ oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22}
+ // NIST curve P-521
+ oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23}
+)
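+
+// These OID values are the DER-encoded OID bodies with the tag and length
+// octets stripped, which is how RFC 6637 defines the curve OID field.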
+
+const maxOIDLength = 8
+
+// ecdsaKey stores the algorithm-specific fields for ECDSA keys,
+// as defined in RFC 6637, Section 9.
+type ecdsaKey struct {
+ // oid contains the OID byte sequence identifying the elliptic curve used
+ oid []byte
+ // p contains the elliptic curve point that represents the public key
+ p parsedMPI
+}
+
+// parseOID reads the OID for the curve as defined in RFC 6637, Section 9.
+func parseOID(r io.Reader) (oid []byte, err error) {
+ buf := make([]byte, maxOIDLength)
+ if _, err = readFull(r, buf[:1]); err != nil {
+ return
+ }
+ oidLen := buf[0]
+ if int(oidLen) > len(buf) {
+ err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen)))
+ return
+ }
+ oid = buf[:oidLen]
+ _, err = readFull(r, oid)
+ return
+}
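+
+// On the wire (RFC 6637, Section 9) the curve OID is a single length octet
+// followed by the OID body; P-384, for example, is encoded as
+// 0x05 0x2B 0x81 0x04 0x00 0x22.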
+
+func (f *ecdsaKey) parse(r io.Reader) (err error) {
+ if f.oid, err = parseOID(r); err != nil {
+ return err
+ }
+ f.p.bytes, f.p.bitLength, err = readMPI(r)
+ return
+}
+
+func (f *ecdsaKey) serialize(w io.Writer) (err error) {
+ buf := make([]byte, maxOIDLength+1)
+ buf[0] = byte(len(f.oid))
+ copy(buf[1:], f.oid)
+ if _, err = w.Write(buf[:len(f.oid)+1]); err != nil {
+ return
+ }
+ return writeMPIs(w, f.p)
+}
+
+func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) {
+ var c elliptic.Curve
+ if bytes.Equal(f.oid, oidCurveP256) {
+ c = elliptic.P256()
+ } else if bytes.Equal(f.oid, oidCurveP384) {
+ c = elliptic.P384()
+ } else if bytes.Equal(f.oid, oidCurveP521) {
+ c = elliptic.P521()
+ } else {
+ return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid))
+ }
+ x, y := elliptic.Unmarshal(c, f.p.bytes)
+ if x == nil {
+ return nil, errors.UnsupportedError("failed to parse EC point")
+ }
+ return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil
+}
+
+func (f *ecdsaKey) byteLen() int {
+ return 1 + len(f.oid) + 2 + len(f.p.bytes)
+}
+
+type kdfHashFunction byte
+type kdfAlgorithm byte
+
+// ecdhKdf stores key derivation function parameters
+// used for ECDH encryption. See RFC 6637, Section 9.
+type ecdhKdf struct {
+ KdfHash kdfHashFunction
+ KdfAlgo kdfAlgorithm
+}
+
+func (f *ecdhKdf) parse(r io.Reader) (err error) {
+ buf := make([]byte, 1)
+ if _, err = readFull(r, buf); err != nil {
+ return
+ }
+ kdfLen := int(buf[0])
+ if kdfLen < 3 {
+ return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen))
+ }
+ buf = make([]byte, kdfLen)
+ if _, err = readFull(r, buf); err != nil {
+ return
+ }
+ reserved := int(buf[0])
+ f.KdfHash = kdfHashFunction(buf[1])
+ f.KdfAlgo = kdfAlgorithm(buf[2])
+ if reserved != 0x01 {
+ return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved))
+ }
+ return
+}
+
+func (f *ecdhKdf) serialize(w io.Writer) (err error) {
+ buf := make([]byte, 4)
+ // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys.
+ buf[0] = byte(0x03) // Length of the following fields
+ buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now
+ buf[2] = byte(f.KdfHash)
+ buf[3] = byte(f.KdfAlgo)
+ _, err = w.Write(buf[:])
+ return
+}
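+
+// For reference, the KDF parameters for the ECDH test subkey in
+// public_key_test.go serialize as 0x03 0x01 0x09 0x09: SHA-384 (0x09) as
+// the KDF hash and AES-256 (0x09) as the key-wrapping algorithm.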
+
+func (f *ecdhKdf) byteLen() int {
+ return 4
+}
+
+// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2.
+type PublicKey struct {
+ CreationTime time.Time
+ PubKeyAlgo PublicKeyAlgorithm
+ PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey
+ Fingerprint [20]byte
+ KeyId uint64
+ IsSubkey bool
+
+ n, e, p, q, g, y parsedMPI
+
+ // RFC 6637 fields
+ ec *ecdsaKey
+ ecdh *ecdhKdf
+}
+
+// signingKey provides a convenient abstraction over signature verification
+// for v3 and v4 public keys.
+type signingKey interface {
+ SerializeSignaturePrefix(io.Writer)
+ serializeWithoutHeaders(io.Writer) error
+}
+
+func fromBig(n *big.Int) parsedMPI {
+ return parsedMPI{
+ bytes: n.Bytes(),
+ bitLength: uint16(n.BitLen()),
+ }
+}
+
+// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey.
+func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey {
+ pk := &PublicKey{
+ CreationTime: creationTime,
+ PubKeyAlgo: PubKeyAlgoRSA,
+ PublicKey: pub,
+ n: fromBig(pub.N),
+ e: fromBig(big.NewInt(int64(pub.E))),
+ }
+
+ pk.setFingerPrintAndKeyId()
+ return pk
+}
+
+// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey.
+func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey {
+ pk := &PublicKey{
+ CreationTime: creationTime,
+ PubKeyAlgo: PubKeyAlgoDSA,
+ PublicKey: pub,
+ p: fromBig(pub.P),
+ q: fromBig(pub.Q),
+ g: fromBig(pub.G),
+ y: fromBig(pub.Y),
+ }
+
+ pk.setFingerPrintAndKeyId()
+ return pk
+}
+
+// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey.
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey {
+ pk := &PublicKey{
+ CreationTime: creationTime,
+ PubKeyAlgo: PubKeyAlgoElGamal,
+ PublicKey: pub,
+ p: fromBig(pub.P),
+ g: fromBig(pub.G),
+ y: fromBig(pub.Y),
+ }
+
+ pk.setFingerPrintAndKeyId()
+ return pk
+}
+
+func (pk *PublicKey) parse(r io.Reader) (err error) {
+ // RFC 4880, section 5.5.2
+ var buf [6]byte
+ _, err = readFull(r, buf[:])
+ if err != nil {
+ return
+ }
+ if buf[0] != 4 {
+ return errors.UnsupportedError("public key version")
+ }
+ pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
+ pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+ err = pk.parseRSA(r)
+ case PubKeyAlgoDSA:
+ err = pk.parseDSA(r)
+ case PubKeyAlgoElGamal:
+ err = pk.parseElGamal(r)
+ case PubKeyAlgoECDSA:
+ pk.ec = new(ecdsaKey)
+ if err = pk.ec.parse(r); err != nil {
+ return err
+ }
+ pk.PublicKey, err = pk.ec.newECDSA()
+ case PubKeyAlgoECDH:
+ pk.ec = new(ecdsaKey)
+ if err = pk.ec.parse(r); err != nil {
+ return
+ }
+ pk.ecdh = new(ecdhKdf)
+ if err = pk.ecdh.parse(r); err != nil {
+ return
+ }
+ // The ECDH key is stored in an ecdsa.PublicKey for convenience.
+ pk.PublicKey, err = pk.ec.newECDSA()
+ default:
+ err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
+ }
+ if err != nil {
+ return
+ }
+
+ pk.setFingerPrintAndKeyId()
+ return
+}
+
+func (pk *PublicKey) setFingerPrintAndKeyId() {
+ // RFC 4880, section 12.2
+ fingerPrint := sha1.New()
+ pk.SerializeSignaturePrefix(fingerPrint)
+ pk.serializeWithoutHeaders(fingerPrint)
+ copy(pk.Fingerprint[:], fingerPrint.Sum(nil))
+ pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
+}
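+
+// Note that for v4 keys the key ID is simply the low 64 bits of the SHA-1
+// fingerprint, so it never needs to be stored separately.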
+
+// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
+// section 5.5.2.
+func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
+ pk.n.bytes, pk.n.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
+ pk.e.bytes, pk.e.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
+
+ if len(pk.e.bytes) > 3 {
+ err = errors.UnsupportedError("large public exponent")
+ return
+ }
+ rsa := &rsa.PublicKey{
+ N: new(big.Int).SetBytes(pk.n.bytes),
+ E: 0,
+ }
+ for i := 0; i < len(pk.e.bytes); i++ {
+ rsa.E <<= 8
+ rsa.E |= int(pk.e.bytes[i])
+ }
+ pk.PublicKey = rsa
+ return
+}
+
+// parseDSA parses DSA public key material from the given Reader. See RFC 4880,
+// section 5.5.2.
+func (pk *PublicKey) parseDSA(r io.Reader) (err error) {
+ pk.p.bytes, pk.p.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
+ pk.q.bytes, pk.q.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
+ pk.g.bytes, pk.g.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
+ pk.y.bytes, pk.y.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
+
+ dsa := new(dsa.PublicKey)
+ dsa.P = new(big.Int).SetBytes(pk.p.bytes)
+ dsa.Q = new(big.Int).SetBytes(pk.q.bytes)
+ dsa.G = new(big.Int).SetBytes(pk.g.bytes)
+ dsa.Y = new(big.Int).SetBytes(pk.y.bytes)
+ pk.PublicKey = dsa
+ return
+}
+
+// parseElGamal parses ElGamal public key material from the given Reader. See
+// RFC 4880, section 5.5.2.
+func (pk *PublicKey) parseElGamal(r io.Reader) (err error) {
+ pk.p.bytes, pk.p.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
+ pk.g.bytes, pk.g.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
+ pk.y.bytes, pk.y.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
+
+ elgamal := new(elgamal.PublicKey)
+ elgamal.P = new(big.Int).SetBytes(pk.p.bytes)
+ elgamal.G = new(big.Int).SetBytes(pk.g.bytes)
+ elgamal.Y = new(big.Int).SetBytes(pk.y.bytes)
+ pk.PublicKey = elgamal
+ return
+}
+
+// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
+// The prefix is used when calculating a signature over this public key. See
+// RFC 4880, section 5.2.4.
+func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) {
+ var pLength uint16
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+ pLength += 2 + uint16(len(pk.n.bytes))
+ pLength += 2 + uint16(len(pk.e.bytes))
+ case PubKeyAlgoDSA:
+ pLength += 2 + uint16(len(pk.p.bytes))
+ pLength += 2 + uint16(len(pk.q.bytes))
+ pLength += 2 + uint16(len(pk.g.bytes))
+ pLength += 2 + uint16(len(pk.y.bytes))
+ case PubKeyAlgoElGamal:
+ pLength += 2 + uint16(len(pk.p.bytes))
+ pLength += 2 + uint16(len(pk.g.bytes))
+ pLength += 2 + uint16(len(pk.y.bytes))
+ case PubKeyAlgoECDSA:
+ pLength += uint16(pk.ec.byteLen())
+ case PubKeyAlgoECDH:
+ pLength += uint16(pk.ec.byteLen())
+ pLength += uint16(pk.ecdh.byteLen())
+ default:
+ panic("unknown public key algorithm")
+ }
+ pLength += 6
+ h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
+ return
+}
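+
+// The 0x99 octet and two-octet length above frame the key material exactly
+// as it would appear in an old-format public key packet; pLength adds 6 for
+// the version, creation time and algorithm bytes written by
+// serializeWithoutHeaders.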
+
+func (pk *PublicKey) Serialize(w io.Writer) (err error) {
+ length := 6 // 6 byte header
+
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+ length += 2 + len(pk.n.bytes)
+ length += 2 + len(pk.e.bytes)
+ case PubKeyAlgoDSA:
+ length += 2 + len(pk.p.bytes)
+ length += 2 + len(pk.q.bytes)
+ length += 2 + len(pk.g.bytes)
+ length += 2 + len(pk.y.bytes)
+ case PubKeyAlgoElGamal:
+ length += 2 + len(pk.p.bytes)
+ length += 2 + len(pk.g.bytes)
+ length += 2 + len(pk.y.bytes)
+ case PubKeyAlgoECDSA:
+ length += pk.ec.byteLen()
+ case PubKeyAlgoECDH:
+ length += pk.ec.byteLen()
+ length += pk.ecdh.byteLen()
+ default:
+ panic("unknown public key algorithm")
+ }
+
+ packetType := packetTypePublicKey
+ if pk.IsSubkey {
+ packetType = packetTypePublicSubkey
+ }
+ err = serializeHeader(w, packetType, length)
+ if err != nil {
+ return
+ }
+ return pk.serializeWithoutHeaders(w)
+}
+
+// serializeWithoutHeaders marshals the PublicKey to w in the form of an
+// OpenPGP public key packet, not including the packet header.
+func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
+ var buf [6]byte
+ buf[0] = 4
+ t := uint32(pk.CreationTime.Unix())
+ buf[1] = byte(t >> 24)
+ buf[2] = byte(t >> 16)
+ buf[3] = byte(t >> 8)
+ buf[4] = byte(t)
+ buf[5] = byte(pk.PubKeyAlgo)
+
+ _, err = w.Write(buf[:])
+ if err != nil {
+ return
+ }
+
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+ return writeMPIs(w, pk.n, pk.e)
+ case PubKeyAlgoDSA:
+ return writeMPIs(w, pk.p, pk.q, pk.g, pk.y)
+ case PubKeyAlgoElGamal:
+ return writeMPIs(w, pk.p, pk.g, pk.y)
+ case PubKeyAlgoECDSA:
+ return pk.ec.serialize(w)
+ case PubKeyAlgoECDH:
+ if err = pk.ec.serialize(w); err != nil {
+ return
+ }
+ return pk.ecdh.serialize(w)
+ }
+ return errors.InvalidArgumentError("bad public-key algorithm")
+}
+
+// CanSign returns true iff this public key can generate signatures.
+func (pk *PublicKey) CanSign() bool {
+ return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal
+}
+
+// VerifySignature returns nil iff sig is a valid signature, made by this
+// public key, of the data hashed into signed. signed is mutated by this call.
+func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
+ if !pk.CanSign() {
+ return errors.InvalidArgumentError("public key cannot generate signatures")
+ }
+
+ signed.Write(sig.HashSuffix)
+ hashBytes := signed.Sum(nil)
+
+ if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
+ return errors.SignatureError("hash tag doesn't match")
+ }
+
+ if pk.PubKeyAlgo != sig.PubKeyAlgo {
+ return errors.InvalidArgumentError("public key and signature use different algorithms")
+ }
+
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+ rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
+ err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes)
+ if err != nil {
+ return errors.SignatureError("RSA verification failure")
+ }
+ return nil
+ case PubKeyAlgoDSA:
+ dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)
+ // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
+ subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
+ if len(hashBytes) > subgroupSize {
+ hashBytes = hashBytes[:subgroupSize]
+ }
+ if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
+ return errors.SignatureError("DSA verification failure")
+ }
+ return nil
+ case PubKeyAlgoECDSA:
+ ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)
+ if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) {
+ return errors.SignatureError("ECDSA verification failure")
+ }
+ return nil
+ default:
+ return errors.SignatureError("Unsupported public key algorithm used in signature")
+ }
+ panic("unreachable")
+}
+
+// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
+// public key, of the data hashed into signed. signed is mutated by this call.
+func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
+ if !pk.CanSign() {
+ return errors.InvalidArgumentError("public key cannot generate signatures")
+ }
+
+ suffix := make([]byte, 5)
+ suffix[0] = byte(sig.SigType)
+ binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
+ signed.Write(suffix)
+ hashBytes := signed.Sum(nil)
+
+ if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
+ return errors.SignatureError("hash tag doesn't match")
+ }
+
+ if pk.PubKeyAlgo != sig.PubKeyAlgo {
+ return errors.InvalidArgumentError("public key and signature use different algorithms")
+ }
+
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+ rsaPublicKey := pk.PublicKey.(*rsa.PublicKey)
+ if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
+ return errors.SignatureError("RSA verification failure")
+ }
+ return
+ case PubKeyAlgoDSA:
+ dsaPublicKey := pk.PublicKey.(*dsa.PublicKey)
+ // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
+ subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
+ if len(hashBytes) > subgroupSize {
+ hashBytes = hashBytes[:subgroupSize]
+ }
+ if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) {
+ return errors.SignatureError("DSA verification failure")
+ }
+ return nil
+ default:
+ panic("shouldn't happen")
+ }
+ panic("unreachable")
+}
+
+// keySignatureHash returns a Hash of the message that needs to be signed for
+// pk to assert a subkey relationship to signed.
+func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
+ if !hashFunc.Available() {
+ return nil, errors.UnsupportedError("hash function")
+ }
+ h = hashFunc.New()
+
+ // RFC 4880, section 5.2.4
+ pk.SerializeSignaturePrefix(h)
+ pk.serializeWithoutHeaders(h)
+ signed.SerializeSignaturePrefix(h)
+ signed.serializeWithoutHeaders(h)
+ return
+}
+
+// VerifyKeySignature returns nil iff sig is a valid signature, made by this
+// public key, of signed.
+func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error {
+ h, err := keySignatureHash(pk, signed, sig.Hash)
+ if err != nil {
+ return err
+ }
+ if err = pk.VerifySignature(h, sig); err != nil {
+ return err
+ }
+
+ if sig.FlagSign {
+ // Signing subkeys must be cross-signed. See
+ // https://www.gnupg.org/faq/subkey-cross-certify.html.
+ if sig.EmbeddedSignature == nil {
+ return errors.StructuralError("signing subkey is missing cross-signature")
+ }
+ // Verify the cross-signature. This is calculated over the same
+ // data as the main signature, so we cannot just recursively
+ // call signed.VerifyKeySignature(...)
+ if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil {
+ return errors.StructuralError("error while hashing for cross-signature: " + err.Error())
+ }
+ if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil {
+ return errors.StructuralError("error while verifying cross-signature: " + err.Error())
+ }
+ }
+
+ return nil
+}
+
+func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
+ if !hashFunc.Available() {
+ return nil, errors.UnsupportedError("hash function")
+ }
+ h = hashFunc.New()
+
+ // RFC 4880, section 5.2.4
+ pk.SerializeSignaturePrefix(h)
+ pk.serializeWithoutHeaders(h)
+
+ return
+}
+
+// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this
+// public key.
+func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
+ h, err := keyRevocationHash(pk, sig.Hash)
+ if err != nil {
+ return err
+ }
+ return pk.VerifySignature(h, sig)
+}
+
+// userIdSignatureHash returns a Hash of the message that needs to be signed
+// to assert that pk is a valid key for id.
+func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) {
+ if !hashFunc.Available() {
+ return nil, errors.UnsupportedError("hash function")
+ }
+ h = hashFunc.New()
+
+ // RFC 4880, section 5.2.4
+ pk.SerializeSignaturePrefix(h)
+ pk.serializeWithoutHeaders(h)
+
+ var buf [5]byte
+ buf[0] = 0xb4
+ buf[1] = byte(len(id) >> 24)
+ buf[2] = byte(len(id) >> 16)
+ buf[3] = byte(len(id) >> 8)
+ buf[4] = byte(len(id))
+ h.Write(buf[:])
+ h.Write([]byte(id))
+
+ return
+}
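+
+// The 0xB4 octet marks the certification target as a User ID packet; the
+// four big-endian length bytes that follow are required for v4 signatures
+// (RFC 4880, section 5.2.4).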
+
+// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
+// public key, that id is the identity of pub.
+func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
+ h, err := userIdSignatureHash(id, pub, sig.Hash)
+ if err != nil {
+ return err
+ }
+ return pk.VerifySignature(h, sig)
+}
+
+// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
+// public key, that id is the identity of pub.
+func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) {
+ h, err := userIdSignatureV3Hash(id, pub, sig.Hash)
+ if err != nil {
+ return err
+ }
+ return pk.VerifySignatureV3(h, sig)
+}
+
+// KeyIdString returns the public key's key ID in capital hex
+// (e.g. "6C7EE1B8621CC013").
+func (pk *PublicKey) KeyIdString() string {
+ return fmt.Sprintf("%X", pk.Fingerprint[12:20])
+}
+
+// KeyIdShortString returns the short form of the public key's key ID
+// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
+func (pk *PublicKey) KeyIdShortString() string {
+ return fmt.Sprintf("%X", pk.Fingerprint[16:20])
+}
+
+// A parsedMPI is used to store the contents of a big integer, along with the
+// bit length that was specified in the original input. This allows the MPI to
+// be reserialized exactly.
+type parsedMPI struct {
+ bytes []byte
+ bitLength uint16
+}
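+
+// An MPI on the wire is a two-octet big-endian bit count followed by the
+// big-endian magnitude bytes (RFC 4880, section 3.2).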
+
+// writeMPIs is a utility function for serializing several big integers to the
+// given Writer.
+func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) {
+ for _, mpi := range mpis {
+ err = writeMPI(w, mpi.bitLength, mpi.bytes)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// BitLength returns the bit length for the given public key.
+func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+ bitLength = pk.n.bitLength
+ case PubKeyAlgoDSA:
+ bitLength = pk.p.bitLength
+ case PubKeyAlgoElGamal:
+ bitLength = pk.p.bitLength
+ default:
+ err = errors.InvalidArgumentError("bad public-key algorithm")
+ }
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_test.go
new file mode 100644
index 00000000000..7ad7d91856d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_test.go
@@ -0,0 +1,202 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+ "time"
+)
+
+var pubKeyTests = []struct {
+ hexData string
+ hexFingerprint string
+ creationTime time.Time
+ pubKeyAlgo PublicKeyAlgorithm
+ keyId uint64
+ keyIdString string
+ keyIdShort string
+}{
+ {rsaPkDataHex, rsaFingerprintHex, time.Unix(0x4d3c5c10, 0), PubKeyAlgoRSA, 0xa34d7e18c20c31bb, "A34D7E18C20C31BB", "C20C31BB"},
+ {dsaPkDataHex, dsaFingerprintHex, time.Unix(0x4d432f89, 0), PubKeyAlgoDSA, 0x8e8fbe54062f19ed, "8E8FBE54062F19ED", "062F19ED"},
+ {ecdsaPkDataHex, ecdsaFingerprintHex, time.Unix(0x5071c294, 0), PubKeyAlgoECDSA, 0x43fe956c542ca00b, "43FE956C542CA00B", "542CA00B"},
+}
+
+func TestPublicKeyRead(t *testing.T) {
+ for i, test := range pubKeyTests {
+ packet, err := Read(readerFromHex(test.hexData))
+ if err != nil {
+ t.Errorf("#%d: Read error: %s", i, err)
+ continue
+ }
+ pk, ok := packet.(*PublicKey)
+ if !ok {
+ t.Errorf("#%d: failed to parse, got: %#v", i, packet)
+ continue
+ }
+ if pk.PubKeyAlgo != test.pubKeyAlgo {
+ t.Errorf("#%d: bad public key algorithm got:%x want:%x", i, pk.PubKeyAlgo, test.pubKeyAlgo)
+ }
+ if !pk.CreationTime.Equal(test.creationTime) {
+ t.Errorf("#%d: bad creation time got:%v want:%v", i, pk.CreationTime, test.creationTime)
+ }
+ expectedFingerprint, _ := hex.DecodeString(test.hexFingerprint)
+ if !bytes.Equal(expectedFingerprint, pk.Fingerprint[:]) {
+ t.Errorf("#%d: bad fingerprint got:%x want:%x", i, pk.Fingerprint[:], expectedFingerprint)
+ }
+ if pk.KeyId != test.keyId {
+ t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId)
+ }
+ if g, e := pk.KeyIdString(), test.keyIdString; g != e {
+ t.Errorf("#%d: bad KeyIdString got:%q want:%q", i, g, e)
+ }
+ if g, e := pk.KeyIdShortString(), test.keyIdShort; g != e {
+ t.Errorf("#%d: bad KeyIdShortString got:%q want:%q", i, g, e)
+ }
+ }
+}
+
+func TestPublicKeySerialize(t *testing.T) {
+ for i, test := range pubKeyTests {
+ packet, err := Read(readerFromHex(test.hexData))
+ if err != nil {
+ t.Errorf("#%d: Read error: %s", i, err)
+ continue
+ }
+ pk, ok := packet.(*PublicKey)
+ if !ok {
+ t.Errorf("#%d: failed to parse, got: %#v", i, packet)
+ continue
+ }
+ serializeBuf := bytes.NewBuffer(nil)
+ err = pk.Serialize(serializeBuf)
+ if err != nil {
+ t.Errorf("#%d: failed to serialize: %s", i, err)
+ continue
+ }
+
+ packet, err = Read(serializeBuf)
+ if err != nil {
+ t.Errorf("#%d: Read error (from serialized data): %s", i, err)
+ continue
+ }
+ pk, ok = packet.(*PublicKey)
+ if !ok {
+ t.Errorf("#%d: failed to parse serialized data, got: %#v", i, packet)
+ continue
+ }
+ }
+}
+
+func TestEcc384Serialize(t *testing.T) {
+ r := readerFromHex(ecc384PubHex)
+ var w bytes.Buffer
+ for i := 0; i < 2; i++ {
+ // Public key
+ p, err := Read(r)
+ if err != nil {
+ t.Error(err)
+ }
+ pubkey := p.(*PublicKey)
+ if !bytes.Equal(pubkey.ec.oid, []byte{0x2b, 0x81, 0x04, 0x00, 0x22}) {
+ t.Errorf("Unexpected pubkey OID: %x", pubkey.ec.oid)
+ }
+ if !bytes.Equal(pubkey.ec.p.bytes[:5], []byte{0x04, 0xf6, 0xb8, 0xc5, 0xac}) {
+ t.Errorf("Unexpected pubkey P[:5]: %x", pubkey.ec.p.bytes)
+ }
+ if pubkey.KeyId != 0x098033880F54719F {
+ t.Errorf("Unexpected pubkey ID: %x", pubkey.KeyId)
+ }
+ err = pubkey.Serialize(&w)
+ if err != nil {
+ t.Error(err)
+ }
+ // User ID
+ p, err = Read(r)
+ if err != nil {
+ t.Error(err)
+ }
+ uid := p.(*UserId)
+ if uid.Id != "ec_dsa_dh_384 <openpgp@brainhub.org>" {
+ t.Error("Unexpected UID:", uid.Id)
+ }
+ err = uid.Serialize(&w)
+ if err != nil {
+ t.Error(err)
+ }
+ // User ID Sig
+ p, err = Read(r)
+ if err != nil {
+ t.Error(err)
+ }
+ uidSig := p.(*Signature)
+ err = pubkey.VerifyUserIdSignature(uid.Id, pubkey, uidSig)
+ if err != nil {
+ t.Error(err, ": UID")
+ }
+ err = uidSig.Serialize(&w)
+ if err != nil {
+ t.Error(err)
+ }
+ // Subkey
+ p, err = Read(r)
+ if err != nil {
+ t.Error(err)
+ }
+ subkey := p.(*PublicKey)
+ if !bytes.Equal(subkey.ec.oid, []byte{0x2b, 0x81, 0x04, 0x00, 0x22}) {
+ t.Errorf("Unexpected subkey OID: %x", subkey.ec.oid)
+ }
+ if !bytes.Equal(subkey.ec.p.bytes[:5], []byte{0x04, 0x2f, 0xaa, 0x84, 0x02}) {
+ t.Errorf("Unexpected subkey P[:5]: %x", subkey.ec.p.bytes)
+ }
+ if subkey.ecdh.KdfHash != 0x09 {
+ t.Error("Expected KDF hash function SHA384 (0x09), got", subkey.ecdh.KdfHash)
+ }
+ if subkey.ecdh.KdfAlgo != 0x09 {
+ t.Error("Expected KDF symmetric alg AES256 (0x09), got", subkey.ecdh.KdfAlgo)
+ }
+ if subkey.KeyId != 0xAA8B938F9A201946 {
+ t.Errorf("Unexpected subkey ID: %x", subkey.KeyId)
+ }
+ err = subkey.Serialize(&w)
+ if err != nil {
+ t.Error(err)
+ }
+ // Subkey Sig
+ p, err = Read(r)
+ if err != nil {
+ t.Error(err)
+ }
+ subkeySig := p.(*Signature)
+ err = pubkey.VerifyKeySignature(subkey, subkeySig)
+ if err != nil {
+ t.Error(err)
+ }
+ err = subkeySig.Serialize(&w)
+ if err != nil {
+ t.Error(err)
+ }
+ // Now read back what we've written again
+ r = bytes.NewBuffer(w.Bytes())
+ w.Reset()
+ }
+}
+
+const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb"
+
+const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001"
+
+const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed"
+
+const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0"
+
+const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b"
+
+const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4"
+
+// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key
+const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267`
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_v3.go
new file mode 100644
index 00000000000..26337f5aaf1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_v3.go
@@ -0,0 +1,280 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "crypto"
+ "crypto/md5"
+ "crypto/rsa"
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "io"
+ "math/big"
+ "strconv"
+ "time"
+
+ "golang.org/x/crypto/openpgp/errors"
+)
+
+// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and
+// should not be used for signing or encrypting. They are supported here only for
+// parsing version 3 key material and validating signatures.
+// See RFC 4880, section 5.5.2.
+type PublicKeyV3 struct {
+ CreationTime time.Time
+ DaysToExpire uint16
+ PubKeyAlgo PublicKeyAlgorithm
+ PublicKey *rsa.PublicKey
+ Fingerprint [16]byte
+ KeyId uint64
+ IsSubkey bool
+
+ n, e parsedMPI
+}
+
+// newRSAPublicKeyV3 returns a PublicKeyV3 that wraps the given rsa.PublicKey.
+// Included here for testing purposes only. RFC 4880, section 5.5.2:
+// "an implementation MUST NOT generate a V3 key, but MAY accept it."
+func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 {
+ pk := &PublicKeyV3{
+ CreationTime: creationTime,
+ PublicKey: pub,
+ n: fromBig(pub.N),
+ e: fromBig(big.NewInt(int64(pub.E))),
+ }
+
+ pk.setFingerPrintAndKeyId()
+ return pk
+}
+
+func (pk *PublicKeyV3) parse(r io.Reader) (err error) {
+ // RFC 4880, section 5.5.2
+ var buf [8]byte
+ if _, err = readFull(r, buf[:]); err != nil {
+ return
+ }
+ if buf[0] < 2 || buf[0] > 3 {
+ return errors.UnsupportedError("public key version")
+ }
+ pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
+ pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7])
+ pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7])
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+ err = pk.parseRSA(r)
+ default:
+ err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
+ }
+ if err != nil {
+ return
+ }
+
+ pk.setFingerPrintAndKeyId()
+ return
+}
+
+func (pk *PublicKeyV3) setFingerPrintAndKeyId() {
+ // RFC 4880, section 12.2
+ fingerPrint := md5.New()
+ fingerPrint.Write(pk.n.bytes)
+ fingerPrint.Write(pk.e.bytes)
+ fingerPrint.Sum(pk.Fingerprint[:0])
+ pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:])
+}
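+
+// Unlike v4 keys, a v3 fingerprint is the MD5 of the bare MPI bodies of n
+// and e (with no length framing), and the key ID is taken from the low 64
+// bits of the modulus rather than from the fingerprint.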
+
+// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
+// section 5.5.2.
+func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) {
+ if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil {
+ return
+ }
+ if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil {
+ return
+ }
+
+ // RFC 4880 Section 12.2 requires the low 8 bytes of the
+ // modulus to form the key id.
+ if len(pk.n.bytes) < 8 {
+ return errors.StructuralError("v3 public key modulus is too short")
+ }
+ if len(pk.e.bytes) > 3 {
+ err = errors.UnsupportedError("large public exponent")
+ return
+ }
+ rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)}
+ for i := 0; i < len(pk.e.bytes); i++ {
+ rsa.E <<= 8
+ rsa.E |= int(pk.e.bytes[i])
+ }
+ pk.PublicKey = rsa
+ return
+}
+
+// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
+// The prefix is used when calculating a signature over this public key. See
+// RFC 4880, section 5.2.4.
+func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) {
+ var pLength uint16
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+ pLength += 2 + uint16(len(pk.n.bytes))
+ pLength += 2 + uint16(len(pk.e.bytes))
+ default:
+ panic("unknown public key algorithm")
+ }
+ pLength += 6
+ w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)})
+ return
+}
+
+func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) {
+ length := 8 // 8 byte header
+
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+ length += 2 + len(pk.n.bytes)
+ length += 2 + len(pk.e.bytes)
+ default:
+ panic("unknown public key algorithm")
+ }
+
+ packetType := packetTypePublicKey
+ if pk.IsSubkey {
+ packetType = packetTypePublicSubkey
+ }
+ if err = serializeHeader(w, packetType, length); err != nil {
+ return
+ }
+ return pk.serializeWithoutHeaders(w)
+}
+
+// serializeWithoutHeaders marshals the PublicKey to w in the form of an
+// OpenPGP public key packet, not including the packet header.
+func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) {
+ var buf [8]byte
+ // Version 3
+ buf[0] = 3
+ // Creation time
+ t := uint32(pk.CreationTime.Unix())
+ buf[1] = byte(t >> 24)
+ buf[2] = byte(t >> 16)
+ buf[3] = byte(t >> 8)
+ buf[4] = byte(t)
+ // Days to expire
+ buf[5] = byte(pk.DaysToExpire >> 8)
+ buf[6] = byte(pk.DaysToExpire)
+ // Public key algorithm
+ buf[7] = byte(pk.PubKeyAlgo)
+
+ if _, err = w.Write(buf[:]); err != nil {
+ return
+ }
+
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+ return writeMPIs(w, pk.n, pk.e)
+ }
+ return errors.InvalidArgumentError("bad public-key algorithm")
+}
+
+// CanSign returns true iff this public key can generate signatures.
+func (pk *PublicKeyV3) CanSign() bool {
+ return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly
+}
+
+// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
+// public key, of the data hashed into signed. signed is mutated by this call.
+func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) {
+ if !pk.CanSign() {
+ return errors.InvalidArgumentError("public key cannot generate signatures")
+ }
+
+ suffix := make([]byte, 5)
+ suffix[0] = byte(sig.SigType)
+ binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix()))
+ signed.Write(suffix)
+ hashBytes := signed.Sum(nil)
+
+ if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] {
+ return errors.SignatureError("hash tag doesn't match")
+ }
+
+ if pk.PubKeyAlgo != sig.PubKeyAlgo {
+ return errors.InvalidArgumentError("public key and signature use different algorithms")
+ }
+
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+ if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
+ return errors.SignatureError("RSA verification failure")
+ }
+ return
+ default:
+ // V3 public keys only support RSA.
+ panic("shouldn't happen")
+ }
+ panic("unreachable")
+}
+
+// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
+// public key, that id is the identity of pub.
+func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) {
+ h, err := userIdSignatureV3Hash(id, pk, sig.Hash)
+ if err != nil {
+ return err
+ }
+ return pk.VerifySignatureV3(h, sig)
+}
+
+// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this
+// public key, of signed.
+func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) {
+ h, err := keySignatureHash(pk, signed, sig.Hash)
+ if err != nil {
+ return err
+ }
+ return pk.VerifySignatureV3(h, sig)
+}
+
+// userIdSignatureV3Hash returns a Hash of the message that needs to be signed
+// to assert that pk is a valid key for id.
+func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) {
+ if !hfn.Available() {
+ return nil, errors.UnsupportedError("hash function")
+ }
+ h = hfn.New()
+
+ // RFC 4880, section 5.2.4
+ pk.SerializeSignaturePrefix(h)
+ pk.serializeWithoutHeaders(h)
+
+ h.Write([]byte(id))
+
+ return
+}
+
+// KeyIdString returns the public key's key ID in capital hex
+// (e.g. "6C7EE1B8621CC013").
+func (pk *PublicKeyV3) KeyIdString() string {
+ return fmt.Sprintf("%X", pk.KeyId)
+}
+
+// KeyIdShortString returns the short form of the public key's key ID
+// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
+func (pk *PublicKeyV3) KeyIdShortString() string {
+ return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF)
+}
+
+// BitLength returns the bit length for the given public key.
+func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) {
+ switch pk.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
+ bitLength = pk.n.bitLength
+ default:
+ err = errors.InvalidArgumentError("bad public-key algorithm")
+ }
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go
new file mode 100644
index 00000000000..e06405904b3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/public_key_v3_test.go
@@ -0,0 +1,82 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+ "time"
+)
+
+var pubKeyV3Test = struct {
+ hexFingerprint string
+ creationTime time.Time
+ pubKeyAlgo PublicKeyAlgorithm
+ keyId uint64
+ keyIdString string
+ keyIdShort string
+}{
+ "103BECF5BD1E837C89D19E98487767F7",
+ time.Unix(779753634, 0),
+ PubKeyAlgoRSA,
+ 0xDE0F188A5DA5E3C9,
+ "DE0F188A5DA5E3C9",
+ "5DA5E3C9"}
+
+func TestPublicKeyV3Read(t *testing.T) {
+ i, test := 0, pubKeyV3Test
+ packet, err := Read(v3KeyReader(t))
+ if err != nil {
+ t.Fatalf("#%d: Read error: %s", i, err)
+ }
+ pk, ok := packet.(*PublicKeyV3)
+ if !ok {
+ t.Fatalf("#%d: failed to parse, got: %#v", i, packet)
+ }
+ if pk.PubKeyAlgo != test.pubKeyAlgo {
+ t.Errorf("#%d: bad public key algorithm got:%x want:%x", i, pk.PubKeyAlgo, test.pubKeyAlgo)
+ }
+ if !pk.CreationTime.Equal(test.creationTime) {
+ t.Errorf("#%d: bad creation time got:%v want:%v", i, pk.CreationTime, test.creationTime)
+ }
+ expectedFingerprint, _ := hex.DecodeString(test.hexFingerprint)
+ if !bytes.Equal(expectedFingerprint, pk.Fingerprint[:]) {
+ t.Errorf("#%d: bad fingerprint got:%x want:%x", i, pk.Fingerprint[:], expectedFingerprint)
+ }
+ if pk.KeyId != test.keyId {
+ t.Errorf("#%d: bad keyid got:%x want:%x", i, pk.KeyId, test.keyId)
+ }
+ if g, e := pk.KeyIdString(), test.keyIdString; g != e {
+ t.Errorf("#%d: bad KeyIdString got:%q want:%q", i, g, e)
+ }
+ if g, e := pk.KeyIdShortString(), test.keyIdShort; g != e {
+ t.Errorf("#%d: bad KeyIdShortString got:%q want:%q", i, g, e)
+ }
+}
+
+func TestPublicKeyV3Serialize(t *testing.T) {
+	i := 0 // only one v3 test key; the index is kept for the error messages below
+ packet, err := Read(v3KeyReader(t))
+ if err != nil {
+ t.Fatalf("#%d: Read error: %s", i, err)
+ }
+ pk, ok := packet.(*PublicKeyV3)
+ if !ok {
+ t.Fatalf("#%d: failed to parse, got: %#v", i, packet)
+ }
+ var serializeBuf bytes.Buffer
+ if err = pk.Serialize(&serializeBuf); err != nil {
+ t.Fatalf("#%d: failed to serialize: %s", i, err)
+ }
+
+ if packet, err = Read(bytes.NewBuffer(serializeBuf.Bytes())); err != nil {
+ t.Fatalf("#%d: Read error (from serialized data): %s", i, err)
+ }
+ if pk, ok = packet.(*PublicKeyV3); !ok {
+ t.Fatalf("#%d: failed to parse serialized data, got: %#v", i, packet)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/reader.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/reader.go
new file mode 100644
index 00000000000..34bc7c613e6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/reader.go
@@ -0,0 +1,76 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "golang.org/x/crypto/openpgp/errors"
+ "io"
+)
+
+// Reader reads packets from an io.Reader and allows packets to be 'unread' so
+// that they are returned by the next call to Next.
+type Reader struct {
+ q []Packet
+ readers []io.Reader
+}
+
+// New io.Readers are pushed when a compressed or encrypted packet is processed
+// and recursively treated as a new source of packets. However, a carefully
+// crafted packet can trigger an infinite recursive sequence of packets. See
+// http://mumble.net/~campbell/misc/pgp-quine
+// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402
+// This constant limits the number of recursive packets that may be pushed.
+const maxReaders = 32
+
+// Next returns the most recently unread Packet, or reads another packet from
+// the top-most io.Reader. Unknown packet types are skipped.
+func (r *Reader) Next() (p Packet, err error) {
+ if len(r.q) > 0 {
+ p = r.q[len(r.q)-1]
+ r.q = r.q[:len(r.q)-1]
+ return
+ }
+
+ for len(r.readers) > 0 {
+ p, err = Read(r.readers[len(r.readers)-1])
+ if err == nil {
+ return
+ }
+ if err == io.EOF {
+ r.readers = r.readers[:len(r.readers)-1]
+ continue
+ }
+ if _, ok := err.(errors.UnknownPacketTypeError); !ok {
+ return nil, err
+ }
+ }
+
+ return nil, io.EOF
+}
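+
+// A typical consumer drains the Reader until io.EOF:
+//
+//	r := NewReader(src)
+//	for {
+//		p, err := r.Next()
+//		if err != nil {
+//			break // io.EOF once all packets are read
+//		}
+//		// process p
+//	}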
+
+// Push causes the Reader to start reading from a new io.Reader. When an EOF
+// error is seen from the new io.Reader, it is popped and the Reader continues
+// to read from the next most recent io.Reader. Push returns a StructuralError
+// if pushing the reader would exceed the maximum recursion level, otherwise it
+// returns nil.
+func (r *Reader) Push(reader io.Reader) (err error) {
+ if len(r.readers) >= maxReaders {
+ return errors.StructuralError("too many layers of packets")
+ }
+ r.readers = append(r.readers, reader)
+ return nil
+}
+
+// Unread causes the given Packet to be returned from the next call to Next.
+func (r *Reader) Unread(p Packet) {
+ r.q = append(r.q, p)
+}
+
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ q: nil,
+ readers: []io.Reader{r},
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature.go
new file mode 100644
index 00000000000..1f29d3df388
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature.go
@@ -0,0 +1,699 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/dsa"
+ "crypto/rsa"
+ "encoding/binary"
+ "hash"
+ "io"
+ "strconv"
+ "time"
+
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/s2k"
+)
+
+const (
+ // See RFC 4880, section 5.2.3.21 for details.
+ KeyFlagCertify = 1 << iota
+ KeyFlagSign
+ KeyFlagEncryptCommunications
+ KeyFlagEncryptStorage
+)
+
+// Signature represents a signature. See RFC 4880, section 5.2.
+type Signature struct {
+ SigType SignatureType
+ PubKeyAlgo PublicKeyAlgorithm
+ Hash crypto.Hash
+
+ // HashSuffix is extra data that is hashed in after the signed data.
+ HashSuffix []byte
+ // HashTag contains the first two bytes of the hash for fast rejection
+ // of bad signed data.
+ HashTag [2]byte
+ CreationTime time.Time
+
+ RSASignature parsedMPI
+ DSASigR, DSASigS parsedMPI
+ ECDSASigR, ECDSASigS parsedMPI
+
+ // rawSubpackets contains the unparsed subpackets, in order.
+ rawSubpackets []outputSubpacket
+
+	// The following are optional, so they are nil when not included in the
+	// signature.
+
+ SigLifetimeSecs, KeyLifetimeSecs *uint32
+ PreferredSymmetric, PreferredHash, PreferredCompression []uint8
+ IssuerKeyId *uint64
+ IsPrimaryId *bool
+
+ // FlagsValid is set if any flags were given. See RFC 4880, section
+ // 5.2.3.21 for details.
+ FlagsValid bool
+ FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool
+
+ // RevocationReason is set if this signature has been revoked.
+ // See RFC 4880, section 5.2.3.23 for details.
+ RevocationReason *uint8
+ RevocationReasonText string
+
+ // MDC is set if this signature has a feature packet that indicates
+ // support for MDC subpackets.
+ MDC bool
+
+ // EmbeddedSignature, if non-nil, is a signature of the parent key, by
+ // this key. This prevents an attacker from claiming another's signing
+ // subkey as their own.
+ EmbeddedSignature *Signature
+
+ outSubpackets []outputSubpacket
+}
+
+func (sig *Signature) parse(r io.Reader) (err error) {
+ // RFC 4880, section 5.2.3
+ var buf [5]byte
+ _, err = readFull(r, buf[:1])
+ if err != nil {
+ return
+ }
+ if buf[0] != 4 {
+ err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
+ return
+ }
+
+ _, err = readFull(r, buf[:5])
+ if err != nil {
+ return
+ }
+ sig.SigType = SignatureType(buf[0])
+ sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
+ switch sig.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA:
+ default:
+ err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
+ return
+ }
+
+ var ok bool
+ sig.Hash, ok = s2k.HashIdToHash(buf[2])
+ if !ok {
+ return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
+ }
+
+ hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4])
+ l := 6 + hashedSubpacketsLength
+ sig.HashSuffix = make([]byte, l+6)
+ sig.HashSuffix[0] = 4
+ copy(sig.HashSuffix[1:], buf[:5])
+ hashedSubpackets := sig.HashSuffix[6:l]
+ _, err = readFull(r, hashedSubpackets)
+ if err != nil {
+ return
+ }
+ // See RFC 4880, section 5.2.4
+ trailer := sig.HashSuffix[l:]
+ trailer[0] = 4
+ trailer[1] = 0xff
+ trailer[2] = uint8(l >> 24)
+ trailer[3] = uint8(l >> 16)
+ trailer[4] = uint8(l >> 8)
+ trailer[5] = uint8(l)
+
+ err = parseSignatureSubpackets(sig, hashedSubpackets, true)
+ if err != nil {
+ return
+ }
+
+ _, err = readFull(r, buf[:2])
+ if err != nil {
+ return
+ }
+ unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1])
+ unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
+ _, err = readFull(r, unhashedSubpackets)
+ if err != nil {
+ return
+ }
+ err = parseSignatureSubpackets(sig, unhashedSubpackets, false)
+ if err != nil {
+ return
+ }
+
+ _, err = readFull(r, sig.HashTag[:2])
+ if err != nil {
+ return
+ }
+
+ switch sig.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+ sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
+ case PubKeyAlgoDSA:
+ sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r)
+ if err == nil {
+ sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
+ }
+ case PubKeyAlgoECDSA:
+ sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r)
+ if err == nil {
+ sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r)
+ }
+ default:
+ panic("unreachable")
+ }
+ return
+}
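+
+// After parsing, HashSuffix holds the six-byte version/type header, the
+// hashed subpackets, and the v4 trailer: everything other than the signed
+// data itself that must be fed into the hash.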
+
+// parseSignatureSubpackets parses subpackets of the main signature packet. See
+// RFC 4880, section 5.2.3.1.
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
+ for len(subpackets) > 0 {
+ subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
+ if err != nil {
+ return
+ }
+ }
+
+ if sig.CreationTime.IsZero() {
+ err = errors.StructuralError("no creation time in signature")
+ }
+
+ return
+}
+
+type signatureSubpacketType uint8
+
+const (
+ creationTimeSubpacket signatureSubpacketType = 2
+ signatureExpirationSubpacket signatureSubpacketType = 3
+ keyExpirationSubpacket signatureSubpacketType = 9
+ prefSymmetricAlgosSubpacket signatureSubpacketType = 11
+ issuerSubpacket signatureSubpacketType = 16
+ prefHashAlgosSubpacket signatureSubpacketType = 21
+ prefCompressionSubpacket signatureSubpacketType = 22
+ primaryUserIdSubpacket signatureSubpacketType = 25
+ keyFlagsSubpacket signatureSubpacketType = 27
+ reasonForRevocationSubpacket signatureSubpacketType = 29
+ featuresSubpacket signatureSubpacketType = 30
+ embeddedSignatureSubpacket signatureSubpacketType = 32
+)
+
+// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
+func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
+ // RFC 4880, section 5.2.3.1
+ var (
+ length uint32
+ packetType signatureSubpacketType
+ isCritical bool
+ )
+ switch {
+ case subpacket[0] < 192:
+ length = uint32(subpacket[0])
+ subpacket = subpacket[1:]
+ case subpacket[0] < 255:
+ if len(subpacket) < 2 {
+ goto Truncated
+ }
+ length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192
+ subpacket = subpacket[2:]
+ default:
+ if len(subpacket) < 5 {
+ goto Truncated
+ }
+ length = uint32(subpacket[1])<<24 |
+ uint32(subpacket[2])<<16 |
+ uint32(subpacket[3])<<8 |
+ uint32(subpacket[4])
+ subpacket = subpacket[5:]
+ }
+ if length > uint32(len(subpacket)) {
+ goto Truncated
+ }
+ rest = subpacket[length:]
+ subpacket = subpacket[:length]
+ if len(subpacket) == 0 {
+ err = errors.StructuralError("zero length signature subpacket")
+ return
+ }
+ packetType = signatureSubpacketType(subpacket[0] & 0x7f)
+ isCritical = subpacket[0]&0x80 == 0x80
+ subpacket = subpacket[1:]
+ sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket})
+ switch packetType {
+ case creationTimeSubpacket:
+ if !isHashed {
+ err = errors.StructuralError("signature creation time in non-hashed area")
+ return
+ }
+ if len(subpacket) != 4 {
+ err = errors.StructuralError("signature creation time not four bytes")
+ return
+ }
+ t := binary.BigEndian.Uint32(subpacket)
+ sig.CreationTime = time.Unix(int64(t), 0)
+ case signatureExpirationSubpacket:
+ // Signature expiration time, section 5.2.3.10
+ if !isHashed {
+ return
+ }
+ if len(subpacket) != 4 {
+ err = errors.StructuralError("expiration subpacket with bad length")
+ return
+ }
+ sig.SigLifetimeSecs = new(uint32)
+ *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
+ case keyExpirationSubpacket:
+ // Key expiration time, section 5.2.3.6
+ if !isHashed {
+ return
+ }
+ if len(subpacket) != 4 {
+ err = errors.StructuralError("key expiration subpacket with bad length")
+ return
+ }
+ sig.KeyLifetimeSecs = new(uint32)
+ *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
+ case prefSymmetricAlgosSubpacket:
+ // Preferred symmetric algorithms, section 5.2.3.7
+ if !isHashed {
+ return
+ }
+ sig.PreferredSymmetric = make([]byte, len(subpacket))
+ copy(sig.PreferredSymmetric, subpacket)
+ case issuerSubpacket:
+ // Issuer, section 5.2.3.5
+ if len(subpacket) != 8 {
+ err = errors.StructuralError("issuer subpacket with bad length")
+ return
+ }
+ sig.IssuerKeyId = new(uint64)
+ *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
+ case prefHashAlgosSubpacket:
+ // Preferred hash algorithms, section 5.2.3.8
+ if !isHashed {
+ return
+ }
+ sig.PreferredHash = make([]byte, len(subpacket))
+ copy(sig.PreferredHash, subpacket)
+ case prefCompressionSubpacket:
+ // Preferred compression algorithms, section 5.2.3.9
+ if !isHashed {
+ return
+ }
+ sig.PreferredCompression = make([]byte, len(subpacket))
+ copy(sig.PreferredCompression, subpacket)
+ case primaryUserIdSubpacket:
+ // Primary User ID, section 5.2.3.19
+ if !isHashed {
+ return
+ }
+ if len(subpacket) != 1 {
+ err = errors.StructuralError("primary user id subpacket with bad length")
+ return
+ }
+ sig.IsPrimaryId = new(bool)
+ if subpacket[0] > 0 {
+ *sig.IsPrimaryId = true
+ }
+ case keyFlagsSubpacket:
+ // Key flags, section 5.2.3.21
+ if !isHashed {
+ return
+ }
+ if len(subpacket) == 0 {
+ err = errors.StructuralError("empty key flags subpacket")
+ return
+ }
+ sig.FlagsValid = true
+ if subpacket[0]&KeyFlagCertify != 0 {
+ sig.FlagCertify = true
+ }
+ if subpacket[0]&KeyFlagSign != 0 {
+ sig.FlagSign = true
+ }
+ if subpacket[0]&KeyFlagEncryptCommunications != 0 {
+ sig.FlagEncryptCommunications = true
+ }
+ if subpacket[0]&KeyFlagEncryptStorage != 0 {
+ sig.FlagEncryptStorage = true
+ }
+ case reasonForRevocationSubpacket:
+ // Reason For Revocation, section 5.2.3.23
+ if !isHashed {
+ return
+ }
+ if len(subpacket) == 0 {
+ err = errors.StructuralError("empty revocation reason subpacket")
+ return
+ }
+ sig.RevocationReason = new(uint8)
+ *sig.RevocationReason = subpacket[0]
+ sig.RevocationReasonText = string(subpacket[1:])
+ case featuresSubpacket:
+ // Features subpacket, section 5.2.3.24 specifies a very general
+ // mechanism for OpenPGP implementations to signal support for new
+ // features. In practice, the subpacket is used exclusively to
+ // indicate support for MDC-protected encryption.
+ sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1
+ case embeddedSignatureSubpacket:
+		// The only use is in signatures that cross-certify signing
+		// subkeys. Section 5.2.3.26 describes the format; its usage is
+		// described in section 11.1.
+ if sig.EmbeddedSignature != nil {
+ err = errors.StructuralError("Cannot have multiple embedded signatures")
+ return
+ }
+ sig.EmbeddedSignature = new(Signature)
+		// Embedded signatures are required to be v4 signatures; see
+		// section 12.1. However, we only parse v4 signatures in this
+		// file anyway.
+ if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
+ return nil, err
+ }
+ if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
+ return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
+ }
+ default:
+ if isCritical {
+ err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
+ return
+ }
+ }
+ return
+
+Truncated:
+ err = errors.StructuralError("signature subpacket truncated")
+ return
+}
+
+// subpacketLengthLength returns the length, in bytes, of an encoded length value.
+func subpacketLengthLength(length int) int {
+ if length < 192 {
+ return 1
+ }
+ if length < 16320 {
+ return 2
+ }
+ return 5
+}
+
+// serializeSubpacketLength marshals the given length into to, returning the
+// number of octets written.
+func serializeSubpacketLength(to []byte, length int) int {
+ // RFC 4880, Section 4.2.2.
+ if length < 192 {
+ to[0] = byte(length)
+ return 1
+ }
+ if length < 16320 {
+ length -= 192
+ to[0] = byte((length >> 8) + 192)
+ to[1] = byte(length)
+ return 2
+ }
+ to[0] = 255
+ to[1] = byte(length >> 24)
+ to[2] = byte(length >> 16)
+ to[3] = byte(length >> 8)
+ to[4] = byte(length)
+ return 5
+}
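+
+// As an illustrative sketch (editor's note, not upstream code), the three
+// length forms of RFC 4880, section 4.2.2 produced by the function above:
+//
+//	var buf [5]byte
+//	serializeSubpacketLength(buf[:], 100)    // 1 octet:  0x64
+//	serializeSubpacketLength(buf[:], 1000)   // 2 octets: 0xc3 0x28, since (0xc3-192)<<8 + 0x28 + 192 == 1000
+//	serializeSubpacketLength(buf[:], 100000) // 5 octets: 0xff 0x00 0x01 0x86 0xa0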
+
+// subpacketsLength returns the serialized length, in bytes, of the given
+// subpackets.
+func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
+ for _, subpacket := range subpackets {
+ if subpacket.hashed == hashed {
+ length += subpacketLengthLength(len(subpacket.contents) + 1)
+ length += 1 // type byte
+ length += len(subpacket.contents)
+ }
+ }
+ return
+}
+
+// serializeSubpackets marshals the given subpackets into to.
+func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
+ for _, subpacket := range subpackets {
+ if subpacket.hashed == hashed {
+ n := serializeSubpacketLength(to, len(subpacket.contents)+1)
+ to[n] = byte(subpacket.subpacketType)
+ to = to[1+n:]
+ n = copy(to, subpacket.contents)
+ to = to[n:]
+ }
+ }
+ return
+}
+
+// KeyExpired returns whether sig is a self-signature of a key that has
+// expired.
+func (sig *Signature) KeyExpired(currentTime time.Time) bool {
+ if sig.KeyLifetimeSecs == nil {
+ return false
+ }
+ expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second)
+ return currentTime.After(expiry)
+}
+
+// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
+func (sig *Signature) buildHashSuffix() (err error) {
+ hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)
+
+ var ok bool
+ l := 6 + hashedSubpacketsLen
+ sig.HashSuffix = make([]byte, l+6)
+ sig.HashSuffix[0] = 4
+ sig.HashSuffix[1] = uint8(sig.SigType)
+ sig.HashSuffix[2] = uint8(sig.PubKeyAlgo)
+ sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash)
+ if !ok {
+ sig.HashSuffix = nil
+ return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
+ }
+ sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8)
+ sig.HashSuffix[5] = byte(hashedSubpacketsLen)
+ serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true)
+ trailer := sig.HashSuffix[l:]
+ trailer[0] = 4
+ trailer[1] = 0xff
+ trailer[2] = byte(l >> 24)
+ trailer[3] = byte(l >> 16)
+ trailer[4] = byte(l >> 8)
+ trailer[5] = byte(l)
+ return
+}
+
+func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) {
+ err = sig.buildHashSuffix()
+ if err != nil {
+ return
+ }
+
+ h.Write(sig.HashSuffix)
+ digest = h.Sum(nil)
+ copy(sig.HashTag[:], digest)
+ return
+}
+
+// Sign signs a message with a private key. The hash, h, must contain
+// the hash of the message to be signed and will be mutated by this function.
+// On success, the signature is stored in sig. Call Serialize to write it out.
+// If config is nil, sensible defaults will be used.
+func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
+ sig.outSubpackets = sig.buildSubpackets()
+ digest, err := sig.signPrepareHash(h)
+ if err != nil {
+ return
+ }
+
+ switch priv.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+ sig.RSASignature.bytes, err = rsa.SignPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), sig.Hash, digest)
+ sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes))
+ case PubKeyAlgoDSA:
+ dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
+
+ // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
+ subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
+ if len(digest) > subgroupSize {
+ digest = digest[:subgroupSize]
+ }
+ r, s, err := dsa.Sign(config.Random(), dsaPriv, digest)
+ if err == nil {
+ sig.DSASigR.bytes = r.Bytes()
+ sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes))
+ sig.DSASigS.bytes = s.Bytes()
+ sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes))
+ }
+ return err
+ default:
+ err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
+ }
+
+ return
+}
+
+// SignUserId computes a signature from priv, asserting that pub is a valid
+// key for the identity id. On success, the signature is stored in sig. Call
+// Serialize to write it out.
+// If config is nil, sensible defaults will be used.
+func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
+ h, err := userIdSignatureHash(id, pub, sig.Hash)
+ if err != nil {
+ return err
+ }
+ return sig.Sign(h, priv, config)
+}
+
+// SignKey computes a signature from priv, asserting that pub is a subkey. On
+// success, the signature is stored in sig. Call Serialize to write it out.
+// If config is nil, sensible defaults will be used.
+func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
+ h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash)
+ if err != nil {
+ return err
+ }
+ return sig.Sign(h, priv, config)
+}
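+
+// A minimal caller-side signing sketch (editor's note; `signer` is a
+// hypothetical decrypted *PrivateKey, `message` an io.Reader and `out` an
+// io.Writer, none of them defined here):
+//
+//	sig := &Signature{
+//		SigType:      SigTypeBinary,
+//		PubKeyAlgo:   PubKeyAlgoRSA,
+//		Hash:         crypto.SHA256,
+//		CreationTime: time.Now(),
+//		IssuerKeyId:  &signer.KeyId,
+//	}
+//	h := sig.Hash.New()
+//	io.Copy(h, message)                         // hash the data to be signed
+//	if err := sig.Sign(h, signer, nil); err == nil {
+//		err = sig.Serialize(out)            // write the signature packet
+//	}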
+
+// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
+// called first.
+func (sig *Signature) Serialize(w io.Writer) (err error) {
+ if len(sig.outSubpackets) == 0 {
+ sig.outSubpackets = sig.rawSubpackets
+ }
+ if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil {
+ return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
+ }
+
+ sigLength := 0
+ switch sig.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+ sigLength = 2 + len(sig.RSASignature.bytes)
+ case PubKeyAlgoDSA:
+ sigLength = 2 + len(sig.DSASigR.bytes)
+ sigLength += 2 + len(sig.DSASigS.bytes)
+ case PubKeyAlgoECDSA:
+ sigLength = 2 + len(sig.ECDSASigR.bytes)
+ sigLength += 2 + len(sig.ECDSASigS.bytes)
+ default:
+ panic("impossible")
+ }
+
+ unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
+ length := len(sig.HashSuffix) - 6 /* trailer not included */ +
+ 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen +
+ 2 /* hash tag */ + sigLength
+ err = serializeHeader(w, packetTypeSignature, length)
+ if err != nil {
+ return
+ }
+
+ _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6])
+ if err != nil {
+ return
+ }
+
+ unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen)
+ unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8)
+ unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
+ serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)
+
+ _, err = w.Write(unhashedSubpackets)
+ if err != nil {
+ return
+ }
+ _, err = w.Write(sig.HashTag[:])
+ if err != nil {
+ return
+ }
+
+ switch sig.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+ err = writeMPIs(w, sig.RSASignature)
+ case PubKeyAlgoDSA:
+ err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
+ case PubKeyAlgoECDSA:
+ err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS)
+ default:
+ panic("impossible")
+ }
+ return
+}
+
+// outputSubpacket represents a subpacket to be marshaled.
+type outputSubpacket struct {
+ hashed bool // true if this subpacket is in the hashed area.
+ subpacketType signatureSubpacketType
+ isCritical bool
+ contents []byte
+}
+
+func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) {
+ creationTime := make([]byte, 4)
+ binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix()))
+ subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime})
+
+ if sig.IssuerKeyId != nil {
+ keyId := make([]byte, 8)
+ binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
+ subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
+ }
+
+ if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 {
+ sigLifetime := make([]byte, 4)
+ binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs)
+ subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime})
+ }
+
+ // Key flags may only appear in self-signatures or certification signatures.
+
+ if sig.FlagsValid {
+ var flags byte
+ if sig.FlagCertify {
+ flags |= KeyFlagCertify
+ }
+ if sig.FlagSign {
+ flags |= KeyFlagSign
+ }
+ if sig.FlagEncryptCommunications {
+ flags |= KeyFlagEncryptCommunications
+ }
+ if sig.FlagEncryptStorage {
+ flags |= KeyFlagEncryptStorage
+ }
+ subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
+ }
+
+ // The following subpackets may only appear in self-signatures
+
+ if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
+ keyLifetime := make([]byte, 4)
+ binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
+ subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime})
+ }
+
+ if sig.IsPrimaryId != nil && *sig.IsPrimaryId {
+ subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}})
+ }
+
+ if len(sig.PreferredSymmetric) > 0 {
+ subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric})
+ }
+
+ if len(sig.PreferredHash) > 0 {
+ subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash})
+ }
+
+ if len(sig.PreferredCompression) > 0 {
+ subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
+ }
+
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_test.go
new file mode 100644
index 00000000000..c1bbde8b0c3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_test.go
@@ -0,0 +1,42 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/hex"
+ "testing"
+)
+
+func TestSignatureRead(t *testing.T) {
+ packet, err := Read(readerFromHex(signatureDataHex))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ sig, ok := packet.(*Signature)
+ if !ok || sig.SigType != SigTypeBinary || sig.PubKeyAlgo != PubKeyAlgoRSA || sig.Hash != crypto.SHA1 {
+ t.Errorf("failed to parse, got: %#v", packet)
+ }
+}
+
+func TestSignatureReserialize(t *testing.T) {
+ packet, _ := Read(readerFromHex(signatureDataHex))
+ sig := packet.(*Signature)
+ out := new(bytes.Buffer)
+ err := sig.Serialize(out)
+ if err != nil {
+ t.Errorf("error reserializing: %s", err)
+ return
+ }
+
+ expected, _ := hex.DecodeString(signatureDataHex)
+ if !bytes.Equal(expected, out.Bytes()) {
+ t.Errorf("output doesn't match input (got vs expected):\n%s\n%s", hex.Dump(out.Bytes()), hex.Dump(expected))
+ }
+}
+
+const signatureDataHex = "c2c05c04000102000605024cb45112000a0910ab105c91af38fb158f8d07ff5596ea368c5efe015bed6e78348c0f033c931d5f2ce5db54ce7f2a7e4b4ad64db758d65a7a71773edeab7ba2a9e0908e6a94a1175edd86c1d843279f045b021a6971a72702fcbd650efc393c5474d5b59a15f96d2eaad4c4c426797e0dcca2803ef41c6ff234d403eec38f31d610c344c06f2401c262f0993b2e66cad8a81ebc4322c723e0d4ba09fe917e8777658307ad8329adacba821420741009dfe87f007759f0982275d028a392c6ed983a0d846f890b36148c7358bdb8a516007fac760261ecd06076813831a36d0459075d1befa245ae7f7fb103d92ca759e9498fe60ef8078a39a3beda510deea251ea9f0a7f0df6ef42060f20780360686f3e400e"
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_v3.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_v3.go
new file mode 100644
index 00000000000..6edff889349
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_v3.go
@@ -0,0 +1,146 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "crypto"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "strconv"
+ "time"
+
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/s2k"
+)
+
+// SignatureV3 represents older version 3 signatures. These signatures are less secure
+// than version 4 and should not be used to create new signatures. They are included
+// here for backwards compatibility to read and validate with older key material.
+// See RFC 4880, section 5.2.2.
+type SignatureV3 struct {
+ SigType SignatureType
+ CreationTime time.Time
+ IssuerKeyId uint64
+ PubKeyAlgo PublicKeyAlgorithm
+ Hash crypto.Hash
+ HashTag [2]byte
+
+ RSASignature parsedMPI
+ DSASigR, DSASigS parsedMPI
+}
+
+func (sig *SignatureV3) parse(r io.Reader) (err error) {
+ // RFC 4880, section 5.2.2
+ var buf [8]byte
+ if _, err = readFull(r, buf[:1]); err != nil {
+ return
+ }
+ if buf[0] < 2 || buf[0] > 3 {
+ err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
+ return
+ }
+ if _, err = readFull(r, buf[:1]); err != nil {
+ return
+ }
+ if buf[0] != 5 {
+ err = errors.UnsupportedError(
+ "invalid hashed material length " + strconv.Itoa(int(buf[0])))
+ return
+ }
+
+ // Read hashed material: signature type + creation time
+ if _, err = readFull(r, buf[:5]); err != nil {
+ return
+ }
+ sig.SigType = SignatureType(buf[0])
+ t := binary.BigEndian.Uint32(buf[1:5])
+ sig.CreationTime = time.Unix(int64(t), 0)
+
+ // Eight-octet Key ID of signer.
+ if _, err = readFull(r, buf[:8]); err != nil {
+ return
+ }
+ sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:])
+
+ // Public-key and hash algorithm
+ if _, err = readFull(r, buf[:2]); err != nil {
+ return
+ }
+ sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0])
+ switch sig.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
+ default:
+ err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
+ return
+ }
+ var ok bool
+ if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok {
+ return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
+ }
+
+ // Two-octet field holding left 16 bits of signed hash value.
+ if _, err = readFull(r, sig.HashTag[:2]); err != nil {
+ return
+ }
+
+ switch sig.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+ sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
+ case PubKeyAlgoDSA:
+ if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil {
+ return
+ }
+ sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
+ default:
+ panic("unreachable")
+ }
+ return
+}
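+
+// For reference (editor's note), the v3 wire layout consumed above, per
+// RFC 4880, section 5.2.2:
+//
+//	1 octet   version (2 or 3)
+//	1 octet   length of hashed material (must be 5)
+//	1 octet   signature type
+//	4 octets  creation time
+//	8 octets  key ID of the signer
+//	1 octet   public-key algorithm
+//	1 octet   hash algorithm
+//	2 octets  left 16 bits of the signed hash value
+//	MPIs      algorithm-specific signature values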
+
+// Serialize marshals sig to w. The signature fields must already be
+// populated, e.g. by parsing an existing v3 signature; this package does
+// not create new v3 signatures.
+func (sig *SignatureV3) Serialize(w io.Writer) (err error) {
+ buf := make([]byte, 8)
+
+ // Write the sig type and creation time
+ buf[0] = byte(sig.SigType)
+ binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix()))
+ if _, err = w.Write(buf[:5]); err != nil {
+ return
+ }
+
+ // Write the issuer long key ID
+ binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId)
+ if _, err = w.Write(buf[:8]); err != nil {
+ return
+ }
+
+ // Write public key algorithm, hash ID, and hash value
+ buf[0] = byte(sig.PubKeyAlgo)
+ hashId, ok := s2k.HashToHashId(sig.Hash)
+ if !ok {
+ return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash))
+ }
+ buf[1] = hashId
+ copy(buf[2:4], sig.HashTag[:])
+ if _, err = w.Write(buf[:4]); err != nil {
+ return
+ }
+
+ if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil {
+ return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
+ }
+
+ switch sig.PubKeyAlgo {
+ case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
+ err = writeMPIs(w, sig.RSASignature)
+ case PubKeyAlgoDSA:
+ err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
+ default:
+ panic("impossible")
+ }
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_v3_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_v3_test.go
new file mode 100644
index 00000000000..ad7b62ac193
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/signature_v3_test.go
@@ -0,0 +1,92 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "testing"
+
+ "golang.org/x/crypto/openpgp/armor"
+)
+
+func TestSignatureV3Read(t *testing.T) {
+ r := v3KeyReader(t)
+ Read(r) // Skip public key
+ Read(r) // Skip uid
+ packet, err := Read(r) // Signature
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ sig, ok := packet.(*SignatureV3)
+ if !ok || sig.SigType != SigTypeGenericCert || sig.PubKeyAlgo != PubKeyAlgoRSA || sig.Hash != crypto.MD5 {
+ t.Errorf("failed to parse, got: %#v", packet)
+ }
+}
+
+func TestSignatureV3Reserialize(t *testing.T) {
+ r := v3KeyReader(t)
+ Read(r) // Skip public key
+ Read(r) // Skip uid
+ packet, err := Read(r)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ sig := packet.(*SignatureV3)
+ out := new(bytes.Buffer)
+ if err = sig.Serialize(out); err != nil {
+ t.Errorf("error reserializing: %s", err)
+ return
+ }
+ expected, err := ioutil.ReadAll(v3KeyReader(t))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ expected = expected[4+141+4+39:] // see the pgpdump offsets below; this is where the signature starts
+ if !bytes.Equal(expected, out.Bytes()) {
+ t.Errorf("output doesn't match input (got vs expected):\n%s\n%s", hex.Dump(out.Bytes()), hex.Dump(expected))
+ }
+}
+
+func v3KeyReader(t *testing.T) io.Reader {
+ armorBlock, err := armor.Decode(bytes.NewBufferString(keySigV3Armor))
+ if err != nil {
+ t.Fatalf("armor Decode failed: %v", err)
+ }
+ return armorBlock.Body
+}
+
+// keySigV3Armor is some V3 public key I found in an SKS dump.
+// Old: Public Key Packet(tag 6)(141 bytes)
+// Ver 4 - new
+// Public key creation time - Fri Sep 16 17:13:54 CDT 1994
+// Pub alg - unknown(pub 0)
+// Unknown public key(pub 0)
+// Old: User ID Packet(tag 13)(39 bytes)
+// User ID - Armin M. Warda <warda@nephilim.ruhr.de>
+// Old: Signature Packet(tag 2)(149 bytes)
+// Ver 4 - new
+// Sig type - unknown(05)
+// Pub alg - ElGamal Encrypt-Only(pub 16)
+// Hash alg - unknown(hash 46)
+// Hashed Sub: unknown(sub 81, critical)(1988 bytes)
+const keySigV3Armor = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: SKS 1.0.10
+
+mI0CLnoYogAAAQQA1qwA2SuJwfQ5bCQ6u5t20ulnOtY0gykf7YjiK4LiVeRBwHjGq7v30tGV
+5Qti7qqRW4Ww7CDCJc4sZMFnystucR2vLkXaSoNWoFm4Fg47NiisDdhDezHwbVPW6OpCFNSi
+ZAamtj4QAUBu8j4LswafrJqZqR9336/V3g8Yil2l48kABRG0J0FybWluIE0uIFdhcmRhIDx3
+YXJkYUBuZXBoaWxpbS5ydWhyLmRlPoiVAgUQLok2xwXR6zmeWEiZAQE/DgP/WgxPQh40/Po4
+gSkWZCDAjNdph7zexvAb0CcUWahcwiBIgg3U5ErCx9I5CNVA9U+s8bNrDZwgSIeBzp3KhWUx
+524uhGgm6ZUTOAIKA6CbV6pfqoLpJnRYvXYQU5mIWsNa99wcu2qu18OeEDnztb7aLA6Ra9OF
+YFCbq4EjXRoOrYM=
+=LPjs
+-----END PGP PUBLIC KEY BLOCK-----`
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
new file mode 100644
index 00000000000..4b1105b6f6b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
@@ -0,0 +1,155 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "io"
+ "strconv"
+
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/s2k"
+)
+
+// This is the largest session key that we'll support. Since no 512-bit cipher
+// has ever been seriously used, this is comfortably large.
+const maxSessionKeySizeInBytes = 64
+
+// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
+// 4880, section 5.3.
+type SymmetricKeyEncrypted struct {
+ CipherFunc CipherFunction
+ s2k func(out, in []byte)
+ encryptedKey []byte
+}
+
+const symmetricKeyEncryptedVersion = 4
+
+func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
+ // RFC 4880, section 5.3.
+ var buf [2]byte
+ if _, err := readFull(r, buf[:]); err != nil {
+ return err
+ }
+ if buf[0] != symmetricKeyEncryptedVersion {
+ return errors.UnsupportedError("SymmetricKeyEncrypted version")
+ }
+ ske.CipherFunc = CipherFunction(buf[1])
+
+ if ske.CipherFunc.KeySize() == 0 {
+ return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1])))
+ }
+
+ var err error
+ ske.s2k, err = s2k.Parse(r)
+ if err != nil {
+ return err
+ }
+
+ encryptedKey := make([]byte, maxSessionKeySizeInBytes)
+ // The session key may follow. We just have to try and read to find
+ // out. If it exists then we limit it to maxSessionKeySizeInBytes.
+ n, err := readFull(r, encryptedKey)
+ if err != nil && err != io.ErrUnexpectedEOF {
+ return err
+ }
+
+ if n != 0 {
+ if n == maxSessionKeySizeInBytes {
+ return errors.UnsupportedError("oversized encrypted session key")
+ }
+ ske.encryptedKey = encryptedKey[:n]
+ }
+
+ return nil
+}
+
+// Decrypt attempts to decrypt an encrypted session key and returns the key and
+// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
+// packet.
+func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
+ key := make([]byte, ske.CipherFunc.KeySize())
+ ske.s2k(key, passphrase)
+
+ if len(ske.encryptedKey) == 0 {
+ return key, ske.CipherFunc, nil
+ }
+
+ // the IV is all zeros
+ iv := make([]byte, ske.CipherFunc.blockSize())
+ c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
+ plaintextKey := make([]byte, len(ske.encryptedKey))
+ c.XORKeyStream(plaintextKey, ske.encryptedKey)
+ cipherFunc := CipherFunction(plaintextKey[0])
+ if cipherFunc.blockSize() == 0 {
+ return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
+ }
+ plaintextKey = plaintextKey[1:]
+ if l := len(plaintextKey); l == 0 || l%cipherFunc.blockSize() != 0 {
+ return nil, cipherFunc, errors.StructuralError("length of decrypted key not a multiple of block size")
+ }
+
+ return plaintextKey, cipherFunc, nil
+}
+
+// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The
+// packet contains a random session key, encrypted by a key derived from the
+// given passphrase. The session key is returned and must be passed to
+// SerializeSymmetricallyEncrypted.
+// If config is nil, sensible defaults will be used.
+func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
+ cipherFunc := config.Cipher()
+ keySize := cipherFunc.KeySize()
+ if keySize == 0 {
+ return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
+ }
+
+ s2kBuf := new(bytes.Buffer)
+ keyEncryptingKey := make([]byte, keySize)
+ // s2k.Serialize salts and stretches the passphrase, and writes the
+ // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()})
+ if err != nil {
+ return
+ }
+ s2kBytes := s2kBuf.Bytes()
+
+ packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize
+ err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
+ if err != nil {
+ return
+ }
+
+ var buf [2]byte
+ buf[0] = symmetricKeyEncryptedVersion
+ buf[1] = byte(cipherFunc)
+ _, err = w.Write(buf[:])
+ if err != nil {
+ return
+ }
+ _, err = w.Write(s2kBytes)
+ if err != nil {
+ return
+ }
+
+ sessionKey := make([]byte, keySize)
+ _, err = io.ReadFull(config.Random(), sessionKey)
+ if err != nil {
+ return
+ }
+ iv := make([]byte, cipherFunc.blockSize())
+ c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv)
+ encryptedCipherAndKey := make([]byte, keySize+1)
+ c.XORKeyStream(encryptedCipherAndKey, buf[1:])
+ c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey)
+ _, err = w.Write(encryptedCipherAndKey)
+ if err != nil {
+ return
+ }
+
+ key = sessionKey
+ return
+}
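+
+// A caller-side usage sketch (editor's note; `literalData` is hypothetical
+// plaintext and error handling is elided). The returned session key must
+// key the symmetrically encrypted data packet that follows:
+//
+//	key, _ := SerializeSymmetricKeyEncrypted(w, []byte("passphrase"), nil)
+//	pt, _ := SerializeSymmetricallyEncrypted(w, CipherAES128, key, nil)
+//	pt.Write(literalData) // to-be-encrypted packets go here
+//	pt.Close()            // flushes the trailing MDC packet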
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go
new file mode 100644
index 00000000000..19538df77c9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted_test.go
@@ -0,0 +1,103 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+func TestSymmetricKeyEncrypted(t *testing.T) {
+ buf := readerFromHex(symmetricallyEncryptedHex)
+ packet, err := Read(buf)
+ if err != nil {
+ t.Errorf("failed to read SymmetricKeyEncrypted: %s", err)
+ return
+ }
+ ske, ok := packet.(*SymmetricKeyEncrypted)
+ if !ok {
+ t.Error("didn't find SymmetricKeyEncrypted packet")
+ return
+ }
+ key, cipherFunc, err := ske.Decrypt([]byte("password"))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ packet, err = Read(buf)
+ if err != nil {
+ t.Errorf("failed to read SymmetricallyEncrypted: %s", err)
+ return
+ }
+ se, ok := packet.(*SymmetricallyEncrypted)
+ if !ok {
+ t.Error("didn't find SymmetricallyEncrypted packet")
+ return
+ }
+ r, err := se.Decrypt(cipherFunc, key)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ contents, err := ioutil.ReadAll(r)
+ if err != nil && err != io.EOF {
+ t.Error(err)
+ return
+ }
+
+ expectedContents, _ := hex.DecodeString(symmetricallyEncryptedContentsHex)
+ if !bytes.Equal(expectedContents, contents) {
+ t.Errorf("bad contents got:%x want:%x", contents, expectedContents)
+ }
+}
+
+const symmetricallyEncryptedHex = "8c0d04030302371a0b38d884f02060c91cf97c9973b8e58e028e9501708ccfe618fb92afef7fa2d80ddadd93cf"
+const symmetricallyEncryptedContentsHex = "cb1062004d14c4df636f6e74656e74732e0a"
+
+func TestSerializeSymmetricKeyEncrypted(t *testing.T) {
+ buf := bytes.NewBuffer(nil)
+ passphrase := []byte("testing")
+ const cipherFunc = CipherAES128
+ config := &Config{
+ DefaultCipher: cipherFunc,
+ }
+
+ key, err := SerializeSymmetricKeyEncrypted(buf, passphrase, config)
+ if err != nil {
+ t.Errorf("failed to serialize: %s", err)
+ return
+ }
+
+ p, err := Read(buf)
+ if err != nil {
+ t.Errorf("failed to reparse: %s", err)
+ return
+ }
+ ske, ok := p.(*SymmetricKeyEncrypted)
+ if !ok {
+ t.Errorf("parsed a different packet type: %#v", p)
+ return
+ }
+
+ if ske.CipherFunc != config.DefaultCipher {
+ t.Errorf("SKE cipher function is %d (expected %d)", ske.CipherFunc, config.DefaultCipher)
+ }
+ parsedKey, parsedCipherFunc, err := ske.Decrypt(passphrase)
+ if err != nil {
+ t.Errorf("failed to decrypt reparsed SKE: %s", err)
+ return
+ }
+ if !bytes.Equal(key, parsedKey) {
+ t.Errorf("keys don't match after Decrypt: %x (original) vs %x (parsed)", key, parsedKey)
+ }
+ if parsedCipherFunc != cipherFunc {
+ t.Errorf("cipher function doesn't match after Decrypt: %d (original) vs %d (parsed)", cipherFunc, parsedCipherFunc)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
new file mode 100644
index 00000000000..6126030eb90
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go
@@ -0,0 +1,290 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "crypto/cipher"
+ "crypto/sha1"
+ "crypto/subtle"
+ "golang.org/x/crypto/openpgp/errors"
+ "hash"
+ "io"
+ "strconv"
+)
+
+// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The
+// encrypted contents will consist of more OpenPGP packets. See RFC 4880,
+// sections 5.7 and 5.13.
+type SymmetricallyEncrypted struct {
+ MDC bool // true iff this is a type 18 packet and thus has an embedded MAC.
+ contents io.Reader
+ prefix []byte
+}
+
+const symmetricallyEncryptedVersion = 1
+
+func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
+ if se.MDC {
+ // See RFC 4880, section 5.13.
+ var buf [1]byte
+ _, err := readFull(r, buf[:])
+ if err != nil {
+ return err
+ }
+ if buf[0] != symmetricallyEncryptedVersion {
+ return errors.UnsupportedError("unknown SymmetricallyEncrypted version")
+ }
+ }
+ se.contents = r
+ return nil
+}
+
+// Decrypt returns a ReadCloser, from which the decrypted contents of the
+// packet can be read. An incorrect key can, with high probability, be detected
+// immediately and this will result in a KeyIncorrect error being returned.
+func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
+ keySize := c.KeySize()
+ if keySize == 0 {
+ return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c)))
+ }
+ if len(key) != keySize {
+ return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
+ }
+
+ if se.prefix == nil {
+ se.prefix = make([]byte, c.blockSize()+2)
+ _, err := readFull(se.contents, se.prefix)
+ if err != nil {
+ return nil, err
+ }
+ } else if len(se.prefix) != c.blockSize()+2 {
+ return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
+ }
+
+ ocfbResync := OCFBResync
+ if se.MDC {
+ // MDC packets use a different form of OCFB mode.
+ ocfbResync = OCFBNoResync
+ }
+
+ s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
+ if s == nil {
+ return nil, errors.ErrKeyIncorrect
+ }
+
+ plaintext := cipher.StreamReader{S: s, R: se.contents}
+
+ if se.MDC {
+ // MDC packets have an embedded hash that we need to check.
+ h := sha1.New()
+ h.Write(se.prefix)
+ return &seMDCReader{in: plaintext, h: h}, nil
+ }
+
+ // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
+ return seReader{plaintext}, nil
+}
+
+// seReader wraps an io.Reader with a no-op Close method.
+type seReader struct {
+ in io.Reader
+}
+
+func (ser seReader) Read(buf []byte) (int, error) {
+ return ser.in.Read(buf)
+}
+
+func (ser seReader) Close() error {
+ return nil
+}
+
+const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
+
+// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
+// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
+// MDC packet containing a hash of the previous contents which is checked
+// against the running hash. See RFC 4880, section 5.13.
+type seMDCReader struct {
+ in io.Reader
+ h hash.Hash
+ trailer [mdcTrailerSize]byte
+ scratch [mdcTrailerSize]byte
+ trailerUsed int
+ error bool
+ eof bool
+}
+
+func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
+ if ser.error {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ if ser.eof {
+ err = io.EOF
+ return
+ }
+
+ // If we haven't yet filled the trailer buffer then we must do that
+ // first.
+ for ser.trailerUsed < mdcTrailerSize {
+ n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
+ ser.trailerUsed += n
+ if err == io.EOF {
+ if ser.trailerUsed != mdcTrailerSize {
+ n = 0
+ err = io.ErrUnexpectedEOF
+ ser.error = true
+ return
+ }
+ ser.eof = true
+ n = 0
+ return
+ }
+
+ if err != nil {
+ n = 0
+ return
+ }
+ }
+
+ // If it's a short read then we read into a temporary buffer and shift
+ // the data into the caller's buffer.
+ if len(buf) <= mdcTrailerSize {
+ n, err = readFull(ser.in, ser.scratch[:len(buf)])
+ copy(buf, ser.trailer[:n])
+ ser.h.Write(buf[:n])
+ copy(ser.trailer[:], ser.trailer[n:])
+ copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
+ if n < len(buf) {
+ ser.eof = true
+ err = io.EOF
+ }
+ return
+ }
+
+ n, err = ser.in.Read(buf[mdcTrailerSize:])
+ copy(buf, ser.trailer[:])
+ ser.h.Write(buf[:n])
+ copy(ser.trailer[:], buf[n:])
+
+ if err == io.EOF {
+ ser.eof = true
+ }
+ return
+}
+
+// This is a new-format packet tag byte for a type 19 (MDC) packet.
+const mdcPacketTagByte = byte(0x80) | 0x40 | 19
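+// (0x80 marks a packet tag, 0x40 selects the new packet format, and 19 is
+// the MDC packet type, so the constant works out to 0xd3.)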
+
+func (ser *seMDCReader) Close() error {
+ if ser.error {
+ return errors.SignatureError("error during reading")
+ }
+
+ for !ser.eof {
+ // We haven't seen EOF so we need to read to the end
+ var buf [1024]byte
+ _, err := ser.Read(buf[:])
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return errors.SignatureError("error during reading")
+ }
+ }
+
+ if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
+ return errors.SignatureError("MDC packet not found")
+ }
+ ser.h.Write(ser.trailer[:2])
+
+ final := ser.h.Sum(nil)
+ if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
+ return errors.SignatureError("hash mismatch")
+ }
+ return nil
+}
+
+// An seMDCWriter writes through to an io.WriteCloser while maintaining a running
+// hash of the data written. On close, it emits an MDC packet containing the
+// running hash.
+type seMDCWriter struct {
+ w io.WriteCloser
+ h hash.Hash
+}
+
+func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
+ w.h.Write(buf)
+ return w.w.Write(buf)
+}
+
+func (w *seMDCWriter) Close() (err error) {
+ var buf [mdcTrailerSize]byte
+
+ buf[0] = mdcPacketTagByte
+ buf[1] = sha1.Size
+ w.h.Write(buf[:2])
+ digest := w.h.Sum(nil)
+ copy(buf[2:], digest)
+
+ _, err = w.w.Write(buf[:])
+ if err != nil {
+ return
+ }
+ return w.w.Close()
+}
+
+// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
+type noOpCloser struct {
+ w io.Writer
+}
+
+func (c noOpCloser) Write(data []byte) (n int, err error) {
+ return c.w.Write(data)
+}
+
+func (c noOpCloser) Close() error {
+ return nil
+}
+
+// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
+// to w and returns a WriteCloser to which the to-be-encrypted packets can be
+// written.
+// If config is nil, sensible defaults will be used.
+func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) {
+ if c.KeySize() != len(key) {
+ return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length")
+ }
+ writeCloser := noOpCloser{w}
+ ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC)
+ if err != nil {
+ return
+ }
+
+ _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion})
+ if err != nil {
+ return
+ }
+
+ block := c.new(key)
+ blockSize := block.BlockSize()
+ iv := make([]byte, blockSize)
+ _, err = config.Random().Read(iv)
+ if err != nil {
+ return
+ }
+ s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
+ _, err = ciphertext.Write(prefix)
+ if err != nil {
+ return
+ }
+ plaintext := cipher.StreamWriter{S: s, W: ciphertext}
+
+ h := sha1.New()
+ h.Write(iv)
+ h.Write(iv[blockSize-2:])
+ contents = &seMDCWriter{w: plaintext, h: h}
+ return
+}
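+
+// The matching read side, as a sketch (editor's note; `se` is a
+// *SymmetricallyEncrypted returned by Read and `key` the session key):
+//
+//	rc, _ := se.Decrypt(CipherAES128, key)
+//	contents, _ := ioutil.ReadAll(rc)  // consume everything first...
+//	if err := rc.Close(); err != nil { // ...then Close verifies the MDC hash
+//		// integrity failure: treat contents as untrusted
+//	}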
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go
new file mode 100644
index 00000000000..c5c00f7b9c3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted_test.go
@@ -0,0 +1,123 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "encoding/hex"
+ "golang.org/x/crypto/openpgp/errors"
+ "io"
+ "io/ioutil"
+ "testing"
+)
+
+// testReader wraps a []byte and returns reads of a specific length.
+type testReader struct {
+ data []byte
+ stride int
+}
+
+func (t *testReader) Read(buf []byte) (n int, err error) {
+ n = t.stride
+ if n > len(t.data) {
+ n = len(t.data)
+ }
+ if n > len(buf) {
+ n = len(buf)
+ }
+ copy(buf, t.data)
+ t.data = t.data[n:]
+ if len(t.data) == 0 {
+ err = io.EOF
+ }
+ return
+}
+
+func TestMDCReader(t *testing.T) {
+ mdcPlaintext, _ := hex.DecodeString(mdcPlaintextHex)
+
+ for stride := 1; stride < len(mdcPlaintext)/2; stride++ {
+ r := &testReader{data: mdcPlaintext, stride: stride}
+ mdcReader := &seMDCReader{in: r, h: sha1.New()}
+ body, err := ioutil.ReadAll(mdcReader)
+ if err != nil {
+ t.Errorf("stride: %d, error: %s", stride, err)
+ continue
+ }
+ if !bytes.Equal(body, mdcPlaintext[:len(mdcPlaintext)-22]) {
+ t.Errorf("stride: %d: bad contents %x", stride, body)
+ continue
+ }
+
+ err = mdcReader.Close()
+ if err != nil {
+ t.Errorf("stride: %d, error on Close: %s", stride, err)
+ }
+ }
+
+ mdcPlaintext[15] ^= 80
+
+ r := &testReader{data: mdcPlaintext, stride: 2}
+ mdcReader := &seMDCReader{in: r, h: sha1.New()}
+ _, err := ioutil.ReadAll(mdcReader)
+ if err != nil {
+ t.Errorf("corruption test, error: %s", err)
+ return
+ }
+ err = mdcReader.Close()
+ if err == nil {
+ t.Error("corruption: no error")
+ } else if _, ok := err.(errors.SignatureError); !ok {
+ t.Errorf("corruption: expected SignatureError, got: %s", err)
+ }
+}
+
+const mdcPlaintextHex = "a302789c3b2d93c4e0eb9aba22283539b3203335af44a134afb800c849cb4c4de10200aff40b45d31432c80cb384299a0655966d6939dfdeed1dddf980"
+
+func TestSerialize(t *testing.T) {
+ buf := bytes.NewBuffer(nil)
+ c := CipherAES128
+ key := make([]byte, c.KeySize())
+
+ w, err := SerializeSymmetricallyEncrypted(buf, c, key, nil)
+ if err != nil {
+ t.Errorf("error from SerializeSymmetricallyEncrypted: %s", err)
+ return
+ }
+
+ contents := []byte("hello world\n")
+
+ w.Write(contents)
+ w.Close()
+
+ p, err := Read(buf)
+ if err != nil {
+ t.Errorf("error from Read: %s", err)
+ return
+ }
+
+ se, ok := p.(*SymmetricallyEncrypted)
+ if !ok {
+ t.Errorf("didn't read a *SymmetricallyEncrypted")
+ return
+ }
+
+ r, err := se.Decrypt(c, key)
+ if err != nil {
+ t.Errorf("error from Decrypt: %s", err)
+ return
+ }
+
+ contentsCopy := bytes.NewBuffer(nil)
+ _, err = io.Copy(contentsCopy, r)
+ if err != nil {
+ t.Errorf("error from io.Copy: %s", err)
+ return
+ }
+ if !bytes.Equal(contentsCopy.Bytes(), contents) {
+ t.Errorf("contents not equal got: %x want: %x", contentsCopy.Bytes(), contents)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userattribute.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userattribute.go
new file mode 100644
index 00000000000..96a2b382a1d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userattribute.go
@@ -0,0 +1,91 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "image"
+ "image/jpeg"
+ "io"
+ "io/ioutil"
+)
+
+const UserAttrImageSubpacket = 1
+
+// UserAttribute is capable of storing other types of data about a user
+// beyond name, email and a text comment. In practice, user attributes are typically used
+// to store a signed thumbnail photo JPEG image of the user.
+// See RFC 4880, section 5.12.
+type UserAttribute struct {
+ Contents []*OpaqueSubpacket
+}
+
+// NewUserAttributePhoto creates a user attribute packet
+// containing the given images.
+func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) {
+ uat = new(UserAttribute)
+ for _, photo := range photos {
+ var buf bytes.Buffer
+ // RFC 4880, Section 5.12.1.
+ data := []byte{
+ 0x10, 0x00, // Little-endian image header length (16 bytes)
+ 0x01, // Image header version 1
+ 0x01, // JPEG
+ 0, 0, 0, 0, // 12 reserved octets, must be all zero.
+ 0, 0, 0, 0,
+ 0, 0, 0, 0}
+ if _, err = buf.Write(data); err != nil {
+ return
+ }
+ if err = jpeg.Encode(&buf, photo, nil); err != nil {
+ return
+ }
+ uat.Contents = append(uat.Contents, &OpaqueSubpacket{
+ SubType: UserAttrImageSubpacket,
+ Contents: buf.Bytes()})
+ }
+ return
+}
+
+// NewUserAttribute creates a new user attribute packet containing the given subpackets.
+func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
+ return &UserAttribute{Contents: contents}
+}
+
+func (uat *UserAttribute) parse(r io.Reader) (err error) {
+ // RFC 4880, section 5.12
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return
+ }
+ uat.Contents, err = OpaqueSubpackets(b)
+ return
+}
+
+// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
+// header.
+func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
+ var buf bytes.Buffer
+ for _, sp := range uat.Contents {
+ sp.Serialize(&buf)
+ }
+ if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
+ return err
+ }
+ _, err = w.Write(buf.Bytes())
+ return
+}
+
+// ImageData returns zero or more byte slices, one for each photo in the
+// user attribute packet, each containing JPEG File Interchange Format
+// (JFIF) image data.
+func (uat *UserAttribute) ImageData() (imageData [][]byte) {
+ for _, sp := range uat.Contents {
+ if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
+ imageData = append(imageData, sp.Contents[16:])
+ }
+ }
+ return
+}
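+
+// A caller-side sketch (editor's note; `photo` is a hypothetical
+// image.Image and error handling is elided):
+//
+//	uat, _ := NewUserAttributePhoto(photo)
+//	_ = uat.Serialize(w)            // full packet, including header
+//	// after a round trip through Read:
+//	jpegBytes := uat.ImageData()[0] // JFIF bytes, 16-byte header stripped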
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userattribute_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userattribute_test.go
new file mode 100644
index 00000000000..13ca5143cee
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userattribute_test.go
@@ -0,0 +1,109 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "bytes"
+ "encoding/base64"
+ "image/color"
+ "image/jpeg"
+ "testing"
+)
+
+func TestParseUserAttribute(t *testing.T) {
+ r := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(userAttributePacket))
+ for i := 0; i < 2; i++ {
+ p, err := Read(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ uat := p.(*UserAttribute)
+ imgs := uat.ImageData()
+ if len(imgs) != 1 {
+ t.Errorf("Unexpected number of images in user attribute packet: %d", len(imgs))
+ }
+ if len(imgs[0]) != 3395 {
+ t.Errorf("Unexpected JPEG image size: %d", len(imgs[0]))
+ }
+ img, err := jpeg.Decode(bytes.NewBuffer(imgs[0]))
+ if err != nil {
+ t.Errorf("Error decoding JPEG image: %v", err)
+ }
+ // A pixel in my right eye.
+ pixel := color.NRGBAModel.Convert(img.At(56, 36))
+ ref := color.NRGBA{R: 157, G: 128, B: 124, A: 255}
+ if pixel != ref {
+ t.Errorf("Unexpected pixel color: %v", pixel)
+ }
+ w := bytes.NewBuffer(nil)
+ err = uat.Serialize(w)
+ if err != nil {
+ t.Errorf("Error writing user attribute: %v", err)
+ }
+ r = bytes.NewBuffer(w.Bytes())
+ }
+}
+
+const userAttributePacket = `
+0cyWzJQBEAABAQAAAAAAAAAAAAAAAP/Y/+AAEEpGSUYAAQIAAAEAAQAA/9sAQwAFAwQEBAMFBAQE
+BQUFBgcMCAcHBwcPCgsJDBEPEhIRDxEQExYcFxMUGhUQERghGBocHR8fHxMXIiQiHiQcHh8e/9sA
+QwEFBQUHBgcOCAgOHhQRFB4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4eHh4e
+Hh4eHh4eHh4e/8AAEQgAZABkAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYH
+CAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHw
+JDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6
+g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk
+5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIB
+AgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEX
+GBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKT
+lJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX2
+9/j5+v/aAAwDAQACEQMRAD8A5uGP06VehQ4pIox04q5EnHSvAep+hIIl4zVuMHGPWmRrUWtalaaN
+pU2oXsgSGJSxPr6ClvoitErs0Itqjc7BQOpPAFYmrfEnwjojtHNqaXEynBjtx5hH4jj9a8B8d+Od
+W8UXZjWR4LJT+7t0Jwfc+prnIdO1CWZEW2mZ3HyDactXXDB3V5s8evm1namj6r0H4weCLtxG+ova
+ueP30RA/MV6not1bX0Ed1ZzxzwyDKvGwZSPqK+Ff+ES8R8t/ZV2oHUmM10Hgbxp4m8BatEfNnWBH
+/eWshOxx9Kmpg4te49RUM1kn+8Wh9zQ4P1FaMC7l465rjPh14y0fxnoseoaXOpfaPOgJ+eI98j09
+67W19M15bi4uzPSqTU480WXkjZkAyAR61DPE6OCSOalWRRgZxjvTb598sfU4FBwx5uY4T4feIm8P
+TeJbAgc65NIM+8cX+FFeLfF3Vr3SfiNrMFrMypJMJcDPUqP8KK+kpVFyLU+ar037SXqX4hxVpMY7
+1UhPpVlT2rybKx9smWYz3NeH/EDVLzxt40j8O6bITaQybPlbKkjq39K9O8fasdH8IahfKxWQRFIy
+Ou9uB/OuE/Z/0y3j1d9TuyoZCMs5xjuea1pLli5nn46q240l13PcfhN8EvDNtpcEl/CklyVBLuMk
+mvU/Dfwo0BL/AO13FjEDD/qyV7Vn+CvGPg8zRpJrVm8ikLtEg6+1ew2dxZ3EQaJgysuQPasH7eXW
+1zzsbVhT92kk/PsYieEND+zlPs6c/wCyAPyryH4wfCPRtW0u6j+xRLOxLxSoADkDpXY+MPjJ4c0S
+9k082d3O8ZKkxw5XI96ytK+IGk+IpFjRpod+Qq3C7QT6A1E6NenaXbqRg6rlLlqS0fRnxjpd1r/w
+w8afa7GWRPKbZLGeBKmeVNfZngLxNaeKfDdprVjxHcLlkJ5Vh1H5185/tDad9h8XOsqAw3Cb0cjq
+CfX61P8AsveKf7L8T3fhe5nxa3g324YniQdh9R/KuivTdSmp9TXB1/Z1nRlsfU249QBx1pWfcwI7
+Cq6u2Ovamb9rYz16V5x7Psz5q/aJhZfibcupIElvE3H+7j+lFbXx9szP45jlUfeso8/99OKK9elL
+3EeNVopzZVharCtxVRGGMk02S5JyFOB69zWTieypnL/GksfB+0cr9oQt69awPhPpD69Y3Ky3DWth
+CWluGU4LAdq3vibGs/g68BJygVxjrwRW5+ztoRv/AAs8EeCZnO/J/hzz/Kumi4wp3kePjlOdZKPY
+ml8Mvo6WM9ppi7J0EkQYMzkb1X0wW+bJHGACa+ivg14huZPCkjXUO6SImIYOQAP6UQ2sGneHmiWF
+CYoSAAuM8etXfhBpMr+EZ3SSNRcMx6ZxWdes6ytBGSwkMNFuo7pnP614Ut9Zn1C4uLySKcwObGFA
+Qnm4+XcR71h+CfDHiKCQWuv2YWFtw+bBZQD8rcE8n2Ney+GbGGQSM6I7xvtI681rXdp8hKRRp6t3
+FYPE1VDlsY1nQjWdl+J8w/tOeDZZ/AMd/EGefTHyxxyYjwfyODXg3waRh8UtEcFh+8Jb8FNfZPxh
+Ak8J6nbPIsiyW7LnseK+Ofh99ptPHFnf2lu0y2twGcKuSEPB/Q1WHk50miq1o14TXU+xop+On61H
+NMC6Nis1LgsAcUTSt1APFcXJZn0EqmhyvxA037friTYziBV6f7Tf40Vr3k4aXLx5OMZIzRXZB2ik
+efJXbPHJJcnaD9aN2R1qoGO8/WkuLlIV+YjdjpXSonQ5lTxfiTwzqCnkeQxx9BWx+zPrQsrBFYja
+zEfrXL6lfie3khcjY6lSPUGud+G3iA6FrY0uQ/KJsA9gCa0jSvFpnBi6tpKSPu++nsIfDFxeXciR
+qIicscY4rxTwB8RUkn1axsPEf2LTYx85kTGzqCUP8VcJ47+JOs+I0Hhq1njjt/ufIeSvq1VtE+Gs
+eoaUbSHUrkHdu3WtuX5Ix81XRh7OL5jirVpV5Whdn0F8C/iX4auVn0i612T7bASoe8wjTAd89K9g
+vtSt5NMa4t5lkRhgOh3Dn6V8aaz8KZrIR3OlQ6r56LySmSxxz06Vo/CHx34h0rxBP4XvJ5AjK2RP
+nEbAEj6ZxjPrWM6fMmoswqJxqJ1VZnqHxn1NLPwveqWHmNC2BnnNcD8DfDkGi+CH1m+ijN1qMzNA
+4GSIiAMf+hVxPxU8Tapc3c0F9MGCn5GU5BX0Pau3+HmrT3XgXSIJCBHDGdgAx1NYSpezha52Yauq
+1dya2Wh2onAIwTj1p0lxxWWLkhRyCKWa5O3ORXOos9KVQluZm83j0oqi84JyWH50Vdmc7ep43d3I
+t1Z2Iz2FYdxeSTsxyRnvTdVuDNcNluM9KrKcg817NOnZGNbEXdkNckjrXGeIIprPxFFdRHAlIwem
+COtdmxrG8Q2cd/ZNExw45RvQ1bVjim+dWNzw7eaTD4mN3dndCQCo6hmI5zXpj/Ea/wBHjkh0kwRW
+xXEfl4yTxXzXZalJDL9nuWKMmRnHcV2Hh3WreCyYXW2SWQhd5P3F6n+lS43d2cTm6d7Ox9EWPxH1
+ODQxPqWpCaSU/ukUc4z3/WvKW8UhviAdaMewYZG98gj9c1ymoa8LyWOJHwkTDaVPb0qpr+q2m6Nb
+cfvNo349az9mou9iZVXNWbub3jm98/Vza2ReV7lsJg/e3dsV654UR9N0K0sZP9ZDGFbHr3rzL4P+
+H7rXfEEWr3I3W1qf3IYdW9fwqDxf4k8UeH/G95p08kscHmk25dPlZT0we9YTj7SXKjpw1aNG8mj3
+FLv5ccU959ycnmvKPDnxB82YQarGsZPAlTp+IrvIr1ZIgySKwIyCOhFYTpyg9T0qWIhVV4svzPvf
+IdhgY4orPachj81FRdmtzxqdiZmJ9aQEgdqZcPtmbJ71DJcAZ5r20kkeXJtsfPIQDwPzrG1a+S3i
+LyHAHvmp7y7HOD1rlNdm+1T7Acovf3o+J2RMpezjzMvrob67pX9o2ShZlYgg/wAWKxZLLWLZ/Ke3
+mVh14yK9M+BMC3dre2ko3LHKCB7EV7EngeGQJdQ7HyBkMKS0djgq1W3c+XtK03U522RwzsTwNiEk
+ntXoHgf4calql9El/G8UZbLfLyfr7V9FeGvh+s+0Lbxxcglu2K1NW1nwN4Gk/wBLuI57tV5jjwzE
+/QVNS+0dWYRqNvXRFv4eeCodKsY1ggVIY1G3K4z714h+1Jqul3GpwaXYeXJLbzgyyrg4b+6D+HNb
+vjz436zq9m+naHF/ZdkeGfOZXH17V4Vqt2b29K+ZuOc5bnce5zWdPBShL2lTfojSeJhy+zp/NjVz
+1Bwa6DSfFGq6fbJFDKrov8DjPFcu97ZxsUe4jVhwVJ5Bpp1mwQiLewJPXacVq6fNpYyjOUXdHoKf
+EG8VQHsInbuVcgflRXnt5fIs2FYHgcgUVi8LG+xusdW/mN7U2KgEVkTzPt60UVfQ9eHxGHrV1MGi
+iD4V25x1qvdgLAMd6KK0pbHm4x++dp8FtUubLxJ5EIjMc+A4Za+qfD8pe1JZVOBmiinW3RyRPMfi
+R8QPE638+k2l6LK0Hylbddhb6nOa80mlkcmWR2kcnlnOSaKK7qCXKcNdu5narcSrAoBxvODWJIga
+VckjDdqKKwq/EaQ0gUdbjQ6mr7QGBUcd6tPBC6gtGpOOuKKKie5qn7qIpEXd0HSiiimSf//Z`
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userid.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userid.go
new file mode 100644
index 00000000000..d6bea7d4acc
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userid.go
@@ -0,0 +1,160 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "io"
+ "io/ioutil"
+ "strings"
+)
+
+// UserId contains text that is intended to represent the name and email
+// address of the key holder. See RFC 4880, section 5.11. By convention, this
+// takes the form "Full Name (Comment) <email@example.com>"
+type UserId struct {
+ Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
+
+ Name, Comment, Email string
+}
+
+func hasInvalidCharacters(s string) bool {
+ for _, c := range s {
+ switch c {
+ case '(', ')', '<', '>', 0:
+ return true
+ }
+ }
+ return false
+}
+
+// NewUserId returns a UserId or nil if any of the arguments contain invalid
+// characters. The invalid characters are '\x00', '(', ')', '<' and '>'.
+func NewUserId(name, comment, email string) *UserId {
+ // RFC 4880 doesn't deal with the structure of userid strings; the
+ // name, comment and email form is just a convention. However, there's
+ // no convention about escaping the metacharacters and GPG just refuses
+ // to create user ids where, say, the name contains a '('. We mirror
+ // this behaviour.
+
+ if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
+ return nil
+ }
+
+ uid := new(UserId)
+ uid.Name, uid.Comment, uid.Email = name, comment, email
+ uid.Id = name
+ if len(comment) > 0 {
+ if len(uid.Id) > 0 {
+ uid.Id += " "
+ }
+ uid.Id += "("
+ uid.Id += comment
+ uid.Id += ")"
+ }
+ if len(email) > 0 {
+ if len(uid.Id) > 0 {
+ uid.Id += " "
+ }
+ uid.Id += "<"
+ uid.Id += email
+ uid.Id += ">"
+ }
+ return uid
+}
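+
+// For example (editor's note; the value follows from the concatenation
+// above and from the tests in userid_test.go):
+//
+//	NewUserId("Alice", "work", "alice@example.com").Id ==
+//		"Alice (work) <alice@example.com>"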
+
+func (uid *UserId) parse(r io.Reader) (err error) {
+ // RFC 4880, section 5.11
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return
+ }
+ uid.Id = string(b)
+ uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
+ return
+}
+
+// Serialize marshals uid to w in the form of an OpenPGP packet, including
+// header.
+func (uid *UserId) Serialize(w io.Writer) error {
+ err := serializeHeader(w, packetTypeUserId, len(uid.Id))
+ if err != nil {
+ return err
+ }
+ _, err = w.Write([]byte(uid.Id))
+ return err
+}
+
+// parseUserId extracts the name, comment and email from a user id string that
+// is formatted as "Full Name (Comment) <email@example.com>".
+func parseUserId(id string) (name, comment, email string) {
+ var n, c, e struct {
+ start, end int
+ }
+ var state int
+
+ for offset, rune := range id {
+ switch state {
+ case 0:
+ // Entering name
+ n.start = offset
+ state = 1
+ fallthrough
+ case 1:
+ // In name
+ if rune == '(' {
+ state = 2
+ n.end = offset
+ } else if rune == '<' {
+ state = 5
+ n.end = offset
+ }
+ case 2:
+ // Entering comment
+ c.start = offset
+ state = 3
+ fallthrough
+ case 3:
+ // In comment
+ if rune == ')' {
+ state = 4
+ c.end = offset
+ }
+ case 4:
+ // Between comment and email
+ if rune == '<' {
+ state = 5
+ }
+ case 5:
+ // Entering email
+ e.start = offset
+ state = 6
+ fallthrough
+ case 6:
+ // In email
+ if rune == '>' {
+ state = 7
+ e.end = offset
+ }
+ default:
+ // After email
+ }
+ }
+ switch state {
+ case 1:
+ // ended in the name
+ n.end = len(id)
+ case 3:
+ // ended in comment
+ c.end = len(id)
+ case 6:
+ // ended in email
+ e.end = len(id)
+ }
+
+ name = strings.TrimSpace(id[n.start:n.end])
+ comment = strings.TrimSpace(id[c.start:c.end])
+ email = strings.TrimSpace(id[e.start:e.end])
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userid_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userid_test.go
new file mode 100644
index 00000000000..29681938938
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/packet/userid_test.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+ "testing"
+)
+
+var userIdTests = []struct {
+ id string
+ name, comment, email string
+}{
+ {"", "", "", ""},
+ {"John Smith", "John Smith", "", ""},
+ {"John Smith ()", "John Smith", "", ""},
+ {"John Smith () <>", "John Smith", "", ""},
+ {"(comment", "", "comment", ""},
+ {"(comment)", "", "comment", ""},
+ {"<email", "", "", "email"},
+ {"<email> sdfk", "", "", "email"},
+ {" John Smith ( Comment ) asdkflj < email > lksdfj", "John Smith", "Comment", "email"},
+ {" John Smith < email > lksdfj", "John Smith", "", "email"},
+ {"(<foo", "", "<foo", ""},
+ {"René Descartes (العربي)", "René Descartes", "العربي", ""},
+}
+
+func TestParseUserId(t *testing.T) {
+ for i, test := range userIdTests {
+ name, comment, email := parseUserId(test.id)
+ if name != test.name {
+ t.Errorf("%d: name mismatch got:%s want:%s", i, name, test.name)
+ }
+ if comment != test.comment {
+ t.Errorf("%d: comment mismatch got:%s want:%s", i, comment, test.comment)
+ }
+ if email != test.email {
+ t.Errorf("%d: email mismatch got:%s want:%s", i, email, test.email)
+ }
+ }
+}
+
+var newUserIdTests = []struct {
+ name, comment, email, id string
+}{
+ {"foo", "", "", "foo"},
+ {"", "bar", "", "(bar)"},
+ {"", "", "baz", "<baz>"},
+ {"foo", "bar", "", "foo (bar)"},
+ {"foo", "", "baz", "foo <baz>"},
+ {"", "bar", "baz", "(bar) <baz>"},
+ {"foo", "bar", "baz", "foo (bar) <baz>"},
+}
+
+func TestNewUserId(t *testing.T) {
+ for i, test := range newUserIdTests {
+ uid := NewUserId(test.name, test.comment, test.email)
+ if uid == nil {
+ t.Errorf("#%d: returned nil", i)
+ continue
+ }
+ if uid.Id != test.id {
+ t.Errorf("#%d: got '%s', want '%s'", i, uid.Id, test.id)
+ }
+ }
+}
+
+var invalidNewUserIdTests = []struct {
+ name, comment, email string
+}{
+ {"foo(", "", ""},
+ {"foo<", "", ""},
+ {"", "bar)", ""},
+ {"", "bar<", ""},
+ {"", "", "baz>"},
+ {"", "", "baz)"},
+ {"", "", "baz\x00"},
+}
+
+func TestNewUserIdWithInvalidInput(t *testing.T) {
+ for i, test := range invalidNewUserIdTests {
+ if uid := NewUserId(test.name, test.comment, test.email); uid != nil {
+ t.Errorf("#%d: returned non-nil value: %#v", i, uid)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/read.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/read.go
new file mode 100644
index 00000000000..dfffc398d5b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/read.go
@@ -0,0 +1,439 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package openpgp implements high level operations on OpenPGP messages.
+package openpgp // import "golang.org/x/crypto/openpgp"
+
+import (
+ "crypto"
+ _ "crypto/sha256"
+ "hash"
+ "io"
+ "strconv"
+
+ "golang.org/x/crypto/openpgp/armor"
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+// SignatureType is the armor type for a PGP signature.
+var SignatureType = "PGP SIGNATURE"
+
+// readArmored reads an armored block with the given type.
+func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
+ block, err := armor.Decode(r)
+ if err != nil {
+ return
+ }
+
+ if block.Type != expectedType {
+ return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
+ }
+
+ return block.Body, nil
+}
+
+// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
+// signed message.
+type MessageDetails struct {
+ IsEncrypted bool // true if the message was encrypted.
+ EncryptedToKeyIds []uint64 // the list of recipient key ids.
+ IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message.
+ DecryptedWith Key // the private key used to decrypt the message, if any.
+ IsSigned bool // true if the message is signed.
+ SignedByKeyId uint64 // the key id of the signer, if any.
+ SignedBy *Key // the key of the signer, if available.
+ LiteralData *packet.LiteralData // the metadata of the contents
+ UnverifiedBody io.Reader // the contents of the message.
+
+ // If IsSigned is true and SignedBy is non-zero then the signature will
+ // be verified as UnverifiedBody is read. The signature cannot be
+ // checked until the whole of UnverifiedBody is read so UnverifiedBody
+ // must be consumed until EOF before the data can be trusted. Even if a
+ // message isn't signed (or the signer is unknown) the data may contain
+ // an authentication code that is only checked once UnverifiedBody has
+ // been consumed. Once EOF has been seen, the following fields are
+ // valid. (An authentication code failure is reported as a
+ // SignatureError error when reading from UnverifiedBody.)
+ SignatureError error // nil if the signature is good.
+ Signature *packet.Signature // the signature packet itself.
+
+ decrypted io.ReadCloser
+}
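+
+// The consumption pattern implied above, as a sketch (editor's note;
+// most error handling is elided):
+//
+//	md, _ := ReadMessage(r, keyring, prompt, nil)
+//	body, err := ioutil.ReadAll(md.UnverifiedBody) // must reach EOF
+//	if err != nil || md.SignatureError != nil {
+//		// bad signature or failed integrity check: discard body
+//	}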
+
+// A PromptFunction is used as a callback by functions that may need to decrypt
+// a private key, or prompt for a passphrase. It is called with a list of
+// acceptable, encrypted private keys and a boolean that indicates whether a
+// passphrase is usable. It should either decrypt a private key or return a
+// passphrase to try. If the decrypted private key or given passphrase isn't
+// correct, the function will be called again, forever. Any error returned will
+// be passed up.
+type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
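+
+// A minimal PromptFunction sketch (illustrative; assumes a fixed passphrase):
+//
+//	prompt := func(keys []Key, symmetric bool) ([]byte, error) {
+//		for _, k := range keys {
+//			if k.PrivateKey.Decrypt([]byte("passphrase")) == nil {
+//				return nil, nil // key decrypted in place; ReadMessage retries
+//			}
+//		}
+//		return []byte("passphrase"), nil // otherwise try it as a passphrase
+//	}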
+
+// A keyEnvelopePair is used to store a private key with the envelope that
+// contains a symmetric key, encrypted with that key.
+type keyEnvelopePair struct {
+ key Key
+ encryptedKey *packet.EncryptedKey
+}
+
+// ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
+// The given KeyRing should contain both public keys (for signature
+// verification) and, possibly encrypted, private keys for decrypting.
+// If config is nil, sensible defaults will be used.
+func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) {
+ var p packet.Packet
+
+ var symKeys []*packet.SymmetricKeyEncrypted
+ var pubKeys []keyEnvelopePair
+ var se *packet.SymmetricallyEncrypted
+
+ packets := packet.NewReader(r)
+ md = new(MessageDetails)
+ md.IsEncrypted = true
+
+ // The message, if encrypted, starts with a number of packets
+ // containing an encrypted decryption key. The decryption key is either
+ // encrypted to a public key, or with a passphrase. This loop
+ // collects these packets.
+ParsePackets:
+ for {
+ p, err = packets.Next()
+ if err != nil {
+ return nil, err
+ }
+ switch p := p.(type) {
+ case *packet.SymmetricKeyEncrypted:
+ // This packet contains the decryption key encrypted with a passphrase.
+ md.IsSymmetricallyEncrypted = true
+ symKeys = append(symKeys, p)
+ case *packet.EncryptedKey:
+ // This packet contains the decryption key encrypted to a public key.
+ md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId)
+ switch p.Algo {
+ case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal:
+ break
+ default:
+ continue
+ }
+ var keys []Key
+ if p.KeyId == 0 {
+ keys = keyring.DecryptionKeys()
+ } else {
+ keys = keyring.KeysById(p.KeyId)
+ }
+ for _, k := range keys {
+ pubKeys = append(pubKeys, keyEnvelopePair{k, p})
+ }
+ case *packet.SymmetricallyEncrypted:
+ se = p
+ break ParsePackets
+ case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
+ // This message isn't encrypted.
+ if len(symKeys) != 0 || len(pubKeys) != 0 {
+ return nil, errors.StructuralError("key material not followed by encrypted message")
+ }
+ packets.Unread(p)
+ return readSignedMessage(packets, nil, keyring)
+ }
+ }
+
+ var candidates []Key
+ var decrypted io.ReadCloser
+
+ // Now that we have the list of encrypted keys we need to decrypt at
+ // least one of them or, if we cannot, we need to call the prompt
+ // function so that it can decrypt a key or give us a passphrase.
+FindKey:
+ for {
+ // See if any of the keys already have a private key available
+ candidates = candidates[:0]
+ candidateFingerprints := make(map[string]bool)
+
+ for _, pk := range pubKeys {
+ if pk.key.PrivateKey == nil {
+ continue
+ }
+ if !pk.key.PrivateKey.Encrypted {
+ if len(pk.encryptedKey.Key) == 0 {
+ pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
+ }
+ if len(pk.encryptedKey.Key) == 0 {
+ continue
+ }
+ decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
+ if err != nil && err != errors.ErrKeyIncorrect {
+ return nil, err
+ }
+ if decrypted != nil {
+ md.DecryptedWith = pk.key
+ break FindKey
+ }
+ } else {
+ fpr := string(pk.key.PublicKey.Fingerprint[:])
+ if v := candidateFingerprints[fpr]; v {
+ continue
+ }
+ candidates = append(candidates, pk.key)
+ candidateFingerprints[fpr] = true
+ }
+ }
+
+ if len(candidates) == 0 && len(symKeys) == 0 {
+ return nil, errors.ErrKeyIncorrect
+ }
+
+ if prompt == nil {
+ return nil, errors.ErrKeyIncorrect
+ }
+
+ passphrase, err := prompt(candidates, len(symKeys) != 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // Try the symmetric passphrase first
+ if len(symKeys) != 0 && passphrase != nil {
+ for _, s := range symKeys {
+ key, cipherFunc, err := s.Decrypt(passphrase)
+ if err == nil {
+ decrypted, err = se.Decrypt(cipherFunc, key)
+ if err != nil && err != errors.ErrKeyIncorrect {
+ return nil, err
+ }
+ if decrypted != nil {
+ break FindKey
+ }
+ }
+
+ }
+ }
+ }
+
+ md.decrypted = decrypted
+ if err := packets.Push(decrypted); err != nil {
+ return nil, err
+ }
+ return readSignedMessage(packets, md, keyring)
+}
+
+// readSignedMessage reads a possibly signed message. If mdin is non-nil then
+// that structure is updated and returned. Otherwise a fresh MessageDetails is
+// used.
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) {
+ if mdin == nil {
+ mdin = new(MessageDetails)
+ }
+ md = mdin
+
+ var p packet.Packet
+ var h hash.Hash
+ var wrappedHash hash.Hash
+FindLiteralData:
+ for {
+ p, err = packets.Next()
+ if err != nil {
+ return nil, err
+ }
+ switch p := p.(type) {
+ case *packet.Compressed:
+ if err := packets.Push(p.Body); err != nil {
+ return nil, err
+ }
+ case *packet.OnePassSignature:
+ if !p.IsLast {
+ return nil, errors.UnsupportedError("nested signatures")
+ }
+
+ h, wrappedHash, err = hashForSignature(p.Hash, p.SigType)
+ if err != nil {
+ md = nil
+ return
+ }
+
+ md.IsSigned = true
+ md.SignedByKeyId = p.KeyId
+ keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign)
+ if len(keys) > 0 {
+ md.SignedBy = &keys[0]
+ }
+ case *packet.LiteralData:
+ md.LiteralData = p
+ break FindLiteralData
+ }
+ }
+
+ if md.SignedBy != nil {
+ md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md}
+ } else if md.decrypted != nil {
+ md.UnverifiedBody = checkReader{md}
+ } else {
+ md.UnverifiedBody = md.LiteralData.Body
+ }
+
+ return md, nil
+}
+
+// hashForSignature returns a pair of hashes that can be used to verify a
+// signature. The signature may specify that the contents of the signed message
+// should be preprocessed (i.e. to normalize line endings). Thus this function
+// returns two hashes. The second should be used to hash the message itself and
+// performs any needed preprocessing.
+func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
+ if !hashId.Available() {
+ return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
+ }
+ h := hashId.New()
+
+ switch sigType {
+ case packet.SigTypeBinary:
+ return h, h, nil
+ case packet.SigTypeText:
+ return h, NewCanonicalTextHash(h), nil
+ }
+
+ return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
+}
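+
+// For example, with SigTypeText the canonical-text wrapper makes "a\nb" and
+// "a\r\nb" hash to the same digest, while the raw hash of the two would differ.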
+
+// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
+// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
+// MDC checks.
+type checkReader struct {
+ md *MessageDetails
+}
+
+func (cr checkReader) Read(buf []byte) (n int, err error) {
+ n, err = cr.md.LiteralData.Body.Read(buf)
+ if err == io.EOF {
+ mdcErr := cr.md.decrypted.Close()
+ if mdcErr != nil {
+ err = mdcErr
+ }
+ }
+ return
+}
+
+// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
+// the data as it is read. When it sees an EOF from the underlying io.Reader
+// it parses and checks a trailing Signature packet and triggers any MDC checks.
+type signatureCheckReader struct {
+ packets *packet.Reader
+ h, wrappedHash hash.Hash
+ md *MessageDetails
+}
+
+func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) {
+ n, err = scr.md.LiteralData.Body.Read(buf)
+ scr.wrappedHash.Write(buf[:n])
+ if err == io.EOF {
+ var p packet.Packet
+ p, scr.md.SignatureError = scr.packets.Next()
+ if scr.md.SignatureError != nil {
+ return
+ }
+
+ var ok bool
+ if scr.md.Signature, ok = p.(*packet.Signature); !ok {
+ scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature")
+ return
+ }
+
+ scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
+
+ // The SymmetricallyEncrypted packet, if any, might have an
+ // unsigned hash of its own. In order to check this we need to
+ // close that Reader.
+ if scr.md.decrypted != nil {
+ mdcErr := scr.md.decrypted.Close()
+ if mdcErr != nil {
+ err = mdcErr
+ }
+ }
+ }
+ return
+}
+
+// CheckDetachedSignature takes a signed file and a detached signature and
+// returns the signer if the signature is valid. If the signer isn't known,
+// ErrUnknownIssuer is returned.
+func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
+ var issuerKeyId uint64
+ var hashFunc crypto.Hash
+ var sigType packet.SignatureType
+ var keys []Key
+ var p packet.Packet
+
+ packets := packet.NewReader(signature)
+ for {
+ p, err = packets.Next()
+ if err == io.EOF {
+ return nil, errors.ErrUnknownIssuer
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ switch sig := p.(type) {
+ case *packet.Signature:
+ if sig.IssuerKeyId == nil {
+ return nil, errors.StructuralError("signature doesn't have an issuer")
+ }
+ issuerKeyId = *sig.IssuerKeyId
+ hashFunc = sig.Hash
+ sigType = sig.SigType
+ case *packet.SignatureV3:
+ issuerKeyId = sig.IssuerKeyId
+ hashFunc = sig.Hash
+ sigType = sig.SigType
+ default:
+ return nil, errors.StructuralError("non signature packet found")
+ }
+
+ keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign)
+ if len(keys) > 0 {
+ break
+ }
+ }
+
+ if len(keys) == 0 {
+ panic("unreachable")
+ }
+
+ h, wrappedHash, err := hashForSignature(hashFunc, sigType)
+ if err != nil {
+ return nil, err
+ }
+
+ if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
+ return nil, err
+ }
+
+ for _, key := range keys {
+ switch sig := p.(type) {
+ case *packet.Signature:
+ err = key.PublicKey.VerifySignature(h, sig)
+ case *packet.SignatureV3:
+ err = key.PublicKey.VerifySignatureV3(h, sig)
+ default:
+ panic("unreachable")
+ }
+
+ if err == nil {
+ return key.Entity, nil
+ }
+ }
+
+ return nil, err
+}
+
+// CheckArmoredDetachedSignature performs the same actions as
+// CheckDetachedSignature but expects the signature to be armored.
+func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
+ body, err := readArmored(signature, SignatureType)
+ if err != nil {
+ return
+ }
+
+ return CheckDetachedSignature(keyring, signed, body)
+}
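+
+// A verification sketch (illustrative; keyringFile, signedFile and sigFile
+// are assumed io.Readers, and fmt is assumed imported):
+//
+//	keyring, err := ReadArmoredKeyRing(keyringFile)
+//	if err != nil {
+//		return err
+//	}
+//	signer, err := CheckArmoredDetachedSignature(keyring, signedFile, sigFile)
+//	if err != nil {
+//		return err // invalid signature or unknown signer
+//	}
+//	fmt.Printf("good signature from key %X\n", signer.PrimaryKey.KeyId)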
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/read_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/read_test.go
new file mode 100644
index 00000000000..7524a02e56d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/read_test.go
@@ -0,0 +1,512 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+ "bytes"
+ _ "crypto/sha512"
+ "encoding/hex"
+ "io"
+ "io/ioutil"
+ "strings"
+ "testing"
+
+ "golang.org/x/crypto/openpgp/errors"
+)
+
+func readerFromHex(s string) io.Reader {
+ data, err := hex.DecodeString(s)
+ if err != nil {
+ panic("readerFromHex: bad input")
+ }
+ return bytes.NewBuffer(data)
+}
+
+func TestReadKeyRing(t *testing.T) {
+ kring, err := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if len(kring) != 2 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB || uint32(kring[1].PrimaryKey.KeyId) != 0x1E35246B {
+ t.Errorf("bad keyring: %#v", kring)
+ }
+}
+
+func TestRereadKeyRing(t *testing.T) {
+ kring, err := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+ if err != nil {
+ t.Errorf("error in initial parse: %s", err)
+ return
+ }
+ out := new(bytes.Buffer)
+ err = kring[0].Serialize(out)
+ if err != nil {
+ t.Errorf("error in serialization: %s", err)
+ return
+ }
+ kring, err = ReadKeyRing(out)
+ if err != nil {
+ t.Errorf("error in second parse: %s", err)
+ return
+ }
+
+ if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB {
+ t.Errorf("bad keyring: %#v", kring)
+ }
+}
+
+func TestReadPrivateKeyRing(t *testing.T) {
+ kring, err := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if len(kring) != 2 || uint32(kring[0].PrimaryKey.KeyId) != 0xC20C31BB || uint32(kring[1].PrimaryKey.KeyId) != 0x1E35246B || kring[0].PrimaryKey == nil {
+ t.Errorf("bad keyring: %#v", kring)
+ }
+}
+
+func TestReadDSAKey(t *testing.T) {
+ kring, err := ReadKeyRing(readerFromHex(dsaTestKeyHex))
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if len(kring) != 1 || uint32(kring[0].PrimaryKey.KeyId) != 0x0CCC0360 {
+ t.Errorf("bad parse: %#v", kring)
+ }
+}
+
+func TestDSAHashTruncation(t *testing.T) {
+ // dsaKeyWithSHA512 was generated with GnuPG and --cert-digest-algo
+ // SHA512 in order to require DSA hash truncation to verify correctly.
+ _, err := ReadKeyRing(readerFromHex(dsaKeyWithSHA512))
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func TestGetKeyById(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+
+ keys := kring.KeysById(0xa34d7e18c20c31bb)
+ if len(keys) != 1 || keys[0].Entity != kring[0] {
+ t.Errorf("bad result for 0xa34d7e18c20c31bb: %#v", keys)
+ }
+
+ keys = kring.KeysById(0xfd94408d4543314f)
+ if len(keys) != 1 || keys[0].Entity != kring[0] {
+ t.Errorf("bad result for 0xa34d7e18c20c31bb: %#v", keys)
+ }
+}
+
+func checkSignedMessage(t *testing.T, signedHex, expected string) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+
+ md, err := ReadMessage(readerFromHex(signedHex), kring, nil, nil)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ if !md.IsSigned || md.SignedByKeyId != 0xa34d7e18c20c31bb || md.SignedBy == nil || md.IsEncrypted || md.IsSymmetricallyEncrypted || len(md.EncryptedToKeyIds) != 0 {
+ t.Errorf("bad MessageDetails: %#v", md)
+ }
+
+ contents, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("error reading UnverifiedBody: %s", err)
+ }
+ if string(contents) != expected {
+ t.Errorf("bad UnverifiedBody got:%s want:%s", string(contents), expected)
+ }
+ if md.SignatureError != nil || md.Signature == nil {
+ t.Errorf("failed to validate: %s", md.SignatureError)
+ }
+}
+
+func TestSignedMessage(t *testing.T) {
+ checkSignedMessage(t, signedMessageHex, signedInput)
+}
+
+func TestTextSignedMessage(t *testing.T) {
+ checkSignedMessage(t, signedTextMessageHex, signedTextInput)
+}
+
+// The reader should detect "compressed quines", which are compressed
+// packets that expand into themselves and cause an infinite recursive
+// parsing loop.
+// The packet in this test case comes from Taylor R. Campbell at
+// http://mumble.net/~campbell/misc/pgp-quine/
+func TestCampbellQuine(t *testing.T) {
+ md, err := ReadMessage(readerFromHex(campbellQuine), nil, nil, nil)
+ if md != nil {
+ t.Errorf("Reading a compressed quine should not return any data: %#v", md)
+ }
+ structural, ok := err.(errors.StructuralError)
+ if !ok {
+ t.Fatalf("Unexpected class of error: %T", err)
+ }
+ if !strings.Contains(string(structural), "too many layers of packets") {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+}
+
+var signedEncryptedMessageTests = []struct {
+ keyRingHex string
+ messageHex string
+ signedByKeyId uint64
+ encryptedToKeyId uint64
+}{
+ {
+ testKeys1And2PrivateHex,
+ signedEncryptedMessageHex,
+ 0xa34d7e18c20c31bb,
+ 0x2a67d68660df41c7,
+ },
+ {
+ dsaElGamalTestKeysHex,
+ signedEncryptedMessage2Hex,
+ 0x33af447ccd759b09,
+ 0xcf6a7abcd43e3673,
+ },
+}
+
+func TestSignedEncryptedMessage(t *testing.T) {
+ for i, test := range signedEncryptedMessageTests {
+ expected := "Signed and encrypted message\n"
+ kring, _ := ReadKeyRing(readerFromHex(test.keyRingHex))
+ prompt := func(keys []Key, symmetric bool) ([]byte, error) {
+ if symmetric {
+ t.Errorf("prompt: message was marked as symmetrically encrypted")
+ return nil, errors.ErrKeyIncorrect
+ }
+
+ if len(keys) == 0 {
+ t.Error("prompt: no keys requested")
+ return nil, errors.ErrKeyIncorrect
+ }
+
+ err := keys[0].PrivateKey.Decrypt([]byte("passphrase"))
+ if err != nil {
+ t.Errorf("prompt: error decrypting key: %s", err)
+ return nil, errors.ErrKeyIncorrect
+ }
+
+ return nil, nil
+ }
+
+ md, err := ReadMessage(readerFromHex(test.messageHex), kring, prompt, nil)
+ if err != nil {
+ t.Errorf("#%d: error reading message: %s", i, err)
+ return
+ }
+
+ if !md.IsSigned || md.SignedByKeyId != test.signedByKeyId || md.SignedBy == nil || !md.IsEncrypted || md.IsSymmetricallyEncrypted || len(md.EncryptedToKeyIds) == 0 || md.EncryptedToKeyIds[0] != test.encryptedToKeyId {
+ t.Errorf("#%d: bad MessageDetails: %#v", i, md)
+ }
+
+ contents, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("#%d: error reading UnverifiedBody: %s", i, err)
+ }
+ if string(contents) != expected {
+ t.Errorf("#%d: bad UnverifiedBody got:%s want:%s", i, string(contents), expected)
+ }
+
+ if md.SignatureError != nil || md.Signature == nil {
+ t.Errorf("#%d: failed to validate: %s", i, md.SignatureError)
+ }
+ }
+}
+
+func TestUnspecifiedRecipient(t *testing.T) {
+ expected := "Recipient unspecified\n"
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex))
+
+ md, err := ReadMessage(readerFromHex(recipientUnspecifiedHex), kring, nil, nil)
+ if err != nil {
+ t.Errorf("error reading message: %s", err)
+ return
+ }
+
+ contents, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("error reading UnverifiedBody: %s", err)
+ }
+ if string(contents) != expected {
+ t.Errorf("bad UnverifiedBody got:%s want:%s", string(contents), expected)
+ }
+}
+
+func TestSymmetricallyEncrypted(t *testing.T) {
+ firstTimeCalled := true
+
+ prompt := func(keys []Key, symmetric bool) ([]byte, error) {
+ if len(keys) != 0 {
+ t.Errorf("prompt: len(keys) = %d (want 0)", len(keys))
+ }
+
+ if !symmetric {
+ t.Errorf("symmetric is not set")
+ }
+
+ if firstTimeCalled {
+ firstTimeCalled = false
+ return []byte("wrongpassword"), nil
+ }
+
+ return []byte("password"), nil
+ }
+
+ md, err := ReadMessage(readerFromHex(symmetricallyEncryptedCompressedHex), nil, prompt, nil)
+ if err != nil {
+ t.Errorf("ReadMessage: %s", err)
+ return
+ }
+
+ contents, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("ReadAll: %s", err)
+ }
+
+ expectedCreationTime := uint32(1295992998)
+ if md.LiteralData.Time != expectedCreationTime {
+ t.Errorf("LiteralData.Time is %d, want %d", md.LiteralData.Time, expectedCreationTime)
+ }
+
+ const expected = "Symmetrically encrypted.\n"
+ if string(contents) != expected {
+ t.Errorf("contents got: %s want: %s", string(contents), expected)
+ }
+}
+
+func testDetachedSignature(t *testing.T, kring KeyRing, signature io.Reader, sigInput, tag string, expectedSignerKeyId uint64) {
+ signed := bytes.NewBufferString(sigInput)
+ signer, err := CheckDetachedSignature(kring, signed, signature)
+ if err != nil {
+ t.Errorf("%s: signature error: %s", tag, err)
+ return
+ }
+ if signer == nil {
+ t.Errorf("%s: signer is nil", tag)
+ return
+ }
+ if signer.PrimaryKey.KeyId != expectedSignerKeyId {
+ t.Errorf("%s: wrong signer got:%x want:%x", tag, signer.PrimaryKey.KeyId, expectedSignerKeyId)
+ }
+}
+
+func TestDetachedSignature(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+ testDetachedSignature(t, kring, readerFromHex(detachedSignatureHex), signedInput, "binary", testKey1KeyId)
+ testDetachedSignature(t, kring, readerFromHex(detachedSignatureTextHex), signedInput, "text", testKey1KeyId)
+ testDetachedSignature(t, kring, readerFromHex(detachedSignatureV3TextHex), signedInput, "v3", testKey1KeyId)
+
+ incorrectSignedInput := signedInput + "X"
+ _, err := CheckDetachedSignature(kring, bytes.NewBufferString(incorrectSignedInput), readerFromHex(detachedSignatureHex))
+ if err == nil {
+ t.Fatal("CheckDetachedSignature returned without error for bad signature")
+ }
+ if err == errors.ErrUnknownIssuer {
+ t.Fatal("CheckDetachedSignature returned ErrUnknownIssuer when the signer was known, but the signature invalid")
+ }
+}
+
+func TestDetachedSignatureDSA(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyHex))
+ testDetachedSignature(t, kring, readerFromHex(detachedSignatureDSAHex), signedInput, "binary", testKey3KeyId)
+}
+
+func TestMultipleSignaturePacketsDSA(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyHex))
+ testDetachedSignature(t, kring, readerFromHex(missingHashFunctionHex+detachedSignatureDSAHex), signedInput, "binary", testKey3KeyId)
+}
+
+func testHashFunctionError(t *testing.T, signatureHex string) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+ _, err := CheckDetachedSignature(kring, nil, readerFromHex(signatureHex))
+ if err == nil {
+ t.Fatal("Packet with bad hash type was correctly parsed")
+ }
+ unsupported, ok := err.(errors.UnsupportedError)
+ if !ok {
+ t.Fatalf("Unexpected class of error: %s", err)
+ }
+ if !strings.Contains(string(unsupported), "hash ") {
+ t.Fatalf("Unexpected error: %s", err)
+ }
+}
+
+func TestUnknownHashFunction(t *testing.T) {
+ // unknownHashFunctionHex contains a signature packet with hash
+ // function type 153 (which isn't a real hash function id).
+ testHashFunctionError(t, unknownHashFunctionHex)
+}
+
+func TestMissingHashFunction(t *testing.T) {
+ // missingHashFunctionHex contains a signature packet that uses
+ // RIPEMD160, which isn't compiled in. Since that's the only signature
+ // packet, we don't find any suitable packets and end up with ErrUnknownIssuer.
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2Hex))
+ _, err := CheckDetachedSignature(kring, nil, readerFromHex(missingHashFunctionHex))
+ if err == nil {
+ t.Fatal("Packet with missing hash type was correctly parsed")
+ }
+ if err != errors.ErrUnknownIssuer {
+ t.Fatalf("Unexpected class of error: %s", err)
+ }
+}
+
+func TestReadingArmoredPrivateKey(t *testing.T) {
+ el, err := ReadArmoredKeyRing(bytes.NewBufferString(armoredPrivateKeyBlock))
+ if err != nil {
+ t.Error(err)
+ }
+ if len(el) != 1 {
+ t.Errorf("got %d entities, wanted 1\n", len(el))
+ }
+}
+
+func TestReadingArmoredPublicKey(t *testing.T) {
+ el, err := ReadArmoredKeyRing(bytes.NewBufferString(e2ePublicKey))
+ if err != nil {
+ t.Error(err)
+ }
+ if len(el) != 1 {
+ t.Errorf("didn't get a valid entity")
+ }
+}
+
+func TestNoArmoredData(t *testing.T) {
+ _, err := ReadArmoredKeyRing(bytes.NewBufferString("foo"))
+ if _, ok := err.(errors.InvalidArgumentError); !ok {
+ t.Errorf("error was not an InvalidArgumentError: %s", err)
+ }
+}
+
+func testReadMessageError(t *testing.T, messageHex string) {
+ buf, err := hex.DecodeString(messageHex)
+ if err != nil {
+ t.Errorf("hex.DecodeString(): %v", err)
+ }
+
+ kr, err := ReadKeyRing(new(bytes.Buffer))
+ if err != nil {
+ t.Errorf("ReadKeyring(): %v", err)
+ }
+
+ _, err = ReadMessage(bytes.NewBuffer(buf), kr,
+ func([]Key, bool) ([]byte, error) {
+ return []byte("insecure"), nil
+ }, nil)
+
+ if err == nil {
+ t.Errorf("ReadMessage(): Unexpected nil error")
+ }
+}
+
+func TestIssue11503(t *testing.T) {
+ testReadMessageError(t, "8c040402000aa430aa8228b9248b01fc899a91197130303030")
+}
+
+func TestIssue11504(t *testing.T) {
+ testReadMessageError(t, "9303000130303030303030303030983002303030303030030000000130")
+}
+
+const testKey1KeyId = 0xA34D7E18C20C31BB
+const testKey3KeyId = 0x338934250CCC0360
+
+const signedInput = "Signed message\nline 2\nline 3\n"
+const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n"
+
+const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b"
+
+const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77"
+
+const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39"
+
+const detachedSignatureV3TextHex = "8900950305005255c25ca34d7e18c20c31bb0102bb3f04009f6589ef8a028d6e54f6eaf25432e590d31c3a41f4710897585e10c31e5e332c7f9f409af8512adceaff24d0da1474ab07aa7bce4f674610b010fccc5b579ae5eb00a127f272fb799f988ab8e4574c141da6dbfecfef7e6b2c478d9a3d2551ba741f260ee22bec762812f0053e05380bfdd55ad0f22d8cdf71b233fe51ae8a24"
+
+const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83"
+
+const testKeys1And2Hex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003"
+
+const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000"
+
+const dsaElGamalTestKeysHex = "9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a3403386226180
6bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4bfb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000"
+
+const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300"
+
+const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200"
+
+const signedEncryptedMessageHex = "848c032a67d68660df41c70103ff5789d0de26b6a50c985a02a13131ca829c413a35d0e6fa8d6842599252162808ac7439c72151c8c6183e76923fe3299301414d0c25a2f06a2257db3839e7df0ec964773f6e4c4ac7ff3b48c444237166dd46ba8ff443a5410dc670cb486672fdbe7c9dfafb75b4fea83af3a204fe2a7dfa86bd20122b4f3d2646cbeecb8f7be8d2c03b018bd210b1d3791e1aba74b0f1034e122ab72e760492c192383cf5e20b5628bd043272d63df9b923f147eb6091cd897553204832aba48fec54aa447547bb16305a1024713b90e77fd0065f1918271947549205af3c74891af22ee0b56cd29bfec6d6e351901cd4ab3ece7c486f1e32a792d4e474aed98ee84b3f591c7dff37b64e0ecd68fd036d517e412dcadf85840ce184ad7921ad446c4ee28db80447aea1ca8d4f574db4d4e37688158ddd19e14ee2eab4873d46947d65d14a23e788d912cf9a19624ca7352469b72a83866b7c23cb5ace3deab3c7018061b0ba0f39ed2befe27163e5083cf9b8271e3e3d52cc7ad6e2a3bd81d4c3d7022f8d"
+
+const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3"
+
+const symmetricallyEncryptedCompressedHex = "8c0d04030302eb4a03808145d0d260c92f714339e13de5a79881216431925bf67ee2898ea61815f07894cd0703c50d0a76ef64d482196f47a8bc729af9b80bb6"
+
+const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794"
+
+const dsaTestKeyPrivateHex = "9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794"
+
+const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK-----
+Version: GnuPG v1.4.10 (GNU/Linux)
+
+lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp
+idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn
+vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB
+AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X
+0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL
+IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk
+VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn
+gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9
+TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx
+q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz
+dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA
+CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1
+ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+
+eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid
+AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV
+bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK
+/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA
+A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX
+TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc
+lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6
+rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN
+oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8
+QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU
+nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC
+AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp
+BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad
+AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL
+VrM0m72/jnpKo04=
+=zNCn
+-----END PGP PRIVATE KEY BLOCK-----`
+
+const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
+Charset: UTF-8
+
+xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4
+sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk
+Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/
+AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD
+24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX
++WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8
+B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX
+fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA
+FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9
+ex7En5r7rHR5xwX82Msc+Rq9dSyO
+=7MrZ
+-----END PGP PUBLIC KEY BLOCK-----`
+
+const dsaKeyWithSHA512 = `9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003`
+
+const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101`
+
+const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
+
+const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000`
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/s2k/s2k.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/s2k/s2k.go
new file mode 100644
index 00000000000..0e8641ed1b4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/s2k/s2k.go
@@ -0,0 +1,273 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2k implements the various OpenPGP string-to-key transforms as
+// specified in RFC 4880 section 3.7.1.
+package s2k // import "golang.org/x/crypto/openpgp/s2k"
+
+import (
+ "crypto"
+ "hash"
+ "io"
+ "strconv"
+
+ "golang.org/x/crypto/openpgp/errors"
+)
+
+// Config collects configuration parameters for s2k key-stretching
+// transformations. A nil *Config is valid and results in all default
+// values. Currently, Config is used only by the Serialize function in
+// this package.
+type Config struct {
+ // Hash is the default hash function to be used. If
+ // nil, SHA1 is used.
+ Hash crypto.Hash
+ // S2KCount is only used for symmetric encryption. It
+ // determines the strength of the passphrase stretching when
+ // the said passphrase is hashed to produce a key. S2KCount
+ // should be between 1024 and 65011712, inclusive. If Config
+ // is nil or S2KCount is 0, the value 65536 is used. Not all
+ // values in the above range can be represented. S2KCount will
+ // be rounded up to the next representable value if it cannot
+ // be encoded exactly. When set, it is strongly encouraged to
+ // use a value that is at least 65536. See RFC 4880 Section
+ // 3.7.1.3.
+ S2KCount int
+}
+
+func (c *Config) hash() crypto.Hash {
+ if c == nil || uint(c.Hash) == 0 {
+ // SHA1 is the historical default in this package.
+ return crypto.SHA1
+ }
+
+ return c.Hash
+}
+
+func (c *Config) encodedCount() uint8 {
+ if c == nil || c.S2KCount == 0 {
+ return 96 // The common case, corresponding to 65536.
+ }
+
+ i := c.S2KCount
+ switch {
+ // Behave like GPG. Should we make 65536 the lowest value used?
+ case i < 1024:
+ i = 1024
+ case i > 65011712:
+ i = 65011712
+ }
+
+ return encodeCount(i)
+}
+
+// encodeCount converts an iterative "count" in the range 1024 to
+// 65011712, inclusive, to an encoded count. The return value is the
+// octet that is actually stored in the GPG file. encodeCount panics
+// if i is not in the above range (encodedCount above takes care to
+// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
+func encodeCount(i int) uint8 {
+ if i < 1024 || i > 65011712 {
+ panic("count arg i outside the required range")
+ }
+
+ for encoded := 0; encoded < 256; encoded++ {
+ count := decodeCount(uint8(encoded))
+ if count >= i {
+ return uint8(encoded)
+ }
+ }
+
+ return 255
+}
+
+// decodeCount returns the s2k mode 3 iterative "count" corresponding to
+// the encoded octet c.
+func decodeCount(c uint8) int {
+ return (16 + int(c&15)) << (uint32(c>>4) + 6)
+}
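+
+// For example, the default encoded octet 96 (binary 0110 0000) decodes to
+// (16 + 0) << (6 + 6) = 16 << 12 = 65536 iterations.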
+
+// Simple writes to out the result of computing the Simple S2K function (RFC
+// 4880, section 3.7.1.1) using the given hash and input passphrase.
+func Simple(out []byte, h hash.Hash, in []byte) {
+ Salted(out, h, in, nil)
+}
+
+var zero [1]byte
+
+// Salted writes to out the result of computing the Salted S2K function (RFC
+// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
+func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
+ done := 0
+ var digest []byte
+
+ for i := 0; done < len(out); i++ {
+ h.Reset()
+ for j := 0; j < i; j++ {
+ h.Write(zero[:])
+ }
+ h.Write(salt)
+ h.Write(in)
+ digest = h.Sum(digest[:0])
+ n := copy(out[done:], digest)
+ done += n
+ }
+}
+
+// Iterated writes to out the result of computing the Iterated and Salted S2K
+// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
+// salt and iteration count.
+func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
+ combined := make([]byte, len(in)+len(salt))
+ copy(combined, salt)
+ copy(combined[len(salt):], in)
+
+ if count < len(combined) {
+ count = len(combined)
+ }
+
+ done := 0
+ var digest []byte
+ for i := 0; done < len(out); i++ {
+ h.Reset()
+ for j := 0; j < i; j++ {
+ h.Write(zero[:])
+ }
+ written := 0
+ for written < count {
+ if written+len(combined) > count {
+ todo := count - written
+ h.Write(combined[:todo])
+ written = count
+ } else {
+ h.Write(combined)
+ written += len(combined)
+ }
+ }
+ digest = h.Sum(digest[:0])
+ n := copy(out[done:], digest)
+ done += n
+ }
+}
+
+// Parse reads a binary specification for a string-to-key transformation from r
+// and returns a function which performs that transform.
+func Parse(r io.Reader) (f func(out, in []byte), err error) {
+ var buf [9]byte
+
+ _, err = io.ReadFull(r, buf[:2])
+ if err != nil {
+ return
+ }
+
+ hash, ok := HashIdToHash(buf[1])
+ if !ok {
+ return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1])))
+ }
+ if !hash.Available() {
+ return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash)))
+ }
+ h := hash.New()
+
+ switch buf[0] {
+ case 0:
+ f := func(out, in []byte) {
+ Simple(out, h, in)
+ }
+ return f, nil
+ case 1:
+ _, err = io.ReadFull(r, buf[:8])
+ if err != nil {
+ return
+ }
+ f := func(out, in []byte) {
+ Salted(out, h, in, buf[:8])
+ }
+ return f, nil
+ case 3:
+ _, err = io.ReadFull(r, buf[:9])
+ if err != nil {
+ return
+ }
+ count := decodeCount(buf[8])
+ f := func(out, in []byte) {
+ Iterated(out, h, in, buf[:8], count)
+ }
+ return f, nil
+ }
+
+ return nil, errors.UnsupportedError("S2K function")
+}
+
+// Serialize salts and stretches the given passphrase and writes the
+// resulting key into key. It also serializes an S2K descriptor to
+// w. The key stretching can be configured with c, which may be
+// nil. In that case, sensible defaults will be used.
+func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
+ var buf [11]byte
+ buf[0] = 3 /* iterated and salted */
+ buf[1], _ = HashToHashId(c.hash())
+ salt := buf[2:10]
+ if _, err := io.ReadFull(rand, salt); err != nil {
+ return err
+ }
+ encodedCount := c.encodedCount()
+ count := decodeCount(encodedCount)
+ buf[10] = encodedCount
+ if _, err := w.Write(buf[:]); err != nil {
+ return err
+ }
+
+ Iterated(key, c.hash().New(), passphrase, salt, count)
+ return nil
+}
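+
+// A round-trip sketch (illustrative; assumes the bytes and crypto/rand
+// packages are imported): a key derived by Serialize can be re-derived by
+// feeding the written descriptor back through Parse.
+//
+//	var buf bytes.Buffer
+//	key := make([]byte, 16)
+//	if err := Serialize(&buf, key, rand.Reader, []byte("passphrase"), nil); err != nil {
+//		return err
+//	}
+//	f, err := Parse(&buf)
+//	if err != nil {
+//		return err
+//	}
+//	key2 := make([]byte, 16)
+//	f(key2, []byte("passphrase")) // key2 now equals key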
+
+// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with
+// Go's crypto.Hash type. See RFC 4880, section 9.4.
+var hashToHashIdMapping = []struct {
+ id byte
+ hash crypto.Hash
+ name string
+}{
+ {1, crypto.MD5, "MD5"},
+ {2, crypto.SHA1, "SHA1"},
+ {3, crypto.RIPEMD160, "RIPEMD160"},
+ {8, crypto.SHA256, "SHA256"},
+ {9, crypto.SHA384, "SHA384"},
+ {10, crypto.SHA512, "SHA512"},
+ {11, crypto.SHA224, "SHA224"},
+}
+
+// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
+// hash id.
+func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
+ for _, m := range hashToHashIdMapping {
+ if m.id == id {
+ return m.hash, true
+ }
+ }
+ return 0, false
+}
+
+// HashIdToString returns the name of the hash function corresponding to the
+// given OpenPGP hash id, or ok=false if the id is unknown.
+func HashIdToString(id byte) (name string, ok bool) {
+ for _, m := range hashToHashIdMapping {
+ if m.id == id {
+ return m.name, true
+ }
+ }
+
+ return "", false
+}
+
+// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
+func HashToHashId(h crypto.Hash) (id byte, ok bool) {
+ for _, m := range hashToHashIdMapping {
+ if m.hash == h {
+ return m.id, true
+ }
+ }
+ return 0, false
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/s2k/s2k_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/s2k/s2k_test.go
new file mode 100644
index 00000000000..183d26056b1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/s2k/s2k_test.go
@@ -0,0 +1,137 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s2k
+
+import (
+ "bytes"
+ "crypto"
+ _ "crypto/md5"
+ "crypto/rand"
+ "crypto/sha1"
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+ "encoding/hex"
+ "testing"
+
+ _ "golang.org/x/crypto/ripemd160"
+)
+
+var saltedTests = []struct {
+ in, out string
+}{
+ {"hello", "10295ac1"},
+ {"world", "ac587a5e"},
+ {"foo", "4dda8077"},
+ {"bar", "bd8aac6b9ea9cae04eae6a91c6133b58b5d9a61c14f355516ed9370456"},
+ {"x", "f1d3f289"},
+ {"xxxxxxxxxxxxxxxxxxxxxxx", "e00d7b45"},
+}
+
+func TestSalted(t *testing.T) {
+ h := sha1.New()
+ salt := [4]byte{1, 2, 3, 4}
+
+ for i, test := range saltedTests {
+ expected, _ := hex.DecodeString(test.out)
+ out := make([]byte, len(expected))
+ Salted(out, h, []byte(test.in), salt[:])
+ if !bytes.Equal(expected, out) {
+ t.Errorf("#%d, got: %x want: %x", i, out, expected)
+ }
+ }
+}
+
+var iteratedTests = []struct {
+ in, out string
+}{
+ {"hello", "83126105"},
+ {"world", "6fa317f9"},
+ {"foo", "8fbc35b9"},
+ {"bar", "2af5a99b54f093789fd657f19bd245af7604d0f6ae06f66602a46a08ae"},
+ {"x", "5a684dfe"},
+ {"xxxxxxxxxxxxxxxxxxxxxxx", "18955174"},
+}
+
+func TestIterated(t *testing.T) {
+ h := sha1.New()
+ salt := [4]byte{4, 3, 2, 1}
+
+ for i, test := range iteratedTests {
+ expected, _ := hex.DecodeString(test.out)
+ out := make([]byte, len(expected))
+ Iterated(out, h, []byte(test.in), salt[:], 31)
+ if !bytes.Equal(expected, out) {
+ t.Errorf("#%d, got: %x want: %x", i, out, expected)
+ }
+ }
+}
+
+var parseTests = []struct {
+ spec, in, out string
+}{
+ /* Simple with SHA1 */
+ {"0002", "hello", "aaf4c61d"},
+ /* Salted with SHA1 */
+ {"01020102030405060708", "hello", "f4f7d67e"},
+ /* Iterated with SHA1 */
+ {"03020102030405060708f1", "hello", "f2a57b7c"},
+}
+
+func TestParse(t *testing.T) {
+ for i, test := range parseTests {
+ spec, _ := hex.DecodeString(test.spec)
+ buf := bytes.NewBuffer(spec)
+ f, err := Parse(buf)
+ if err != nil {
+ t.Errorf("%d: Parse returned error: %s", i, err)
+ continue
+ }
+
+ expected, _ := hex.DecodeString(test.out)
+ out := make([]byte, len(expected))
+ f(out, []byte(test.in))
+ if !bytes.Equal(out, expected) {
+ t.Errorf("%d: output got: %x want: %x", i, out, expected)
+ }
+ if testing.Short() {
+ break
+ }
+ }
+}
+
+func TestSerialize(t *testing.T) {
+ hashes := []crypto.Hash{crypto.MD5, crypto.SHA1, crypto.RIPEMD160,
+ crypto.SHA256, crypto.SHA384, crypto.SHA512, crypto.SHA224}
+ testCounts := []int{-1, 0, 1024, 65536, 4063232, 65011712}
+ for _, h := range hashes {
+ for _, c := range testCounts {
+ testSerializeConfig(t, &Config{Hash: h, S2KCount: c})
+ }
+ }
+}
+
+func testSerializeConfig(t *testing.T, c *Config) {
+ t.Logf("Running testSerializeConfig() with config: %+v", c)
+
+ buf := bytes.NewBuffer(nil)
+ key := make([]byte, 16)
+ passphrase := []byte("testing")
+ err := Serialize(buf, key, rand.Reader, passphrase, c)
+ if err != nil {
+ t.Errorf("failed to serialize: %s", err)
+ return
+ }
+
+ f, err := Parse(buf)
+ if err != nil {
+ t.Errorf("failed to reparse: %s", err)
+ return
+ }
+ key2 := make([]byte, len(key))
+ f(key2, passphrase)
+ if !bytes.Equal(key2, key) {
+ t.Errorf("keys don't match: %x (serialied) vs %x (parsed)", key, key2)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/write.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/write.go
new file mode 100644
index 00000000000..15aaa1a0151
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/write.go
@@ -0,0 +1,378 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+ "crypto"
+ "hash"
+ "io"
+ "strconv"
+ "time"
+
+ "golang.org/x/crypto/openpgp/armor"
+ "golang.org/x/crypto/openpgp/errors"
+ "golang.org/x/crypto/openpgp/packet"
+ "golang.org/x/crypto/openpgp/s2k"
+)
+
+// DetachSign signs message with the private key from signer (which must
+// already have been decrypted) and writes the signature to w.
+// If config is nil, sensible defaults will be used.
+func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
+ return detachSign(w, signer, message, packet.SigTypeBinary, config)
+}
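+
+// A minimal caller sketch (assumes a decrypted signer *Entity is already in
+// hand; error handling elided):
+//
+//	var sig bytes.Buffer
+//	err := DetachSign(&sig, signer, strings.NewReader("message"), nil)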
+
+// ArmoredDetachSign signs message with the private key from signer (which
+// must already have been decrypted) and writes an armored signature to w.
+// If config is nil, sensible defaults will be used.
+func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
+ return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
+}
+
+// DetachSignText signs message (after canonicalising the line endings) with
+// the private key from signer (which must already have been decrypted) and
+// writes the signature to w.
+// If config is nil, sensible defaults will be used.
+func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
+ return detachSign(w, signer, message, packet.SigTypeText, config)
+}
+
+// ArmoredDetachSignText signs message (after canonicalising the line endings)
+// with the private key from signer (which must already have been decrypted)
+// and writes an armored signature to w.
+// If config is nil, sensible defaults will be used.
+func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
+ return armoredDetachSign(w, signer, message, packet.SigTypeText, config)
+}
+
+func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
+ out, err := armor.Encode(w, SignatureType, nil)
+ if err != nil {
+ return
+ }
+ err = detachSign(out, signer, message, sigType, config)
+ if err != nil {
+ return
+ }
+ return out.Close()
+}
+
+func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
+ if signer.PrivateKey == nil {
+ return errors.InvalidArgumentError("signing key doesn't have a private key")
+ }
+ if signer.PrivateKey.Encrypted {
+ return errors.InvalidArgumentError("signing key is encrypted")
+ }
+
+ sig := new(packet.Signature)
+ sig.SigType = sigType
+ sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo
+ sig.Hash = config.Hash()
+ sig.CreationTime = config.Now()
+ sig.IssuerKeyId = &signer.PrivateKey.KeyId
+
+ h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType)
+ if err != nil {
+ return
+ }
+ if _, err = io.Copy(wrappedHash, message); err != nil {
+ return
+ }
+
+ err = sig.Sign(h, signer.PrivateKey, config)
+ if err != nil {
+ return
+ }
+
+ return sig.Serialize(w)
+}
+
+// FileHints contains metadata about encrypted files. This metadata is, itself,
+// encrypted.
+type FileHints struct {
+ // IsBinary can be set to hint that the contents are binary data.
+ IsBinary bool
+ // FileName hints at the name of the file that should be written. It's
+ // truncated to 255 bytes if longer. It may be empty, or equal to the
+ // special value "_CONSOLE", to suggest that the data should not be
+ // written to disk.
+ FileName string
+ // ModTime contains the modification time of the file, or the zero time if not applicable.
+ ModTime time.Time
+}
+
+// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase.
+// The resulting WriteCloser must be closed after the contents of the file have
+// been written.
+// If config is nil, sensible defaults will be used.
+func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+ if hints == nil {
+ hints = &FileHints{}
+ }
+
+ key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config)
+ if err != nil {
+ return
+ }
+ w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config)
+ if err != nil {
+ return
+ }
+
+ literaldata := w
+ if algo := config.Compression(); algo != packet.CompressionNone {
+ var compConfig *packet.CompressionConfig
+ if config != nil {
+ compConfig = config.CompressionConfig
+ }
+ literaldata, err = packet.SerializeCompressed(w, algo, compConfig)
+ if err != nil {
+ return
+ }
+ }
+
+ var epochSeconds uint32
+ if !hints.ModTime.IsZero() {
+ epochSeconds = uint32(hints.ModTime.Unix())
+ }
+ return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds)
+}
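+
+// Illustrative use (a sketch; error handling elided). The returned
+// WriteCloser must be closed to flush the final packets:
+//
+//	w, _ := SymmetricallyEncrypt(&ciphertext, []byte("passphrase"), nil, nil)
+//	w.Write([]byte("hello world\n"))
+//	w.Close()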
+
+// intersectPreferences mutates and returns a prefix of a that contains only
+// the values in the intersection of a and b. The order of a is preserved.
+func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
+ var j int
+ for _, v := range a {
+ for _, v2 := range b {
+ if v == v2 {
+ a[j] = v
+ j++
+ break
+ }
+ }
+ }
+
+ return a[:j]
+}
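+
+// For example, intersectPreferences([]uint8{1, 2, 3}, []uint8{3, 1}) returns
+// []uint8{1, 3}: the order of a is kept and a's backing array is reused.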
+
+func hashToHashId(h crypto.Hash) uint8 {
+ v, ok := s2k.HashToHashId(h)
+ if !ok {
+ panic("tried to convert unknown hash")
+ }
+ return v
+}
+
+// Encrypt encrypts a message to a number of recipients and, optionally, signs
+// it. hints contains optional information, itself encrypted, that aids
+// the recipients in processing the message. The resulting WriteCloser must
+// be closed after the contents of the file have been written.
+// If config is nil, sensible defaults will be used.
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+ var signer *packet.PrivateKey
+ if signed != nil {
+ signKey, ok := signed.signingKey(config.Now())
+ if !ok {
+ return nil, errors.InvalidArgumentError("no valid signing keys")
+ }
+ signer = signKey.PrivateKey
+ if signer == nil {
+ return nil, errors.InvalidArgumentError("no private key in signing key")
+ }
+ if signer.Encrypted {
+ return nil, errors.InvalidArgumentError("signing key must be decrypted")
+ }
+ }
+
+ // These are the possible ciphers that we'll use for the message.
+ candidateCiphers := []uint8{
+ uint8(packet.CipherAES128),
+ uint8(packet.CipherAES256),
+ uint8(packet.CipherCAST5),
+ }
+ // These are the possible hash functions that we'll use for the signature.
+ candidateHashes := []uint8{
+ hashToHashId(crypto.SHA256),
+ hashToHashId(crypto.SHA512),
+ hashToHashId(crypto.SHA1),
+ hashToHashId(crypto.RIPEMD160),
+ }
+ // In the event that a recipient doesn't specify any supported ciphers
+ // or hash functions, these are the ones that we assume that every
+ // implementation supports.
+ defaultCiphers := candidateCiphers[len(candidateCiphers)-1:]
+ defaultHashes := candidateHashes[len(candidateHashes)-1:]
+
+ encryptKeys := make([]Key, len(to))
+ for i := range to {
+ var ok bool
+ encryptKeys[i], ok = to[i].encryptionKey(config.Now())
+ if !ok {
+ return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys")
+ }
+
+ sig := to[i].primaryIdentity().SelfSignature
+
+ preferredSymmetric := sig.PreferredSymmetric
+ if len(preferredSymmetric) == 0 {
+ preferredSymmetric = defaultCiphers
+ }
+ preferredHashes := sig.PreferredHash
+ if len(preferredHashes) == 0 {
+ preferredHashes = defaultHashes
+ }
+ candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric)
+ candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
+ }
+
+ if len(candidateCiphers) == 0 || len(candidateHashes) == 0 {
+ return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms")
+ }
+
+ cipher := packet.CipherFunction(candidateCiphers[0])
+ // If the cipher specified by config is a candidate, we'll use that.
+ configuredCipher := config.Cipher()
+ for _, c := range candidateCiphers {
+ cipherFunc := packet.CipherFunction(c)
+ if cipherFunc == configuredCipher {
+ cipher = cipherFunc
+ break
+ }
+ }
+
+ var hash crypto.Hash
+ for _, hashId := range candidateHashes {
+ if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
+ hash = h
+ break
+ }
+ }
+
+ // If the hash specified by config is a candidate, we'll use that.
+ if configuredHash := config.Hash(); configuredHash.Available() {
+ for _, hashId := range candidateHashes {
+ if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
+ hash = h
+ break
+ }
+ }
+ }
+
+ if hash == 0 {
+ hashId := candidateHashes[0]
+ name, ok := s2k.HashIdToString(hashId)
+ if !ok {
+ name = "#" + strconv.Itoa(int(hashId))
+ }
+ return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
+ }
+
+ symKey := make([]byte, cipher.KeySize())
+ if _, err := io.ReadFull(config.Random(), symKey); err != nil {
+ return nil, err
+ }
+
+ for _, key := range encryptKeys {
+ if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil {
+ return nil, err
+ }
+ }
+
+ encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
+ if err != nil {
+ return
+ }
+
+ if signer != nil {
+ ops := &packet.OnePassSignature{
+ SigType: packet.SigTypeBinary,
+ Hash: hash,
+ PubKeyAlgo: signer.PubKeyAlgo,
+ KeyId: signer.KeyId,
+ IsLast: true,
+ }
+ if err := ops.Serialize(encryptedData); err != nil {
+ return nil, err
+ }
+ }
+
+ if hints == nil {
+ hints = &FileHints{}
+ }
+
+ w := encryptedData
+ if signer != nil {
+ // If we need to write a signature packet after the literal
+ // data then we need to stop literalData from closing
+ // encryptedData.
+ w = noOpCloser{encryptedData}
+
+ }
+ var epochSeconds uint32
+ if !hints.ModTime.IsZero() {
+ epochSeconds = uint32(hints.ModTime.Unix())
+ }
+ literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
+ if err != nil {
+ return nil, err
+ }
+
+ if signer != nil {
+ return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil
+ }
+ return literalData, nil
+}
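+
+// A caller sketch (assumes recipient entities in to; unsigned, no hints,
+// default config; error handling elided):
+//
+//	w, _ := Encrypt(&ciphertext, to, nil, nil, nil)
+//	w.Write(plaintextBytes)
+//	w.Close() // flushes the literal data and any trailing signature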
+
+// signatureWriter hashes the contents of a message while passing it along to
+// literalData. When closed, it closes literalData, writes a signature packet
+// to encryptedData and then also closes encryptedData.
+type signatureWriter struct {
+ encryptedData io.WriteCloser
+ literalData io.WriteCloser
+ hashType crypto.Hash
+ h hash.Hash
+ signer *packet.PrivateKey
+ config *packet.Config
+}
+
+func (s signatureWriter) Write(data []byte) (int, error) {
+ s.h.Write(data)
+ return s.literalData.Write(data)
+}
+
+func (s signatureWriter) Close() error {
+ sig := &packet.Signature{
+ SigType: packet.SigTypeBinary,
+ PubKeyAlgo: s.signer.PubKeyAlgo,
+ Hash: s.hashType,
+ CreationTime: s.config.Now(),
+ IssuerKeyId: &s.signer.KeyId,
+ }
+
+ if err := sig.Sign(s.h, s.signer, s.config); err != nil {
+ return err
+ }
+ if err := s.literalData.Close(); err != nil {
+ return err
+ }
+ if err := sig.Serialize(s.encryptedData); err != nil {
+ return err
+ }
+ return s.encryptedData.Close()
+}
+
+// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
+// TODO: we have two of these in OpenPGP packages alone. This probably needs
+// to be promoted somewhere more common.
+type noOpCloser struct {
+ w io.Writer
+}
+
+func (c noOpCloser) Write(data []byte) (n int, err error) {
+ return c.w.Write(data)
+}
+
+func (c noOpCloser) Close() error {
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/write_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/write_test.go
new file mode 100644
index 00000000000..8e9a33583f5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/openpgp/write_test.go
@@ -0,0 +1,259 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package openpgp
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "testing"
+ "time"
+
+ "golang.org/x/crypto/openpgp/packet"
+)
+
+func TestSignDetached(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex))
+ out := bytes.NewBuffer(nil)
+ message := bytes.NewBufferString(signedInput)
+ err := DetachSign(out, kring[0], message, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId)
+}
+
+func TestSignTextDetached(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(testKeys1And2PrivateHex))
+ out := bytes.NewBuffer(nil)
+ message := bytes.NewBufferString(signedInput)
+ err := DetachSignText(out, kring[0], message, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ testDetachedSignature(t, kring, out, signedInput, "check", testKey1KeyId)
+}
+
+func TestSignDetachedDSA(t *testing.T) {
+ kring, _ := ReadKeyRing(readerFromHex(dsaTestKeyPrivateHex))
+ out := bytes.NewBuffer(nil)
+ message := bytes.NewBufferString(signedInput)
+ err := DetachSign(out, kring[0], message, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ testDetachedSignature(t, kring, out, signedInput, "check", testKey3KeyId)
+}
+
+func TestNewEntity(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+
+ // Check bit-length with no config.
+ e, err := NewEntity("Test User", "test", "test@example.com", nil)
+ if err != nil {
+ t.Errorf("failed to create entity: %s", err)
+ return
+ }
+ bl, err := e.PrimaryKey.BitLength()
+ if err != nil {
+ t.Errorf("failed to find bit length: %s", err)
+ }
+ if int(bl) != defaultRSAKeyBits {
+ t.Errorf("BitLength %v, expected %v", defaultRSAKeyBits)
+ }
+
+ // Check bit-length with a config.
+ cfg := &packet.Config{RSABits: 1024}
+ e, err = NewEntity("Test User", "test", "test@example.com", cfg)
+ if err != nil {
+ t.Errorf("failed to create entity: %s", err)
+ return
+ }
+ bl, err = e.PrimaryKey.BitLength()
+ if err != nil {
+ t.Errorf("failed to find bit length: %s", err)
+ }
+ if int(bl) != cfg.RSABits {
+ t.Errorf("BitLength %v, expected %v", bl, cfg.RSABits)
+ }
+
+ w := bytes.NewBuffer(nil)
+ if err := e.SerializePrivate(w, nil); err != nil {
+ t.Errorf("failed to serialize entity: %s", err)
+ return
+ }
+ serialized := w.Bytes()
+
+ el, err := ReadKeyRing(w)
+ if err != nil {
+ t.Errorf("failed to reparse entity: %s", err)
+ return
+ }
+
+ if len(el) != 1 {
+ t.Errorf("wrong number of entities found, got %d, want 1", len(el))
+ }
+
+ w = bytes.NewBuffer(nil)
+ if err := e.SerializePrivate(w, nil); err != nil {
+ t.Errorf("failed to serialize entity second time: %s", err)
+ return
+ }
+
+ if !bytes.Equal(w.Bytes(), serialized) {
+ t.Errorf("results differed")
+ }
+}
+
+func TestSymmetricEncryption(t *testing.T) {
+ buf := new(bytes.Buffer)
+ plaintext, err := SymmetricallyEncrypt(buf, []byte("testing"), nil, nil)
+ if err != nil {
+ t.Errorf("error writing headers: %s", err)
+ return
+ }
+ message := []byte("hello world\n")
+ _, err = plaintext.Write(message)
+ if err != nil {
+ t.Errorf("error writing to plaintext writer: %s", err)
+ }
+ err = plaintext.Close()
+ if err != nil {
+ t.Errorf("error closing plaintext writer: %s", err)
+ }
+
+ md, err := ReadMessage(buf, nil, func(keys []Key, symmetric bool) ([]byte, error) {
+ return []byte("testing"), nil
+ }, nil)
+ if err != nil {
+ t.Errorf("error rereading message: %s", err)
+ }
+ messageBuf := bytes.NewBuffer(nil)
+ _, err = io.Copy(messageBuf, md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("error rereading message: %s", err)
+ }
+ if !bytes.Equal(message, messageBuf.Bytes()) {
+ t.Errorf("recovered message incorrect got '%s', want '%s'", messageBuf.Bytes(), message)
+ }
+}
+
+var testEncryptionTests = []struct {
+ keyRingHex string
+ isSigned bool
+}{
+ {
+ testKeys1And2PrivateHex,
+ false,
+ },
+ {
+ testKeys1And2PrivateHex,
+ true,
+ },
+ {
+ dsaElGamalTestKeysHex,
+ false,
+ },
+ {
+ dsaElGamalTestKeysHex,
+ true,
+ },
+}
+
+func TestEncryption(t *testing.T) {
+ for i, test := range testEncryptionTests {
+ kring, _ := ReadKeyRing(readerFromHex(test.keyRingHex))
+
+ passphrase := []byte("passphrase")
+ for _, entity := range kring {
+ if entity.PrivateKey != nil && entity.PrivateKey.Encrypted {
+ err := entity.PrivateKey.Decrypt(passphrase)
+ if err != nil {
+ t.Errorf("#%d: failed to decrypt key", i)
+ }
+ }
+ for _, subkey := range entity.Subkeys {
+ if subkey.PrivateKey != nil && subkey.PrivateKey.Encrypted {
+ err := subkey.PrivateKey.Decrypt(passphrase)
+ if err != nil {
+ t.Errorf("#%d: failed to decrypt subkey", i)
+ }
+ }
+ }
+ }
+
+ var signed *Entity
+ if test.isSigned {
+ signed = kring[0]
+ }
+
+ buf := new(bytes.Buffer)
+ w, err := Encrypt(buf, kring[:1], signed, nil /* no hints */, nil)
+ if err != nil {
+ t.Errorf("#%d: error in Encrypt: %s", i, err)
+ continue
+ }
+
+ const message = "testing"
+ _, err = w.Write([]byte(message))
+ if err != nil {
+ t.Errorf("#%d: error writing plaintext: %s", i, err)
+ continue
+ }
+ err = w.Close()
+ if err != nil {
+ t.Errorf("#%d: error closing WriteCloser: %s", i, err)
+ continue
+ }
+
+ md, err := ReadMessage(buf, kring, nil /* no prompt */, nil)
+ if err != nil {
+ t.Errorf("#%d: error reading message: %s", i, err)
+ continue
+ }
+
+ testTime, _ := time.Parse("2006-01-02", "2013-07-01")
+ if test.isSigned {
+ signKey, _ := kring[0].signingKey(testTime)
+ expectedKeyId := signKey.PublicKey.KeyId
+ if md.SignedByKeyId != expectedKeyId {
+ t.Errorf("#%d: message signed by wrong key id, got: %d, want: %d", i, *md.SignedBy, expectedKeyId)
+ }
+ if md.SignedBy == nil {
+ t.Errorf("#%d: failed to find the signing Entity", i)
+ }
+ }
+
+ plaintext, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ t.Errorf("#%d: error reading encrypted contents: %s", i, err)
+ continue
+ }
+
+ encryptKey, _ := kring[0].encryptionKey(testTime)
+ expectedKeyId := encryptKey.PublicKey.KeyId
+ if len(md.EncryptedToKeyIds) != 1 || md.EncryptedToKeyIds[0] != expectedKeyId {
+ t.Errorf("#%d: expected message to be encrypted to %v, but got %#v", i, expectedKeyId, md.EncryptedToKeyIds)
+ }
+
+ if string(plaintext) != message {
+ t.Errorf("#%d: got: %s, want: %s", i, string(plaintext), message)
+ }
+
+ if test.isSigned {
+ if md.SignatureError != nil {
+ t.Errorf("#%d: signature error: %s", i, md.SignatureError)
+ }
+ if md.Signature == nil {
+ t.Error("signature missing")
+ }
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/libotr_test_helper.c b/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/libotr_test_helper.c
new file mode 100644
index 00000000000..b3ca072d480
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/libotr_test_helper.c
@@ -0,0 +1,197 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code can be compiled and used to test the otr package against libotr.
+// See otr_test.go.
+
+// +build ignore
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <proto.h>
+#include <message.h>
+#include <privkey.h>
+
+static int g_session_established = 0;
+
+OtrlPolicy policy(void *opdata, ConnContext *context) {
+ return OTRL_POLICY_ALWAYS;
+}
+
+int is_logged_in(void *opdata, const char *accountname, const char *protocol,
+ const char *recipient) {
+ return 1;
+}
+
+void inject_message(void *opdata, const char *accountname, const char *protocol,
+ const char *recipient, const char *message) {
+ printf("%s\n", message);
+ fflush(stdout);
+ fprintf(stderr, "libotr helper sent: %s\n", message);
+}
+
+void update_context_list(void *opdata) {}
+
+void new_fingerprint(void *opdata, OtrlUserState us, const char *accountname,
+ const char *protocol, const char *username,
+ unsigned char fingerprint[20]) {
+ fprintf(stderr, "NEW FINGERPRINT\n");
+ g_session_established = 1;
+}
+
+void write_fingerprints(void *opdata) {}
+
+void gone_secure(void *opdata, ConnContext *context) {}
+
+void gone_insecure(void *opdata, ConnContext *context) {}
+
+void still_secure(void *opdata, ConnContext *context, int is_reply) {}
+
+int max_message_size(void *opdata, ConnContext *context) { return 99999; }
+
+const char *account_name(void *opdata, const char *account,
+ const char *protocol) {
+ return "ACCOUNT";
+}
+
+void account_name_free(void *opdata, const char *account_name) {}
+
+const char *error_message(void *opdata, ConnContext *context,
+ OtrlErrorCode err_code) {
+ return "ERR";
+}
+
+void error_message_free(void *opdata, const char *msg) {}
+
+void resent_msg_prefix_free(void *opdata, const char *prefix) {}
+
+void handle_smp_event(void *opdata, OtrlSMPEvent smp_event,
+ ConnContext *context, unsigned short progress_event,
+ char *question) {}
+
+void handle_msg_event(void *opdata, OtrlMessageEvent msg_event,
+ ConnContext *context, const char *message,
+ gcry_error_t err) {
+ fprintf(stderr, "msg event: %d %s\n", msg_event, message);
+}
+
+OtrlMessageAppOps uiops = {
+ policy,
+ NULL,
+ is_logged_in,
+ inject_message,
+ update_context_list,
+ new_fingerprint,
+ write_fingerprints,
+ gone_secure,
+ gone_insecure,
+ still_secure,
+ max_message_size,
+ account_name,
+ account_name_free,
+ NULL, /* received_symkey */
+ error_message,
+ error_message_free,
+ NULL, /* resent_msg_prefix */
+ resent_msg_prefix_free,
+ handle_smp_event,
+ handle_msg_event,
+ NULL /* create_instag */,
+ NULL /* convert_msg */,
+ NULL /* convert_free */,
+ NULL /* timer_control */,
+};
+
+static const char kPrivateKeyData[] =
+ "(privkeys (account (name \"account\") (protocol proto) (private-key (dsa "
+ "(p "
+ "#00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F"
+ "30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E"
+ "5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB"
+ "8C031D3561FECEE72EBB4A090D450A9B7A857#) (q "
+ "#00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#) (g "
+ "#535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F"
+ "1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F"
+ "6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57"
+ "597766A2F9CE3857D7ACE3E1E3BC1FC6F26#) (y "
+ "#0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF"
+ "2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93"
+ "454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A"
+ "3C0FF501E3DC673B76D7BABF349009B6ECF#) (x "
+ "#14D0345A3562C480A039E3C72764F72D79043216#)))))\n";
+
+int main() {
+ OTRL_INIT;
+
+ // We have to write the private key information to a file because the libotr
+ // API demands a filename to read from.
+ const char *tmpdir = "/tmp";
+ if (getenv("TMP")) {
+ tmpdir = getenv("TMP");
+ }
+
+ char private_key_file[256];
+ snprintf(private_key_file, sizeof(private_key_file),
+ "%s/libotr_test_helper_privatekeys-XXXXXX", tmpdir);
+ int fd = mkstemp(private_key_file);
+ if (fd == -1) {
+ perror("creating temp file");
+ }
+ write(fd, kPrivateKeyData, sizeof(kPrivateKeyData) - 1);
+ close(fd);
+
+ OtrlUserState userstate = otrl_userstate_create();
+ otrl_privkey_read(userstate, private_key_file);
+ unlink(private_key_file);
+
+ fprintf(stderr, "libotr helper started\n");
+
+ char buf[4096];
+
+ for (;;) {
+ char *message = fgets(buf, sizeof(buf), stdin);
+ if (message == NULL || strlen(message) == 0) {
+ break;
+ }
+ message[strlen(message) - 1] = 0;
+ fprintf(stderr, "libotr helper got: %s\n", message);
+
+ char *newmessage = NULL;
+ OtrlTLV *tlvs;
+ int ignore_message = otrl_message_receiving(
+ userstate, &uiops, NULL, "account", "proto", "peer", message,
+ &newmessage, &tlvs, NULL, NULL, NULL);
+ if (tlvs) {
+ otrl_tlv_free(tlvs);
+ }
+
+ if (newmessage != NULL) {
+ fprintf(stderr, "libotr got: %s\n", newmessage);
+ otrl_message_free(newmessage);
+
+ gcry_error_t err;
+ char *newmessage = NULL;
+
+ err = otrl_message_sending(userstate, &uiops, NULL, "account", "proto",
+ "peer", 0, "test message", NULL, &newmessage,
+ OTRL_FRAGMENT_SEND_SKIP, NULL, NULL, NULL);
+ if (newmessage == NULL) {
+ fprintf(stderr, "libotr didn't encrypt message\n");
+ return 1;
+ }
+ write(1, newmessage, strlen(newmessage));
+ write(1, "\n", 1);
+ fprintf(stderr, "libotr sent: %s\n", newmessage);
+ otrl_message_free(newmessage);
+
+ g_session_established = 0;
+ write(1, "?OTRv2?\n", 8);
+ fprintf(stderr, "libotr sent: ?OTRv2\n");
+ }
+ }
+
+ return 0;
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/otr.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/otr.go
new file mode 100644
index 00000000000..549be116df2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/otr.go
@@ -0,0 +1,1408 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package otr implements the Off The Record protocol as specified in
+// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html
+package otr // import "golang.org/x/crypto/otr"
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/dsa"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/subtle"
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "hash"
+ "io"
+ "math/big"
+ "strconv"
+)
+
+// SecurityChange describes a change in the security state of a Conversation.
+type SecurityChange int
+
+const (
+ NoChange SecurityChange = iota
+ // NewKeys indicates that a key exchange has completed. This occurs
+ // when a conversation first becomes encrypted, and when the keys are
+ // renegotiated within an encrypted conversation.
+ NewKeys
+ // SMPSecretNeeded indicates that the peer has started an
+ // authentication and that we need to supply a secret. Call SMPQuestion
+ // to get the optional, human readable challenge and then Authenticate
+ // to supply the matching secret.
+ SMPSecretNeeded
+ // SMPComplete indicates that an authentication completed. The identity
+ // of the peer has now been confirmed.
+ SMPComplete
+ // SMPFailed indicates that an authentication failed.
+ SMPFailed
+ // ConversationEnded indicates that the peer ended the secure
+ // conversation.
+ ConversationEnded
+)
+
+// QueryMessage can be sent to a peer to start an OTR conversation.
+var QueryMessage = "?OTRv2?"
+
+// ErrorPrefix can be used to make an OTR error by appending an error message
+// to it.
+var ErrorPrefix = "?OTR Error:"
+
+var (
+ fragmentPartSeparator = []byte(",")
+ fragmentPrefix = []byte("?OTR,")
+ msgPrefix = []byte("?OTR:")
+ queryMarker = []byte("?OTR")
+)
+
+// isQuery attempts to parse an OTR query from msg and returns the greatest
+// common version, or 0 if msg is not an OTR query.
+func isQuery(msg []byte) (greatestCommonVersion int) {
+ pos := bytes.Index(msg, queryMarker)
+ if pos == -1 {
+ return 0
+ }
+ for i, c := range msg[pos+len(queryMarker):] {
+ if i == 0 {
+ if c == '?' {
+ // Indicates support for version 1, but we don't
+ // implement that.
+ continue
+ }
+
+ if c != 'v' {
+ // Invalid message
+ return 0
+ }
+
+ continue
+ }
+
+ if c == '?' {
+ // End of message
+ return
+ }
+
+ if c == ' ' || c == '\t' {
+ // Probably an invalid message
+ return 0
+ }
+
+ if c == '2' {
+ greatestCommonVersion = 2
+ }
+ }
+
+ return 0
+}
+
+const (
+ statePlaintext = iota
+ stateEncrypted
+ stateFinished
+)
+
+const (
+ authStateNone = iota
+ authStateAwaitingDHKey
+ authStateAwaitingRevealSig
+ authStateAwaitingSig
+)
+
+const (
+ msgTypeDHCommit = 2
+ msgTypeData = 3
+ msgTypeDHKey = 10
+ msgTypeRevealSig = 17
+ msgTypeSig = 18
+)
+
+const (
+ // If the requested fragment size is less than this, it will be ignored.
+ minFragmentSize = 18
+ // Messages are padded to a multiple of this number of bytes.
+ paddingGranularity = 256
+ // The number of bytes in a Diffie-Hellman private value (320-bits).
+ dhPrivateBytes = 40
+ // The number of bytes needed to represent an element of the DSA
+ // subgroup (160-bits).
+ dsaSubgroupBytes = 20
+ // The number of bytes of the MAC that are sent on the wire (160-bits).
+ macPrefixBytes = 20
+)
+
+// These are the global, common group parameters for OTR.
+var (
+ p *big.Int // group prime
+ g *big.Int // group generator
+ q *big.Int // group order
+ pMinus2 *big.Int
+)
+
+func init() {
+ p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", 16)
+ q, _ = new(big.Int).SetString("7FFFFFFFFFFFFFFFE487ED5110B4611A62633145C06E0E68948127044533E63A0105DF531D89CD9128A5043CC71A026EF7CA8CD9E69D218D98158536F92F8A1BA7F09AB6B6A8E122F242DABB312F3F637A262174D31BF6B585FFAE5B7A035BF6F71C35FDAD44CFD2D74F9208BE258FF324943328F6722D9EE1003E5C50B1DF82CC6D241B0E2AE9CD348B1FD47E9267AFC1B2AE91EE51D6CB0E3179AB1042A95DCF6A9483B84B4B36B3861AA7255E4C0278BA36046511B993FFFFFFFFFFFFFFFF", 16)
+ g = new(big.Int).SetInt64(2)
+ pMinus2 = new(big.Int).Sub(p, g)
+}
+
+// Conversation represents a relation with a peer. The zero value is a valid
+// Conversation, although PrivateKey must be set.
+//
+// When communicating with a peer, all inbound messages should be passed to
+// Conversation.Receive and all outbound messages to Conversation.Send. The
+// Conversation will take care of maintaining the encryption state and
+// negotiating encryption as needed.
+type Conversation struct {
+ // PrivateKey contains the private key to use to sign key exchanges.
+ PrivateKey *PrivateKey
+
+ // Rand can be set to override the entropy source. Otherwise,
+ // crypto/rand will be used.
+ Rand io.Reader
+ // If FragmentSize is set, all messages produced by Receive and Send
+ // will be fragmented into messages of, at most, this number of bytes.
+ FragmentSize int
+
+ // Once Receive has returned NewKeys once, the following fields are
+ // valid.
+ SSID [8]byte
+ TheirPublicKey PublicKey
+
+ state, authState int
+
+ r [16]byte
+ x, y *big.Int
+ gx, gy *big.Int
+ gxBytes []byte
+ digest [sha256.Size]byte
+
+ revealKeys, sigKeys akeKeys
+
+ myKeyId uint32
+ myCurrentDHPub *big.Int
+ myCurrentDHPriv *big.Int
+ myLastDHPub *big.Int
+ myLastDHPriv *big.Int
+
+ theirKeyId uint32
+ theirCurrentDHPub *big.Int
+ theirLastDHPub *big.Int
+
+ keySlots [4]keySlot
+
+ myCounter [8]byte
+ theirLastCtr [8]byte
+ oldMACs []byte
+
+ k, n int // fragment state
+ frag []byte
+
+ smp smpState
+}
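+
+// A minimal receive-loop sketch (conv has PrivateKey set; peer is a
+// hypothetical transport; error handling elided):
+//
+//	out, encrypted, _, toSend, _ := conv.Receive(incoming)
+//	for _, m := range toSend {
+//		peer.Send(m) // protocol messages go back unmodified
+//	}
+//	// out holds the (possibly decrypted) message; encrypted reports which.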
+
+// A keySlot contains key material for a specific (their keyid, my keyid) pair.
+type keySlot struct {
+ // used is true if this slot is valid. If false, it's free for reuse.
+ used bool
+ theirKeyId uint32
+ myKeyId uint32
+ sendAESKey, recvAESKey []byte
+ sendMACKey, recvMACKey []byte
+ theirLastCtr [8]byte
+}
+
+// akeKeys are generated during key exchange. There's one set for the reveal
+// signature message and another for the signature message. In the protocol
+// spec the latter are indicated with a prime mark.
+type akeKeys struct {
+ c [16]byte
+ m1, m2 [32]byte
+}
+
+func (c *Conversation) rand() io.Reader {
+ if c.Rand != nil {
+ return c.Rand
+ }
+ return rand.Reader
+}
+
+func (c *Conversation) randMPI(buf []byte) *big.Int {
+ _, err := io.ReadFull(c.rand(), buf)
+ if err != nil {
+ panic("otr: short read from random source")
+ }
+
+ return new(big.Int).SetBytes(buf)
+}
+
+// tlv represents the type-length value from the protocol.
+type tlv struct {
+ typ, length uint16
+ data []byte
+}
+
+const (
+ tlvTypePadding = 0
+ tlvTypeDisconnected = 1
+ tlvTypeSMP1 = 2
+ tlvTypeSMP2 = 3
+ tlvTypeSMP3 = 4
+ tlvTypeSMP4 = 5
+ tlvTypeSMPAbort = 6
+ tlvTypeSMP1WithQuestion = 7
+)
+
+// Receive handles a message from a peer. It returns a human readable message,
+// an indicator of whether that message was encrypted, a hint about the
+// encryption state and zero or more messages to send back to the peer.
+// These messages do not need to be passed to Send before transmission.
+func (c *Conversation) Receive(in []byte) (out []byte, encrypted bool, change SecurityChange, toSend [][]byte, err error) {
+ if bytes.HasPrefix(in, fragmentPrefix) {
+ in, err = c.processFragment(in)
+ if in == nil || err != nil {
+ return
+ }
+ }
+
+ if bytes.HasPrefix(in, msgPrefix) && in[len(in)-1] == '.' {
+ in = in[len(msgPrefix) : len(in)-1]
+ } else if version := isQuery(in); version > 0 {
+ c.authState = authStateAwaitingDHKey
+ c.reset()
+ toSend = c.encode(c.generateDHCommit())
+ return
+ } else {
+ // plaintext message
+ out = in
+ return
+ }
+
+ msg := make([]byte, base64.StdEncoding.DecodedLen(len(in)))
+ msgLen, err := base64.StdEncoding.Decode(msg, in)
+ if err != nil {
+ err = errors.New("otr: invalid base64 encoding in message")
+ return
+ }
+ msg = msg[:msgLen]
+
+ // The first two bytes are the protocol version (2)
+ if len(msg) < 3 || msg[0] != 0 || msg[1] != 2 {
+ err = errors.New("otr: invalid OTR message")
+ return
+ }
+
+ msgType := int(msg[2])
+ msg = msg[3:]
+
+ switch msgType {
+ case msgTypeDHCommit:
+ switch c.authState {
+ case authStateNone:
+ c.authState = authStateAwaitingRevealSig
+ if err = c.processDHCommit(msg); err != nil {
+ return
+ }
+ c.reset()
+ toSend = c.encode(c.generateDHKey())
+ return
+ case authStateAwaitingDHKey:
+ // This is a 'SYN-crossing'. The greater digest wins.
+ var cmp int
+ if cmp, err = c.compareToDHCommit(msg); err != nil {
+ return
+ }
+ if cmp > 0 {
+ // We win. Retransmit DH commit.
+ toSend = c.encode(c.serializeDHCommit())
+ return
+ } else {
+ // They win. We forget about our DH commit.
+ c.authState = authStateAwaitingRevealSig
+ if err = c.processDHCommit(msg); err != nil {
+ return
+ }
+ c.reset()
+ toSend = c.encode(c.generateDHKey())
+ return
+ }
+ case authStateAwaitingRevealSig:
+ if err = c.processDHCommit(msg); err != nil {
+ return
+ }
+ toSend = c.encode(c.serializeDHKey())
+ case authStateAwaitingSig:
+ if err = c.processDHCommit(msg); err != nil {
+ return
+ }
+ c.reset()
+ toSend = c.encode(c.generateDHKey())
+ c.authState = authStateAwaitingRevealSig
+ default:
+ panic("bad state")
+ }
+ case msgTypeDHKey:
+ switch c.authState {
+ case authStateAwaitingDHKey:
+ var isSame bool
+ if isSame, err = c.processDHKey(msg); err != nil {
+ return
+ }
+ if isSame {
+ err = errors.New("otr: unexpected duplicate DH key")
+ return
+ }
+ toSend = c.encode(c.generateRevealSig())
+ c.authState = authStateAwaitingSig
+ case authStateAwaitingSig:
+ var isSame bool
+ if isSame, err = c.processDHKey(msg); err != nil {
+ return
+ }
+ if isSame {
+ toSend = c.encode(c.serializeDHKey())
+ }
+ }
+ case msgTypeRevealSig:
+ if c.authState != authStateAwaitingRevealSig {
+ return
+ }
+ if err = c.processRevealSig(msg); err != nil {
+ return
+ }
+ toSend = c.encode(c.generateSig())
+ c.authState = authStateNone
+ c.state = stateEncrypted
+ change = NewKeys
+ case msgTypeSig:
+ if c.authState != authStateAwaitingSig {
+ return
+ }
+ if err = c.processSig(msg); err != nil {
+ return
+ }
+ c.authState = authStateNone
+ c.state = stateEncrypted
+ change = NewKeys
+ case msgTypeData:
+ if c.state != stateEncrypted {
+ err = errors.New("otr: encrypted message received without encrypted session established")
+ return
+ }
+ var tlvs []tlv
+ out, tlvs, err = c.processData(msg)
+ encrypted = true
+
+ EachTLV:
+ for _, inTLV := range tlvs {
+ switch inTLV.typ {
+ case tlvTypeDisconnected:
+ change = ConversationEnded
+ c.state = stateFinished
+ break EachTLV
+ case tlvTypeSMP1, tlvTypeSMP2, tlvTypeSMP3, tlvTypeSMP4, tlvTypeSMPAbort, tlvTypeSMP1WithQuestion:
+ var reply tlv
+ var complete bool
+ reply, complete, err = c.processSMP(inTLV)
+ if err == smpSecretMissingError {
+ err = nil
+ change = SMPSecretNeeded
+ c.smp.saved = &inTLV
+ return
+ }
+ if err == smpFailureError {
+ err = nil
+ change = SMPFailed
+ } else if complete {
+ change = SMPComplete
+ }
+ if reply.typ != 0 {
+ toSend = c.encode(c.generateData(nil, &reply))
+ }
+ break EachTLV
+ default:
+ // skip unknown TLVs
+ }
+ }
+ default:
+ err = errors.New("otr: unknown message type " + strconv.Itoa(msgType))
+ }
+
+ return
+}
+
+// Send takes a human readable message from the local user, possibly encrypts
+// it and returns zero, one or more messages to send to the peer.
+func (c *Conversation) Send(msg []byte) ([][]byte, error) {
+ switch c.state {
+ case statePlaintext:
+ return [][]byte{msg}, nil
+ case stateEncrypted:
+ return c.encode(c.generateData(msg, nil)), nil
+ case stateFinished:
+ return nil, errors.New("otr: cannot send message because secure conversation has finished")
+ }
+
+ return nil, errors.New("otr: cannot send message in current state")
+}
+
+// SMPQuestion returns the human readable challenge question from the peer.
+// It's only valid after Receive has returned SMPSecretNeeded.
+func (c *Conversation) SMPQuestion() string {
+ return c.smp.question
+}
+
+// Authenticate begins an authentication with the peer. Authentication involves
+// an optional challenge message and a shared secret. The authentication
+// proceeds until either Receive returns SMPComplete, SMPSecretNeeded (which
+// indicates that a new authentication is happening and thus this one was
+// aborted) or SMPFailed.
+func (c *Conversation) Authenticate(question string, mutualSecret []byte) (toSend [][]byte, err error) {
+ if c.state != stateEncrypted {
+ err = errors.New("otr: can't authenticate a peer without a secure conversation established")
+ return
+ }
+
+ if c.smp.saved != nil {
+ c.calcSMPSecret(mutualSecret, false /* they started it */)
+
+ var out tlv
+ var complete bool
+ out, complete, err = c.processSMP(*c.smp.saved)
+ if complete {
+ panic("SMP completed on the first message")
+ }
+ c.smp.saved = nil
+ if out.typ != 0 {
+ toSend = c.encode(c.generateData(nil, &out))
+ }
+ return
+ }
+
+ c.calcSMPSecret(mutualSecret, true /* we started it */)
+ outs := c.startSMP(question)
+ for _, out := range outs {
+ toSend = append(toSend, c.encode(c.generateData(nil, &out))...)
+ }
+ return
+}
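+
+// For example, to challenge a peer with a question (a sketch; error
+// handling elided):
+//
+//	msgs, _ := conv.Authenticate("first pet's name?", []byte("rex"))
+//	// transmit each element of msgs to the peer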
+
+// End ends a secure conversation by generating a termination message for
+// the peer and switches to unencrypted communication.
+func (c *Conversation) End() (toSend [][]byte) {
+ switch c.state {
+ case statePlaintext:
+ return nil
+ case stateEncrypted:
+ c.state = statePlaintext
+ return c.encode(c.generateData(nil, &tlv{typ: tlvTypeDisconnected}))
+ case stateFinished:
+ c.state = statePlaintext
+ return nil
+ }
+ panic("unreachable")
+}
+
+// IsEncrypted returns true if a message passed to Send would be encrypted
+// before transmission. This result remains valid until the next call to
+// Receive or End, which may change the state of the Conversation.
+func (c *Conversation) IsEncrypted() bool {
+ return c.state == stateEncrypted
+}
+
+var fragmentError = errors.New("otr: invalid OTR fragment")
+
+// processFragment processes a fragmented OTR message and possibly returns a
+// complete message. Fragmented messages look like "?OTR,k,n,msg," where k is
+// the fragment number (starting from 1), n is the number of fragments in this
+// message and msg is a substring of the base64 encoded message.
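+// For example, "?OTR,1,2,?OTR:AAMD," followed by "?OTR,2,2,=.," reassembles
+// to "?OTR:AAMD=.".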
+func (c *Conversation) processFragment(in []byte) (out []byte, err error) {
+ in = in[len(fragmentPrefix):] // remove "?OTR,"
+ parts := bytes.Split(in, fragmentPartSeparator)
+ if len(parts) != 4 || len(parts[3]) != 0 {
+ return nil, fragmentError
+ }
+
+ k, err := strconv.Atoi(string(parts[0]))
+ if err != nil {
+ return nil, fragmentError
+ }
+
+ n, err := strconv.Atoi(string(parts[1]))
+ if err != nil {
+ return nil, fragmentError
+ }
+
+ if k < 1 || n < 1 || k > n {
+ return nil, fragmentError
+ }
+
+ if k == 1 {
+ c.frag = append(c.frag[:0], parts[2]...)
+ c.k, c.n = k, n
+ } else if n == c.n && k == c.k+1 {
+ c.frag = append(c.frag, parts[2]...)
+ c.k++
+ } else {
+ c.frag = c.frag[:0]
+ c.n, c.k = 0, 0
+ }
+
+ if c.n > 0 && c.k == c.n {
+ c.n, c.k = 0, 0
+ return c.frag, nil
+ }
+
+ return nil, nil
+}
+
+func (c *Conversation) generateDHCommit() []byte {
+ _, err := io.ReadFull(c.rand(), c.r[:])
+ if err != nil {
+ panic("otr: short read from random source")
+ }
+
+ var xBytes [dhPrivateBytes]byte
+ c.x = c.randMPI(xBytes[:])
+ c.gx = new(big.Int).Exp(g, c.x, p)
+ c.gy = nil
+ c.gxBytes = appendMPI(nil, c.gx)
+
+ h := sha256.New()
+ h.Write(c.gxBytes)
+ h.Sum(c.digest[:0])
+
+ aesCipher, err := aes.NewCipher(c.r[:])
+ if err != nil {
+ panic(err.Error())
+ }
+
+ var iv [aes.BlockSize]byte
+ ctr := cipher.NewCTR(aesCipher, iv[:])
+ ctr.XORKeyStream(c.gxBytes, c.gxBytes)
+
+ return c.serializeDHCommit()
+}
+
+func (c *Conversation) serializeDHCommit() []byte {
+ var ret []byte
+ ret = appendU16(ret, 2) // protocol version
+ ret = append(ret, msgTypeDHCommit)
+ ret = appendData(ret, c.gxBytes)
+ ret = appendData(ret, c.digest[:])
+ return ret
+}
+
+func (c *Conversation) processDHCommit(in []byte) error {
+ var ok1, ok2 bool
+ c.gxBytes, in, ok1 = getData(in)
+ digest, in, ok2 := getData(in)
+ if !ok1 || !ok2 || len(in) > 0 {
+ return errors.New("otr: corrupt DH commit message")
+ }
+ copy(c.digest[:], digest)
+ return nil
+}
+
+func (c *Conversation) compareToDHCommit(in []byte) (int, error) {
+ _, in, ok1 := getData(in)
+ digest, in, ok2 := getData(in)
+ if !ok1 || !ok2 || len(in) > 0 {
+ return 0, errors.New("otr: corrupt DH commit message")
+ }
+ return bytes.Compare(c.digest[:], digest), nil
+}
+
+func (c *Conversation) generateDHKey() []byte {
+ var yBytes [dhPrivateBytes]byte
+ c.y = c.randMPI(yBytes[:])
+ c.gy = new(big.Int).Exp(g, c.y, p)
+ return c.serializeDHKey()
+}
+
+func (c *Conversation) serializeDHKey() []byte {
+ var ret []byte
+ ret = appendU16(ret, 2) // protocol version
+ ret = append(ret, msgTypeDHKey)
+ ret = appendMPI(ret, c.gy)
+ return ret
+}
+
+func (c *Conversation) processDHKey(in []byte) (isSame bool, err error) {
+ gy, in, ok := getMPI(in)
+ if !ok {
+ err = errors.New("otr: corrupt DH key message")
+ return
+ }
+ if gy.Cmp(g) < 0 || gy.Cmp(pMinus2) > 0 {
+ err = errors.New("otr: DH value out of range")
+ return
+ }
+ if c.gy != nil {
+ isSame = c.gy.Cmp(gy) == 0
+ return
+ }
+ c.gy = gy
+ return
+}
+
+func (c *Conversation) generateEncryptedSignature(keys *akeKeys, xFirst bool) ([]byte, []byte) {
+ var xb []byte
+ xb = c.PrivateKey.PublicKey.Serialize(xb)
+
+ var verifyData []byte
+ if xFirst {
+ verifyData = appendMPI(verifyData, c.gx)
+ verifyData = appendMPI(verifyData, c.gy)
+ } else {
+ verifyData = appendMPI(verifyData, c.gy)
+ verifyData = appendMPI(verifyData, c.gx)
+ }
+ verifyData = append(verifyData, xb...)
+ verifyData = appendU32(verifyData, c.myKeyId)
+
+ mac := hmac.New(sha256.New, keys.m1[:])
+ mac.Write(verifyData)
+ mb := mac.Sum(nil)
+
+ xb = appendU32(xb, c.myKeyId)
+ xb = append(xb, c.PrivateKey.Sign(c.rand(), mb)...)
+
+ aesCipher, err := aes.NewCipher(keys.c[:])
+ if err != nil {
+ panic(err.Error())
+ }
+ var iv [aes.BlockSize]byte
+ ctr := cipher.NewCTR(aesCipher, iv[:])
+ ctr.XORKeyStream(xb, xb)
+
+ mac = hmac.New(sha256.New, keys.m2[:])
+ encryptedSig := appendData(nil, xb)
+ mac.Write(encryptedSig)
+
+ return encryptedSig, mac.Sum(nil)
+}
+
+func (c *Conversation) generateRevealSig() []byte {
+ s := new(big.Int).Exp(c.gy, c.x, p)
+ c.calcAKEKeys(s)
+ c.myKeyId++
+
+ encryptedSig, mac := c.generateEncryptedSignature(&c.revealKeys, true /* gx comes first */)
+
+ c.myCurrentDHPub = c.gx
+ c.myCurrentDHPriv = c.x
+ c.rotateDHKeys()
+ incCounter(&c.myCounter)
+
+ var ret []byte
+ ret = appendU16(ret, 2)
+ ret = append(ret, msgTypeRevealSig)
+ ret = appendData(ret, c.r[:])
+ ret = append(ret, encryptedSig...)
+ ret = append(ret, mac[:20]...)
+ return ret
+}
+
+func (c *Conversation) processEncryptedSig(encryptedSig, theirMAC []byte, keys *akeKeys, xFirst bool) error {
+ mac := hmac.New(sha256.New, keys.m2[:])
+ mac.Write(appendData(nil, encryptedSig))
+ myMAC := mac.Sum(nil)[:20]
+
+ if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 {
+ return errors.New("bad signature MAC in encrypted signature")
+ }
+
+ aesCipher, err := aes.NewCipher(keys.c[:])
+ if err != nil {
+ panic(err.Error())
+ }
+ var iv [aes.BlockSize]byte
+ ctr := cipher.NewCTR(aesCipher, iv[:])
+ ctr.XORKeyStream(encryptedSig, encryptedSig)
+
+ sig := encryptedSig
+ sig, ok1 := c.TheirPublicKey.Parse(sig)
+ keyId, sig, ok2 := getU32(sig)
+ if !ok1 || !ok2 {
+ return errors.New("otr: corrupt encrypted signature")
+ }
+
+ var verifyData []byte
+ if xFirst {
+ verifyData = appendMPI(verifyData, c.gx)
+ verifyData = appendMPI(verifyData, c.gy)
+ } else {
+ verifyData = appendMPI(verifyData, c.gy)
+ verifyData = appendMPI(verifyData, c.gx)
+ }
+ verifyData = c.TheirPublicKey.Serialize(verifyData)
+ verifyData = appendU32(verifyData, keyId)
+
+ mac = hmac.New(sha256.New, keys.m1[:])
+ mac.Write(verifyData)
+ mb := mac.Sum(nil)
+
+ sig, ok1 = c.TheirPublicKey.Verify(mb, sig)
+ if !ok1 {
+ return errors.New("bad signature in encrypted signature")
+ }
+ if len(sig) > 0 {
+ return errors.New("corrupt encrypted signature")
+ }
+
+ c.theirKeyId = keyId
+ zero(c.theirLastCtr[:])
+ return nil
+}
+
+func (c *Conversation) processRevealSig(in []byte) error {
+ r, in, ok1 := getData(in)
+ encryptedSig, in, ok2 := getData(in)
+ theirMAC := in
+ if !ok1 || !ok2 || len(theirMAC) != 20 {
+ return errors.New("otr: corrupt reveal signature message")
+ }
+
+ aesCipher, err := aes.NewCipher(r)
+ if err != nil {
+ return errors.New("otr: cannot create AES cipher from reveal signature message: " + err.Error())
+ }
+ var iv [aes.BlockSize]byte
+ ctr := cipher.NewCTR(aesCipher, iv[:])
+ ctr.XORKeyStream(c.gxBytes, c.gxBytes)
+ h := sha256.New()
+ h.Write(c.gxBytes)
+ digest := h.Sum(nil)
+ if len(digest) != len(c.digest) || subtle.ConstantTimeCompare(digest, c.digest[:]) == 0 {
+ return errors.New("otr: bad commit MAC in reveal signature message")
+ }
+ var rest []byte
+ c.gx, rest, ok1 = getMPI(c.gxBytes)
+ if !ok1 || len(rest) > 0 {
+ return errors.New("otr: gx corrupt after decryption")
+ }
+ if c.gx.Cmp(g) < 0 || c.gx.Cmp(pMinus2) > 0 {
+ return errors.New("otr: DH value out of range")
+ }
+ s := new(big.Int).Exp(c.gx, c.y, p)
+ c.calcAKEKeys(s)
+
+ if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.revealKeys, true /* gx comes first */); err != nil {
+ return errors.New("otr: in reveal signature message: " + err.Error())
+ }
+
+ c.theirCurrentDHPub = c.gx
+ c.theirLastDHPub = nil
+
+ return nil
+}
+
+func (c *Conversation) generateSig() []byte {
+ c.myKeyId++
+
+ encryptedSig, mac := c.generateEncryptedSignature(&c.sigKeys, false /* gy comes first */)
+
+ c.myCurrentDHPub = c.gy
+ c.myCurrentDHPriv = c.y
+ c.rotateDHKeys()
+ incCounter(&c.myCounter)
+
+ var ret []byte
+ ret = appendU16(ret, 2)
+ ret = append(ret, msgTypeSig)
+ ret = append(ret, encryptedSig...)
+ ret = append(ret, mac[:macPrefixBytes]...)
+ return ret
+}
+
+func (c *Conversation) processSig(in []byte) error {
+ encryptedSig, in, ok1 := getData(in)
+ theirMAC := in
+ if !ok1 || len(theirMAC) != macPrefixBytes {
+ return errors.New("otr: corrupt signature message")
+ }
+
+ if err := c.processEncryptedSig(encryptedSig, theirMAC, &c.sigKeys, false /* gy comes first */); err != nil {
+ return errors.New("otr: in signature message: " + err.Error())
+ }
+
+ c.theirCurrentDHPub = c.gy
+ c.theirLastDHPub = nil
+
+ return nil
+}
+
+func (c *Conversation) rotateDHKeys() {
+ // evict slots using our retired key id
+ for i := range c.keySlots {
+ slot := &c.keySlots[i]
+ if slot.used && slot.myKeyId == c.myKeyId-1 {
+ slot.used = false
+ c.oldMACs = append(c.oldMACs, slot.recvMACKey...)
+ }
+ }
+
+ c.myLastDHPriv = c.myCurrentDHPriv
+ c.myLastDHPub = c.myCurrentDHPub
+
+ var xBytes [dhPrivateBytes]byte
+ c.myCurrentDHPriv = c.randMPI(xBytes[:])
+ c.myCurrentDHPub = new(big.Int).Exp(g, c.myCurrentDHPriv, p)
+ c.myKeyId++
+}
+
+func (c *Conversation) processData(in []byte) (out []byte, tlvs []tlv, err error) {
+ origIn := in
+ flags, in, ok1 := getU8(in)
+ theirKeyId, in, ok2 := getU32(in)
+ myKeyId, in, ok3 := getU32(in)
+ y, in, ok4 := getMPI(in)
+ counter, in, ok5 := getNBytes(in, 8)
+ encrypted, in, ok6 := getData(in)
+ macedData := origIn[:len(origIn)-len(in)]
+ theirMAC, in, ok7 := getNBytes(in, macPrefixBytes)
+ _, in, ok8 := getData(in)
+ if !ok1 || !ok2 || !ok3 || !ok4 || !ok5 || !ok6 || !ok7 || !ok8 || len(in) > 0 {
+ err = errors.New("otr: corrupt data message")
+ return
+ }
+
+ ignoreErrors := flags&1 != 0
+
+ slot, err := c.calcDataKeys(myKeyId, theirKeyId)
+ if err != nil {
+ if ignoreErrors {
+ err = nil
+ }
+ return
+ }
+
+ mac := hmac.New(sha1.New, slot.recvMACKey)
+ mac.Write([]byte{0, 2, 3})
+ mac.Write(macedData)
+ myMAC := mac.Sum(nil)
+ if len(myMAC) != len(theirMAC) || subtle.ConstantTimeCompare(myMAC, theirMAC) == 0 {
+ if !ignoreErrors {
+ err = errors.New("otr: bad MAC on data message")
+ }
+ return
+ }
+
+ if bytes.Compare(counter, slot.theirLastCtr[:]) <= 0 {
+ err = errors.New("otr: counter regressed")
+ return
+ }
+ copy(slot.theirLastCtr[:], counter)
+
+ var iv [aes.BlockSize]byte
+ copy(iv[:], counter)
+ aesCipher, err := aes.NewCipher(slot.recvAESKey)
+ if err != nil {
+ panic(err.Error())
+ }
+ ctr := cipher.NewCTR(aesCipher, iv[:])
+ ctr.XORKeyStream(encrypted, encrypted)
+ decrypted := encrypted
+
+ if myKeyId == c.myKeyId {
+ c.rotateDHKeys()
+ }
+ if theirKeyId == c.theirKeyId {
+ // evict slots using their retired key id
+ for i := range c.keySlots {
+ slot := &c.keySlots[i]
+ if slot.used && slot.theirKeyId == theirKeyId-1 {
+ slot.used = false
+ c.oldMACs = append(c.oldMACs, slot.recvMACKey...)
+ }
+ }
+
+ c.theirLastDHPub = c.theirCurrentDHPub
+ c.theirKeyId++
+ c.theirCurrentDHPub = y
+ }
+
+ if nulPos := bytes.IndexByte(decrypted, 0); nulPos >= 0 {
+ out = decrypted[:nulPos]
+ tlvData := decrypted[nulPos+1:]
+ for len(tlvData) > 0 {
+ var t tlv
+ var ok1, ok2, ok3 bool
+
+ t.typ, tlvData, ok1 = getU16(tlvData)
+ t.length, tlvData, ok2 = getU16(tlvData)
+ t.data, tlvData, ok3 = getNBytes(tlvData, int(t.length))
+ if !ok1 || !ok2 || !ok3 {
+ err = errors.New("otr: corrupt tlv data")
+ }
+ tlvs = append(tlvs, t)
+ }
+ } else {
+ out = decrypted
+ }
+
+ return
+}
+
+func (c *Conversation) generateData(msg []byte, extra *tlv) []byte {
+ slot, err := c.calcDataKeys(c.myKeyId-1, c.theirKeyId)
+ if err != nil {
+ panic("otr: failed to generate sending keys: " + err.Error())
+ }
+
+ var plaintext []byte
+ plaintext = append(plaintext, msg...)
+ plaintext = append(plaintext, 0)
+
+ padding := paddingGranularity - ((len(plaintext) + 4) % paddingGranularity)
+ plaintext = appendU16(plaintext, tlvTypePadding)
+ plaintext = appendU16(plaintext, uint16(padding))
+ for i := 0; i < padding; i++ {
+ plaintext = append(plaintext, 0)
+ }
+
+ if extra != nil {
+ plaintext = appendU16(plaintext, extra.typ)
+ plaintext = appendU16(plaintext, uint16(len(extra.data)))
+ plaintext = append(plaintext, extra.data...)
+ }
+
+ encrypted := make([]byte, len(plaintext))
+
+ var iv [aes.BlockSize]byte
+ copy(iv[:], c.myCounter[:])
+ aesCipher, err := aes.NewCipher(slot.sendAESKey)
+ if err != nil {
+ panic(err.Error())
+ }
+ ctr := cipher.NewCTR(aesCipher, iv[:])
+ ctr.XORKeyStream(encrypted, plaintext)
+
+ var ret []byte
+ ret = appendU16(ret, 2)
+ ret = append(ret, msgTypeData)
+ ret = append(ret, 0 /* flags */)
+ ret = appendU32(ret, c.myKeyId-1)
+ ret = appendU32(ret, c.theirKeyId)
+ ret = appendMPI(ret, c.myCurrentDHPub)
+ ret = append(ret, c.myCounter[:]...)
+ ret = appendData(ret, encrypted)
+
+ mac := hmac.New(sha1.New, slot.sendMACKey)
+ mac.Write(ret)
+ ret = append(ret, mac.Sum(nil)[:macPrefixBytes]...)
+ ret = appendData(ret, c.oldMACs)
+ c.oldMACs = nil
+ incCounter(&c.myCounter)
+
+ return ret
+}
+
+func incCounter(counter *[8]byte) {
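+ // Treat the 8-byte counter as a big-endian integer and add one,
+ // propagating the carry toward the most significant byte.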
+ for i := 7; i >= 0; i-- {
+ counter[i]++
+ if counter[i] > 0 {
+ break
+ }
+ }
+}
+
+// calcDataKeys computes the keys used to encrypt a data message given the key
+// IDs.
+func (c *Conversation) calcDataKeys(myKeyId, theirKeyId uint32) (slot *keySlot, err error) {
+ // Check for a cache hit.
+ for i := range c.keySlots {
+ slot = &c.keySlots[i]
+ if slot.used && slot.theirKeyId == theirKeyId && slot.myKeyId == myKeyId {
+ return
+ }
+ }
+
+ // Find an empty slot to write into.
+ slot = nil
+ for i := range c.keySlots {
+ if !c.keySlots[i].used {
+ slot = &c.keySlots[i]
+ break
+ }
+ }
+ if slot == nil {
+ return nil, errors.New("otr: internal error: no more key slots")
+ }
+
+ var myPriv, myPub, theirPub *big.Int
+
+ if myKeyId == c.myKeyId {
+ myPriv = c.myCurrentDHPriv
+ myPub = c.myCurrentDHPub
+ } else if myKeyId == c.myKeyId-1 {
+ myPriv = c.myLastDHPriv
+ myPub = c.myLastDHPub
+ } else {
+ err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when I'm on " + strconv.FormatUint(uint64(c.myKeyId), 10))
+ return
+ }
+
+ if theirKeyId == c.theirKeyId {
+ theirPub = c.theirCurrentDHPub
+ } else if theirKeyId == c.theirKeyId-1 && c.theirLastDHPub != nil {
+ theirPub = c.theirLastDHPub
+ } else {
+ err = errors.New("otr: peer requested keyid " + strconv.FormatUint(uint64(myKeyId), 10) + " when they're on " + strconv.FormatUint(uint64(c.myKeyId), 10))
+ return
+ }
+
+ var sendPrefixByte, recvPrefixByte [1]byte
+
+ if myPub.Cmp(theirPub) > 0 {
+ // we're the high end
+ sendPrefixByte[0], recvPrefixByte[0] = 1, 2
+ } else {
+ // we're the low end
+ sendPrefixByte[0], recvPrefixByte[0] = 2, 1
+ }
+
+ s := new(big.Int).Exp(theirPub, myPriv, p)
+ sBytes := appendMPI(nil, s)
+
+ h := sha1.New()
+ h.Write(sendPrefixByte[:])
+ h.Write(sBytes)
+ slot.sendAESKey = h.Sum(slot.sendAESKey[:0])[:16]
+
+ h.Reset()
+ h.Write(slot.sendAESKey)
+ slot.sendMACKey = h.Sum(slot.sendMACKey[:0])
+
+ h.Reset()
+ h.Write(recvPrefixByte[:])
+ h.Write(sBytes)
+ slot.recvAESKey = h.Sum(slot.recvAESKey[:0])[:16]
+
+ h.Reset()
+ h.Write(slot.recvAESKey)
+ slot.recvMACKey = h.Sum(slot.recvMACKey[:0])
+
+ slot.theirKeyId = theirKeyId
+ slot.myKeyId = myKeyId
+ slot.used = true
+
+ zero(slot.theirLastCtr[:])
+ return
+}
+
+func (c *Conversation) calcAKEKeys(s *big.Int) {
+ mpi := appendMPI(nil, s)
+ h := sha256.New()
+
+ var cBytes [32]byte
+ hashWithPrefix(c.SSID[:], 0, mpi, h)
+
+ hashWithPrefix(cBytes[:], 1, mpi, h)
+ copy(c.revealKeys.c[:], cBytes[:16])
+ copy(c.sigKeys.c[:], cBytes[16:])
+
+ hashWithPrefix(c.revealKeys.m1[:], 2, mpi, h)
+ hashWithPrefix(c.revealKeys.m2[:], 3, mpi, h)
+ hashWithPrefix(c.sigKeys.m1[:], 4, mpi, h)
+ hashWithPrefix(c.sigKeys.m2[:], 5, mpi, h)
+}
+
+func hashWithPrefix(out []byte, prefix byte, in []byte, h hash.Hash) {
+ h.Reset()
+ var p [1]byte
+ p[0] = prefix
+ h.Write(p[:])
+ h.Write(in)
+ if len(out) == h.Size() {
+ h.Sum(out[:0])
+ } else {
+ digest := h.Sum(nil)
+ copy(out, digest)
+ }
+}
+
+func (c *Conversation) encode(msg []byte) [][]byte {
+ b64 := make([]byte, base64.StdEncoding.EncodedLen(len(msg))+len(msgPrefix)+1)
+ base64.StdEncoding.Encode(b64[len(msgPrefix):], msg)
+ copy(b64, msgPrefix)
+ b64[len(b64)-1] = '.'
+
+ if c.FragmentSize < minFragmentSize || len(b64) <= c.FragmentSize {
+ // We can encode this in a single fragment.
+ return [][]byte{b64}
+ }
+
+ // We have to fragment this message.
+ var ret [][]byte
+ bytesPerFragment := c.FragmentSize - minFragmentSize
+ numFragments := (len(b64) + bytesPerFragment) / bytesPerFragment
+
+ for i := 0; i < numFragments; i++ {
+ frag := []byte("?OTR," + strconv.Itoa(i+1) + "," + strconv.Itoa(numFragments) + ",")
+ todo := bytesPerFragment
+ if todo > len(b64) {
+ todo = len(b64)
+ }
+ frag = append(frag, b64[:todo]...)
+ b64 = b64[todo:]
+ frag = append(frag, ',')
+ ret = append(ret, frag)
+ }
+
+ return ret
+}
+
+func (c *Conversation) reset() {
+ c.myKeyId = 0
+
+ for i := range c.keySlots {
+ c.keySlots[i].used = false
+ }
+}
+
+type PublicKey struct {
+ dsa.PublicKey
+}
+
+func (pk *PublicKey) Parse(in []byte) ([]byte, bool) {
+ var ok bool
+ var pubKeyType uint16
+
+ if pubKeyType, in, ok = getU16(in); !ok || pubKeyType != 0 {
+ return nil, false
+ }
+ if pk.P, in, ok = getMPI(in); !ok {
+ return nil, false
+ }
+ if pk.Q, in, ok = getMPI(in); !ok {
+ return nil, false
+ }
+ if pk.G, in, ok = getMPI(in); !ok {
+ return nil, false
+ }
+ if pk.Y, in, ok = getMPI(in); !ok {
+ return nil, false
+ }
+
+ return in, true
+}
+
+func (pk *PublicKey) Serialize(in []byte) []byte {
+ in = appendU16(in, 0)
+ in = appendMPI(in, pk.P)
+ in = appendMPI(in, pk.Q)
+ in = appendMPI(in, pk.G)
+ in = appendMPI(in, pk.Y)
+ return in
+}
+
+// Fingerprint returns the 20-byte, binary fingerprint of the PublicKey.
+func (pk *PublicKey) Fingerprint() []byte {
+ b := pk.Serialize(nil)
+ h := sha1.New()
+ h.Write(b[2:])
+ return h.Sum(nil)
+}
+
+func (pk *PublicKey) Verify(hashed, sig []byte) ([]byte, bool) {
+ if len(sig) != 2*dsaSubgroupBytes {
+ return nil, false
+ }
+ r := new(big.Int).SetBytes(sig[:dsaSubgroupBytes])
+ s := new(big.Int).SetBytes(sig[dsaSubgroupBytes:])
+ ok := dsa.Verify(&pk.PublicKey, hashed, r, s)
+ return sig[dsaSubgroupBytes*2:], ok
+}
+
+type PrivateKey struct {
+ PublicKey
+ dsa.PrivateKey
+}
+
+func (priv *PrivateKey) Sign(rand io.Reader, hashed []byte) []byte {
+ r, s, err := dsa.Sign(rand, &priv.PrivateKey, hashed)
+ if err != nil {
+ panic(err.Error())
+ }
+ rBytes := r.Bytes()
+ sBytes := s.Bytes()
+ if len(rBytes) > dsaSubgroupBytes || len(sBytes) > dsaSubgroupBytes {
+ panic("DSA signature too large")
+ }
+
+ out := make([]byte, 2*dsaSubgroupBytes)
+ copy(out[dsaSubgroupBytes-len(rBytes):], rBytes)
+ copy(out[len(out)-len(sBytes):], sBytes)
+ return out
+}
+
+func (priv *PrivateKey) Serialize(in []byte) []byte {
+ in = priv.PublicKey.Serialize(in)
+ in = appendMPI(in, priv.PrivateKey.X)
+ return in
+}
+
+func (priv *PrivateKey) Parse(in []byte) ([]byte, bool) {
+ in, ok := priv.PublicKey.Parse(in)
+ if !ok {
+ return in, ok
+ }
+ priv.PrivateKey.PublicKey = priv.PublicKey.PublicKey
+ priv.PrivateKey.X, in, ok = getMPI(in)
+ return in, ok
+}
+
+func (priv *PrivateKey) Generate(rand io.Reader) {
+ if err := dsa.GenerateParameters(&priv.PrivateKey.PublicKey.Parameters, rand, dsa.L1024N160); err != nil {
+ panic(err.Error())
+ }
+ if err := dsa.GenerateKey(&priv.PrivateKey, rand); err != nil {
+ panic(err.Error())
+ }
+ priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey
+}
+
+func notHex(r rune) bool {
+ if r >= '0' && r <= '9' ||
+ r >= 'a' && r <= 'f' ||
+ r >= 'A' && r <= 'F' {
+ return false
+ }
+
+ return true
+}
+
+// Import parses the contents of a libotr private key file.
+func (priv *PrivateKey) Import(in []byte) bool {
+ mpiStart := []byte(" #")
+
+ mpis := make([]*big.Int, 5)
+
+ for i := 0; i < len(mpis); i++ {
+ start := bytes.Index(in, mpiStart)
+ if start == -1 {
+ return false
+ }
+ in = in[start+len(mpiStart):]
+ end := bytes.IndexFunc(in, notHex)
+ if end == -1 {
+ return false
+ }
+ hexBytes := in[:end]
+ in = in[end:]
+
+ if len(hexBytes)&1 != 0 {
+ return false
+ }
+
+ mpiBytes := make([]byte, len(hexBytes)/2)
+ if _, err := hex.Decode(mpiBytes, hexBytes); err != nil {
+ return false
+ }
+
+ mpis[i] = new(big.Int).SetBytes(mpiBytes)
+ }
+
+ priv.PrivateKey.P = mpis[0]
+ priv.PrivateKey.Q = mpis[1]
+ priv.PrivateKey.G = mpis[2]
+ priv.PrivateKey.Y = mpis[3]
+ priv.PrivateKey.X = mpis[4]
+ priv.PublicKey.PublicKey = priv.PrivateKey.PublicKey
+
+ a := new(big.Int).Exp(priv.PrivateKey.G, priv.PrivateKey.X, priv.PrivateKey.P)
+ return a.Cmp(priv.PrivateKey.Y) == 0
+}
+
+func getU8(in []byte) (uint8, []byte, bool) {
+ if len(in) < 1 {
+ return 0, in, false
+ }
+ return in[0], in[1:], true
+}
+
+func getU16(in []byte) (uint16, []byte, bool) {
+ if len(in) < 2 {
+ return 0, in, false
+ }
+ r := uint16(in[0])<<8 | uint16(in[1])
+ return r, in[2:], true
+}
+
+func getU32(in []byte) (uint32, []byte, bool) {
+ if len(in) < 4 {
+ return 0, in, false
+ }
+ r := uint32(in[0])<<24 | uint32(in[1])<<16 | uint32(in[2])<<8 | uint32(in[3])
+ return r, in[4:], true
+}
+
+func getMPI(in []byte) (*big.Int, []byte, bool) {
+ l, in, ok := getU32(in)
+ if !ok || uint32(len(in)) < l {
+ return nil, in, false
+ }
+ r := new(big.Int).SetBytes(in[:l])
+ return r, in[l:], true
+}
+
+func getData(in []byte) ([]byte, []byte, bool) {
+ l, in, ok := getU32(in)
+ if !ok || uint32(len(in)) < l {
+ return nil, in, false
+ }
+ return in[:l], in[l:], true
+}
+
+func getNBytes(in []byte, n int) ([]byte, []byte, bool) {
+ if len(in) < n {
+ return nil, in, false
+ }
+ return in[:n], in[n:], true
+}
+
+func appendU16(out []byte, v uint16) []byte {
+ out = append(out, byte(v>>8), byte(v))
+ return out
+}
+
+func appendU32(out []byte, v uint32) []byte {
+ out = append(out, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+ return out
+}
+
+func appendData(out, v []byte) []byte {
+ out = appendU32(out, uint32(len(v)))
+ out = append(out, v...)
+ return out
+}
+
+func appendMPI(out []byte, v *big.Int) []byte {
+ vBytes := v.Bytes()
+ out = appendU32(out, uint32(len(vBytes)))
+ out = append(out, vBytes...)
+ return out
+}
+
+func appendMPIs(out []byte, mpis ...*big.Int) []byte {
+ for _, mpi := range mpis {
+ out = appendMPI(out, mpi)
+ }
+ return out
+}
+
+func zero(b []byte) {
+ for i := range b {
+ b[i] = 0
+ }
+}
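
A standalone sketch of the "?OTR,k,n,chunk," framing that Conversation.encode emits above; the helper name fragment is hypothetical and this is not part of the vendored file. Note it uses plain ceiling division, whereas the vendored encode computes (len(b64)+bytesPerFragment)/bytesPerFragment and so can emit a trailing empty fragment when the payload divides evenly.

package main

import (
	"fmt"
	"strconv"
)

// fragment splits an already-encoded OTR message into "?OTR,k,n,chunk,"
// pieces carrying at most perFrag payload bytes each.
func fragment(encoded []byte, perFrag int) [][]byte {
	n := (len(encoded) + perFrag - 1) / perFrag
	frags := make([][]byte, 0, n)
	for i := 0; i < n; i++ {
		todo := perFrag
		if todo > len(encoded) {
			todo = len(encoded)
		}
		frag := []byte("?OTR," + strconv.Itoa(i+1) + "," + strconv.Itoa(n) + ",")
		frag = append(frag, encoded[:todo]...)
		encoded = encoded[todo:]
		frag = append(frag, ',')
		frags = append(frags, frag)
	}
	return frags
}

func main() {
	for _, f := range fragment([]byte("?OTR:AAICAAAAxA==."), 8) {
		fmt.Println(string(f))
	}
}
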
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/otr_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/otr_test.go
new file mode 100644
index 00000000000..cfcd062b274
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/otr_test.go
@@ -0,0 +1,470 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package otr
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "math/big"
+ "os"
+ "os/exec"
+ "testing"
+)
+
+var isQueryTests = []struct {
+ msg string
+ expectedVersion int
+}{
+ {"foo", 0},
+ {"?OtR", 0},
+ {"?OtR?", 0},
+ {"?OTR?", 0},
+ {"?OTRv?", 0},
+ {"?OTRv1?", 0},
+ {"?OTR?v1?", 0},
+ {"?OTR?v?", 0},
+ {"?OTR?v2?", 2},
+ {"?OTRv2?", 2},
+ {"?OTRv23?", 2},
+ {"?OTRv23 ?", 0},
+}
+
+func TestIsQuery(t *testing.T) {
+ for i, test := range isQueryTests {
+ version := isQuery([]byte(test.msg))
+ if version != test.expectedVersion {
+ t.Errorf("#%d: got %d, want %d", i, version, test.expectedVersion)
+ }
+ }
+}
+
+var alicePrivateKeyHex = "000000000080c81c2cb2eb729b7e6fd48e975a932c638b3a9055478583afa46755683e30102447f6da2d8bec9f386bbb5da6403b0040fee8650b6ab2d7f32c55ab017ae9b6aec8c324ab5844784e9a80e194830d548fb7f09a0410df2c4d5c8bc2b3e9ad484e65412be689cf0834694e0839fb2954021521ffdffb8f5c32c14dbf2020b3ce7500000014da4591d58def96de61aea7b04a8405fe1609308d000000808ddd5cb0b9d66956e3dea5a915d9aba9d8a6e7053b74dadb2fc52f9fe4e5bcc487d2305485ed95fed026ad93f06ebb8c9e8baf693b7887132c7ffdd3b0f72f4002ff4ed56583ca7c54458f8c068ca3e8a4dfa309d1dd5d34e2a4b68e6f4338835e5e0fb4317c9e4c7e4806dafda3ef459cd563775a586dd91b1319f72621bf3f00000080b8147e74d8c45e6318c37731b8b33b984a795b3653c2cd1d65cc99efe097cb7eb2fa49569bab5aab6e8a1c261a27d0f7840a5e80b317e6683042b59b6dceca2879c6ffc877a465be690c15e4a42f9a7588e79b10faac11b1ce3741fcef7aba8ce05327a2c16d279ee1b3d77eb783fb10e3356caa25635331e26dd42b8396c4d00000001420bec691fea37ecea58a5c717142f0b804452f57"
+
+var aliceFingerprintHex = "0bb01c360424522e94ee9c346ce877a1a4288b2f"
+
+var bobPrivateKeyHex = "000000000080a5138eb3d3eb9c1d85716faecadb718f87d31aaed1157671d7fee7e488f95e8e0ba60ad449ec732710a7dec5190f7182af2e2f98312d98497221dff160fd68033dd4f3a33b7c078d0d9f66e26847e76ca7447d4bab35486045090572863d9e4454777f24d6706f63e02548dfec2d0a620af37bbc1d24f884708a212c343b480d00000014e9c58f0ea21a5e4dfd9f44b6a9f7f6a9961a8fa9000000803c4d111aebd62d3c50c2889d420a32cdf1e98b70affcc1fcf44d59cca2eb019f6b774ef88153fb9b9615441a5fe25ea2d11b74ce922ca0232bd81b3c0fcac2a95b20cb6e6c0c5c1ace2e26f65dc43c751af0edbb10d669890e8ab6beea91410b8b2187af1a8347627a06ecea7e0f772c28aae9461301e83884860c9b656c722f0000008065af8625a555ea0e008cd04743671a3cda21162e83af045725db2eb2bb52712708dc0cc1a84c08b3649b88a966974bde27d8612c2861792ec9f08786a246fcadd6d8d3a81a32287745f309238f47618c2bd7612cb8b02d940571e0f30b96420bcd462ff542901b46109b1e5ad6423744448d20a57818a8cbb1647d0fea3b664e0000001440f9f2eb554cb00d45a5826b54bfa419b6980e48"
+
+func TestKeySerialization(t *testing.T) {
+ var priv PrivateKey
+ alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex)
+ rest, ok := priv.Parse(alicePrivateKey)
+ if !ok {
+ t.Error("failed to parse private key")
+ }
+ if len(rest) > 0 {
+ t.Error("data remaining after parsing private key")
+ }
+
+ out := priv.Serialize(nil)
+ if !bytes.Equal(alicePrivateKey, out) {
+ t.Errorf("serialization (%x) is not equal to original (%x)", out, alicePrivateKey)
+ }
+
+ aliceFingerprint, _ := hex.DecodeString(aliceFingerprintHex)
+ fingerprint := priv.PublicKey.Fingerprint()
+ if !bytes.Equal(aliceFingerprint, fingerprint) {
+ t.Errorf("fingerprint (%x) is not equal to expected value (%x)", fingerprint, aliceFingerprint)
+ }
+}
+
+const libOTRPrivateKey = `(privkeys
+ (account
+(name "foo@example.com")
+(protocol prpl-jabber)
+(private-key
+ (dsa
+ (p #00FC07ABCF0DC916AFF6E9AE47BEF60C7AB9B4D6B2469E436630E36F8A489BE812486A09F30B71224508654940A835301ACC525A4FF133FC152CC53DCC59D65C30A54F1993FE13FE63E5823D4C746DB21B90F9B9C00B49EC7404AB1D929BA7FBA12F2E45C6E0A651689750E8528AB8C031D3561FECEE72EBB4A090D450A9B7A857#)
+ (q #00997BD266EF7B1F60A5C23F3A741F2AEFD07A2081#)
+ (g #535E360E8A95EBA46A4F7DE50AD6E9B2A6DB785A66B64EB9F20338D2A3E8FB0E94725848F1AA6CC567CB83A1CC517EC806F2E92EAE71457E80B2210A189B91250779434B41FC8A8873F6DB94BEA7D177F5D59E7E114EE10A49CFD9CEF88AE43387023B672927BA74B04EB6BBB5E57597766A2F9CE3857D7ACE3E1E3BC1FC6F26#)
+ (y #0AC8670AD767D7A8D9D14CC1AC6744CD7D76F993B77FFD9E39DF01E5A6536EF65E775FCEF2A983E2A19BD6415500F6979715D9FD1257E1FE2B6F5E1E74B333079E7C880D39868462A93454B41877BE62E5EF0A041C2EE9C9E76BD1E12AE25D9628DECB097025DD625EF49C3258A1A3C0FF501E3DC673B76D7BABF349009B6ECF#)
+ (x #14D0345A3562C480A039E3C72764F72D79043216#)
+ )
+ )
+ )
+)`
+
+func TestParseLibOTRPrivateKey(t *testing.T) {
+ var priv PrivateKey
+
+ if !priv.Import([]byte(libOTRPrivateKey)) {
+ t.Fatalf("Failed to import sample private key")
+ }
+}
+
+func TestSignVerify(t *testing.T) {
+ var priv PrivateKey
+ alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex)
+ _, ok := priv.Parse(alicePrivateKey)
+ if !ok {
+ t.Error("failed to parse private key")
+ }
+
+ var msg [32]byte
+ rand.Reader.Read(msg[:])
+
+ sig := priv.Sign(rand.Reader, msg[:])
+ rest, ok := priv.PublicKey.Verify(msg[:], sig)
+ if !ok {
+ t.Errorf("signature (%x) of %x failed to verify", sig, msg[:])
+ } else if len(rest) > 0 {
+ t.Error("signature data remains after verification")
+ }
+
+ sig[10] ^= 80
+ _, ok = priv.PublicKey.Verify(msg[:], sig)
+ if ok {
+ t.Errorf("corrupted signature (%x) of %x verified", sig, msg[:])
+ }
+}
+
+func setupConversation(t *testing.T) (alice, bob *Conversation) {
+ alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex)
+ bobPrivateKey, _ := hex.DecodeString(bobPrivateKeyHex)
+
+ alice, bob = new(Conversation), new(Conversation)
+
+ alice.PrivateKey = new(PrivateKey)
+ bob.PrivateKey = new(PrivateKey)
+ alice.PrivateKey.Parse(alicePrivateKey)
+ bob.PrivateKey.Parse(bobPrivateKey)
+ alice.FragmentSize = 100
+ bob.FragmentSize = 100
+
+ if alice.IsEncrypted() {
+ t.Error("Alice believes that the conversation is secure before we've started")
+ }
+ if bob.IsEncrypted() {
+ t.Error("Bob believes that the conversation is secure before we've started")
+ }
+
+ performHandshake(t, alice, bob)
+ return alice, bob
+}
+
+func performHandshake(t *testing.T, alice, bob *Conversation) {
+ var alicesMessage, bobsMessage [][]byte
+ var out []byte
+ var aliceChange, bobChange SecurityChange
+ var err error
+ alicesMessage = append(alicesMessage, []byte(QueryMessage))
+
+ for round := 0; len(alicesMessage) > 0 || len(bobsMessage) > 0; round++ {
+ bobsMessage = nil
+ for i, msg := range alicesMessage {
+ out, _, bobChange, bobsMessage, err = bob.Receive(msg)
+ if len(out) > 0 {
+ t.Errorf("Bob generated output during key exchange, round %d, message %d", round, i)
+ }
+ if err != nil {
+ t.Fatalf("Bob returned an error, round %d, message %d (%x): %s", round, i, msg, err)
+ }
+ if len(bobsMessage) > 0 && i != len(alicesMessage)-1 {
+ t.Errorf("Bob produced output while processing a fragment, round %d, message %d", round, i)
+ }
+ }
+
+ alicesMessage = nil
+ for i, msg := range bobsMessage {
+ out, _, aliceChange, alicesMessage, err = alice.Receive(msg)
+ if len(out) > 0 {
+ t.Errorf("Alice generated output during key exchange, round %d, message %d", round, i)
+ }
+ if err != nil {
+ t.Fatalf("Alice returned an error, round %d, message %d (%x): %s", round, i, msg, err)
+ }
+ if len(alicesMessage) > 0 && i != len(bobsMessage)-1 {
+ t.Errorf("Alice produced output while processing a fragment, round %d, message %d", round, i)
+ }
+ }
+ }
+
+ if aliceChange != NewKeys {
+ t.Errorf("Alice terminated without signaling new keys")
+ }
+ if bobChange != NewKeys {
+ t.Errorf("Bob terminated without signaling new keys")
+ }
+
+ if !bytes.Equal(alice.SSID[:], bob.SSID[:]) {
+ t.Errorf("Session identifiers don't match. Alice has %x, Bob has %x", alice.SSID[:], bob.SSID[:])
+ }
+
+ if !alice.IsEncrypted() {
+ t.Error("Alice doesn't believe that the conversation is secure")
+ }
+ if !bob.IsEncrypted() {
+ t.Error("Bob doesn't believe that the conversation is secure")
+ }
+}
+
+const (
+ firstRoundTrip = iota
+ subsequentRoundTrip
+ noMACKeyCheck
+)
+
+func roundTrip(t *testing.T, alice, bob *Conversation, message []byte, macKeyCheck int) {
+ alicesMessage, err := alice.Send(message)
+ if err != nil {
+ t.Errorf("Error from Alice sending message: %s", err)
+ }
+
+ if len(alice.oldMACs) != 0 {
+ t.Errorf("Alice has not revealed all MAC keys")
+ }
+
+ for i, msg := range alicesMessage {
+ out, encrypted, _, _, err := bob.Receive(msg)
+
+ if err != nil {
+ t.Errorf("Error generated while processing test message: %s", err.Error())
+ }
+ if len(out) > 0 {
+ if i != len(alicesMessage)-1 {
+ t.Fatal("Bob produced a message while processing a fragment of Alice's")
+ }
+ if !encrypted {
+ t.Errorf("Message was not marked as encrypted")
+ }
+ if !bytes.Equal(out, message) {
+ t.Errorf("Message corrupted: got %x, want %x", out, message)
+ }
+ }
+ }
+
+ switch macKeyCheck {
+ case firstRoundTrip:
+ if len(bob.oldMACs) != 0 {
+ t.Errorf("Bob should not have MAC keys to reveal")
+ }
+ case subsequentRoundTrip:
+ if len(bob.oldMACs) != 40 {
+ t.Errorf("Bob has %d bytes of MAC keys to reveal, but should have 40", len(bob.oldMACs))
+ }
+ }
+
+ bobsMessage, err := bob.Send(message)
+ if err != nil {
+ t.Errorf("Error from Bob sending message: %s", err)
+ }
+
+ if len(bob.oldMACs) != 0 {
+ t.Errorf("Bob has not revealed all MAC keys")
+ }
+
+ for i, msg := range bobsMessage {
+ out, encrypted, _, _, err := alice.Receive(msg)
+
+ if err != nil {
+ t.Errorf("Error generated while processing test message: %s", err.Error())
+ }
+ if len(out) > 0 {
+ if i != len(bobsMessage)-1 {
+ t.Fatal("Alice produced a message while processing a fragment of Bob's")
+ }
+ if !encrypted {
+ t.Errorf("Message was not marked as encrypted")
+ }
+ if !bytes.Equal(out, message) {
+ t.Errorf("Message corrupted: got %x, want %x", out, message)
+ }
+ }
+ }
+
+ switch macKeyCheck {
+ case firstRoundTrip:
+ if len(alice.oldMACs) != 20 {
+ t.Errorf("Alice has %d bytes of MAC keys to reveal, but should have 20", len(alice.oldMACs))
+ }
+ case subsequentRoundTrip:
+ if len(alice.oldMACs) != 40 {
+ t.Errorf("Alice has %d bytes of MAC keys to reveal, but should have 40", len(alice.oldMACs))
+ }
+ }
+}
+
+func TestConversation(t *testing.T) {
+ alice, bob := setupConversation(t)
+
+ var testMessages = [][]byte{
+ []byte("hello"), []byte("bye"),
+ }
+
+ roundTripType := firstRoundTrip
+
+ for _, testMessage := range testMessages {
+ roundTrip(t, alice, bob, testMessage, roundTripType)
+ roundTripType = subsequentRoundTrip
+ }
+}
+
+func TestGoodSMP(t *testing.T) {
+ var alice, bob Conversation
+
+ alice.smp.secret = new(big.Int).SetInt64(42)
+ bob.smp.secret = alice.smp.secret
+
+ var alicesMessages, bobsMessages []tlv
+ var aliceComplete, bobComplete bool
+ var err error
+ var out tlv
+
+ alicesMessages = alice.startSMP("")
+ for round := 0; len(alicesMessages) > 0 || len(bobsMessages) > 0; round++ {
+ bobsMessages = bobsMessages[:0]
+ for i, msg := range alicesMessages {
+ out, bobComplete, err = bob.processSMP(msg)
+ if err != nil {
+ t.Errorf("Error from Bob in round %d: %s", round, err)
+ }
+ if bobComplete && i != len(alicesMessages)-1 {
+ t.Errorf("Bob signaled completion before processing all of Alice's messages in round %d", round)
+ }
+ if out.typ != 0 {
+ bobsMessages = append(bobsMessages, out)
+ }
+ }
+
+ alicesMessages = alicesMessages[:0]
+ for i, msg := range bobsMessages {
+ out, aliceComplete, err = alice.processSMP(msg)
+ if err != nil {
+ t.Errorf("Error from Alice in round %d: %s", round, err)
+ }
+ if aliceComplete && i != len(bobsMessages)-1 {
+ t.Errorf("Alice signaled completion before processing all of Bob's messages in round %d", round)
+ }
+ if out.typ != 0 {
+ alicesMessages = append(alicesMessages, out)
+ }
+ }
+ }
+
+ if !aliceComplete || !bobComplete {
+ t.Errorf("SMP completed without both sides reporting success: alice: %v, bob: %v\n", aliceComplete, bobComplete)
+ }
+}
+
+func TestBadSMP(t *testing.T) {
+ var alice, bob Conversation
+
+ alice.smp.secret = new(big.Int).SetInt64(42)
+ bob.smp.secret = new(big.Int).SetInt64(43)
+
+ var alicesMessages, bobsMessages []tlv
+
+ alicesMessages = alice.startSMP("")
+ for round := 0; len(alicesMessages) > 0 || len(bobsMessages) > 0; round++ {
+ bobsMessages = bobsMessages[:0]
+ for _, msg := range alicesMessages {
+ out, complete, _ := bob.processSMP(msg)
+ if complete {
+ t.Errorf("Bob signaled completion in round %d", round)
+ }
+ if out.typ != 0 {
+ bobsMessages = append(bobsMessages, out)
+ }
+ }
+
+ alicesMessages = alicesMessages[:0]
+ for _, msg := range bobsMessages {
+ out, complete, _ := alice.processSMP(msg)
+ if complete {
+ t.Errorf("Alice signaled completion in round %d", round)
+ }
+ if out.typ != 0 {
+ alicesMessages = append(alicesMessages, out)
+ }
+ }
+ }
+}
+
+func TestRehandshaking(t *testing.T) {
+ alice, bob := setupConversation(t)
+ roundTrip(t, alice, bob, []byte("test"), firstRoundTrip)
+ roundTrip(t, alice, bob, []byte("test 2"), subsequentRoundTrip)
+ roundTrip(t, alice, bob, []byte("test 3"), subsequentRoundTrip)
+ roundTrip(t, alice, bob, []byte("test 4"), subsequentRoundTrip)
+ roundTrip(t, alice, bob, []byte("test 5"), subsequentRoundTrip)
+ roundTrip(t, alice, bob, []byte("test 6"), subsequentRoundTrip)
+ roundTrip(t, alice, bob, []byte("test 7"), subsequentRoundTrip)
+ roundTrip(t, alice, bob, []byte("test 8"), subsequentRoundTrip)
+ performHandshake(t, alice, bob)
+ roundTrip(t, alice, bob, []byte("test"), noMACKeyCheck)
+ roundTrip(t, alice, bob, []byte("test 2"), noMACKeyCheck)
+}
+
+func TestAgainstLibOTR(t *testing.T) {
+ // This test requires otr.c.test to be built as /tmp/a.out.
+ // If enabled, this test runs forever performing OTR handshakes in a
+ // loop.
+ return
+
+ alicePrivateKey, _ := hex.DecodeString(alicePrivateKeyHex)
+ var alice Conversation
+ alice.PrivateKey = new(PrivateKey)
+ alice.PrivateKey.Parse(alicePrivateKey)
+
+ cmd := exec.Command("/tmp/a.out")
+ cmd.Stderr = os.Stderr
+
+ out, err := cmd.StdinPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer out.Close()
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ in := bufio.NewReader(stdout)
+
+ if err := cmd.Start(); err != nil {
+ t.Fatal(err)
+ }
+
+ out.Write([]byte(QueryMessage))
+ out.Write([]byte("\n"))
+ var expectedText = []byte("test message")
+
+ for {
+ line, isPrefix, err := in.ReadLine()
+ if isPrefix {
+ t.Fatal("line from subprocess too long")
+ }
+ if err != nil {
+ t.Fatal(err)
+ }
+ text, encrypted, change, alicesMessage, err := alice.Receive(line)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, msg := range alicesMessage {
+ out.Write(msg)
+ out.Write([]byte("\n"))
+ }
+ if change == NewKeys {
+ alicesMessage, err := alice.Send([]byte("Go -> libotr test message"))
+ if err != nil {
+ t.Fatalf("error sending message: %s", err.Error())
+ } else {
+ for _, msg := range alicesMessage {
+ out.Write(msg)
+ out.Write([]byte("\n"))
+ }
+ }
+ }
+ if len(text) > 0 {
+ if !bytes.Equal(text, expectedText) {
+ t.Fatalf("expected %x, but got %x", expectedText, text)
+ }
+ if !encrypted {
+ t.Fatal("message wasn't encrypted")
+ }
+ }
+ }
+}
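
performHandshake above pumps queued messages between the two parties until both go quiet. A compressed, runnable sketch of the same pattern against the exported API; it generates fresh DSA keys rather than parsing the fixed test keys, which is slow but keeps the sketch self-contained.

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/otr"
)

func main() {
	alice, bob := new(otr.Conversation), new(otr.Conversation)
	alice.PrivateKey, bob.PrivateKey = new(otr.PrivateKey), new(otr.PrivateKey)
	alice.PrivateKey.Generate(rand.Reader)
	bob.PrivateKey.Generate(rand.Reader)

	// Alice opens with an OTR query; then each side consumes the other's
	// queued messages and queues its replies until nothing is left to send.
	msgs := [][]byte{[]byte(otr.QueryMessage)}
	to := bob
	for len(msgs) > 0 {
		var next [][]byte
		for _, m := range msgs {
			_, _, _, replies, err := to.Receive(m)
			if err != nil {
				panic(err)
			}
			next = append(next, replies...)
		}
		msgs = next
		if to == bob {
			to = alice
		} else {
			to = bob
		}
	}
	fmt.Println(alice.IsEncrypted(), bob.IsEncrypted()) // expect: true true
}
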
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/smp.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/smp.go
new file mode 100644
index 00000000000..dc6de4ee0eb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/otr/smp.go
@@ -0,0 +1,572 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the Socialist Millionaires Protocol as described in
+// http://www.cypherpunks.ca/otr/Protocol-v2-3.1.0.html. The protocol
+// specification is required to understand this code; where possible, the
+// variable names in the code match up with the spec.
+
+package otr
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "errors"
+ "hash"
+ "math/big"
+)
+
+type smpFailure string
+
+func (s smpFailure) Error() string {
+ return string(s)
+}
+
+var smpFailureError = smpFailure("otr: SMP protocol failed")
+var smpSecretMissingError = smpFailure("otr: mutual secret needed")
+
+const smpVersion = 1
+
+const (
+ smpState1 = iota
+ smpState2
+ smpState3
+ smpState4
+)
+
+type smpState struct {
+ state int
+ a2, a3, b2, b3, pb, qb *big.Int
+ g2a, g3a *big.Int
+ g2, g3 *big.Int
+ g3b, papb, qaqb, ra *big.Int
+ saved *tlv
+ secret *big.Int
+ question string
+}
+
+func (c *Conversation) startSMP(question string) (tlvs []tlv) {
+ if c.smp.state != smpState1 {
+ tlvs = append(tlvs, c.generateSMPAbort())
+ }
+ tlvs = append(tlvs, c.generateSMP1(question))
+ c.smp.question = ""
+ c.smp.state = smpState2
+ return
+}
+
+func (c *Conversation) resetSMP() {
+ c.smp.state = smpState1
+ c.smp.secret = nil
+ c.smp.question = ""
+}
+
+func (c *Conversation) processSMP(in tlv) (out tlv, complete bool, err error) {
+ data := in.data
+
+ switch in.typ {
+ case tlvTypeSMPAbort:
+ if c.smp.state != smpState1 {
+ err = smpFailureError
+ }
+ c.resetSMP()
+ return
+ case tlvTypeSMP1WithQuestion:
+ // We preprocess this into an SMP1 message.
+ nulPos := bytes.IndexByte(data, 0)
+ if nulPos == -1 {
+ err = errors.New("otr: SMP message with question didn't contain a NUL byte")
+ return
+ }
+ c.smp.question = string(data[:nulPos])
+ data = data[nulPos+1:]
+ }
+
+ numMPIs, data, ok := getU32(data)
+ if !ok || numMPIs > 20 {
+ err = errors.New("otr: corrupt SMP message")
+ return
+ }
+
+ mpis := make([]*big.Int, numMPIs)
+ for i := range mpis {
+ var ok bool
+ mpis[i], data, ok = getMPI(data)
+ if !ok {
+ err = errors.New("otr: corrupt SMP message")
+ return
+ }
+ }
+
+ switch in.typ {
+ case tlvTypeSMP1, tlvTypeSMP1WithQuestion:
+ if c.smp.state != smpState1 {
+ c.resetSMP()
+ out = c.generateSMPAbort()
+ return
+ }
+ if c.smp.secret == nil {
+ err = smpSecretMissingError
+ return
+ }
+ if err = c.processSMP1(mpis); err != nil {
+ return
+ }
+ c.smp.state = smpState3
+ out = c.generateSMP2()
+ case tlvTypeSMP2:
+ if c.smp.state != smpState2 {
+ c.resetSMP()
+ out = c.generateSMPAbort()
+ return
+ }
+ if out, err = c.processSMP2(mpis); err != nil {
+ out = c.generateSMPAbort()
+ return
+ }
+ c.smp.state = smpState4
+ case tlvTypeSMP3:
+ if c.smp.state != smpState3 {
+ c.resetSMP()
+ out = c.generateSMPAbort()
+ return
+ }
+ if out, err = c.processSMP3(mpis); err != nil {
+ return
+ }
+ c.smp.state = smpState1
+ c.smp.secret = nil
+ complete = true
+ case tlvTypeSMP4:
+ if c.smp.state != smpState4 {
+ c.resetSMP()
+ out = c.generateSMPAbort()
+ return
+ }
+ if err = c.processSMP4(mpis); err != nil {
+ out = c.generateSMPAbort()
+ return
+ }
+ c.smp.state = smpState1
+ c.smp.secret = nil
+ complete = true
+ default:
+ panic("unknown SMP message")
+ }
+
+ return
+}
+
+func (c *Conversation) calcSMPSecret(mutualSecret []byte, weStarted bool) {
+ h := sha256.New()
+ h.Write([]byte{smpVersion})
+ if weStarted {
+ h.Write(c.PrivateKey.PublicKey.Fingerprint())
+ h.Write(c.TheirPublicKey.Fingerprint())
+ } else {
+ h.Write(c.TheirPublicKey.Fingerprint())
+ h.Write(c.PrivateKey.PublicKey.Fingerprint())
+ }
+ h.Write(c.SSID[:])
+ h.Write(mutualSecret)
+ c.smp.secret = new(big.Int).SetBytes(h.Sum(nil))
+}
+
+func (c *Conversation) generateSMP1(question string) tlv {
+ var randBuf [16]byte
+ c.smp.a2 = c.randMPI(randBuf[:])
+ c.smp.a3 = c.randMPI(randBuf[:])
+ g2a := new(big.Int).Exp(g, c.smp.a2, p)
+ g3a := new(big.Int).Exp(g, c.smp.a3, p)
+ h := sha256.New()
+
+ r2 := c.randMPI(randBuf[:])
+ r := new(big.Int).Exp(g, r2, p)
+ c2 := new(big.Int).SetBytes(hashMPIs(h, 1, r))
+ d2 := new(big.Int).Mul(c.smp.a2, c2)
+ d2.Sub(r2, d2)
+ d2.Mod(d2, q)
+ if d2.Sign() < 0 {
+ d2.Add(d2, q)
+ }
+
+ r3 := c.randMPI(randBuf[:])
+ r.Exp(g, r3, p)
+ c3 := new(big.Int).SetBytes(hashMPIs(h, 2, r))
+ d3 := new(big.Int).Mul(c.smp.a3, c3)
+ d3.Sub(r3, d3)
+ d3.Mod(d3, q)
+ if d3.Sign() < 0 {
+ d3.Add(d3, q)
+ }
+
+ var ret tlv
+ if len(question) > 0 {
+ ret.typ = tlvTypeSMP1WithQuestion
+ ret.data = append(ret.data, question...)
+ ret.data = append(ret.data, 0)
+ } else {
+ ret.typ = tlvTypeSMP1
+ }
+ ret.data = appendU32(ret.data, 6)
+ ret.data = appendMPIs(ret.data, g2a, c2, d2, g3a, c3, d3)
+ return ret
+}
+
+func (c *Conversation) processSMP1(mpis []*big.Int) error {
+ if len(mpis) != 6 {
+ return errors.New("otr: incorrect number of arguments in SMP1 message")
+ }
+ g2a := mpis[0]
+ c2 := mpis[1]
+ d2 := mpis[2]
+ g3a := mpis[3]
+ c3 := mpis[4]
+ d3 := mpis[5]
+ h := sha256.New()
+
+ r := new(big.Int).Exp(g, d2, p)
+ s := new(big.Int).Exp(g2a, c2, p)
+ r.Mul(r, s)
+ r.Mod(r, p)
+ t := new(big.Int).SetBytes(hashMPIs(h, 1, r))
+ if c2.Cmp(t) != 0 {
+ return errors.New("otr: ZKP c2 incorrect in SMP1 message")
+ }
+ r.Exp(g, d3, p)
+ s.Exp(g3a, c3, p)
+ r.Mul(r, s)
+ r.Mod(r, p)
+ t.SetBytes(hashMPIs(h, 2, r))
+ if c3.Cmp(t) != 0 {
+ return errors.New("otr: ZKP c3 incorrect in SMP1 message")
+ }
+
+ c.smp.g2a = g2a
+ c.smp.g3a = g3a
+ return nil
+}
+
+func (c *Conversation) generateSMP2() tlv {
+ var randBuf [16]byte
+ b2 := c.randMPI(randBuf[:])
+ c.smp.b3 = c.randMPI(randBuf[:])
+ r2 := c.randMPI(randBuf[:])
+ r3 := c.randMPI(randBuf[:])
+ r4 := c.randMPI(randBuf[:])
+ r5 := c.randMPI(randBuf[:])
+ r6 := c.randMPI(randBuf[:])
+
+ g2b := new(big.Int).Exp(g, b2, p)
+ g3b := new(big.Int).Exp(g, c.smp.b3, p)
+
+ r := new(big.Int).Exp(g, r2, p)
+ h := sha256.New()
+ c2 := new(big.Int).SetBytes(hashMPIs(h, 3, r))
+ d2 := new(big.Int).Mul(b2, c2)
+ d2.Sub(r2, d2)
+ d2.Mod(d2, q)
+ if d2.Sign() < 0 {
+ d2.Add(d2, q)
+ }
+
+ r.Exp(g, r3, p)
+ c3 := new(big.Int).SetBytes(hashMPIs(h, 4, r))
+ d3 := new(big.Int).Mul(c.smp.b3, c3)
+ d3.Sub(r3, d3)
+ d3.Mod(d3, q)
+ if d3.Sign() < 0 {
+ d3.Add(d3, q)
+ }
+
+ c.smp.g2 = new(big.Int).Exp(c.smp.g2a, b2, p)
+ c.smp.g3 = new(big.Int).Exp(c.smp.g3a, c.smp.b3, p)
+ c.smp.pb = new(big.Int).Exp(c.smp.g3, r4, p)
+ c.smp.qb = new(big.Int).Exp(g, r4, p)
+ r.Exp(c.smp.g2, c.smp.secret, p)
+ c.smp.qb.Mul(c.smp.qb, r)
+ c.smp.qb.Mod(c.smp.qb, p)
+
+ s := new(big.Int)
+ s.Exp(c.smp.g2, r6, p)
+ r.Exp(g, r5, p)
+ s.Mul(r, s)
+ s.Mod(s, p)
+ r.Exp(c.smp.g3, r5, p)
+ cp := new(big.Int).SetBytes(hashMPIs(h, 5, r, s))
+
+ // D5 = r5 - r4*cP mod q and D6 = r6 - y*cP mod q
+
+ s.Mul(r4, cp)
+ r.Sub(r5, s)
+ d5 := new(big.Int).Mod(r, q)
+ if d5.Sign() < 0 {
+ d5.Add(d5, q)
+ }
+
+ s.Mul(c.smp.secret, cp)
+ r.Sub(r6, s)
+ d6 := new(big.Int).Mod(r, q)
+ if d6.Sign() < 0 {
+ d6.Add(d6, q)
+ }
+
+ var ret tlv
+ ret.typ = tlvTypeSMP2
+ ret.data = appendU32(ret.data, 11)
+ ret.data = appendMPIs(ret.data, g2b, c2, d2, g3b, c3, d3, c.smp.pb, c.smp.qb, cp, d5, d6)
+ return ret
+}
+
+func (c *Conversation) processSMP2(mpis []*big.Int) (out tlv, err error) {
+ if len(mpis) != 11 {
+ err = errors.New("otr: incorrect number of arguments in SMP2 message")
+ return
+ }
+ g2b := mpis[0]
+ c2 := mpis[1]
+ d2 := mpis[2]
+ g3b := mpis[3]
+ c3 := mpis[4]
+ d3 := mpis[5]
+ pb := mpis[6]
+ qb := mpis[7]
+ cp := mpis[8]
+ d5 := mpis[9]
+ d6 := mpis[10]
+ h := sha256.New()
+
+ r := new(big.Int).Exp(g, d2, p)
+ s := new(big.Int).Exp(g2b, c2, p)
+ r.Mul(r, s)
+ r.Mod(r, p)
+ s.SetBytes(hashMPIs(h, 3, r))
+ if c2.Cmp(s) != 0 {
+ err = errors.New("otr: ZKP c2 failed in SMP2 message")
+ return
+ }
+
+ r.Exp(g, d3, p)
+ s.Exp(g3b, c3, p)
+ r.Mul(r, s)
+ r.Mod(r, p)
+ s.SetBytes(hashMPIs(h, 4, r))
+ if c3.Cmp(s) != 0 {
+ err = errors.New("otr: ZKP c3 failed in SMP2 message")
+ return
+ }
+
+ c.smp.g2 = new(big.Int).Exp(g2b, c.smp.a2, p)
+ c.smp.g3 = new(big.Int).Exp(g3b, c.smp.a3, p)
+
+ r.Exp(g, d5, p)
+ s.Exp(c.smp.g2, d6, p)
+ r.Mul(r, s)
+ s.Exp(qb, cp, p)
+ r.Mul(r, s)
+ r.Mod(r, p)
+
+ s.Exp(c.smp.g3, d5, p)
+ t := new(big.Int).Exp(pb, cp, p)
+ s.Mul(s, t)
+ s.Mod(s, p)
+ t.SetBytes(hashMPIs(h, 5, s, r))
+ if cp.Cmp(t) != 0 {
+ err = errors.New("otr: ZKP cP failed in SMP2 message")
+ return
+ }
+
+ var randBuf [16]byte
+ r4 := c.randMPI(randBuf[:])
+ r5 := c.randMPI(randBuf[:])
+ r6 := c.randMPI(randBuf[:])
+ r7 := c.randMPI(randBuf[:])
+
+ pa := new(big.Int).Exp(c.smp.g3, r4, p)
+ r.Exp(c.smp.g2, c.smp.secret, p)
+ qa := new(big.Int).Exp(g, r4, p)
+ qa.Mul(qa, r)
+ qa.Mod(qa, p)
+
+ r.Exp(g, r5, p)
+ s.Exp(c.smp.g2, r6, p)
+ r.Mul(r, s)
+ r.Mod(r, p)
+
+ s.Exp(c.smp.g3, r5, p)
+ cp.SetBytes(hashMPIs(h, 6, s, r))
+
+ r.Mul(r4, cp)
+ d5 = new(big.Int).Sub(r5, r)
+ d5.Mod(d5, q)
+ if d5.Sign() < 0 {
+ d5.Add(d5, q)
+ }
+
+ r.Mul(c.smp.secret, cp)
+ d6 = new(big.Int).Sub(r6, r)
+ d6.Mod(d6, q)
+ if d6.Sign() < 0 {
+ d6.Add(d6, q)
+ }
+
+ r.ModInverse(qb, p)
+ qaqb := new(big.Int).Mul(qa, r)
+ qaqb.Mod(qaqb, p)
+
+ ra := new(big.Int).Exp(qaqb, c.smp.a3, p)
+ r.Exp(qaqb, r7, p)
+ s.Exp(g, r7, p)
+ cr := new(big.Int).SetBytes(hashMPIs(h, 7, s, r))
+
+ r.Mul(c.smp.a3, cr)
+ d7 := new(big.Int).Sub(r7, r)
+ d7.Mod(d7, q)
+ if d7.Sign() < 0 {
+ d7.Add(d7, q)
+ }
+
+ c.smp.g3b = g3b
+ c.smp.qaqb = qaqb
+
+ r.ModInverse(pb, p)
+ c.smp.papb = new(big.Int).Mul(pa, r)
+ c.smp.papb.Mod(c.smp.papb, p)
+ c.smp.ra = ra
+
+ out.typ = tlvTypeSMP3
+ out.data = appendU32(out.data, 8)
+ out.data = appendMPIs(out.data, pa, qa, cp, d5, d6, ra, cr, d7)
+ return
+}
+
+func (c *Conversation) processSMP3(mpis []*big.Int) (out tlv, err error) {
+ if len(mpis) != 8 {
+ err = errors.New("otr: incorrect number of arguments in SMP3 message")
+ return
+ }
+ pa := mpis[0]
+ qa := mpis[1]
+ cp := mpis[2]
+ d5 := mpis[3]
+ d6 := mpis[4]
+ ra := mpis[5]
+ cr := mpis[6]
+ d7 := mpis[7]
+ h := sha256.New()
+
+ r := new(big.Int).Exp(g, d5, p)
+ s := new(big.Int).Exp(c.smp.g2, d6, p)
+ r.Mul(r, s)
+ s.Exp(qa, cp, p)
+ r.Mul(r, s)
+ r.Mod(r, p)
+
+ s.Exp(c.smp.g3, d5, p)
+ t := new(big.Int).Exp(pa, cp, p)
+ s.Mul(s, t)
+ s.Mod(s, p)
+ t.SetBytes(hashMPIs(h, 6, s, r))
+ if t.Cmp(cp) != 0 {
+ err = errors.New("otr: ZKP cP failed in SMP3 message")
+ return
+ }
+
+ r.ModInverse(c.smp.qb, p)
+ qaqb := new(big.Int).Mul(qa, r)
+ qaqb.Mod(qaqb, p)
+
+ r.Exp(qaqb, d7, p)
+ s.Exp(ra, cr, p)
+ r.Mul(r, s)
+ r.Mod(r, p)
+
+ s.Exp(g, d7, p)
+ t.Exp(c.smp.g3a, cr, p)
+ s.Mul(s, t)
+ s.Mod(s, p)
+ t.SetBytes(hashMPIs(h, 7, s, r))
+ if t.Cmp(cr) != 0 {
+ err = errors.New("otr: ZKP cR failed in SMP3 message")
+ return
+ }
+
+ var randBuf [16]byte
+ r7 := c.randMPI(randBuf[:])
+ rb := new(big.Int).Exp(qaqb, c.smp.b3, p)
+
+ r.Exp(qaqb, r7, p)
+ s.Exp(g, r7, p)
+ cr = new(big.Int).SetBytes(hashMPIs(h, 8, s, r))
+
+ r.Mul(c.smp.b3, cr)
+ d7 = new(big.Int).Sub(r7, r)
+ d7.Mod(d7, q)
+ if d7.Sign() < 0 {
+ d7.Add(d7, q)
+ }
+
+ out.typ = tlvTypeSMP4
+ out.data = appendU32(out.data, 3)
+ out.data = appendMPIs(out.data, rb, cr, d7)
+
+ r.ModInverse(c.smp.pb, p)
+ r.Mul(pa, r)
+ r.Mod(r, p)
+ s.Exp(ra, c.smp.b3, p)
+ if r.Cmp(s) != 0 {
+ err = smpFailureError
+ }
+
+ return
+}
+
+func (c *Conversation) processSMP4(mpis []*big.Int) error {
+ if len(mpis) != 3 {
+ return errors.New("otr: incorrect number of arguments in SMP4 message")
+ }
+ rb := mpis[0]
+ cr := mpis[1]
+ d7 := mpis[2]
+ h := sha256.New()
+
+ r := new(big.Int).Exp(c.smp.qaqb, d7, p)
+ s := new(big.Int).Exp(rb, cr, p)
+ r.Mul(r, s)
+ r.Mod(r, p)
+
+ s.Exp(g, d7, p)
+ t := new(big.Int).Exp(c.smp.g3b, cr, p)
+ s.Mul(s, t)
+ s.Mod(s, p)
+ t.SetBytes(hashMPIs(h, 8, s, r))
+ if t.Cmp(cr) != 0 {
+ return errors.New("otr: ZKP cR failed in SMP4 message")
+ }
+
+ r.Exp(rb, c.smp.a3, p)
+ if r.Cmp(c.smp.papb) != 0 {
+ return smpFailureError
+ }
+
+ return nil
+}
+
+func (c *Conversation) generateSMPAbort() tlv {
+ return tlv{typ: tlvTypeSMPAbort}
+}
+
+func hashMPIs(h hash.Hash, magic byte, mpis ...*big.Int) []byte {
+ if h != nil {
+ h.Reset()
+ } else {
+ h = sha256.New()
+ }
+
+ h.Write([]byte{magic})
+ for _, mpi := range mpis {
+ h.Write(appendMPI(nil, mpi))
+ }
+ return h.Sum(nil)
+}
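
Every SMP message above carries Schnorr-style proofs of knowledge like the (c2, d2) pair produced in generateSMP1 and checked in processSMP1. A toy, runnable sketch of that prove/verify shape; the group parameters p=23, q=11, g=2 are illustration-only stand-ins for the 1536-bit MODP group the real code uses, and hashMPI is a simplified stand-in for hashMPIs.

package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math/big"
)

// hashMPI hashes a magic byte, a 4-byte big-endian length and the value's
// bytes, mirroring hashMPIs above for a single (tiny) value.
func hashMPI(magic byte, x *big.Int) *big.Int {
	b := x.Bytes()
	h := sha256.New()
	h.Write([]byte{magic, 0, 0, 0, byte(len(b))})
	h.Write(b)
	return new(big.Int).SetBytes(h.Sum(nil))
}

func main() {
	// Toy group: p = 2q+1 with q = 11, and g = 2 has order 11 mod 23.
	p, q, g := big.NewInt(23), big.NewInt(11), big.NewInt(2)

	a, _ := rand.Int(rand.Reader, q) // secret exponent
	ga := new(big.Int).Exp(g, a, p)  // public value g^a

	// Prove knowledge of a, as generateSMP1 does for a2:
	// c = H(1, g^r), d = r - a*c mod q.
	r, _ := rand.Int(rand.Reader, q)
	c := hashMPI(1, new(big.Int).Exp(g, r, p))
	d := new(big.Int).Mul(a, c)
	d.Sub(r, d)
	d.Mod(d, q) // big.Int.Mod is Euclidean, so d is already non-negative

	// Verify, as processSMP1 does: H(1, g^d * (g^a)^c) must equal c,
	// because d + a*c = r (mod q) and g has order q.
	v := new(big.Int).Exp(g, d, p)
	v.Mul(v, new(big.Int).Exp(ga, c, p))
	v.Mod(v, p)
	fmt.Println(hashMPI(1, v).Cmp(c) == 0) // expect: true
}
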
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pbkdf2/pbkdf2.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pbkdf2/pbkdf2.go
new file mode 100644
index 00000000000..593f6530084
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pbkdf2/pbkdf2.go
@@ -0,0 +1,77 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
+2898 / PKCS #5 v2.0.
+
+A key derivation function is useful when encrypting data based on a password
+or any other not-fully-random data. It uses a pseudorandom function to derive
+a secure encryption key based on the password.
+
+While v2.0 of the standard defines only one pseudorandom function to use,
+HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
+Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
+choose, you can pass the `New` functions from the different SHA packages to
+pbkdf2.Key.
+*/
+package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
+
+import (
+ "crypto/hmac"
+ "hash"
+)
+
+// Key derives a key from the password, salt and iteration count, returning a
+// []byte of length keyLen that can be used as a cryptographic key. The key is
+// derived based on the method described as PBKDF2 with the HMAC variant using
+// the supplied hash function.
+//
+// For example, to use an HMAC-SHA-1 based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
+ prf := hmac.New(h, password)
+ hashLen := prf.Size()
+ numBlocks := (keyLen + hashLen - 1) / hashLen
+
+ var buf [4]byte
+ dk := make([]byte, 0, numBlocks*hashLen)
+ U := make([]byte, hashLen)
+ for block := 1; block <= numBlocks; block++ {
+ // N.B.: || means concatenation, ^ means XOR
+ // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
+ // U_1 = PRF(password, salt || uint32(block), big-endian)
+ prf.Reset()
+ prf.Write(salt)
+ buf[0] = byte(block >> 24)
+ buf[1] = byte(block >> 16)
+ buf[2] = byte(block >> 8)
+ buf[3] = byte(block)
+ prf.Write(buf[:4])
+ dk = prf.Sum(dk)
+ T := dk[len(dk)-hashLen:]
+ copy(U, T)
+
+ // U_n = PRF(password, U_(n-1))
+ for n := 2; n <= iter; n++ {
+ prf.Reset()
+ prf.Write(U)
+ U = U[:0]
+ U = prf.Sum(U)
+ for x := range U {
+ T[x] ^= U[x]
+ }
+ }
+ }
+ return dk[:keyLen]
+}
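
A minimal usage sketch of the exported pbkdf2.Key above, deriving a 32-byte AES-256 key with HMAC-SHA-256; the password is a placeholder and, as the doc comment says, the salt should be random (crypto/rand here).

package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}
	// 4096 iterations matches the RFC 6070 test vectors below; real
	// deployments should pick a cost tuned to their hardware.
	key := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha256.New)
	fmt.Printf("%x\n", key)
}
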
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pbkdf2/pbkdf2_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pbkdf2/pbkdf2_test.go
new file mode 100644
index 00000000000..1379240610b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pbkdf2/pbkdf2_test.go
@@ -0,0 +1,157 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pbkdf2
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "crypto/sha256"
+ "hash"
+ "testing"
+)
+
+type testVector struct {
+ password string
+ salt string
+ iter int
+ output []byte
+}
+
+// Test vectors from RFC 6070, http://tools.ietf.org/html/rfc6070
+var sha1TestVectors = []testVector{
+ {
+ "password",
+ "salt",
+ 1,
+ []byte{
+ 0x0c, 0x60, 0xc8, 0x0f, 0x96, 0x1f, 0x0e, 0x71,
+ 0xf3, 0xa9, 0xb5, 0x24, 0xaf, 0x60, 0x12, 0x06,
+ 0x2f, 0xe0, 0x37, 0xa6,
+ },
+ },
+ {
+ "password",
+ "salt",
+ 2,
+ []byte{
+ 0xea, 0x6c, 0x01, 0x4d, 0xc7, 0x2d, 0x6f, 0x8c,
+ 0xcd, 0x1e, 0xd9, 0x2a, 0xce, 0x1d, 0x41, 0xf0,
+ 0xd8, 0xde, 0x89, 0x57,
+ },
+ },
+ {
+ "password",
+ "salt",
+ 4096,
+ []byte{
+ 0x4b, 0x00, 0x79, 0x01, 0xb7, 0x65, 0x48, 0x9a,
+ 0xbe, 0xad, 0x49, 0xd9, 0x26, 0xf7, 0x21, 0xd0,
+ 0x65, 0xa4, 0x29, 0xc1,
+ },
+ },
+ // // This one takes too long
+ // {
+ // "password",
+ // "salt",
+ // 16777216,
+ // []byte{
+ // 0xee, 0xfe, 0x3d, 0x61, 0xcd, 0x4d, 0xa4, 0xe4,
+ // 0xe9, 0x94, 0x5b, 0x3d, 0x6b, 0xa2, 0x15, 0x8c,
+ // 0x26, 0x34, 0xe9, 0x84,
+ // },
+ // },
+ {
+ "passwordPASSWORDpassword",
+ "saltSALTsaltSALTsaltSALTsaltSALTsalt",
+ 4096,
+ []byte{
+ 0x3d, 0x2e, 0xec, 0x4f, 0xe4, 0x1c, 0x84, 0x9b,
+ 0x80, 0xc8, 0xd8, 0x36, 0x62, 0xc0, 0xe4, 0x4a,
+ 0x8b, 0x29, 0x1a, 0x96, 0x4c, 0xf2, 0xf0, 0x70,
+ 0x38,
+ },
+ },
+ {
+ "pass\000word",
+ "sa\000lt",
+ 4096,
+ []byte{
+ 0x56, 0xfa, 0x6a, 0xa7, 0x55, 0x48, 0x09, 0x9d,
+ 0xcc, 0x37, 0xd7, 0xf0, 0x34, 0x25, 0xe0, 0xc3,
+ },
+ },
+}
+
+// Test vectors from
+// http://stackoverflow.com/questions/5130513/pbkdf2-hmac-sha2-test-vectors
+var sha256TestVectors = []testVector{
+ {
+ "password",
+ "salt",
+ 1,
+ []byte{
+ 0x12, 0x0f, 0xb6, 0xcf, 0xfc, 0xf8, 0xb3, 0x2c,
+ 0x43, 0xe7, 0x22, 0x52, 0x56, 0xc4, 0xf8, 0x37,
+ 0xa8, 0x65, 0x48, 0xc9,
+ },
+ },
+ {
+ "password",
+ "salt",
+ 2,
+ []byte{
+ 0xae, 0x4d, 0x0c, 0x95, 0xaf, 0x6b, 0x46, 0xd3,
+ 0x2d, 0x0a, 0xdf, 0xf9, 0x28, 0xf0, 0x6d, 0xd0,
+ 0x2a, 0x30, 0x3f, 0x8e,
+ },
+ },
+ {
+ "password",
+ "salt",
+ 4096,
+ []byte{
+ 0xc5, 0xe4, 0x78, 0xd5, 0x92, 0x88, 0xc8, 0x41,
+ 0xaa, 0x53, 0x0d, 0xb6, 0x84, 0x5c, 0x4c, 0x8d,
+ 0x96, 0x28, 0x93, 0xa0,
+ },
+ },
+ {
+ "passwordPASSWORDpassword",
+ "saltSALTsaltSALTsaltSALTsaltSALTsalt",
+ 4096,
+ []byte{
+ 0x34, 0x8c, 0x89, 0xdb, 0xcb, 0xd3, 0x2b, 0x2f,
+ 0x32, 0xd8, 0x14, 0xb8, 0x11, 0x6e, 0x84, 0xcf,
+ 0x2b, 0x17, 0x34, 0x7e, 0xbc, 0x18, 0x00, 0x18,
+ 0x1c,
+ },
+ },
+ {
+ "pass\000word",
+ "sa\000lt",
+ 4096,
+ []byte{
+ 0x89, 0xb6, 0x9d, 0x05, 0x16, 0xf8, 0x29, 0x89,
+ 0x3c, 0x69, 0x62, 0x26, 0x65, 0x0a, 0x86, 0x87,
+ },
+ },
+}
+
+func testHash(t *testing.T, h func() hash.Hash, hashName string, vectors []testVector) {
+ for i, v := range vectors {
+ o := Key([]byte(v.password), []byte(v.salt), v.iter, len(v.output), h)
+ if !bytes.Equal(o, v.output) {
+ t.Errorf("%s %d: expected %x, got %x", hashName, i, v.output, o)
+ }
+ }
+}
+
+func TestWithHMACSHA1(t *testing.T) {
+ testHash(t, sha1.New, "SHA1", sha1TestVectors)
+}
+
+func TestWithHMACSHA256(t *testing.T) {
+ testHash(t, sha256.New, "SHA256", sha256TestVectors)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/bmp-string.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/bmp-string.go
new file mode 100644
index 00000000000..284d2a68f1e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/bmp-string.go
@@ -0,0 +1,50 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "errors"
+ "unicode/utf16"
+)
+
+// bmpString returns s encoded in UCS-2 with a zero terminator.
+func bmpString(s string) ([]byte, error) {
+ // References:
+ // https://tools.ietf.org/html/rfc7292#appendix-B.1
+ // http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
+ // - non-BMP characters are encoded in UTF-16 by using a surrogate pair of 16-bit codes;
+ // utf16.EncodeRune returns 0xfffd if the rune does not need special encoding
+ // - the above RFC provides the info that BMPStrings are NULL terminated.
+
+ ret := make([]byte, 0, 2*len(s)+2)
+
+ for _, r := range s {
+ if t, _ := utf16.EncodeRune(r); t != 0xfffd {
+ return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2")
+ }
+ ret = append(ret, byte(r/256), byte(r%256))
+ }
+
+ return append(ret, 0, 0), nil
+}
+
+func decodeBMPString(bmpString []byte) (string, error) {
+ if len(bmpString)%2 != 0 {
+ return "", errors.New("pkcs12: odd-length BMP string")
+ }
+
+ // strip terminator if present
+ if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
+ bmpString = bmpString[:l-2]
+ }
+
+ s := make([]uint16, 0, len(bmpString)/2)
+ for len(bmpString) > 0 {
+ s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
+ bmpString = bmpString[2:]
+ }
+
+ return string(utf16.Decode(s)), nil
+}
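
bmpString and decodeBMPString are unexported, so here is a standalone sketch of the same UCS-2 framing (the helper name ucs2 is hypothetical): UTF-16BE code units for BMP-only input plus a two-byte NUL terminator, matching the "Beavis" vector in the tests below.

package main

import (
	"fmt"
	"unicode/utf16"
)

// ucs2 mirrors bmpString above. utf16.EncodeRune returns 0xfffd for runes
// that need no surrogate pair, so any other result means non-BMP input.
func ucs2(s string) ([]byte, bool) {
	out := make([]byte, 0, 2*len(s)+2)
	for _, r := range s {
		if t, _ := utf16.EncodeRune(r); t != 0xfffd {
			return nil, false // outside the Basic Multilingual Plane
		}
		out = append(out, byte(r>>8), byte(r))
	}
	return append(out, 0, 0), true
}

func main() {
	b, ok := ucs2("Beavis")
	fmt.Printf("%x %v\n", b, ok) // expect: 0042006500610076006900730000 true
}
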
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/bmp-string_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/bmp-string_test.go
new file mode 100644
index 00000000000..7fca55f4e8b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/bmp-string_test.go
@@ -0,0 +1,63 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+)
+
+var bmpStringTests = []struct {
+ in string
+ expectedHex string
+ shouldFail bool
+}{
+ {"", "0000", false},
+ // Example from https://tools.ietf.org/html/rfc7292#appendix-B.
+ {"Beavis", "0042006500610076006900730000", false},
+ // Some characters from the "Letterlike Symbols Unicode block".
+ {"\u2115 - Double-struck N", "21150020002d00200044006f00750062006c0065002d00730074007200750063006b0020004e0000", false},
+ // Any character outside the BMP should trigger an error.
+ {"\U0001f000 East wind (Mahjong)", "", true},
+}
+
+func TestBMPString(t *testing.T) {
+ for i, test := range bmpStringTests {
+ expected, err := hex.DecodeString(test.expectedHex)
+ if err != nil {
+ t.Fatalf("#%d: failed to decode expectation", i)
+ }
+
+ out, err := bmpString(test.in)
+ if err == nil && test.shouldFail {
+ t.Errorf("#%d: expected to fail, but produced %x", i, out)
+ continue
+ }
+
+ if err != nil && !test.shouldFail {
+ t.Errorf("#%d: failed unexpectedly: %s", i, err)
+ continue
+ }
+
+ if !test.shouldFail {
+ if !bytes.Equal(out, expected) {
+ t.Errorf("#%d: expected %s, got %x", i, test.expectedHex, out)
+ continue
+ }
+
+ roundTrip, err := decodeBMPString(out)
+ if err != nil {
+ t.Errorf("#%d: decoding output gave an error: %s", i, err)
+ continue
+ }
+
+ if roundTrip != test.in {
+ t.Errorf("#%d: decoding output resulted in %q, but it should have been %q", i, roundTrip, test.in)
+ continue
+ }
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/crypto.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/crypto.go
new file mode 100644
index 00000000000..4bd4470ec04
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/crypto.go
@@ -0,0 +1,131 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "bytes"
+ "crypto/cipher"
+ "crypto/des"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "errors"
+
+ "golang.org/x/crypto/pkcs12/internal/rc2"
+)
+
+var (
+ oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
+ oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6})
+)
+
+// pbeCipher is an abstraction of a PKCS#12 cipher.
+type pbeCipher interface {
+ // create returns a cipher.Block given a key.
+ create(key []byte) (cipher.Block, error)
+ // deriveKey returns a key derived from the given password and salt.
+ deriveKey(salt, password []byte, iterations int) []byte
+ // deriveIV returns an IV derived from the given password and salt.
+ deriveIV(salt, password []byte, iterations int) []byte
+}
+
+type shaWithTripleDESCBC struct{}
+
+func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) {
+ return des.NewTripleDESCipher(key)
+}
+
+func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24)
+}
+
+func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
+}
+
+type shaWith40BitRC2CBC struct{}
+
+func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) {
+ return rc2.New(key, len(key)*8)
+}
+
+func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5)
+}
+
+func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte {
+ return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8)
+}
+
+type pbeParams struct {
+ Salt []byte
+ Iterations int
+}
+
+func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) {
+ var cipherType pbeCipher
+
+ switch {
+ case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC):
+ cipherType = shaWithTripleDESCBC{}
+ case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC):
+ cipherType = shaWith40BitRC2CBC{}
+ default:
+ return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported")
+ }
+
+ var params pbeParams
+ if err := unmarshal(algorithm.Parameters.FullBytes, &params); err != nil {
+ return nil, 0, err
+ }
+
+ key := cipherType.deriveKey(params.Salt, password, params.Iterations)
+ iv := cipherType.deriveIV(params.Salt, password, params.Iterations)
+
+ block, err := cipherType.create(key)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil
+}
+
+func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) {
+ cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password)
+ if err != nil {
+ return nil, err
+ }
+
+ encrypted := info.Data()
+ if len(encrypted) == 0 {
+ return nil, errors.New("pkcs12: empty encrypted data")
+ }
+ if len(encrypted)%blockSize != 0 {
+ return nil, errors.New("pkcs12: input is not a multiple of the block size")
+ }
+ decrypted = make([]byte, len(encrypted))
+ cbc.CryptBlocks(decrypted, encrypted)
+
+ psLen := int(decrypted[len(decrypted)-1])
+ if psLen == 0 || psLen > blockSize {
+ return nil, ErrDecryption
+ }
+
+ if len(decrypted) < psLen {
+ return nil, ErrDecryption
+ }
+ ps := decrypted[len(decrypted)-psLen:]
+ decrypted = decrypted[:len(decrypted)-psLen]
+ if !bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) {
+ return nil, ErrDecryption
+ }
+
+ return
+}
+
+// decryptable abstracts an object that contains ciphertext.
+type decryptable interface {
+ Algorithm() pkix.AlgorithmIdentifier
+ Data() []byte
+}
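
pbDecrypt above finishes by validating CBC padding: the final byte gives the padding length, and every padding byte must carry that value. A standalone sketch of just that check (the helper name stripPadding is hypothetical); note that, like the original, the comparison is not constant-time, so callers must not expose it as a padding oracle.

package main

import (
	"bytes"
	"errors"
	"fmt"
)

// stripPadding mirrors the tail of pbDecrypt: read the padding length from
// the last byte, validate every padding byte, and strip the padding.
func stripPadding(decrypted []byte, blockSize int) ([]byte, error) {
	if len(decrypted) == 0 {
		return nil, errors.New("empty input")
	}
	psLen := int(decrypted[len(decrypted)-1])
	if psLen == 0 || psLen > blockSize || psLen > len(decrypted) {
		return nil, errors.New("decryption error")
	}
	ps := decrypted[len(decrypted)-psLen:]
	if !bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) {
		return nil, errors.New("decryption error")
	}
	return decrypted[:len(decrypted)-psLen], nil
}

func main() {
	out, err := stripPadding([]byte("A secret!\x07\x07\x07\x07\x07\x07\x07"), 8)
	fmt.Printf("%q %v\n", out, err) // expect: "A secret!" <nil>
}
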
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/crypto_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/crypto_test.go
new file mode 100644
index 00000000000..eb4dae8fceb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/crypto_test.go
@@ -0,0 +1,125 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "bytes"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "testing"
+)
+
+var sha1WithTripleDES = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
+
+func TestPbDecrypterFor(t *testing.T) {
+ params, _ := asn1.Marshal(pbeParams{
+ Salt: []byte{1, 2, 3, 4, 5, 6, 7, 8},
+ Iterations: 2048,
+ })
+ alg := pkix.AlgorithmIdentifier{
+ Algorithm: asn1.ObjectIdentifier([]int{1, 2, 3}),
+ Parameters: asn1.RawValue{
+ FullBytes: params,
+ },
+ }
+
+ pass, _ := bmpString("Sesame open")
+
+ _, _, err := pbDecrypterFor(alg, pass)
+ if _, ok := err.(NotImplementedError); !ok {
+ t.Errorf("expected not implemented error, got: %T %s", err, err)
+ }
+
+ alg.Algorithm = sha1WithTripleDES
+ cbc, blockSize, err := pbDecrypterFor(alg, pass)
+ if err != nil {
+ t.Errorf("unexpected error from pbDecrypterFor %v", err)
+ }
+ if blockSize != 8 {
+ t.Errorf("unexpected block size %d, wanted 8", blockSize)
+ }
+
+ plaintext := []byte{1, 2, 3, 4, 5, 6, 7, 8}
+ expectedCiphertext := []byte{185, 73, 135, 249, 137, 1, 122, 247}
+ ciphertext := make([]byte, len(plaintext))
+ cbc.CryptBlocks(ciphertext, plaintext)
+
+ if !bytes.Equal(ciphertext, expectedCiphertext) {
+ t.Errorf("bad ciphertext, got %x but wanted %x", ciphertext, expectedCiphertext)
+ }
+}
+
+var pbDecryptTests = []struct {
+ in []byte
+ expected []byte
+ expectedError error
+}{
+ {
+ []byte("\x33\x73\xf3\x9f\xda\x49\xae\xfc\xa0\x9a\xdf\x5a\x58\xa0\xea\x46"), // 7 padding bytes
+ []byte("A secret!"),
+ nil,
+ },
+ {
+ []byte("\x33\x73\xf3\x9f\xda\x49\xae\xfc\x96\x24\x2f\x71\x7e\x32\x3f\xe7"), // 8 padding bytes
+ []byte("A secret"),
+ nil,
+ },
+ {
+ []byte("\x35\x0c\xc0\x8d\xab\xa9\x5d\x30\x7f\x9a\xec\x6a\xd8\x9b\x9c\xd9"), // 9 padding bytes, incorrect
+ nil,
+ ErrDecryption,
+ },
+ {
+ []byte("\xb2\xf9\x6e\x06\x60\xae\x20\xcf\x08\xa0\x7b\xd9\x6b\x20\xef\x41"), // incorrect padding bytes: [ ... 0x04 0x02 ]
+ nil,
+ ErrDecryption,
+ },
+}
+
+func TestPbDecrypt(t *testing.T) {
+ for i, test := range pbDecryptTests {
+ decryptable := testDecryptable{
+ data: test.in,
+ algorithm: pkix.AlgorithmIdentifier{
+ Algorithm: sha1WithTripleDES,
+ Parameters: pbeParams{
+ Salt: []byte("\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8"),
+ Iterations: 4096,
+ }.RawASN1(),
+ },
+ }
+ password, _ := bmpString("sesame")
+
+ plaintext, err := pbDecrypt(decryptable, password)
+ if err != test.expectedError {
+ t.Errorf("#%d: got error %q, but wanted %q", i, err, test.expectedError)
+ continue
+ }
+
+ if !bytes.Equal(plaintext, test.expected) {
+ t.Errorf("#%d: got %x, but wanted %x", i, plaintext, test.expected)
+ }
+ }
+}
+
+type testDecryptable struct {
+ data []byte
+ algorithm pkix.AlgorithmIdentifier
+}
+
+func (d testDecryptable) Algorithm() pkix.AlgorithmIdentifier { return d.algorithm }
+func (d testDecryptable) Data() []byte { return d.data }
+
+func (params pbeParams) RawASN1() (raw asn1.RawValue) {
+ asn1Bytes, err := asn1.Marshal(params)
+ if err != nil {
+ panic(err)
+ }
+ _, err = asn1.Unmarshal(asn1Bytes, &raw)
+ if err != nil {
+ panic(err)
+ }
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/errors.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/errors.go
new file mode 100644
index 00000000000..7377ce6fb2b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/errors.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import "errors"
+
+var (
+ // ErrDecryption represents a failure to decrypt the input.
+ ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding")
+
+ // ErrIncorrectPassword is returned when an incorrect password is detected.
+ // Usually, P12/PFX data is signed to be able to verify the password.
+ ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect")
+)
+
+// NotImplementedError indicates that the input is not currently supported.
+type NotImplementedError string
+
+func (e NotImplementedError) Error() string {
+ return "pkcs12: " + string(e)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go
new file mode 100644
index 00000000000..3347f338c18
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/bench_test.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rc2
+
+import (
+ "testing"
+)
+
+func BenchmarkEncrypt(b *testing.B) {
+ r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64)
+ b.ResetTimer()
+ var src [8]byte
+ for i := 0; i < b.N; i++ {
+ r.Encrypt(src[:], src[:])
+ }
+}
+
+func BenchmarkDecrypt(b *testing.B) {
+ r, _ := New([]byte{0, 0, 0, 0, 0, 0, 0, 0}, 64)
+ b.ResetTimer()
+ var src [8]byte
+ for i := 0; i < b.N; i++ {
+ r.Decrypt(src[:], src[:])
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
new file mode 100644
index 00000000000..8c7090258c5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
@@ -0,0 +1,274 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rc2 implements the RC2 cipher
+/*
+https://www.ietf.org/rfc/rfc2268.txt
+http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf
+
+This code is licensed under the MIT license.
+*/
+package rc2
+
+import (
+ "crypto/cipher"
+ "encoding/binary"
+)
+
+// BlockSize is the RC2 block size in bytes.
+const BlockSize = 8
+
+type rc2Cipher struct {
+ k [64]uint16
+}
+
+// New returns a new RC2 cipher with the given key and effective key length t1.
+func New(key []byte, t1 int) (cipher.Block, error) {
+ // TODO(dgryski): error checking for key length
+ return &rc2Cipher{
+ k: expandKey(key, t1),
+ }, nil
+}
+
+func (*rc2Cipher) BlockSize() int { return BlockSize }
+
+var piTable = [256]byte{
+ 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d,
+ 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2,
+ 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32,
+ 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82,
+ 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc,
+ 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26,
+ 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03,
+ 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7,
+ 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a,
+ 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec,
+ 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39,
+ 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31,
+ 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9,
+ 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9,
+ 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e,
+ 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad,
+}
+
+func expandKey(key []byte, t1 int) [64]uint16 {
+
+ l := make([]byte, 128)
+ copy(l, key)
+
+ var t = len(key)
+ var t8 = (t1 + 7) / 8
+ var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8))))
+
+ for i := len(key); i < 128; i++ {
+ l[i] = piTable[l[i-1]+l[uint8(i-t)]]
+ }
+
+ l[128-t8] = piTable[l[128-t8]&tm]
+
+ for i := 127 - t8; i >= 0; i-- {
+ l[i] = piTable[l[i+1]^l[i+t8]]
+ }
+
+ var k [64]uint16
+
+ for i := range k {
+ k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256
+ }
+
+ return k
+}
+
+func rotl16(x uint16, b uint) uint16 {
+ return (x >> (16 - b)) | (x << b)
+}
+
+func (c *rc2Cipher) Encrypt(dst, src []byte) {
+
+ r0 := binary.LittleEndian.Uint16(src[0:])
+ r1 := binary.LittleEndian.Uint16(src[2:])
+ r2 := binary.LittleEndian.Uint16(src[4:])
+ r3 := binary.LittleEndian.Uint16(src[6:])
+
+ var j int
+
+ for j <= 16 {
+ // mix r0
+ r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
+ r0 = rotl16(r0, 1)
+ j++
+
+ // mix r1
+ r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
+ r1 = rotl16(r1, 2)
+ j++
+
+ // mix r2
+ r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
+ r2 = rotl16(r2, 3)
+ j++
+
+ // mix r3
+ r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
+ r3 = rotl16(r3, 5)
+ j++
+
+ }
+
+ r0 = r0 + c.k[r3&63]
+ r1 = r1 + c.k[r0&63]
+ r2 = r2 + c.k[r1&63]
+ r3 = r3 + c.k[r2&63]
+
+ for j <= 40 {
+
+ // mix r0
+ r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
+ r0 = rotl16(r0, 1)
+ j++
+
+ // mix r1
+ r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
+ r1 = rotl16(r1, 2)
+ j++
+
+ // mix r2
+ r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
+ r2 = rotl16(r2, 3)
+ j++
+
+ // mix r3
+ r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
+ r3 = rotl16(r3, 5)
+ j++
+
+ }
+
+ r0 = r0 + c.k[r3&63]
+ r1 = r1 + c.k[r0&63]
+ r2 = r2 + c.k[r1&63]
+ r3 = r3 + c.k[r2&63]
+
+ for j <= 60 {
+
+ // mix r0
+ r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1)
+ r0 = rotl16(r0, 1)
+ j++
+
+ // mix r1
+ r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2)
+ r1 = rotl16(r1, 2)
+ j++
+
+ // mix r2
+ r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3)
+ r2 = rotl16(r2, 3)
+ j++
+
+ // mix r3
+ r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0)
+ r3 = rotl16(r3, 5)
+ j++
+ }
+
+ binary.LittleEndian.PutUint16(dst[0:], r0)
+ binary.LittleEndian.PutUint16(dst[2:], r1)
+ binary.LittleEndian.PutUint16(dst[4:], r2)
+ binary.LittleEndian.PutUint16(dst[6:], r3)
+}
+
+func (c *rc2Cipher) Decrypt(dst, src []byte) {
+
+ r0 := binary.LittleEndian.Uint16(src[0:])
+ r1 := binary.LittleEndian.Uint16(src[2:])
+ r2 := binary.LittleEndian.Uint16(src[4:])
+ r3 := binary.LittleEndian.Uint16(src[6:])
+
+ j := 63
+
+ for j >= 44 {
+ // unmix r3
+ r3 = rotl16(r3, 16-5)
+ r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
+ j--
+
+ // unmix r2
+ r2 = rotl16(r2, 16-3)
+ r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
+ j--
+
+ // unmix r1
+ r1 = rotl16(r1, 16-2)
+ r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
+ j--
+
+ // unmix r0
+ r0 = rotl16(r0, 16-1)
+ r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
+ j--
+ }
+
+ r3 = r3 - c.k[r2&63]
+ r2 = r2 - c.k[r1&63]
+ r1 = r1 - c.k[r0&63]
+ r0 = r0 - c.k[r3&63]
+
+ for j >= 20 {
+ // unmix r3
+ r3 = rotl16(r3, 16-5)
+ r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
+ j--
+
+ // unmix r2
+ r2 = rotl16(r2, 16-3)
+ r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
+ j--
+
+ // unmix r1
+ r1 = rotl16(r1, 16-2)
+ r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
+ j--
+
+ // unmix r0
+ r0 = rotl16(r0, 16-1)
+ r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
+ j--
+
+ }
+
+ r3 = r3 - c.k[r2&63]
+ r2 = r2 - c.k[r1&63]
+ r1 = r1 - c.k[r0&63]
+ r0 = r0 - c.k[r3&63]
+
+ for j >= 0 {
+
+ // unmix r3
+ r3 = rotl16(r3, 16-5)
+ r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0)
+ j--
+
+ // unmix r2
+ r2 = rotl16(r2, 16-3)
+ r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3)
+ j--
+
+ // unmix r1
+ r1 = rotl16(r1, 16-2)
+ r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2)
+ j--
+
+ // unmix r0
+ r0 = rotl16(r0, 16-1)
+ r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1)
+ j--
+
+ }
+
+ binary.LittleEndian.PutUint16(dst[0:], r0)
+ binary.LittleEndian.PutUint16(dst[2:], r1)
+ binary.LittleEndian.PutUint16(dst[4:], r2)
+ binary.LittleEndian.PutUint16(dst[6:], r3)
+}
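Because the package sits under internal/, only the pkcs12 package can import
it. A hypothetical in-package round-trip sketch (key bytes chosen arbitrarily),
not part of the vendored file:

    package rc2

    import "bytes"

    // roundTripSketch drives the cipher one 8-byte block at a time.
    func roundTripSketch() bool {
        key := []byte{0x88, 0xbc, 0xa9, 0x0e, 0x90, 0x87, 0x5a, 0x7f}
        block, err := New(key, 64) // effective key length in bits
        if err != nil {
            return false
        }
        plain := []byte("8 bytes!") // exactly one BlockSize block
        ct := make([]byte, BlockSize)
        pt := make([]byte, BlockSize)
        block.Encrypt(ct, plain)
        block.Decrypt(pt, ct)
        return bytes.Equal(pt, plain) // true: Decrypt inverts Encrypt
    }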
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go
new file mode 100644
index 00000000000..8a49dfaf3c6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2_test.go
@@ -0,0 +1,93 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package rc2
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+)
+
+func TestEncryptDecrypt(t *testing.T) {
+
+ // TODO(dgryski): add the rest of the test vectors from the RFC
+ var tests = []struct {
+ key string
+ plain string
+ cipher string
+ t1 int
+ }{
+ {
+ "0000000000000000",
+ "0000000000000000",
+ "ebb773f993278eff",
+ 63,
+ },
+ {
+ "ffffffffffffffff",
+ "ffffffffffffffff",
+ "278b27e42e2f0d49",
+ 64,
+ },
+ {
+ "3000000000000000",
+ "1000000000000001",
+ "30649edf9be7d2c2",
+ 64,
+ },
+ {
+ "88",
+ "0000000000000000",
+ "61a8a244adacccf0",
+ 64,
+ },
+ {
+ "88bca90e90875a",
+ "0000000000000000",
+ "6ccf4308974c267f",
+ 64,
+ },
+ {
+ "88bca90e90875a7f0f79c384627bafb2",
+ "0000000000000000",
+ "1a807d272bbe5db1",
+ 64,
+ },
+ {
+ "88bca90e90875a7f0f79c384627bafb2",
+ "0000000000000000",
+ "2269552ab0f85ca6",
+ 128,
+ },
+ {
+ "88bca90e90875a7f0f79c384627bafb216f80a6f85920584c42fceb0be255daf1e",
+ "0000000000000000",
+ "5b78d3a43dfff1f1",
+ 129,
+ },
+ }
+
+ for _, tt := range tests {
+ k, _ := hex.DecodeString(tt.key)
+ p, _ := hex.DecodeString(tt.plain)
+ c, _ := hex.DecodeString(tt.cipher)
+
+ b, _ := New(k, tt.t1)
+
+ var dst [8]byte
+
+ b.Encrypt(dst[:], p)
+
+ if !bytes.Equal(dst[:], c) {
+ t.Errorf("encrypt failed: got % 2x wanted % 2x\n", dst, c)
+ }
+
+ b.Decrypt(dst[:], c)
+
+ if !bytes.Equal(dst[:], p) {
+ t.Errorf("decrypt failed: got % 2x wanted % 2x\n", dst, p)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/mac.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/mac.go
new file mode 100644
index 00000000000..5f38aa7de83
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/mac.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+)
+
+type macData struct {
+ Mac digestInfo
+ MacSalt []byte
+ Iterations int `asn1:"optional,default:1"`
+}
+
+// from PKCS#7:
+type digestInfo struct {
+ Algorithm pkix.AlgorithmIdentifier
+ Digest []byte
+}
+
+var (
+ oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})
+)
+
+func verifyMac(macData *macData, message, password []byte) error {
+ if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) {
+ return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String())
+ }
+
+ key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20)
+
+ mac := hmac.New(sha1.New, key)
+ mac.Write(message)
+ expectedMAC := mac.Sum(nil)
+
+ if !hmac.Equal(macData.Mac.Digest, expectedMAC) {
+ return ErrIncorrectPassword
+ }
+ return nil
+}
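A hedged sketch of how this verification is wired up by getSafeContents in
pkcs12.go (later in this patch): the password is BMP-encoded by bmpString
(vendored in bmp-string.go) and checked against the raw AuthSafe content.
Illustrative only, not vendored code:

    package pkcs12

    // macCheckSketch mirrors the verification step in getSafeContents.
    func macCheckSketch(pfx *pfxPdu, password string) error {
        encoded, err := bmpString(password) // UCS-2/BMP encoding per PKCS#12
        if err != nil {
            return err
        }
        return verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, encoded)
    }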
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/mac_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/mac_test.go
new file mode 100644
index 00000000000..1ed4ff21e14
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/mac_test.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "encoding/asn1"
+ "testing"
+)
+
+func TestVerifyMac(t *testing.T) {
+ td := macData{
+ Mac: digestInfo{
+ Digest: []byte{0x18, 0x20, 0x3d, 0xff, 0x1e, 0x16, 0xf4, 0x92, 0xf2, 0xaf, 0xc8, 0x91, 0xa9, 0xba, 0xd6, 0xca, 0x9d, 0xee, 0x51, 0x93},
+ },
+ MacSalt: []byte{1, 2, 3, 4, 5, 6, 7, 8},
+ Iterations: 2048,
+ }
+
+ message := []byte{11, 12, 13, 14, 15}
+ password, _ := bmpString("")
+
+ td.Mac.Algorithm.Algorithm = asn1.ObjectIdentifier([]int{1, 2, 3})
+ err := verifyMac(&td, message, password)
+ if _, ok := err.(NotImplementedError); !ok {
+ t.Errorf("err: %v", err)
+ }
+
+ td.Mac.Algorithm.Algorithm = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26})
+ err = verifyMac(&td, message, password)
+ if err != ErrIncorrectPassword {
+ t.Errorf("Expected incorrect password, got err: %v", err)
+ }
+
+ password, _ = bmpString("Sesame open")
+ err = verifyMac(&td, message, password)
+ if err != nil {
+ t.Errorf("err: %v", err)
+ }
+
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pbkdf.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pbkdf.go
new file mode 100644
index 00000000000..5c419d41e32
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pbkdf.go
@@ -0,0 +1,170 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "math/big"
+)
+
+var (
+ one = big.NewInt(1)
+)
+
+// sha1Sum returns the SHA-1 hash of in.
+func sha1Sum(in []byte) []byte {
+ sum := sha1.Sum(in)
+ return sum[:]
+}
+
+// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of
+// repeats of pattern.
+func fillWithRepeats(pattern []byte, v int) []byte {
+ if len(pattern) == 0 {
+ return nil
+ }
+ outputLen := v * ((len(pattern) + v - 1) / v)
+ return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen]
+}
+
+func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) {
+ // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments
+
+ // Let H be a hash function built around a compression function f:
+
+ // Z_2^u x Z_2^v -> Z_2^u
+
+ // (that is, H has a chaining variable and output of length u bits, and
+ // the message input to the compression function of H is v bits). The
+ // values for u and v are as follows:
+
+ // HASH FUNCTION VALUE u VALUE v
+ // MD2, MD5 128 512
+ // SHA-1 160 512
+ // SHA-224 224 512
+ // SHA-256 256 512
+ // SHA-384 384 1024
+ // SHA-512 512 1024
+ // SHA-512/224 224 1024
+ // SHA-512/256 256 1024
+
+ // Furthermore, let r be the iteration count.
+
+ // We assume here that u and v are both multiples of 8, as are the
+ // lengths of the password and salt strings (which we denote by p and s,
+ // respectively) and the number n of pseudorandom bits required. In
+ // addition, u and v are of course non-zero.
+
+ // For information on security considerations for MD5 [19], see [25] and
+ // [1], and on those for MD2, see [18].
+
+ // The following procedure can be used to produce pseudorandom bits for
+ // a particular "purpose" that is identified by a byte called "ID".
+ // This standard specifies 3 different values for the ID byte:
+
+ // 1. If ID=1, then the pseudorandom bits being produced are to be used
+ // as key material for performing encryption or decryption.
+
+ // 2. If ID=2, then the pseudorandom bits being produced are to be used
+ // as an IV (Initial Value) for encryption or decryption.
+
+ // 3. If ID=3, then the pseudorandom bits being produced are to be used
+ // as an integrity key for MACing.
+
+	// 1.  Construct a string, D (the "diversifier"), by concatenating v/8
+	//     copies of ID. (In this implementation u and v are byte counts, so D is v copies of the ID byte.)
+ var D []byte
+ for i := 0; i < v; i++ {
+ D = append(D, ID)
+ }
+
+ // 2. Concatenate copies of the salt together to create a string S of
+ // length v(ceiling(s/v)) bits (the final copy of the salt may be
+ // truncated to create S). Note that if the salt is the empty
+ // string, then so is S.
+
+ S := fillWithRepeats(salt, v)
+
+ // 3. Concatenate copies of the password together to create a string P
+ // of length v(ceiling(p/v)) bits (the final copy of the password
+ // may be truncated to create P). Note that if the password is the
+ // empty string, then so is P.
+
+ P := fillWithRepeats(password, v)
+
+ // 4. Set I=S||P to be the concatenation of S and P.
+ I := append(S, P...)
+
+ // 5. Set c=ceiling(n/u).
+ c := (size + u - 1) / u
+
+ // 6. For i=1, 2, ..., c, do the following:
+	A := make([]byte, c*u)
+ var IjBuf []byte
+ for i := 0; i < c; i++ {
+		// A.  Set A_i=H^r(D||I), i.e., the r-th hash of D||I:
+		// H(H(H(... H(D||I))))
+ Ai := hash(append(D, I...))
+ for j := 1; j < r; j++ {
+ Ai = hash(Ai)
+ }
+		copy(A[i*u:], Ai)
+
+ if i < c-1 { // skip on last iteration
+ // B. Concatenate copies of Ai to create a string B of length v
+ // bits (the final copy of Ai may be truncated to create B).
+ var B []byte
+ for len(B) < v {
+ B = append(B, Ai[:]...)
+ }
+ B = B[:v]
+
+ // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
+ // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by
+ // setting I_j=(I_j+B+1) mod 2^v for each j.
+ {
+ Bbi := new(big.Int).SetBytes(B)
+ Ij := new(big.Int)
+
+ for j := 0; j < len(I)/v; j++ {
+ Ij.SetBytes(I[j*v : (j+1)*v])
+ Ij.Add(Ij, Bbi)
+ Ij.Add(Ij, one)
+ Ijb := Ij.Bytes()
+ // We expect Ijb to be exactly v bytes,
+ // if it is longer or shorter we must
+ // adjust it accordingly.
+ if len(Ijb) > v {
+ Ijb = Ijb[len(Ijb)-v:]
+ }
+ if len(Ijb) < v {
+ if IjBuf == nil {
+ IjBuf = make([]byte, v)
+ }
+ bytesShort := v - len(Ijb)
+ for i := 0; i < bytesShort; i++ {
+ IjBuf[i] = 0
+ }
+ copy(IjBuf[bytesShort:], Ijb)
+ Ijb = IjBuf
+ }
+ copy(I[j*v:(j+1)*v], Ijb)
+ }
+ }
+ }
+ }
+ // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom
+ // bit string, A.
+
+ // 8. Use the first n bits of A as the output of this entire process.
+ return A[:size]
+
+ // If the above process is being used to generate a DES key, the process
+ // should be used to create 64 random bits, and the key's parity bits
+ // should be set after the 64 bits have been produced. Similar concerns
+ // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any
+ // similar keys with parity bits "built into them".
+}
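To make the parameters concrete: the SHA-1 MAC-key derivation in mac.go fixes
u=20 and v=64 (SHA-1's digest and block sizes in bytes), ID=3 (the
integrity-key purpose byte), and asks for 20 output bytes. A hypothetical
wrapper, not vendored code:

    package pkcs12

    // deriveMacKeySketch shows the parameter mapping used by verifyMac.
    func deriveMacKeySketch(salt, password []byte, iterations int) []byte {
        // hash=SHA-1, u=20 digest bytes, v=64 block bytes,
        // ID=3 integrity key, size=20 requested key bytes.
        return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 3, 20)
    }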
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pbkdf_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pbkdf_test.go
new file mode 100644
index 00000000000..262037d7eba
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pbkdf_test.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestThatPBKDFWorksCorrectlyForLongKeys(t *testing.T) {
+ cipherInfo := shaWithTripleDESCBC{}
+
+ salt := []byte("\xff\xff\xff\xff\xff\xff\xff\xff")
+ password, _ := bmpString("sesame")
+ key := cipherInfo.deriveKey(salt, password, 2048)
+
+ if expected := []byte("\x7c\xd9\xfd\x3e\x2b\x3b\xe7\x69\x1a\x44\xe3\xbe\xf0\xf9\xea\x0f\xb9\xb8\x97\xd4\xe3\x25\xd9\xd1"); bytes.Compare(key, expected) != 0 {
+ t.Fatalf("expected key '%x', but found '%x'", expected, key)
+ }
+}
+
+func TestThatPBKDFHandlesLeadingZeros(t *testing.T) {
+ // This test triggers a case where I_j (in step 6C) ends up with leading zero
+ // byte, meaning that len(Ijb) < v (leading zeros get stripped by big.Int).
+ // This was previously causing bug whereby certain inputs would break the
+ // derivation and produce the wrong output.
+ key := pbkdf(sha1Sum, 20, 64, []byte("\xf3\x7e\x05\xb5\x18\x32\x4b\x4b"), []byte("\x00\x00"), 2048, 1, 24)
+ expected := []byte("\x00\xf7\x59\xff\x47\xd1\x4d\xd0\x36\x65\xd5\x94\x3c\xb3\xc4\xa3\x9a\x25\x55\xc0\x2a\xed\x66\xe1")
+ if bytes.Compare(key, expected) != 0 {
+ t.Fatalf("expected key '%x', but found '%x'", expected, key)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pkcs12.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pkcs12.go
new file mode 100644
index 00000000000..ad6341e60fa
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pkcs12.go
@@ -0,0 +1,345 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pkcs12 implements some of PKCS#12.
+//
+// This implementation is distilled from https://tools.ietf.org/html/rfc7292
+// and referenced documents. It is intended for decoding P12/PFX-stored
+// certificates and keys for use with the crypto/tls package.
+package pkcs12
+
+import (
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/asn1"
+ "encoding/hex"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1})
+ oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6})
+
+ oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20})
+ oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21})
+ oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1})
+)
+
+type pfxPdu struct {
+ Version int
+ AuthSafe contentInfo
+ MacData macData `asn1:"optional"`
+}
+
+type contentInfo struct {
+ ContentType asn1.ObjectIdentifier
+ Content asn1.RawValue `asn1:"tag:0,explicit,optional"`
+}
+
+type encryptedData struct {
+ Version int
+ EncryptedContentInfo encryptedContentInfo
+}
+
+type encryptedContentInfo struct {
+ ContentType asn1.ObjectIdentifier
+ ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
+ EncryptedContent []byte `asn1:"tag:0,optional"`
+}
+
+func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier {
+ return i.ContentEncryptionAlgorithm
+}
+
+func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent }
+
+type safeBag struct {
+ Id asn1.ObjectIdentifier
+ Value asn1.RawValue `asn1:"tag:0,explicit"`
+ Attributes []pkcs12Attribute `asn1:"set,optional"`
+}
+
+type pkcs12Attribute struct {
+ Id asn1.ObjectIdentifier
+ Value asn1.RawValue `asn1:"set"`
+}
+
+type encryptedPrivateKeyInfo struct {
+ AlgorithmIdentifier pkix.AlgorithmIdentifier
+ EncryptedData []byte
+}
+
+func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier {
+ return i.AlgorithmIdentifier
+}
+
+func (i encryptedPrivateKeyInfo) Data() []byte {
+ return i.EncryptedData
+}
+
+// PEM block types
+const (
+ certificateType = "CERTIFICATE"
+ privateKeyType = "PRIVATE KEY"
+)
+
+// unmarshal calls asn1.Unmarshal, but also returns an error if there is any
+// trailing data after unmarshaling.
+func unmarshal(in []byte, out interface{}) error {
+ trailing, err := asn1.Unmarshal(in, out)
+ if err != nil {
+ return err
+ }
+ if len(trailing) != 0 {
+ return errors.New("pkcs12: trailing data found")
+ }
+ return nil
+}
+
+// ToPEM converts all "safe bags" contained in pfxData to PEM blocks.
+func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) {
+ encodedPassword, err := bmpString(password)
+ if err != nil {
+ return nil, ErrIncorrectPassword
+ }
+
+	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
+	if err != nil {
+		return nil, err
+	}
+
+ blocks := make([]*pem.Block, 0, len(bags))
+ for _, bag := range bags {
+ block, err := convertBag(&bag, encodedPassword)
+ if err != nil {
+ return nil, err
+ }
+ blocks = append(blocks, block)
+ }
+
+ return blocks, nil
+}
+
+func convertBag(bag *safeBag, password []byte) (*pem.Block, error) {
+ block := &pem.Block{
+ Headers: make(map[string]string),
+ }
+
+ for _, attribute := range bag.Attributes {
+ k, v, err := convertAttribute(&attribute)
+ if err != nil {
+ return nil, err
+ }
+ block.Headers[k] = v
+ }
+
+ switch {
+ case bag.Id.Equal(oidCertBag):
+ block.Type = certificateType
+ certsData, err := decodeCertBag(bag.Value.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ block.Bytes = certsData
+ case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
+ block.Type = privateKeyType
+
+ key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password)
+ if err != nil {
+ return nil, err
+ }
+
+ switch key := key.(type) {
+ case *rsa.PrivateKey:
+ block.Bytes = x509.MarshalPKCS1PrivateKey(key)
+ case *ecdsa.PrivateKey:
+ block.Bytes, err = x509.MarshalECPrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ return nil, errors.New("found unknown private key type in PKCS#8 wrapping")
+ }
+ default:
+ return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String())
+ }
+ return block, nil
+}
+
+func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) {
+ isString := false
+
+ switch {
+ case attribute.Id.Equal(oidFriendlyName):
+ key = "friendlyName"
+ isString = true
+ case attribute.Id.Equal(oidLocalKeyID):
+ key = "localKeyId"
+ case attribute.Id.Equal(oidMicrosoftCSPName):
+ // This key is chosen to match OpenSSL.
+ key = "Microsoft CSP Name"
+ isString = true
+ default:
+ return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String())
+ }
+
+ if isString {
+ if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil {
+ return "", "", err
+ }
+ if value, err = decodeBMPString(attribute.Value.Bytes); err != nil {
+ return "", "", err
+ }
+ } else {
+ var id []byte
+ if err := unmarshal(attribute.Value.Bytes, &id); err != nil {
+ return "", "", err
+ }
+ value = hex.EncodeToString(id)
+ }
+
+ return key, value, nil
+}
+
+// Decode extracts a certificate and private key from pfxData. This function
+// assumes that there is only one certificate and only one private key in the
+// pfxData.
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) {
+ encodedPassword, err := bmpString(password)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if len(bags) != 2 {
+ err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU")
+ return
+ }
+
+ for _, bag := range bags {
+ switch {
+ case bag.Id.Equal(oidCertBag):
+ if certificate != nil {
+ err = errors.New("pkcs12: expected exactly one certificate bag")
+ }
+
+ certsData, err := decodeCertBag(bag.Value.Bytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ certs, err := x509.ParseCertificates(certsData)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(certs) != 1 {
+ err = errors.New("pkcs12: expected exactly one certificate in the certBag")
+ return nil, nil, err
+ }
+ certificate = certs[0]
+
+ case bag.Id.Equal(oidPKCS8ShroundedKeyBag):
+ if privateKey != nil {
+ err = errors.New("pkcs12: expected exactly one key bag")
+ }
+
+ if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+
+ if certificate == nil {
+ return nil, nil, errors.New("pkcs12: certificate missing")
+ }
+ if privateKey == nil {
+ return nil, nil, errors.New("pkcs12: private key missing")
+ }
+
+ return
+}
+
+func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) {
+ pfx := new(pfxPdu)
+ if err := unmarshal(p12Data, pfx); err != nil {
+ return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error())
+ }
+
+ if pfx.Version != 3 {
+ return nil, nil, NotImplementedError("can only decode v3 PFX PDU's")
+ }
+
+ if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) {
+ return nil, nil, NotImplementedError("only password-protected PFX is implemented")
+ }
+
+ // unmarshal the explicit bytes in the content for type 'data'
+ if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil {
+ return nil, nil, err
+ }
+
+ if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 {
+ return nil, nil, errors.New("pkcs12: no MAC in data")
+ }
+
+ if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil {
+ if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 {
+			// Some implementations use an empty byte array rather than
+			// the two-byte encoding of the empty string; retry the MAC
+			// check once with a nil password.
+ password = nil
+ err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password)
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ var authenticatedSafe []contentInfo
+ if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil {
+ return nil, nil, err
+ }
+
+ if len(authenticatedSafe) != 2 {
+ return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe")
+ }
+
+ for _, ci := range authenticatedSafe {
+ var data []byte
+
+ switch {
+ case ci.ContentType.Equal(oidDataContentType):
+ if err := unmarshal(ci.Content.Bytes, &data); err != nil {
+ return nil, nil, err
+ }
+ case ci.ContentType.Equal(oidEncryptedDataContentType):
+ var encryptedData encryptedData
+ if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil {
+ return nil, nil, err
+ }
+ if encryptedData.Version != 0 {
+ return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported")
+ }
+ if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil {
+ return nil, nil, err
+ }
+ default:
+ return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe")
+ }
+
+ var safeContents []safeBag
+ if err := unmarshal(data, &safeContents); err != nil {
+ return nil, nil, err
+ }
+ bags = append(bags, safeContents...)
+ }
+
+ return bags, password, nil
+}
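A hedged usage sketch of Decode feeding crypto/tls; the file name, password,
and top-level import path are placeholders (tools in this repository reach the
package through the vendor directory):

    package main

    import (
        "crypto/tls"
        "io/ioutil"

        "golang.org/x/crypto/pkcs12"
    )

    // loadKeyPair is illustrative only.
    func loadKeyPair() (tls.Certificate, error) {
        data, err := ioutil.ReadFile("client.p12") // placeholder path
        if err != nil {
            return tls.Certificate{}, err
        }
        priv, cert, err := pkcs12.Decode(data, "password") // placeholder password
        if err != nil {
            return tls.Certificate{}, err
        }
        return tls.Certificate{
            Certificate: [][]byte{cert.Raw},
            PrivateKey:  priv,
            Leaf:        cert,
        }, nil
    }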
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pkcs12_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pkcs12_test.go
new file mode 100644
index 00000000000..14dd2a6c5d6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/pkcs12_test.go
@@ -0,0 +1,138 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "crypto/rsa"
+ "crypto/tls"
+ "encoding/base64"
+ "encoding/pem"
+ "testing"
+)
+
+func TestPfx(t *testing.T) {
+ for commonName, base64P12 := range testdata {
+ p12, _ := base64.StdEncoding.DecodeString(base64P12)
+
+ priv, cert, err := Decode(p12, "")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := priv.(*rsa.PrivateKey).Validate(); err != nil {
+ t.Errorf("error while validating private key: %v", err)
+ }
+
+ if cert.Subject.CommonName != commonName {
+ t.Errorf("expected common name to be %q, but found %q", commonName, cert.Subject.CommonName)
+ }
+ }
+}
+
+func TestPEM(t *testing.T) {
+ for commonName, base64P12 := range testdata {
+ p12, _ := base64.StdEncoding.DecodeString(base64P12)
+
+ blocks, err := ToPEM(p12, "")
+ if err != nil {
+ t.Fatalf("error while converting to PEM: %s", err)
+ }
+
+ var pemData []byte
+ for _, b := range blocks {
+ pemData = append(pemData, pem.EncodeToMemory(b)...)
+ }
+
+ cert, err := tls.X509KeyPair(pemData, pemData)
+ if err != nil {
+ t.Errorf("err while converting to key pair: %v", err)
+ }
+ config := tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+ config.BuildNameToCertificate()
+
+ if _, exists := config.NameToCertificate[commonName]; !exists {
+ t.Errorf("did not find our cert in PEM?: %v", config.NameToCertificate)
+ }
+ }
+}
+
+func ExampleToPEM() {
+ p12, _ := base64.StdEncoding.DecodeString(`MIIJzgIBAzCCCZQGCS ... CA+gwggPk==`)
+
+ blocks, err := ToPEM(p12, "password")
+ if err != nil {
+ panic(err)
+ }
+
+ var pemData []byte
+ for _, b := range blocks {
+ pemData = append(pemData, pem.EncodeToMemory(b)...)
+ }
+
+ // then use PEM data for tls to construct tls certificate:
+ cert, err := tls.X509KeyPair(pemData, pemData)
+ if err != nil {
+ panic(err)
+ }
+
+ config := &tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+
+ _ = config
+}
+
+var testdata = map[string]string{
+ // 'null' password test case
+ "Windows Azure Tools": `MIIKDAIBAzCCCcwGCSqGSIb3DQEHAaCCCb0Eggm5MIIJtTCCBe4GCSqGSIb3DQEHAaCCBd8EggXbMIIF1zCCBdMGCyqGSIb3DQEMCgECoIIE7jCCBOowHAYKKoZIhvcNAQwBAzAOBAhStUNnlTGV+gICB9AEggTIJ81JIossF6boFWpPtkiQRPtI6DW6e9QD4/WvHAVrM2bKdpMzSMsCML5NyuddANTKHBVq00Jc9keqGNAqJPKkjhSUebzQFyhe0E1oI9T4zY5UKr/I8JclOeccH4QQnsySzYUG2SnniXnQ+JrG3juetli7EKth9h6jLc6xbubPadY5HMB3wL/eG/kJymiXwU2KQ9Mgd4X6jbcV+NNCE/8jbZHvSTCPeYTJIjxfeX61Sj5kFKUCzERbsnpyevhY3X0eYtEDezZQarvGmXtMMdzf8HJHkWRdk9VLDLgjk8uiJif/+X4FohZ37ig0CpgC2+dP4DGugaZZ51hb8tN9GeCKIsrmWogMXDIVd0OACBp/EjJVmFB6y0kUCXxUE0TZt0XA1tjAGJcjDUpBvTntZjPsnH/4ZySy+s2d9OOhJ6pzRQBRm360TzkFdSwk9DLiLdGfv4pwMMu/vNGBlqjP/1sQtj+jprJiD1sDbCl4AdQZVoMBQHadF2uSD4/o17XG/Ci0r2h6Htc2yvZMAbEY4zMjjIn2a+vqIxD6onexaek1R3zbkS9j19D6EN9EWn8xgz80YRCyW65znZk8xaIhhvlU/mg7sTxeyuqroBZNcq6uDaQTehDpyH7bY2l4zWRpoj10a6JfH2q5shYz8Y6UZC/kOTfuGqbZDNZWro/9pYquvNNW0M847E5t9bsf9VkAAMHRGBbWoVoU9VpI0UnoXSfvpOo+aXa2DSq5sHHUTVY7A9eov3z5IqT+pligx11xcs+YhDWcU8di3BTJisohKvv5Y8WSkm/rloiZd4ig269k0jTRk1olP/vCksPli4wKG2wdsd5o42nX1yL7mFfXocOANZbB+5qMkiwdyoQSk+Vq+C8nAZx2bbKhUq2MbrORGMzOe0Hh0x2a0PeObycN1Bpyv7Mp3ZI9h5hBnONKCnqMhtyQHUj/nNvbJUnDVYNfoOEqDiEqqEwB7YqWzAKz8KW0OIqdlM8uiQ4JqZZlFllnWJUfaiDrdFM3lYSnFQBkzeVlts6GpDOOBjCYd7dcCNS6kq6pZC6p6HN60Twu0JnurZD6RT7rrPkIGE8vAenFt4iGe/yF52fahCSY8Ws4K0UTwN7bAS+4xRHVCWvE8sMRZsRCHizb5laYsVrPZJhE6+hux6OBb6w8kwPYXc+ud5v6UxawUWgt6uPwl8mlAtU9Z7Miw4Nn/wtBkiLL/ke1UI1gqJtcQXgHxx6mzsjh41+nAgTvdbsSEyU6vfOmxGj3Rwc1eOrIhJUqn5YjOWfzzsz/D5DzWKmwXIwdspt1p+u+kol1N3f2wT9fKPnd/RGCb4g/1hc3Aju4DQYgGY782l89CEEdalpQ/35bQczMFk6Fje12HykakWEXd/bGm9Unh82gH84USiRpeOfQvBDYoqEyrY3zkFZzBjhDqa+jEcAj41tcGx47oSfDq3iVYCdL7HSIjtnyEktVXd7mISZLoMt20JACFcMw+mrbjlug+eU7o2GR7T+LwtOp/p4LZqyLa7oQJDwde1BNZtm3TCK2P1mW94QDL0nDUps5KLtr1DaZXEkRbjSJub2ZE9WqDHyU3KA8G84Tq/rN1IoNu/if45jacyPje1Npj9IftUZSP22nV7HMwZtwQ4P4MYHRMBMGCSqGSIb3DQEJFTEGBAQBAAAAMFsGCSqGSIb3DQEJFDFOHkwAewBCADQAQQA0AEYARQBCADAALQBBADEAOABBAC0ANAA0AEIAQgAtAEIANQBGADIALQA0ADkAMQBFAEYAMQA1ADIAQgBBADEANgB9MF0GCSsGAQQBgjcRATFQHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAG8AZgB0AHcAYQByAGUAIABLAGUAeQAgAFMAdABvAHIAYQBnAGUAIABQAHIAbwB2AGkAZABlAHIwggO/BgkqhkiG9w0BBwagggOwMIIDrAIBADCCA6UGCSqGSIb3DQEHATAcBgoqhkiG9w0BDAEGMA4ECEBk5ZAYpu0WAgIH0ICCA3hik4mQFGpw9Ha8TQPtk+j2jwWdxfF0+sTk6S8PTsEfIhB7wPltjiCK92Uv2tCBQnodBUmatIfkpnRDEySmgmdglmOCzj204lWAMRs94PoALGn3JVBXbO1vIDCbAPOZ7Z0Hd0/1t2hmk8v3//QJGUg+qr59/4y/MuVfIg4qfkPcC2QSvYWcK3oTf6SFi5rv9B1IOWFgN5D0+C+x/9Lb/myPYX+rbOHrwtJ4W1fWKoz9g7wwmGFA9IJ2DYGuH8ifVFbDFT1Vcgsvs8arSX7oBsJVW0qrP7XkuDRe3EqCmKW7rBEwYrFznhxZcRDEpMwbFoSvgSIZ4XhFY9VKYglT+JpNH5iDceYEBOQL4vBLpxNUk3l5jKaBNxVa14AIBxq18bVHJ+STInhLhad4u10v/Xbx7wIL3f9DX1yLAkPrpBYbNHS2/ew6H/ySDJnoIDxkw2zZ4qJ+qUJZ1S0lbZVG+VT0OP5uF6tyOSpbMlcGkdl3z254n6MlCrTifcwkzscysDsgKXaYQw06rzrPW6RDub+t+hXzGny799fS9jhQMLDmOggaQ7+LA4oEZsfT89HLMWxJYDqjo3gIfjciV2mV54R684qLDS+AO09U49e6yEbwGlq8lpmO/pbXCbpGbB1b3EomcQbxdWxW2WEkkEd/VBn81K4M3obmywwXJkw+tPXDXfBmzzaqqCR+onMQ5ME1nMkY8ybnfoCc1bDIupjVWsEL2Wvq752RgI6KqzVNr1ew1IdqV5AWN2fOfek+0vi3Jd9FHF3hx8JMwjJL9dZsETV5kHtYJtE7wJ23J68BnCt2eI0GEuwXcCf5EdSKN/xXCTlIokc4Qk/gzRdIZsvcEJ6B1lGovKG54X4IohikqTjiepjbsMWj38yxDmK3mtENZ9ci8FPfbbvIEcOCZIinuY3qFUlRSbx7VUerEoV1IP3clUwexVQo4lHFee2jd7ocWsdSqSapW7OWUupBtDzRkqVhE7tGria+i1W2d6YLlJ21QTjyapWJehAMO637OdbJCCzDs1cXbodRRE7bsP492ocJy8OX66rKdhYbg8srSFNKdb3pF3UDNbN9jhI/t8iagRhNBhlQtTr1me2E/c86Q18qcRXl4bcXTt6acgCeffK6Y26LcVlrgjlD33AEYRRUeyC+rpxbT0aMjdFderlndKRIyG23mSp0HaUwNzAfMAcGBSsOAwIaBBRlviCbIyRrhIysg2dc/KbLFTc2vQQUg4rfwHMM4IKYRD/fsd1x6dda+wQ=`,
+ // empty string password test case
+ "testing@example.com": `MIIJzgIBAzCCCZQGCSqGSIb3DQEHAaCCCYUEggmBMIIJfTCCA/cGCSqGSIb3DQEHBqCCA+gwggPk
+AgEAMIID3QYJKoZIhvcNAQcBMBwGCiqGSIb3DQEMAQYwDgQIIszfRGqcmPcCAggAgIIDsOZ9Eg1L
+s5Wx8JhYoV3HAL4aRnkAWvTYB5NISZOgSgIQTssmt/3A7134dibTmaT/93LikkL3cTKLnQzJ4wDf
+YZ1bprpVJvUqz+HFT79m27bP9zYXFrvxWBJbxjYKTSjQMgz+h8LAEpXXGajCmxMJ1oCOtdXkhhzc
+LdZN6SAYgtmtyFnCdMEDskSggGuLb3fw84QEJ/Sj6FAULXunW/CPaS7Ce0TMsKmNU/jfFWj3yXXw
+ro0kwjKiVLpVFlnBlHo2OoVU7hmkm59YpGhLgS7nxLD3n7nBroQ0ID1+8R01NnV9XLGoGzxMm1te
+6UyTCkr5mj+kEQ8EP1Ys7g/TC411uhVWySMt/rcpkx7Vz1r9kYEAzJpONAfr6cuEVkPKrxpq4Fh0
+2fzlKBky0i/hrfIEUmngh+ERHUb/Mtv/fkv1j5w9suESbhsMLLiCXAlsP1UWMX+3bNizi3WVMEts
+FM2k9byn+p8IUD/A8ULlE4kEaWeoc+2idkCNQkLGuIdGUXUFVm58se0auUkVRoRJx8x4CkMesT8j
+b1H831W66YRWoEwwDQp2kK1lA2vQXxdVHWlFevMNxJeromLzj3ayiaFrfByeUXhR2S+Hpm+c0yNR
+4UVU9WED2kacsZcpRm9nlEa5sr28mri5JdBrNa/K02OOhvKCxr5ZGmbOVzUQKla2z4w+Ku9k8POm
+dfDNU/fGx1b5hcFWtghXe3msWVsSJrQihnN6q1ughzNiYZlJUGcHdZDRtiWwCFI0bR8h/Dmg9uO9
+4rawQQrjIRT7B8yF3UbkZyAqs8Ppb1TsMeNPHh1rxEfGVQknh/48ouJYsmtbnzugTUt3mJCXXiL+
+XcPMV6bBVAUu4aaVKSmg9+yJtY4/VKv10iw88ktv29fViIdBe3t6l/oPuvQgbQ8dqf4T8w0l/uKZ
+9lS1Na9jfT1vCoS7F5TRi+tmyj1vL5kr/amEIW6xKEP6oeAMvCMtbPAzVEj38zdJ1R22FfuIBxkh
+f0Zl7pdVbmzRxl/SBx9iIBJSqAvcXItiT0FIj8HxQ+0iZKqMQMiBuNWJf5pYOLWGrIyntCWwHuaQ
+wrx0sTGuEL9YXLEAsBDrsvzLkx/56E4INGZFrH8G7HBdW6iGqb22IMI4GHltYSyBRKbB0gadYTyv
+abPEoqww8o7/85aPSzOTJ/53ozD438Q+d0u9SyDuOb60SzCD/zPuCEd78YgtXJwBYTuUNRT27FaM
+3LGMX8Hz+6yPNRnmnA2XKPn7dx/IlaqAjIs8MIIFfgYJKoZIhvcNAQcBoIIFbwSCBWswggVnMIIF
+YwYLKoZIhvcNAQwKAQKgggTuMIIE6jAcBgoqhkiG9w0BDAEDMA4ECJr0cClYqOlcAgIIAASCBMhe
+OQSiP2s0/46ONXcNeVAkz2ksW3u/+qorhSiskGZ0b3dFa1hhgBU2Q7JVIkc4Hf7OXaT1eVQ8oqND
+uhqsNz83/kqYo70+LS8Hocj49jFgWAKrf/yQkdyP1daHa2yzlEw4mkpqOfnIORQHvYCa8nEApspZ
+wVu8y6WVuLHKU67mel7db2xwstQp7PRuSAYqGjTfAylElog8ASdaqqYbYIrCXucF8iF9oVgmb/Qo
+xrXshJ9aSLO4MuXlTPELmWgj07AXKSb90FKNihE+y0bWb9LPVFY1Sly3AX9PfrtkSXIZwqW3phpv
+MxGxQl/R6mr1z+hlTfY9Wdpb5vlKXPKA0L0Rt8d2pOesylFi6esJoS01QgP1kJILjbrV731kvDc0
+Jsd+Oxv4BMwA7ClG8w1EAOInc/GrV1MWFGw/HeEqj3CZ/l/0jv9bwkbVeVCiIhoL6P6lVx9pXq4t
+KZ0uKg/tk5TVJmG2vLcMLvezD0Yk3G2ZOMrywtmskrwoF7oAUpO9e87szoH6fEvUZlkDkPVW1NV4
+cZk3DBSQiuA3VOOg8qbo/tx/EE3H59P0axZWno2GSB0wFPWd1aj+b//tJEJHaaNR6qPRj4IWj9ru
+Qbc8eRAcVWleHg8uAehSvUXlFpyMQREyrnpvMGddpiTC8N4UMrrBRhV7+UbCOWhxPCbItnInBqgl
+1JpSZIP7iUtsIMdu3fEC2cdbXMTRul+4rdzUR7F9OaezV3jjvcAbDvgbK1CpyC+MJ1Mxm/iTgk9V
+iUArydhlR8OniN84GyGYoYCW9O/KUwb6ASmeFOu/msx8x6kAsSQHIkKqMKv0TUR3kZnkxUvdpBGP
+KTl4YCTvNGX4dYALBqrAETRDhua2KVBD/kEttDHwBNVbN2xi81+Mc7ml461aADfk0c66R/m2sjHB
+2tN9+wG12OIWFQjL6wF/UfJMYamxx2zOOExiId29Opt57uYiNVLOO4ourPewHPeH0u8Gz35aero7
+lkt7cZAe1Q0038JUuE/QGlnK4lESK9UkSIQAjSaAlTsrcfwtQxB2EjoOoLhwH5mvxUEmcNGNnXUc
+9xj3M5BD3zBz3Ft7G3YMMDwB1+zC2l+0UG0MGVjMVaeoy32VVNvxgX7jk22OXG1iaOB+PY9kdk+O
+X+52BGSf/rD6X0EnqY7XuRPkMGgjtpZeAYxRQnFtCZgDY4wYheuxqSSpdF49yNczSPLkgB3CeCfS
++9NTKN7aC6hBbmW/8yYh6OvSiCEwY0lFS/T+7iaVxr1loE4zI1y/FFp4Pe1qfLlLttVlkygga2UU
+SCunTQ8UB/M5IXWKkhMOO11dP4niWwb39Y7pCWpau7mwbXOKfRPX96cgHnQJK5uG+BesDD1oYnX0
+6frN7FOnTSHKruRIwuI8KnOQ/I+owmyz71wiv5LMQt+yM47UrEjB/EZa5X8dpEwOZvkdqL7utcyo
+l0XH5kWMXdW856LL/FYftAqJIDAmtX1TXF/rbP6mPyN/IlDC0gjP84Uzd/a2UyTIWr+wk49Ek3vQ
+/uDamq6QrwAxVmNh5Tset5Vhpc1e1kb7mRMZIzxSP8JcTuYd45oFKi98I8YjvueHVZce1g7OudQP
+SbFQoJvdT46iBg1TTatlltpOiH2mFaxWVS0xYjAjBgkqhkiG9w0BCRUxFgQUdA9eVqvETX4an/c8
+p8SsTugkit8wOwYJKoZIhvcNAQkUMS4eLABGAHIAaQBlAG4AZABsAHkAIABuAGEAbQBlACAAZgBv
+AHIAIABjAGUAcgB0MDEwITAJBgUrDgMCGgUABBRFsNz3Zd1O1GI8GTuFwCWuDOjEEwQIuBEfIcAy
+HQ8CAggA`,
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/safebags.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/safebags.go
new file mode 100644
index 00000000000..def1f7b98d7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/pkcs12/safebags.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+ "crypto/x509"
+ "encoding/asn1"
+ "errors"
+)
+
+var (
+ // see https://tools.ietf.org/html/rfc7292#appendix-D
+ oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1})
+ oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2})
+ oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3})
+)
+
+type certBag struct {
+ Id asn1.ObjectIdentifier
+ Data []byte `asn1:"tag:0,explicit"`
+}
+
+func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) {
+ pkinfo := new(encryptedPrivateKeyInfo)
+ if err = unmarshal(asn1Data, pkinfo); err != nil {
+ return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error())
+ }
+
+ pkData, err := pbDecrypt(pkinfo, password)
+ if err != nil {
+ return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error())
+ }
+
+ ret := new(asn1.RawValue)
+ if err = unmarshal(pkData, ret); err != nil {
+ return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error())
+ }
+
+ if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil {
+ return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error())
+ }
+
+ return privateKey, nil
+}
+
+func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) {
+ bag := new(certBag)
+ if err := unmarshal(asn1Data, bag); err != nil {
+ return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error())
+ }
+ if !bag.Id.Equal(oidCertTypeX509Certificate) {
+ return nil, NotImplementedError("only X509 certificates are supported")
+ }
+ return bag.Data, nil
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/const_amd64.s b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/const_amd64.s
new file mode 100644
index 00000000000..8e861f337cd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/const_amd64.s
@@ -0,0 +1,45 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+DATA ·SCALE(SB)/8, $0x37F4000000000000
+GLOBL ·SCALE(SB), 8, $8
+DATA ·TWO32(SB)/8, $0x41F0000000000000
+GLOBL ·TWO32(SB), 8, $8
+DATA ·TWO64(SB)/8, $0x43F0000000000000
+GLOBL ·TWO64(SB), 8, $8
+DATA ·TWO96(SB)/8, $0x45F0000000000000
+GLOBL ·TWO96(SB), 8, $8
+DATA ·ALPHA32(SB)/8, $0x45E8000000000000
+GLOBL ·ALPHA32(SB), 8, $8
+DATA ·ALPHA64(SB)/8, $0x47E8000000000000
+GLOBL ·ALPHA64(SB), 8, $8
+DATA ·ALPHA96(SB)/8, $0x49E8000000000000
+GLOBL ·ALPHA96(SB), 8, $8
+DATA ·ALPHA130(SB)/8, $0x4C08000000000000
+GLOBL ·ALPHA130(SB), 8, $8
+DATA ·DOFFSET0(SB)/8, $0x4330000000000000
+GLOBL ·DOFFSET0(SB), 8, $8
+DATA ·DOFFSET1(SB)/8, $0x4530000000000000
+GLOBL ·DOFFSET1(SB), 8, $8
+DATA ·DOFFSET2(SB)/8, $0x4730000000000000
+GLOBL ·DOFFSET2(SB), 8, $8
+DATA ·DOFFSET3(SB)/8, $0x4930000000000000
+GLOBL ·DOFFSET3(SB), 8, $8
+DATA ·DOFFSET3MINUSTWO128(SB)/8, $0x492FFFFE00000000
+GLOBL ·DOFFSET3MINUSTWO128(SB), 8, $8
+DATA ·HOFFSET0(SB)/8, $0x43300001FFFFFFFB
+GLOBL ·HOFFSET0(SB), 8, $8
+DATA ·HOFFSET1(SB)/8, $0x45300001FFFFFFFE
+GLOBL ·HOFFSET1(SB), 8, $8
+DATA ·HOFFSET2(SB)/8, $0x47300001FFFFFFFE
+GLOBL ·HOFFSET2(SB), 8, $8
+DATA ·HOFFSET3(SB)/8, $0x49300003FFFFFFFE
+GLOBL ·HOFFSET3(SB), 8, $8
+DATA ·ROUNDING(SB)/2, $0x137f
+GLOBL ·ROUNDING(SB), 8, $2
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305.go
new file mode 100644
index 00000000000..4a5f826f7a6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package poly1305 implements Poly1305 one-time message authentication code as specified in http://cr.yp.to/mac/poly1305-20050329.pdf.
+
+Poly1305 is a fast, one-time authentication function. It is infeasible for an
+attacker to generate an authenticator for a message without the key. However, a
+key must only be used for a single message. Authenticating two different
+messages with the same key allows an attacker to forge authenticators for other
+messages with the same key.
+
+Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
+used with a fixed key in order to generate one-time keys from a nonce.
+However, in this package AES isn't used and the one-time key is specified
+directly.
+*/
+package poly1305 // import "golang.org/x/crypto/poly1305"
+
+import "crypto/subtle"
+
+// TagSize is the size, in bytes, of a poly1305 authenticator.
+const TagSize = 16
+
+// Verify returns true if mac is a valid authenticator for m with the given
+// key.
+func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
+ var tmp [16]byte
+ Sum(&tmp, m, key)
+ return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
+}
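A small end-to-end sketch; Sum is declared in the package's sum_*.go files
vendored alongside this one, and the key bytes below are placeholders that
must never authenticate more than one message:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/poly1305"
    )

    func main() {
        var key [32]byte // one-time key: a single message only
        copy(key[:], "an example 32-byte one-time key!")
        msg := []byte("hello, poly1305")

        var tag [poly1305.TagSize]byte
        poly1305.Sum(&tag, msg, &key)
        fmt.Println(poly1305.Verify(&tag, msg, &key)) // prints: true
    }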
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_amd64.s b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_amd64.s
new file mode 100644
index 00000000000..f8d4ee92898
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_amd64.s
@@ -0,0 +1,497 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+TEXT ·poly1305(SB),0,$224-32
+ MOVQ out+0(FP),DI
+ MOVQ m+8(FP),SI
+ MOVQ mlen+16(FP),DX
+ MOVQ key+24(FP),CX
+
+ MOVQ SP,R11
+ MOVQ $31,R9
+ NOTQ R9
+ ANDQ R9,SP
+ ADDQ $32,SP
+
+ MOVQ R11,32(SP)
+ MOVQ R12,40(SP)
+ MOVQ R13,48(SP)
+ MOVQ R14,56(SP)
+ MOVQ R15,64(SP)
+ MOVQ BX,72(SP)
+ MOVQ BP,80(SP)
+ FLDCW ·ROUNDING(SB)
+ MOVL 0(CX),R8
+ MOVL 4(CX),R9
+ MOVL 8(CX),AX
+ MOVL 12(CX),R10
+ MOVQ DI,88(SP)
+ MOVQ CX,96(SP)
+ MOVL $0X43300000,108(SP)
+ MOVL $0X45300000,116(SP)
+ MOVL $0X47300000,124(SP)
+ MOVL $0X49300000,132(SP)
+ ANDL $0X0FFFFFFF,R8
+ ANDL $0X0FFFFFFC,R9
+ ANDL $0X0FFFFFFC,AX
+ ANDL $0X0FFFFFFC,R10
+ MOVL R8,104(SP)
+ MOVL R9,112(SP)
+ MOVL AX,120(SP)
+ MOVL R10,128(SP)
+ FMOVD 104(SP), F0
+ FSUBD ·DOFFSET0(SB), F0
+ FMOVD 112(SP), F0
+ FSUBD ·DOFFSET1(SB), F0
+ FMOVD 120(SP), F0
+ FSUBD ·DOFFSET2(SB), F0
+ FMOVD 128(SP), F0
+ FSUBD ·DOFFSET3(SB), F0
+ FXCHD F0, F3
+ FMOVDP F0, 136(SP)
+ FXCHD F0, F1
+ FMOVD F0, 144(SP)
+ FMULD ·SCALE(SB), F0
+ FMOVDP F0, 152(SP)
+ FMOVD F0, 160(SP)
+ FMULD ·SCALE(SB), F0
+ FMOVDP F0, 168(SP)
+ FMOVD F0, 176(SP)
+ FMULD ·SCALE(SB), F0
+ FMOVDP F0, 184(SP)
+ FLDZ
+ FLDZ
+ FLDZ
+ FLDZ
+ CMPQ DX,$16
+ JB ADDATMOST15BYTES
+ INITIALATLEAST16BYTES:
+ MOVL 12(SI),DI
+ MOVL 8(SI),CX
+ MOVL 4(SI),R8
+ MOVL 0(SI),R9
+ MOVL DI,128(SP)
+ MOVL CX,120(SP)
+ MOVL R8,112(SP)
+ MOVL R9,104(SP)
+ ADDQ $16,SI
+ SUBQ $16,DX
+ FXCHD F0, F3
+ FADDD 128(SP), F0
+ FSUBD ·DOFFSET3MINUSTWO128(SB), F0
+ FXCHD F0, F1
+ FADDD 112(SP), F0
+ FSUBD ·DOFFSET1(SB), F0
+ FXCHD F0, F2
+ FADDD 120(SP), F0
+ FSUBD ·DOFFSET2(SB), F0
+ FXCHD F0, F3
+ FADDD 104(SP), F0
+ FSUBD ·DOFFSET0(SB), F0
+ CMPQ DX,$16
+ JB MULTIPLYADDATMOST15BYTES
+ MULTIPLYADDATLEAST16BYTES:
+ MOVL 12(SI),DI
+ MOVL 8(SI),CX
+ MOVL 4(SI),R8
+ MOVL 0(SI),R9
+ MOVL DI,128(SP)
+ MOVL CX,120(SP)
+ MOVL R8,112(SP)
+ MOVL R9,104(SP)
+ ADDQ $16,SI
+ SUBQ $16,DX
+ FMOVD ·ALPHA130(SB), F0
+ FADDD F2,F0
+ FSUBD ·ALPHA130(SB), F0
+ FSUBD F0,F2
+ FMULD ·SCALE(SB), F0
+ FMOVD ·ALPHA32(SB), F0
+ FADDD F2,F0
+ FSUBD ·ALPHA32(SB), F0
+ FSUBD F0,F2
+ FXCHD F0, F2
+ FADDDP F0,F1
+ FMOVD ·ALPHA64(SB), F0
+ FADDD F4,F0
+ FSUBD ·ALPHA64(SB), F0
+ FSUBD F0,F4
+ FMOVD ·ALPHA96(SB), F0
+ FADDD F6,F0
+ FSUBD ·ALPHA96(SB), F0
+ FSUBD F0,F6
+ FXCHD F0, F6
+ FADDDP F0,F1
+ FXCHD F0, F3
+ FADDDP F0,F5
+ FXCHD F0, F3
+ FADDDP F0,F1
+ FMOVD 176(SP), F0
+ FMULD F3,F0
+ FMOVD 160(SP), F0
+ FMULD F4,F0
+ FMOVD 144(SP), F0
+ FMULD F5,F0
+ FMOVD 136(SP), F0
+ FMULDP F0,F6
+ FMOVD 160(SP), F0
+ FMULD F4,F0
+ FADDDP F0,F3
+ FMOVD 144(SP), F0
+ FMULD F4,F0
+ FADDDP F0,F2
+ FMOVD 136(SP), F0
+ FMULD F4,F0
+ FADDDP F0,F1
+ FMOVD 184(SP), F0
+ FMULDP F0,F4
+ FXCHD F0, F3
+ FADDDP F0,F5
+ FMOVD 144(SP), F0
+ FMULD F4,F0
+ FADDDP F0,F2
+ FMOVD 136(SP), F0
+ FMULD F4,F0
+ FADDDP F0,F1
+ FMOVD 184(SP), F0
+ FMULD F4,F0
+ FADDDP F0,F3
+ FMOVD 168(SP), F0
+ FMULDP F0,F4
+ FXCHD F0, F3
+ FADDDP F0,F4
+ FMOVD 136(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F1
+ FXCHD F0, F3
+ FMOVD 184(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F3
+ FXCHD F0, F1
+ FMOVD 168(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F1
+ FMOVD 152(SP), F0
+ FMULDP F0,F5
+ FXCHD F0, F4
+ FADDDP F0,F1
+ CMPQ DX,$16
+ FXCHD F0, F2
+ FMOVD 128(SP), F0
+ FSUBD ·DOFFSET3MINUSTWO128(SB), F0
+ FADDDP F0,F1
+ FXCHD F0, F1
+ FMOVD 120(SP), F0
+ FSUBD ·DOFFSET2(SB), F0
+ FADDDP F0,F1
+ FXCHD F0, F3
+ FMOVD 112(SP), F0
+ FSUBD ·DOFFSET1(SB), F0
+ FADDDP F0,F1
+ FXCHD F0, F2
+ FMOVD 104(SP), F0
+ FSUBD ·DOFFSET0(SB), F0
+ FADDDP F0,F1
+ JAE MULTIPLYADDATLEAST16BYTES
+ MULTIPLYADDATMOST15BYTES:
+ FMOVD ·ALPHA130(SB), F0
+ FADDD F2,F0
+ FSUBD ·ALPHA130(SB), F0
+ FSUBD F0,F2
+ FMULD ·SCALE(SB), F0
+ FMOVD ·ALPHA32(SB), F0
+ FADDD F2,F0
+ FSUBD ·ALPHA32(SB), F0
+ FSUBD F0,F2
+ FMOVD ·ALPHA64(SB), F0
+ FADDD F5,F0
+ FSUBD ·ALPHA64(SB), F0
+ FSUBD F0,F5
+ FMOVD ·ALPHA96(SB), F0
+ FADDD F7,F0
+ FSUBD ·ALPHA96(SB), F0
+ FSUBD F0,F7
+ FXCHD F0, F7
+ FADDDP F0,F1
+ FXCHD F0, F5
+ FADDDP F0,F1
+ FXCHD F0, F3
+ FADDDP F0,F5
+ FADDDP F0,F1
+ FMOVD 176(SP), F0
+ FMULD F1,F0
+ FMOVD 160(SP), F0
+ FMULD F2,F0
+ FMOVD 144(SP), F0
+ FMULD F3,F0
+ FMOVD 136(SP), F0
+ FMULDP F0,F4
+ FMOVD 160(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F3
+ FMOVD 144(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F2
+ FMOVD 136(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F1
+ FMOVD 184(SP), F0
+ FMULDP F0,F5
+ FXCHD F0, F4
+ FADDDP F0,F3
+ FMOVD 144(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F2
+ FMOVD 136(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F1
+ FMOVD 184(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F4
+ FMOVD 168(SP), F0
+ FMULDP F0,F5
+ FXCHD F0, F4
+ FADDDP F0,F2
+ FMOVD 136(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F1
+ FMOVD 184(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F4
+ FMOVD 168(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F3
+ FMOVD 152(SP), F0
+ FMULDP F0,F5
+ FXCHD F0, F4
+ FADDDP F0,F1
+ ADDATMOST15BYTES:
+ CMPQ DX,$0
+ JE NOMOREBYTES
+ MOVL $0,0(SP)
+	MOVL $0,4(SP)
+	MOVL $0,8(SP)
+	MOVL $0,12(SP)
+ LEAQ 0(SP),DI
+ MOVQ DX,CX
+ REP; MOVSB
+ MOVB $1,0(DI)
+	MOVL 12(SP),DI
+	MOVL 8(SP),SI
+	MOVL 4(SP),DX
+ MOVL 0(SP),CX
+ MOVL DI,128(SP)
+ MOVL SI,120(SP)
+ MOVL DX,112(SP)
+ MOVL CX,104(SP)
+ FXCHD F0, F3
+ FADDD 128(SP), F0
+ FSUBD ·DOFFSET3(SB), F0
+ FXCHD F0, F2
+ FADDD 120(SP), F0
+ FSUBD ·DOFFSET2(SB), F0
+ FXCHD F0, F1
+ FADDD 112(SP), F0
+ FSUBD ·DOFFSET1(SB), F0
+ FXCHD F0, F3
+ FADDD 104(SP), F0
+ FSUBD ·DOFFSET0(SB), F0
+ FMOVD ·ALPHA130(SB), F0
+ FADDD F3,F0
+ FSUBD ·ALPHA130(SB), F0
+ FSUBD F0,F3
+ FMULD ·SCALE(SB), F0
+ FMOVD ·ALPHA32(SB), F0
+ FADDD F2,F0
+ FSUBD ·ALPHA32(SB), F0
+ FSUBD F0,F2
+ FMOVD ·ALPHA64(SB), F0
+ FADDD F6,F0
+ FSUBD ·ALPHA64(SB), F0
+ FSUBD F0,F6
+ FMOVD ·ALPHA96(SB), F0
+ FADDD F5,F0
+ FSUBD ·ALPHA96(SB), F0
+ FSUBD F0,F5
+ FXCHD F0, F4
+ FADDDP F0,F3
+ FXCHD F0, F6
+ FADDDP F0,F1
+ FXCHD F0, F3
+ FADDDP F0,F5
+ FXCHD F0, F3
+ FADDDP F0,F1
+ FMOVD 176(SP), F0
+ FMULD F3,F0
+ FMOVD 160(SP), F0
+ FMULD F4,F0
+ FMOVD 144(SP), F0
+ FMULD F5,F0
+ FMOVD 136(SP), F0
+ FMULDP F0,F6
+ FMOVD 160(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F3
+ FMOVD 144(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F2
+ FMOVD 136(SP), F0
+ FMULD F5,F0
+ FADDDP F0,F1
+ FMOVD 184(SP), F0
+ FMULDP F0,F5
+ FXCHD F0, F4
+ FADDDP F0,F5
+ FMOVD 144(SP), F0
+ FMULD F6,F0
+ FADDDP F0,F2
+ FMOVD 136(SP), F0
+ FMULD F6,F0
+ FADDDP F0,F1
+ FMOVD 184(SP), F0
+ FMULD F6,F0
+ FADDDP F0,F4
+ FMOVD 168(SP), F0
+ FMULDP F0,F6
+ FXCHD F0, F5
+ FADDDP F0,F4
+ FMOVD 136(SP), F0
+ FMULD F2,F0
+ FADDDP F0,F1
+ FMOVD 184(SP), F0
+ FMULD F2,F0
+ FADDDP F0,F5
+ FMOVD 168(SP), F0
+ FMULD F2,F0
+ FADDDP F0,F3
+ FMOVD 152(SP), F0
+ FMULDP F0,F2
+ FXCHD F0, F1
+ FADDDP F0,F3
+ FXCHD F0, F3
+ FXCHD F0, F2
+ NOMOREBYTES:
+ MOVL $0,R10
+ FMOVD ·ALPHA130(SB), F0
+ FADDD F4,F0
+ FSUBD ·ALPHA130(SB), F0
+ FSUBD F0,F4
+ FMULD ·SCALE(SB), F0
+ FMOVD ·ALPHA32(SB), F0
+ FADDD F2,F0
+ FSUBD ·ALPHA32(SB), F0
+ FSUBD F0,F2
+ FMOVD ·ALPHA64(SB), F0
+ FADDD F4,F0
+ FSUBD ·ALPHA64(SB), F0
+ FSUBD F0,F4
+ FMOVD ·ALPHA96(SB), F0
+ FADDD F6,F0
+ FSUBD ·ALPHA96(SB), F0
+ FXCHD F0, F6
+ FSUBD F6,F0
+ FXCHD F0, F4
+ FADDDP F0,F3
+ FXCHD F0, F4
+ FADDDP F0,F1
+ FXCHD F0, F2
+ FADDDP F0,F3
+ FXCHD F0, F4
+ FADDDP F0,F3
+ FXCHD F0, F3
+ FADDD ·HOFFSET0(SB), F0
+ FXCHD F0, F3
+ FADDD ·HOFFSET1(SB), F0
+ FXCHD F0, F1
+ FADDD ·HOFFSET2(SB), F0
+ FXCHD F0, F2
+ FADDD ·HOFFSET3(SB), F0
+ FXCHD F0, F3
+ FMOVDP F0, 104(SP)
+ FMOVDP F0, 112(SP)
+ FMOVDP F0, 120(SP)
+ FMOVDP F0, 128(SP)
+ MOVL 108(SP),DI
+ ANDL $63,DI
+ MOVL 116(SP),SI
+ ANDL $63,SI
+ MOVL 124(SP),DX
+ ANDL $63,DX
+ MOVL 132(SP),CX
+ ANDL $63,CX
+ MOVL 112(SP),R8
+ ADDL DI,R8
+ MOVQ R8,112(SP)
+ MOVL 120(SP),DI
+ ADCL SI,DI
+ MOVQ DI,120(SP)
+ MOVL 128(SP),DI
+ ADCL DX,DI
+ MOVQ DI,128(SP)
+ MOVL R10,DI
+ ADCL CX,DI
+ MOVQ DI,136(SP)
+ MOVQ $5,DI
+ MOVL 104(SP),SI
+ ADDL SI,DI
+ MOVQ DI,104(SP)
+ MOVL R10,DI
+ MOVQ 112(SP),DX
+ ADCL DX,DI
+ MOVQ DI,112(SP)
+ MOVL R10,DI
+ MOVQ 120(SP),CX
+ ADCL CX,DI
+ MOVQ DI,120(SP)
+ MOVL R10,DI
+ MOVQ 128(SP),R8
+ ADCL R8,DI
+ MOVQ DI,128(SP)
+ MOVQ $0XFFFFFFFC,DI
+ MOVQ 136(SP),R9
+ ADCL R9,DI
+ SARL $16,DI
+ MOVQ DI,R9
+ XORL $0XFFFFFFFF,R9
+ ANDQ DI,SI
+ MOVQ 104(SP),AX
+ ANDQ R9,AX
+ ORQ AX,SI
+ ANDQ DI,DX
+ MOVQ 112(SP),AX
+ ANDQ R9,AX
+ ORQ AX,DX
+ ANDQ DI,CX
+ MOVQ 120(SP),AX
+ ANDQ R9,AX
+ ORQ AX,CX
+ ANDQ DI,R8
+ MOVQ 128(SP),DI
+ ANDQ R9,DI
+ ORQ DI,R8
+ MOVQ 88(SP),DI
+ MOVQ 96(SP),R9
+ ADDL 16(R9),SI
+ ADCL 20(R9),DX
+ ADCL 24(R9),CX
+ ADCL 28(R9),R8
+ MOVL SI,0(DI)
+ MOVL DX,4(DI)
+ MOVL CX,8(DI)
+ MOVL R8,12(DI)
+ MOVQ 32(SP),R11
+ MOVQ 40(SP),R12
+ MOVQ 48(SP),R13
+ MOVQ 56(SP),R14
+ MOVQ 64(SP),R15
+ MOVQ 72(SP),BX
+ MOVQ 80(SP),BP
+ MOVQ R11,SP
+ RET
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_arm.s b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_arm.s
new file mode 100644
index 00000000000..c15386744dd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_arm.s
@@ -0,0 +1,379 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 5a from the public
+// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
+
+// +build arm,!gccgo,!appengine
+
+DATA poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
+DATA poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
+DATA poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
+DATA poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
+DATA poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
+GLOBL poly1305_init_constants_armv6<>(SB), 8, $20
+
+// Warning: the linker may use R11 to synthesize certain instructions. Please
+// take care and verify that no synthetic instructions use it.
+
+TEXT poly1305_init_ext_armv6<>(SB),4,$-4
+ MOVM.DB.W [R4-R11], (R13)
+ MOVM.IA.W (R1), [R2-R5]
+ MOVW $poly1305_init_constants_armv6<>(SB), R7
+ MOVW R2, R8
+ MOVW R2>>26, R9
+ MOVW R3>>20, g
+ MOVW R4>>14, R11
+ MOVW R5>>8, R12
+ ORR R3<<6, R9, R9
+ ORR R4<<12, g, g
+ ORR R5<<18, R11, R11
+ MOVM.IA (R7), [R2-R6]
+ AND R8, R2, R2
+ AND R9, R3, R3
+ AND g, R4, R4
+ AND R11, R5, R5
+ AND R12, R6, R6
+ MOVM.IA.W [R2-R6], (R0)
+ EOR R2, R2, R2
+ EOR R3, R3, R3
+ EOR R4, R4, R4
+ EOR R5, R5, R5
+ EOR R6, R6, R6
+ MOVM.IA.W [R2-R6], (R0)
+ MOVM.IA.W (R1), [R2-R5]
+ MOVM.IA [R2-R6], (R0)
+ MOVM.IA.W (R13), [R4-R11]
+ RET
+
+#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
+ MOVBU (offset+0)(Rsrc), Rtmp; \
+ MOVBU Rtmp, (offset+0)(Rdst); \
+ MOVBU (offset+1)(Rsrc), Rtmp; \
+ MOVBU Rtmp, (offset+1)(Rdst); \
+ MOVBU (offset+2)(Rsrc), Rtmp; \
+ MOVBU Rtmp, (offset+2)(Rdst); \
+ MOVBU (offset+3)(Rsrc), Rtmp; \
+ MOVBU Rtmp, (offset+3)(Rdst)
+
+TEXT poly1305_blocks_armv6<>(SB),4,$-4
+ MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13)
+ SUB $128, R13
+ MOVW R0, 36(R13)
+ MOVW R1, 40(R13)
+ MOVW R2, 44(R13)
+ MOVW R1, R14
+ MOVW R2, R12
+ MOVW 56(R0), R8
+ WORD $0xe1180008 // TST R8, R8 not working see issue 5921
+ EOR R6, R6, R6
+ MOVW.EQ $(1<<24), R6
+ MOVW R6, 32(R13)
+ ADD $64, R13, g
+ MOVM.IA (R0), [R0-R9]
+ MOVM.IA [R0-R4], (g)
+ CMP $16, R12
+ BLO poly1305_blocks_armv6_done
+poly1305_blocks_armv6_mainloop:
+ WORD $0xe31e0003 // TST R14, #3 not working see issue 5921
+ BEQ poly1305_blocks_armv6_mainloop_aligned
+ ADD $48, R13, g
+ MOVW_UNALIGNED(R14, g, R0, 0)
+ MOVW_UNALIGNED(R14, g, R0, 4)
+ MOVW_UNALIGNED(R14, g, R0, 8)
+ MOVW_UNALIGNED(R14, g, R0, 12)
+ MOVM.IA (g), [R0-R3]
+ ADD $16, R14
+ B poly1305_blocks_armv6_mainloop_loaded
+poly1305_blocks_armv6_mainloop_aligned:
+ MOVM.IA.W (R14), [R0-R3]
+poly1305_blocks_armv6_mainloop_loaded:
+ MOVW R0>>26, g
+ MOVW R1>>20, R11
+ MOVW R2>>14, R12
+ MOVW R14, 40(R13)
+ MOVW R3>>8, R4
+ ORR R1<<6, g, g
+ ORR R2<<12, R11, R11
+ ORR R3<<18, R12, R12
+ BIC $0xfc000000, R0, R0
+ BIC $0xfc000000, g, g
+ MOVW 32(R13), R3
+ BIC $0xfc000000, R11, R11
+ BIC $0xfc000000, R12, R12
+ ADD R0, R5, R5
+ ADD g, R6, R6
+ ORR R3, R4, R4
+ ADD R11, R7, R7
+ ADD $64, R13, R14
+ ADD R12, R8, R8
+ ADD R4, R9, R9
+ MOVM.IA (R14), [R0-R4]
+ MULLU R4, R5, (R11, g)
+ MULLU R3, R5, (R14, R12)
+ MULALU R3, R6, (R11, g)
+ MULALU R2, R6, (R14, R12)
+ MULALU R2, R7, (R11, g)
+ MULALU R1, R7, (R14, R12)
+ ADD R4<<2, R4, R4
+ ADD R3<<2, R3, R3
+ MULALU R1, R8, (R11, g)
+ MULALU R0, R8, (R14, R12)
+ MULALU R0, R9, (R11, g)
+ MULALU R4, R9, (R14, R12)
+ MOVW g, 24(R13)
+ MOVW R11, 28(R13)
+ MOVW R12, 16(R13)
+ MOVW R14, 20(R13)
+ MULLU R2, R5, (R11, g)
+ MULLU R1, R5, (R14, R12)
+ MULALU R1, R6, (R11, g)
+ MULALU R0, R6, (R14, R12)
+ MULALU R0, R7, (R11, g)
+ MULALU R4, R7, (R14, R12)
+ ADD R2<<2, R2, R2
+ ADD R1<<2, R1, R1
+ MULALU R4, R8, (R11, g)
+ MULALU R3, R8, (R14, R12)
+ MULALU R3, R9, (R11, g)
+ MULALU R2, R9, (R14, R12)
+ MOVW g, 8(R13)
+ MOVW R11, 12(R13)
+ MOVW R12, 0(R13)
+ MOVW R14, w+4(SP)
+ MULLU R0, R5, (R11, g)
+ MULALU R4, R6, (R11, g)
+ MULALU R3, R7, (R11, g)
+ MULALU R2, R8, (R11, g)
+ MULALU R1, R9, (R11, g)
+ MOVM.IA (R13), [R0-R7]
+ MOVW g>>26, R12
+ MOVW R4>>26, R14
+ ORR R11<<6, R12, R12
+ ORR R5<<6, R14, R14
+ BIC $0xfc000000, g, g
+ BIC $0xfc000000, R4, R4
+ ADD.S R12, R0, R0
+ ADC $0, R1, R1
+ ADD.S R14, R6, R6
+ ADC $0, R7, R7
+ MOVW R0>>26, R12
+ MOVW R6>>26, R14
+ ORR R1<<6, R12, R12
+ ORR R7<<6, R14, R14
+ BIC $0xfc000000, R0, R0
+ BIC $0xfc000000, R6, R6
+ ADD R14<<2, R14, R14
+ ADD.S R12, R2, R2
+ ADC $0, R3, R3
+ ADD R14, g, g
+ MOVW R2>>26, R12
+ MOVW g>>26, R14
+ ORR R3<<6, R12, R12
+ BIC $0xfc000000, g, R5
+ BIC $0xfc000000, R2, R7
+ ADD R12, R4, R4
+ ADD R14, R0, R0
+ MOVW R4>>26, R12
+ BIC $0xfc000000, R4, R8
+ ADD R12, R6, R9
+ MOVW w+44(SP), R12
+ MOVW w+40(SP), R14
+ MOVW R0, R6
+ CMP $32, R12
+ SUB $16, R12, R12
+ MOVW R12, 44(R13)
+ BHS poly1305_blocks_armv6_mainloop
+poly1305_blocks_armv6_done:
+ MOVW 36(R13), R12
+ MOVW R5, 20(R12)
+ MOVW R6, 24(R12)
+ MOVW R7, 28(R12)
+ MOVW R8, 32(R12)
+ MOVW R9, 36(R12)
+ ADD $128, R13, R13
+ MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14]
+ RET
+
+#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
+ MOVBU.P 1(Rsrc), Rtmp; \
+ MOVBU.P Rtmp, 1(Rdst); \
+ MOVBU.P 1(Rsrc), Rtmp; \
+ MOVBU.P Rtmp, 1(Rdst)
+
+#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
+ MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
+ MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
+
+TEXT poly1305_finish_ext_armv6<>(SB),4,$-4
+ MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13)
+ SUB $16, R13, R13
+ MOVW R0, R5
+ MOVW R1, R6
+ MOVW R2, R7
+ MOVW R3, R8
+ AND.S R2, R2, R2
+ BEQ poly1305_finish_ext_armv6_noremaining
+ EOR R0, R0
+ MOVW R13, R9
+ MOVW R0, 0(R13)
+ MOVW R0, 4(R13)
+ MOVW R0, 8(R13)
+ MOVW R0, 12(R13)
+ WORD $0xe3110003 // TST R1, #3 not working; see issue 5921
+ BEQ poly1305_finish_ext_armv6_aligned
+ WORD $0xe3120008 // TST R2, #8 not working; see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip8
+ MOVWP_UNALIGNED(R1, R9, g)
+ MOVWP_UNALIGNED(R1, R9, g)
+poly1305_finish_ext_armv6_skip8:
+ WORD $0xe3120004 // TST $4, R2 not working; see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip4
+ MOVWP_UNALIGNED(R1, R9, g)
+poly1305_finish_ext_armv6_skip4:
+ WORD $0xe3120002 // TST $2, R2 not working; see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip2
+ MOVHUP_UNALIGNED(R1, R9, g)
+ B poly1305_finish_ext_armv6_skip2
+poly1305_finish_ext_armv6_aligned:
+ WORD $0xe3120008 // TST R2, #8 not working; see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip8_aligned
+ MOVM.IA.W (R1), [g-R11]
+ MOVM.IA.W [g-R11], (R9)
+poly1305_finish_ext_armv6_skip8_aligned:
+ WORD $0xe3120004 // TST $4, R2 not working; see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip4_aligned
+ MOVW.P 4(R1), g
+ MOVW.P g, 4(R9)
+poly1305_finish_ext_armv6_skip4_aligned:
+ WORD $0xe3120002 // TST $2, R2 not working; see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip2
+ MOVHU.P 2(R1), g
+ MOVH.P g, 2(R9)
+poly1305_finish_ext_armv6_skip2:
+ WORD $0xe3120001 // TST $1, R2 not working; see issue 5921
+ BEQ poly1305_finish_ext_armv6_skip1
+ MOVBU.P 1(R1), g
+ MOVBU.P g, 1(R9)
+poly1305_finish_ext_armv6_skip1:
+ MOVW $1, R11
+ MOVBU R11, 0(R9)
+ MOVW R11, 56(R5)
+ MOVW R5, R0
+ MOVW R13, R1
+ MOVW $16, R2
+ BL poly1305_blocks_armv6<>(SB)
+poly1305_finish_ext_armv6_noremaining:
+ MOVW 20(R5), R0
+ MOVW 24(R5), R1
+ MOVW 28(R5), R2
+ MOVW 32(R5), R3
+ MOVW 36(R5), R4
+ MOVW R4>>26, R12
+ BIC $0xfc000000, R4, R4
+ ADD R12<<2, R12, R12
+ ADD R12, R0, R0
+ MOVW R0>>26, R12
+ BIC $0xfc000000, R0, R0
+ ADD R12, R1, R1
+ MOVW R1>>26, R12
+ BIC $0xfc000000, R1, R1
+ ADD R12, R2, R2
+ MOVW R2>>26, R12
+ BIC $0xfc000000, R2, R2
+ ADD R12, R3, R3
+ MOVW R3>>26, R12
+ BIC $0xfc000000, R3, R3
+ ADD R12, R4, R4
+ ADD $5, R0, R6
+ MOVW R6>>26, R12
+ BIC $0xfc000000, R6, R6
+ ADD R12, R1, R7
+ MOVW R7>>26, R12
+ BIC $0xfc000000, R7, R7
+ ADD R12, R2, g
+ MOVW g>>26, R12
+ BIC $0xfc000000, g, g
+ ADD R12, R3, R11
+ MOVW $-(1<<26), R12
+ ADD R11>>26, R12, R12
+ BIC $0xfc000000, R11, R11
+ ADD R12, R4, R14
+ MOVW R14>>31, R12
+ SUB $1, R12
+ AND R12, R6, R6
+ AND R12, R7, R7
+ AND R12, g, g
+ AND R12, R11, R11
+ AND R12, R14, R14
+ MVN R12, R12
+ AND R12, R0, R0
+ AND R12, R1, R1
+ AND R12, R2, R2
+ AND R12, R3, R3
+ AND R12, R4, R4
+ ORR R6, R0, R0
+ ORR R7, R1, R1
+ ORR g, R2, R2
+ ORR R11, R3, R3
+ ORR R14, R4, R4
+ ORR R1<<26, R0, R0
+ MOVW R1>>6, R1
+ ORR R2<<20, R1, R1
+ MOVW R2>>12, R2
+ ORR R3<<14, R2, R2
+ MOVW R3>>18, R3
+ ORR R4<<8, R3, R3
+ MOVW 40(R5), R6
+ MOVW 44(R5), R7
+ MOVW 48(R5), g
+ MOVW 52(R5), R11
+ ADD.S R6, R0, R0
+ ADC.S R7, R1, R1
+ ADC.S g, R2, R2
+ ADC.S R11, R3, R3
+ MOVM.IA [R0-R3], (R8)
+ MOVW R5, R12
+ EOR R0, R0, R0
+ EOR R1, R1, R1
+ EOR R2, R2, R2
+ EOR R3, R3, R3
+ EOR R4, R4, R4
+ EOR R5, R5, R5
+ EOR R6, R6, R6
+ EOR R7, R7, R7
+ MOVM.IA.W [R0-R7], (R12)
+ MOVM.IA [R0-R7], (R12)
+ ADD $16, R13, R13
+ MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14]
+ RET
+
+// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
+TEXT ·poly1305_auth_armv6(SB),0,$280-16
+ MOVW out+0(FP), R4
+ MOVW m+4(FP), R5
+ MOVW mlen+8(FP), R6
+ MOVW key+12(FP), R7
+
+ MOVW R13, R8
+ BIC $63, R13
+ SUB $64, R13, R13
+ MOVW R13, R0
+ MOVW R7, R1
+ BL poly1305_init_ext_armv6<>(SB)
+ BIC.S $15, R6, R2
+ BEQ poly1305_auth_armv6_noblocks
+ MOVW R13, R0
+ MOVW R5, R1
+ ADD R2, R5, R5
+ SUB R2, R6, R6
+ BL poly1305_blocks_armv6<>(SB)
+poly1305_auth_armv6_noblocks:
+ MOVW R13, R0
+ MOVW R5, R1
+ MOVW R6, R2
+ MOVW R4, R3
+ BL poly1305_finish_ext_armv6<>(SB)
+ MOVW R8, R13
+ RET
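
For orientation, the ARMv6 routines above keep the Poly1305 accumulator in five
26-bit limbs: BIC $0xfc000000 masks a register down to its low 26 bits, and the
ADD Rc<<2, Rc, Rc steps compute c*5, folding a carry out of the top limb back
into the bottom one, since 2^130 = 5 (mod 2^130 - 5). A minimal Go sketch of
that carry pass follows; it is illustrative only, and the names are not from
the vendored sources.

// carryPass performs one carry pass over the five 26-bit limbs
// h[0]..h[4] of a Poly1305 accumulator. The carry out of h[4]
// re-enters h[0] multiplied by 5, which the assembly above computes
// as c + c<<2.
func carryPass(h *[5]uint32) {
	const mask = 0x3ffffff // low 26 bits; cf. BIC $0xfc000000 above
	c := h[4] >> 26
	h[4] &= mask
	h[0] += c * 5 // ADD R12<<2, R12, R12 ; ADD R12, R0, R0 above
	for i := 0; i < 4; i++ { // propagate the remaining carries upward
		c = h[i] >> 26
		h[i] &= mask
		h[i+1] += c
	}
}
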
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_test.go
new file mode 100644
index 00000000000..b3e92310b5d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/poly1305_test.go
@@ -0,0 +1,86 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poly1305
+
+import (
+ "bytes"
+ "testing"
+ "unsafe"
+)
+
+var testData = []struct {
+ in, k, correct []byte
+}{
+ {
+ []byte("Hello world!"),
+ []byte("this is 32-byte key for Poly1305"),
+ []byte{0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16, 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2, 0xb2, 0xf0},
+ },
+ {
+ make([]byte, 32),
+ []byte("this is 32-byte key for Poly1305"),
+ []byte{0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6, 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc, 0x03, 0x07},
+ },
+ {
+ make([]byte, 2007),
+ []byte("this is 32-byte key for Poly1305"),
+ []byte{0xda, 0x84, 0xbc, 0xab, 0x02, 0x67, 0x6c, 0x38, 0xcd, 0xb0, 0x15, 0x60, 0x42, 0x74, 0xc2, 0xaa},
+ },
+ {
+ make([]byte, 2007),
+ make([]byte, 32),
+ make([]byte, 16),
+ },
+}
+
+func testSum(t *testing.T, unaligned bool) {
+ var out [16]byte
+ var key [32]byte
+
+ for i, v := range testData {
+ in := v.in
+ if unaligned {
+ in = unalignBytes(in)
+ }
+ copy(key[:], v.k)
+ Sum(&out, in, &key)
+ if !bytes.Equal(out[:], v.correct) {
+ t.Errorf("%d: expected %x, got %x", i, v.correct, out[:])
+ }
+ }
+}
+
+func TestSum(t *testing.T) { testSum(t, false) }
+func TestSumUnaligned(t *testing.T) { testSum(t, true) }
+
+func benchmark(b *testing.B, size int, unaligned bool) {
+ var out [16]byte
+ var key [32]byte
+ in := make([]byte, size)
+ if unaligned {
+ in = unalignBytes(in)
+ }
+ b.SetBytes(int64(len(in)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Sum(&out, in, &key)
+ }
+}
+
+func Benchmark64(b *testing.B) { benchmark(b, 64, false) }
+func Benchmark1K(b *testing.B) { benchmark(b, 1024, false) }
+func Benchmark64Unaligned(b *testing.B) { benchmark(b, 64, true) }
+func Benchmark1KUnaligned(b *testing.B) { benchmark(b, 1024, true) }
+
+func unalignBytes(in []byte) []byte {
+ out := make([]byte, len(in)+1)
+ if uintptr(unsafe.Pointer(&out[0]))&(unsafe.Alignof(uint32(0))-1) == 0 {
+ out = out[1:]
+ } else {
+ out = out[:len(in)]
+ }
+ copy(out, in)
+ return out
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_amd64.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_amd64.go
new file mode 100644
index 00000000000..6775c703f61
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_amd64.go
@@ -0,0 +1,23 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+package poly1305
+
+// This function is implemented in poly1305_amd64.s
+
+//go:noescape
+func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+ var mPtr *byte
+ if len(m) > 0 {
+ mPtr = &m[0]
+ }
+ poly1305(out, mPtr, uint64(len(m)), key)
+}
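
The Sum wrapper above, like its ARM counterpart in the next file, documents
that a key must authenticate exactly one message. A minimal usage sketch,
assuming the vendored import path; the expected tag is the first vector from
poly1305_test.go above.

package main

import (
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	// One-time key: reusing it across messages lets an attacker forge tags.
	var key [32]byte
	copy(key[:], "this is 32-byte key for Poly1305")

	var tag [16]byte
	poly1305.Sum(&tag, []byte("Hello world!"), &key)
	fmt.Printf("%x\n", tag) // a6f745008f81c916a20dcc74eef2b2f0, per the test vectors
}
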
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_arm.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_arm.go
new file mode 100644
index 00000000000..50b979c24c6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_arm.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,!gccgo,!appengine
+
+package poly1305
+
+// This function is implemented in poly1305_arm.s
+
+//go:noescape
+func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+ var mPtr *byte
+ if len(m) > 0 {
+ mPtr = &m[0]
+ }
+ poly1305_auth_armv6(out, mPtr, uint32(len(m)), key)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_ref.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_ref.go
new file mode 100644
index 00000000000..0b24fc78b93
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/poly1305/sum_ref.go
@@ -0,0 +1,1531 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!arm gccgo appengine
+
+package poly1305
+
+// Based on the original, public-domain implementation from NaCl by D. J.
+// Bernstein.
+
+import "math"
+
+const (
+ alpham80 = 0.00000000558793544769287109375
+ alpham48 = 24.0
+ alpham16 = 103079215104.0
+ alpha0 = 6755399441055744.0
+ alpha18 = 1770887431076116955136.0
+ alpha32 = 29014219670751100192948224.0
+ alpha50 = 7605903601369376408980219232256.0
+ alpha64 = 124615124604835863084731911901282304.0
+ alpha82 = 32667107224410092492483962313449748299776.0
+ alpha96 = 535217884764734955396857238543560676143529984.0
+ alpha112 = 35076039295941670036888435985190792471742381031424.0
+ alpha130 = 9194973245195333150150082162901855101712434733101613056.0
+ scale = 0.0000000000000000000000000000000000000036734198463196484624023016788195177431833298649127735047148490821200539357960224151611328125
+ offset0 = 6755408030990331.0
+ offset1 = 29014256564239239022116864.0
+ offset2 = 124615283061160854719918951570079744.0
+ offset3 = 535219245894202480694386063513315216128475136.0
+)
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+ r := key
+ s := key[16:]
+ var (
+ y7 float64
+ y6 float64
+ y1 float64
+ y0 float64
+ y5 float64
+ y4 float64
+ x7 float64
+ x6 float64
+ x1 float64
+ x0 float64
+ y3 float64
+ y2 float64
+ x5 float64
+ r3lowx0 float64
+ x4 float64
+ r0lowx6 float64
+ x3 float64
+ r3highx0 float64
+ x2 float64
+ r0highx6 float64
+ r0lowx0 float64
+ sr1lowx6 float64
+ r0highx0 float64
+ sr1highx6 float64
+ sr3low float64
+ r1lowx0 float64
+ sr2lowx6 float64
+ r1highx0 float64
+ sr2highx6 float64
+ r2lowx0 float64
+ sr3lowx6 float64
+ r2highx0 float64
+ sr3highx6 float64
+ r1highx4 float64
+ r1lowx4 float64
+ r0highx4 float64
+ r0lowx4 float64
+ sr3highx4 float64
+ sr3lowx4 float64
+ sr2highx4 float64
+ sr2lowx4 float64
+ r0lowx2 float64
+ r0highx2 float64
+ r1lowx2 float64
+ r1highx2 float64
+ r2lowx2 float64
+ r2highx2 float64
+ sr3lowx2 float64
+ sr3highx2 float64
+ z0 float64
+ z1 float64
+ z2 float64
+ z3 float64
+ m0 int64
+ m1 int64
+ m2 int64
+ m3 int64
+ m00 uint32
+ m01 uint32
+ m02 uint32
+ m03 uint32
+ m10 uint32
+ m11 uint32
+ m12 uint32
+ m13 uint32
+ m20 uint32
+ m21 uint32
+ m22 uint32
+ m23 uint32
+ m30 uint32
+ m31 uint32
+ m32 uint32
+ m33 uint64
+ lbelow2 int32
+ lbelow3 int32
+ lbelow4 int32
+ lbelow5 int32
+ lbelow6 int32
+ lbelow7 int32
+ lbelow8 int32
+ lbelow9 int32
+ lbelow10 int32
+ lbelow11 int32
+ lbelow12 int32
+ lbelow13 int32
+ lbelow14 int32
+ lbelow15 int32
+ s00 uint32
+ s01 uint32
+ s02 uint32
+ s03 uint32
+ s10 uint32
+ s11 uint32
+ s12 uint32
+ s13 uint32
+ s20 uint32
+ s21 uint32
+ s22 uint32
+ s23 uint32
+ s30 uint32
+ s31 uint32
+ s32 uint32
+ s33 uint32
+ bits32 uint64
+ f uint64
+ f0 uint64
+ f1 uint64
+ f2 uint64
+ f3 uint64
+ f4 uint64
+ g uint64
+ g0 uint64
+ g1 uint64
+ g2 uint64
+ g3 uint64
+ g4 uint64
+ )
+
+ var p int32
+
+ l := int32(len(m))
+
+ r00 := uint32(r[0])
+
+ r01 := uint32(r[1])
+
+ r02 := uint32(r[2])
+ r0 := int64(2151)
+
+ r03 := uint32(r[3])
+ r03 &= 15
+ r0 <<= 51
+
+ r10 := uint32(r[4])
+ r10 &= 252
+ r01 <<= 8
+ r0 += int64(r00)
+
+ r11 := uint32(r[5])
+ r02 <<= 16
+ r0 += int64(r01)
+
+ r12 := uint32(r[6])
+ r03 <<= 24
+ r0 += int64(r02)
+
+ r13 := uint32(r[7])
+ r13 &= 15
+ r1 := int64(2215)
+ r0 += int64(r03)
+
+ d0 := r0
+ r1 <<= 51
+ r2 := int64(2279)
+
+ r20 := uint32(r[8])
+ r20 &= 252
+ r11 <<= 8
+ r1 += int64(r10)
+
+ r21 := uint32(r[9])
+ r12 <<= 16
+ r1 += int64(r11)
+
+ r22 := uint32(r[10])
+ r13 <<= 24
+ r1 += int64(r12)
+
+ r23 := uint32(r[11])
+ r23 &= 15
+ r2 <<= 51
+ r1 += int64(r13)
+
+ d1 := r1
+ r21 <<= 8
+ r2 += int64(r20)
+
+ r30 := uint32(r[12])
+ r30 &= 252
+ r22 <<= 16
+ r2 += int64(r21)
+
+ r31 := uint32(r[13])
+ r23 <<= 24
+ r2 += int64(r22)
+
+ r32 := uint32(r[14])
+ r2 += int64(r23)
+ r3 := int64(2343)
+
+ d2 := r2
+ r3 <<= 51
+
+ r33 := uint32(r[15])
+ r33 &= 15
+ r31 <<= 8
+ r3 += int64(r30)
+
+ r32 <<= 16
+ r3 += int64(r31)
+
+ r33 <<= 24
+ r3 += int64(r32)
+
+ r3 += int64(r33)
+ h0 := alpha32 - alpha32
+
+ d3 := r3
+ h1 := alpha32 - alpha32
+
+ h2 := alpha32 - alpha32
+
+ h3 := alpha32 - alpha32
+
+ h4 := alpha32 - alpha32
+
+ r0low := math.Float64frombits(uint64(d0))
+ h5 := alpha32 - alpha32
+
+ r1low := math.Float64frombits(uint64(d1))
+ h6 := alpha32 - alpha32
+
+ r2low := math.Float64frombits(uint64(d2))
+ h7 := alpha32 - alpha32
+
+ r0low -= alpha0
+
+ r1low -= alpha32
+
+ r2low -= alpha64
+
+ r0high := r0low + alpha18
+
+ r3low := math.Float64frombits(uint64(d3))
+
+ r1high := r1low + alpha50
+ sr1low := scale * r1low
+
+ r2high := r2low + alpha82
+ sr2low := scale * r2low
+
+ r0high -= alpha18
+ r0high_stack := r0high
+
+ r3low -= alpha96
+
+ r1high -= alpha50
+ r1high_stack := r1high
+
+ sr1high := sr1low + alpham80
+
+ r0low -= r0high
+
+ r2high -= alpha82
+ sr3low = scale * r3low
+
+ sr2high := sr2low + alpham48
+
+ r1low -= r1high
+ r1low_stack := r1low
+
+ sr1high -= alpham80
+ sr1high_stack := sr1high
+
+ r2low -= r2high
+ r2low_stack := r2low
+
+ sr2high -= alpham48
+ sr2high_stack := sr2high
+
+ r3high := r3low + alpha112
+ r0low_stack := r0low
+
+ sr1low -= sr1high
+ sr1low_stack := sr1low
+
+ sr3high := sr3low + alpham16
+ r2high_stack := r2high
+
+ sr2low -= sr2high
+ sr2low_stack := sr2low
+
+ r3high -= alpha112
+ r3high_stack := r3high
+
+ sr3high -= alpham16
+ sr3high_stack := sr3high
+
+ r3low -= r3high
+ r3low_stack := r3low
+
+ sr3low -= sr3high
+ sr3low_stack := sr3low
+
+ if l < 16 {
+ goto addatmost15bytes
+ }
+
+ m00 = uint32(m[p+0])
+ m0 = 2151
+
+ m0 <<= 51
+ m1 = 2215
+ m01 = uint32(m[p+1])
+
+ m1 <<= 51
+ m2 = 2279
+ m02 = uint32(m[p+2])
+
+ m2 <<= 51
+ m3 = 2343
+ m03 = uint32(m[p+3])
+
+ m10 = uint32(m[p+4])
+ m01 <<= 8
+ m0 += int64(m00)
+
+ m11 = uint32(m[p+5])
+ m02 <<= 16
+ m0 += int64(m01)
+
+ m12 = uint32(m[p+6])
+ m03 <<= 24
+ m0 += int64(m02)
+
+ m13 = uint32(m[p+7])
+ m3 <<= 51
+ m0 += int64(m03)
+
+ m20 = uint32(m[p+8])
+ m11 <<= 8
+ m1 += int64(m10)
+
+ m21 = uint32(m[p+9])
+ m12 <<= 16
+ m1 += int64(m11)
+
+ m22 = uint32(m[p+10])
+ m13 <<= 24
+ m1 += int64(m12)
+
+ m23 = uint32(m[p+11])
+ m1 += int64(m13)
+
+ m30 = uint32(m[p+12])
+ m21 <<= 8
+ m2 += int64(m20)
+
+ m31 = uint32(m[p+13])
+ m22 <<= 16
+ m2 += int64(m21)
+
+ m32 = uint32(m[p+14])
+ m23 <<= 24
+ m2 += int64(m22)
+
+ m33 = uint64(m[p+15])
+ m2 += int64(m23)
+
+ d0 = m0
+ m31 <<= 8
+ m3 += int64(m30)
+
+ d1 = m1
+ m32 <<= 16
+ m3 += int64(m31)
+
+ d2 = m2
+ m33 += 256
+
+ m33 <<= 24
+ m3 += int64(m32)
+
+ m3 += int64(m33)
+ d3 = m3
+
+ p += 16
+ l -= 16
+
+ z0 = math.Float64frombits(uint64(d0))
+
+ z1 = math.Float64frombits(uint64(d1))
+
+ z2 = math.Float64frombits(uint64(d2))
+
+ z3 = math.Float64frombits(uint64(d3))
+
+ z0 -= alpha0
+
+ z1 -= alpha32
+
+ z2 -= alpha64
+
+ z3 -= alpha96
+
+ h0 += z0
+
+ h1 += z1
+
+ h3 += z2
+
+ h5 += z3
+
+ if l < 16 {
+ goto multiplyaddatmost15bytes
+ }
+
+multiplyaddatleast16bytes:
+
+ m2 = 2279
+ m20 = uint32(m[p+8])
+ y7 = h7 + alpha130
+
+ m2 <<= 51
+ m3 = 2343
+ m21 = uint32(m[p+9])
+ y6 = h6 + alpha130
+
+ m3 <<= 51
+ m0 = 2151
+ m22 = uint32(m[p+10])
+ y1 = h1 + alpha32
+
+ m0 <<= 51
+ m1 = 2215
+ m23 = uint32(m[p+11])
+ y0 = h0 + alpha32
+
+ m1 <<= 51
+ m30 = uint32(m[p+12])
+ y7 -= alpha130
+
+ m21 <<= 8
+ m2 += int64(m20)
+ m31 = uint32(m[p+13])
+ y6 -= alpha130
+
+ m22 <<= 16
+ m2 += int64(m21)
+ m32 = uint32(m[p+14])
+ y1 -= alpha32
+
+ m23 <<= 24
+ m2 += int64(m22)
+ m33 = uint64(m[p+15])
+ y0 -= alpha32
+
+ m2 += int64(m23)
+ m00 = uint32(m[p+0])
+ y5 = h5 + alpha96
+
+ m31 <<= 8
+ m3 += int64(m30)
+ m01 = uint32(m[p+1])
+ y4 = h4 + alpha96
+
+ m32 <<= 16
+ m02 = uint32(m[p+2])
+ x7 = h7 - y7
+ y7 *= scale
+
+ m33 += 256
+ m03 = uint32(m[p+3])
+ x6 = h6 - y6
+ y6 *= scale
+
+ m33 <<= 24
+ m3 += int64(m31)
+ m10 = uint32(m[p+4])
+ x1 = h1 - y1
+
+ m01 <<= 8
+ m3 += int64(m32)
+ m11 = uint32(m[p+5])
+ x0 = h0 - y0
+
+ m3 += int64(m33)
+ m0 += int64(m00)
+ m12 = uint32(m[p+6])
+ y5 -= alpha96
+
+ m02 <<= 16
+ m0 += int64(m01)
+ m13 = uint32(m[p+7])
+ y4 -= alpha96
+
+ m03 <<= 24
+ m0 += int64(m02)
+ d2 = m2
+ x1 += y7
+
+ m0 += int64(m03)
+ d3 = m3
+ x0 += y6
+
+ m11 <<= 8
+ m1 += int64(m10)
+ d0 = m0
+ x7 += y5
+
+ m12 <<= 16
+ m1 += int64(m11)
+ x6 += y4
+
+ m13 <<= 24
+ m1 += int64(m12)
+ y3 = h3 + alpha64
+
+ m1 += int64(m13)
+ d1 = m1
+ y2 = h2 + alpha64
+
+ x0 += x1
+
+ x6 += x7
+
+ y3 -= alpha64
+ r3low = r3low_stack
+
+ y2 -= alpha64
+ r0low = r0low_stack
+
+ x5 = h5 - y5
+ r3lowx0 = r3low * x0
+ r3high = r3high_stack
+
+ x4 = h4 - y4
+ r0lowx6 = r0low * x6
+ r0high = r0high_stack
+
+ x3 = h3 - y3
+ r3highx0 = r3high * x0
+ sr1low = sr1low_stack
+
+ x2 = h2 - y2
+ r0highx6 = r0high * x6
+ sr1high = sr1high_stack
+
+ x5 += y3
+ r0lowx0 = r0low * x0
+ r1low = r1low_stack
+
+ h6 = r3lowx0 + r0lowx6
+ sr1lowx6 = sr1low * x6
+ r1high = r1high_stack
+
+ x4 += y2
+ r0highx0 = r0high * x0
+ sr2low = sr2low_stack
+
+ h7 = r3highx0 + r0highx6
+ sr1highx6 = sr1high * x6
+ sr2high = sr2high_stack
+
+ x3 += y1
+ r1lowx0 = r1low * x0
+ r2low = r2low_stack
+
+ h0 = r0lowx0 + sr1lowx6
+ sr2lowx6 = sr2low * x6
+ r2high = r2high_stack
+
+ x2 += y0
+ r1highx0 = r1high * x0
+ sr3low = sr3low_stack
+
+ h1 = r0highx0 + sr1highx6
+ sr2highx6 = sr2high * x6
+ sr3high = sr3high_stack
+
+ x4 += x5
+ r2lowx0 = r2low * x0
+ z2 = math.Float64frombits(uint64(d2))
+
+ h2 = r1lowx0 + sr2lowx6
+ sr3lowx6 = sr3low * x6
+
+ x2 += x3
+ r2highx0 = r2high * x0
+ z3 = math.Float64frombits(uint64(d3))
+
+ h3 = r1highx0 + sr2highx6
+ sr3highx6 = sr3high * x6
+
+ r1highx4 = r1high * x4
+ z2 -= alpha64
+
+ h4 = r2lowx0 + sr3lowx6
+ r1lowx4 = r1low * x4
+
+ r0highx4 = r0high * x4
+ z3 -= alpha96
+
+ h5 = r2highx0 + sr3highx6
+ r0lowx4 = r0low * x4
+
+ h7 += r1highx4
+ sr3highx4 = sr3high * x4
+
+ h6 += r1lowx4
+ sr3lowx4 = sr3low * x4
+
+ h5 += r0highx4
+ sr2highx4 = sr2high * x4
+
+ h4 += r0lowx4
+ sr2lowx4 = sr2low * x4
+
+ h3 += sr3highx4
+ r0lowx2 = r0low * x2
+
+ h2 += sr3lowx4
+ r0highx2 = r0high * x2
+
+ h1 += sr2highx4
+ r1lowx2 = r1low * x2
+
+ h0 += sr2lowx4
+ r1highx2 = r1high * x2
+
+ h2 += r0lowx2
+ r2lowx2 = r2low * x2
+
+ h3 += r0highx2
+ r2highx2 = r2high * x2
+
+ h4 += r1lowx2
+ sr3lowx2 = sr3low * x2
+
+ h5 += r1highx2
+ sr3highx2 = sr3high * x2
+
+ p += 16
+ l -= 16
+ h6 += r2lowx2
+
+ h7 += r2highx2
+
+ z1 = math.Float64frombits(uint64(d1))
+ h0 += sr3lowx2
+
+ z0 = math.Float64frombits(uint64(d0))
+ h1 += sr3highx2
+
+ z1 -= alpha32
+
+ z0 -= alpha0
+
+ h5 += z3
+
+ h3 += z2
+
+ h1 += z1
+
+ h0 += z0
+
+ if l >= 16 {
+ goto multiplyaddatleast16bytes
+ }
+
+multiplyaddatmost15bytes:
+
+ y7 = h7 + alpha130
+
+ y6 = h6 + alpha130
+
+ y1 = h1 + alpha32
+
+ y0 = h0 + alpha32
+
+ y7 -= alpha130
+
+ y6 -= alpha130
+
+ y1 -= alpha32
+
+ y0 -= alpha32
+
+ y5 = h5 + alpha96
+
+ y4 = h4 + alpha96
+
+ x7 = h7 - y7
+ y7 *= scale
+
+ x6 = h6 - y6
+ y6 *= scale
+
+ x1 = h1 - y1
+
+ x0 = h0 - y0
+
+ y5 -= alpha96
+
+ y4 -= alpha96
+
+ x1 += y7
+
+ x0 += y6
+
+ x7 += y5
+
+ x6 += y4
+
+ y3 = h3 + alpha64
+
+ y2 = h2 + alpha64
+
+ x0 += x1
+
+ x6 += x7
+
+ y3 -= alpha64
+ r3low = r3low_stack
+
+ y2 -= alpha64
+ r0low = r0low_stack
+
+ x5 = h5 - y5
+ r3lowx0 = r3low * x0
+ r3high = r3high_stack
+
+ x4 = h4 - y4
+ r0lowx6 = r0low * x6
+ r0high = r0high_stack
+
+ x3 = h3 - y3
+ r3highx0 = r3high * x0
+ sr1low = sr1low_stack
+
+ x2 = h2 - y2
+ r0highx6 = r0high * x6
+ sr1high = sr1high_stack
+
+ x5 += y3
+ r0lowx0 = r0low * x0
+ r1low = r1low_stack
+
+ h6 = r3lowx0 + r0lowx6
+ sr1lowx6 = sr1low * x6
+ r1high = r1high_stack
+
+ x4 += y2
+ r0highx0 = r0high * x0
+ sr2low = sr2low_stack
+
+ h7 = r3highx0 + r0highx6
+ sr1highx6 = sr1high * x6
+ sr2high = sr2high_stack
+
+ x3 += y1
+ r1lowx0 = r1low * x0
+ r2low = r2low_stack
+
+ h0 = r0lowx0 + sr1lowx6
+ sr2lowx6 = sr2low * x6
+ r2high = r2high_stack
+
+ x2 += y0
+ r1highx0 = r1high * x0
+ sr3low = sr3low_stack
+
+ h1 = r0highx0 + sr1highx6
+ sr2highx6 = sr2high * x6
+ sr3high = sr3high_stack
+
+ x4 += x5
+ r2lowx0 = r2low * x0
+
+ h2 = r1lowx0 + sr2lowx6
+ sr3lowx6 = sr3low * x6
+
+ x2 += x3
+ r2highx0 = r2high * x0
+
+ h3 = r1highx0 + sr2highx6
+ sr3highx6 = sr3high * x6
+
+ r1highx4 = r1high * x4
+
+ h4 = r2lowx0 + sr3lowx6
+ r1lowx4 = r1low * x4
+
+ r0highx4 = r0high * x4
+
+ h5 = r2highx0 + sr3highx6
+ r0lowx4 = r0low * x4
+
+ h7 += r1highx4
+ sr3highx4 = sr3high * x4
+
+ h6 += r1lowx4
+ sr3lowx4 = sr3low * x4
+
+ h5 += r0highx4
+ sr2highx4 = sr2high * x4
+
+ h4 += r0lowx4
+ sr2lowx4 = sr2low * x4
+
+ h3 += sr3highx4
+ r0lowx2 = r0low * x2
+
+ h2 += sr3lowx4
+ r0highx2 = r0high * x2
+
+ h1 += sr2highx4
+ r1lowx2 = r1low * x2
+
+ h0 += sr2lowx4
+ r1highx2 = r1high * x2
+
+ h2 += r0lowx2
+ r2lowx2 = r2low * x2
+
+ h3 += r0highx2
+ r2highx2 = r2high * x2
+
+ h4 += r1lowx2
+ sr3lowx2 = sr3low * x2
+
+ h5 += r1highx2
+ sr3highx2 = sr3high * x2
+
+ h6 += r2lowx2
+
+ h7 += r2highx2
+
+ h0 += sr3lowx2
+
+ h1 += sr3highx2
+
+addatmost15bytes:
+
+ if l == 0 {
+ goto nomorebytes
+ }
+
+ lbelow2 = l - 2
+
+ lbelow3 = l - 3
+
+ lbelow2 >>= 31
+ lbelow4 = l - 4
+
+ m00 = uint32(m[p+0])
+ lbelow3 >>= 31
+ p += lbelow2
+
+ m01 = uint32(m[p+1])
+ lbelow4 >>= 31
+ p += lbelow3
+
+ m02 = uint32(m[p+2])
+ p += lbelow4
+ m0 = 2151
+
+ m03 = uint32(m[p+3])
+ m0 <<= 51
+ m1 = 2215
+
+ m0 += int64(m00)
+ m01 &^= uint32(lbelow2)
+
+ m02 &^= uint32(lbelow3)
+ m01 -= uint32(lbelow2)
+
+ m01 <<= 8
+ m03 &^= uint32(lbelow4)
+
+ m0 += int64(m01)
+ lbelow2 -= lbelow3
+
+ m02 += uint32(lbelow2)
+ lbelow3 -= lbelow4
+
+ m02 <<= 16
+ m03 += uint32(lbelow3)
+
+ m03 <<= 24
+ m0 += int64(m02)
+
+ m0 += int64(m03)
+ lbelow5 = l - 5
+
+ lbelow6 = l - 6
+ lbelow7 = l - 7
+
+ lbelow5 >>= 31
+ lbelow8 = l - 8
+
+ lbelow6 >>= 31
+ p += lbelow5
+
+ m10 = uint32(m[p+4])
+ lbelow7 >>= 31
+ p += lbelow6
+
+ m11 = uint32(m[p+5])
+ lbelow8 >>= 31
+ p += lbelow7
+
+ m12 = uint32(m[p+6])
+ m1 <<= 51
+ p += lbelow8
+
+ m13 = uint32(m[p+7])
+ m10 &^= uint32(lbelow5)
+ lbelow4 -= lbelow5
+
+ m10 += uint32(lbelow4)
+ lbelow5 -= lbelow6
+
+ m11 &^= uint32(lbelow6)
+ m11 += uint32(lbelow5)
+
+ m11 <<= 8
+ m1 += int64(m10)
+
+ m1 += int64(m11)
+ m12 &^= uint32(lbelow7)
+
+ lbelow6 -= lbelow7
+ m13 &^= uint32(lbelow8)
+
+ m12 += uint32(lbelow6)
+ lbelow7 -= lbelow8
+
+ m12 <<= 16
+ m13 += uint32(lbelow7)
+
+ m13 <<= 24
+ m1 += int64(m12)
+
+ m1 += int64(m13)
+ m2 = 2279
+
+ lbelow9 = l - 9
+ m3 = 2343
+
+ lbelow10 = l - 10
+ lbelow11 = l - 11
+
+ lbelow9 >>= 31
+ lbelow12 = l - 12
+
+ lbelow10 >>= 31
+ p += lbelow9
+
+ m20 = uint32(m[p+8])
+ lbelow11 >>= 31
+ p += lbelow10
+
+ m21 = uint32(m[p+9])
+ lbelow12 >>= 31
+ p += lbelow11
+
+ m22 = uint32(m[p+10])
+ m2 <<= 51
+ p += lbelow12
+
+ m23 = uint32(m[p+11])
+ m20 &^= uint32(lbelow9)
+ lbelow8 -= lbelow9
+
+ m20 += uint32(lbelow8)
+ lbelow9 -= lbelow10
+
+ m21 &^= uint32(lbelow10)
+ m21 += uint32(lbelow9)
+
+ m21 <<= 8
+ m2 += int64(m20)
+
+ m2 += int64(m21)
+ m22 &^= uint32(lbelow11)
+
+ lbelow10 -= lbelow11
+ m23 &^= uint32(lbelow12)
+
+ m22 += uint32(lbelow10)
+ lbelow11 -= lbelow12
+
+ m22 <<= 16
+ m23 += uint32(lbelow11)
+
+ m23 <<= 24
+ m2 += int64(m22)
+
+ m3 <<= 51
+ lbelow13 = l - 13
+
+ lbelow13 >>= 31
+ lbelow14 = l - 14
+
+ lbelow14 >>= 31
+ p += lbelow13
+ lbelow15 = l - 15
+
+ m30 = uint32(m[p+12])
+ lbelow15 >>= 31
+ p += lbelow14
+
+ m31 = uint32(m[p+13])
+ p += lbelow15
+ m2 += int64(m23)
+
+ m32 = uint32(m[p+14])
+ m30 &^= uint32(lbelow13)
+ lbelow12 -= lbelow13
+
+ m30 += uint32(lbelow12)
+ lbelow13 -= lbelow14
+
+ m3 += int64(m30)
+ m31 &^= uint32(lbelow14)
+
+ m31 += uint32(lbelow13)
+ m32 &^= uint32(lbelow15)
+
+ m31 <<= 8
+ lbelow14 -= lbelow15
+
+ m3 += int64(m31)
+ m32 += uint32(lbelow14)
+ d0 = m0
+
+ m32 <<= 16
+ m33 = uint64(lbelow15 + 1)
+ d1 = m1
+
+ m33 <<= 24
+ m3 += int64(m32)
+ d2 = m2
+
+ m3 += int64(m33)
+ d3 = m3
+
+ z3 = math.Float64frombits(uint64(d3))
+
+ z2 = math.Float64frombits(uint64(d2))
+
+ z1 = math.Float64frombits(uint64(d1))
+
+ z0 = math.Float64frombits(uint64(d0))
+
+ z3 -= alpha96
+
+ z2 -= alpha64
+
+ z1 -= alpha32
+
+ z0 -= alpha0
+
+ h5 += z3
+
+ h3 += z2
+
+ h1 += z1
+
+ h0 += z0
+
+ y7 = h7 + alpha130
+
+ y6 = h6 + alpha130
+
+ y1 = h1 + alpha32
+
+ y0 = h0 + alpha32
+
+ y7 -= alpha130
+
+ y6 -= alpha130
+
+ y1 -= alpha32
+
+ y0 -= alpha32
+
+ y5 = h5 + alpha96
+
+ y4 = h4 + alpha96
+
+ x7 = h7 - y7
+ y7 *= scale
+
+ x6 = h6 - y6
+ y6 *= scale
+
+ x1 = h1 - y1
+
+ x0 = h0 - y0
+
+ y5 -= alpha96
+
+ y4 -= alpha96
+
+ x1 += y7
+
+ x0 += y6
+
+ x7 += y5
+
+ x6 += y4
+
+ y3 = h3 + alpha64
+
+ y2 = h2 + alpha64
+
+ x0 += x1
+
+ x6 += x7
+
+ y3 -= alpha64
+ r3low = r3low_stack
+
+ y2 -= alpha64
+ r0low = r0low_stack
+
+ x5 = h5 - y5
+ r3lowx0 = r3low * x0
+ r3high = r3high_stack
+
+ x4 = h4 - y4
+ r0lowx6 = r0low * x6
+ r0high = r0high_stack
+
+ x3 = h3 - y3
+ r3highx0 = r3high * x0
+ sr1low = sr1low_stack
+
+ x2 = h2 - y2
+ r0highx6 = r0high * x6
+ sr1high = sr1high_stack
+
+ x5 += y3
+ r0lowx0 = r0low * x0
+ r1low = r1low_stack
+
+ h6 = r3lowx0 + r0lowx6
+ sr1lowx6 = sr1low * x6
+ r1high = r1high_stack
+
+ x4 += y2
+ r0highx0 = r0high * x0
+ sr2low = sr2low_stack
+
+ h7 = r3highx0 + r0highx6
+ sr1highx6 = sr1high * x6
+ sr2high = sr2high_stack
+
+ x3 += y1
+ r1lowx0 = r1low * x0
+ r2low = r2low_stack
+
+ h0 = r0lowx0 + sr1lowx6
+ sr2lowx6 = sr2low * x6
+ r2high = r2high_stack
+
+ x2 += y0
+ r1highx0 = r1high * x0
+ sr3low = sr3low_stack
+
+ h1 = r0highx0 + sr1highx6
+ sr2highx6 = sr2high * x6
+ sr3high = sr3high_stack
+
+ x4 += x5
+ r2lowx0 = r2low * x0
+
+ h2 = r1lowx0 + sr2lowx6
+ sr3lowx6 = sr3low * x6
+
+ x2 += x3
+ r2highx0 = r2high * x0
+
+ h3 = r1highx0 + sr2highx6
+ sr3highx6 = sr3high * x6
+
+ r1highx4 = r1high * x4
+
+ h4 = r2lowx0 + sr3lowx6
+ r1lowx4 = r1low * x4
+
+ r0highx4 = r0high * x4
+
+ h5 = r2highx0 + sr3highx6
+ r0lowx4 = r0low * x4
+
+ h7 += r1highx4
+ sr3highx4 = sr3high * x4
+
+ h6 += r1lowx4
+ sr3lowx4 = sr3low * x4
+
+ h5 += r0highx4
+ sr2highx4 = sr2high * x4
+
+ h4 += r0lowx4
+ sr2lowx4 = sr2low * x4
+
+ h3 += sr3highx4
+ r0lowx2 = r0low * x2
+
+ h2 += sr3lowx4
+ r0highx2 = r0high * x2
+
+ h1 += sr2highx4
+ r1lowx2 = r1low * x2
+
+ h0 += sr2lowx4
+ r1highx2 = r1high * x2
+
+ h2 += r0lowx2
+ r2lowx2 = r2low * x2
+
+ h3 += r0highx2
+ r2highx2 = r2high * x2
+
+ h4 += r1lowx2
+ sr3lowx2 = sr3low * x2
+
+ h5 += r1highx2
+ sr3highx2 = sr3high * x2
+
+ h6 += r2lowx2
+
+ h7 += r2highx2
+
+ h0 += sr3lowx2
+
+ h1 += sr3highx2
+
+nomorebytes:
+
+ y7 = h7 + alpha130
+
+ y0 = h0 + alpha32
+
+ y1 = h1 + alpha32
+
+ y2 = h2 + alpha64
+
+ y7 -= alpha130
+
+ y3 = h3 + alpha64
+
+ y4 = h4 + alpha96
+
+ y5 = h5 + alpha96
+
+ x7 = h7 - y7
+ y7 *= scale
+
+ y0 -= alpha32
+
+ y1 -= alpha32
+
+ y2 -= alpha64
+
+ h6 += x7
+
+ y3 -= alpha64
+
+ y4 -= alpha96
+
+ y5 -= alpha96
+
+ y6 = h6 + alpha130
+
+ x0 = h0 - y0
+
+ x1 = h1 - y1
+
+ x2 = h2 - y2
+
+ y6 -= alpha130
+
+ x0 += y7
+
+ x3 = h3 - y3
+
+ x4 = h4 - y4
+
+ x5 = h5 - y5
+
+ x6 = h6 - y6
+
+ y6 *= scale
+
+ x2 += y0
+
+ x3 += y1
+
+ x4 += y2
+
+ x0 += y6
+
+ x5 += y3
+
+ x6 += y4
+
+ x2 += x3
+
+ x0 += x1
+
+ x4 += x5
+
+ x6 += y5
+
+ x2 += offset1
+ d1 = int64(math.Float64bits(x2))
+
+ x0 += offset0
+ d0 = int64(math.Float64bits(x0))
+
+ x4 += offset2
+ d2 = int64(math.Float64bits(x4))
+
+ x6 += offset3
+ d3 = int64(math.Float64bits(x6))
+
+ f0 = uint64(d0)
+
+ f1 = uint64(d1)
+ bits32 = math.MaxUint64
+
+ f2 = uint64(d2)
+ bits32 >>= 32
+
+ f3 = uint64(d3)
+ f = f0 >> 32
+
+ f0 &= bits32
+ f &= 255
+
+ f1 += f
+ g0 = f0 + 5
+
+ g = g0 >> 32
+ g0 &= bits32
+
+ f = f1 >> 32
+ f1 &= bits32
+
+ f &= 255
+ g1 = f1 + g
+
+ g = g1 >> 32
+ f2 += f
+
+ f = f2 >> 32
+ g1 &= bits32
+
+ f2 &= bits32
+ f &= 255
+
+ f3 += f
+ g2 = f2 + g
+
+ g = g2 >> 32
+ g2 &= bits32
+
+ f4 = f3 >> 32
+ f3 &= bits32
+
+ f4 &= 255
+ g3 = f3 + g
+
+ g = g3 >> 32
+ g3 &= bits32
+
+ g4 = f4 + g
+
+ g4 = g4 - 4
+ s00 = uint32(s[0])
+
+ f = uint64(int64(g4) >> 63)
+ s01 = uint32(s[1])
+
+ f0 &= f
+ g0 &^= f
+ s02 = uint32(s[2])
+
+ f1 &= f
+ f0 |= g0
+ s03 = uint32(s[3])
+
+ g1 &^= f
+ f2 &= f
+ s10 = uint32(s[4])
+
+ f3 &= f
+ g2 &^= f
+ s11 = uint32(s[5])
+
+ g3 &^= f
+ f1 |= g1
+ s12 = uint32(s[6])
+
+ f2 |= g2
+ f3 |= g3
+ s13 = uint32(s[7])
+
+ s01 <<= 8
+ f0 += uint64(s00)
+ s20 = uint32(s[8])
+
+ s02 <<= 16
+ f0 += uint64(s01)
+ s21 = uint32(s[9])
+
+ s03 <<= 24
+ f0 += uint64(s02)
+ s22 = uint32(s[10])
+
+ s11 <<= 8
+ f1 += uint64(s10)
+ s23 = uint32(s[11])
+
+ s12 <<= 16
+ f1 += uint64(s11)
+ s30 = uint32(s[12])
+
+ s13 <<= 24
+ f1 += uint64(s12)
+ s31 = uint32(s[13])
+
+ f0 += uint64(s03)
+ f1 += uint64(s13)
+ s32 = uint32(s[14])
+
+ s21 <<= 8
+ f2 += uint64(s20)
+ s33 = uint32(s[15])
+
+ s22 <<= 16
+ f2 += uint64(s21)
+
+ s23 <<= 24
+ f2 += uint64(s22)
+
+ s31 <<= 8
+ f3 += uint64(s30)
+
+ s32 <<= 16
+ f3 += uint64(s31)
+
+ s33 <<= 24
+ f3 += uint64(s32)
+
+ f2 += uint64(s23)
+ f3 += uint64(s33)
+
+ out[0] = byte(f0)
+ f0 >>= 8
+ out[1] = byte(f0)
+ f0 >>= 8
+ out[2] = byte(f0)
+ f0 >>= 8
+ out[3] = byte(f0)
+ f0 >>= 8
+ f1 += f0
+
+ out[4] = byte(f1)
+ f1 >>= 8
+ out[5] = byte(f1)
+ f1 >>= 8
+ out[6] = byte(f1)
+ f1 >>= 8
+ out[7] = byte(f1)
+ f1 >>= 8
+ f2 += f1
+
+ out[8] = byte(f2)
+ f2 >>= 8
+ out[9] = byte(f2)
+ f2 >>= 8
+ out[10] = byte(f2)
+ f2 >>= 8
+ out[11] = byte(f2)
+ f2 >>= 8
+ f3 += f2
+
+ out[12] = byte(f3)
+ f3 >>= 8
+ out[13] = byte(f3)
+ f3 >>= 8
+ out[14] = byte(f3)
+ f3 >>= 8
+ out[15] = byte(f3)
+}
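
A note on the constants in the reference implementation above: every alphaK
equals 1.5 * 2^(52+K) (alpha0 = 1.5 * 2^52, alpha18 = 1.5 * 2^70, and so on,
down to alpham80 = 1.5 * 2^-28). Adding and then subtracting such a constant
exhausts the float64 mantissa and rounds a value to the nearest multiple of
2^K, which is how lines like r0high = (r0low + alpha18) - alpha18 split a
number into high and low parts without integer arithmetic. A standalone Go
sketch of the trick, with illustrative names:

package main

import (
	"fmt"
	"math"
)

// split rounds x to the nearest multiple of 2^k using the
// add-then-subtract trick; it holds while |x| stays well below
// 2^(52+k), so the addition cannot lose the bits that matter.
func split(x float64, k int) (hi, lo float64) {
	alpha := math.Ldexp(1.5, 52+k) // the shape of the alpha constants
	hi = (x + alpha) - alpha       // x rounded to a multiple of 2^k
	lo = x - hi                    // the rounding error, |lo| <= 2^(k-1)
	return hi, lo
}

func main() {
	hi, lo := split(123456789, 18)
	fmt.Println(hi, lo) // hi is a multiple of 2^18 = 262144
}
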
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160.go
new file mode 100644
index 00000000000..6c6e84236ab
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160.go
@@ -0,0 +1,120 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ripemd160 implements the RIPEMD-160 hash algorithm.
+package ripemd160 // import "golang.org/x/crypto/ripemd160"
+
+// RIPEMD-160 is designed by Hans Dobbertin, Antoon Bosselaers, and Bart
+// Preneel with specifications available at:
+// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf.
+
+import (
+ "crypto"
+ "hash"
+)
+
+func init() {
+ crypto.RegisterHash(crypto.RIPEMD160, New)
+}
+
+// The size of the checksum in bytes.
+const Size = 20
+
+// The block size of the hash algorithm in bytes.
+const BlockSize = 64
+
+const (
+ _s0 = 0x67452301
+ _s1 = 0xefcdab89
+ _s2 = 0x98badcfe
+ _s3 = 0x10325476
+ _s4 = 0xc3d2e1f0
+)
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+ s [5]uint32 // running context
+ x [BlockSize]byte // temporary buffer
+ nx int // index into x
+ tc uint64 // total count of bytes processed
+}
+
+func (d *digest) Reset() {
+ d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4
+ d.nx = 0
+ d.tc = 0
+}
+
+// New returns a new hash.Hash computing the checksum.
+func New() hash.Hash {
+ result := new(digest)
+ result.Reset()
+ return result
+}
+
+func (d *digest) Size() int { return Size }
+
+func (d *digest) BlockSize() int { return BlockSize }
+
+func (d *digest) Write(p []byte) (nn int, err error) {
+ nn = len(p)
+ d.tc += uint64(nn)
+ if d.nx > 0 {
+ n := len(p)
+ if n > BlockSize-d.nx {
+ n = BlockSize - d.nx
+ }
+ for i := 0; i < n; i++ {
+ d.x[d.nx+i] = p[i]
+ }
+ d.nx += n
+ if d.nx == BlockSize {
+ _Block(d, d.x[0:])
+ d.nx = 0
+ }
+ p = p[n:]
+ }
+ n := _Block(d, p)
+ p = p[n:]
+ if len(p) > 0 {
+ d.nx = copy(d.x[:], p)
+ }
+ return
+}
+
+func (d0 *digest) Sum(in []byte) []byte {
+ // Make a copy of d0 so that caller can keep writing and summing.
+ d := *d0
+
+ // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
+ tc := d.tc
+ var tmp [64]byte
+ tmp[0] = 0x80
+ if tc%64 < 56 {
+ d.Write(tmp[0 : 56-tc%64])
+ } else {
+ d.Write(tmp[0 : 64+56-tc%64])
+ }
+
+ // Length in bits.
+ tc <<= 3
+ for i := uint(0); i < 8; i++ {
+ tmp[i] = byte(tc >> (8 * i))
+ }
+ d.Write(tmp[0:8])
+
+ if d.nx != 0 {
+ panic("d.nx != 0")
+ }
+
+ var digest [Size]byte
+ for i, s := range d.s {
+ digest[i*4] = byte(s)
+ digest[i*4+1] = byte(s >> 8)
+ digest[i*4+2] = byte(s >> 16)
+ digest[i*4+3] = byte(s >> 24)
+ }
+
+ return append(in, digest[:]...)
+}
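
ripemd160.go above exposes the digest through the standard hash.Hash interface
and registers it via crypto.RegisterHash, so it is used like any other
streaming hash. A minimal usage sketch, assuming the vendored import path; the
expected digest is the "abc" vector from the test file that follows.

package main

import (
	"fmt"
	"io"

	"golang.org/x/crypto/ripemd160"
)

func main() {
	md := ripemd160.New() // a hash.Hash with Size() == 20 and BlockSize() == 64
	io.WriteString(md, "abc")
	fmt.Printf("%x\n", md.Sum(nil)) // 8eb208f7e05d987a9b044a8e98c6b087f15a0bfc
}
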
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160_test.go
new file mode 100644
index 00000000000..5df1b2593d2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160_test.go
@@ -0,0 +1,64 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ripemd160
+
+// Test vectors are from:
+// http://homes.esat.kuleuven.be/~bosselae/ripemd160.html
+
+import (
+ "fmt"
+ "io"
+ "testing"
+)
+
+type mdTest struct {
+ out string
+ in string
+}
+
+var vectors = [...]mdTest{
+ {"9c1185a5c5e9fc54612808977ee8f548b2258d31", ""},
+ {"0bdc9d2d256b3ee9daae347be6f4dc835a467ffe", "a"},
+ {"8eb208f7e05d987a9b044a8e98c6b087f15a0bfc", "abc"},
+ {"5d0689ef49d2fae572b881b123a85ffa21595f36", "message digest"},
+ {"f71c27109c692c1b56bbdceb5b9d2865b3708dbc", "abcdefghijklmnopqrstuvwxyz"},
+ {"12a053384a9c0c88e405a06c27dcf49ada62eb2b", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"},
+ {"b0e20b6e3116640286ed3a87a5713079b21f5189", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"},
+ {"9b752e45573d4b39f4dbd3323cab82bf63326bfb", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"},
+}
+
+func TestVectors(t *testing.T) {
+ for i := 0; i < len(vectors); i++ {
+ tv := vectors[i]
+ md := New()
+ for j := 0; j < 3; j++ {
+ if j < 2 {
+ io.WriteString(md, tv.in)
+ } else {
+ io.WriteString(md, tv.in[0:len(tv.in)/2])
+ md.Sum(nil)
+ io.WriteString(md, tv.in[len(tv.in)/2:])
+ }
+ s := fmt.Sprintf("%x", md.Sum(nil))
+ if s != tv.out {
+ t.Fatalf("RIPEMD-160[%d](%s) = %s, expected %s", j, tv.in, s, tv.out)
+ }
+ md.Reset()
+ }
+ }
+}
+
+func TestMillionA(t *testing.T) {
+ md := New()
+ for i := 0; i < 100000; i++ {
+ io.WriteString(md, "aaaaaaaaaa")
+ }
+ out := "52783243c1697bdbe16d37f97f68f08325dc1528"
+ s := fmt.Sprintf("%x", md.Sum(nil))
+ if s != out {
+ t.Fatalf("RIPEMD-160 (1 million 'a') = %s, expected %s", s, out)
+ }
+ md.Reset()
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160block.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160block.go
new file mode 100644
index 00000000000..7bc8e6c485e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ripemd160/ripemd160block.go
@@ -0,0 +1,161 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// RIPEMD-160 block step.
+// In its own file so that a faster assembly or C version
+// can be substituted easily.
+
+package ripemd160
+
+// work buffer indices and roll amounts for one line
+var _n = [80]uint{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
+ 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12,
+ 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2,
+ 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13,
+}
+
+var _r = [80]uint{
+ 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8,
+ 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12,
+ 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5,
+ 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12,
+ 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6,
+}
+
+// same for the other parallel one
+var n_ = [80]uint{
+ 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12,
+ 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2,
+ 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13,
+ 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14,
+ 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11,
+}
+
+var r_ = [80]uint{
+ 8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6,
+ 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11,
+ 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5,
+ 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8,
+ 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11,
+}
+
+func _Block(md *digest, p []byte) int {
+ n := 0
+ var x [16]uint32
+ var alpha, beta uint32
+ for len(p) >= BlockSize {
+ a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4]
+ aa, bb, cc, dd, ee := a, b, c, d, e
+ j := 0
+ for i := 0; i < 16; i++ {
+ x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
+ j += 4
+ }
+
+ // round 1
+ i := 0
+ for i < 16 {
+ alpha = a + (b ^ c ^ d) + x[_n[i]]
+ s := _r[i]
+ alpha = (alpha<<s | alpha>>(32-s)) + e
+ beta = c<<10 | c>>22
+ a, b, c, d, e = e, alpha, b, beta, d
+
+ // parallel line
+ alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6
+ s = r_[i]
+ alpha = (alpha<<s | alpha>>(32-s)) + ee
+ beta = cc<<10 | cc>>22
+ aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
+
+ i++
+ }
+
+ // round 2
+ for i < 32 {
+ alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999
+ s := _r[i]
+ alpha = (alpha<<s | alpha>>(32-s)) + e
+ beta = c<<10 | c>>22
+ a, b, c, d, e = e, alpha, b, beta, d
+
+ // parallel line
+ alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124
+ s = r_[i]
+ alpha = (alpha<<s | alpha>>(32-s)) + ee
+ beta = cc<<10 | cc>>22
+ aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
+
+ i++
+ }
+
+ // round 3
+ for i < 48 {
+ alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1
+ s := _r[i]
+ alpha = (alpha<<s | alpha>>(32-s)) + e
+ beta = c<<10 | c>>22
+ a, b, c, d, e = e, alpha, b, beta, d
+
+ // parallel line
+ alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3
+ s = r_[i]
+ alpha = (alpha<<s | alpha>>(32-s)) + ee
+ beta = cc<<10 | cc>>22
+ aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
+
+ i++
+ }
+
+ // round 4
+ for i < 64 {
+ alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc
+ s := _r[i]
+ alpha = (alpha<<s | alpha>>(32-s)) + e
+ beta = c<<10 | c>>22
+ a, b, c, d, e = e, alpha, b, beta, d
+
+ // parallel line
+ alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9
+ s = r_[i]
+ alpha = (alpha<<s | alpha>>(32-s)) + ee
+ beta = cc<<10 | cc>>22
+ aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
+
+ i++
+ }
+
+ // round 5
+ for i < 80 {
+ alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e
+ s := _r[i]
+ alpha = (alpha<<s | alpha>>(32-s)) + e
+ beta = c<<10 | c>>22
+ a, b, c, d, e = e, alpha, b, beta, d
+
+ // parallel line
+ alpha = aa + (bb ^ cc ^ dd) + x[n_[i]]
+ s = r_[i]
+ alpha = (alpha<<s | alpha>>(32-s)) + ee
+ beta = cc<<10 | cc>>22
+ aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
+
+ i++
+ }
+
+ // combine results
+ dd += c + md.s[1]
+ md.s[1] = md.s[2] + d + ee
+ md.s[2] = md.s[3] + e + aa
+ md.s[3] = md.s[4] + a + bb
+ md.s[4] = md.s[0] + b + cc
+ md.s[0] = dd
+
+ p = p[BlockSize:]
+ n += BlockSize
+ }
+ return n
+}
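
Each round body in _Block above is the same step with a different boolean
function and additive constant, and the expression alpha<<s | alpha>>(32-s) is
a 32-bit rotate left by s (newer Go spells it bits.RotateLeft32). A compact
sketch of one left-line step, factored out with illustrative names:

// step performs one left-line step of _Block: mix in the round's
// boolean function f and constant k, rotate by the round's shift s,
// add e, rotate c left by 10, and permute the state, mirroring
// a, b, c, d, e = e, alpha, b, beta, d above.
func step(a, b, c, d, e, x, k uint32, s uint, f func(b, c, d uint32) uint32) (uint32, uint32, uint32, uint32, uint32) {
	alpha := a + f(b, c, d) + x + k
	alpha = alpha<<s | alpha>>(32-s) // rotate left by s
	alpha += e
	beta := c<<10 | c>>22 // rotate c left by 10
	return e, alpha, b, beta, d
}
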
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
new file mode 100644
index 00000000000..4c96147c86b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/hsalsa20.go
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package salsa provides low-level access to functions in the Salsa family.
+package salsa // import "golang.org/x/crypto/salsa20/salsa"
+
+// Sigma is the Salsa20 constant for 256-bit keys.
+var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'}
+
+// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte
+// key k, and 16-byte constant c, and puts the result into the 32-byte array
+// out.
+func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
+ x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
+ x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
+ x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
+ x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
+ x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
+ x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
+ x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
+ x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
+ x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
+ x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
+ x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
+ x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
+ x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
+ x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
+ x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
+
+ for i := 0; i < 20; i += 2 {
+ u := x0 + x12
+ x4 ^= u<<7 | u>>(32-7)
+ u = x4 + x0
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x4
+ x12 ^= u<<13 | u>>(32-13)
+ u = x12 + x8
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x1
+ x9 ^= u<<7 | u>>(32-7)
+ u = x9 + x5
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x9
+ x1 ^= u<<13 | u>>(32-13)
+ u = x1 + x13
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x6
+ x14 ^= u<<7 | u>>(32-7)
+ u = x14 + x10
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x14
+ x6 ^= u<<13 | u>>(32-13)
+ u = x6 + x2
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x11
+ x3 ^= u<<7 | u>>(32-7)
+ u = x3 + x15
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x3
+ x11 ^= u<<13 | u>>(32-13)
+ u = x11 + x7
+ x15 ^= u<<18 | u>>(32-18)
+
+ u = x0 + x3
+ x1 ^= u<<7 | u>>(32-7)
+ u = x1 + x0
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x1
+ x3 ^= u<<13 | u>>(32-13)
+ u = x3 + x2
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x4
+ x6 ^= u<<7 | u>>(32-7)
+ u = x6 + x5
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x6
+ x4 ^= u<<13 | u>>(32-13)
+ u = x4 + x7
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x9
+ x11 ^= u<<7 | u>>(32-7)
+ u = x11 + x10
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x11
+ x9 ^= u<<13 | u>>(32-13)
+ u = x9 + x8
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x14
+ x12 ^= u<<7 | u>>(32-7)
+ u = x12 + x15
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x12
+ x14 ^= u<<13 | u>>(32-13)
+ u = x14 + x13
+ x15 ^= u<<18 | u>>(32-18)
+ }
+ out[0] = byte(x0)
+ out[1] = byte(x0 >> 8)
+ out[2] = byte(x0 >> 16)
+ out[3] = byte(x0 >> 24)
+
+ out[4] = byte(x5)
+ out[5] = byte(x5 >> 8)
+ out[6] = byte(x5 >> 16)
+ out[7] = byte(x5 >> 24)
+
+ out[8] = byte(x10)
+ out[9] = byte(x10 >> 8)
+ out[10] = byte(x10 >> 16)
+ out[11] = byte(x10 >> 24)
+
+ out[12] = byte(x15)
+ out[13] = byte(x15 >> 8)
+ out[14] = byte(x15 >> 16)
+ out[15] = byte(x15 >> 24)
+
+ out[16] = byte(x6)
+ out[17] = byte(x6 >> 8)
+ out[18] = byte(x6 >> 16)
+ out[19] = byte(x6 >> 24)
+
+ out[20] = byte(x7)
+ out[21] = byte(x7 >> 8)
+ out[22] = byte(x7 >> 16)
+ out[23] = byte(x7 >> 24)
+
+ out[24] = byte(x8)
+ out[25] = byte(x8 >> 8)
+ out[26] = byte(x8 >> 16)
+ out[27] = byte(x8 >> 24)
+
+ out[28] = byte(x9)
+ out[29] = byte(x9 >> 8)
+ out[30] = byte(x9 >> 16)
+ out[31] = byte(x9 >> 24)
+}
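
The fully unrolled loop in HSalsa20 above is ten Salsa20 double rounds: one
quarter-round down each column of the 4x4 state, then one across each row. A
sketch of the quarter-round that the unrolled code repeats, with an
illustrative name:

// quarterRound is the Salsa20 quarter-round. For the first column
// above, call it as quarterRound(x0, x4, x8, x12).
func quarterRound(y0, y1, y2, y3 uint32) (uint32, uint32, uint32, uint32) {
	u := y0 + y3
	y1 ^= u<<7 | u>>(32-7)
	u = y1 + y0
	y2 ^= u<<9 | u>>(32-9)
	u = y2 + y1
	y3 ^= u<<13 | u>>(32-13)
	u = y3 + y2
	y0 ^= u<<18 | u>>(32-18)
	return y0, y1, y2, y3
}
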
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s
new file mode 100644
index 00000000000..6e1df963917
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s
@@ -0,0 +1,902 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!appengine,!gccgo
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
+TEXT ·salsa2020XORKeyStream(SB),0,$512-40
+ MOVQ out+0(FP),DI
+ MOVQ in+8(FP),SI
+ MOVQ n+16(FP),DX
+ MOVQ nonce+24(FP),CX
+ MOVQ key+32(FP),R8
+
+ MOVQ SP,R11
+ MOVQ $31,R9
+ NOTQ R9
+ ANDQ R9,SP
+ ADDQ $32,SP
+
+ MOVQ R11,352(SP)
+ MOVQ R12,360(SP)
+ MOVQ R13,368(SP)
+ MOVQ R14,376(SP)
+ MOVQ R15,384(SP)
+ MOVQ BX,392(SP)
+ MOVQ BP,400(SP)
+ MOVQ DX,R9
+ MOVQ CX,DX
+ MOVQ R8,R10
+ CMPQ R9,$0
+ JBE DONE
+ START:
+ MOVL 20(R10),CX
+ MOVL 0(R10),R8
+ MOVL 0(DX),AX
+ MOVL 16(R10),R11
+ MOVL CX,0(SP)
+ MOVL R8, 4 (SP)
+ MOVL AX, 8 (SP)
+ MOVL R11, 12 (SP)
+ MOVL 8(DX),CX
+ MOVL 24(R10),R8
+ MOVL 4(R10),AX
+ MOVL 4(DX),R11
+ MOVL CX,16(SP)
+ MOVL R8, 20 (SP)
+ MOVL AX, 24 (SP)
+ MOVL R11, 28 (SP)
+ MOVL 12(DX),CX
+ MOVL 12(R10),DX
+ MOVL 28(R10),R8
+ MOVL 8(R10),AX
+ MOVL DX,32(SP)
+ MOVL CX, 36 (SP)
+ MOVL R8, 40 (SP)
+ MOVL AX, 44 (SP)
+ MOVQ $1634760805,DX
+ MOVQ $857760878,CX
+ MOVQ $2036477234,R8
+ MOVQ $1797285236,AX
+ MOVL DX,48(SP)
+ MOVL CX, 52 (SP)
+ MOVL R8, 56 (SP)
+ MOVL AX, 60 (SP)
+ CMPQ R9,$256
+ JB BYTESBETWEEN1AND255
+ MOVOA 48(SP),X0
+ PSHUFL $0X55,X0,X1
+ PSHUFL $0XAA,X0,X2
+ PSHUFL $0XFF,X0,X3
+ PSHUFL $0X00,X0,X0
+ MOVOA X1,64(SP)
+ MOVOA X2,80(SP)
+ MOVOA X3,96(SP)
+ MOVOA X0,112(SP)
+ MOVOA 0(SP),X0
+ PSHUFL $0XAA,X0,X1
+ PSHUFL $0XFF,X0,X2
+ PSHUFL $0X00,X0,X3
+ PSHUFL $0X55,X0,X0
+ MOVOA X1,128(SP)
+ MOVOA X2,144(SP)
+ MOVOA X3,160(SP)
+ MOVOA X0,176(SP)
+ MOVOA 16(SP),X0
+ PSHUFL $0XFF,X0,X1
+ PSHUFL $0X55,X0,X2
+ PSHUFL $0XAA,X0,X0
+ MOVOA X1,192(SP)
+ MOVOA X2,208(SP)
+ MOVOA X0,224(SP)
+ MOVOA 32(SP),X0
+ PSHUFL $0X00,X0,X1
+ PSHUFL $0XAA,X0,X2
+ PSHUFL $0XFF,X0,X0
+ MOVOA X1,240(SP)
+ MOVOA X2,256(SP)
+ MOVOA X0,272(SP)
+ BYTESATLEAST256:
+ MOVL 16(SP),DX
+ MOVL 36 (SP),CX
+ MOVL DX,288(SP)
+ MOVL CX,304(SP)
+ ADDQ $1,DX
+ SHLQ $32,CX
+ ADDQ CX,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX, 292 (SP)
+ MOVL CX, 308 (SP)
+ ADDQ $1,DX
+ SHLQ $32,CX
+ ADDQ CX,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX, 296 (SP)
+ MOVL CX, 312 (SP)
+ ADDQ $1,DX
+ SHLQ $32,CX
+ ADDQ CX,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX, 300 (SP)
+ MOVL CX, 316 (SP)
+ ADDQ $1,DX
+ SHLQ $32,CX
+ ADDQ CX,DX
+ MOVQ DX,CX
+ SHRQ $32,CX
+ MOVL DX,16(SP)
+ MOVL CX, 36 (SP)
+ MOVQ R9,408(SP)
+ MOVQ $20,DX
+ MOVOA 64(SP),X0
+ MOVOA 80(SP),X1
+ MOVOA 96(SP),X2
+ MOVOA 256(SP),X3
+ MOVOA 272(SP),X4
+ MOVOA 128(SP),X5
+ MOVOA 144(SP),X6
+ MOVOA 176(SP),X7
+ MOVOA 192(SP),X8
+ MOVOA 208(SP),X9
+ MOVOA 224(SP),X10
+ MOVOA 304(SP),X11
+ MOVOA 112(SP),X12
+ MOVOA 160(SP),X13
+ MOVOA 240(SP),X14
+ MOVOA 288(SP),X15
+ MAINLOOP1:
+ MOVOA X1,320(SP)
+ MOVOA X2,336(SP)
+ MOVOA X13,X1
+ PADDL X12,X1
+ MOVOA X1,X2
+ PSLLL $7,X1
+ PXOR X1,X14
+ PSRLL $25,X2
+ PXOR X2,X14
+ MOVOA X7,X1
+ PADDL X0,X1
+ MOVOA X1,X2
+ PSLLL $7,X1
+ PXOR X1,X11
+ PSRLL $25,X2
+ PXOR X2,X11
+ MOVOA X12,X1
+ PADDL X14,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X15
+ PSRLL $23,X2
+ PXOR X2,X15
+ MOVOA X0,X1
+ PADDL X11,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X9
+ PSRLL $23,X2
+ PXOR X2,X9
+ MOVOA X14,X1
+ PADDL X15,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X13
+ PSRLL $19,X2
+ PXOR X2,X13
+ MOVOA X11,X1
+ PADDL X9,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X7
+ PSRLL $19,X2
+ PXOR X2,X7
+ MOVOA X15,X1
+ PADDL X13,X1
+ MOVOA X1,X2
+ PSLLL $18,X1
+ PXOR X1,X12
+ PSRLL $14,X2
+ PXOR X2,X12
+ MOVOA 320(SP),X1
+ MOVOA X12,320(SP)
+ MOVOA X9,X2
+ PADDL X7,X2
+ MOVOA X2,X12
+ PSLLL $18,X2
+ PXOR X2,X0
+ PSRLL $14,X12
+ PXOR X12,X0
+ MOVOA X5,X2
+ PADDL X1,X2
+ MOVOA X2,X12
+ PSLLL $7,X2
+ PXOR X2,X3
+ PSRLL $25,X12
+ PXOR X12,X3
+ MOVOA 336(SP),X2
+ MOVOA X0,336(SP)
+ MOVOA X6,X0
+ PADDL X2,X0
+ MOVOA X0,X12
+ PSLLL $7,X0
+ PXOR X0,X4
+ PSRLL $25,X12
+ PXOR X12,X4
+ MOVOA X1,X0
+ PADDL X3,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X10
+ PSRLL $23,X12
+ PXOR X12,X10
+ MOVOA X2,X0
+ PADDL X4,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X8
+ PSRLL $23,X12
+ PXOR X12,X8
+ MOVOA X3,X0
+ PADDL X10,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X5
+ PSRLL $19,X12
+ PXOR X12,X5
+ MOVOA X4,X0
+ PADDL X8,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X6
+ PSRLL $19,X12
+ PXOR X12,X6
+ MOVOA X10,X0
+ PADDL X5,X0
+ MOVOA X0,X12
+ PSLLL $18,X0
+ PXOR X0,X1
+ PSRLL $14,X12
+ PXOR X12,X1
+ MOVOA 320(SP),X0
+ MOVOA X1,320(SP)
+ MOVOA X4,X1
+ PADDL X0,X1
+ MOVOA X1,X12
+ PSLLL $7,X1
+ PXOR X1,X7
+ PSRLL $25,X12
+ PXOR X12,X7
+ MOVOA X8,X1
+ PADDL X6,X1
+ MOVOA X1,X12
+ PSLLL $18,X1
+ PXOR X1,X2
+ PSRLL $14,X12
+ PXOR X12,X2
+ MOVOA 336(SP),X12
+ MOVOA X2,336(SP)
+ MOVOA X14,X1
+ PADDL X12,X1
+ MOVOA X1,X2
+ PSLLL $7,X1
+ PXOR X1,X5
+ PSRLL $25,X2
+ PXOR X2,X5
+ MOVOA X0,X1
+ PADDL X7,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X10
+ PSRLL $23,X2
+ PXOR X2,X10
+ MOVOA X12,X1
+ PADDL X5,X1
+ MOVOA X1,X2
+ PSLLL $9,X1
+ PXOR X1,X8
+ PSRLL $23,X2
+ PXOR X2,X8
+ MOVOA X7,X1
+ PADDL X10,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X4
+ PSRLL $19,X2
+ PXOR X2,X4
+ MOVOA X5,X1
+ PADDL X8,X1
+ MOVOA X1,X2
+ PSLLL $13,X1
+ PXOR X1,X14
+ PSRLL $19,X2
+ PXOR X2,X14
+ MOVOA X10,X1
+ PADDL X4,X1
+ MOVOA X1,X2
+ PSLLL $18,X1
+ PXOR X1,X0
+ PSRLL $14,X2
+ PXOR X2,X0
+ MOVOA 320(SP),X1
+ MOVOA X0,320(SP)
+ MOVOA X8,X0
+ PADDL X14,X0
+ MOVOA X0,X2
+ PSLLL $18,X0
+ PXOR X0,X12
+ PSRLL $14,X2
+ PXOR X2,X12
+ MOVOA X11,X0
+ PADDL X1,X0
+ MOVOA X0,X2
+ PSLLL $7,X0
+ PXOR X0,X6
+ PSRLL $25,X2
+ PXOR X2,X6
+ MOVOA 336(SP),X2
+ MOVOA X12,336(SP)
+ MOVOA X3,X0
+ PADDL X2,X0
+ MOVOA X0,X12
+ PSLLL $7,X0
+ PXOR X0,X13
+ PSRLL $25,X12
+ PXOR X12,X13
+ MOVOA X1,X0
+ PADDL X6,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X15
+ PSRLL $23,X12
+ PXOR X12,X15
+ MOVOA X2,X0
+ PADDL X13,X0
+ MOVOA X0,X12
+ PSLLL $9,X0
+ PXOR X0,X9
+ PSRLL $23,X12
+ PXOR X12,X9
+ MOVOA X6,X0
+ PADDL X15,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X11
+ PSRLL $19,X12
+ PXOR X12,X11
+ MOVOA X13,X0
+ PADDL X9,X0
+ MOVOA X0,X12
+ PSLLL $13,X0
+ PXOR X0,X3
+ PSRLL $19,X12
+ PXOR X12,X3
+ MOVOA X15,X0
+ PADDL X11,X0
+ MOVOA X0,X12
+ PSLLL $18,X0
+ PXOR X0,X1
+ PSRLL $14,X12
+ PXOR X12,X1
+ MOVOA X9,X0
+ PADDL X3,X0
+ MOVOA X0,X12
+ PSLLL $18,X0
+ PXOR X0,X2
+ PSRLL $14,X12
+ PXOR X12,X2
+ MOVOA 320(SP),X12
+ MOVOA 336(SP),X0
+ SUBQ $2,DX
+ JA MAINLOOP1
+ PADDL 112(SP),X12
+ PADDL 176(SP),X7
+ PADDL 224(SP),X10
+ PADDL 272(SP),X4
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ PSHUFL $0X39,X12,X12
+ PSHUFL $0X39,X7,X7
+ PSHUFL $0X39,X10,X10
+ PSHUFL $0X39,X4,X4
+ XORL 0(SI),DX
+ XORL 4(SI),CX
+ XORL 8(SI),R8
+ XORL 12(SI),R9
+ MOVL DX,0(DI)
+ MOVL CX,4(DI)
+ MOVL R8,8(DI)
+ MOVL R9,12(DI)
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ PSHUFL $0X39,X12,X12
+ PSHUFL $0X39,X7,X7
+ PSHUFL $0X39,X10,X10
+ PSHUFL $0X39,X4,X4
+ XORL 64(SI),DX
+ XORL 68(SI),CX
+ XORL 72(SI),R8
+ XORL 76(SI),R9
+ MOVL DX,64(DI)
+ MOVL CX,68(DI)
+ MOVL R8,72(DI)
+ MOVL R9,76(DI)
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ PSHUFL $0X39,X12,X12
+ PSHUFL $0X39,X7,X7
+ PSHUFL $0X39,X10,X10
+ PSHUFL $0X39,X4,X4
+ XORL 128(SI),DX
+ XORL 132(SI),CX
+ XORL 136(SI),R8
+ XORL 140(SI),R9
+ MOVL DX,128(DI)
+ MOVL CX,132(DI)
+ MOVL R8,136(DI)
+ MOVL R9,140(DI)
+ MOVD X12,DX
+ MOVD X7,CX
+ MOVD X10,R8
+ MOVD X4,R9
+ XORL 192(SI),DX
+ XORL 196(SI),CX
+ XORL 200(SI),R8
+ XORL 204(SI),R9
+ MOVL DX,192(DI)
+ MOVL CX,196(DI)
+ MOVL R8,200(DI)
+ MOVL R9,204(DI)
+ PADDL 240(SP),X14
+ PADDL 64(SP),X0
+ PADDL 128(SP),X5
+ PADDL 192(SP),X8
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ PSHUFL $0X39,X14,X14
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X5,X5
+ PSHUFL $0X39,X8,X8
+ XORL 16(SI),DX
+ XORL 20(SI),CX
+ XORL 24(SI),R8
+ XORL 28(SI),R9
+ MOVL DX,16(DI)
+ MOVL CX,20(DI)
+ MOVL R8,24(DI)
+ MOVL R9,28(DI)
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ PSHUFL $0X39,X14,X14
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X5,X5
+ PSHUFL $0X39,X8,X8
+ XORL 80(SI),DX
+ XORL 84(SI),CX
+ XORL 88(SI),R8
+ XORL 92(SI),R9
+ MOVL DX,80(DI)
+ MOVL CX,84(DI)
+ MOVL R8,88(DI)
+ MOVL R9,92(DI)
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ PSHUFL $0X39,X14,X14
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X5,X5
+ PSHUFL $0X39,X8,X8
+ XORL 144(SI),DX
+ XORL 148(SI),CX
+ XORL 152(SI),R8
+ XORL 156(SI),R9
+ MOVL DX,144(DI)
+ MOVL CX,148(DI)
+ MOVL R8,152(DI)
+ MOVL R9,156(DI)
+ MOVD X14,DX
+ MOVD X0,CX
+ MOVD X5,R8
+ MOVD X8,R9
+ XORL 208(SI),DX
+ XORL 212(SI),CX
+ XORL 216(SI),R8
+ XORL 220(SI),R9
+ MOVL DX,208(DI)
+ MOVL CX,212(DI)
+ MOVL R8,216(DI)
+ MOVL R9,220(DI)
+ PADDL 288(SP),X15
+ PADDL 304(SP),X11
+ PADDL 80(SP),X1
+ PADDL 144(SP),X6
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ PSHUFL $0X39,X15,X15
+ PSHUFL $0X39,X11,X11
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X6,X6
+ XORL 32(SI),DX
+ XORL 36(SI),CX
+ XORL 40(SI),R8
+ XORL 44(SI),R9
+ MOVL DX,32(DI)
+ MOVL CX,36(DI)
+ MOVL R8,40(DI)
+ MOVL R9,44(DI)
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ PSHUFL $0X39,X15,X15
+ PSHUFL $0X39,X11,X11
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X6,X6
+ XORL 96(SI),DX
+ XORL 100(SI),CX
+ XORL 104(SI),R8
+ XORL 108(SI),R9
+ MOVL DX,96(DI)
+ MOVL CX,100(DI)
+ MOVL R8,104(DI)
+ MOVL R9,108(DI)
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ PSHUFL $0X39,X15,X15
+ PSHUFL $0X39,X11,X11
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X6,X6
+ XORL 160(SI),DX
+ XORL 164(SI),CX
+ XORL 168(SI),R8
+ XORL 172(SI),R9
+ MOVL DX,160(DI)
+ MOVL CX,164(DI)
+ MOVL R8,168(DI)
+ MOVL R9,172(DI)
+ MOVD X15,DX
+ MOVD X11,CX
+ MOVD X1,R8
+ MOVD X6,R9
+ XORL 224(SI),DX
+ XORL 228(SI),CX
+ XORL 232(SI),R8
+ XORL 236(SI),R9
+ MOVL DX,224(DI)
+ MOVL CX,228(DI)
+ MOVL R8,232(DI)
+ MOVL R9,236(DI)
+ PADDL 160(SP),X13
+ PADDL 208(SP),X9
+ PADDL 256(SP),X3
+ PADDL 96(SP),X2
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ PSHUFL $0X39,X13,X13
+ PSHUFL $0X39,X9,X9
+ PSHUFL $0X39,X3,X3
+ PSHUFL $0X39,X2,X2
+ XORL 48(SI),DX
+ XORL 52(SI),CX
+ XORL 56(SI),R8
+ XORL 60(SI),R9
+ MOVL DX,48(DI)
+ MOVL CX,52(DI)
+ MOVL R8,56(DI)
+ MOVL R9,60(DI)
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ PSHUFL $0X39,X13,X13
+ PSHUFL $0X39,X9,X9
+ PSHUFL $0X39,X3,X3
+ PSHUFL $0X39,X2,X2
+ XORL 112(SI),DX
+ XORL 116(SI),CX
+ XORL 120(SI),R8
+ XORL 124(SI),R9
+ MOVL DX,112(DI)
+ MOVL CX,116(DI)
+ MOVL R8,120(DI)
+ MOVL R9,124(DI)
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ PSHUFL $0X39,X13,X13
+ PSHUFL $0X39,X9,X9
+ PSHUFL $0X39,X3,X3
+ PSHUFL $0X39,X2,X2
+ XORL 176(SI),DX
+ XORL 180(SI),CX
+ XORL 184(SI),R8
+ XORL 188(SI),R9
+ MOVL DX,176(DI)
+ MOVL CX,180(DI)
+ MOVL R8,184(DI)
+ MOVL R9,188(DI)
+ MOVD X13,DX
+ MOVD X9,CX
+ MOVD X3,R8
+ MOVD X2,R9
+ XORL 240(SI),DX
+ XORL 244(SI),CX
+ XORL 248(SI),R8
+ XORL 252(SI),R9
+ MOVL DX,240(DI)
+ MOVL CX,244(DI)
+ MOVL R8,248(DI)
+ MOVL R9,252(DI)
+ MOVQ 408(SP),R9
+ SUBQ $256,R9
+ ADDQ $256,SI
+ ADDQ $256,DI
+ CMPQ R9,$256
+ JAE BYTESATLEAST256
+ CMPQ R9,$0
+ JBE DONE
+ BYTESBETWEEN1AND255:
+ CMPQ R9,$64
+ JAE NOCOPY
+ MOVQ DI,DX
+ LEAQ 416(SP),DI
+ MOVQ R9,CX
+ REP; MOVSB
+ LEAQ 416(SP),DI
+ LEAQ 416(SP),SI
+ NOCOPY:
+ MOVQ R9,408(SP)
+ MOVOA 48(SP),X0
+ MOVOA 0(SP),X1
+ MOVOA 16(SP),X2
+ MOVOA 32(SP),X3
+ MOVOA X1,X4
+ MOVQ $20,CX
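+ // CX counts the 20 Salsa20 rounds; each MAINLOOP2 iteration below
+ // performs four rounds (two double rounds) and subtracts 4 from CX.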
+ MAINLOOP2:
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X3
+ PXOR X6,X3
+ PADDL X3,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X3,X3
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X1
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X1,X1
+ PXOR X6,X0
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X1
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X1,X1
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X3
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X3
+ PADDL X3,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X3,X3
+ PXOR X6,X0
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X3
+ PXOR X6,X3
+ PADDL X3,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X3,X3
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X1
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X3,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X1,X1
+ PXOR X6,X0
+ PADDL X0,X4
+ MOVOA X0,X5
+ MOVOA X4,X6
+ PSLLL $7,X4
+ PSRLL $25,X6
+ PXOR X4,X1
+ PXOR X6,X1
+ PADDL X1,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $9,X5
+ PSRLL $23,X6
+ PXOR X5,X2
+ PSHUFL $0X93,X1,X1
+ PXOR X6,X2
+ PADDL X2,X4
+ MOVOA X2,X5
+ MOVOA X4,X6
+ PSLLL $13,X4
+ PSRLL $19,X6
+ PXOR X4,X3
+ PSHUFL $0X4E,X2,X2
+ PXOR X6,X3
+ SUBQ $4,CX
+ PADDL X3,X5
+ MOVOA X1,X4
+ MOVOA X5,X6
+ PSLLL $18,X5
+ PXOR X7,X7
+ PSRLL $14,X6
+ PXOR X5,X0
+ PSHUFL $0X39,X3,X3
+ PXOR X6,X0
+ JA MAINLOOP2
+ PADDL 48(SP),X0
+ PADDL 0(SP),X1
+ PADDL 16(SP),X2
+ PADDL 32(SP),X3
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X2,X2
+ PSHUFL $0X39,X3,X3
+ XORL 0(SI),CX
+ XORL 48(SI),R8
+ XORL 32(SI),R9
+ XORL 16(SI),AX
+ MOVL CX,0(DI)
+ MOVL R8,48(DI)
+ MOVL R9,32(DI)
+ MOVL AX,16(DI)
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X2,X2
+ PSHUFL $0X39,X3,X3
+ XORL 20(SI),CX
+ XORL 4(SI),R8
+ XORL 52(SI),R9
+ XORL 36(SI),AX
+ MOVL CX,20(DI)
+ MOVL R8,4(DI)
+ MOVL R9,52(DI)
+ MOVL AX,36(DI)
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ PSHUFL $0X39,X0,X0
+ PSHUFL $0X39,X1,X1
+ PSHUFL $0X39,X2,X2
+ PSHUFL $0X39,X3,X3
+ XORL 40(SI),CX
+ XORL 24(SI),R8
+ XORL 8(SI),R9
+ XORL 56(SI),AX
+ MOVL CX,40(DI)
+ MOVL R8,24(DI)
+ MOVL R9,8(DI)
+ MOVL AX,56(DI)
+ MOVD X0,CX
+ MOVD X1,R8
+ MOVD X2,R9
+ MOVD X3,AX
+ XORL 60(SI),CX
+ XORL 44(SI),R8
+ XORL 28(SI),R9
+ XORL 12(SI),AX
+ MOVL CX,60(DI)
+ MOVL R8,44(DI)
+ MOVL R9,28(DI)
+ MOVL AX,12(DI)
+ MOVQ 408(SP),R9
+ MOVL 16(SP),CX
+ MOVL 36(SP),R8
+ ADDQ $1,CX
+ SHLQ $32,R8
+ ADDQ R8,CX
+ MOVQ CX,R8
+ SHRQ $32,R8
+ MOVL CX,16(SP)
+ MOVL R8,36(SP)
+ CMPQ R9,$64
+ JA BYTESATLEAST65
+ JAE BYTESATLEAST64
+ MOVQ DI,SI
+ MOVQ DX,DI
+ MOVQ R9,CX
+ REP; MOVSB
+ BYTESATLEAST64:
+ DONE:
+ MOVQ 352(SP),R11
+ MOVQ 360(SP),R12
+ MOVQ 368(SP),R13
+ MOVQ 376(SP),R14
+ MOVQ 384(SP),R15
+ MOVQ 392(SP),BX
+ MOVQ 400(SP),BP
+ MOVQ R11,SP
+ RET
+ BYTESATLEAST65:
+ SUBQ $64,R9
+ ADDQ $64,DI
+ ADDQ $64,SI
+ JMP BYTESBETWEEN1AND255
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa208.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa208.go
new file mode 100644
index 00000000000..9bfc0927ce8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa208.go
@@ -0,0 +1,199 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package salsa
+
+// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts
+// the result into the 64-byte array out. The input and output may be the same array.
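+//
+// A minimal in-place usage sketch (the variable name is illustrative):
+//
+//	var block [64]byte // filled with input elsewhere
+//	Core208(&block, &block) // overwrites block with the Salsa20/8 core output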
+func Core208(out *[64]byte, in *[64]byte) {
+ j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
+ j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
+ j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
+ j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24
+ j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24
+ j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24
+ j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24
+ j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24
+ j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24
+ j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24
+ j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24
+ j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24
+ j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24
+ j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24
+ j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24
+
+ x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
+ x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
+
+ for i := 0; i < 8; i += 2 {
+ u := x0 + x12
+ x4 ^= u<<7 | u>>(32-7)
+ u = x4 + x0
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x4
+ x12 ^= u<<13 | u>>(32-13)
+ u = x12 + x8
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x1
+ x9 ^= u<<7 | u>>(32-7)
+ u = x9 + x5
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x9
+ x1 ^= u<<13 | u>>(32-13)
+ u = x1 + x13
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x6
+ x14 ^= u<<7 | u>>(32-7)
+ u = x14 + x10
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x14
+ x6 ^= u<<13 | u>>(32-13)
+ u = x6 + x2
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x11
+ x3 ^= u<<7 | u>>(32-7)
+ u = x3 + x15
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x3
+ x11 ^= u<<13 | u>>(32-13)
+ u = x11 + x7
+ x15 ^= u<<18 | u>>(32-18)
+
+ u = x0 + x3
+ x1 ^= u<<7 | u>>(32-7)
+ u = x1 + x0
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x1
+ x3 ^= u<<13 | u>>(32-13)
+ u = x3 + x2
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x4
+ x6 ^= u<<7 | u>>(32-7)
+ u = x6 + x5
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x6
+ x4 ^= u<<13 | u>>(32-13)
+ u = x4 + x7
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x9
+ x11 ^= u<<7 | u>>(32-7)
+ u = x11 + x10
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x11
+ x9 ^= u<<13 | u>>(32-13)
+ u = x9 + x8
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x14
+ x12 ^= u<<7 | u>>(32-7)
+ u = x12 + x15
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x12
+ x14 ^= u<<13 | u>>(32-13)
+ u = x14 + x13
+ x15 ^= u<<18 | u>>(32-18)
+ }
+ x0 += j0
+ x1 += j1
+ x2 += j2
+ x3 += j3
+ x4 += j4
+ x5 += j5
+ x6 += j6
+ x7 += j7
+ x8 += j8
+ x9 += j9
+ x10 += j10
+ x11 += j11
+ x12 += j12
+ x13 += j13
+ x14 += j14
+ x15 += j15
+
+ out[0] = byte(x0)
+ out[1] = byte(x0 >> 8)
+ out[2] = byte(x0 >> 16)
+ out[3] = byte(x0 >> 24)
+
+ out[4] = byte(x1)
+ out[5] = byte(x1 >> 8)
+ out[6] = byte(x1 >> 16)
+ out[7] = byte(x1 >> 24)
+
+ out[8] = byte(x2)
+ out[9] = byte(x2 >> 8)
+ out[10] = byte(x2 >> 16)
+ out[11] = byte(x2 >> 24)
+
+ out[12] = byte(x3)
+ out[13] = byte(x3 >> 8)
+ out[14] = byte(x3 >> 16)
+ out[15] = byte(x3 >> 24)
+
+ out[16] = byte(x4)
+ out[17] = byte(x4 >> 8)
+ out[18] = byte(x4 >> 16)
+ out[19] = byte(x4 >> 24)
+
+ out[20] = byte(x5)
+ out[21] = byte(x5 >> 8)
+ out[22] = byte(x5 >> 16)
+ out[23] = byte(x5 >> 24)
+
+ out[24] = byte(x6)
+ out[25] = byte(x6 >> 8)
+ out[26] = byte(x6 >> 16)
+ out[27] = byte(x6 >> 24)
+
+ out[28] = byte(x7)
+ out[29] = byte(x7 >> 8)
+ out[30] = byte(x7 >> 16)
+ out[31] = byte(x7 >> 24)
+
+ out[32] = byte(x8)
+ out[33] = byte(x8 >> 8)
+ out[34] = byte(x8 >> 16)
+ out[35] = byte(x8 >> 24)
+
+ out[36] = byte(x9)
+ out[37] = byte(x9 >> 8)
+ out[38] = byte(x9 >> 16)
+ out[39] = byte(x9 >> 24)
+
+ out[40] = byte(x10)
+ out[41] = byte(x10 >> 8)
+ out[42] = byte(x10 >> 16)
+ out[43] = byte(x10 >> 24)
+
+ out[44] = byte(x11)
+ out[45] = byte(x11 >> 8)
+ out[46] = byte(x11 >> 16)
+ out[47] = byte(x11 >> 24)
+
+ out[48] = byte(x12)
+ out[49] = byte(x12 >> 8)
+ out[50] = byte(x12 >> 16)
+ out[51] = byte(x12 >> 24)
+
+ out[52] = byte(x13)
+ out[53] = byte(x13 >> 8)
+ out[54] = byte(x13 >> 16)
+ out[55] = byte(x13 >> 24)
+
+ out[56] = byte(x14)
+ out[57] = byte(x14 >> 8)
+ out[58] = byte(x14 >> 16)
+ out[59] = byte(x14 >> 24)
+
+ out[60] = byte(x15)
+ out[61] = byte(x15 >> 8)
+ out[62] = byte(x15 >> 16)
+ out[63] = byte(x15 >> 24)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
new file mode 100644
index 00000000000..903c7858e43
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go
@@ -0,0 +1,23 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!appengine,!gccgo
+
+package salsa
+
+// This function is implemented in salsa2020_amd64.s.
+
+//go:noescape
+func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
+
+// XORKeyStream crypts bytes from in to out using the given key and counters.
+// In and out may be the same slice but otherwise should not overlap. Counter
+// contains the raw salsa20 counter bytes (both nonce and block counter).
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
+ if len(in) == 0 {
+ return
+ }
+ salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0])
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
new file mode 100644
index 00000000000..95f8ca5bb96
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go
@@ -0,0 +1,234 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine gccgo
+
+package salsa
+
+const rounds = 20
+
+// core applies the Salsa20 core function to 16-byte input in, 32-byte key k,
+// and 16-byte constant c, and puts the result into 64-byte array out.
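+// The initial 16-word state places c at words 0, 5, 10 and 15, k at
+// words 1-4 and 11-14, and in (nonce plus block counter) at words 6-9,
+// matching Salsa20's diagonal layout.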
+func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) {
+ j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24
+ j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24
+ j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24
+ j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24
+ j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24
+ j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24
+ j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
+ j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24
+ j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24
+ j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24
+ j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24
+ j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24
+ j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24
+ j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24
+ j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24
+ j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24
+
+ x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8
+ x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15
+
+ for i := 0; i < rounds; i += 2 {
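+ // Each iteration applies one column round followed by one row
+ // round, i.e. one Salsa20 "double round".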
+ u := x0 + x12
+ x4 ^= u<<7 | u>>(32-7)
+ u = x4 + x0
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x4
+ x12 ^= u<<13 | u>>(32-13)
+ u = x12 + x8
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x1
+ x9 ^= u<<7 | u>>(32-7)
+ u = x9 + x5
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x9
+ x1 ^= u<<13 | u>>(32-13)
+ u = x1 + x13
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x6
+ x14 ^= u<<7 | u>>(32-7)
+ u = x14 + x10
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x14
+ x6 ^= u<<13 | u>>(32-13)
+ u = x6 + x2
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x11
+ x3 ^= u<<7 | u>>(32-7)
+ u = x3 + x15
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x3
+ x11 ^= u<<13 | u>>(32-13)
+ u = x11 + x7
+ x15 ^= u<<18 | u>>(32-18)
+
+ u = x0 + x3
+ x1 ^= u<<7 | u>>(32-7)
+ u = x1 + x0
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x1
+ x3 ^= u<<13 | u>>(32-13)
+ u = x3 + x2
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x4
+ x6 ^= u<<7 | u>>(32-7)
+ u = x6 + x5
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x6
+ x4 ^= u<<13 | u>>(32-13)
+ u = x4 + x7
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x9
+ x11 ^= u<<7 | u>>(32-7)
+ u = x11 + x10
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x11
+ x9 ^= u<<13 | u>>(32-13)
+ u = x9 + x8
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x14
+ x12 ^= u<<7 | u>>(32-7)
+ u = x12 + x15
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x12
+ x14 ^= u<<13 | u>>(32-13)
+ u = x14 + x13
+ x15 ^= u<<18 | u>>(32-18)
+ }
+ x0 += j0
+ x1 += j1
+ x2 += j2
+ x3 += j3
+ x4 += j4
+ x5 += j5
+ x6 += j6
+ x7 += j7
+ x8 += j8
+ x9 += j9
+ x10 += j10
+ x11 += j11
+ x12 += j12
+ x13 += j13
+ x14 += j14
+ x15 += j15
+
+ out[0] = byte(x0)
+ out[1] = byte(x0 >> 8)
+ out[2] = byte(x0 >> 16)
+ out[3] = byte(x0 >> 24)
+
+ out[4] = byte(x1)
+ out[5] = byte(x1 >> 8)
+ out[6] = byte(x1 >> 16)
+ out[7] = byte(x1 >> 24)
+
+ out[8] = byte(x2)
+ out[9] = byte(x2 >> 8)
+ out[10] = byte(x2 >> 16)
+ out[11] = byte(x2 >> 24)
+
+ out[12] = byte(x3)
+ out[13] = byte(x3 >> 8)
+ out[14] = byte(x3 >> 16)
+ out[15] = byte(x3 >> 24)
+
+ out[16] = byte(x4)
+ out[17] = byte(x4 >> 8)
+ out[18] = byte(x4 >> 16)
+ out[19] = byte(x4 >> 24)
+
+ out[20] = byte(x5)
+ out[21] = byte(x5 >> 8)
+ out[22] = byte(x5 >> 16)
+ out[23] = byte(x5 >> 24)
+
+ out[24] = byte(x6)
+ out[25] = byte(x6 >> 8)
+ out[26] = byte(x6 >> 16)
+ out[27] = byte(x6 >> 24)
+
+ out[28] = byte(x7)
+ out[29] = byte(x7 >> 8)
+ out[30] = byte(x7 >> 16)
+ out[31] = byte(x7 >> 24)
+
+ out[32] = byte(x8)
+ out[33] = byte(x8 >> 8)
+ out[34] = byte(x8 >> 16)
+ out[35] = byte(x8 >> 24)
+
+ out[36] = byte(x9)
+ out[37] = byte(x9 >> 8)
+ out[38] = byte(x9 >> 16)
+ out[39] = byte(x9 >> 24)
+
+ out[40] = byte(x10)
+ out[41] = byte(x10 >> 8)
+ out[42] = byte(x10 >> 16)
+ out[43] = byte(x10 >> 24)
+
+ out[44] = byte(x11)
+ out[45] = byte(x11 >> 8)
+ out[46] = byte(x11 >> 16)
+ out[47] = byte(x11 >> 24)
+
+ out[48] = byte(x12)
+ out[49] = byte(x12 >> 8)
+ out[50] = byte(x12 >> 16)
+ out[51] = byte(x12 >> 24)
+
+ out[52] = byte(x13)
+ out[53] = byte(x13 >> 8)
+ out[54] = byte(x13 >> 16)
+ out[55] = byte(x13 >> 24)
+
+ out[56] = byte(x14)
+ out[57] = byte(x14 >> 8)
+ out[58] = byte(x14 >> 16)
+ out[59] = byte(x14 >> 24)
+
+ out[60] = byte(x15)
+ out[61] = byte(x15 >> 8)
+ out[62] = byte(x15 >> 16)
+ out[63] = byte(x15 >> 24)
+}
+
+// XORKeyStream crypts bytes from in to out using the given key and counters.
+// In and out may be the same slice but otherwise should not overlap. Counter
+// contains the raw salsa20 counter bytes (both nonce and block counter).
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
+ var block [64]byte
+ var counterCopy [16]byte
+ copy(counterCopy[:], counter[:])
+
+ for len(in) >= 64 {
+ core(&block, &counterCopy, key, &Sigma)
+ for i, x := range block {
+ out[i] = in[i] ^ x
+ }
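+ // Advance the 64-bit little-endian block counter held in
+ // counterCopy[8:16].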
+ u := uint32(1)
+ for i := 8; i < 16; i++ {
+ u += uint32(counterCopy[i])
+ counterCopy[i] = byte(u)
+ u >>= 8
+ }
+ in = in[64:]
+ out = out[64:]
+ }
+
+ if len(in) > 0 {
+ core(&block, &counterCopy, key, &Sigma)
+ for i, v := range in {
+ out[i] = v ^ block[i]
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa_test.go
new file mode 100644
index 00000000000..f8cecd9e6e9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa/salsa_test.go
@@ -0,0 +1,35 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package salsa
+
+import "testing"
+
+func TestCore208(t *testing.T) {
+ in := [64]byte{
+ 0x7e, 0x87, 0x9a, 0x21, 0x4f, 0x3e, 0xc9, 0x86,
+ 0x7c, 0xa9, 0x40, 0xe6, 0x41, 0x71, 0x8f, 0x26,
+ 0xba, 0xee, 0x55, 0x5b, 0x8c, 0x61, 0xc1, 0xb5,
+ 0x0d, 0xf8, 0x46, 0x11, 0x6d, 0xcd, 0x3b, 0x1d,
+ 0xee, 0x24, 0xf3, 0x19, 0xdf, 0x9b, 0x3d, 0x85,
+ 0x14, 0x12, 0x1e, 0x4b, 0x5a, 0xc5, 0xaa, 0x32,
+ 0x76, 0x02, 0x1d, 0x29, 0x09, 0xc7, 0x48, 0x29,
+ 0xed, 0xeb, 0xc6, 0x8d, 0xb8, 0xb8, 0xc2, 0x5e}
+
+ out := [64]byte{
+ 0xa4, 0x1f, 0x85, 0x9c, 0x66, 0x08, 0xcc, 0x99,
+ 0x3b, 0x81, 0xca, 0xcb, 0x02, 0x0c, 0xef, 0x05,
+ 0x04, 0x4b, 0x21, 0x81, 0xa2, 0xfd, 0x33, 0x7d,
+ 0xfd, 0x7b, 0x1c, 0x63, 0x96, 0x68, 0x2f, 0x29,
+ 0xb4, 0x39, 0x31, 0x68, 0xe3, 0xc9, 0xe6, 0xbc,
+ 0xfe, 0x6b, 0xc5, 0xb7, 0xa0, 0x6d, 0x96, 0xba,
+ 0xe4, 0x24, 0xcc, 0x10, 0x2c, 0x91, 0x74, 0x5c,
+ 0x24, 0xad, 0x67, 0x3d, 0xc7, 0x61, 0x8f, 0x81,
+ }
+
+ Core208(&in, &in)
+ if in != out {
+ t.Errorf("expected %x, got %x", out, in)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa20.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa20.go
new file mode 100644
index 00000000000..fde9846b6eb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa20.go
@@ -0,0 +1,54 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package salsa20 implements the Salsa20 stream cipher as specified in http://cr.yp.to/snuffle/spec.pdf.
+
+Salsa20 differs from many other stream ciphers in that it is message oriented
+rather than byte oriented. Keystream blocks are not preserved between calls;
+therefore each side must encrypt/decrypt data with the same segmentation.
+
+Another aspect of this difference is that part of the counter is exposed as
+a nonce in each call. Encrypting two different messages with the same (key,
+nonce) pair leads to trivial plaintext recovery. This is analogous to
+encrypting two different messages with the same key with a traditional stream
+cipher.
+
+This package also implements XSalsa20: a version of Salsa20 with a 24-byte
+nonce as specified in http://cr.yp.to/snuffle/xsalsa-20081128.pdf. Simply
+passing a 24-byte slice as the nonce triggers XSalsa20.
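+
+A minimal usage sketch (variable names are illustrative, not part of the
+API):
+
+	var key [32]byte
+	nonce := make([]byte, 24) // 24 bytes selects XSalsa20; 8 selects Salsa20
+	dst := make([]byte, len(msg))
+	salsa20.XORKeyStream(dst, msg, nonce, &key)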
+*/
+package salsa20 // import "golang.org/x/crypto/salsa20"
+
+// TODO(agl): implement XORKeyStream12 and XORKeyStream8 - the reduced round variants of Salsa20.
+
+import (
+ "golang.org/x/crypto/salsa20/salsa"
+)
+
+// XORKeyStream crypts bytes from in to out using the given key and nonce. In
+// and out may be the same slice but otherwise should not overlap. Nonce must
+// be either 8 or 24 bytes long; the function panics otherwise.
+func XORKeyStream(out, in []byte, nonce []byte, key *[32]byte) {
+ if len(out) < len(in) {
+ in = in[:len(out)]
+ }
+
+ var subNonce [16]byte
+
+ if len(nonce) == 24 {
+ var subKey [32]byte
+ var hNonce [16]byte
+ copy(hNonce[:], nonce[:16])
+ salsa.HSalsa20(&subKey, &hNonce, key, &salsa.Sigma)
+ copy(subNonce[:], nonce[16:])
+ key = &subKey
+ } else if len(nonce) == 8 {
+ copy(subNonce[:], nonce[:])
+ } else {
+ panic("salsa20: nonce must be 8 or 24 bytes")
+ }
+
+ salsa.XORKeyStream(out, in, &subNonce, key)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa20_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa20_test.go
new file mode 100644
index 00000000000..0ef3328eb0a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/salsa20/salsa20_test.go
@@ -0,0 +1,139 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package salsa20
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+)
+
+func fromHex(s string) []byte {
+ ret, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return ret
+}
+
+// testVectors was taken from set 6 of the ECRYPT test vectors:
+// http://www.ecrypt.eu.org/stream/svn/viewcvs.cgi/ecrypt/trunk/submissions/salsa20/full/verified.test-vectors?logsort=rev&rev=210&view=markup
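+//
+// The xor field is the XOR of all 64-byte keystream blocks, which is how
+// the ECRYPT vectors summarize long outputs (see the folding loop in
+// TestSalsa20 below).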
+var testVectors = []struct {
+ key []byte
+ iv []byte
+ numBytes int
+ xor []byte
+}{
+ {
+ fromHex("0053A6F94C9FF24598EB3E91E4378ADD3083D6297CCF2275C81B6EC11467BA0D"),
+ fromHex("0D74DB42A91077DE"),
+ 131072,
+ fromHex("C349B6A51A3EC9B712EAED3F90D8BCEE69B7628645F251A996F55260C62EF31FD6C6B0AEA94E136C9D984AD2DF3578F78E457527B03A0450580DD874F63B1AB9"),
+ },
+ {
+ fromHex("0558ABFE51A4F74A9DF04396E93C8FE23588DB2E81D4277ACD2073C6196CBF12"),
+ fromHex("167DE44BB21980E7"),
+ 131072,
+ fromHex("C3EAAF32836BACE32D04E1124231EF47E101367D6305413A0EEB07C60698A2876E4D031870A739D6FFDDD208597AFF0A47AC17EDB0167DD67EBA84F1883D4DFD"),
+ },
+ {
+ fromHex("0A5DB00356A9FC4FA2F5489BEE4194E73A8DE03386D92C7FD22578CB1E71C417"),
+ fromHex("1F86ED54BB2289F0"),
+ 131072,
+ fromHex("3CD23C3DC90201ACC0CF49B440B6C417F0DC8D8410A716D5314C059E14B1A8D9A9FB8EA3D9C8DAE12B21402F674AA95C67B1FC514E994C9D3F3A6E41DFF5BBA6"),
+ },
+ {
+ fromHex("0F62B5085BAE0154A7FA4DA0F34699EC3F92E5388BDE3184D72A7DD02376C91C"),
+ fromHex("288FF65DC42B92F9"),
+ 131072,
+ fromHex("E00EBCCD70D69152725F9987982178A2E2E139C7BCBE04CA8A0E99E318D9AB76F988C8549F75ADD790BA4F81C176DA653C1A043F11A958E169B6D2319F4EEC1A"),
+ },
+}
+
+func TestSalsa20(t *testing.T) {
+ var inBuf, outBuf []byte
+ var key [32]byte
+
+ for i, test := range testVectors {
+ if test.numBytes%64 != 0 {
+ t.Errorf("#%d: numBytes is not a multiple of 64", i)
+ continue
+ }
+
+ if test.numBytes > len(inBuf) {
+ inBuf = make([]byte, test.numBytes)
+ outBuf = make([]byte, test.numBytes)
+ }
+ in := inBuf[:test.numBytes]
+ out := outBuf[:test.numBytes]
+ copy(key[:], test.key)
+ XORKeyStream(out, in, test.iv, &key)
+
+ var xor [64]byte
+ for len(out) > 0 {
+ for i := 0; i < 64; i++ {
+ xor[i] ^= out[i]
+ }
+ out = out[64:]
+ }
+
+ if !bytes.Equal(xor[:], test.xor) {
+ t.Errorf("#%d: bad result", i)
+ }
+ }
+}
+
+var xSalsa20TestData = []struct {
+ in, nonce, key, out []byte
+}{
+ {
+ []byte("Hello world!"),
+ []byte("24-byte nonce for xsalsa"),
+ []byte("this is 32-byte key for xsalsa20"),
+ []byte{0x00, 0x2d, 0x45, 0x13, 0x84, 0x3f, 0xc2, 0x40, 0xc4, 0x01, 0xe5, 0x41},
+ },
+ {
+ make([]byte, 64),
+ []byte("24-byte nonce for xsalsa"),
+ []byte("this is 32-byte key for xsalsa20"),
+ []byte{0x48, 0x48, 0x29, 0x7f, 0xeb, 0x1f, 0xb5, 0x2f, 0xb6,
+ 0x6d, 0x81, 0x60, 0x9b, 0xd5, 0x47, 0xfa, 0xbc, 0xbe, 0x70,
+ 0x26, 0xed, 0xc8, 0xb5, 0xe5, 0xe4, 0x49, 0xd0, 0x88, 0xbf,
+ 0xa6, 0x9c, 0x08, 0x8f, 0x5d, 0x8d, 0xa1, 0xd7, 0x91, 0x26,
+ 0x7c, 0x2c, 0x19, 0x5a, 0x7f, 0x8c, 0xae, 0x9c, 0x4b, 0x40,
+ 0x50, 0xd0, 0x8c, 0xe6, 0xd3, 0xa1, 0x51, 0xec, 0x26, 0x5f,
+ 0x3a, 0x58, 0xe4, 0x76, 0x48},
+ },
+}
+
+func TestXSalsa20(t *testing.T) {
+ var key [32]byte
+
+ for i, test := range xSalsa20TestData {
+ out := make([]byte, len(test.in))
+ copy(key[:], test.key)
+ XORKeyStream(out, test.in, test.nonce, &key)
+ if !bytes.Equal(out, test.out) {
+ t.Errorf("%d: expected %x, got %x", i, test.out, out)
+ }
+ }
+}
+
+var (
+ keyArray [32]byte
+ key = &keyArray
+ nonce [8]byte
+ msg = make([]byte, 1<<10)
+)
+
+func BenchmarkXOR1K(b *testing.B) {
+ b.StopTimer()
+ out := make([]byte, 1024)
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ XORKeyStream(out, msg[:1024], nonce[:], key)
+ }
+ b.SetBytes(1024)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/scrypt/scrypt.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/scrypt/scrypt.go
new file mode 100644
index 00000000000..dc0124b1fce
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/scrypt/scrypt.go
@@ -0,0 +1,243 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package scrypt implements the scrypt key derivation function as defined in
+// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard
+// Functions" (http://www.tarsnap.com/scrypt/scrypt.pdf).
+package scrypt // import "golang.org/x/crypto/scrypt"
+
+import (
+ "crypto/sha256"
+ "errors"
+
+ "golang.org/x/crypto/pbkdf2"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// blockCopy copies n numbers from src into dst.
+func blockCopy(dst, src []uint32, n int) {
+ copy(dst, src[:n])
+}
+
+// blockXOR XORs numbers from dst with n numbers from src.
+func blockXOR(dst, src []uint32, n int) {
+ for i, v := range src[:n] {
+ dst[i] ^= v
+ }
+}
+
+// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in,
+// and puts the result into both tmp and out.
+func salsaXOR(tmp *[16]uint32, in, out []uint32) {
+ w0 := tmp[0] ^ in[0]
+ w1 := tmp[1] ^ in[1]
+ w2 := tmp[2] ^ in[2]
+ w3 := tmp[3] ^ in[3]
+ w4 := tmp[4] ^ in[4]
+ w5 := tmp[5] ^ in[5]
+ w6 := tmp[6] ^ in[6]
+ w7 := tmp[7] ^ in[7]
+ w8 := tmp[8] ^ in[8]
+ w9 := tmp[9] ^ in[9]
+ w10 := tmp[10] ^ in[10]
+ w11 := tmp[11] ^ in[11]
+ w12 := tmp[12] ^ in[12]
+ w13 := tmp[13] ^ in[13]
+ w14 := tmp[14] ^ in[14]
+ w15 := tmp[15] ^ in[15]
+
+ x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8
+ x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15
+
+ for i := 0; i < 8; i += 2 {
+ u := x0 + x12
+ x4 ^= u<<7 | u>>(32-7)
+ u = x4 + x0
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x4
+ x12 ^= u<<13 | u>>(32-13)
+ u = x12 + x8
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x1
+ x9 ^= u<<7 | u>>(32-7)
+ u = x9 + x5
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x9
+ x1 ^= u<<13 | u>>(32-13)
+ u = x1 + x13
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x6
+ x14 ^= u<<7 | u>>(32-7)
+ u = x14 + x10
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x14
+ x6 ^= u<<13 | u>>(32-13)
+ u = x6 + x2
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x11
+ x3 ^= u<<7 | u>>(32-7)
+ u = x3 + x15
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x3
+ x11 ^= u<<13 | u>>(32-13)
+ u = x11 + x7
+ x15 ^= u<<18 | u>>(32-18)
+
+ u = x0 + x3
+ x1 ^= u<<7 | u>>(32-7)
+ u = x1 + x0
+ x2 ^= u<<9 | u>>(32-9)
+ u = x2 + x1
+ x3 ^= u<<13 | u>>(32-13)
+ u = x3 + x2
+ x0 ^= u<<18 | u>>(32-18)
+
+ u = x5 + x4
+ x6 ^= u<<7 | u>>(32-7)
+ u = x6 + x5
+ x7 ^= u<<9 | u>>(32-9)
+ u = x7 + x6
+ x4 ^= u<<13 | u>>(32-13)
+ u = x4 + x7
+ x5 ^= u<<18 | u>>(32-18)
+
+ u = x10 + x9
+ x11 ^= u<<7 | u>>(32-7)
+ u = x11 + x10
+ x8 ^= u<<9 | u>>(32-9)
+ u = x8 + x11
+ x9 ^= u<<13 | u>>(32-13)
+ u = x9 + x8
+ x10 ^= u<<18 | u>>(32-18)
+
+ u = x15 + x14
+ x12 ^= u<<7 | u>>(32-7)
+ u = x12 + x15
+ x13 ^= u<<9 | u>>(32-9)
+ u = x13 + x12
+ x14 ^= u<<13 | u>>(32-13)
+ u = x14 + x13
+ x15 ^= u<<18 | u>>(32-18)
+ }
+ x0 += w0
+ x1 += w1
+ x2 += w2
+ x3 += w3
+ x4 += w4
+ x5 += w5
+ x6 += w6
+ x7 += w7
+ x8 += w8
+ x9 += w9
+ x10 += w10
+ x11 += w11
+ x12 += w12
+ x13 += w13
+ x14 += w14
+ x15 += w15
+
+ out[0], tmp[0] = x0, x0
+ out[1], tmp[1] = x1, x1
+ out[2], tmp[2] = x2, x2
+ out[3], tmp[3] = x3, x3
+ out[4], tmp[4] = x4, x4
+ out[5], tmp[5] = x5, x5
+ out[6], tmp[6] = x6, x6
+ out[7], tmp[7] = x7, x7
+ out[8], tmp[8] = x8, x8
+ out[9], tmp[9] = x9, x9
+ out[10], tmp[10] = x10, x10
+ out[11], tmp[11] = x11, x11
+ out[12], tmp[12] = x12, x12
+ out[13], tmp[13] = x13, x13
+ out[14], tmp[14] = x14, x14
+ out[15], tmp[15] = x15, x15
+}
+
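+// blockMix performs the scrypt BlockMix operation with Salsa20/8 on 2*r
+// 16-word blocks, writing even-indexed results to the first half of out
+// and odd-indexed results to the second half.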
+func blockMix(tmp *[16]uint32, in, out []uint32, r int) {
+ blockCopy(tmp[:], in[(2*r-1)*16:], 16)
+ for i := 0; i < 2*r; i += 2 {
+ salsaXOR(tmp, in[i*16:], out[i*8:])
+ salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:])
+ }
+}
+
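+// integer implements the scrypt Integerify step: it reads the first two
+// 32-bit words of the last 16-word block of b as a little-endian uint64.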
+func integer(b []uint32, r int) uint64 {
+ j := (2*r - 1) * 16
+ return uint64(b[j]) | uint64(b[j+1])<<32
+}
+
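+// smix implements scrypt's ROMix: it fills v with N successive blockMix
+// outputs of the 32*r-word block in b, then performs N pseudo-random
+// XOR-and-mix passes indexed by integer(), using xy as scratch space.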
+func smix(b []byte, r, N int, v, xy []uint32) {
+ var tmp [16]uint32
+ x := xy
+ y := xy[32*r:]
+
+ j := 0
+ for i := 0; i < 32*r; i++ {
+ x[i] = uint32(b[j]) | uint32(b[j+1])<<8 | uint32(b[j+2])<<16 | uint32(b[j+3])<<24
+ j += 4
+ }
+ for i := 0; i < N; i += 2 {
+ blockCopy(v[i*(32*r):], x, 32*r)
+ blockMix(&tmp, x, y, r)
+
+ blockCopy(v[(i+1)*(32*r):], y, 32*r)
+ blockMix(&tmp, y, x, r)
+ }
+ for i := 0; i < N; i += 2 {
+ j := int(integer(x, r) & uint64(N-1))
+ blockXOR(x, v[j*(32*r):], 32*r)
+ blockMix(&tmp, x, y, r)
+
+ j = int(integer(y, r) & uint64(N-1))
+ blockXOR(y, v[j*(32*r):], 32*r)
+ blockMix(&tmp, y, x, r)
+ }
+ j = 0
+ for _, v := range x[:32*r] {
+ b[j+0] = byte(v >> 0)
+ b[j+1] = byte(v >> 8)
+ b[j+2] = byte(v >> 16)
+ b[j+3] = byte(v >> 24)
+ j += 4
+ }
+}
+
+// Key derives a key from the password, salt, and cost parameters, returning
+// a byte slice of length keyLen that can be used as a cryptographic key.
+//
+// N is a CPU/memory cost parameter, which must be a power of two greater than 1.
+// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the
+// limits, the function returns a nil byte slice and an error.
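+//
+// (As a rough memory estimate from this implementation: the working
+// vector v holds 32*N*r uint32s, i.e. 128*N*r bytes, so N=16384 and r=8
+// use about 16 MiB.)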
+//
+// For example, you can get a derived key for AES-256 (which needs a
+// 32-byte key) by doing:
+//
+// dk := scrypt.Key([]byte("some password"), salt, 16384, 8, 1, 32)
+//
+// The recommended parameters for interactive logins as of 2009 are N=16384,
+// r=8, p=1. They should be increased as memory latency and CPU parallelism
+// increases. Remember to get a good random salt.
+func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) {
+ if N <= 1 || N&(N-1) != 0 {
+ return nil, errors.New("scrypt: N must be > 1 and a power of 2")
+ }
+ if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r {
+ return nil, errors.New("scrypt: parameters are too large")
+ }
+
+ xy := make([]uint32, 64*r)
+ v := make([]uint32, 32*N*r)
+ b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New)
+
+ for i := 0; i < p; i++ {
+ smix(b[i*128*r:], r, N, v, xy)
+ }
+
+ return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/scrypt/scrypt_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/scrypt/scrypt_test.go
new file mode 100644
index 00000000000..e096c3a31ab
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/scrypt/scrypt_test.go
@@ -0,0 +1,160 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scrypt
+
+import (
+ "bytes"
+ "testing"
+)
+
+type testVector struct {
+ password string
+ salt string
+ N, r, p int
+ output []byte
+}
+
+var good = []testVector{
+ {
+ "password",
+ "salt",
+ 2, 10, 10,
+ []byte{
+ 0x48, 0x2c, 0x85, 0x8e, 0x22, 0x90, 0x55, 0xe6, 0x2f,
+ 0x41, 0xe0, 0xec, 0x81, 0x9a, 0x5e, 0xe1, 0x8b, 0xdb,
+ 0x87, 0x25, 0x1a, 0x53, 0x4f, 0x75, 0xac, 0xd9, 0x5a,
+ 0xc5, 0xe5, 0xa, 0xa1, 0x5f,
+ },
+ },
+ {
+ "password",
+ "salt",
+ 16, 100, 100,
+ []byte{
+ 0x88, 0xbd, 0x5e, 0xdb, 0x52, 0xd1, 0xdd, 0x0, 0x18,
+ 0x87, 0x72, 0xad, 0x36, 0x17, 0x12, 0x90, 0x22, 0x4e,
+ 0x74, 0x82, 0x95, 0x25, 0xb1, 0x8d, 0x73, 0x23, 0xa5,
+ 0x7f, 0x91, 0x96, 0x3c, 0x37,
+ },
+ },
+ {
+ "this is a long \000 password",
+ "and this is a long \000 salt",
+ 16384, 8, 1,
+ []byte{
+ 0xc3, 0xf1, 0x82, 0xee, 0x2d, 0xec, 0x84, 0x6e, 0x70,
+ 0xa6, 0x94, 0x2f, 0xb5, 0x29, 0x98, 0x5a, 0x3a, 0x09,
+ 0x76, 0x5e, 0xf0, 0x4c, 0x61, 0x29, 0x23, 0xb1, 0x7f,
+ 0x18, 0x55, 0x5a, 0x37, 0x07, 0x6d, 0xeb, 0x2b, 0x98,
+ 0x30, 0xd6, 0x9d, 0xe5, 0x49, 0x26, 0x51, 0xe4, 0x50,
+ 0x6a, 0xe5, 0x77, 0x6d, 0x96, 0xd4, 0x0f, 0x67, 0xaa,
+ 0xee, 0x37, 0xe1, 0x77, 0x7b, 0x8a, 0xd5, 0xc3, 0x11,
+ 0x14, 0x32, 0xbb, 0x3b, 0x6f, 0x7e, 0x12, 0x64, 0x40,
+ 0x18, 0x79, 0xe6, 0x41, 0xae,
+ },
+ },
+ {
+ "p",
+ "s",
+ 2, 1, 1,
+ []byte{
+ 0x48, 0xb0, 0xd2, 0xa8, 0xa3, 0x27, 0x26, 0x11, 0x98,
+ 0x4c, 0x50, 0xeb, 0xd6, 0x30, 0xaf, 0x52,
+ },
+ },
+
+ {
+ "",
+ "",
+ 16, 1, 1,
+ []byte{
+ 0x77, 0xd6, 0x57, 0x62, 0x38, 0x65, 0x7b, 0x20, 0x3b,
+ 0x19, 0xca, 0x42, 0xc1, 0x8a, 0x04, 0x97, 0xf1, 0x6b,
+ 0x48, 0x44, 0xe3, 0x07, 0x4a, 0xe8, 0xdf, 0xdf, 0xfa,
+ 0x3f, 0xed, 0xe2, 0x14, 0x42, 0xfc, 0xd0, 0x06, 0x9d,
+ 0xed, 0x09, 0x48, 0xf8, 0x32, 0x6a, 0x75, 0x3a, 0x0f,
+ 0xc8, 0x1f, 0x17, 0xe8, 0xd3, 0xe0, 0xfb, 0x2e, 0x0d,
+ 0x36, 0x28, 0xcf, 0x35, 0xe2, 0x0c, 0x38, 0xd1, 0x89,
+ 0x06,
+ },
+ },
+ {
+ "password",
+ "NaCl",
+ 1024, 8, 16,
+ []byte{
+ 0xfd, 0xba, 0xbe, 0x1c, 0x9d, 0x34, 0x72, 0x00, 0x78,
+ 0x56, 0xe7, 0x19, 0x0d, 0x01, 0xe9, 0xfe, 0x7c, 0x6a,
+ 0xd7, 0xcb, 0xc8, 0x23, 0x78, 0x30, 0xe7, 0x73, 0x76,
+ 0x63, 0x4b, 0x37, 0x31, 0x62, 0x2e, 0xaf, 0x30, 0xd9,
+ 0x2e, 0x22, 0xa3, 0x88, 0x6f, 0xf1, 0x09, 0x27, 0x9d,
+ 0x98, 0x30, 0xda, 0xc7, 0x27, 0xaf, 0xb9, 0x4a, 0x83,
+ 0xee, 0x6d, 0x83, 0x60, 0xcb, 0xdf, 0xa2, 0xcc, 0x06,
+ 0x40,
+ },
+ },
+ {
+ "pleaseletmein", "SodiumChloride",
+ 16384, 8, 1,
+ []byte{
+ 0x70, 0x23, 0xbd, 0xcb, 0x3a, 0xfd, 0x73, 0x48, 0x46,
+ 0x1c, 0x06, 0xcd, 0x81, 0xfd, 0x38, 0xeb, 0xfd, 0xa8,
+ 0xfb, 0xba, 0x90, 0x4f, 0x8e, 0x3e, 0xa9, 0xb5, 0x43,
+ 0xf6, 0x54, 0x5d, 0xa1, 0xf2, 0xd5, 0x43, 0x29, 0x55,
+ 0x61, 0x3f, 0x0f, 0xcf, 0x62, 0xd4, 0x97, 0x05, 0x24,
+ 0x2a, 0x9a, 0xf9, 0xe6, 0x1e, 0x85, 0xdc, 0x0d, 0x65,
+ 0x1e, 0x40, 0xdf, 0xcf, 0x01, 0x7b, 0x45, 0x57, 0x58,
+ 0x87,
+ },
+ },
+ /*
+ // Disabled: needs 1 GiB RAM and takes too long for a simple test.
+ {
+ "pleaseletmein", "SodiumChloride",
+ 1048576, 8, 1,
+ []byte{
+ 0x21, 0x01, 0xcb, 0x9b, 0x6a, 0x51, 0x1a, 0xae, 0xad,
+ 0xdb, 0xbe, 0x09, 0xcf, 0x70, 0xf8, 0x81, 0xec, 0x56,
+ 0x8d, 0x57, 0x4a, 0x2f, 0xfd, 0x4d, 0xab, 0xe5, 0xee,
+ 0x98, 0x20, 0xad, 0xaa, 0x47, 0x8e, 0x56, 0xfd, 0x8f,
+ 0x4b, 0xa5, 0xd0, 0x9f, 0xfa, 0x1c, 0x6d, 0x92, 0x7c,
+ 0x40, 0xf4, 0xc3, 0x37, 0x30, 0x40, 0x49, 0xe8, 0xa9,
+ 0x52, 0xfb, 0xcb, 0xf4, 0x5c, 0x6f, 0xa7, 0x7a, 0x41,
+ 0xa4,
+ },
+ },
+ */
+}
+
+var bad = []testVector{
+ {"p", "s", 0, 1, 1, nil}, // N == 0
+ {"p", "s", 1, 1, 1, nil}, // N == 1
+ {"p", "s", 7, 8, 1, nil}, // N is not power of 2
+ {"p", "s", 16, maxInt / 2, maxInt / 2, nil}, // p * r too large
+}
+
+func TestKey(t *testing.T) {
+ for i, v := range good {
+ k, err := Key([]byte(v.password), []byte(v.salt), v.N, v.r, v.p, len(v.output))
+ if err != nil {
+ t.Errorf("%d: got unexpected error: %s", i, err)
+ }
+ if !bytes.Equal(k, v.output) {
+ t.Errorf("%d: expected %x, got %x", i, v.output, k)
+ }
+ }
+ for i, v := range bad {
+ _, err := Key([]byte(v.password), []byte(v.salt), v.N, v.r, v.p, 32)
+ if err == nil {
+ t.Errorf("%d: expected error, got nil", i)
+ }
+ }
+}
+
+func BenchmarkKey(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Key([]byte("password"), []byte("salt"), 16384, 8, 1, 64)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/doc.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/doc.go
new file mode 100644
index 00000000000..a0ee3ae725d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/doc.go
@@ -0,0 +1,66 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sha3 implements the SHA-3 fixed-output-length hash functions and
+// the SHAKE variable-output-length hash functions defined by FIPS-202.
+//
+// Both types of hash function use the "sponge" construction and the Keccak
+// permutation. For a detailed specification see http://keccak.noekeon.org/
+//
+//
+// Guidance
+//
+// If you aren't sure what function you need, use SHAKE256 with at least 64
+// bytes of output. The SHAKE instances are faster than the SHA3 instances;
+// the latter have to allocate memory to conform to the hash.Hash interface.
+//
+// If you need a secret-key MAC (message authentication code), prepend the
+// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
+// output.
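+//
+// A sketch of that construction (key and msg are assumed inputs):
+//
+//	h := sha3.NewShake256()
+//	h.Write(key) // absorb the secret key first
+//	h.Write(msg)
+//	mac := make([]byte, 32)
+//	h.Read(mac)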
+//
+//
+// Security strengths
+//
+// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
+// strength against preimage attacks of x bits. Since they only produce "x"
+// bits of output, their collision-resistance is only "x/2" bits.
+//
+// The SHAKE-256 and -128 functions have a generic security strength of 256 and
+// 128 bits against all attacks, provided that at least 2x bits of their output
+// are used. Requesting more than 64 or 32 bytes of output, respectively, does
+// not increase the collision-resistance of the SHAKE functions.
+//
+//
+// The sponge construction
+//
+// A sponge builds a pseudo-random function from a public pseudo-random
+// permutation, by applying the permutation to a state of "rate + capacity"
+// bytes, but hiding "capacity" of the bytes.
+//
+// A sponge starts out with a zero state. To hash an input using a sponge, up
+// to "rate" bytes of the input are XORed into the sponge's state. The sponge
+// is then "full" and the permutation is applied to "empty" it. This process is
+// repeated until all the input has been "absorbed". The input is then padded.
+// The digest is "squeezed" from the sponge in the same way, except that
+// output is copied out instead of input being XORed in.
+//
+// A sponge is parameterized by its generic security strength, which is equal
+// to half its capacity; capacity + rate is equal to the permutation's width.
+// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
+// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
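+// For example, SHA3-256 uses a rate of 136 bytes (1088 bits), so its
+// generic strength is (1600 - 1088) / 2 = 256 bits.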
+//
+//
+// Recommendations
+//
+// The SHAKE functions are recommended for most new uses. They can produce
+// output of arbitrary length. SHAKE256, with an output length of at least
+// 64 bytes, provides 256-bit security against all attacks. The Keccak team
+// recommends it for most applications upgrading from SHA2-512. (NIST chose a
+// much stronger, but much slower, sponge instance for SHA3-512.)
+//
+// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
+// They produce output of the same length, with the same security strengths
+// against all attacks. This means, in particular, that SHA3-256 only has
+// 128-bit collision resistance, because its output length is 32 bytes.
+package sha3 // import "golang.org/x/crypto/sha3"
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/hashes.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/hashes.go
new file mode 100644
index 00000000000..2b51cf4e9b4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/hashes.go
@@ -0,0 +1,65 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// This file provides functions for creating instances of the SHA-3
+// and SHAKE hash functions, as well as utility functions for hashing
+// bytes.
+
+import (
+ "hash"
+)
+
+// New224 creates a new SHA3-224 hash.
+// Its generic security strength is 224 bits against preimage attacks,
+// and 112 bits against collision attacks.
+func New224() hash.Hash { return &state{rate: 144, outputLen: 28, dsbyte: 0x06} }
+
+// New256 creates a new SHA3-256 hash.
+// Its generic security strength is 256 bits against preimage attacks,
+// and 128 bits against collision attacks.
+func New256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x06} }
+
+// New384 creates a new SHA3-384 hash.
+// Its generic security strength is 384 bits against preimage attacks,
+// and 192 bits against collision attacks.
+func New384() hash.Hash { return &state{rate: 104, outputLen: 48, dsbyte: 0x06} }
+
+// New512 creates a new SHA3-512 hash.
+// Its generic security strength is 512 bits against preimage attacks,
+// and 256 bits against collision attacks.
+func New512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x06} }
+
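+// A quick usage sketch (data is an assumed []byte):
+//
+//	digest := Sum256(data) // one-shot; returns a [32]byte
+//	h := New256()          // streaming
+//	h.Write(data)
+//	sum := h.Sum(nil) // appends the 32-byte digest to nil
+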
+// Sum224 returns the SHA3-224 digest of the data.
+func Sum224(data []byte) (digest [28]byte) {
+ h := New224()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum256 returns the SHA3-256 digest of the data.
+func Sum256(data []byte) (digest [32]byte) {
+ h := New256()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum384 returns the SHA3-384 digest of the data.
+func Sum384(data []byte) (digest [48]byte) {
+ h := New384()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
+
+// Sum512 returns the SHA3-512 digest of the data.
+func Sum512(data []byte) (digest [64]byte) {
+ h := New512()
+ h.Write(data)
+ h.Sum(digest[:0])
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/keccakf.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/keccakf.go
new file mode 100644
index 00000000000..13e7058fa98
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/keccakf.go
@@ -0,0 +1,410 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// rc stores the round constants for use in the ι step.
+var rc = [24]uint64{
+ 0x0000000000000001,
+ 0x0000000000008082,
+ 0x800000000000808A,
+ 0x8000000080008000,
+ 0x000000000000808B,
+ 0x0000000080000001,
+ 0x8000000080008081,
+ 0x8000000000008009,
+ 0x000000000000008A,
+ 0x0000000000000088,
+ 0x0000000080008009,
+ 0x000000008000000A,
+ 0x000000008000808B,
+ 0x800000000000008B,
+ 0x8000000000008089,
+ 0x8000000000008003,
+ 0x8000000000008002,
+ 0x8000000000000080,
+ 0x000000000000800A,
+ 0x800000008000000A,
+ 0x8000000080008081,
+ 0x8000000000008080,
+ 0x0000000080000001,
+ 0x8000000080008008,
+}
+
+// keccakF1600 applies the Keccak permutation to a 1600-bit-wide
+// state represented as an array of 25 uint64s.
+func keccakF1600(a *[25]uint64) {
+ // Implementation translated from Keccak-inplace.c
+ // in the keccak reference code.
+ var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
+
+ for i := 0; i < 24; i += 4 {
+ // Combines the 5 steps in each round into 2 steps.
+ // Unrolls 4 rounds per loop and spreads some steps across rounds.
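+ // In Keccak terms, the bc*/d* parity block below is θ, the
+ // rotations into bc0..bc4 implement ρ and π, the x ^ (y &^ z)
+ // pattern is χ, and the rc[i] XOR into a[0] is ι.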
+
+ // Round 1
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[6] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[12] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[18] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[24] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i]
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[16] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[22] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[3] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[1] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[7] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[19] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[11] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[23] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[4] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[2] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[8] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[14] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 2
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[16] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[7] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[23] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[14] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+1]
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[11] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[2] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[18] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[6] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[22] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[4] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[1] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[8] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[24] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[12] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[3] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[19] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 3
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[11] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[22] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[8] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[19] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+2]
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[1] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[12] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[23] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[16] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[2] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[24] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[6] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[3] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[14] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[7] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[18] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[4] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ // Round 4
+ bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
+ bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
+ bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
+ bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
+ bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
+ d0 = bc4 ^ (bc1<<1 | bc1>>63)
+ d1 = bc0 ^ (bc2<<1 | bc2>>63)
+ d2 = bc1 ^ (bc3<<1 | bc3>>63)
+ d3 = bc2 ^ (bc4<<1 | bc4>>63)
+ d4 = bc3 ^ (bc0<<1 | bc0>>63)
+
+ bc0 = a[0] ^ d0
+ t = a[1] ^ d1
+ bc1 = t<<44 | t>>(64-44)
+ t = a[2] ^ d2
+ bc2 = t<<43 | t>>(64-43)
+ t = a[3] ^ d3
+ bc3 = t<<21 | t>>(64-21)
+ t = a[4] ^ d4
+ bc4 = t<<14 | t>>(64-14)
+ a[0] = bc0 ^ (bc2 &^ bc1) ^ rc[i+3]
+ a[1] = bc1 ^ (bc3 &^ bc2)
+ a[2] = bc2 ^ (bc4 &^ bc3)
+ a[3] = bc3 ^ (bc0 &^ bc4)
+ a[4] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[5] ^ d0
+ bc2 = t<<3 | t>>(64-3)
+ t = a[6] ^ d1
+ bc3 = t<<45 | t>>(64-45)
+ t = a[7] ^ d2
+ bc4 = t<<61 | t>>(64-61)
+ t = a[8] ^ d3
+ bc0 = t<<28 | t>>(64-28)
+ t = a[9] ^ d4
+ bc1 = t<<20 | t>>(64-20)
+ a[5] = bc0 ^ (bc2 &^ bc1)
+ a[6] = bc1 ^ (bc3 &^ bc2)
+ a[7] = bc2 ^ (bc4 &^ bc3)
+ a[8] = bc3 ^ (bc0 &^ bc4)
+ a[9] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[10] ^ d0
+ bc4 = t<<18 | t>>(64-18)
+ t = a[11] ^ d1
+ bc0 = t<<1 | t>>(64-1)
+ t = a[12] ^ d2
+ bc1 = t<<6 | t>>(64-6)
+ t = a[13] ^ d3
+ bc2 = t<<25 | t>>(64-25)
+ t = a[14] ^ d4
+ bc3 = t<<8 | t>>(64-8)
+ a[10] = bc0 ^ (bc2 &^ bc1)
+ a[11] = bc1 ^ (bc3 &^ bc2)
+ a[12] = bc2 ^ (bc4 &^ bc3)
+ a[13] = bc3 ^ (bc0 &^ bc4)
+ a[14] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[15] ^ d0
+ bc1 = t<<36 | t>>(64-36)
+ t = a[16] ^ d1
+ bc2 = t<<10 | t>>(64-10)
+ t = a[17] ^ d2
+ bc3 = t<<15 | t>>(64-15)
+ t = a[18] ^ d3
+ bc4 = t<<56 | t>>(64-56)
+ t = a[19] ^ d4
+ bc0 = t<<27 | t>>(64-27)
+ a[15] = bc0 ^ (bc2 &^ bc1)
+ a[16] = bc1 ^ (bc3 &^ bc2)
+ a[17] = bc2 ^ (bc4 &^ bc3)
+ a[18] = bc3 ^ (bc0 &^ bc4)
+ a[19] = bc4 ^ (bc1 &^ bc0)
+
+ t = a[20] ^ d0
+ bc3 = t<<41 | t>>(64-41)
+ t = a[21] ^ d1
+ bc4 = t<<2 | t>>(64-2)
+ t = a[22] ^ d2
+ bc0 = t<<62 | t>>(64-62)
+ t = a[23] ^ d3
+ bc1 = t<<55 | t>>(64-55)
+ t = a[24] ^ d4
+ bc2 = t<<39 | t>>(64-39)
+ a[20] = bc0 ^ (bc2 &^ bc1)
+ a[21] = bc1 ^ (bc3 &^ bc2)
+ a[22] = bc2 ^ (bc4 &^ bc3)
+ a[23] = bc3 ^ (bc0 &^ bc4)
+ a[24] = bc4 ^ (bc1 &^ bc0)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/register.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/register.go
new file mode 100644
index 00000000000..3cf6a22e093
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/register.go
@@ -0,0 +1,18 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.4
+
+package sha3
+
+import (
+ "crypto"
+)
+
+func init() {
+ crypto.RegisterHash(crypto.SHA3_224, New224)
+ crypto.RegisterHash(crypto.SHA3_256, New256)
+ crypto.RegisterHash(crypto.SHA3_384, New384)
+ crypto.RegisterHash(crypto.SHA3_512, New512)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/sha3.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/sha3.go
new file mode 100644
index 00000000000..c8fd31cb0ba
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/sha3.go
@@ -0,0 +1,193 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// spongeDirection indicates the direction bytes are flowing through the sponge.
+type spongeDirection int
+
+const (
+ // spongeAbsorbing indicates that the sponge is absorbing input.
+ spongeAbsorbing spongeDirection = iota
+ // spongeSqueezing indicates that the sponge is being squeezed.
+ spongeSqueezing
+)
+
+const (
+ // maxRate is the maximum size of the internal buffer. SHAKE-128
+ // currently needs the largest buffer (168 bytes).
+ maxRate = 168
+)
+
+type state struct {
+ // Generic sponge components.
+ a [25]uint64 // main state of the hash
+ buf []byte // points into storage
+ rate int // the number of bytes of state to use
+
+ // dsbyte contains the "domain separation" bits and the first bit of
+ // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
+ // SHA-3 and SHAKE functions by appending bitstrings to the message.
+ // Using a little-endian bit-ordering convention, these are "01" for SHA-3
+ // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
+ // padding rule from section 5.1 is applied to pad the message to a multiple
+ // of the rate, which involves adding a "1" bit, zero or more "0" bits, and
+ // a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
+ // giving 00000110b (0x06) and 00011111b (0x1f).
+ // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
+ // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
+ // Extendable-Output Functions (May 2014)"
+ dsbyte byte
+ storage [maxRate]byte
+
+ // Specific to SHA-3 and SHAKE.
+ fixedOutput bool // whether this is a fixed-output-length instance
+ outputLen int // the default output size in bytes
+ state spongeDirection // whether the sponge is absorbing or squeezing
+}
+
+// BlockSize returns the rate of the sponge underlying this hash function.
+func (d *state) BlockSize() int { return d.rate }
+
+// Size returns the output size of the hash function in bytes.
+func (d *state) Size() int { return d.outputLen }
+
+// Reset clears the internal state by zeroing the sponge state and
+// the byte buffer, and setting the sponge direction to absorbing.
+func (d *state) Reset() {
+ // Zero the permutation's state.
+ for i := range d.a {
+ d.a[i] = 0
+ }
+ d.state = spongeAbsorbing
+ d.buf = d.storage[:0]
+}
+
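+// clone returns a copy of d whose buf slice is rebased to point into the
+// copy's own storage array rather than the original's.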
+func (d *state) clone() *state {
+ ret := *d
+ if ret.state == spongeAbsorbing {
+ ret.buf = ret.storage[:len(ret.buf)]
+ } else {
+ ret.buf = ret.storage[d.rate-cap(d.buf) : d.rate]
+ }
+
+ return &ret
+}
+
+// permute applies the KeccakF-1600 permutation. It handles
+// any input-output buffering.
+func (d *state) permute() {
+ switch d.state {
+ case spongeAbsorbing:
+ // If we're absorbing, we need to xor the input into the state
+ // before applying the permutation.
+ xorIn(d, d.buf)
+ d.buf = d.storage[:0]
+ keccakF1600(&d.a)
+ case spongeSqueezing:
+ // If we're squeezing, we need to apply the permutation before
+ // copying more output.
+ keccakF1600(&d.a)
+ d.buf = d.storage[:d.rate]
+ copyOut(d, d.buf)
+ }
+}
+
+// padAndPermute appends the domain separation bits in dsbyte, applies
+// the multi-bitrate 10..1 padding rule, and permutes the state.
+func (d *state) padAndPermute(dsbyte byte) {
+ if d.buf == nil {
+ d.buf = d.storage[:0]
+ }
+ // Pad with this instance's domain-separator bits. We know that there's
+ // at least one byte of space in d.buf because, if it were full,
+ // permute would have been called to empty it. dsbyte also contains the
+ // first one bit for the padding. See the comment in the state struct.
+ d.buf = append(d.buf, dsbyte)
+ zerosStart := len(d.buf)
+ d.buf = d.storage[:d.rate]
+ for i := zerosStart; i < d.rate; i++ {
+ d.buf[i] = 0
+ }
+ // This adds the final one bit for the padding. Because of the way that
+ // bits are numbered from the LSB upwards, the final bit is the MSB of
+ // the last byte.
+ d.buf[d.rate-1] ^= 0x80
+ // Apply the permutation
+ d.permute()
+ d.state = spongeSqueezing
+ d.buf = d.storage[:d.rate]
+ copyOut(d, d.buf)
+}
+
+// Write absorbs more data into the hash's state. It panics if more data
+// is written after the sponge has already been read from.
+func (d *state) Write(p []byte) (written int, err error) {
+ if d.state != spongeAbsorbing {
+ panic("sha3: write to sponge after read")
+ }
+ if d.buf == nil {
+ d.buf = d.storage[:0]
+ }
+ written = len(p)
+
+ for len(p) > 0 {
+ if len(d.buf) == 0 && len(p) >= d.rate {
+ // The fast path; absorb a full "rate" bytes of input and apply the permutation.
+ xorIn(d, p[:d.rate])
+ p = p[d.rate:]
+ keccakF1600(&d.a)
+ } else {
+ // The slow path; buffer the input until we can fill the sponge, and then xor it in.
+ todo := d.rate - len(d.buf)
+ if todo > len(p) {
+ todo = len(p)
+ }
+ d.buf = append(d.buf, p[:todo]...)
+ p = p[todo:]
+
+ // If the sponge is full, apply the permutation.
+ if len(d.buf) == d.rate {
+ d.permute()
+ }
+ }
+ }
+
+ return
+}
+
+// Read squeezes an arbitrary number of bytes from the sponge.
+func (d *state) Read(out []byte) (n int, err error) {
+ // If we're still absorbing, pad and apply the permutation.
+ if d.state == spongeAbsorbing {
+ d.padAndPermute(d.dsbyte)
+ }
+
+ n = len(out)
+
+ // Now, do the squeezing.
+ for len(out) > 0 {
+ n := copy(out, d.buf)
+ d.buf = d.buf[n:]
+ out = out[n:]
+
+ // Apply the permutation if we've squeezed the sponge dry.
+ if len(d.buf) == 0 {
+ d.permute()
+ }
+ }
+
+ return
+}
+
+// Sum applies padding to the hash state and then squeezes out the desired
+// number of output bytes.
+func (d *state) Sum(in []byte) []byte {
+	// Make a copy of the original hash so that the caller can keep writing
+	// and summing.
+ dup := d.clone()
+ hash := make([]byte, dup.outputLen)
+ dup.Read(hash)
+ return append(in, hash...)
+}
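+
+// A minimal usage sketch (hedged: it assumes only the hash.Hash interface
+// implemented above and the New256 constructor provided elsewhere in this
+// package):
+//
+//	h := New256()
+//	h.Write([]byte("hello"))
+//	digest := h.Sum(nil) // 32-byte SHA3-256 digest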
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/sha3_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/sha3_test.go
new file mode 100644
index 00000000000..caf72f279f1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/sha3_test.go
@@ -0,0 +1,306 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// Tests include all the ShortMsgKATs provided by the Keccak team at
+// https://github.com/gvanas/KeccakCodePackage
+//
+// They only include the zero-bit case of the bitwise test vectors
+// published by NIST in the draft of FIPS-202.
+
+import (
+ "bytes"
+ "compress/flate"
+ "encoding/hex"
+ "encoding/json"
+ "hash"
+ "os"
+ "strings"
+ "testing"
+)
+
+const (
+ testString = "brekeccakkeccak koax koax"
+ katFilename = "testdata/keccakKats.json.deflate"
+)
+
+// Internal-use instances of SHAKE used to test against KATs.
+func newHashShake128() hash.Hash {
+ return &state{rate: 168, dsbyte: 0x1f, outputLen: 512}
+}
+func newHashShake256() hash.Hash {
+ return &state{rate: 136, dsbyte: 0x1f, outputLen: 512}
+}
+
+// testDigests contains functions returning hash.Hash instances
+// with output-length equal to the KAT length for both SHA-3 and
+// SHAKE instances.
+var testDigests = map[string]func() hash.Hash{
+ "SHA3-224": New224,
+ "SHA3-256": New256,
+ "SHA3-384": New384,
+ "SHA3-512": New512,
+ "SHAKE128": newHashShake128,
+ "SHAKE256": newHashShake256,
+}
+
+// testShakes contains functions that return ShakeHash instances for
+// testing the ShakeHash-specific interface.
+var testShakes = map[string]func() ShakeHash{
+ "SHAKE128": NewShake128,
+ "SHAKE256": NewShake256,
+}
+
+// decodeHex converts a hex-encoded string into a raw byte string.
+func decodeHex(s string) []byte {
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+// structs used to unmarshal the JSON test cases.
+type KeccakKats struct {
+ Kats map[string][]struct {
+ Digest string `json:"digest"`
+ Length int64 `json:"length"`
+ Message string `json:"message"`
+ }
+}
+
+func testUnalignedAndGeneric(t *testing.T, testf func(impl string)) {
+ xorInOrig, copyOutOrig := xorIn, copyOut
+ xorIn, copyOut = xorInGeneric, copyOutGeneric
+ testf("generic")
+ if xorImplementationUnaligned != "generic" {
+ xorIn, copyOut = xorInUnaligned, copyOutUnaligned
+ testf("unaligned")
+ }
+ xorIn, copyOut = xorInOrig, copyOutOrig
+}
+
+// TestKeccakKats tests the SHA-3 and SHAKE implementations against all the
+// ShortMsgKATs from https://github.com/gvanas/KeccakCodePackage
+// (The test vectors are stored in keccakKats.json.deflate due to their length.)
+func TestKeccakKats(t *testing.T) {
+ testUnalignedAndGeneric(t, func(impl string) {
+ // Read the KATs.
+ deflated, err := os.Open(katFilename)
+		if err != nil {
+			t.Fatalf("error opening %s: %s", katFilename, err)
+		}
+ file := flate.NewReader(deflated)
+ dec := json.NewDecoder(file)
+ var katSet KeccakKats
+ err = dec.Decode(&katSet)
+ if err != nil {
+ t.Errorf("error decoding KATs: %s", err)
+ }
+
+ // Do the KATs.
+ for functionName, kats := range katSet.Kats {
+ d := testDigests[functionName]()
+ for _, kat := range kats {
+ d.Reset()
+ in, err := hex.DecodeString(kat.Message)
+ if err != nil {
+ t.Errorf("error decoding KAT: %s", err)
+ }
+ d.Write(in[:kat.Length/8])
+ got := strings.ToUpper(hex.EncodeToString(d.Sum(nil)))
+ if got != kat.Digest {
+ t.Errorf("function=%s, implementation=%s, length=%d\nmessage:\n %s\ngot:\n %s\nwanted:\n %s",
+ functionName, impl, kat.Length, kat.Message, got, kat.Digest)
+ t.Logf("wanted %+v", kat)
+ t.FailNow()
+ }
+ }
+ }
+ })
+}
+
+// TestUnalignedWrite tests that writing data in an arbitrary pattern of
+// small input buffers produces the same digest as a single aligned write.
+func TestUnalignedWrite(t *testing.T) {
+ testUnalignedAndGeneric(t, func(impl string) {
+ buf := sequentialBytes(0x10000)
+ for alg, df := range testDigests {
+ d := df()
+ d.Reset()
+ d.Write(buf)
+ want := d.Sum(nil)
+ d.Reset()
+ for i := 0; i < len(buf); {
+ // Cycle through offsets which make a 137 byte sequence.
+ // Because 137 is prime this sequence should exercise all corner cases.
+ offsets := [17]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 1}
+ for _, j := range offsets {
+ if v := len(buf) - i; v < j {
+ j = v
+ }
+ d.Write(buf[i : i+j])
+ i += j
+ }
+ }
+ got := d.Sum(nil)
+ if !bytes.Equal(got, want) {
+ t.Errorf("Unaligned writes, implementation=%s, alg=%s\ngot %q, want %q", impl, alg, got, want)
+ }
+ }
+ })
+}
+
+// TestAppend checks that appending works when reallocation is necessary.
+func TestAppend(t *testing.T) {
+ testUnalignedAndGeneric(t, func(impl string) {
+ d := New224()
+
+ for capacity := 2; capacity <= 66; capacity += 64 {
+ // The first time around the loop, Sum will have to reallocate.
+ // The second time, it will not.
+ buf := make([]byte, 2, capacity)
+ d.Reset()
+ d.Write([]byte{0xcc})
+ buf = d.Sum(buf)
+ expected := "0000DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
+ if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
+ t.Errorf("got %s, want %s", got, expected)
+ }
+ }
+ })
+}
+
+// TestAppendNoRealloc tests that appending works when no reallocation is necessary.
+func TestAppendNoRealloc(t *testing.T) {
+ testUnalignedAndGeneric(t, func(impl string) {
+ buf := make([]byte, 1, 200)
+ d := New224()
+ d.Write([]byte{0xcc})
+ buf = d.Sum(buf)
+ expected := "00DF70ADC49B2E76EEE3A6931B93FA41841C3AF2CDF5B32A18B5478C39"
+ if got := strings.ToUpper(hex.EncodeToString(buf)); got != expected {
+ t.Errorf("%s: got %s, want %s", impl, got, expected)
+ }
+ })
+}
+
+// TestSqueezing checks that squeezing the full output a single time produces
+// the same output as repeatedly squeezing the instance.
+func TestSqueezing(t *testing.T) {
+ testUnalignedAndGeneric(t, func(impl string) {
+ for functionName, newShakeHash := range testShakes {
+ d0 := newShakeHash()
+ d0.Write([]byte(testString))
+ ref := make([]byte, 32)
+ d0.Read(ref)
+
+ d1 := newShakeHash()
+ d1.Write([]byte(testString))
+ var multiple []byte
+		for range ref {
+ one := make([]byte, 1)
+ d1.Read(one)
+ multiple = append(multiple, one...)
+ }
+ if !bytes.Equal(ref, multiple) {
+ t.Errorf("%s (%s): squeezing %d bytes one at a time failed", functionName, impl, len(ref))
+ }
+ }
+ })
+}
+
+// sequentialBytes produces a buffer of size consecutive bytes (0x00, 0x01, ...) used for testing.
+func sequentialBytes(size int) []byte {
+ result := make([]byte, size)
+ for i := range result {
+ result[i] = byte(i)
+ }
+ return result
+}
+
+// BenchmarkPermutationFunction measures the speed of the permutation function
+// with no input data.
+func BenchmarkPermutationFunction(b *testing.B) {
+ b.SetBytes(int64(200))
+ var lanes [25]uint64
+ for i := 0; i < b.N; i++ {
+ keccakF1600(&lanes)
+ }
+}
+
+// benchmarkHash measures the speed of hashing num buffers of size bytes each.
+func benchmarkHash(b *testing.B, h hash.Hash, size, num int) {
+ b.StopTimer()
+ h.Reset()
+ data := sequentialBytes(size)
+ b.SetBytes(int64(size * num))
+ b.StartTimer()
+
+ var state []byte
+ for i := 0; i < b.N; i++ {
+ for j := 0; j < num; j++ {
+ h.Write(data)
+ }
+ state = h.Sum(state[:0])
+ }
+ b.StopTimer()
+ h.Reset()
+}
+
+// benchmarkShake is specialized to the Shake instances, which don't
+// require a copy on reading output.
+func benchmarkShake(b *testing.B, h ShakeHash, size, num int) {
+ b.StopTimer()
+ h.Reset()
+ data := sequentialBytes(size)
+ d := make([]byte, 32)
+
+ b.SetBytes(int64(size * num))
+ b.StartTimer()
+
+ for i := 0; i < b.N; i++ {
+ h.Reset()
+ for j := 0; j < num; j++ {
+ h.Write(data)
+ }
+ h.Read(d)
+ }
+}
+
+func BenchmarkSha3_512_MTU(b *testing.B) { benchmarkHash(b, New512(), 1350, 1) }
+func BenchmarkSha3_384_MTU(b *testing.B) { benchmarkHash(b, New384(), 1350, 1) }
+func BenchmarkSha3_256_MTU(b *testing.B) { benchmarkHash(b, New256(), 1350, 1) }
+func BenchmarkSha3_224_MTU(b *testing.B) { benchmarkHash(b, New224(), 1350, 1) }
+
+func BenchmarkShake128_MTU(b *testing.B) { benchmarkShake(b, NewShake128(), 1350, 1) }
+func BenchmarkShake256_MTU(b *testing.B) { benchmarkShake(b, NewShake256(), 1350, 1) }
+func BenchmarkShake256_16x(b *testing.B) { benchmarkShake(b, NewShake256(), 16, 1024) }
+func BenchmarkShake256_1MiB(b *testing.B) { benchmarkShake(b, NewShake256(), 1024, 1024) }
+
+func BenchmarkSha3_512_1MiB(b *testing.B) { benchmarkHash(b, New512(), 1024, 1024) }
+
+func Example_sum() {
+ buf := []byte("some data to hash")
+ // A hash needs to be 64 bytes long to have 256-bit collision resistance.
+ h := make([]byte, 64)
+ // Compute a 64-byte hash of buf and put it in h.
+ ShakeSum256(h, buf)
+}
+
+func Example_mac() {
+ k := []byte("this is a secret key; you should generate a strong random key that's at least 32 bytes long")
+ buf := []byte("and this is some data to authenticate")
+ // A MAC with 32 bytes of output has 256-bit security strength -- if you use at least a 32-byte-long key.
+ h := make([]byte, 32)
+ d := NewShake256()
+ // Write the key into the hash.
+ d.Write(k)
+ // Now write the data.
+ d.Write(buf)
+ // Read 32 bytes of output from the hash into h.
+ d.Read(h)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/shake.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/shake.go
new file mode 100644
index 00000000000..841f9860f03
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/shake.go
@@ -0,0 +1,60 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+// This file defines the ShakeHash interface, and provides
+// functions for creating SHAKE instances, as well as utility
+// functions for hashing bytes to arbitrary-length output.
+
+import (
+ "io"
+)
+
+// ShakeHash defines the interface to hash functions that
+// support arbitrary-length output.
+type ShakeHash interface {
+ // Write absorbs more data into the hash's state. It panics if input is
+ // written to it after output has been read from it.
+ io.Writer
+
+ // Read reads more output from the hash; reading affects the hash's
+ // state. (ShakeHash.Read is thus very different from Hash.Sum)
+ // It never returns an error.
+ io.Reader
+
+ // Clone returns a copy of the ShakeHash in its current state.
+ Clone() ShakeHash
+
+ // Reset resets the ShakeHash to its initial state.
+ Reset()
+}
+
+func (d *state) Clone() ShakeHash {
+ return d.clone()
+}
+
+// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
+// Its generic security strength is 128 bits against all attacks if at
+// least 32 bytes of its output are used.
+func NewShake128() ShakeHash { return &state{rate: 168, dsbyte: 0x1f} }
+
+// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
+// Its generic security strength is 256 bits against all attacks if
+// at least 64 bytes of its output are used.
+func NewShake256() ShakeHash { return &state{rate: 136, dsbyte: 0x1f} }
+
+// ShakeSum128 writes an arbitrary-length digest of data into hash.
+func ShakeSum128(hash, data []byte) {
+ h := NewShake128()
+ h.Write(data)
+ h.Read(hash)
+}
+
+// ShakeSum256 writes an arbitrary-length digest of data into hash.
+func ShakeSum256(hash, data []byte) {
+ h := NewShake256()
+ h.Write(data)
+ h.Read(hash)
+}
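+
+// Usage sketch (it assumes only the functions defined above): SHAKE output
+// length is chosen by the caller, so the same input can yield a digest of
+// any size.
+//
+//	out := make([]byte, 16)
+//	ShakeSum128(out, []byte("data")) // fills out with 16 bytes of SHAKE128 output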
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflate b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflate
new file mode 100644
index 00000000000..62e85ae2423
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/testdata/keccakKats.json.deflate
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor.go
new file mode 100644
index 00000000000..d622979c115
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!386 appengine
+
+package sha3
+
+var (
+ xorIn = xorInGeneric
+ copyOut = copyOutGeneric
+ xorInUnaligned = xorInGeneric
+ copyOutUnaligned = copyOutGeneric
+)
+
+const xorImplementationUnaligned = "generic"
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor_generic.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor_generic.go
new file mode 100644
index 00000000000..fd35f02ef6e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor_generic.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sha3
+
+import "encoding/binary"
+
+// xorInGeneric xors the bytes in buf into the state; it
+// makes no non-portable assumptions about memory layout
+// or alignment.
+func xorInGeneric(d *state, buf []byte) {
+ n := len(buf) / 8
+
+ for i := 0; i < n; i++ {
+ a := binary.LittleEndian.Uint64(buf)
+ d.a[i] ^= a
+ buf = buf[8:]
+ }
+}
+
+// copyOutGeneric copies uint64s to a byte buffer.
+func copyOutGeneric(d *state, b []byte) {
+ for i := 0; len(b) >= 8; i++ {
+ binary.LittleEndian.PutUint64(b, d.a[i])
+ b = b[8:]
+ }
+}
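+
+// Property sketch (illustrative, not part of the file): over a zeroed state,
+// xorInGeneric followed by copyOutGeneric reproduces the input bytes, since
+// lane a[i] holds input bytes 8i..8i+7 in little-endian order.
+//
+//	var d state
+//	in := bytes.Repeat([]byte{0xAB}, 136)
+//	xorInGeneric(&d, in)
+//	out := make([]byte, 136)
+//	copyOutGeneric(&d, out) // out now equals in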
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor_unaligned.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor_unaligned.go
new file mode 100644
index 00000000000..c7851a1d850
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/sha3/xor_unaligned.go
@@ -0,0 +1,58 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64 386
+// +build !appengine
+
+package sha3
+
+import "unsafe"
+
+func xorInUnaligned(d *state, buf []byte) {
+ bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))
+ n := len(buf)
+ if n >= 72 {
+ d.a[0] ^= bw[0]
+ d.a[1] ^= bw[1]
+ d.a[2] ^= bw[2]
+ d.a[3] ^= bw[3]
+ d.a[4] ^= bw[4]
+ d.a[5] ^= bw[5]
+ d.a[6] ^= bw[6]
+ d.a[7] ^= bw[7]
+ d.a[8] ^= bw[8]
+ }
+ if n >= 104 {
+ d.a[9] ^= bw[9]
+ d.a[10] ^= bw[10]
+ d.a[11] ^= bw[11]
+ d.a[12] ^= bw[12]
+ }
+ if n >= 136 {
+ d.a[13] ^= bw[13]
+ d.a[14] ^= bw[14]
+ d.a[15] ^= bw[15]
+ d.a[16] ^= bw[16]
+ }
+ if n >= 144 {
+ d.a[17] ^= bw[17]
+ }
+ if n >= 168 {
+ d.a[18] ^= bw[18]
+ d.a[19] ^= bw[19]
+ d.a[20] ^= bw[20]
+ }
+}
+
+func copyOutUnaligned(d *state, buf []byte) {
+ ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
+ copy(buf, ab[:])
+}
+
+var (
+ xorIn = xorInUnaligned
+ copyOut = copyOutUnaligned
+)
+
+const xorImplementationUnaligned = "unaligned"
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/client.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/client.go
new file mode 100644
index 00000000000..8c856a08c18
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/client.go
@@ -0,0 +1,615 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ Package agent implements a client to an ssh-agent daemon.
+
+References:
+ [PROTOCOL.agent]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent?rev=HEAD
+*/
+package agent // import "golang.org/x/crypto/ssh/agent"
+
+import (
+ "bytes"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "encoding/base64"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "sync"
+
+ "golang.org/x/crypto/ssh"
+)
+
+// Agent represents the capabilities of an ssh-agent.
+type Agent interface {
+ // List returns the identities known to the agent.
+ List() ([]*Key, error)
+
+ // Sign has the agent sign the data using a protocol 2 key as defined
+ // in [PROTOCOL.agent] section 2.6.2.
+ Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error)
+
+ // Add adds a private key to the agent.
+ Add(key AddedKey) error
+
+ // Remove removes all identities with the given public key.
+ Remove(key ssh.PublicKey) error
+
+ // RemoveAll removes all identities.
+ RemoveAll() error
+
+	// Lock locks the agent. Sign and Remove will fail, and List will return an empty list.
+ Lock(passphrase []byte) error
+
+ // Unlock undoes the effect of Lock
+ Unlock(passphrase []byte) error
+
+ // Signers returns signers for all the known keys.
+ Signers() ([]ssh.Signer, error)
+}
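+
+// A minimal connection sketch (hedged: it assumes an OpenSSH-style agent
+// listening on the Unix socket named by SSH_AUTH_SOCK, and the net and os
+// packages):
+//
+//	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
+//	if err != nil {
+//		// handle error
+//	}
+//	ag := NewClient(conn)
+//	keys, err := ag.List() // enumerate the agent's identities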
+
+// AddedKey describes an SSH key to be added to an Agent.
+type AddedKey struct {
+ // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or
+ // *ecdsa.PrivateKey, which will be inserted into the agent.
+ PrivateKey interface{}
+ // Certificate, if not nil, is communicated to the agent and will be
+ // stored with the key.
+ Certificate *ssh.Certificate
+ // Comment is an optional, free-form string.
+ Comment string
+ // LifetimeSecs, if not zero, is the number of seconds that the
+ // agent will store the key for.
+ LifetimeSecs uint32
+ // ConfirmBeforeUse, if true, requests that the agent confirm with the
+ // user before each use of this key.
+ ConfirmBeforeUse bool
+}
+
+// See [PROTOCOL.agent], section 3.
+const (
+ agentRequestV1Identities = 1
+
+ // 3.2 Requests from client to agent for protocol 2 key operations
+ agentAddIdentity = 17
+ agentRemoveIdentity = 18
+ agentRemoveAllIdentities = 19
+ agentAddIdConstrained = 25
+
+ // 3.3 Key-type independent requests from client to agent
+ agentAddSmartcardKey = 20
+ agentRemoveSmartcardKey = 21
+ agentLock = 22
+ agentUnlock = 23
+ agentAddSmartcardKeyConstrained = 26
+
+ // 3.7 Key constraint identifiers
+ agentConstrainLifetime = 1
+ agentConstrainConfirm = 2
+)
+
+// maxAgentResponseBytes is the maximum agent reply size that is accepted. This
+// is a sanity check, not a limit in the spec.
+const maxAgentResponseBytes = 16 << 20
+
+// Agent messages:
+// These structures mirror the wire format of the corresponding ssh agent
+// messages found in [PROTOCOL.agent].
+
+// 3.4 Generic replies from agent to client
+const agentFailure = 5
+
+type failureAgentMsg struct{}
+
+const agentSuccess = 6
+
+type successAgentMsg struct{}
+
+// See [PROTOCOL.agent], section 2.5.2.
+const agentRequestIdentities = 11
+
+type requestIdentitiesAgentMsg struct{}
+
+// See [PROTOCOL.agent], section 2.5.2.
+const agentIdentitiesAnswer = 12
+
+type identitiesAnswerAgentMsg struct {
+ NumKeys uint32 `sshtype:"12"`
+ Keys []byte `ssh:"rest"`
+}
+
+// See [PROTOCOL.agent], section 2.6.2.
+const agentSignRequest = 13
+
+type signRequestAgentMsg struct {
+ KeyBlob []byte `sshtype:"13"`
+ Data []byte
+ Flags uint32
+}
+
+// See [PROTOCOL.agent], section 2.6.2.
+
+// 3.6 Replies from agent to client for protocol 2 key operations
+const agentSignResponse = 14
+
+type signResponseAgentMsg struct {
+ SigBlob []byte `sshtype:"14"`
+}
+
+type publicKey struct {
+ Format string
+ Rest []byte `ssh:"rest"`
+}
+
+// Key represents a protocol 2 public key as defined in
+// [PROTOCOL.agent], section 2.5.2.
+type Key struct {
+ Format string
+ Blob []byte
+ Comment string
+}
+
+func clientErr(err error) error {
+ return fmt.Errorf("agent: client error: %v", err)
+}
+
+// String returns the storage form of an agent key with the format, base64
+// encoded serialized key, and the comment if it is not empty.
+func (k *Key) String() string {
+	s := k.Format + " " + base64.StdEncoding.EncodeToString(k.Blob)
+
+ if k.Comment != "" {
+ s += " " + k.Comment
+ }
+
+ return s
+}
+
+// Type returns the public key type.
+func (k *Key) Type() string {
+ return k.Format
+}
+
+// Marshal returns the key blob to satisfy the ssh.PublicKey interface.
+func (k *Key) Marshal() []byte {
+ return k.Blob
+}
+
+// Verify satisfies the ssh.PublicKey interface, but is not
+// implemented for agent keys.
+func (k *Key) Verify(data []byte, sig *ssh.Signature) error {
+ return errors.New("agent: agent key does not know how to verify")
+}
+
+type wireKey struct {
+ Format string
+ Rest []byte `ssh:"rest"`
+}
+
+func parseKey(in []byte) (out *Key, rest []byte, err error) {
+ var record struct {
+ Blob []byte
+ Comment string
+ Rest []byte `ssh:"rest"`
+ }
+
+ if err := ssh.Unmarshal(in, &record); err != nil {
+ return nil, nil, err
+ }
+
+ var wk wireKey
+ if err := ssh.Unmarshal(record.Blob, &wk); err != nil {
+ return nil, nil, err
+ }
+
+ return &Key{
+ Format: wk.Format,
+ Blob: record.Blob,
+ Comment: record.Comment,
+ }, record.Rest, nil
+}
+
+// client is a client for an ssh-agent process.
+type client struct {
+ // conn is typically a *net.UnixConn
+ conn io.ReadWriter
+ // mu is used to prevent concurrent access to the agent
+ mu sync.Mutex
+}
+
+// NewClient returns an Agent that talks to an ssh-agent process over
+// the given connection.
+func NewClient(rw io.ReadWriter) Agent {
+ return &client{conn: rw}
+}
+
+// call sends an RPC to the agent. On success, the parsed reply message is
+// returned; its concrete type is selected by the first byte of the reply,
+// which encodes the message type.
+func (c *client) call(req []byte) (reply interface{}, err error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ msg := make([]byte, 4+len(req))
+ binary.BigEndian.PutUint32(msg, uint32(len(req)))
+ copy(msg[4:], req)
+ if _, err = c.conn.Write(msg); err != nil {
+ return nil, clientErr(err)
+ }
+
+ var respSizeBuf [4]byte
+ if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil {
+ return nil, clientErr(err)
+ }
+ respSize := binary.BigEndian.Uint32(respSizeBuf[:])
+ if respSize > maxAgentResponseBytes {
+ return nil, clientErr(err)
+ }
+
+ buf := make([]byte, respSize)
+ if _, err = io.ReadFull(c.conn, buf); err != nil {
+ return nil, clientErr(err)
+ }
+ reply, err = unmarshal(buf)
+ if err != nil {
+ return nil, clientErr(err)
+ }
+ return reply, err
+}
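+
+// Framing sketch: per [PROTOCOL.agent], every request and reply is a
+// big-endian uint32 length followed by the message body, whose first byte
+// is the type tag. For example, a List request is the five bytes
+//
+//	00 00 00 01 0b // length = 1, agentRequestIdentities (11)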
+
+func (c *client) simpleCall(req []byte) error {
+ resp, err := c.call(req)
+ if err != nil {
+ return err
+ }
+ if _, ok := resp.(*successAgentMsg); ok {
+ return nil
+ }
+ return errors.New("agent: failure")
+}
+
+func (c *client) RemoveAll() error {
+ return c.simpleCall([]byte{agentRemoveAllIdentities})
+}
+
+func (c *client) Remove(key ssh.PublicKey) error {
+ req := ssh.Marshal(&agentRemoveIdentityMsg{
+ KeyBlob: key.Marshal(),
+ })
+ return c.simpleCall(req)
+}
+
+func (c *client) Lock(passphrase []byte) error {
+ req := ssh.Marshal(&agentLockMsg{
+ Passphrase: passphrase,
+ })
+ return c.simpleCall(req)
+}
+
+func (c *client) Unlock(passphrase []byte) error {
+ req := ssh.Marshal(&agentUnlockMsg{
+ Passphrase: passphrase,
+ })
+ return c.simpleCall(req)
+}
+
+// List returns the identities known to the agent.
+func (c *client) List() ([]*Key, error) {
+ // see [PROTOCOL.agent] section 2.5.2.
+ req := []byte{agentRequestIdentities}
+
+ msg, err := c.call(req)
+ if err != nil {
+ return nil, err
+ }
+
+ switch msg := msg.(type) {
+ case *identitiesAnswerAgentMsg:
+ if msg.NumKeys > maxAgentResponseBytes/8 {
+ return nil, errors.New("agent: too many keys in agent reply")
+ }
+ keys := make([]*Key, msg.NumKeys)
+ data := msg.Keys
+ for i := uint32(0); i < msg.NumKeys; i++ {
+ var key *Key
+ var err error
+ if key, data, err = parseKey(data); err != nil {
+ return nil, err
+ }
+ keys[i] = key
+ }
+ return keys, nil
+ case *failureAgentMsg:
+ return nil, errors.New("agent: failed to list keys")
+ }
+ panic("unreachable")
+}
+
+// Sign has the agent sign the data using a protocol 2 key as defined
+// in [PROTOCOL.agent] section 2.6.2.
+func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
+ req := ssh.Marshal(signRequestAgentMsg{
+ KeyBlob: key.Marshal(),
+ Data: data,
+ })
+
+ msg, err := c.call(req)
+ if err != nil {
+ return nil, err
+ }
+
+ switch msg := msg.(type) {
+ case *signResponseAgentMsg:
+ var sig ssh.Signature
+ if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil {
+ return nil, err
+ }
+
+ return &sig, nil
+ case *failureAgentMsg:
+ return nil, errors.New("agent: failed to sign challenge")
+ }
+ panic("unreachable")
+}
+
+// unmarshal parses an agent message in packet, returning the parsed
+// form; the message type is taken from the first byte of packet.
+func unmarshal(packet []byte) (interface{}, error) {
+ if len(packet) < 1 {
+ return nil, errors.New("agent: empty packet")
+ }
+ var msg interface{}
+ switch packet[0] {
+ case agentFailure:
+ return new(failureAgentMsg), nil
+ case agentSuccess:
+ return new(successAgentMsg), nil
+ case agentIdentitiesAnswer:
+ msg = new(identitiesAnswerAgentMsg)
+ case agentSignResponse:
+ msg = new(signResponseAgentMsg)
+ default:
+ return nil, fmt.Errorf("agent: unknown type tag %d", packet[0])
+ }
+ if err := ssh.Unmarshal(packet, msg); err != nil {
+ return nil, err
+ }
+ return msg, nil
+}
+
+type rsaKeyMsg struct {
+ Type string `sshtype:"17"`
+ N *big.Int
+ E *big.Int
+ D *big.Int
+ Iqmp *big.Int // IQMP = Inverse Q Mod P
+ P *big.Int
+ Q *big.Int
+ Comments string
+ Constraints []byte `ssh:"rest"`
+}
+
+type dsaKeyMsg struct {
+ Type string `sshtype:"17"`
+ P *big.Int
+ Q *big.Int
+ G *big.Int
+ Y *big.Int
+ X *big.Int
+ Comments string
+ Constraints []byte `ssh:"rest"`
+}
+
+type ecdsaKeyMsg struct {
+ Type string `sshtype:"17"`
+ Curve string
+ KeyBytes []byte
+ D *big.Int
+ Comments string
+ Constraints []byte `ssh:"rest"`
+}
+
+// insertKey adds a private key to the agent.
+func (c *client) insertKey(s interface{}, comment string, constraints []byte) error {
+ var req []byte
+ switch k := s.(type) {
+ case *rsa.PrivateKey:
+ if len(k.Primes) != 2 {
+ return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
+ }
+ k.Precompute()
+ req = ssh.Marshal(rsaKeyMsg{
+ Type: ssh.KeyAlgoRSA,
+ N: k.N,
+ E: big.NewInt(int64(k.E)),
+ D: k.D,
+ Iqmp: k.Precomputed.Qinv,
+ P: k.Primes[0],
+ Q: k.Primes[1],
+ Comments: comment,
+ Constraints: constraints,
+ })
+ case *dsa.PrivateKey:
+ req = ssh.Marshal(dsaKeyMsg{
+ Type: ssh.KeyAlgoDSA,
+ P: k.P,
+ Q: k.Q,
+ G: k.G,
+ Y: k.Y,
+ X: k.X,
+ Comments: comment,
+ Constraints: constraints,
+ })
+ case *ecdsa.PrivateKey:
+ nistID := fmt.Sprintf("nistp%d", k.Params().BitSize)
+ req = ssh.Marshal(ecdsaKeyMsg{
+ Type: "ecdsa-sha2-" + nistID,
+ Curve: nistID,
+ KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y),
+ D: k.D,
+ Comments: comment,
+ Constraints: constraints,
+ })
+ default:
+ return fmt.Errorf("agent: unsupported key type %T", s)
+ }
+
+ // if constraints are present then the message type needs to be changed.
+ if len(constraints) != 0 {
+ req[0] = agentAddIdConstrained
+ }
+
+ resp, err := c.call(req)
+ if err != nil {
+ return err
+ }
+ if _, ok := resp.(*successAgentMsg); ok {
+ return nil
+ }
+ return errors.New("agent: failure")
+}
+
+type rsaCertMsg struct {
+ Type string `sshtype:"17"`
+ CertBytes []byte
+ D *big.Int
+ Iqmp *big.Int // IQMP = Inverse Q Mod P
+ P *big.Int
+ Q *big.Int
+ Comments string
+ Constraints []byte `ssh:"rest"`
+}
+
+type dsaCertMsg struct {
+ Type string `sshtype:"17"`
+ CertBytes []byte
+ X *big.Int
+ Comments string
+ Constraints []byte `ssh:"rest"`
+}
+
+type ecdsaCertMsg struct {
+ Type string `sshtype:"17"`
+ CertBytes []byte
+ D *big.Int
+ Comments string
+ Constraints []byte `ssh:"rest"`
+}
+
+// Add adds a private key to the agent. If a certificate is given,
+// that certificate is added as the public key instead.
+func (c *client) Add(key AddedKey) error {
+ var constraints []byte
+
+ if secs := key.LifetimeSecs; secs != 0 {
+ constraints = append(constraints, agentConstrainLifetime)
+
+ var secsBytes [4]byte
+ binary.BigEndian.PutUint32(secsBytes[:], secs)
+ constraints = append(constraints, secsBytes[:]...)
+ }
+
+ if key.ConfirmBeforeUse {
+ constraints = append(constraints, agentConstrainConfirm)
+ }
+
+ if cert := key.Certificate; cert == nil {
+ return c.insertKey(key.PrivateKey, key.Comment, constraints)
+ } else {
+ return c.insertCert(key.PrivateKey, cert, key.Comment, constraints)
+ }
+}
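+
+// Constraint encoding sketch (derived from the code above): a one-hour
+// lifetime plus confirm-before-use serializes to
+//
+//	[]byte{agentConstrainLifetime, 0x00, 0x00, 0x0e, 0x10, agentConstrainConfirm}
+//
+// i.e. constraint byte 1, big-endian 3600, then constraint byte 2.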
+
+func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error {
+ var req []byte
+ switch k := s.(type) {
+ case *rsa.PrivateKey:
+ if len(k.Primes) != 2 {
+ return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes))
+ }
+ k.Precompute()
+ req = ssh.Marshal(rsaCertMsg{
+ Type: cert.Type(),
+ CertBytes: cert.Marshal(),
+ D: k.D,
+ Iqmp: k.Precomputed.Qinv,
+ P: k.Primes[0],
+ Q: k.Primes[1],
+ Comments: comment,
+ Constraints: constraints,
+ })
+ case *dsa.PrivateKey:
+ req = ssh.Marshal(dsaCertMsg{
+ Type: cert.Type(),
+ CertBytes: cert.Marshal(),
+ X: k.X,
+ Comments: comment,
+ })
+ case *ecdsa.PrivateKey:
+ req = ssh.Marshal(ecdsaCertMsg{
+ Type: cert.Type(),
+ CertBytes: cert.Marshal(),
+ D: k.D,
+ Comments: comment,
+ })
+ default:
+ return fmt.Errorf("agent: unsupported key type %T", s)
+ }
+
+ // if constraints are present then the message type needs to be changed.
+ if len(constraints) != 0 {
+ req[0] = agentAddIdConstrained
+ }
+
+ signer, err := ssh.NewSignerFromKey(s)
+ if err != nil {
+ return err
+ }
+	if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) {
+ return errors.New("agent: signer and cert have different public key")
+ }
+
+ resp, err := c.call(req)
+ if err != nil {
+ return err
+ }
+ if _, ok := resp.(*successAgentMsg); ok {
+ return nil
+ }
+ return errors.New("agent: failure")
+}
+
+// Signers provides a callback for client authentication.
+func (c *client) Signers() ([]ssh.Signer, error) {
+ keys, err := c.List()
+ if err != nil {
+ return nil, err
+ }
+
+ var result []ssh.Signer
+ for _, k := range keys {
+ result = append(result, &agentKeyringSigner{c, k})
+ }
+ return result, nil
+}
+
+type agentKeyringSigner struct {
+ agent *client
+ pub ssh.PublicKey
+}
+
+func (s *agentKeyringSigner) PublicKey() ssh.PublicKey {
+ return s.pub
+}
+
+func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) {
+ // The agent has its own entropy source, so the rand argument is ignored.
+ return s.agent.Sign(s.pub, data)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/client_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/client_test.go
new file mode 100644
index 00000000000..ec7198d549d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/client_test.go
@@ -0,0 +1,287 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package agent
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "testing"
+
+ "golang.org/x/crypto/ssh"
+)
+
+// startAgent executes ssh-agent, and returns an Agent interface to it.
+func startAgent(t *testing.T) (client Agent, socket string, cleanup func()) {
+ if testing.Short() {
+ // ssh-agent is not always available, and the key
+ // types supported vary by platform.
+ t.Skip("skipping test due to -short")
+ }
+
+ bin, err := exec.LookPath("ssh-agent")
+ if err != nil {
+ t.Skip("could not find ssh-agent")
+ }
+
+ cmd := exec.Command(bin, "-s")
+ out, err := cmd.Output()
+ if err != nil {
+ t.Fatalf("cmd.Output: %v", err)
+ }
+
+ /* Output looks like:
+
+ SSH_AUTH_SOCK=/tmp/ssh-P65gpcqArqvH/agent.15541; export SSH_AUTH_SOCK;
+ SSH_AGENT_PID=15542; export SSH_AGENT_PID;
+ echo Agent pid 15542;
+ */
+ fields := bytes.Split(out, []byte(";"))
+ line := bytes.SplitN(fields[0], []byte("="), 2)
+ line[0] = bytes.TrimLeft(line[0], "\n")
+ if string(line[0]) != "SSH_AUTH_SOCK" {
+ t.Fatalf("could not find key SSH_AUTH_SOCK in %q", fields[0])
+ }
+ socket = string(line[1])
+
+ line = bytes.SplitN(fields[2], []byte("="), 2)
+ line[0] = bytes.TrimLeft(line[0], "\n")
+ if string(line[0]) != "SSH_AGENT_PID" {
+ t.Fatalf("could not find key SSH_AGENT_PID in %q", fields[2])
+ }
+ pidStr := line[1]
+ pid, err := strconv.Atoi(string(pidStr))
+ if err != nil {
+ t.Fatalf("Atoi(%q): %v", pidStr, err)
+ }
+
+ conn, err := net.Dial("unix", string(socket))
+ if err != nil {
+ t.Fatalf("net.Dial: %v", err)
+ }
+
+ ac := NewClient(conn)
+ return ac, socket, func() {
+ proc, _ := os.FindProcess(pid)
+ if proc != nil {
+ proc.Kill()
+ }
+ conn.Close()
+ os.RemoveAll(filepath.Dir(socket))
+ }
+}
+
+func testAgent(t *testing.T, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
+ agent, _, cleanup := startAgent(t)
+ defer cleanup()
+
+ testAgentInterface(t, agent, key, cert, lifetimeSecs)
+}
+
+func testAgentInterface(t *testing.T, agent Agent, key interface{}, cert *ssh.Certificate, lifetimeSecs uint32) {
+ signer, err := ssh.NewSignerFromKey(key)
+ if err != nil {
+ t.Fatalf("NewSignerFromKey(%T): %v", key, err)
+ }
+ // The agent should start up empty.
+ if keys, err := agent.List(); err != nil {
+ t.Fatalf("RequestIdentities: %v", err)
+ } else if len(keys) > 0 {
+ t.Fatalf("got %d keys, want 0: %v", len(keys), keys)
+ }
+
+ // Attempt to insert the key, with certificate if specified.
+ var pubKey ssh.PublicKey
+ if cert != nil {
+ err = agent.Add(AddedKey{
+ PrivateKey: key,
+ Certificate: cert,
+ Comment: "comment",
+ LifetimeSecs: lifetimeSecs,
+ })
+ pubKey = cert
+ } else {
+ err = agent.Add(AddedKey{PrivateKey: key, Comment: "comment", LifetimeSecs: lifetimeSecs})
+ pubKey = signer.PublicKey()
+ }
+ if err != nil {
+ t.Fatalf("insert(%T): %v", key, err)
+ }
+
+ // Did the key get inserted successfully?
+ if keys, err := agent.List(); err != nil {
+ t.Fatalf("List: %v", err)
+ } else if len(keys) != 1 {
+ t.Fatalf("got %v, want 1 key", keys)
+ } else if keys[0].Comment != "comment" {
+ t.Fatalf("key comment: got %v, want %v", keys[0].Comment, "comment")
+ } else if !bytes.Equal(keys[0].Blob, pubKey.Marshal()) {
+ t.Fatalf("key mismatch")
+ }
+
+ // Can the agent make a valid signature?
+ data := []byte("hello")
+ sig, err := agent.Sign(pubKey, data)
+ if err != nil {
+ t.Fatalf("Sign(%s): %v", pubKey.Type(), err)
+ }
+
+ if err := pubKey.Verify(data, sig); err != nil {
+ t.Fatalf("Verify(%s): %v", pubKey.Type(), err)
+ }
+}
+
+func TestAgent(t *testing.T) {
+ for _, keyType := range []string{"rsa", "dsa", "ecdsa"} {
+ testAgent(t, testPrivateKeys[keyType], nil, 0)
+ }
+}
+
+func TestCert(t *testing.T) {
+ cert := &ssh.Certificate{
+ Key: testPublicKeys["rsa"],
+ ValidBefore: ssh.CertTimeInfinity,
+ CertType: ssh.UserCert,
+ }
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+
+ testAgent(t, testPrivateKeys["rsa"], cert, 0)
+}
+
+func TestConstraints(t *testing.T) {
+ testAgent(t, testPrivateKeys["rsa"], nil, 3600 /* lifetime in seconds */)
+}
+
+// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and
+// therefore is buffered (net.Pipe deadlocks if both sides start with
+// a write.)
+func netPipe() (net.Conn, net.Conn, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, nil, err
+ }
+ defer listener.Close()
+ c1, err := net.Dial("tcp", listener.Addr().String())
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c2, err := listener.Accept()
+ if err != nil {
+ c1.Close()
+ return nil, nil, err
+ }
+
+ return c1, c2, nil
+}
+
+func TestAuth(t *testing.T) {
+ a, b, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+
+ defer a.Close()
+ defer b.Close()
+
+ agent, _, cleanup := startAgent(t)
+ defer cleanup()
+
+ if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment"}); err != nil {
+ t.Errorf("Add: %v", err)
+ }
+
+ serverConf := ssh.ServerConfig{}
+ serverConf.AddHostKey(testSigners["rsa"])
+ serverConf.PublicKeyCallback = func(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
+ if bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) {
+ return nil, nil
+ }
+
+ return nil, errors.New("pubkey rejected")
+ }
+
+ go func() {
+ conn, _, _, err := ssh.NewServerConn(a, &serverConf)
+ if err != nil {
+ t.Fatalf("Server: %v", err)
+ }
+ conn.Close()
+ }()
+
+ conf := ssh.ClientConfig{}
+ conf.Auth = append(conf.Auth, ssh.PublicKeysCallback(agent.Signers))
+ conn, _, _, err := ssh.NewClientConn(b, "", &conf)
+ if err != nil {
+ t.Fatalf("NewClientConn: %v", err)
+ }
+ conn.Close()
+}
+
+func TestLockClient(t *testing.T) {
+ agent, _, cleanup := startAgent(t)
+ defer cleanup()
+ testLockAgent(agent, t)
+}
+
+func testLockAgent(agent Agent, t *testing.T) {
+ if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["rsa"], Comment: "comment 1"}); err != nil {
+ t.Errorf("Add: %v", err)
+ }
+ if err := agent.Add(AddedKey{PrivateKey: testPrivateKeys["dsa"], Comment: "comment dsa"}); err != nil {
+ t.Errorf("Add: %v", err)
+ }
+ if keys, err := agent.List(); err != nil {
+ t.Errorf("List: %v", err)
+ } else if len(keys) != 2 {
+ t.Errorf("Want 2 keys, got %v", keys)
+ }
+
+ passphrase := []byte("secret")
+ if err := agent.Lock(passphrase); err != nil {
+ t.Errorf("Lock: %v", err)
+ }
+
+ if keys, err := agent.List(); err != nil {
+ t.Errorf("List: %v", err)
+ } else if len(keys) != 0 {
+ t.Errorf("Want 0 keys, got %v", keys)
+ }
+
+ signer, _ := ssh.NewSignerFromKey(testPrivateKeys["rsa"])
+ if _, err := agent.Sign(signer.PublicKey(), []byte("hello")); err == nil {
+ t.Fatalf("Sign did not fail")
+ }
+
+ if err := agent.Remove(signer.PublicKey()); err == nil {
+ t.Fatalf("Remove did not fail")
+ }
+
+ if err := agent.RemoveAll(); err == nil {
+ t.Fatalf("RemoveAll did not fail")
+ }
+
+ if err := agent.Unlock(nil); err == nil {
+ t.Errorf("Unlock with wrong passphrase succeeded")
+ }
+ if err := agent.Unlock(passphrase); err != nil {
+ t.Errorf("Unlock: %v", err)
+ }
+
+ if err := agent.Remove(signer.PublicKey()); err != nil {
+ t.Fatalf("Remove: %v", err)
+ }
+
+ if keys, err := agent.List(); err != nil {
+ t.Errorf("List: %v", err)
+ } else if len(keys) != 1 {
+ t.Errorf("Want 1 keys, got %v", keys)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/forward.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/forward.go
new file mode 100644
index 00000000000..fd24ba900d2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/forward.go
@@ -0,0 +1,103 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package agent
+
+import (
+ "errors"
+ "io"
+ "net"
+ "sync"
+
+ "golang.org/x/crypto/ssh"
+)
+
+// RequestAgentForwarding sets up agent forwarding for the session.
+// ForwardToAgent or ForwardToRemote should be called to route
+// the authentication requests.
+func RequestAgentForwarding(session *ssh.Session) error {
+ ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil)
+ if err != nil {
+ return err
+ }
+ if !ok {
+ return errors.New("forwarding request denied")
+ }
+ return nil
+}
+
+// ForwardToAgent routes authentication requests to the given keyring.
+func ForwardToAgent(client *ssh.Client, keyring Agent) error {
+ channels := client.HandleChannelOpen(channelType)
+ if channels == nil {
+ return errors.New("agent: already have handler for " + channelType)
+ }
+
+ go func() {
+ for ch := range channels {
+ channel, reqs, err := ch.Accept()
+ if err != nil {
+ continue
+ }
+ go ssh.DiscardRequests(reqs)
+ go func() {
+ ServeAgent(keyring, channel)
+ channel.Close()
+ }()
+ }
+ }()
+ return nil
+}
+
+const channelType = "auth-agent@openssh.com"
+
+// ForwardToRemote routes authentication requests to the ssh-agent
+// process serving on the given unix socket.
+func ForwardToRemote(client *ssh.Client, addr string) error {
+ channels := client.HandleChannelOpen(channelType)
+ if channels == nil {
+ return errors.New("agent: already have handler for " + channelType)
+ }
+ conn, err := net.Dial("unix", addr)
+ if err != nil {
+ return err
+ }
+ conn.Close()
+
+ go func() {
+ for ch := range channels {
+ channel, reqs, err := ch.Accept()
+ if err != nil {
+ continue
+ }
+ go ssh.DiscardRequests(reqs)
+ go forwardUnixSocket(channel, addr)
+ }
+ }()
+ return nil
+}
+
+func forwardUnixSocket(channel ssh.Channel, addr string) {
+ conn, err := net.Dial("unix", addr)
+ if err != nil {
+ return
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ io.Copy(conn, channel)
+ conn.(*net.UnixConn).CloseWrite()
+ wg.Done()
+ }()
+ go func() {
+ io.Copy(channel, conn)
+ channel.CloseWrite()
+ wg.Done()
+ }()
+
+ wg.Wait()
+ conn.Close()
+ channel.Close()
+}
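+
+// Typical client-side wiring (a sketch; error handling elided, and client
+// is assumed to be an established *ssh.Client):
+//
+//	_ = ForwardToAgent(client, NewKeyring())
+//	session, _ := client.NewSession()
+//	_ = RequestAgentForwarding(session)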
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/keyring.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/keyring.go
new file mode 100644
index 00000000000..12ffa82b1a0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/keyring.go
@@ -0,0 +1,184 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package agent
+
+import (
+ "bytes"
+ "crypto/rand"
+ "crypto/subtle"
+ "errors"
+ "fmt"
+ "sync"
+
+ "golang.org/x/crypto/ssh"
+)
+
+type privKey struct {
+ signer ssh.Signer
+ comment string
+}
+
+type keyring struct {
+ mu sync.Mutex
+ keys []privKey
+
+ locked bool
+ passphrase []byte
+}
+
+var errLocked = errors.New("agent: locked")
+
+// NewKeyring returns an Agent that holds keys in memory. It is safe
+// for concurrent use by multiple goroutines.
+func NewKeyring() Agent {
+ return &keyring{}
+}
+
+// RemoveAll removes all identities.
+func (r *keyring) RemoveAll() error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.locked {
+ return errLocked
+ }
+
+ r.keys = nil
+ return nil
+}
+
+// Remove removes all identities with the given public key.
+func (r *keyring) Remove(key ssh.PublicKey) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.locked {
+ return errLocked
+ }
+
+ want := key.Marshal()
+ found := false
+ for i := 0; i < len(r.keys); {
+ if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) {
+ found = true
+ r.keys[i] = r.keys[len(r.keys)-1]
+ r.keys = r.keys[:len(r.keys)-1]
+ } else {
+ i++
+ }
+ }
+
+ if !found {
+ return errors.New("agent: key not found")
+ }
+ return nil
+}
+
+// Lock locks the agent. Sign and Remove will fail, and List will return an empty list.
+func (r *keyring) Lock(passphrase []byte) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.locked {
+ return errLocked
+ }
+
+ r.locked = true
+ r.passphrase = passphrase
+ return nil
+}
+
+// Unlock undoes the effect of Lock
+func (r *keyring) Unlock(passphrase []byte) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if !r.locked {
+ return errors.New("agent: not locked")
+ }
+ if len(passphrase) != len(r.passphrase) || 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) {
+ return fmt.Errorf("agent: incorrect passphrase")
+ }
+
+ r.locked = false
+ r.passphrase = nil
+ return nil
+}
+
+// List returns the identities known to the agent.
+func (r *keyring) List() ([]*Key, error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.locked {
+ // section 2.7: locked agents return empty.
+ return nil, nil
+ }
+
+ var ids []*Key
+ for _, k := range r.keys {
+ pub := k.signer.PublicKey()
+ ids = append(ids, &Key{
+ Format: pub.Type(),
+ Blob: pub.Marshal(),
+ Comment: k.comment})
+ }
+ return ids, nil
+}
+
+// Add adds a private key to the keyring. If a certificate
+// is given, that certificate is added as the public key. Note that
+// any constraints given are ignored.
+func (r *keyring) Add(key AddedKey) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.locked {
+ return errLocked
+ }
+ signer, err := ssh.NewSignerFromKey(key.PrivateKey)
+
+ if err != nil {
+ return err
+ }
+
+ if cert := key.Certificate; cert != nil {
+ signer, err = ssh.NewCertSigner(cert, signer)
+ if err != nil {
+ return err
+ }
+ }
+
+ r.keys = append(r.keys, privKey{signer, key.Comment})
+
+ return nil
+}
+
+// Sign returns a signature for the data.
+func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.locked {
+ return nil, errLocked
+ }
+
+ wanted := key.Marshal()
+ for _, k := range r.keys {
+ if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) {
+ return k.signer.Sign(rand.Reader, data)
+ }
+ }
+ return nil, errors.New("not found")
+}
+
+// Signers returns signers for all the known keys.
+func (r *keyring) Signers() ([]ssh.Signer, error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.locked {
+ return nil, errLocked
+ }
+
+ s := make([]ssh.Signer, 0, len(r.keys))
+ for _, k := range r.keys {
+ s = append(s, k.signer)
+ }
+ return s, nil
+}
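+
+// Usage sketch (it assumes priv is a parsed *rsa.PrivateKey): an in-memory
+// keyring can back public-key client authentication directly.
+//
+//	kr := NewKeyring()
+//	_ = kr.Add(AddedKey{PrivateKey: priv, Comment: "example"})
+//	cfg := &ssh.ClientConfig{
+//		Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(kr.Signers)},
+//	}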
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/keyring_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/keyring_test.go
new file mode 100644
index 00000000000..7f05905712d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/keyring_test.go
@@ -0,0 +1,78 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package agent
+
+import (
+ "testing"
+)
+
+func addTestKey(t *testing.T, a Agent, keyName string) {
+ err := a.Add(AddedKey{
+ PrivateKey: testPrivateKeys[keyName],
+ Comment: keyName,
+ })
+ if err != nil {
+ t.Fatalf("failed to add key %q: %v", keyName, err)
+ }
+}
+
+func removeTestKey(t *testing.T, a Agent, keyName string) {
+ err := a.Remove(testPublicKeys[keyName])
+ if err != nil {
+ t.Fatalf("failed to remove key %q: %v", keyName, err)
+ }
+}
+
+func validateListedKeys(t *testing.T, a Agent, expectedKeys []string) {
+ listedKeys, err := a.List()
+ if err != nil {
+ t.Fatalf("failed to list keys: %v", err)
+ return
+ }
+ actualKeys := make(map[string]bool)
+ for _, key := range listedKeys {
+ actualKeys[key.Comment] = true
+ }
+
+ matchedKeys := make(map[string]bool)
+ for _, expectedKey := range expectedKeys {
+ if !actualKeys[expectedKey] {
+ t.Fatalf("expected key %q, but was not found", expectedKey)
+ } else {
+ matchedKeys[expectedKey] = true
+ }
+ }
+
+ for actualKey := range actualKeys {
+ if !matchedKeys[actualKey] {
+ t.Fatalf("key %q was found, but was not expected", actualKey)
+ }
+ }
+}
+
+func TestKeyringAddingAndRemoving(t *testing.T) {
+ keyNames := []string{"dsa", "ecdsa", "rsa", "user"}
+
+ // add all test private keys
+ k := NewKeyring()
+ for _, keyName := range keyNames {
+ addTestKey(t, k, keyName)
+ }
+ validateListedKeys(t, k, keyNames)
+
+ // remove a key in the middle
+ keyToRemove := keyNames[1]
+ keyNames = append(keyNames[:1], keyNames[2:]...)
+
+ removeTestKey(t, k, keyToRemove)
+ validateListedKeys(t, k, keyNames)
+
+ // remove all keys
+ err := k.RemoveAll()
+ if err != nil {
+ t.Fatalf("failed to remove all keys: %v", err)
+ }
+ validateListedKeys(t, k, []string{})
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/server.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/server.go
new file mode 100644
index 00000000000..b21a20180fe
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/server.go
@@ -0,0 +1,209 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package agent
+
+import (
+ "crypto/rsa"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "log"
+ "math/big"
+
+ "golang.org/x/crypto/ssh"
+)
+
+// server wraps an Agent and uses it to implement the agent side of
+// the SSH agent wire protocol.
+type server struct {
+ agent Agent
+}
+
+func (s *server) processRequestBytes(reqData []byte) []byte {
+ rep, err := s.processRequest(reqData)
+ if err != nil {
+ if err != errLocked {
+ // TODO(hanwen): provide better logging interface?
+ log.Printf("agent %d: %v", reqData[0], err)
+ }
+ return []byte{agentFailure}
+ }
+
+	if rep == nil {
+ return []byte{agentSuccess}
+ }
+
+ return ssh.Marshal(rep)
+}
+
+func marshalKey(k *Key) []byte {
+ var record struct {
+ Blob []byte
+ Comment string
+ }
+ record.Blob = k.Marshal()
+ record.Comment = k.Comment
+
+ return ssh.Marshal(&record)
+}
+
+type agentV1IdentityMsg struct {
+ Numkeys uint32 `sshtype:"2"`
+}
+
+type agentRemoveIdentityMsg struct {
+ KeyBlob []byte `sshtype:"18"`
+}
+
+type agentLockMsg struct {
+ Passphrase []byte `sshtype:"22"`
+}
+
+type agentUnlockMsg struct {
+ Passphrase []byte `sshtype:"23"`
+}
+
+func (s *server) processRequest(data []byte) (interface{}, error) {
+ switch data[0] {
+ case agentRequestV1Identities:
+ return &agentV1IdentityMsg{0}, nil
+ case agentRemoveIdentity:
+ var req agentRemoveIdentityMsg
+ if err := ssh.Unmarshal(data, &req); err != nil {
+ return nil, err
+ }
+
+ var wk wireKey
+ if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
+ return nil, err
+ }
+
+ return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob})
+
+ case agentRemoveAllIdentities:
+ return nil, s.agent.RemoveAll()
+
+ case agentLock:
+ var req agentLockMsg
+ if err := ssh.Unmarshal(data, &req); err != nil {
+ return nil, err
+ }
+
+ return nil, s.agent.Lock(req.Passphrase)
+
+ case agentUnlock:
+		var req agentUnlockMsg
+ if err := ssh.Unmarshal(data, &req); err != nil {
+ return nil, err
+ }
+ return nil, s.agent.Unlock(req.Passphrase)
+
+ case agentSignRequest:
+ var req signRequestAgentMsg
+ if err := ssh.Unmarshal(data, &req); err != nil {
+ return nil, err
+ }
+
+ var wk wireKey
+ if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil {
+ return nil, err
+ }
+
+ k := &Key{
+ Format: wk.Format,
+ Blob: req.KeyBlob,
+ }
+
+ sig, err := s.agent.Sign(k, req.Data) // TODO(hanwen): flags.
+ if err != nil {
+ return nil, err
+ }
+ return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil
+ case agentRequestIdentities:
+ keys, err := s.agent.List()
+ if err != nil {
+ return nil, err
+ }
+
+ rep := identitiesAnswerAgentMsg{
+ NumKeys: uint32(len(keys)),
+ }
+ for _, k := range keys {
+ rep.Keys = append(rep.Keys, marshalKey(k)...)
+ }
+ return rep, nil
+ case agentAddIdentity:
+ return nil, s.insertIdentity(data)
+ }
+
+ return nil, fmt.Errorf("unknown opcode %d", data[0])
+}
+
+func (s *server) insertIdentity(req []byte) error {
+ var record struct {
+ Type string `sshtype:"17"`
+ Rest []byte `ssh:"rest"`
+ }
+ if err := ssh.Unmarshal(req, &record); err != nil {
+ return err
+ }
+
+ switch record.Type {
+ case ssh.KeyAlgoRSA:
+ var k rsaKeyMsg
+ if err := ssh.Unmarshal(req, &k); err != nil {
+ return err
+ }
+
+ priv := rsa.PrivateKey{
+ PublicKey: rsa.PublicKey{
+ E: int(k.E.Int64()),
+ N: k.N,
+ },
+ D: k.D,
+ Primes: []*big.Int{k.P, k.Q},
+ }
+ priv.Precompute()
+
+ return s.agent.Add(AddedKey{PrivateKey: &priv, Comment: k.Comments})
+ }
+ return fmt.Errorf("not implemented: %s", record.Type)
+}
+
+// ServeAgent serves the agent protocol on the given connection. It
+// returns when an I/O error occurs.
+func ServeAgent(agent Agent, c io.ReadWriter) error {
+ s := &server{agent}
+
+ var length [4]byte
+ for {
+ if _, err := io.ReadFull(c, length[:]); err != nil {
+ return err
+ }
+ l := binary.BigEndian.Uint32(length[:])
+ if l > maxAgentResponseBytes {
+ // We also cap requests.
+ return fmt.Errorf("agent: request too large: %d", l)
+ }
+
+ req := make([]byte, l)
+ if _, err := io.ReadFull(c, req); err != nil {
+ return err
+ }
+
+ repData := s.processRequestBytes(req)
+ if len(repData) > maxAgentResponseBytes {
+ return fmt.Errorf("agent: reply too large: %d bytes", len(repData))
+ }
+
+ binary.BigEndian.PutUint32(length[:], uint32(len(repData)))
+ if _, err := c.Write(length[:]); err != nil {
+ return err
+ }
+ if _, err := c.Write(repData); err != nil {
+ return err
+ }
+ }
+}
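+
+// Serving sketch (it assumes l is a net.Listener, e.g. on a Unix socket):
+//
+//	keyring := NewKeyring()
+//	for {
+//		c, err := l.Accept()
+//		if err != nil {
+//			break
+//		}
+//		go ServeAgent(keyring, c)
+//	}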
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/server_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/server_test.go
new file mode 100644
index 00000000000..ef0ab293487
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/server_test.go
@@ -0,0 +1,77 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package agent
+
+import (
+ "testing"
+
+ "golang.org/x/crypto/ssh"
+)
+
+func TestServer(t *testing.T) {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+ client := NewClient(c1)
+
+ go ServeAgent(NewKeyring(), c2)
+
+ testAgentInterface(t, client, testPrivateKeys["rsa"], nil, 0)
+}
+
+func TestLockServer(t *testing.T) {
+ testLockAgent(NewKeyring(), t)
+}
+
+func TestSetupForwardAgent(t *testing.T) {
+ a, b, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+
+ defer a.Close()
+ defer b.Close()
+
+ _, socket, cleanup := startAgent(t)
+ defer cleanup()
+
+ serverConf := ssh.ServerConfig{
+ NoClientAuth: true,
+ }
+ serverConf.AddHostKey(testSigners["rsa"])
+ incoming := make(chan *ssh.ServerConn, 1)
+ go func() {
+ conn, _, _, err := ssh.NewServerConn(a, &serverConf)
+ if err != nil {
+ t.Fatalf("Server: %v", err)
+ }
+ incoming <- conn
+ }()
+
+ conf := ssh.ClientConfig{}
+ conn, chans, reqs, err := ssh.NewClientConn(b, "", &conf)
+ if err != nil {
+ t.Fatalf("NewClientConn: %v", err)
+ }
+ client := ssh.NewClient(conn, chans, reqs)
+
+ if err := ForwardToRemote(client, socket); err != nil {
+ t.Fatalf("SetupForwardAgent: %v", err)
+ }
+
+ server := <-incoming
+ ch, reqs, err := server.OpenChannel(channelType, nil)
+ if err != nil {
+ t.Fatalf("OpenChannel(%q): %v", channelType, err)
+ }
+ go ssh.DiscardRequests(reqs)
+
+ agentClient := NewClient(ch)
+ testAgentInterface(t, agentClient, testPrivateKeys["rsa"], nil, 0)
+ conn.Close()
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/testdata_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/testdata_test.go
new file mode 100644
index 00000000000..b7a8781e1a5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/agent/testdata_test.go
@@ -0,0 +1,64 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places:
+// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
+// instances.
+
+package agent
+
+import (
+ "crypto/rand"
+ "fmt"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/testdata"
+)
+
+var (
+ testPrivateKeys map[string]interface{}
+ testSigners map[string]ssh.Signer
+ testPublicKeys map[string]ssh.PublicKey
+)
+
+func init() {
+ var err error
+
+ n := len(testdata.PEMBytes)
+ testPrivateKeys = make(map[string]interface{}, n)
+ testSigners = make(map[string]ssh.Signer, n)
+ testPublicKeys = make(map[string]ssh.PublicKey, n)
+ for t, k := range testdata.PEMBytes {
+ testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
+ }
+ testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
+ }
+ testPublicKeys[t] = testSigners[t].PublicKey()
+ }
+
+ // Create a cert and sign it for use in tests.
+ testCert := &ssh.Certificate{
+ Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
+ ValidAfter: 0, // unix epoch
+ ValidBefore: ssh.CertTimeInfinity, // The end of currently representable time.
+ Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ Key: testPublicKeys["ecdsa"],
+ SignatureKey: testPublicKeys["rsa"],
+ Permissions: ssh.Permissions{
+ CriticalOptions: map[string]string{},
+ Extensions: map[string]string{},
+ },
+ }
+ testCert.SignCert(rand.Reader, testSigners["rsa"])
+ testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
+ testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/benchmark_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/benchmark_test.go
new file mode 100644
index 00000000000..d9f7eb9b60a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/benchmark_test.go
@@ -0,0 +1,122 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "errors"
+ "io"
+ "net"
+ "testing"
+)
+
+type server struct {
+ *ServerConn
+ chans <-chan NewChannel
+}
+
+func newServer(c net.Conn, conf *ServerConfig) (*server, error) {
+ sconn, chans, reqs, err := NewServerConn(c, conf)
+ if err != nil {
+ return nil, err
+ }
+ go DiscardRequests(reqs)
+ return &server{sconn, chans}, nil
+}
+
+func (s *server) Accept() (NewChannel, error) {
+ n, ok := <-s.chans
+ if !ok {
+ return nil, io.EOF
+ }
+ return n, nil
+}
+
+func sshPipe() (Conn, *server, error) {
+ c1, c2, err := netPipe()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ clientConf := ClientConfig{
+ User: "user",
+ }
+ serverConf := ServerConfig{
+ NoClientAuth: true,
+ }
+ serverConf.AddHostKey(testSigners["ecdsa"])
+ done := make(chan *server, 1)
+ go func() {
+ server, err := newServer(c2, &serverConf)
+ if err != nil {
+ done <- nil
+ }
+ done <- server
+ }()
+
+ client, _, reqs, err := NewClientConn(c1, "", &clientConf)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ server := <-done
+ if server == nil {
+ return nil, nil, errors.New("server handshake failed")
+ }
+ go DiscardRequests(reqs)
+
+ return client, server, nil
+}
+
+func BenchmarkEndToEnd(b *testing.B) {
+ b.StopTimer()
+
+ client, server, err := sshPipe()
+ if err != nil {
+ b.Fatalf("sshPipe: %v", err)
+ }
+
+ defer client.Close()
+ defer server.Close()
+
+ size := (1 << 20)
+ input := make([]byte, size)
+ output := make([]byte, size)
+ b.SetBytes(int64(size))
+ done := make(chan int, 1)
+
+ go func() {
+ newCh, err := server.Accept()
+ if err != nil {
+ b.Fatalf("Client: %v", err)
+ }
+ ch, incoming, err := newCh.Accept()
+ go DiscardRequests(incoming)
+ for i := 0; i < b.N; i++ {
+ if _, err := io.ReadFull(ch, output); err != nil {
+ b.Fatalf("ReadFull: %v", err)
+ }
+ }
+ ch.Close()
+ done <- 1
+ }()
+
+ ch, in, err := client.OpenChannel("speed", nil)
+ if err != nil {
+ b.Fatalf("OpenChannel: %v", err)
+ }
+ go DiscardRequests(in)
+
+ b.ResetTimer()
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ if _, err := ch.Write(input); err != nil {
+ b.Fatalf("WriteFull: %v", err)
+ }
+ }
+ ch.Close()
+ b.StopTimer()
+
+ <-done
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/buffer.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/buffer.go
new file mode 100644
index 00000000000..6931b5114fe
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/buffer.go
@@ -0,0 +1,98 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "io"
+ "sync"
+)
+
+// buffer provides a linked list buffer for data exchange
+// between producer and consumer. Theoretically the buffer is
+// of unlimited capacity as it does no allocation of its own.
+type buffer struct {
+ // protects concurrent access to head, tail and closed
+ *sync.Cond
+
+ head *element // the buffer that will be read first
+ tail *element // the buffer that will be read last
+
+ closed bool
+}
+
+// An element represents a single link in a linked list.
+type element struct {
+ buf []byte
+ next *element
+}
+
+// newBuffer returns an empty buffer that is not closed.
+func newBuffer() *buffer {
+ e := new(element)
+ b := &buffer{
+ Cond: newCond(),
+ head: e,
+ tail: e,
+ }
+ return b
+}
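+
+// Illustrative usage (a sketch, not part of the upstream tests; names
+// and values are assumed): a producer writes chunks and signals EOF,
+// and a consumer drains the buffer with Read until it returns io.EOF.
+//
+//    b := newBuffer()
+//    b.write([]byte("hello"))
+//    b.eof()
+//    p := make([]byte, 16)
+//    n, _ := b.Read(p)   // n == 5, p[:5] holds "hello"
+//    _, err := b.Read(p) // err == io.EOF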
+
+// write makes buf available for Read to receive.
+// buf must not be modified after the call to write.
+func (b *buffer) write(buf []byte) {
+ b.Cond.L.Lock()
+ e := &element{buf: buf}
+ b.tail.next = e
+ b.tail = e
+ b.Cond.Signal()
+ b.Cond.L.Unlock()
+}
+
+// eof closes the buffer. Once all buffered data has been consumed,
+// subsequent reads will receive io.EOF.
+func (b *buffer) eof() error {
+ b.Cond.L.Lock()
+ b.closed = true
+ b.Cond.Signal()
+ b.Cond.L.Unlock()
+ return nil
+}
+
+// Read reads data from the internal buffer into buf. Read blocks until
+// data is available or the buffer is closed.
+func (b *buffer) Read(buf []byte) (n int, err error) {
+ b.Cond.L.Lock()
+ defer b.Cond.L.Unlock()
+
+ for len(buf) > 0 {
+ // if there is data in b.head, copy it
+ if len(b.head.buf) > 0 {
+ r := copy(buf, b.head.buf)
+ buf, b.head.buf = buf[r:], b.head.buf[r:]
+ n += r
+ continue
+ }
+ // if there is a next buffer, make it the head
+ if len(b.head.buf) == 0 && b.head != b.tail {
+ b.head = b.head.next
+ continue
+ }
+
+ // if at least one byte has been copied, return
+ if n > 0 {
+ break
+ }
+
+ // if nothing was read, and there is nothing outstanding
+ // check to see if the buffer is closed.
+ if b.closed {
+ err = io.EOF
+ break
+ }
+ // out of buffers, wait for producer
+ b.Cond.Wait()
+ }
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/buffer_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/buffer_test.go
new file mode 100644
index 00000000000..d5781cb3da9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/buffer_test.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "io"
+ "testing"
+)
+
+var alphabet = []byte("abcdefghijklmnopqrstuvwxyz")
+
+func TestBufferReadwrite(t *testing.T) {
+ b := newBuffer()
+ b.write(alphabet[:10])
+ r, _ := b.Read(make([]byte, 10))
+ if r != 10 {
+ t.Fatalf("Expected written == read == 10, written: 10, read %d", r)
+ }
+
+ b = newBuffer()
+ b.write(alphabet[:5])
+ r, _ = b.Read(make([]byte, 10))
+ if r != 5 {
+ t.Fatalf("Expected written == read == 5, written: 5, read %d", r)
+ }
+
+ b = newBuffer()
+ b.write(alphabet[:10])
+ r, _ = b.Read(make([]byte, 5))
+ if r != 5 {
+ t.Fatalf("Expected written == 10, read == 5, written: 10, read %d", r)
+ }
+
+ b = newBuffer()
+ b.write(alphabet[:5])
+ b.write(alphabet[5:15])
+ r, _ = b.Read(make([]byte, 10))
+ r2, _ := b.Read(make([]byte, 10))
+ if r != 10 || r2 != 5 {
+ t.Fatal("Expected written == read == 15")
+ }
+}
+
+func TestBufferClose(t *testing.T) {
+ b := newBuffer()
+ b.write(alphabet[:10])
+ b.eof()
+ _, err := b.Read(make([]byte, 5))
+ if err != nil {
+ t.Fatal("expected read of 5 to not return EOF")
+ }
+ b = newBuffer()
+ b.write(alphabet[:10])
+ b.eof()
+ r, err := b.Read(make([]byte, 5))
+ r2, err2 := b.Read(make([]byte, 10))
+ if r != 5 || r2 != 5 || err != nil || err2 != nil {
+ t.Fatal("expected reads of 5 and 5")
+ }
+
+ b = newBuffer()
+ b.write(alphabet[:10])
+ b.eof()
+ r, err = b.Read(make([]byte, 5))
+ r2, err2 = b.Read(make([]byte, 10))
+ r3, err3 := b.Read(make([]byte, 10))
+ if r != 5 || r2 != 5 || r3 != 0 || err != nil || err2 != nil || err3 != io.EOF {
+ t.Fatal("expected reads of 5 and 5 and 0, with EOF")
+ }
+
+ b = newBuffer()
+ b.write(make([]byte, 5))
+ b.write(make([]byte, 10))
+ b.eof()
+ r, err = b.Read(make([]byte, 9))
+ r2, err2 = b.Read(make([]byte, 3))
+ r3, err3 = b.Read(make([]byte, 3))
+ r4, err4 := b.Read(make([]byte, 10))
+ if err != nil || err2 != nil || err3 != nil || err4 != io.EOF {
+ t.Fatalf("Expected EOF on forth read only, err=%v, err2=%v, err3=%v, err4=%v", err, err2, err3, err4)
+ }
+ if r != 9 || r2 != 3 || r3 != 3 || r4 != 0 {
+ t.Fatal("Expected written == read == 15", r, r2, r3, r4)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/certs.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/certs.go
new file mode 100644
index 00000000000..385770036a7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/certs.go
@@ -0,0 +1,501 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sort"
+ "time"
+)
+
+// These constants from [PROTOCOL.certkeys] represent the algorithm names
+// for certificate types supported by this package.
+const (
+ CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
+ CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
+ CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
+ CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
+ CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
+)
+
+// Certificate types distinguish between host and user
+// certificates. The values can be set in the CertType field of
+// Certificate.
+const (
+ UserCert = 1
+ HostCert = 2
+)
+
+// Signature represents a cryptographic signature.
+type Signature struct {
+ Format string
+ Blob []byte
+}
+
+// CertTimeInfinity can be used for Certificate.ValidBefore to indicate that
+// a certificate does not expire.
+const CertTimeInfinity = 1<<64 - 1
+
+// A Certificate represents an OpenSSH certificate as defined in
+// [PROTOCOL.certkeys]?rev=1.8.
+type Certificate struct {
+ Nonce []byte
+ Key PublicKey
+ Serial uint64
+ CertType uint32
+ KeyId string
+ ValidPrincipals []string
+ ValidAfter uint64
+ ValidBefore uint64
+ Permissions
+ Reserved []byte
+ SignatureKey PublicKey
+ Signature *Signature
+}
+
+// genericCertData holds the key-independent part of the certificate data.
+// Overall, certificates contain a nonce, public key fields and
+// key-independent fields.
+type genericCertData struct {
+ Serial uint64
+ CertType uint32
+ KeyId string
+ ValidPrincipals []byte
+ ValidAfter uint64
+ ValidBefore uint64
+ CriticalOptions []byte
+ Extensions []byte
+ Reserved []byte
+ SignatureKey []byte
+ Signature []byte
+}
+
+func marshalStringList(namelist []string) []byte {
+ var to []byte
+ for _, name := range namelist {
+ s := struct{ N string }{name}
+ to = append(to, Marshal(&s)...)
+ }
+ return to
+}
+
+type optionsTuple struct {
+ Key string
+ Value []byte
+}
+
+type optionsTupleValue struct {
+ Value string
+}
+
+// marshalTuples serializes a map of critical options or extensions.
+// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
+// we need two length prefixes for a non-empty string value.
+func marshalTuples(tups map[string]string) []byte {
+ keys := make([]string, 0, len(tups))
+ for key := range tups {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+
+ var ret []byte
+ for _, key := range keys {
+ s := optionsTuple{Key: key}
+ if value := tups[key]; len(value) > 0 {
+ s.Value = Marshal(&optionsTupleValue{value})
+ }
+ ret = append(ret, Marshal(&s)...)
+ }
+ return ret
+}
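+
+// For illustration (values assumed, not taken from the sources): the
+// single tuple {"force-command": "/bin/sleep"} marshals to
+//
+//    uint32(13) "force-command"
+//    uint32(14) [ uint32(10) "/bin/sleep" ]
+//
+// i.e. a non-empty value carries an inner length prefix inside the
+// outer data field, giving the two prefixes noted above.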
+
+// parseTuples parses a serialized map of critical options or extensions.
+// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation,
+// we need two length prefixes for a non-empty option value.
+func parseTuples(in []byte) (map[string]string, error) {
+ tups := map[string]string{}
+ var lastKey string
+ var haveLastKey bool
+
+ for len(in) > 0 {
+ var key, val, extra []byte
+ var ok bool
+
+ if key, in, ok = parseString(in); !ok {
+ return nil, errShortRead
+ }
+ keyStr := string(key)
+ // according to [PROTOCOL.certkeys], the names must be in
+ // lexical order.
+ if haveLastKey && keyStr <= lastKey {
+ return nil, fmt.Errorf("ssh: certificate options are not in lexical order")
+ }
+ lastKey, haveLastKey = keyStr, true
+ // the next field is a data field, which if non-empty has a string embedded
+ if val, in, ok = parseString(in); !ok {
+ return nil, errShortRead
+ }
+ if len(val) > 0 {
+ val, extra, ok = parseString(val)
+ if !ok {
+ return nil, errShortRead
+ }
+ if len(extra) > 0 {
+ return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value")
+ }
+ tups[keyStr] = string(val)
+ } else {
+ tups[keyStr] = ""
+ }
+ }
+ return tups, nil
+}
+
+func parseCert(in []byte, privAlgo string) (*Certificate, error) {
+ nonce, rest, ok := parseString(in)
+ if !ok {
+ return nil, errShortRead
+ }
+
+ key, rest, err := parsePubKey(rest, privAlgo)
+ if err != nil {
+ return nil, err
+ }
+
+ var g genericCertData
+ if err := Unmarshal(rest, &g); err != nil {
+ return nil, err
+ }
+
+ c := &Certificate{
+ Nonce: nonce,
+ Key: key,
+ Serial: g.Serial,
+ CertType: g.CertType,
+ KeyId: g.KeyId,
+ ValidAfter: g.ValidAfter,
+ ValidBefore: g.ValidBefore,
+ }
+
+ for principals := g.ValidPrincipals; len(principals) > 0; {
+ principal, rest, ok := parseString(principals)
+ if !ok {
+ return nil, errShortRead
+ }
+ c.ValidPrincipals = append(c.ValidPrincipals, string(principal))
+ principals = rest
+ }
+
+ c.CriticalOptions, err = parseTuples(g.CriticalOptions)
+ if err != nil {
+ return nil, err
+ }
+ c.Extensions, err = parseTuples(g.Extensions)
+ if err != nil {
+ return nil, err
+ }
+ c.Reserved = g.Reserved
+ k, err := ParsePublicKey(g.SignatureKey)
+ if err != nil {
+ return nil, err
+ }
+
+ c.SignatureKey = k
+ c.Signature, rest, ok = parseSignatureBody(g.Signature)
+ if !ok || len(rest) > 0 {
+ return nil, errors.New("ssh: signature parse error")
+ }
+
+ return c, nil
+}
+
+type openSSHCertSigner struct {
+ pub *Certificate
+ signer Signer
+}
+
+// NewCertSigner returns a Signer that signs with the given Certificate, whose
+// private key is held by signer. It returns an error if the public key in cert
+// doesn't match the key used by signer.
+func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
+ if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) {
+ return nil, errors.New("ssh: signer and cert have different public key")
+ }
+
+ return &openSSHCertSigner{cert, signer}, nil
+}
+
+func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
+ return s.signer.Sign(rand, data)
+}
+
+func (s *openSSHCertSigner) PublicKey() PublicKey {
+ return s.pub
+}
+
+const sourceAddressCriticalOption = "source-address"
+
+// CertChecker does the work of verifying a certificate. Its methods
+// can be plugged into ClientConfig.HostKeyCallback and
+// ServerConfig.PublicKeyCallback. For the CertChecker to work,
+// minimally, the IsAuthority callback should be set.
+type CertChecker struct {
+ // SupportedCriticalOptions lists the CriticalOptions that the
+ // server application layer understands. These are only used
+ // for user certificates.
+ SupportedCriticalOptions []string
+
+ // IsAuthority should return true if the key is recognized as
+ // an authority. This allows for certificates to be signed by other
+ // certificates.
+ IsAuthority func(auth PublicKey) bool
+
+ // Clock is used for verifying time stamps. If nil, time.Now
+ // is used.
+ Clock func() time.Time
+
+ // UserKeyFallback is called when CertChecker.Authenticate encounters a
+ // public key that is not a certificate. It must implement validation
+ // of user keys or else, if nil, all such keys are rejected.
+ UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
+
+ // HostKeyFallback is called when CertChecker.CheckHostKey encounters a
+ // public key that is not a certificate. It must implement host key
+ // validation or else, if nil, all such keys are rejected.
+ HostKeyFallback func(addr string, remote net.Addr, key PublicKey) error
+
+ // IsRevoked is called for each certificate so that revocation checking
+ // can be implemented. It should return true if the given certificate
+ // is revoked and false otherwise. If nil, no certificates are
+ // considered to have been revoked.
+ IsRevoked func(cert *Certificate) bool
+}
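+
+// A minimal client-side sketch (trustedCAKey is an assumed variable,
+// not part of this package): accept only host certificates signed by
+// one known authority.
+//
+//    checker := &CertChecker{
+//        IsAuthority: func(k PublicKey) bool {
+//            return bytes.Equal(k.Marshal(), trustedCAKey.Marshal())
+//        },
+//    }
+//    config := &ClientConfig{
+//        User:            "user",
+//        HostKeyCallback: checker.CheckHostKey,
+//    }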
+
+// CheckHostKey checks a host key certificate. This method can be
+// plugged into ClientConfig.HostKeyCallback.
+func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error {
+ cert, ok := key.(*Certificate)
+ if !ok {
+ if c.HostKeyFallback != nil {
+ return c.HostKeyFallback(addr, remote, key)
+ }
+ return errors.New("ssh: non-certificate host key")
+ }
+ if cert.CertType != HostCert {
+ return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType)
+ }
+
+ return c.CheckCert(addr, cert)
+}
+
+// Authenticate checks a user certificate. Authenticate can be used as
+// a value for ServerConfig.PublicKeyCallback.
+func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) {
+ cert, ok := pubKey.(*Certificate)
+ if !ok {
+ if c.UserKeyFallback != nil {
+ return c.UserKeyFallback(conn, pubKey)
+ }
+ return nil, errors.New("ssh: normal key pairs not accepted")
+ }
+
+ if cert.CertType != UserCert {
+ return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType)
+ }
+
+ if err := c.CheckCert(conn.User(), cert); err != nil {
+ return nil, err
+ }
+
+ return &cert.Permissions, nil
+}
+
+// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and
+// the signature of the certificate.
+func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
+ if c.IsRevoked != nil && c.IsRevoked(cert) {
+ return fmt.Errorf("ssh: certicate serial %d revoked", cert.Serial)
+ }
+
+ for opt := range cert.CriticalOptions {
+ // sourceAddressCriticalOption will be enforced by
+ // serverAuthenticate
+ if opt == sourceAddressCriticalOption {
+ continue
+ }
+
+ found := false
+ for _, supp := range c.SupportedCriticalOptions {
+ if supp == opt {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt)
+ }
+ }
+
+ if len(cert.ValidPrincipals) > 0 {
+ // By default, certs are valid for all users/hosts.
+ found := false
+ for _, p := range cert.ValidPrincipals {
+ if p == principal {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals)
+ }
+ }
+
+ if !c.IsAuthority(cert.SignatureKey) {
+ return fmt.Errorf("ssh: certificate signed by unrecognized authority")
+ }
+
+ clock := c.Clock
+ if clock == nil {
+ clock = time.Now
+ }
+
+ unixNow := clock().Unix()
+ if after := int64(cert.ValidAfter); after < 0 || unixNow < after {
+ return fmt.Errorf("ssh: cert is not yet valid")
+ }
+ if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {
+ return fmt.Errorf("ssh: cert has expired")
+ }
+ if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil {
+ return fmt.Errorf("ssh: certificate signature does not verify")
+ }
+
+ return nil
+}
+
+// SignCert sets c.SignatureKey to the authority's public key and stores a
+// Signature, by authority, in the certificate.
+func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
+ c.Nonce = make([]byte, 32)
+ if _, err := io.ReadFull(rand, c.Nonce); err != nil {
+ return err
+ }
+ c.SignatureKey = authority.PublicKey()
+
+ sig, err := authority.Sign(rand, c.bytesForSigning())
+ if err != nil {
+ return err
+ }
+ c.Signature = sig
+ return nil
+}
+
+var certAlgoNames = map[string]string{
+ KeyAlgoRSA: CertAlgoRSAv01,
+ KeyAlgoDSA: CertAlgoDSAv01,
+ KeyAlgoECDSA256: CertAlgoECDSA256v01,
+ KeyAlgoECDSA384: CertAlgoECDSA384v01,
+ KeyAlgoECDSA521: CertAlgoECDSA521v01,
+}
+
+// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
+// Panics if a non-certificate algorithm is passed.
+func certToPrivAlgo(algo string) string {
+ for privAlgo, pubAlgo := range certAlgoNames {
+ if pubAlgo == algo {
+ return privAlgo
+ }
+ }
+ panic("unknown cert algorithm")
+}
+
+func (cert *Certificate) bytesForSigning() []byte {
+ c2 := *cert
+ c2.Signature = nil
+ out := c2.Marshal()
+ // Drop trailing signature length.
+ return out[:len(out)-4]
+}
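+
+// Note: with Signature nilled out above, Marshal still emits the empty
+// signature field as a zero uint32 length prefix, so slicing off the
+// trailing 4 bytes yields exactly the to-be-signed blob described in
+// [PROTOCOL.certkeys].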
+
+// Marshal serializes c into OpenSSH's wire format. It is part of the
+// PublicKey interface.
+func (c *Certificate) Marshal() []byte {
+ generic := genericCertData{
+ Serial: c.Serial,
+ CertType: c.CertType,
+ KeyId: c.KeyId,
+ ValidPrincipals: marshalStringList(c.ValidPrincipals),
+ ValidAfter: uint64(c.ValidAfter),
+ ValidBefore: uint64(c.ValidBefore),
+ CriticalOptions: marshalTuples(c.CriticalOptions),
+ Extensions: marshalTuples(c.Extensions),
+ Reserved: c.Reserved,
+ SignatureKey: c.SignatureKey.Marshal(),
+ }
+ if c.Signature != nil {
+ generic.Signature = Marshal(c.Signature)
+ }
+ genericBytes := Marshal(&generic)
+ keyBytes := c.Key.Marshal()
+ _, keyBytes, _ = parseString(keyBytes)
+ prefix := Marshal(&struct {
+ Name string
+ Nonce []byte
+ Key []byte `ssh:"rest"`
+ }{c.Type(), c.Nonce, keyBytes})
+
+ result := make([]byte, 0, len(prefix)+len(genericBytes))
+ result = append(result, prefix...)
+ result = append(result, genericBytes...)
+ return result
+}
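+
+// Resulting blob layout, shown here for ssh-rsa-cert-v01@openssh.com
+// as an illustration ([PROTOCOL.certkeys]):
+//
+//    string  "ssh-rsa-cert-v01@openssh.com"
+//    string  nonce
+//    mpint   e, mpint n    (key fields, minus their algorithm name)
+//    uint64  serial, ..., string signature    (genericCertData, in order)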
+
+// Type returns the key name. It is part of the PublicKey interface.
+func (c *Certificate) Type() string {
+ algo, ok := certAlgoNames[c.Key.Type()]
+ if !ok {
+ panic("unknown cert key type")
+ }
+ return algo
+}
+
+// Verify verifies a signature against the certificate's public
+// key. It is part of the PublicKey interface.
+func (c *Certificate) Verify(data []byte, sig *Signature) error {
+ return c.Key.Verify(data, sig)
+}
+
+func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) {
+ format, in, ok := parseString(in)
+ if !ok {
+ return
+ }
+
+ out = &Signature{
+ Format: string(format),
+ }
+
+ if out.Blob, in, ok = parseString(in); !ok {
+ return
+ }
+
+ return out, in, ok
+}
+
+func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) {
+ sigBytes, rest, ok := parseString(in)
+ if !ok {
+ return
+ }
+
+ out, trailing, ok := parseSignatureBody(sigBytes)
+ if !ok || len(trailing) > 0 {
+ return nil, nil, false
+ }
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/certs_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/certs_test.go
new file mode 100644
index 00000000000..c5f2e533043
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/certs_test.go
@@ -0,0 +1,216 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto/rand"
+ "reflect"
+ "testing"
+ "time"
+)
+
+// Cert generated by ssh-keygen 6.0p1 Debian-4.
+// % ssh-keygen -s ca-key -I test user-key
+const exampleSSHCert = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgb1srW/W3ZDjYAO45xLYAwzHBDLsJ4Ux6ICFIkTjb1LEAAAADAQABAAAAYQCkoR51poH0wE8w72cqSB8Sszx+vAhzcMdCO0wqHTj7UNENHWEXGrU0E0UQekD7U+yhkhtoyjbPOVIP7hNa6aRk/ezdh/iUnCIt4Jt1v3Z1h1P+hA4QuYFMHNB+rmjPwAcAAAAAAAAAAAAAAAEAAAAEdGVzdAAAAAAAAAAAAAAAAP//////////AAAAAAAAAIIAAAAVcGVybWl0LVgxMS1mb3J3YXJkaW5nAAAAAAAAABdwZXJtaXQtYWdlbnQtZm9yd2FyZGluZwAAAAAAAAAWcGVybWl0LXBvcnQtZm9yd2FyZGluZwAAAAAAAAAKcGVybWl0LXB0eQAAAAAAAAAOcGVybWl0LXVzZXItcmMAAAAAAAAAAAAAAHcAAAAHc3NoLXJzYQAAAAMBAAEAAABhANFS2kaktpSGc+CcmEKPyw9mJC4nZKxHKTgLVZeaGbFZOvJTNzBspQHdy7Q1uKSfktxpgjZnksiu/tFF9ngyY2KFoc+U88ya95IZUycBGCUbBQ8+bhDtw/icdDGQD5WnUwAAAG8AAAAHc3NoLXJzYQAAAGC8Y9Z2LQKhIhxf52773XaWrXdxP0t3GBVo4A10vUWiYoAGepr6rQIoGGXFxT4B9Gp+nEBJjOwKDXPrAevow0T9ca8gZN+0ykbhSrXLE5Ao48rqr3zP4O1/9P7e6gp0gw8=`
+
+func TestParseCert(t *testing.T) {
+ authKeyBytes := []byte(exampleSSHCert)
+
+ key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes)
+ if err != nil {
+ t.Fatalf("ParseAuthorizedKey: %v", err)
+ }
+ if len(rest) > 0 {
+ t.Errorf("rest: got %q, want empty", rest)
+ }
+
+ if _, ok := key.(*Certificate); !ok {
+ t.Fatalf("got %v (%T), want *Certificate", key, key)
+ }
+
+ marshaled := MarshalAuthorizedKey(key)
+ // Before comparison, remove the trailing newline that
+ // MarshalAuthorizedKey adds.
+ marshaled = marshaled[:len(marshaled)-1]
+ if !bytes.Equal(authKeyBytes, marshaled) {
+ t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes)
+ }
+}
+
+// Cert generated by ssh-keygen OpenSSH_6.8p1 OS X 10.10.3
+// % ssh-keygen -s ca -I testcert -O source-address=192.168.1.0/24 -O force-command=/bin/sleep user.pub
+// user.pub key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMN
+// Critical Options:
+// force-command /bin/sleep
+// source-address 192.168.1.0/24
+// Extensions:
+// permit-X11-forwarding
+// permit-agent-forwarding
+// permit-port-forwarding
+// permit-pty
+// permit-user-rc
+const exampleSSHCertWithOptions = `ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgDyysCJY0XrO1n03EeRRoITnTPdjENFmWDs9X58PP3VUAAAADAQABAAABAQDACh1rt2DXfV3hk6fszSQcQ/rueMId0kVD9U7nl8cfEnFxqOCrNT92g4laQIGl2mn8lsGZfTLg8ksHq3gkvgO3oo/0wHy4v32JeBOHTsN5AL4gfHNEhWeWb50ev47hnTsRIt9P4dxogeUo/hTu7j9+s9lLpEQXCvq6xocXQt0j8MV9qZBBXFLXVT3cWIkSqOdwt/5ZBg+1GSrc7WfCXVWgTk4a20uPMuJPxU4RQwZW6X3+O8Pqo8C3cW0OzZRFP6gUYUKUsTI5WntlS+LAxgw1mZNsozFGdbiOPRnEryE3SRldh9vjDR3tin1fGpA5P7+CEB/bqaXtG3V+F2OkqaMNAAAAAAAAAAAAAAABAAAACHRlc3RjZXJ0AAAAAAAAAAAAAAAA//////////8AAABLAAAADWZvcmNlLWNvbW1hbmQAAAAOAAAACi9iaW4vc2xlZXAAAAAOc291cmNlLWFkZHJlc3MAAAASAAAADjE5Mi4xNjguMS4wLzI0AAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEAwU+c5ui5A8+J/CFpjW8wCa52bEODA808WWQDCSuTG/eMXNf59v9Y8Pk0F1E9dGCosSNyVcB/hacUrc6He+i97+HJCyKavBsE6GDxrjRyxYqAlfcOXi/IVmaUGiO8OQ39d4GHrjToInKvExSUeleQyH4Y4/e27T/pILAqPFL3fyrvMLT5qU9QyIt6zIpa7GBP5+urouNavMprV3zsfIqNBbWypinOQAw823a5wN+zwXnhZrgQiHZ/USG09Y6k98y1dTVz8YHlQVR4D3lpTAsKDKJ5hCH9WU4fdf+lU8OyNGaJ/vz0XNqxcToe1l4numLTnaoSuH89pHryjqurB7lJKwAAAQ8AAAAHc3NoLXJzYQAAAQCaHvUIoPL1zWUHIXLvu96/HU1s/i4CAW2IIEuGgxCUCiFj6vyTyYtgxQxcmbfZf6eaITlS6XJZa7Qq4iaFZh75C1DXTX8labXhRSD4E2t//AIP9MC1rtQC5xo6FmbQ+BoKcDskr+mNACcbRSxs3IL3bwCfWDnIw2WbVox9ZdcthJKk4UoCW4ix4QwdHw7zlddlz++fGEEVhmTbll1SUkycGApPFBsAYRTMupUJcYPIeReBI/m8XfkoMk99bV8ZJQTAd7OekHY2/48Ff53jLmyDjP7kNw1F8OaPtkFs6dGJXta4krmaekPy87j+35In5hFj7yoOqvSbmYUkeX70/GGQ`
+
+func TestParseCertWithOptions(t *testing.T) {
+ opts := map[string]string{
+ "source-address": "192.168.1.0/24",
+ "force-command": "/bin/sleep",
+ }
+ exts := map[string]string{
+ "permit-X11-forwarding": "",
+ "permit-agent-forwarding": "",
+ "permit-port-forwarding": "",
+ "permit-pty": "",
+ "permit-user-rc": "",
+ }
+ authKeyBytes := []byte(exampleSSHCertWithOptions)
+
+ key, _, _, rest, err := ParseAuthorizedKey(authKeyBytes)
+ if err != nil {
+ t.Fatalf("ParseAuthorizedKey: %v", err)
+ }
+ if len(rest) > 0 {
+ t.Errorf("rest: got %q, want empty", rest)
+ }
+ cert, ok := key.(*Certificate)
+ if !ok {
+ t.Fatalf("got %v (%T), want *Certificate", key, key)
+ }
+ if !reflect.DeepEqual(cert.CriticalOptions, opts) {
+ t.Errorf("unexpected critical options - got %v, want %v", cert.CriticalOptions, opts)
+ }
+ if !reflect.DeepEqual(cert.Extensions, exts) {
+ t.Errorf("unexpected Extensions - got %v, want %v", cert.Extensions, exts)
+ }
+ marshaled := MarshalAuthorizedKey(key)
+ // Before comparison, remove the trailing newline that
+ // MarshalAuthorizedKey adds.
+ marshaled = marshaled[:len(marshaled)-1]
+ if !bytes.Equal(authKeyBytes, marshaled) {
+ t.Errorf("marshaled certificate does not match original: got %q, want %q", marshaled, authKeyBytes)
+ }
+}
+
+func TestValidateCert(t *testing.T) {
+ key, _, _, _, err := ParseAuthorizedKey([]byte(exampleSSHCert))
+ if err != nil {
+ t.Fatalf("ParseAuthorizedKey: %v", err)
+ }
+ validCert, ok := key.(*Certificate)
+ if !ok {
+ t.Fatalf("got %v (%T), want *Certificate", key, key)
+ }
+ checker := CertChecker{}
+ checker.IsAuthority = func(k PublicKey) bool {
+ return bytes.Equal(k.Marshal(), validCert.SignatureKey.Marshal())
+ }
+
+ if err := checker.CheckCert("user", validCert); err != nil {
+ t.Errorf("Unable to validate certificate: %v", err)
+ }
+ invalidCert := &Certificate{
+ Key: testPublicKeys["rsa"],
+ SignatureKey: testPublicKeys["ecdsa"],
+ ValidBefore: CertTimeInfinity,
+ Signature: &Signature{},
+ }
+ if err := checker.CheckCert("user", invalidCert); err == nil {
+ t.Error("Invalid cert signature passed validation")
+ }
+}
+
+func TestValidateCertTime(t *testing.T) {
+ cert := Certificate{
+ ValidPrincipals: []string{"user"},
+ Key: testPublicKeys["rsa"],
+ ValidAfter: 50,
+ ValidBefore: 100,
+ }
+
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+
+ for ts, ok := range map[int64]bool{
+ 25: false,
+ 50: true,
+ 99: true,
+ 100: false,
+ 125: false,
+ } {
+ checker := CertChecker{
+ Clock: func() time.Time { return time.Unix(ts, 0) },
+ }
+ checker.IsAuthority = func(k PublicKey) bool {
+ return bytes.Equal(k.Marshal(),
+ testPublicKeys["ecdsa"].Marshal())
+ }
+
+ if v := checker.CheckCert("user", &cert); (v == nil) != ok {
+ t.Errorf("Authenticate(%d): %v", ts, v)
+ }
+ }
+}
+
+// TODO(hanwen): tests for
+//
+// host keys:
+// * fallbacks
+
+func TestHostKeyCert(t *testing.T) {
+ cert := &Certificate{
+ ValidPrincipals: []string{"hostname", "hostname.domain"},
+ Key: testPublicKeys["rsa"],
+ ValidBefore: CertTimeInfinity,
+ CertType: HostCert,
+ }
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+
+ checker := &CertChecker{
+ IsAuthority: func(p PublicKey) bool {
+ return bytes.Equal(testPublicKeys["ecdsa"].Marshal(), p.Marshal())
+ },
+ }
+
+ certSigner, err := NewCertSigner(cert, testSigners["rsa"])
+ if err != nil {
+ t.Errorf("NewCertSigner: %v", err)
+ }
+
+ for _, name := range []string{"hostname", "otherhost"} {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ errc := make(chan error)
+
+ go func() {
+ conf := ServerConfig{
+ NoClientAuth: true,
+ }
+ conf.AddHostKey(certSigner)
+ _, _, _, err := NewServerConn(c1, &conf)
+ errc <- err
+ }()
+
+ config := &ClientConfig{
+ User: "user",
+ HostKeyCallback: checker.CheckHostKey,
+ }
+ _, _, _, err = NewClientConn(c2, name, config)
+
+ succeed := name == "hostname"
+ if (err == nil) != succeed {
+ t.Fatalf("NewClientConn(%q): %v", name, err)
+ }
+
+ err = <-errc
+ if (err == nil) != succeed {
+ t.Fatalf("NewServerConn(%q): %v", name, err)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/channel.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/channel.go
new file mode 100644
index 00000000000..5403c7e45fb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/channel.go
@@ -0,0 +1,631 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "sync"
+)
+
+const (
+ minPacketLength = 9
+ // channelMaxPacket contains the maximum number of bytes that will be
+ // sent in a single packet. As per RFC 4253, section 6.1, 32k is also
+ // the minimum.
+ channelMaxPacket = 1 << 15
+ // We follow OpenSSH here.
+ channelWindowSize = 64 * channelMaxPacket
+)
+
+// NewChannel represents an incoming request to a channel. It must either be
+// accepted for use by calling Accept, or rejected by calling Reject.
+type NewChannel interface {
+ // Accept accepts the channel creation request. It returns the Channel
+ // and a Go channel containing SSH requests. The Go channel must be
+ // serviced otherwise the Channel will hang.
+ Accept() (Channel, <-chan *Request, error)
+
+ // Reject rejects the channel creation request. After calling
+ // this, no other methods on the Channel may be called.
+ Reject(reason RejectionReason, message string) error
+
+ // ChannelType returns the type of the channel, as supplied by the
+ // client.
+ ChannelType() string
+
+ // ExtraData returns the arbitrary payload for this channel, as supplied
+ // by the client. This data is specific to the channel type.
+ ExtraData() []byte
+}
+
+// A Channel is an ordered, reliable, flow-controlled, duplex stream
+// that is multiplexed over an SSH connection.
+type Channel interface {
+ // Read reads up to len(data) bytes from the channel.
+ Read(data []byte) (int, error)
+
+ // Write writes len(data) bytes to the channel.
+ Write(data []byte) (int, error)
+
+ // Close signals end of channel use. No data may be sent after this
+ // call.
+ Close() error
+
+ // CloseWrite signals the end of sending in-band
+ // data. Requests may still be sent, and the other side may
+ // still send data
+ CloseWrite() error
+
+ // SendRequest sends a channel request. If wantReply is true,
+ // it will wait for a reply and return the result as a
+ // boolean, otherwise the return value will be false. Channel
+ // requests are out-of-band messages so they may be sent even
+ // if the data stream is closed or blocked by flow control.
+ SendRequest(name string, wantReply bool, payload []byte) (bool, error)
+
+ // Stderr returns an io.ReadWriter that writes to this channel
+ // with the extended data type set to stderr. Stderr may
+ // safely be read and written from a different goroutine than
+ // Read and Write respectively.
+ Stderr() io.ReadWriter
+}
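+
+// Server-side sketch (assumed context: chans is the <-chan NewChannel
+// returned by NewServerConn): accept "session" channels and echo all
+// data back until the client closes.
+//
+//    for newCh := range chans {
+//        if newCh.ChannelType() != "session" {
+//            newCh.Reject(UnknownChannelType, "unknown channel type")
+//            continue
+//        }
+//        ch, reqs, err := newCh.Accept()
+//        if err != nil {
+//            continue
+//        }
+//        go DiscardRequests(reqs)
+//        go func() {
+//            io.Copy(ch, ch)
+//            ch.Close()
+//        }()
+//    }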
+
+// Request is a request sent outside of the normal stream of
+// data. Requests can either be specific to an SSH channel, or they
+// can be global.
+type Request struct {
+ Type string
+ WantReply bool
+ Payload []byte
+
+ ch *channel
+ mux *mux
+}
+
+// Reply sends a response to a request. It must be called for all requests
+// where WantReply is true and is a no-op otherwise. The payload argument is
+// ignored for replies to channel-specific requests.
+func (r *Request) Reply(ok bool, payload []byte) error {
+ if !r.WantReply {
+ return nil
+ }
+
+ if r.ch == nil {
+ return r.mux.ackRequest(ok, payload)
+ }
+
+ return r.ch.ackRequest(ok)
+}
+
+// RejectionReason is an enumeration used when rejecting channel creation
+// requests. See RFC 4254, section 5.1.
+type RejectionReason uint32
+
+const (
+ Prohibited RejectionReason = iota + 1
+ ConnectionFailed
+ UnknownChannelType
+ ResourceShortage
+)
+
+// String converts the rejection reason to human readable form.
+func (r RejectionReason) String() string {
+ switch r {
+ case Prohibited:
+ return "administratively prohibited"
+ case ConnectionFailed:
+ return "connect failed"
+ case UnknownChannelType:
+ return "unknown channel type"
+ case ResourceShortage:
+ return "resource shortage"
+ }
+ return fmt.Sprintf("unknown reason %d", int(r))
+}
+
+func min(a uint32, b int) uint32 {
+ if a < uint32(b) {
+ return a
+ }
+ return uint32(b)
+}
+
+type channelDirection uint8
+
+const (
+ channelInbound channelDirection = iota
+ channelOutbound
+)
+
+// channel is an implementation of the Channel interface that works
+// with the mux class.
+type channel struct {
+ // R/O after creation
+ chanType string
+ extraData []byte
+ localId, remoteId uint32
+
+ // maxIncomingPayload and maxRemotePayload are the maximum
+ // payload sizes of normal and extended data packets for
+ // receiving and sending, respectively. The wire packet will
+ // be 9 or 13 bytes larger (excluding encryption overhead).
+ maxIncomingPayload uint32
+ maxRemotePayload uint32
+
+ mux *mux
+
+ // decided is set to true if an accept or reject message has been sent
+ // (for outbound channels) or received (for inbound channels).
+ decided bool
+
+ // direction contains either channelOutbound, for channels created
+ // locally, or channelInbound, for channels created by the peer.
+ direction channelDirection
+
+ // Pending internal channel messages.
+ msg chan interface{}
+
+ // Since requests have no ID, there can be only one request
+ // with WantReply=true outstanding. This lock is held by a
+ // goroutine that has such an outgoing request pending.
+ sentRequestMu sync.Mutex
+
+ incomingRequests chan *Request
+
+ sentEOF bool
+
+ // thread-safe data
+ remoteWin window
+ pending *buffer
+ extPending *buffer
+
+ // windowMu protects myWindow, the flow-control window.
+ windowMu sync.Mutex
+ myWindow uint32
+
+ // writeMu serializes calls to mux.conn.writePacket() and
+ // protects sentClose and packetPool. This mutex must be
+ // different from windowMu, as writePacket can block if there
+ // is a key exchange pending.
+ writeMu sync.Mutex
+ sentClose bool
+
+ // packetPool has a buffer for each extended channel ID to
+ // save allocations during writes.
+ packetPool map[uint32][]byte
+}
+
+// writePacket sends a packet. If the packet is a channel close, it updates
+// sentClose. This method takes the lock c.writeMu.
+func (c *channel) writePacket(packet []byte) error {
+ c.writeMu.Lock()
+ if c.sentClose {
+ c.writeMu.Unlock()
+ return io.EOF
+ }
+ c.sentClose = (packet[0] == msgChannelClose)
+ err := c.mux.conn.writePacket(packet)
+ c.writeMu.Unlock()
+ return err
+}
+
+func (c *channel) sendMessage(msg interface{}) error {
+ if debugMux {
+ log.Printf("send %d: %#v", c.mux.chanList.offset, msg)
+ }
+
+ p := Marshal(msg)
+ binary.BigEndian.PutUint32(p[1:], c.remoteId)
+ return c.writePacket(p)
+}
+
+// WriteExtended writes data to a specific extended stream. These streams are
+// used, for example, for stderr.
+func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) {
+ if c.sentEOF {
+ return 0, io.EOF
+ }
+ // 1 byte message type, 4 bytes remoteId, 4 bytes data length
+ opCode := byte(msgChannelData)
+ headerLength := uint32(9)
+ if extendedCode > 0 {
+ headerLength += 4
+ opCode = msgChannelExtendedData
+ }
+
+ c.writeMu.Lock()
+ packet := c.packetPool[extendedCode]
+ // We don't remove the buffer from packetPool, so
+ // WriteExtended calls from different goroutines will be
+ // flagged as errors by the race detector.
+ c.writeMu.Unlock()
+
+ for len(data) > 0 {
+ space := min(c.maxRemotePayload, len(data))
+ if space, err = c.remoteWin.reserve(space); err != nil {
+ return n, err
+ }
+ if want := headerLength + space; uint32(cap(packet)) < want {
+ packet = make([]byte, want)
+ } else {
+ packet = packet[:want]
+ }
+
+ todo := data[:space]
+
+ packet[0] = opCode
+ binary.BigEndian.PutUint32(packet[1:], c.remoteId)
+ if extendedCode > 0 {
+ binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode))
+ }
+ binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo)))
+ copy(packet[headerLength:], todo)
+ if err = c.writePacket(packet); err != nil {
+ return n, err
+ }
+
+ n += len(todo)
+ data = data[len(todo):]
+ }
+
+ c.writeMu.Lock()
+ c.packetPool[extendedCode] = packet
+ c.writeMu.Unlock()
+
+ return n, err
+}
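+
+// Wire layout of each packet produced above (RFC 4254, section 5.2);
+// a normal data packet omits the data type code field:
+//
+//    byte      msgChannelData (94) or msgChannelExtendedData (95)
+//    uint32    recipient channel (c.remoteId)
+//    uint32    data type code (extended data only, e.g. 1 for stderr)
+//    uint32    len(payload)
+//    byte[n]   payload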
+
+func (c *channel) handleData(packet []byte) error {
+ headerLen := 9
+ isExtendedData := packet[0] == msgChannelExtendedData
+ if isExtendedData {
+ headerLen = 13
+ }
+ if len(packet) < headerLen {
+ // malformed data packet
+ return parseError(packet[0])
+ }
+
+ var extended uint32
+ if isExtendedData {
+ extended = binary.BigEndian.Uint32(packet[5:])
+ }
+
+ length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen])
+ if length == 0 {
+ return nil
+ }
+ if length > c.maxIncomingPayload {
+ // TODO(hanwen): should send Disconnect?
+ return errors.New("ssh: incoming packet exceeds maximum payload size")
+ }
+
+ data := packet[headerLen:]
+ if length != uint32(len(data)) {
+ return errors.New("ssh: wrong packet length")
+ }
+
+ c.windowMu.Lock()
+ if c.myWindow < length {
+ c.windowMu.Unlock()
+ // TODO(hanwen): should send Disconnect with reason?
+ return errors.New("ssh: remote side wrote too much")
+ }
+ c.myWindow -= length
+ c.windowMu.Unlock()
+
+ if extended == 1 {
+ c.extPending.write(data)
+ } else if extended > 0 {
+ // discard other extended data.
+ } else {
+ c.pending.write(data)
+ }
+ return nil
+}
+
+func (c *channel) adjustWindow(n uint32) error {
+ c.windowMu.Lock()
+ // Since myWindow is managed on our side, and can never exceed
+ // the initial window setting, we don't worry about overflow.
+ c.myWindow += uint32(n)
+ c.windowMu.Unlock()
+ return c.sendMessage(windowAdjustMsg{
+ AdditionalBytes: uint32(n),
+ })
+}
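+
+// Flow-control note: handleData debits myWindow as packets arrive, and
+// ReadExtended credits it back through adjustWindow only after the
+// consumer has drained those bytes, so a slow reader eventually stalls
+// the remote sender once the 64*channelMaxPacket window is exhausted.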
+
+func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) {
+ switch extended {
+ case 1:
+ n, err = c.extPending.Read(data)
+ case 0:
+ n, err = c.pending.Read(data)
+ default:
+ return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended)
+ }
+
+ if n > 0 {
+ err = c.adjustWindow(uint32(n))
+ // sendWindowAdjust can return io.EOF if the remote
+ // peer has closed the connection, however we want to
+ // defer forwarding io.EOF to the caller of Read until
+ // the buffer has been drained.
+ if n > 0 && err == io.EOF {
+ err = nil
+ }
+ }
+
+ return n, err
+}
+
+func (c *channel) close() {
+ c.pending.eof()
+ c.extPending.eof()
+ close(c.msg)
+ close(c.incomingRequests)
+ c.writeMu.Lock()
+ // This is not necessary for a normal channel teardown, but if
+ // there was another error, it is.
+ c.sentClose = true
+ c.writeMu.Unlock()
+ // Unblock writers.
+ c.remoteWin.close()
+}
+
+// responseMessageReceived is called when a success or failure message is
+// received on a channel to check that such a message is reasonable for the
+// given channel.
+func (c *channel) responseMessageReceived() error {
+ if c.direction == channelInbound {
+ return errors.New("ssh: channel response message received on inbound channel")
+ }
+ if c.decided {
+ return errors.New("ssh: duplicate response received for channel")
+ }
+ c.decided = true
+ return nil
+}
+
+func (c *channel) handlePacket(packet []byte) error {
+ switch packet[0] {
+ case msgChannelData, msgChannelExtendedData:
+ return c.handleData(packet)
+ case msgChannelClose:
+ c.sendMessage(channelCloseMsg{PeersId: c.remoteId})
+ c.mux.chanList.remove(c.localId)
+ c.close()
+ return nil
+ case msgChannelEOF:
+ // RFC 4254 is mute on how EOF affects dataExt messages but
+ // it is logical to signal EOF at the same time.
+ c.extPending.eof()
+ c.pending.eof()
+ return nil
+ }
+
+ decoded, err := decode(packet)
+ if err != nil {
+ return err
+ }
+
+ switch msg := decoded.(type) {
+ case *channelOpenFailureMsg:
+ if err := c.responseMessageReceived(); err != nil {
+ return err
+ }
+ c.mux.chanList.remove(msg.PeersId)
+ c.msg <- msg
+ case *channelOpenConfirmMsg:
+ if err := c.responseMessageReceived(); err != nil {
+ return err
+ }
+ if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
+ return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize)
+ }
+ c.remoteId = msg.MyId
+ c.maxRemotePayload = msg.MaxPacketSize
+ c.remoteWin.add(msg.MyWindow)
+ c.msg <- msg
+ case *windowAdjustMsg:
+ if !c.remoteWin.add(msg.AdditionalBytes) {
+ return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes)
+ }
+ case *channelRequestMsg:
+ req := Request{
+ Type: msg.Request,
+ WantReply: msg.WantReply,
+ Payload: msg.RequestSpecificData,
+ ch: c,
+ }
+
+ c.incomingRequests <- &req
+ default:
+ c.msg <- msg
+ }
+ return nil
+}
+
+func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel {
+ ch := &channel{
+ remoteWin: window{Cond: newCond()},
+ myWindow: channelWindowSize,
+ pending: newBuffer(),
+ extPending: newBuffer(),
+ direction: direction,
+ incomingRequests: make(chan *Request, 16),
+ msg: make(chan interface{}, 16),
+ chanType: chanType,
+ extraData: extraData,
+ mux: m,
+ packetPool: make(map[uint32][]byte),
+ }
+ ch.localId = m.chanList.add(ch)
+ return ch
+}
+
+var errUndecided = errors.New("ssh: must Accept or Reject channel")
+var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once")
+
+type extChannel struct {
+ code uint32
+ ch *channel
+}
+
+func (e *extChannel) Write(data []byte) (n int, err error) {
+ return e.ch.WriteExtended(data, e.code)
+}
+
+func (e *extChannel) Read(data []byte) (n int, err error) {
+ return e.ch.ReadExtended(data, e.code)
+}
+
+func (c *channel) Accept() (Channel, <-chan *Request, error) {
+ if c.decided {
+ return nil, nil, errDecidedAlready
+ }
+ c.maxIncomingPayload = channelMaxPacket
+ confirm := channelOpenConfirmMsg{
+ PeersId: c.remoteId,
+ MyId: c.localId,
+ MyWindow: c.myWindow,
+ MaxPacketSize: c.maxIncomingPayload,
+ }
+ c.decided = true
+ if err := c.sendMessage(confirm); err != nil {
+ return nil, nil, err
+ }
+
+ return c, c.incomingRequests, nil
+}
+
+func (ch *channel) Reject(reason RejectionReason, message string) error {
+ if ch.decided {
+ return errDecidedAlready
+ }
+ reject := channelOpenFailureMsg{
+ PeersId: ch.remoteId,
+ Reason: reason,
+ Message: message,
+ Language: "en",
+ }
+ ch.decided = true
+ return ch.sendMessage(reject)
+}
+
+func (ch *channel) Read(data []byte) (int, error) {
+ if !ch.decided {
+ return 0, errUndecided
+ }
+ return ch.ReadExtended(data, 0)
+}
+
+func (ch *channel) Write(data []byte) (int, error) {
+ if !ch.decided {
+ return 0, errUndecided
+ }
+ return ch.WriteExtended(data, 0)
+}
+
+func (ch *channel) CloseWrite() error {
+ if !ch.decided {
+ return errUndecided
+ }
+ ch.sentEOF = true
+ return ch.sendMessage(channelEOFMsg{
+ PeersId: ch.remoteId})
+}
+
+func (ch *channel) Close() error {
+ if !ch.decided {
+ return errUndecided
+ }
+
+ return ch.sendMessage(channelCloseMsg{
+ PeersId: ch.remoteId})
+}
+
+// Extended returns an io.ReadWriter that sends and receives data on the given
+// SSH extended stream. Such streams are used, for example, for stderr.
+func (ch *channel) Extended(code uint32) io.ReadWriter {
+ if !ch.decided {
+ return nil
+ }
+ return &extChannel{code, ch}
+}
+
+func (ch *channel) Stderr() io.ReadWriter {
+ return ch.Extended(1)
+}
+
+func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
+ if !ch.decided {
+ return false, errUndecided
+ }
+
+ if wantReply {
+ ch.sentRequestMu.Lock()
+ defer ch.sentRequestMu.Unlock()
+ }
+
+ msg := channelRequestMsg{
+ PeersId: ch.remoteId,
+ Request: name,
+ WantReply: wantReply,
+ RequestSpecificData: payload,
+ }
+
+ if err := ch.sendMessage(msg); err != nil {
+ return false, err
+ }
+
+ if wantReply {
+ m, ok := <-ch.msg
+ if !ok {
+ return false, io.EOF
+ }
+ switch m.(type) {
+ case *channelRequestFailureMsg:
+ return false, nil
+ case *channelRequestSuccessMsg:
+ return true, nil
+ default:
+ return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m)
+ }
+ }
+
+ return false, nil
+}
+
+// ackRequest either sends an ack or nack to the channel request.
+func (ch *channel) ackRequest(ok bool) error {
+ if !ch.decided {
+ return errUndecided
+ }
+
+ var msg interface{}
+ if !ok {
+ msg = channelRequestFailureMsg{
+ PeersId: ch.remoteId,
+ }
+ } else {
+ msg = channelRequestSuccessMsg{
+ PeersId: ch.remoteId,
+ }
+ }
+ return ch.sendMessage(msg)
+}
+
+func (ch *channel) ChannelType() string {
+ return ch.chanType
+}
+
+func (ch *channel) ExtraData() []byte {
+ return ch.extraData
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/cipher.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/cipher.go
new file mode 100644
index 00000000000..2732963f39e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/cipher.go
@@ -0,0 +1,552 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/rc4"
+ "crypto/subtle"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "io/ioutil"
+)
+
+const (
+ packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher.
+
+ // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations
+ // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC
+ // indicates implementations SHOULD be able to handle larger packet sizes, but then
+ // waffles on about reasonable limits.
+ //
+ // OpenSSH caps their maxPacket at 256kB so we choose to do
+ // the same. maxPacket is also used to ensure that uint32
+ // length fields do not overflow, so it should remain well
+ // below 4G.
+ maxPacket = 256 * 1024
+)
+
+// noneCipher implements cipher.Stream and provides no encryption. It is used
+// by the transport before the first key-exchange.
+type noneCipher struct{}
+
+func (c noneCipher) XORKeyStream(dst, src []byte) {
+ copy(dst, src)
+}
+
+func newAESCTR(key, iv []byte) (cipher.Stream, error) {
+ c, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+ return cipher.NewCTR(c, iv), nil
+}
+
+func newRC4(key, iv []byte) (cipher.Stream, error) {
+ return rc4.NewCipher(key)
+}
+
+type streamCipherMode struct {
+ keySize int
+ ivSize int
+ skip int
+ createFunc func(key, iv []byte) (cipher.Stream, error)
+}
+
+func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
+ if len(key) < c.keySize {
+ panic("ssh: key length too small for cipher")
+ }
+ if len(iv) < c.ivSize {
+ panic("ssh: iv too small for cipher")
+ }
+
+ stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
+ if err != nil {
+ return nil, err
+ }
+
+ var streamDump []byte
+ if c.skip > 0 {
+ streamDump = make([]byte, 512)
+ }
+
+ for remainingToDump := c.skip; remainingToDump > 0; {
+ dumpThisTime := remainingToDump
+ if dumpThisTime > len(streamDump) {
+ dumpThisTime = len(streamDump)
+ }
+ stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
+ remainingToDump -= dumpThisTime
+ }
+
+ return stream, nil
+}
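+
+// The skip loop above implements the RFC 4345 requirement that
+// arcfour128 and arcfour256 discard the first 1536 bytes of keystream
+// (see cipherModes below) before encrypting any payload, working
+// around RC4's biased initial output.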
+
+// cipherModes documents properties of supported ciphers. Ciphers not included
+// are not supported and will not be negotiated, even if explicitly requested in
+// ClientConfig.Crypto.Ciphers.
+var cipherModes = map[string]*streamCipherMode{
+ // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
+ // are defined in the order specified in the RFC.
+ "aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
+ "aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
+ "aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
+
+ // Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
+ // They are defined in the order specified in the RFC.
+ "arcfour128": {16, 0, 1536, newRC4},
+ "arcfour256": {32, 0, 1536, newRC4},
+
+ // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
+ // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
+ // RC4) has problems with weak keys, and should be used with caution."
+ // RFC4345 introduces improved versions of Arcfour.
+ "arcfour": {16, 0, 0, newRC4},
+
+ // AES-GCM is not a stream cipher, so it is constructed with a
+ // special case. If we add any more non-stream ciphers, we
+ // should invest in a cleaner way to do this.
+ gcmCipherID: {16, 12, 0, nil},
+
+ // CBC mode is insecure and so is not included in the default config.
+ // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
+ // needed, it's possible to specify a custom Config to enable it.
+ // You should expect that an active attacker can recover plaintext if
+ // you do.
+ aes128cbcID: {16, aes.BlockSize, 0, nil},
+}
+
+// prefixLen is the length of the packet prefix that contains the packet length
+// and number of padding bytes.
+const prefixLen = 5
+
+// streamPacketCipher is a packetCipher using a stream cipher.
+type streamPacketCipher struct {
+ mac hash.Hash
+ cipher cipher.Stream
+
+ // The following members are to avoid per-packet allocations.
+ prefix [prefixLen]byte
+ seqNumBytes [4]byte
+ padding [2 * packetSizeMultiple]byte
+ packetData []byte
+ macResult []byte
+}
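+
+// The binary packet format handled by readPacket and writePacket
+// (RFC 4253, section 6):
+//
+//    uint32    packet_length (excludes this field and the MAC)
+//    byte      padding_length
+//    byte[n1]  payload
+//    byte[n2]  random padding
+//    byte[m]   MAC over (sequence number || unencrypted packet)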
+
+// readPacket reads and decrypts a single packet from the reader argument.
+func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+ if _, err := io.ReadFull(r, s.prefix[:]); err != nil {
+ return nil, err
+ }
+
+ s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+ length := binary.BigEndian.Uint32(s.prefix[0:4])
+ paddingLength := uint32(s.prefix[4])
+
+ var macSize uint32
+ if s.mac != nil {
+ s.mac.Reset()
+ binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
+ s.mac.Write(s.seqNumBytes[:])
+ s.mac.Write(s.prefix[:])
+ macSize = uint32(s.mac.Size())
+ }
+
+ if length <= paddingLength+1 {
+ return nil, errors.New("ssh: invalid packet length, packet too small")
+ }
+
+ if length > maxPacket {
+ return nil, errors.New("ssh: invalid packet length, packet too large")
+ }
+
+ // the maxPacket check above ensures that length-1+macSize
+ // does not overflow.
+ if uint32(cap(s.packetData)) < length-1+macSize {
+ s.packetData = make([]byte, length-1+macSize)
+ } else {
+ s.packetData = s.packetData[:length-1+macSize]
+ }
+
+ if _, err := io.ReadFull(r, s.packetData); err != nil {
+ return nil, err
+ }
+ mac := s.packetData[length-1:]
+ data := s.packetData[:length-1]
+ s.cipher.XORKeyStream(data, data)
+
+ if s.mac != nil {
+ s.mac.Write(data)
+ s.macResult = s.mac.Sum(s.macResult[:0])
+ if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
+ return nil, errors.New("ssh: MAC failure")
+ }
+ }
+
+ return s.packetData[:length-paddingLength-1], nil
+}
+
+// writePacket encrypts and sends a packet of data to the writer argument.
+func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
+ if len(packet) > maxPacket {
+ return errors.New("ssh: packet too large")
+ }
+
+ paddingLength := packetSizeMultiple - (prefixLen+len(packet))%packetSizeMultiple
+ if paddingLength < 4 {
+ paddingLength += packetSizeMultiple
+ }
+
+ length := len(packet) + 1 + paddingLength
+ binary.BigEndian.PutUint32(s.prefix[:], uint32(length))
+ s.prefix[4] = byte(paddingLength)
+ padding := s.padding[:paddingLength]
+ if _, err := io.ReadFull(rand, padding); err != nil {
+ return err
+ }
+
+ if s.mac != nil {
+ s.mac.Reset()
+ binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
+ s.mac.Write(s.seqNumBytes[:])
+ s.mac.Write(s.prefix[:])
+ s.mac.Write(packet)
+ s.mac.Write(padding)
+ }
+
+ s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+ s.cipher.XORKeyStream(packet, packet)
+ s.cipher.XORKeyStream(padding, padding)
+
+ if _, err := w.Write(s.prefix[:]); err != nil {
+ return err
+ }
+ if _, err := w.Write(packet); err != nil {
+ return err
+ }
+ if _, err := w.Write(padding); err != nil {
+ return err
+ }
+
+ if s.mac != nil {
+ s.macResult = s.mac.Sum(s.macResult[:0])
+ if _, err := w.Write(s.macResult); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type gcmCipher struct {
+ aead cipher.AEAD
+ prefix [4]byte
+ iv []byte
+ buf []byte
+}
+
+func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) {
+ c, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ aead, err := cipher.NewGCM(c)
+ if err != nil {
+ return nil, err
+ }
+
+ return &gcmCipher{
+ aead: aead,
+ iv: iv,
+ }, nil
+}
+
+const gcmTagSize = 16
+
+func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
+ // Pad out to multiple of 16 bytes. This is different from the
+ // stream cipher because that encrypts the length too.
+ padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple)
+ if padding < 4 {
+ padding += packetSizeMultiple
+ }
+
+ length := uint32(len(packet) + int(padding) + 1)
+ binary.BigEndian.PutUint32(c.prefix[:], length)
+ if _, err := w.Write(c.prefix[:]); err != nil {
+ return err
+ }
+
+ if cap(c.buf) < int(length) {
+ c.buf = make([]byte, length)
+ } else {
+ c.buf = c.buf[:length]
+ }
+
+ c.buf[0] = padding
+ copy(c.buf[1:], packet)
+ if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil {
+ return err
+ }
+ c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:])
+ if _, err := w.Write(c.buf); err != nil {
+ return err
+ }
+ c.incIV()
+
+ return nil
+}
+
+func (c *gcmCipher) incIV() {
+ for i := 4 + 7; i >= 4; i-- {
+ c.iv[i]++
+ if c.iv[i] != 0 {
+ break
+ }
+ }
+}
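+
+// The nonce incremented above follows the RFC 5647 layout: a 4-byte fixed
+// field (iv[0:4]) followed by an 8-byte invocation counter (iv[4:12])
+// treated as a big-endian integer, which is why incIV walks bytes 11 down
+// to 4 with carry. For example, a counter ending in 0x00 0xff becomes
+// 0x01 0x00 after one call.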
+
+func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+ if _, err := io.ReadFull(r, c.prefix[:]); err != nil {
+ return nil, err
+ }
+ length := binary.BigEndian.Uint32(c.prefix[:])
+ if length > maxPacket {
+ return nil, errors.New("ssh: max packet length exceeded.")
+ }
+
+ if cap(c.buf) < int(length+gcmTagSize) {
+ c.buf = make([]byte, length+gcmTagSize)
+ } else {
+ c.buf = c.buf[:length+gcmTagSize]
+ }
+
+ if _, err := io.ReadFull(r, c.buf); err != nil {
+ return nil, err
+ }
+
+ plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:])
+ if err != nil {
+ return nil, err
+ }
+ c.incIV()
+
+ padding := plain[0]
+ if padding < 4 || padding >= 20 {
+ return nil, fmt.Errorf("ssh: illegal padding %d", padding)
+ }
+
+ if int(padding+1) >= len(plain) {
+ return nil, fmt.Errorf("ssh: padding %d too large", padding)
+ }
+ plain = plain[1 : length-uint32(padding)]
+ return plain, nil
+}
+
+// cbcCipher implements the aes128-cbc cipher defined in RFC 4253 section 6.1.
+type cbcCipher struct {
+ mac hash.Hash
+ macSize uint32
+ decrypter cipher.BlockMode
+ encrypter cipher.BlockMode
+
+ // The following members are to avoid per-packet allocations.
+ seqNumBytes [4]byte
+ packetData []byte
+ macResult []byte
+
+ // Amount of data we should still read to hide which
+ // verification error triggered.
+ oracleCamouflage uint32
+}
+
+func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+ c, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+
+ cbc := &cbcCipher{
+ mac: macModes[algs.MAC].new(macKey),
+ decrypter: cipher.NewCBCDecrypter(c, iv),
+ encrypter: cipher.NewCBCEncrypter(c, iv),
+ packetData: make([]byte, 1024),
+ }
+ if cbc.mac != nil {
+ cbc.macSize = uint32(cbc.mac.Size())
+ }
+
+ return cbc, nil
+}
+
+func maxUInt32(a, b int) uint32 {
+ if a > b {
+ return uint32(a)
+ }
+ return uint32(b)
+}
+
+const (
+ cbcMinPacketSizeMultiple = 8
+ cbcMinPacketSize = 16
+ cbcMinPaddingSize = 4
+)
+
+// cbcError represents a verification error that may leak information.
+type cbcError string
+
+func (e cbcError) Error() string { return string(e) }
+
+func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+ p, err := c.readPacketLeaky(seqNum, r)
+ if err != nil {
+ if _, ok := err.(cbcError); ok {
+ // Verification error: read a fixed amount of
+ // data, to make distinguishing between
+ // failing MAC and failing length check more
+ // difficult.
+ io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage))
+ }
+ }
+ return p, err
+}
+
+func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) {
+ blockSize := c.decrypter.BlockSize()
+
+ // Read the header, which will include some of the subsequent data in the
+ // case of block ciphers - this is copied back to the payload later.
+ // How many bytes of payload/padding will be read with this first read.
+ firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize)
+ firstBlock := c.packetData[:firstBlockLength]
+ if _, err := io.ReadFull(r, firstBlock); err != nil {
+ return nil, err
+ }
+
+ c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength
+
+ c.decrypter.CryptBlocks(firstBlock, firstBlock)
+ length := binary.BigEndian.Uint32(firstBlock[:4])
+ if length > maxPacket {
+ return nil, cbcError("ssh: packet too large")
+ }
+ if length+4 < maxUInt32(cbcMinPacketSize, blockSize) {
+ // The minimum size of a packet is 16 (or the cipher block size, whichever
+ // is larger) bytes.
+ return nil, cbcError("ssh: packet too small")
+ }
+ // The length of the packet (including the length field but not the MAC) must
+ // be a multiple of the block size or 8, whichever is larger.
+ if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 {
+ return nil, cbcError("ssh: invalid packet length multiple")
+ }
+
+ paddingLength := uint32(firstBlock[4])
+ if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 {
+ return nil, cbcError("ssh: invalid packet length")
+ }
+
+ // Positions within the c.packetData buffer:
+ macStart := 4 + length
+ paddingStart := macStart - paddingLength
+
+ // Entire packet size, starting before length, ending at end of mac.
+ entirePacketSize := macStart + c.macSize
+
+ // Ensure c.packetData is large enough for the entire packet data.
+ if uint32(cap(c.packetData)) < entirePacketSize {
+ // Still need to upsize and copy, but this should be rare at runtime, only
+ // on upsizing the packetData buffer.
+ c.packetData = make([]byte, entirePacketSize)
+ copy(c.packetData, firstBlock)
+ } else {
+ c.packetData = c.packetData[:entirePacketSize]
+ }
+
+ if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil {
+ return nil, err
+ } else {
+ c.oracleCamouflage -= uint32(n)
+ }
+
+ remainingCrypted := c.packetData[firstBlockLength:macStart]
+ c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted)
+
+ mac := c.packetData[macStart:]
+ if c.mac != nil {
+ c.mac.Reset()
+ binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
+ c.mac.Write(c.seqNumBytes[:])
+ c.mac.Write(c.packetData[:macStart])
+ c.macResult = c.mac.Sum(c.macResult[:0])
+ if subtle.ConstantTimeCompare(c.macResult, mac) != 1 {
+ return nil, cbcError("ssh: MAC failure")
+ }
+ }
+
+ return c.packetData[prefixLen:paddingStart], nil
+}
+
+func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
+ effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize())
+
+ // Length of encrypted portion of the packet (header, payload, padding).
+ // Enforce minimum padding and packet size.
+ encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPacketSize)
+ // Enforce block size.
+ encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize
+
+ length := encLength - 4
+ paddingLength := int(length) - (1 + len(packet))
+
+ // Overall buffer contains: header, payload, padding, mac.
+ // Space for the MAC is reserved in the capacity but not the slice length.
+ bufferSize := encLength + c.macSize
+ if uint32(cap(c.packetData)) < bufferSize {
+ c.packetData = make([]byte, encLength, bufferSize)
+ } else {
+ c.packetData = c.packetData[:encLength]
+ }
+
+ p := c.packetData
+
+ // Packet header.
+ binary.BigEndian.PutUint32(p, length)
+ p = p[4:]
+ p[0] = byte(paddingLength)
+
+ // Payload.
+ p = p[1:]
+ copy(p, packet)
+
+ // Padding.
+ p = p[len(packet):]
+ if _, err := io.ReadFull(rand, p); err != nil {
+ return err
+ }
+
+ if c.mac != nil {
+ c.mac.Reset()
+ binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
+ c.mac.Write(c.seqNumBytes[:])
+ c.mac.Write(c.packetData)
+ // The MAC is now appended into the capacity reserved for it earlier.
+ c.packetData = c.mac.Sum(c.packetData)
+ }
+
+ c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength])
+
+ if _, err := w.Write(c.packetData); err != nil {
+ return err
+ }
+
+ return nil
+}
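+
+// On the wire, the CBC packet written above is laid out per RFC 4253
+// (mac-then-encrypt):
+//
+//	ENCRYPT(packet_length || padding_length || payload || padding) || mac
+//
+// where the mac is computed over the sequence number and the plaintext.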
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/cipher_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/cipher_test.go
new file mode 100644
index 00000000000..54b92b6edce
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/cipher_test.go
@@ -0,0 +1,127 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/aes"
+ "crypto/rand"
+ "testing"
+)
+
+func TestDefaultCiphersExist(t *testing.T) {
+ for _, cipherAlgo := range supportedCiphers {
+ if _, ok := cipherModes[cipherAlgo]; !ok {
+ t.Errorf("default cipher %q is unknown", cipherAlgo)
+ }
+ }
+}
+
+func TestPacketCiphers(t *testing.T) {
+ // Still test the aes128cbc cipher although it's commented out.
+ cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil}
+ defer delete(cipherModes, aes128cbcID)
+
+ for cipher := range cipherModes {
+ kr := &kexResult{Hash: crypto.SHA1}
+ algs := directionAlgorithms{
+ Cipher: cipher,
+ MAC: "hmac-sha1",
+ Compression: "none",
+ }
+ client, err := newPacketCipher(clientKeys, algs, kr)
+ if err != nil {
+ t.Errorf("newPacketCipher(client, %q): %v", cipher, err)
+ continue
+ }
+ server, err := newPacketCipher(clientKeys, algs, kr)
+ if err != nil {
+ t.Errorf("newPacketCipher(client, %q): %v", cipher, err)
+ continue
+ }
+
+ want := "bla bla"
+ input := []byte(want)
+ buf := &bytes.Buffer{}
+ if err := client.writePacket(0, buf, rand.Reader, input); err != nil {
+ t.Errorf("writePacket(%q): %v", cipher, err)
+ continue
+ }
+
+ packet, err := server.readPacket(0, buf)
+ if err != nil {
+ t.Errorf("readPacket(%q): %v", cipher, err)
+ continue
+ }
+
+ if string(packet) != want {
+ t.Errorf("roundtrip(%q): got %q, want %q", cipher, packet, want)
+ }
+ }
+}
+
+func TestCBCOracleCounterMeasure(t *testing.T) {
+ cipherModes[aes128cbcID] = &streamCipherMode{16, aes.BlockSize, 0, nil}
+ defer delete(cipherModes, aes128cbcID)
+
+ kr := &kexResult{Hash: crypto.SHA1}
+ algs := directionAlgorithms{
+ Cipher: aes128cbcID,
+ MAC: "hmac-sha1",
+ Compression: "none",
+ }
+ client, err := newPacketCipher(clientKeys, algs, kr)
+ if err != nil {
+ t.Fatalf("newPacketCipher(client): %v", err)
+ }
+
+ want := "bla bla"
+ input := []byte(want)
+ buf := &bytes.Buffer{}
+ if err := client.writePacket(0, buf, rand.Reader, input); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+
+ packetSize := buf.Len()
+ buf.Write(make([]byte, 2*maxPacket))
+
+ // We corrupt each byte, but this usually will only test the
+ // 'packet too large' or 'MAC failure' cases.
+ lastRead := -1
+ for i := 0; i < packetSize; i++ {
+ server, err := newPacketCipher(clientKeys, algs, kr)
+ if err != nil {
+ t.Fatalf("newPacketCipher(client): %v", err)
+ }
+
+ fresh := &bytes.Buffer{}
+ fresh.Write(buf.Bytes())
+ fresh.Bytes()[i] ^= 0x01
+
+ before := fresh.Len()
+ _, err = server.readPacket(0, fresh)
+ if err == nil {
+ t.Errorf("corrupt byte %d: readPacket succeeded ", i)
+ continue
+ }
+ if _, ok := err.(cbcError); !ok {
+ t.Errorf("corrupt byte %d: got %v (%T), want cbcError", i, err, err)
+ continue
+ }
+
+ after := fresh.Len()
+ bytesRead := before - after
+ if bytesRead < maxPacket {
+ t.Errorf("corrupt byte %d: read %d bytes, want more than %d", i, bytesRead, maxPacket)
+ continue
+ }
+
+ if i > 0 && bytesRead != lastRead {
+ t.Errorf("corrupt byte %d: read %d bytes, want %d bytes read", i, bytesRead, lastRead)
+ }
+ lastRead = bytesRead
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client.go
new file mode 100644
index 00000000000..0b9fbe5002a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client.go
@@ -0,0 +1,213 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+)
+
+// Client implements a traditional SSH client that supports shells,
+// subprocesses, port forwarding and tunneled dialing.
+type Client struct {
+ Conn
+
+ forwards forwardList // forwarded tcpip connections from the remote side
+ mu sync.Mutex
+ channelHandlers map[string]chan NewChannel
+}
+
+// HandleChannelOpen returns a channel on which NewChannel requests
+// for the given type are sent. If the type is already being handled,
+// nil is returned. The channel is closed when the connection is closed.
+func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.channelHandlers == nil {
+ // The SSH channel has been closed.
+ c := make(chan NewChannel)
+ close(c)
+ return c
+ }
+
+ ch := c.channelHandlers[channelType]
+ if ch != nil {
+ return nil
+ }
+
+ ch = make(chan NewChannel, 16)
+ c.channelHandlers[channelType] = ch
+ return ch
+}
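+
+// A minimal consumer sketch; the "x11" channel type and the handler are
+// illustrative only:
+//
+//	for nc := range client.HandleChannelOpen("x11") {
+//		go handleX11(nc) // handleX11 is user-supplied
+//	}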
+
+// NewClient creates a Client on top of the given connection.
+func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client {
+ conn := &Client{
+ Conn: c,
+ channelHandlers: make(map[string]chan NewChannel, 1),
+ }
+
+ go conn.handleGlobalRequests(reqs)
+ go conn.handleChannelOpens(chans)
+ go func() {
+ conn.Wait()
+ conn.forwards.closeAll()
+ }()
+ go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip"))
+ return conn
+}
+
+// NewClientConn establishes an authenticated SSH connection using c
+// as the underlying transport. The Request and NewChannel channels
+// must be serviced or the connection will hang.
+func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) {
+ fullConf := *config
+ fullConf.SetDefaults()
+ conn := &connection{
+ sshConn: sshConn{conn: c},
+ }
+
+ if err := conn.clientHandshake(addr, &fullConf); err != nil {
+ c.Close()
+ return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err)
+ }
+ conn.mux = newMux(conn.transport)
+ return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil
+}
+
+// clientHandshake performs the client side key exchange. See RFC 4253 Section
+// 7.
+func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error {
+ if config.ClientVersion != "" {
+ c.clientVersion = []byte(config.ClientVersion)
+ } else {
+ c.clientVersion = []byte(packageVersion)
+ }
+ var err error
+ c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion)
+ if err != nil {
+ return err
+ }
+
+ c.transport = newClientTransport(
+ newTransport(c.sshConn.conn, config.Rand, true /* is client */),
+ c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr())
+ if err := c.transport.requestKeyChange(); err != nil {
+ return err
+ }
+
+ if packet, err := c.transport.readPacket(); err != nil {
+ return err
+ } else if packet[0] != msgNewKeys {
+ return unexpectedMessageError(msgNewKeys, packet[0])
+ }
+
+ // We just did the key change, so the session ID is established.
+ c.sessionID = c.transport.getSessionID()
+
+ return c.clientAuthenticate(config)
+}
+
+// verifyHostKeySignature verifies the host key obtained in the key
+// exchange.
+func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
+ sig, rest, ok := parseSignatureBody(result.Signature)
+ if len(rest) > 0 || !ok {
+ return errors.New("ssh: signature parse error")
+ }
+
+ return hostKey.Verify(result.H, sig)
+}
+
+// NewSession opens a new Session for this client. (A session is a remote
+// execution of a program.)
+func (c *Client) NewSession() (*Session, error) {
+ ch, in, err := c.OpenChannel("session", nil)
+ if err != nil {
+ return nil, err
+ }
+ return newSession(ch, in)
+}
+
+func (c *Client) handleGlobalRequests(incoming <-chan *Request) {
+ for r := range incoming {
+ // This handles keepalive messages and matches
+ // the behaviour of OpenSSH.
+ r.Reply(false, nil)
+ }
+}
+
+// handleChannelOpens handles channel open messages from the remote side.
+func (c *Client) handleChannelOpens(in <-chan NewChannel) {
+ for ch := range in {
+ c.mu.Lock()
+ handler := c.channelHandlers[ch.ChannelType()]
+ c.mu.Unlock()
+
+ if handler != nil {
+ handler <- ch
+ } else {
+ ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType()))
+ }
+ }
+
+ c.mu.Lock()
+ for _, ch := range c.channelHandlers {
+ close(ch)
+ }
+ c.channelHandlers = nil
+ c.mu.Unlock()
+}
+
+// Dial starts a client connection to the given SSH server. It is a
+// convenience function that connects to the given network address,
+// initiates the SSH handshake, and then sets up a Client. For access
+// to incoming channels and requests, use net.Dial with NewClientConn
+// instead.
+func Dial(network, addr string, config *ClientConfig) (*Client, error) {
+ conn, err := net.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+ c, chans, reqs, err := NewClientConn(conn, addr, config)
+ if err != nil {
+ return nil, err
+ }
+ return NewClient(c, chans, reqs), nil
+}
+
+// A ClientConfig structure is used to configure a Client. It must not be
+// modified after having been passed to an SSH function.
+type ClientConfig struct {
+ // Config contains configuration that is shared between clients and
+ // servers.
+ Config
+
+ // User contains the username to authenticate as.
+ User string
+
+ // Auth contains possible authentication methods to use with the
+ // server. Only the first instance of a particular RFC 4252 method will
+ // be used during authentication.
+ Auth []AuthMethod
+
+ // HostKeyCallback, if not nil, is called during the cryptographic
+ // handshake to validate the server's host key. A nil HostKeyCallback
+ // implies that all host keys are accepted.
+ HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
+
+ // ClientVersion contains the version identification string that will
+ // be used for the connection. If empty, a reasonable default is used.
+ ClientVersion string
+
+ // HostKeyAlgorithms lists the key types that the client will
+ // accept from the server as host key, in order of
+ // preference. If empty, a reasonable default is used. Any
+ // string returned from PublicKey.Type method may be used, or
+ // any of the CertAlgoXxxx and KeyAlgoXxxx constants.
+ HostKeyAlgorithms []string
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_auth.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_auth.go
new file mode 100644
index 00000000000..e15be3ef29d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_auth.go
@@ -0,0 +1,441 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// clientAuthenticate authenticates with the remote server. See RFC 4252.
+func (c *connection) clientAuthenticate(config *ClientConfig) error {
+ // initiate user auth session
+ if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil {
+ return err
+ }
+ packet, err := c.transport.readPacket()
+ if err != nil {
+ return err
+ }
+ var serviceAccept serviceAcceptMsg
+ if err := Unmarshal(packet, &serviceAccept); err != nil {
+ return err
+ }
+
+ // During the authentication phase the client first attempts the "none"
+ // method, then any untried methods suggested by the server.
+ tried := make(map[string]bool)
+ var lastMethods []string
+ for auth := AuthMethod(new(noneAuth)); auth != nil; {
+ ok, methods, err := auth.auth(c.transport.getSessionID(), config.User, c.transport, config.Rand)
+ if err != nil {
+ return err
+ }
+ if ok {
+ // success
+ return nil
+ }
+ tried[auth.method()] = true
+ if methods == nil {
+ methods = lastMethods
+ }
+ lastMethods = methods
+
+ auth = nil
+
+ findNext:
+ for _, a := range config.Auth {
+ candidateMethod := a.method()
+ if tried[candidateMethod] {
+ continue
+ }
+ for _, meth := range methods {
+ if meth == candidateMethod {
+ auth = a
+ break findNext
+ }
+ }
+ }
+ }
+ return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried))
+}
+
+func keys(m map[string]bool) []string {
+ s := make([]string, 0, len(m))
+
+ for key := range m {
+ s = append(s, key)
+ }
+ return s
+}
+
+// An AuthMethod represents an instance of an RFC 4252 authentication method.
+type AuthMethod interface {
+ // auth authenticates user over transport t.
+ // Returns true if authentication is successful.
+ // If authentication is not successful, a []string of alternative
+ // method names is returned. If the slice is nil, it will be ignored
+ // and the previous set of possible methods will be reused.
+ auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error)
+
+ // method returns the RFC 4252 method name.
+ method() string
+}
+
+// "none" authentication, RFC 4252 section 5.2.
+type noneAuth int
+
+func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+ if err := c.writePacket(Marshal(&userAuthRequestMsg{
+ User: user,
+ Service: serviceSSH,
+ Method: "none",
+ })); err != nil {
+ return false, nil, err
+ }
+
+ return handleAuthResponse(c)
+}
+
+func (n *noneAuth) method() string {
+ return "none"
+}
+
+// passwordCallback is an AuthMethod that fetches the password through
+// a function call, e.g. by prompting the user.
+type passwordCallback func() (password string, err error)
+
+func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+ type passwordAuthMsg struct {
+ User string `sshtype:"50"`
+ Service string
+ Method string
+ Reply bool
+ Password string
+ }
+
+ pw, err := cb()
+ // REVIEW NOTE: is there a need to support skipping a password attempt?
+ // The program may only find out that the user doesn't have a password
+ // when prompting.
+ if err != nil {
+ return false, nil, err
+ }
+
+ if err := c.writePacket(Marshal(&passwordAuthMsg{
+ User: user,
+ Service: serviceSSH,
+ Method: cb.method(),
+ Reply: false,
+ Password: pw,
+ })); err != nil {
+ return false, nil, err
+ }
+
+ return handleAuthResponse(c)
+}
+
+func (cb passwordCallback) method() string {
+ return "password"
+}
+
+// Password returns an AuthMethod using the given password.
+func Password(secret string) AuthMethod {
+ return passwordCallback(func() (string, error) { return secret, nil })
+}
+
+// PasswordCallback returns an AuthMethod that uses a callback for
+// fetching a password.
+func PasswordCallback(prompt func() (secret string, err error)) AuthMethod {
+ return passwordCallback(prompt)
+}
+
+type publickeyAuthMsg struct {
+ User string `sshtype:"50"`
+ Service string
+ Method string
+ // HasSig indicates to the receiver that the auth request is signed and
+ // should be used for authentication of the request.
+ HasSig bool
+ Algoname string
+ PubKey []byte
+ // Sig is tagged with "rest" so Marshal will exclude it during
+ // validateKey
+ Sig []byte `ssh:"rest"`
+}
+
+// publicKeyCallback is an AuthMethod that uses a set of key
+// pairs for authentication.
+type publicKeyCallback func() ([]Signer, error)
+
+func (cb publicKeyCallback) method() string {
+ return "publickey"
+}
+
+func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+ // Authentication is performed in two stages. The first stage sends an
+ // enquiry to test if each key is acceptable to the remote. The second
+ // stage attempts to authenticate with the valid keys obtained in the
+ // first stage.
+
+ signers, err := cb()
+ if err != nil {
+ return false, nil, err
+ }
+ var validKeys []Signer
+ for _, signer := range signers {
+ if ok, err := validateKey(signer.PublicKey(), user, c); ok {
+ validKeys = append(validKeys, signer)
+ } else {
+ if err != nil {
+ return false, nil, err
+ }
+ }
+ }
+
+ // methods that may continue if this auth is not successful.
+ var methods []string
+ for _, signer := range validKeys {
+ pub := signer.PublicKey()
+
+ pubKey := pub.Marshal()
+ sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{
+ User: user,
+ Service: serviceSSH,
+ Method: cb.method(),
+ }, []byte(pub.Type()), pubKey))
+ if err != nil {
+ return false, nil, err
+ }
+
+ // manually wrap the serialized signature in a string
+ s := Marshal(sign)
+ sig := make([]byte, stringLength(len(s)))
+ marshalString(sig, s)
+ msg := publickeyAuthMsg{
+ User: user,
+ Service: serviceSSH,
+ Method: cb.method(),
+ HasSig: true,
+ Algoname: pub.Type(),
+ PubKey: pubKey,
+ Sig: sig,
+ }
+ p := Marshal(&msg)
+ if err := c.writePacket(p); err != nil {
+ return false, nil, err
+ }
+ var success bool
+ success, methods, err = handleAuthResponse(c)
+ if err != nil {
+ return false, nil, err
+ }
+ if success {
+ return success, methods, err
+ }
+ }
+ return false, methods, nil
+}
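+
+// For orientation, the message flow attempted above for each valid key,
+// per RFC 4252 section 7:
+//
+//	C->S: SSH_MSG_USERAUTH_REQUEST ("publickey", HasSig=false)
+//	S->C: SSH_MSG_USERAUTH_PK_OK (the key is acceptable)
+//	C->S: SSH_MSG_USERAUTH_REQUEST ("publickey", HasSig=true, signature)
+//	S->C: SSH_MSG_USERAUTH_SUCCESS or SSH_MSG_USERAUTH_FAILURE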
+
+// validateKey validates the key provided is acceptable to the server.
+func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
+ pubKey := key.Marshal()
+ msg := publickeyAuthMsg{
+ User: user,
+ Service: serviceSSH,
+ Method: "publickey",
+ HasSig: false,
+ Algoname: key.Type(),
+ PubKey: pubKey,
+ }
+ if err := c.writePacket(Marshal(&msg)); err != nil {
+ return false, err
+ }
+
+ return confirmKeyAck(key, c)
+}
+
+func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
+ pubKey := key.Marshal()
+ algoname := key.Type()
+
+ for {
+ packet, err := c.readPacket()
+ if err != nil {
+ return false, err
+ }
+ switch packet[0] {
+ case msgUserAuthBanner:
+ // TODO(gpaul): add callback to present the banner to the user
+ case msgUserAuthPubKeyOk:
+ var msg userAuthPubKeyOkMsg
+ if err := Unmarshal(packet, &msg); err != nil {
+ return false, err
+ }
+ if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
+ return false, nil
+ }
+ return true, nil
+ case msgUserAuthFailure:
+ return false, nil
+ default:
+ return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
+ }
+ }
+}
+
+// PublicKeys returns an AuthMethod that uses the given key
+// pairs.
+func PublicKeys(signers ...Signer) AuthMethod {
+ return publicKeyCallback(func() ([]Signer, error) { return signers, nil })
+}
+
+// PublicKeysCallback returns an AuthMethod that runs the given
+// function to obtain a list of key pairs.
+func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod {
+ return publicKeyCallback(getSigners)
+}
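+
+// A minimal sketch of obtaining a Signer for PublicKeys; the file path is
+// illustrative, and ParsePrivateKey is defined elsewhere in this package:
+//
+//	pemBytes, err := ioutil.ReadFile("/home/user/.ssh/id_rsa")
+//	if err != nil {
+//		// handle error
+//	}
+//	signer, err := ParsePrivateKey(pemBytes)
+//	if err != nil {
+//		// handle error
+//	}
+//	auth := PublicKeys(signer)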
+
+// handleAuthResponse returns whether the preceding authentication request
+// succeeded, along with a list of remaining authentication methods to try
+// next, and an error if an unexpected response was received.
+func handleAuthResponse(c packetConn) (bool, []string, error) {
+ for {
+ packet, err := c.readPacket()
+ if err != nil {
+ return false, nil, err
+ }
+
+ switch packet[0] {
+ case msgUserAuthBanner:
+ // TODO: add callback to present the banner to the user
+ case msgUserAuthFailure:
+ var msg userAuthFailureMsg
+ if err := Unmarshal(packet, &msg); err != nil {
+ return false, nil, err
+ }
+ return false, msg.Methods, nil
+ case msgUserAuthSuccess:
+ return true, nil, nil
+ case msgDisconnect:
+ return false, nil, io.EOF
+ default:
+ return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
+ }
+ }
+}
+
+// KeyboardInteractiveChallenge should print questions, optionally
+// disabling echoing (e.g. for passwords), and return all the answers.
+// Challenge may be called multiple times in a single session. After
+// successful authentication, the server may send a challenge with no
+// questions, for which the user and instruction messages should be
+// printed. RFC 4256 section 3.3 details how the UI should behave for
+// both CLI and GUI environments.
+type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
+
+// KeyboardInteractive returns an AuthMethod using a prompt/response
+// sequence controlled by the server.
+func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
+ return challenge
+}
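+
+// A minimal challenge callback sketch; the canned answers are illustrative
+// only, and a real client would prompt the user, honoring echos:
+//
+//	auth := KeyboardInteractive(func(user, instruction string, questions []string, echos []bool) ([]string, error) {
+//		answers := make([]string, len(questions))
+//		for i := range questions {
+//			answers[i] = "answer" // prompt the user here instead
+//		}
+//		return answers, nil
+//	})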
+
+func (cb KeyboardInteractiveChallenge) method() string {
+ return "keyboard-interactive"
+}
+
+func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+ type initiateMsg struct {
+ User string `sshtype:"50"`
+ Service string
+ Method string
+ Language string
+ Submethods string
+ }
+
+ if err := c.writePacket(Marshal(&initiateMsg{
+ User: user,
+ Service: serviceSSH,
+ Method: "keyboard-interactive",
+ })); err != nil {
+ return false, nil, err
+ }
+
+ for {
+ packet, err := c.readPacket()
+ if err != nil {
+ return false, nil, err
+ }
+
+ // Like handleAuthResponse, but with fewer options.
+ switch packet[0] {
+ case msgUserAuthBanner:
+ // TODO: Print banners during userauth.
+ continue
+ case msgUserAuthInfoRequest:
+ // OK
+ case msgUserAuthFailure:
+ var msg userAuthFailureMsg
+ if err := Unmarshal(packet, &msg); err != nil {
+ return false, nil, err
+ }
+ return false, msg.Methods, nil
+ case msgUserAuthSuccess:
+ return true, nil, nil
+ default:
+ return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
+ }
+
+ var msg userAuthInfoRequestMsg
+ if err := Unmarshal(packet, &msg); err != nil {
+ return false, nil, err
+ }
+
+ // Manually unpack the prompt/echo pairs.
+ rest := msg.Prompts
+ var prompts []string
+ var echos []bool
+ for i := 0; i < int(msg.NumPrompts); i++ {
+ prompt, r, ok := parseString(rest)
+ if !ok || len(r) == 0 {
+ return false, nil, errors.New("ssh: prompt format error")
+ }
+ prompts = append(prompts, string(prompt))
+ echos = append(echos, r[0] != 0)
+ rest = r[1:]
+ }
+
+ if len(rest) != 0 {
+ return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
+ }
+
+ answers, err := cb(msg.User, msg.Instruction, prompts, echos)
+ if err != nil {
+ return false, nil, err
+ }
+
+ if len(answers) != len(prompts) {
+ return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
+ }
+ responseLength := 1 + 4
+ for _, a := range answers {
+ responseLength += stringLength(len(a))
+ }
+ serialized := make([]byte, responseLength)
+ p := serialized
+ p[0] = msgUserAuthInfoResponse
+ p = p[1:]
+ p = marshalUint32(p, uint32(len(answers)))
+ for _, a := range answers {
+ p = marshalString(p, []byte(a))
+ }
+
+ if err := c.writePacket(serialized); err != nil {
+ return false, nil, err
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_auth_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_auth_test.go
new file mode 100644
index 00000000000..2ea44624fc1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_auth_test.go
@@ -0,0 +1,393 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+type keyboardInteractive map[string]string
+
+func (cr keyboardInteractive) Challenge(user string, instruction string, questions []string, echos []bool) ([]string, error) {
+ var answers []string
+ for _, q := range questions {
+ answers = append(answers, cr[q])
+ }
+ return answers, nil
+}
+
+// reused internally by tests
+var clientPassword = "tiger"
+
+// tryAuth runs a handshake with a given config against an SSH server
+// with config serverConfig
+func tryAuth(t *testing.T, config *ClientConfig) error {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ certChecker := CertChecker{
+ IsAuthority: func(k PublicKey) bool {
+ return bytes.Equal(k.Marshal(), testPublicKeys["ecdsa"].Marshal())
+ },
+ UserKeyFallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) {
+ if conn.User() == "testuser" && bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) {
+ return nil, nil
+ }
+
+ return nil, fmt.Errorf("pubkey for %q not acceptable", conn.User())
+ },
+ IsRevoked: func(c *Certificate) bool {
+ return c.Serial == 666
+ },
+ }
+
+ serverConfig := &ServerConfig{
+ PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) {
+ if conn.User() == "testuser" && string(pass) == clientPassword {
+ return nil, nil
+ }
+ return nil, errors.New("password auth failed")
+ },
+ PublicKeyCallback: certChecker.Authenticate,
+ KeyboardInteractiveCallback: func(conn ConnMetadata, challenge KeyboardInteractiveChallenge) (*Permissions, error) {
+ ans, err := challenge("user",
+ "instruction",
+ []string{"question1", "question2"},
+ []bool{true, true})
+ if err != nil {
+ return nil, err
+ }
+ ok := conn.User() == "testuser" && ans[0] == "answer1" && ans[1] == "answer2"
+ if ok {
+ challenge("user", "motd", nil, nil)
+ return nil, nil
+ }
+ return nil, errors.New("keyboard-interactive failed")
+ },
+ AuthLogCallback: func(conn ConnMetadata, method string, err error) {
+ t.Logf("user %q, method %q: %v", conn.User(), method, err)
+ },
+ }
+ serverConfig.AddHostKey(testSigners["rsa"])
+
+ go newServer(c1, serverConfig)
+ _, _, _, err = NewClientConn(c2, "", config)
+ return err
+}
+
+func TestClientAuthPublicKey(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["rsa"]),
+ },
+ }
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("unable to dial remote side: %s", err)
+ }
+}
+
+func TestAuthMethodPassword(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ Password(clientPassword),
+ },
+ }
+
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("unable to dial remote side: %s", err)
+ }
+}
+
+func TestAuthMethodFallback(t *testing.T) {
+ var passwordCalled bool
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["rsa"]),
+ PasswordCallback(
+ func() (string, error) {
+ passwordCalled = true
+ return "WRONG", nil
+ }),
+ },
+ }
+
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("unable to dial remote side: %s", err)
+ }
+
+ if passwordCalled {
+ t.Errorf("password auth tried before public-key auth.")
+ }
+}
+
+func TestAuthMethodWrongPassword(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ Password("wrong"),
+ PublicKeys(testSigners["rsa"]),
+ },
+ }
+
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("unable to dial remote side: %s", err)
+ }
+}
+
+func TestAuthMethodKeyboardInteractive(t *testing.T) {
+ answers := keyboardInteractive(map[string]string{
+ "question1": "answer1",
+ "question2": "answer2",
+ })
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ KeyboardInteractive(answers.Challenge),
+ },
+ }
+
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("unable to dial remote side: %s", err)
+ }
+}
+
+func TestAuthMethodWrongKeyboardInteractive(t *testing.T) {
+ answers := keyboardInteractive(map[string]string{
+ "question1": "answer1",
+ "question2": "WRONG",
+ })
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ KeyboardInteractive(answers.Challenge),
+ },
+ }
+
+ if err := tryAuth(t, config); err == nil {
+ t.Fatalf("wrong answers should not have authenticated with KeyboardInteractive")
+ }
+}
+
+// the mock server will only authenticate ssh-rsa keys
+func TestAuthMethodInvalidPublicKey(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["dsa"]),
+ },
+ }
+
+ if err := tryAuth(t, config); err == nil {
+ t.Fatalf("dsa private key should not have authenticated with rsa public key")
+ }
+}
+
+// the client should authenticate with the second key
+func TestAuthMethodRSAandDSA(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["dsa"], testSigners["rsa"]),
+ },
+ }
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("client could not authenticate with rsa key: %v", err)
+ }
+}
+
+func TestClientHMAC(t *testing.T) {
+ for _, mac := range supportedMACs {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["rsa"]),
+ },
+ Config: Config{
+ MACs: []string{mac},
+ },
+ }
+ if err := tryAuth(t, config); err != nil {
+ t.Fatalf("client could not authenticate with mac algo %s: %v", mac, err)
+ }
+ }
+}
+
+// issue 4285.
+func TestClientUnsupportedCipher(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(),
+ },
+ Config: Config{
+ Ciphers: []string{"aes128-cbc"}, // not currently supported
+ },
+ }
+ if err := tryAuth(t, config); err == nil {
+ t.Errorf("expected no ciphers in common")
+ }
+}
+
+func TestClientUnsupportedKex(t *testing.T) {
+ config := &ClientConfig{
+ User: "testuser",
+ Auth: []AuthMethod{
+ PublicKeys(),
+ },
+ Config: Config{
+ KeyExchanges: []string{"diffie-hellman-group-exchange-sha256"}, // not currently supported
+ },
+ }
+ if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "common algorithm") {
+ t.Errorf("got %v, expected 'common algorithm'", err)
+ }
+}
+
+func TestClientLoginCert(t *testing.T) {
+ cert := &Certificate{
+ Key: testPublicKeys["rsa"],
+ ValidBefore: CertTimeInfinity,
+ CertType: UserCert,
+ }
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ certSigner, err := NewCertSigner(cert, testSigners["rsa"])
+ if err != nil {
+ t.Fatalf("NewCertSigner: %v", err)
+ }
+
+ clientConfig := &ClientConfig{
+ User: "user",
+ }
+ clientConfig.Auth = append(clientConfig.Auth, PublicKeys(certSigner))
+
+ t.Log("should succeed")
+ if err := tryAuth(t, clientConfig); err != nil {
+ t.Errorf("cert login failed: %v", err)
+ }
+
+ t.Log("corrupted signature")
+ cert.Signature.Blob[0]++
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login passed with corrupted sig")
+ }
+
+ t.Log("revoked")
+ cert.Serial = 666
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("revoked cert login succeeded")
+ }
+ cert.Serial = 1
+
+ t.Log("sign with wrong key")
+ cert.SignCert(rand.Reader, testSigners["dsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login passed with non-authoritive key")
+ }
+
+ t.Log("host cert")
+ cert.CertType = HostCert
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login passed with wrong type")
+ }
+ cert.CertType = UserCert
+
+ t.Log("principal specified")
+ cert.ValidPrincipals = []string{"user"}
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err != nil {
+ t.Errorf("cert login failed: %v", err)
+ }
+
+ t.Log("wrong principal specified")
+ cert.ValidPrincipals = []string{"fred"}
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login passed with wrong principal")
+ }
+ cert.ValidPrincipals = nil
+
+ t.Log("added critical option")
+ cert.CriticalOptions = map[string]string{"root-access": "yes"}
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login passed with unrecognized critical option")
+ }
+
+ t.Log("allowed source address")
+ cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42/24"}
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err != nil {
+ t.Errorf("cert login with source-address failed: %v", err)
+ }
+
+ t.Log("disallowed source address")
+ cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42"}
+ cert.SignCert(rand.Reader, testSigners["ecdsa"])
+ if err := tryAuth(t, clientConfig); err == nil {
+ t.Errorf("cert login with source-address succeeded")
+ }
+}
+
+func testPermissionsPassing(withPermissions bool, t *testing.T) {
+ serverConfig := &ServerConfig{
+ PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) {
+ if conn.User() == "nopermissions" {
+ return nil, nil
+ } else {
+ return &Permissions{}, nil
+ }
+ },
+ }
+ serverConfig.AddHostKey(testSigners["rsa"])
+
+ clientConfig := &ClientConfig{
+ Auth: []AuthMethod{
+ PublicKeys(testSigners["rsa"]),
+ },
+ }
+ if withPermissions {
+ clientConfig.User = "permissions"
+ } else {
+ clientConfig.User = "nopermissions"
+ }
+
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ go NewClientConn(c2, "", clientConfig)
+ serverConn, err := newServer(c1, serverConfig)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if p := serverConn.Permissions; (p != nil) != withPermissions {
+ t.Fatalf("withPermissions is %t, but Permissions object is %#v", withPermissions, p)
+ }
+}
+
+func TestPermissionsPassing(t *testing.T) {
+ testPermissionsPassing(true, t)
+}
+
+func TestNoPermissionsPassing(t *testing.T) {
+ testPermissionsPassing(false, t)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_test.go
new file mode 100644
index 00000000000..1fe790cb494
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/client_test.go
@@ -0,0 +1,39 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "net"
+ "testing"
+)
+
+func testClientVersion(t *testing.T, config *ClientConfig, expected string) {
+ clientConn, serverConn := net.Pipe()
+ defer clientConn.Close()
+ receivedVersion := make(chan string, 1)
+ go func() {
+ version, err := readVersion(serverConn)
+ if err != nil {
+ receivedVersion <- ""
+ } else {
+ receivedVersion <- string(version)
+ }
+ serverConn.Close()
+ }()
+ NewClientConn(clientConn, "", config)
+ actual := <-receivedVersion
+ if actual != expected {
+ t.Fatalf("got %s; want %s", actual, expected)
+ }
+}
+
+func TestCustomClientVersion(t *testing.T) {
+ version := "Test-Client-Version-0.0"
+ testClientVersion(t, &ClientConfig{ClientVersion: version}, version)
+}
+
+func TestDefaultClientVersion(t *testing.T) {
+ testClientVersion(t, &ClientConfig{}, packageVersion)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/common.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/common.go
new file mode 100644
index 00000000000..9fc739e1d01
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/common.go
@@ -0,0 +1,354 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "crypto"
+ "crypto/rand"
+ "fmt"
+ "io"
+ "sync"
+
+ _ "crypto/sha1"
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+)
+
+// These are string constants in the SSH protocol.
+const (
+ compressionNone = "none"
+ serviceUserAuth = "ssh-userauth"
+ serviceSSH = "ssh-connection"
+)
+
+// supportedCiphers specifies the supported ciphers in preference order.
+var supportedCiphers = []string{
+ "aes128-ctr", "aes192-ctr", "aes256-ctr",
+ "aes128-gcm@openssh.com",
+ "arcfour256", "arcfour128",
+}
+
+// supportedKexAlgos specifies the supported key-exchange algorithms in
+// preference order.
+var supportedKexAlgos = []string{
+ kexAlgoCurve25519SHA256,
+ // P384 and P521 are not constant-time yet, but since we don't
+ // reuse ephemeral keys, using them for ECDH should be OK.
+ kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
+ kexAlgoDH14SHA1, kexAlgoDH1SHA1,
+}
+
+// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
+// of authenticating servers) in preference order.
+var supportedHostKeyAlgos = []string{
+ CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
+ CertAlgoECDSA384v01, CertAlgoECDSA521v01,
+
+ KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
+ KeyAlgoRSA, KeyAlgoDSA,
+}
+
+// supportedMACs specifies a default set of MAC algorithms in preference order.
+// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
+// because they have reached the end of their useful life.
+var supportedMACs = []string{
+ "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
+}
+
+var supportedCompressions = []string{compressionNone}
+
+// hashFuncs keeps the mapping of supported algorithms to their respective
+// hashes needed for signature verification.
+var hashFuncs = map[string]crypto.Hash{
+ KeyAlgoRSA: crypto.SHA1,
+ KeyAlgoDSA: crypto.SHA1,
+ KeyAlgoECDSA256: crypto.SHA256,
+ KeyAlgoECDSA384: crypto.SHA384,
+ KeyAlgoECDSA521: crypto.SHA512,
+ CertAlgoRSAv01: crypto.SHA1,
+ CertAlgoDSAv01: crypto.SHA1,
+ CertAlgoECDSA256v01: crypto.SHA256,
+ CertAlgoECDSA384v01: crypto.SHA384,
+ CertAlgoECDSA521v01: crypto.SHA512,
+}
+
+// unexpectedMessageError results when the SSH message that we received didn't
+// match what we wanted.
+func unexpectedMessageError(expected, got uint8) error {
+ return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
+}
+
+// parseError results from a malformed SSH message.
+func parseError(tag uint8) error {
+ return fmt.Errorf("ssh: parse error in message type %d", tag)
+}
+
+func findCommon(what string, client []string, server []string) (common string, err error) {
+ for _, c := range client {
+ for _, s := range server {
+ if c == s {
+ return c, nil
+ }
+ }
+ }
+ return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
+}
+
+type directionAlgorithms struct {
+ Cipher string
+ MAC string
+ Compression string
+}
+
+type algorithms struct {
+ kex string
+ hostKey string
+ w directionAlgorithms
+ r directionAlgorithms
+}
+
+func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
+ result := &algorithms{}
+
+ result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
+ if err != nil {
+ return
+ }
+
+ result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
+ if err != nil {
+ return
+ }
+
+ result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
+ if err != nil {
+ return
+ }
+
+ result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
+ if err != nil {
+ return
+ }
+
+ result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
+ if err != nil {
+ return
+ }
+
+ result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
+ if err != nil {
+ return
+ }
+
+ result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
+ if err != nil {
+ return
+ }
+
+ result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
+ if err != nil {
+ return
+ }
+
+ return result, nil
+}
+
+// If RekeyThreshold is too small, we can't make any progress sending
+// data.
+const minRekeyThreshold uint64 = 256
+
+// Config contains configuration data common to both ServerConfig and
+// ClientConfig.
+type Config struct {
+ // Rand provides the source of entropy for cryptographic
+ // primitives. If Rand is nil, the cryptographic random reader
+ // in package crypto/rand will be used.
+ Rand io.Reader
+
+ // The maximum number of bytes sent or received after which a
+ // new key is negotiated. It must be at least 256. If
+ // unspecified, 1 gigabyte is used.
+ RekeyThreshold uint64
+
+ // The allowed key exchanges algorithms. If unspecified then a
+ // default set of algorithms is used.
+ KeyExchanges []string
+
+ // The allowed cipher algorithms. If unspecified then a sensible
+ // default is used.
+ Ciphers []string
+
+ // The allowed MAC algorithms. If unspecified then a sensible default
+ // is used.
+ MACs []string
+}
+
+// SetDefaults sets sensible values for unset fields in config. This is
+// exported for testing: Configs passed to SSH functions are copied and have
+// default values set automatically.
+func (c *Config) SetDefaults() {
+ if c.Rand == nil {
+ c.Rand = rand.Reader
+ }
+ if c.Ciphers == nil {
+ c.Ciphers = supportedCiphers
+ }
+ var ciphers []string
+ for _, c := range c.Ciphers {
+ if cipherModes[c] != nil {
+ // keep the cipher only if we have a cipherModes definition for it
+ ciphers = append(ciphers, c)
+ }
+ }
+ c.Ciphers = ciphers
+
+ if c.KeyExchanges == nil {
+ c.KeyExchanges = supportedKexAlgos
+ }
+
+ if c.MACs == nil {
+ c.MACs = supportedMACs
+ }
+
+ if c.RekeyThreshold == 0 {
+ // RFC 4253, section 9 suggests rekeying after 1G.
+ c.RekeyThreshold = 1 << 30
+ }
+ if c.RekeyThreshold < minRekeyThreshold {
+ c.RekeyThreshold = minRekeyThreshold
+ }
+}
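+
+// For illustration: a zero-valued Config passed through SetDefaults ends up
+// with Rand set to rand.Reader, the supported cipher, kex and MAC lists
+// above (ciphers filtered to those with cipherModes entries), and a
+// RekeyThreshold of 1 GiB (1 << 30 bytes).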
+
+// buildDataSignedForAuth returns the data that is signed in order to prove
+// possession of a private key. See RFC 4252, section 7.
+func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
+ data := struct {
+ Session []byte
+ Type byte
+ User string
+ Service string
+ Method string
+ Sign bool
+ Algo []byte
+ PubKey []byte
+ }{
+ sessionId,
+ msgUserAuthRequest,
+ req.User,
+ req.Service,
+ req.Method,
+ true,
+ algo,
+ pubKey,
+ }
+ return Marshal(data)
+}
+
+func appendU16(buf []byte, n uint16) []byte {
+ return append(buf, byte(n>>8), byte(n))
+}
+
+func appendU32(buf []byte, n uint32) []byte {
+ return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
+}
+
+func appendU64(buf []byte, n uint64) []byte {
+ return append(buf,
+ byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32),
+ byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
+}
+
+func appendInt(buf []byte, n int) []byte {
+ return appendU32(buf, uint32(n))
+}
+
+func appendString(buf []byte, s string) []byte {
+ buf = appendU32(buf, uint32(len(s)))
+ buf = append(buf, s...)
+ return buf
+}
+
+func appendBool(buf []byte, b bool) []byte {
+ if b {
+ return append(buf, 1)
+ }
+ return append(buf, 0)
+}
+
+// newCond is a helper to hide the fact that there is no usable zero
+// value for sync.Cond.
+func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) }
+
+// window represents the buffer available to clients
+// wishing to write to a channel.
+type window struct {
+ *sync.Cond
+ win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1
+ writeWaiters int
+ closed bool
+}
+
+// add adds win to the amount of window available
+// for consumers.
+func (w *window) add(win uint32) bool {
+ // a zero sized window adjust is a noop.
+ if win == 0 {
+ return true
+ }
+ w.L.Lock()
+ if w.win+win < win {
+ w.L.Unlock()
+ return false
+ }
+ w.win += win
+ // It is unusual for multiple goroutines to be attempting to reserve
+ // window space, but it can happen. Use broadcast to notify all waiters
+ // that additional window is available.
+ w.Broadcast()
+ w.L.Unlock()
+ return true
+}
+
+// close sets the window to closed, so all reservations fail
+// immediately.
+func (w *window) close() {
+ w.L.Lock()
+ w.closed = true
+ w.Broadcast()
+ w.L.Unlock()
+}
+
+// reserve reserves win from the available window capacity.
+// If no capacity remains, reserve will block. reserve may
+// return less than requested.
+func (w *window) reserve(win uint32) (uint32, error) {
+ var err error
+ w.L.Lock()
+ w.writeWaiters++
+ w.Broadcast()
+ for w.win == 0 && !w.closed {
+ w.Wait()
+ }
+ w.writeWaiters--
+ if w.win < win {
+ win = w.win
+ }
+ w.win -= win
+ if w.closed {
+ err = io.EOF
+ }
+ w.L.Unlock()
+ return win, err
+}
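+
+// Illustrative flow-control sequence (not a test): a writer calling
+// reserve(4096) when the peer has granted only 1024 bytes via add gets
+// 1024 back, sends that much, and loops until all data is written or the
+// window is closed, in which case reserve returns io.EOF.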
+
+// waitWriterBlocked waits until some goroutine is blocked for further
+// writes. It is used in tests only.
+func (w *window) waitWriterBlocked() {
+ w.Cond.L.Lock()
+ for w.writeWaiters == 0 {
+ w.Cond.Wait()
+ }
+ w.Cond.L.Unlock()
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/connection.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/connection.go
new file mode 100644
index 00000000000..979d919e81e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/connection.go
@@ -0,0 +1,144 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "fmt"
+ "net"
+)
+
+// OpenChannelError is returned if the other side rejects an
+// OpenChannel request.
+type OpenChannelError struct {
+ Reason RejectionReason
+ Message string
+}
+
+func (e *OpenChannelError) Error() string {
+ return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
+}
+
+// ConnMetadata holds metadata for the connection.
+type ConnMetadata interface {
+ // User returns the user ID for this connection.
+ // It is empty if no authentication is used.
+ User() string
+
+ // SessionID returns the session hash, also denoted by H.
+ SessionID() []byte
+
+ // ClientVersion returns the client's version string as hashed
+ // into the session ID.
+ ClientVersion() []byte
+
+ // ServerVersion returns the server's version string as hashed
+ // into the session ID.
+ ServerVersion() []byte
+
+ // RemoteAddr returns the remote address for this connection.
+ RemoteAddr() net.Addr
+
+ // LocalAddr returns the local address for this connection.
+ LocalAddr() net.Addr
+}
+
+// Conn represents an SSH connection for both server and client roles.
+// Conn is the basis for implementing an application layer, such
+// as ClientConn, which implements the traditional shell access for
+// clients.
+type Conn interface {
+ ConnMetadata
+
+ // SendRequest sends a global request, and returns the
+ // reply. If wantReply is true, it returns the response status
+ // and payload. See also RFC4254, section 4.
+ SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
+
+ // OpenChannel tries to open a channel. If the request is
+ // rejected, it returns *OpenChannelError. On success it returns
+ // the SSH Channel and a Go channel for incoming, out-of-band
+ // requests. The Go channel must be serviced, or the
+ // connection will hang.
+ OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
+
+ // Close closes the underlying network connection
+ Close() error
+
+ // Wait blocks until the connection has shut down, and returns the
+ // error causing the shutdown.
+ Wait() error
+
+ // TODO(hanwen): consider exposing:
+ // RequestKeyChange
+ // Disconnect
+}
+
+// DiscardRequests consumes and rejects all requests from the
+// passed-in channel.
+func DiscardRequests(in <-chan *Request) {
+ for req := range in {
+ if req.WantReply {
+ req.Reply(false, nil)
+ }
+ }
+}
+
+// A connection represents an incoming connection.
+type connection struct {
+ transport *handshakeTransport
+ sshConn
+
+ // The connection protocol.
+ *mux
+}
+
+func (c *connection) Close() error {
+ return c.sshConn.conn.Close()
+}
+
+// sshConn provides net.Conn metadata, but disallows direct reads and
+// writes.
+type sshConn struct {
+ conn net.Conn
+
+ user string
+ sessionID []byte
+ clientVersion []byte
+ serverVersion []byte
+}
+
+func dup(src []byte) []byte {
+ dst := make([]byte, len(src))
+ copy(dst, src)
+ return dst
+}
+
+func (c *sshConn) User() string {
+ return c.user
+}
+
+func (c *sshConn) RemoteAddr() net.Addr {
+ return c.conn.RemoteAddr()
+}
+
+func (c *sshConn) Close() error {
+ return c.conn.Close()
+}
+
+func (c *sshConn) LocalAddr() net.Addr {
+ return c.conn.LocalAddr()
+}
+
+func (c *sshConn) SessionID() []byte {
+ return dup(c.sessionID)
+}
+
+func (c *sshConn) ClientVersion() []byte {
+ return dup(c.clientVersion)
+}
+
+func (c *sshConn) ServerVersion() []byte {
+ return dup(c.serverVersion)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/doc.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/doc.go
new file mode 100644
index 00000000000..d6be8946629
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package ssh implements an SSH client and server.
+
+SSH is a transport security protocol, an authentication protocol and a
+family of application protocols. The most typical application level
+protocol is a remote shell and this is specifically implemented. However,
+the multiplexed nature of SSH is exposed to users that wish to support
+others.
+
+References:
+ [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
+ [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
+*/
+package ssh // import "golang.org/x/crypto/ssh"
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/example_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/example_test.go
new file mode 100644
index 00000000000..dfd9dcab606
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/example_test.go
@@ -0,0 +1,211 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh_test
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+func ExampleNewServerConn() {
+ // An SSH server is represented by a ServerConfig, which holds
+ // certificate details and handles authentication of ServerConns.
+ config := &ssh.ServerConfig{
+ PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
+ // Should use constant-time compare (or better, salt+hash) in
+ // a production setting.
+ if c.User() == "testuser" && string(pass) == "tiger" {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("password rejected for %q", c.User())
+ },
+ }
+
+ privateBytes, err := ioutil.ReadFile("id_rsa")
+ if err != nil {
+ panic("Failed to load private key")
+ }
+
+ private, err := ssh.ParsePrivateKey(privateBytes)
+ if err != nil {
+ panic("Failed to parse private key")
+ }
+
+ config.AddHostKey(private)
+
+ // Once a ServerConfig has been configured, connections can be
+ // accepted.
+ listener, err := net.Listen("tcp", "0.0.0.0:2022")
+ if err != nil {
+ panic("failed to listen for connection")
+ }
+ nConn, err := listener.Accept()
+ if err != nil {
+ panic("failed to accept incoming connection")
+ }
+
+ // Before use, a handshake must be performed on the incoming
+ // net.Conn.
+ _, chans, reqs, err := ssh.NewServerConn(nConn, config)
+ if err != nil {
+ panic("failed to handshake")
+ }
+ // The incoming Request channel must be serviced.
+ go ssh.DiscardRequests(reqs)
+
+ // Service the incoming Channel channel.
+ for newChannel := range chans {
+ // Channels have a type, depending on the application level
+ // protocol intended. In the case of a shell, the type is
+ // "session" and ServerShell may be used to present a simple
+ // terminal interface.
+ if newChannel.ChannelType() != "session" {
+ newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
+ continue
+ }
+ channel, requests, err := newChannel.Accept()
+ if err != nil {
+ panic("could not accept channel.")
+ }
+
+ // Sessions have out-of-band requests such as "shell",
+ // "pty-req" and "env". Here we handle only the
+ // "shell" request.
+ go func(in <-chan *ssh.Request) {
+ for req := range in {
+ ok := false
+ switch req.Type {
+ case "shell":
+ ok = true
+ if len(req.Payload) > 0 {
+ // We don't accept any
+ // commands, only the
+ // default shell.
+ ok = false
+ }
+ }
+ req.Reply(ok, nil)
+ }
+ }(requests)
+
+ term := terminal.NewTerminal(channel, "> ")
+
+ go func() {
+ defer channel.Close()
+ for {
+ line, err := term.ReadLine()
+ if err != nil {
+ break
+ }
+ fmt.Println(line)
+ }
+ }()
+ }
+}
+
+func ExampleDial() {
+	// An SSH client is represented with a ClientConn. This example
+	// authenticates with the "password" method.
+ //
+ // To authenticate with the remote server you must pass at least one
+ // implementation of AuthMethod via the Auth field in ClientConfig.
+ config := &ssh.ClientConfig{
+ User: "username",
+ Auth: []ssh.AuthMethod{
+ ssh.Password("yourpassword"),
+ },
+ }
+ client, err := ssh.Dial("tcp", "yourserver.com:22", config)
+ if err != nil {
+ panic("Failed to dial: " + err.Error())
+ }
+
+ // Each ClientConn can support multiple interactive sessions,
+ // represented by a Session.
+ session, err := client.NewSession()
+ if err != nil {
+ panic("Failed to create session: " + err.Error())
+ }
+ defer session.Close()
+
+ // Once a Session is created, you can execute a single command on
+ // the remote side using the Run method.
+ var b bytes.Buffer
+ session.Stdout = &b
+ if err := session.Run("/usr/bin/whoami"); err != nil {
+ panic("Failed to run: " + err.Error())
+ }
+ fmt.Println(b.String())
+}
+
+func ExampleClient_Listen() {
+ config := &ssh.ClientConfig{
+ User: "username",
+ Auth: []ssh.AuthMethod{
+ ssh.Password("password"),
+ },
+ }
+ // Dial your ssh server.
+ conn, err := ssh.Dial("tcp", "localhost:22", config)
+ if err != nil {
+ log.Fatalf("unable to connect: %s", err)
+ }
+ defer conn.Close()
+
+ // Request the remote side to open port 8080 on all interfaces.
+ l, err := conn.Listen("tcp", "0.0.0.0:8080")
+ if err != nil {
+ log.Fatalf("unable to register tcp forward: %v", err)
+ }
+ defer l.Close()
+
+ // Serve HTTP with your SSH server acting as a reverse proxy.
+ http.Serve(l, http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
+ fmt.Fprintf(resp, "Hello world!\n")
+ }))
+}
+
+func ExampleSession_RequestPty() {
+ // Create client config
+ config := &ssh.ClientConfig{
+ User: "username",
+ Auth: []ssh.AuthMethod{
+ ssh.Password("password"),
+ },
+ }
+ // Connect to ssh server
+ conn, err := ssh.Dial("tcp", "localhost:22", config)
+ if err != nil {
+ log.Fatalf("unable to connect: %s", err)
+ }
+ defer conn.Close()
+ // Create a session
+ session, err := conn.NewSession()
+ if err != nil {
+ log.Fatalf("unable to create session: %s", err)
+ }
+ defer session.Close()
+ // Set up terminal modes
+ modes := ssh.TerminalModes{
+ ssh.ECHO: 0, // disable echoing
+ ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
+ ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
+ }
+ // Request pseudo terminal
+ if err := session.RequestPty("xterm", 80, 40, modes); err != nil {
+ log.Fatalf("request for pseudo terminal failed: %s", err)
+ }
+ // Start remote shell
+ if err := session.Shell(); err != nil {
+ log.Fatalf("failed to start shell: %s", err)
+ }
+}
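The examples above authenticate with passwords only. A public-key variant is a small change; the sketch below is not part of the vendored file, and the key path and server address are assumptions:

package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Sketch: the same Dial flow as ExampleDial, using public-key
	// authentication instead of a password.
	pemBytes, err := ioutil.ReadFile("id_rsa") // assumed key location
	if err != nil {
		log.Fatalf("unable to read private key: %v", err)
	}
	signer, err := ssh.ParsePrivateKey(pemBytes)
	if err != nil {
		log.Fatalf("unable to parse private key: %v", err)
	}
	config := &ssh.ClientConfig{
		User: "username",
		Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
	}
	client, err := ssh.Dial("tcp", "yourserver.com:22", config)
	if err != nil {
		log.Fatalf("unable to connect: %v", err)
	}
	defer client.Close()
}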
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/handshake.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/handshake.go
new file mode 100644
index 00000000000..1c54f758781
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/handshake.go
@@ -0,0 +1,412 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "sync"
+)
+
+// debugHandshake, if set, prints messages sent and received. Key
+// exchange messages are printed as if DH were used, so the debug
+// messages are wrong when using ECDH.
+const debugHandshake = false
+
+// keyingTransport is a packet-based transport that supports key
+// changes. It need not be thread-safe. It should pass through
+// msgNewKeys in both directions.
+type keyingTransport interface {
+ packetConn
+
+ // prepareKeyChange sets up a key change. The key change for a
+ // direction will be effected if a msgNewKeys message is sent
+ // or received.
+ prepareKeyChange(*algorithms, *kexResult) error
+
+ // getSessionID returns the session ID. prepareKeyChange must
+ // have been called once.
+ getSessionID() []byte
+}
+
+// rekeyingTransport is the interface of handshakeTransport that we
+// (internally) expose to ClientConn and ServerConn.
+type rekeyingTransport interface {
+ packetConn
+
+ // requestKeyChange asks the remote side to change keys. All
+ // writes are blocked until the key change succeeds, which is
+ // signaled by reading a msgNewKeys.
+ requestKeyChange() error
+
+ // getSessionID returns the session ID. This is only valid
+ // after the first key change has completed.
+ getSessionID() []byte
+}
+
+// handshakeTransport implements rekeying on top of a keyingTransport
+// and offers a thread-safe writePacket() interface.
+type handshakeTransport struct {
+ conn keyingTransport
+ config *Config
+
+ serverVersion []byte
+ clientVersion []byte
+
+ // hostKeys is non-empty if we are the server. In that case,
+ // it contains all host keys that can be used to sign the
+ // connection.
+ hostKeys []Signer
+
+ // hostKeyAlgorithms is non-empty if we are the client. In that case,
+ // we accept these key types from the server as host key.
+ hostKeyAlgorithms []string
+
+ // On read error, incoming is closed, and readError is set.
+ incoming chan []byte
+ readError error
+
+ // data for host key checking
+ hostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
+ dialAddress string
+ remoteAddr net.Addr
+
+ readSinceKex uint64
+
+ // Protects the writing side of the connection
+ mu sync.Mutex
+ cond *sync.Cond
+ sentInitPacket []byte
+ sentInitMsg *kexInitMsg
+ writtenSinceKex uint64
+ writeError error
+}
+
+func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport {
+ t := &handshakeTransport{
+ conn: conn,
+ serverVersion: serverVersion,
+ clientVersion: clientVersion,
+ incoming: make(chan []byte, 16),
+ config: config,
+ }
+ t.cond = sync.NewCond(&t.mu)
+ return t
+}
+
+func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport {
+ t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
+ t.dialAddress = dialAddr
+ t.remoteAddr = addr
+ t.hostKeyCallback = config.HostKeyCallback
+ if config.HostKeyAlgorithms != nil {
+ t.hostKeyAlgorithms = config.HostKeyAlgorithms
+ } else {
+ t.hostKeyAlgorithms = supportedHostKeyAlgos
+ }
+ go t.readLoop()
+ return t
+}
+
+func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport {
+ t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
+ t.hostKeys = config.hostKeys
+ go t.readLoop()
+ return t
+}
+
+func (t *handshakeTransport) getSessionID() []byte {
+ return t.conn.getSessionID()
+}
+
+func (t *handshakeTransport) id() string {
+ if len(t.hostKeys) > 0 {
+ return "server"
+ }
+ return "client"
+}
+
+func (t *handshakeTransport) readPacket() ([]byte, error) {
+ p, ok := <-t.incoming
+ if !ok {
+ return nil, t.readError
+ }
+ return p, nil
+}
+
+func (t *handshakeTransport) readLoop() {
+ for {
+ p, err := t.readOnePacket()
+ if err != nil {
+ t.readError = err
+ close(t.incoming)
+ break
+ }
+ if p[0] == msgIgnore || p[0] == msgDebug {
+ continue
+ }
+ t.incoming <- p
+ }
+
+ // If we can't read, declare the writing part dead too.
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.writeError == nil {
+ t.writeError = t.readError
+ }
+ t.cond.Broadcast()
+}
+
+func (t *handshakeTransport) readOnePacket() ([]byte, error) {
+ if t.readSinceKex > t.config.RekeyThreshold {
+ if err := t.requestKeyChange(); err != nil {
+ return nil, err
+ }
+ }
+
+ p, err := t.conn.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ t.readSinceKex += uint64(len(p))
+ if debugHandshake {
+ msg, err := decode(p)
+ log.Printf("%s got %T %v (%v)", t.id(), msg, msg, err)
+ }
+ if p[0] != msgKexInit {
+ return p, nil
+ }
+ err = t.enterKeyExchange(p)
+
+ t.mu.Lock()
+ if err != nil {
+ // drop connection
+ t.conn.Close()
+ t.writeError = err
+ }
+
+ if debugHandshake {
+ log.Printf("%s exited key exchange, err %v", t.id(), err)
+ }
+
+ // Unblock writers.
+ t.sentInitMsg = nil
+ t.sentInitPacket = nil
+ t.cond.Broadcast()
+ t.writtenSinceKex = 0
+ t.mu.Unlock()
+
+ if err != nil {
+ return nil, err
+ }
+
+ t.readSinceKex = 0
+ return []byte{msgNewKeys}, nil
+}
+
+// sendKexInit sends a key change message, and returns the message
+// that was sent. After initiating the key change, all writes will be
+// blocked until the change is done, and a failed key change will
+// close the underlying transport. This function is safe for
+// concurrent use by multiple goroutines.
+func (t *handshakeTransport) sendKexInit() (*kexInitMsg, []byte, error) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ return t.sendKexInitLocked()
+}
+
+func (t *handshakeTransport) requestKeyChange() error {
+ _, _, err := t.sendKexInit()
+ return err
+}
+
+// sendKexInitLocked sends a key change message. t.mu must be locked
+// while this happens.
+func (t *handshakeTransport) sendKexInitLocked() (*kexInitMsg, []byte, error) {
+ // kexInits may be sent either in response to the other side,
+ // or because our side wants to initiate a key change, so we
+ // may have already sent a kexInit. In that case, don't send a
+ // second kexInit.
+ if t.sentInitMsg != nil {
+ return t.sentInitMsg, t.sentInitPacket, nil
+ }
+ msg := &kexInitMsg{
+ KexAlgos: t.config.KeyExchanges,
+ CiphersClientServer: t.config.Ciphers,
+ CiphersServerClient: t.config.Ciphers,
+ MACsClientServer: t.config.MACs,
+ MACsServerClient: t.config.MACs,
+ CompressionClientServer: supportedCompressions,
+ CompressionServerClient: supportedCompressions,
+ }
+	if _, err := io.ReadFull(rand.Reader, msg.Cookie[:]); err != nil {
+		return nil, nil, err
+	}
+
+ if len(t.hostKeys) > 0 {
+ for _, k := range t.hostKeys {
+ msg.ServerHostKeyAlgos = append(
+ msg.ServerHostKeyAlgos, k.PublicKey().Type())
+ }
+ } else {
+ msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
+ }
+ packet := Marshal(msg)
+
+ // writePacket destroys the contents, so save a copy.
+ packetCopy := make([]byte, len(packet))
+ copy(packetCopy, packet)
+
+ if err := t.conn.writePacket(packetCopy); err != nil {
+ return nil, nil, err
+ }
+
+ t.sentInitMsg = msg
+ t.sentInitPacket = packet
+ return msg, packet, nil
+}
+
+func (t *handshakeTransport) writePacket(p []byte) error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if t.writtenSinceKex > t.config.RekeyThreshold {
+ t.sendKexInitLocked()
+ }
+ for t.sentInitMsg != nil && t.writeError == nil {
+ t.cond.Wait()
+ }
+ if t.writeError != nil {
+ return t.writeError
+ }
+ t.writtenSinceKex += uint64(len(p))
+
+ switch p[0] {
+ case msgKexInit:
+ return errors.New("ssh: only handshakeTransport can send kexInit")
+ case msgNewKeys:
+ return errors.New("ssh: only handshakeTransport can send newKeys")
+ default:
+ return t.conn.writePacket(p)
+ }
+}
+
+func (t *handshakeTransport) Close() error {
+ return t.conn.Close()
+}
+
+// enterKeyExchange runs the key exchange.
+func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
+ if debugHandshake {
+ log.Printf("%s entered key exchange", t.id())
+ }
+ myInit, myInitPacket, err := t.sendKexInit()
+ if err != nil {
+ return err
+ }
+
+ otherInit := &kexInitMsg{}
+ if err := Unmarshal(otherInitPacket, otherInit); err != nil {
+ return err
+ }
+
+ magics := handshakeMagics{
+ clientVersion: t.clientVersion,
+ serverVersion: t.serverVersion,
+ clientKexInit: otherInitPacket,
+ serverKexInit: myInitPacket,
+ }
+
+ clientInit := otherInit
+ serverInit := myInit
+ if len(t.hostKeys) == 0 {
+ clientInit = myInit
+ serverInit = otherInit
+
+ magics.clientKexInit = myInitPacket
+ magics.serverKexInit = otherInitPacket
+ }
+
+ algs, err := findAgreedAlgorithms(clientInit, serverInit)
+ if err != nil {
+ return err
+ }
+
+ // We don't send FirstKexFollows, but we handle receiving it.
+ if otherInit.FirstKexFollows && algs.kex != otherInit.KexAlgos[0] {
+ // other side sent a kex message for the wrong algorithm,
+ // which we have to ignore.
+ if _, err := t.conn.readPacket(); err != nil {
+ return err
+ }
+ }
+
+ kex, ok := kexAlgoMap[algs.kex]
+ if !ok {
+ return fmt.Errorf("ssh: unexpected key exchange algorithm %v", algs.kex)
+ }
+
+ var result *kexResult
+ if len(t.hostKeys) > 0 {
+ result, err = t.server(kex, algs, &magics)
+ } else {
+ result, err = t.client(kex, algs, &magics)
+ }
+
+ if err != nil {
+ return err
+ }
+
+	if err = t.conn.prepareKeyChange(algs, result); err != nil {
+		return err
+	}
+ if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
+ return err
+ }
+ if packet, err := t.conn.readPacket(); err != nil {
+ return err
+ } else if packet[0] != msgNewKeys {
+ return unexpectedMessageError(msgNewKeys, packet[0])
+ }
+ return nil
+}
+
+func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
+ var hostKey Signer
+ for _, k := range t.hostKeys {
+ if algs.hostKey == k.PublicKey().Type() {
+ hostKey = k
+ }
+ }
+
+	return kex.Server(t.conn, t.config.Rand, magics, hostKey)
+}
+
+func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
+ result, err := kex.Client(t.conn, t.config.Rand, magics)
+ if err != nil {
+ return nil, err
+ }
+
+ hostKey, err := ParsePublicKey(result.HostKey)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := verifyHostKeySignature(hostKey, result); err != nil {
+ return nil, err
+ }
+
+ if t.hostKeyCallback != nil {
+ err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return result, nil
+}
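readOnePacket and writePacket above compare byte counters against Config.RekeyThreshold, so callers can force more frequent automatic re-keying purely through configuration. A sketch, assuming a reachable server and valid credentials:

package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User: "username",
		Auth: []ssh.AuthMethod{ssh.Password("password")},
	}
	// RekeyThreshold feeds the counters checked in readOnePacket and
	// writePacket; a low value forces frequent key re-exchange.
	config.RekeyThreshold = 1 << 20 // re-key after about 1 MiB each way
	client, err := ssh.Dial("tcp", "localhost:22", config)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer client.Close()
}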
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/handshake_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/handshake_test.go
new file mode 100644
index 00000000000..b86d369cced
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/handshake_test.go
@@ -0,0 +1,415 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "net"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+)
+
+type testChecker struct {
+ calls []string
+}
+
+func (t *testChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error {
+ if dialAddr == "bad" {
+ return fmt.Errorf("dialAddr is bad")
+ }
+
+ if tcpAddr, ok := addr.(*net.TCPAddr); !ok || tcpAddr == nil {
+ return fmt.Errorf("testChecker: got %T want *net.TCPAddr", addr)
+ }
+
+ t.calls = append(t.calls, fmt.Sprintf("%s %v %s %x", dialAddr, addr, key.Type(), key.Marshal()))
+
+ return nil
+}
+
+// netPipe is analogous to net.Pipe, but it uses a real net.Conn, and
+// therefore is buffered (net.Pipe deadlocks if both sides start with
+// a write.)
+func netPipe() (net.Conn, net.Conn, error) {
+ listener, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, nil, err
+ }
+ defer listener.Close()
+ c1, err := net.Dial("tcp", listener.Addr().String())
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c2, err := listener.Accept()
+ if err != nil {
+ c1.Close()
+ return nil, nil, err
+ }
+
+ return c1, c2, nil
+}
+
+func handshakePair(clientConf *ClientConfig, addr string) (client *handshakeTransport, server *handshakeTransport, err error) {
+ a, b, err := netPipe()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ trC := newTransport(a, rand.Reader, true)
+ trS := newTransport(b, rand.Reader, false)
+ clientConf.SetDefaults()
+
+ v := []byte("version")
+ client = newClientTransport(trC, v, v, clientConf, addr, a.RemoteAddr())
+
+ serverConf := &ServerConfig{}
+ serverConf.AddHostKey(testSigners["ecdsa"])
+ serverConf.AddHostKey(testSigners["rsa"])
+ serverConf.SetDefaults()
+ server = newServerTransport(trS, v, v, serverConf)
+
+ return client, server, nil
+}
+
+func TestHandshakeBasic(t *testing.T) {
+ if runtime.GOOS == "plan9" {
+ t.Skip("see golang.org/issue/7237")
+ }
+ checker := &testChecker{}
+ trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr")
+ if err != nil {
+ t.Fatalf("handshakePair: %v", err)
+ }
+
+ defer trC.Close()
+ defer trS.Close()
+
+	go func() {
+		defer trC.Close()
+		// Client writes a bunch of stuff, and does a key
+		// change in the middle. This should not confuse the
+		// handshake in progress.
+		for i := 0; i < 10; i++ {
+			p := []byte{msgRequestSuccess, byte(i)}
+			if err := trC.writePacket(p); err != nil {
+				// t.Fatalf must not be called from a goroutine
+				// other than the test's own; report and stop.
+				t.Errorf("writePacket: %v", err)
+				return
+			}
+			if i == 5 {
+				// halfway through, we request a key change.
+				if _, _, err := trC.sendKexInit(); err != nil {
+					t.Errorf("sendKexInit: %v", err)
+					return
+				}
+			}
+		}
+	}()
+
+ // Server checks that client messages come in cleanly
+ i := 0
+ for {
+ p, err := trS.readPacket()
+ if err != nil {
+ break
+ }
+ if p[0] == msgNewKeys {
+ continue
+ }
+ want := []byte{msgRequestSuccess, byte(i)}
+		if !bytes.Equal(p, want) {
+ t.Errorf("message %d: got %q, want %q", i, p, want)
+ }
+ i++
+ }
+ if i != 10 {
+ t.Errorf("received %d messages, want 10.", i)
+ }
+
+ // If all went well, we registered exactly 1 key change.
+ if len(checker.calls) != 1 {
+ t.Fatalf("got %d host key checks, want 1", len(checker.calls))
+ }
+
+ pub := testSigners["ecdsa"].PublicKey()
+ want := fmt.Sprintf("%s %v %s %x", "addr", trC.remoteAddr, pub.Type(), pub.Marshal())
+ if want != checker.calls[0] {
+ t.Errorf("got %q want %q for host key check", checker.calls[0], want)
+ }
+}
+
+func TestHandshakeError(t *testing.T) {
+ checker := &testChecker{}
+ trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "bad")
+ if err != nil {
+ t.Fatalf("handshakePair: %v", err)
+ }
+ defer trC.Close()
+ defer trS.Close()
+
+ // send a packet
+ packet := []byte{msgRequestSuccess, 42}
+ if err := trC.writePacket(packet); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+
+ // Now request a key change.
+ _, _, err = trC.sendKexInit()
+ if err != nil {
+ t.Errorf("sendKexInit: %v", err)
+ }
+
+ // the key change will fail, and afterwards we can't write.
+ if err := trC.writePacket([]byte{msgRequestSuccess, 43}); err == nil {
+ t.Errorf("writePacket after botched rekey succeeded.")
+ }
+
+ readback, err := trS.readPacket()
+ if err != nil {
+ t.Fatalf("server closed too soon: %v", err)
+ }
+	if !bytes.Equal(readback, packet) {
+ t.Errorf("got %q want %q", readback, packet)
+ }
+ readback, err = trS.readPacket()
+ if err == nil {
+ t.Errorf("got a message %q after failed key change", readback)
+ }
+}
+
+func TestHandshakeTwice(t *testing.T) {
+ checker := &testChecker{}
+ trC, trS, err := handshakePair(&ClientConfig{HostKeyCallback: checker.Check}, "addr")
+ if err != nil {
+ t.Fatalf("handshakePair: %v", err)
+ }
+
+ defer trC.Close()
+ defer trS.Close()
+
+ // send a packet
+ packet := make([]byte, 5)
+ packet[0] = msgRequestSuccess
+ if err := trC.writePacket(packet); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+
+ // Now request a key change.
+ _, _, err = trC.sendKexInit()
+ if err != nil {
+ t.Errorf("sendKexInit: %v", err)
+ }
+
+ // Send another packet. Use a fresh one, since writePacket destroys.
+ packet = make([]byte, 5)
+ packet[0] = msgRequestSuccess
+ if err := trC.writePacket(packet); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+
+ // 2nd key change.
+ _, _, err = trC.sendKexInit()
+ if err != nil {
+ t.Errorf("sendKexInit: %v", err)
+ }
+
+ packet = make([]byte, 5)
+ packet[0] = msgRequestSuccess
+ if err := trC.writePacket(packet); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+
+ packet = make([]byte, 5)
+ packet[0] = msgRequestSuccess
+ for i := 0; i < 5; i++ {
+ msg, err := trS.readPacket()
+ if err != nil {
+ t.Fatalf("server closed too soon: %v", err)
+ }
+ if msg[0] == msgNewKeys {
+ continue
+ }
+
+		if !bytes.Equal(msg, packet) {
+ t.Errorf("packet %d: got %q want %q", i, msg, packet)
+ }
+ }
+ if len(checker.calls) != 2 {
+ t.Errorf("got %d key changes, want 2", len(checker.calls))
+ }
+}
+
+func TestHandshakeAutoRekeyWrite(t *testing.T) {
+ checker := &testChecker{}
+ clientConf := &ClientConfig{HostKeyCallback: checker.Check}
+ clientConf.RekeyThreshold = 500
+ trC, trS, err := handshakePair(clientConf, "addr")
+ if err != nil {
+ t.Fatalf("handshakePair: %v", err)
+ }
+ defer trC.Close()
+ defer trS.Close()
+
+ for i := 0; i < 5; i++ {
+ packet := make([]byte, 251)
+ packet[0] = msgRequestSuccess
+ if err := trC.writePacket(packet); err != nil {
+ t.Errorf("writePacket: %v", err)
+ }
+ }
+
+ j := 0
+ for ; j < 5; j++ {
+ _, err := trS.readPacket()
+ if err != nil {
+ break
+ }
+ }
+
+ if j != 5 {
+ t.Errorf("got %d, want 5 messages", j)
+ }
+
+ if len(checker.calls) != 2 {
+ t.Errorf("got %d key changes, wanted 2", len(checker.calls))
+ }
+}
+
+type syncChecker struct {
+ called chan int
+}
+
+func (t *syncChecker) Check(dialAddr string, addr net.Addr, key PublicKey) error {
+ t.called <- 1
+ return nil
+}
+
+func TestHandshakeAutoRekeyRead(t *testing.T) {
+ sync := &syncChecker{make(chan int, 2)}
+ clientConf := &ClientConfig{
+ HostKeyCallback: sync.Check,
+ }
+ clientConf.RekeyThreshold = 500
+
+ trC, trS, err := handshakePair(clientConf, "addr")
+ if err != nil {
+ t.Fatalf("handshakePair: %v", err)
+ }
+ defer trC.Close()
+ defer trS.Close()
+
+ packet := make([]byte, 501)
+ packet[0] = msgRequestSuccess
+ if err := trS.writePacket(packet); err != nil {
+ t.Fatalf("writePacket: %v", err)
+ }
+ // While we read out the packet, a key change will be
+ // initiated.
+ if _, err := trC.readPacket(); err != nil {
+ t.Fatalf("readPacket(client): %v", err)
+ }
+
+ <-sync.called
+}
+
+// errorKeyingTransport generates errors after a given number of
+// read/write operations.
+type errorKeyingTransport struct {
+ packetConn
+ readLeft, writeLeft int
+}
+
+func (n *errorKeyingTransport) prepareKeyChange(*algorithms, *kexResult) error {
+ return nil
+}
+func (n *errorKeyingTransport) getSessionID() []byte {
+ return nil
+}
+
+func (n *errorKeyingTransport) writePacket(packet []byte) error {
+ if n.writeLeft == 0 {
+ n.Close()
+ return errors.New("barf")
+ }
+
+ n.writeLeft--
+ return n.packetConn.writePacket(packet)
+}
+
+func (n *errorKeyingTransport) readPacket() ([]byte, error) {
+ if n.readLeft == 0 {
+ n.Close()
+ return nil, errors.New("barf")
+ }
+
+ n.readLeft--
+ return n.packetConn.readPacket()
+}
+
+func TestHandshakeErrorHandlingRead(t *testing.T) {
+ for i := 0; i < 20; i++ {
+ testHandshakeErrorHandlingN(t, i, -1)
+ }
+}
+
+func TestHandshakeErrorHandlingWrite(t *testing.T) {
+ for i := 0; i < 20; i++ {
+ testHandshakeErrorHandlingN(t, -1, i)
+ }
+}
+
+// testHandshakeErrorHandlingN runs handshakes, injecting errors. If
+// handshakeTransport deadlocks, the Go runtime will detect it and
+// panic.
+func testHandshakeErrorHandlingN(t *testing.T, readLimit, writeLimit int) {
+ msg := Marshal(&serviceRequestMsg{strings.Repeat("x", int(minRekeyThreshold)/4)})
+
+ a, b := memPipe()
+ defer a.Close()
+ defer b.Close()
+
+ key := testSigners["ecdsa"]
+ serverConf := Config{RekeyThreshold: minRekeyThreshold}
+ serverConf.SetDefaults()
+ serverConn := newHandshakeTransport(&errorKeyingTransport{a, readLimit, writeLimit}, &serverConf, []byte{'a'}, []byte{'b'})
+ serverConn.hostKeys = []Signer{key}
+ go serverConn.readLoop()
+
+ clientConf := Config{RekeyThreshold: 10 * minRekeyThreshold}
+ clientConf.SetDefaults()
+ clientConn := newHandshakeTransport(&errorKeyingTransport{b, -1, -1}, &clientConf, []byte{'a'}, []byte{'b'})
+ clientConn.hostKeyAlgorithms = []string{key.PublicKey().Type()}
+ go clientConn.readLoop()
+
+ var wg sync.WaitGroup
+ wg.Add(4)
+
+ for _, hs := range []packetConn{serverConn, clientConn} {
+ go func(c packetConn) {
+ for {
+ err := c.writePacket(msg)
+ if err != nil {
+ break
+ }
+ }
+ wg.Done()
+ }(hs)
+ go func(c packetConn) {
+ for {
+ _, err := c.readPacket()
+ if err != nil {
+ break
+ }
+ }
+ wg.Done()
+ }(hs)
+ }
+
+ wg.Wait()
+}
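The netPipe helper exists because net.Pipe is fully synchronous: a Write blocks until the peer Reads, so two endpoints that both begin with a write deadlock. A self-contained demonstration of that behavior:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// net.Pipe is unbuffered: a Write blocks until the other side Reads.
	c1, c2 := net.Pipe()
	defer c1.Close()
	defer c2.Close()

	done := make(chan struct{})
	go func() {
		c1.Write([]byte("hello")) // blocks: nobody is reading from c2
		close(done)
	}()

	select {
	case <-done:
		fmt.Println("write completed")
	case <-time.After(100 * time.Millisecond):
		fmt.Println("write still blocked: net.Pipe is unbuffered")
	}
}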
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/kex.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/kex.go
new file mode 100644
index 00000000000..3ec603c0a15
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/kex.go
@@ -0,0 +1,526 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/subtle"
+ "errors"
+ "io"
+ "math/big"
+
+ "golang.org/x/crypto/curve25519"
+)
+
+const (
+ kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
+ kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
+ kexAlgoECDH256 = "ecdh-sha2-nistp256"
+ kexAlgoECDH384 = "ecdh-sha2-nistp384"
+ kexAlgoECDH521 = "ecdh-sha2-nistp521"
+ kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org"
+)
+
+// kexResult captures the outcome of a key exchange.
+type kexResult struct {
+ // Session hash. See also RFC 4253, section 8.
+ H []byte
+
+ // Shared secret. See also RFC 4253, section 8.
+ K []byte
+
+ // Host key as hashed into H.
+ HostKey []byte
+
+ // Signature of H.
+ Signature []byte
+
+ // A cryptographic hash function that matches the security
+ // level of the key exchange algorithm. It is used for
+ // calculating H, and for deriving keys from H and K.
+ Hash crypto.Hash
+
+ // The session ID, which is the first H computed. This is used
+ // to signal data inside transport.
+ SessionID []byte
+}
+
+// handshakeMagics contains data that is always included in the
+// session hash.
+type handshakeMagics struct {
+ clientVersion, serverVersion []byte
+ clientKexInit, serverKexInit []byte
+}
+
+func (m *handshakeMagics) write(w io.Writer) {
+ writeString(w, m.clientVersion)
+ writeString(w, m.serverVersion)
+ writeString(w, m.clientKexInit)
+ writeString(w, m.serverKexInit)
+}
+
+// kexAlgorithm abstracts different key exchange algorithms.
+type kexAlgorithm interface {
+ // Server runs server-side key agreement, signing the result
+ // with a hostkey.
+ Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
+
+ // Client runs the client-side key agreement. Caller is
+ // responsible for verifying the host key signature.
+ Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error)
+}
+
+// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
+type dhGroup struct {
+ g, p *big.Int
+}
+
+func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
+ if theirPublic.Sign() <= 0 || theirPublic.Cmp(group.p) >= 0 {
+ return nil, errors.New("ssh: DH parameter out of bounds")
+ }
+ return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil
+}
+
+func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
+ hashFunc := crypto.SHA1
+
+ x, err := rand.Int(randSource, group.p)
+ if err != nil {
+ return nil, err
+ }
+ X := new(big.Int).Exp(group.g, x, group.p)
+ kexDHInit := kexDHInitMsg{
+ X: X,
+ }
+ if err := c.writePacket(Marshal(&kexDHInit)); err != nil {
+ return nil, err
+ }
+
+ packet, err := c.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ var kexDHReply kexDHReplyMsg
+ if err = Unmarshal(packet, &kexDHReply); err != nil {
+ return nil, err
+ }
+
+ kInt, err := group.diffieHellman(kexDHReply.Y, x)
+ if err != nil {
+ return nil, err
+ }
+
+ h := hashFunc.New()
+ magics.write(h)
+ writeString(h, kexDHReply.HostKey)
+ writeInt(h, X)
+ writeInt(h, kexDHReply.Y)
+ K := make([]byte, intLength(kInt))
+ marshalInt(K, kInt)
+ h.Write(K)
+
+ return &kexResult{
+ H: h.Sum(nil),
+ K: K,
+ HostKey: kexDHReply.HostKey,
+ Signature: kexDHReply.Signature,
+ Hash: crypto.SHA1,
+ }, nil
+}
+
+func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
+ hashFunc := crypto.SHA1
+ packet, err := c.readPacket()
+ if err != nil {
+ return
+ }
+ var kexDHInit kexDHInitMsg
+ if err = Unmarshal(packet, &kexDHInit); err != nil {
+ return
+ }
+
+ y, err := rand.Int(randSource, group.p)
+ if err != nil {
+ return
+ }
+
+ Y := new(big.Int).Exp(group.g, y, group.p)
+ kInt, err := group.diffieHellman(kexDHInit.X, y)
+ if err != nil {
+ return nil, err
+ }
+
+ hostKeyBytes := priv.PublicKey().Marshal()
+
+ h := hashFunc.New()
+ magics.write(h)
+ writeString(h, hostKeyBytes)
+ writeInt(h, kexDHInit.X)
+ writeInt(h, Y)
+
+ K := make([]byte, intLength(kInt))
+ marshalInt(K, kInt)
+ h.Write(K)
+
+ H := h.Sum(nil)
+
+ // H is already a hash, but the hostkey signing will apply its
+ // own key-specific hash algorithm.
+ sig, err := signAndMarshal(priv, randSource, H)
+ if err != nil {
+ return nil, err
+ }
+
+ kexDHReply := kexDHReplyMsg{
+ HostKey: hostKeyBytes,
+ Y: Y,
+ Signature: sig,
+ }
+ packet = Marshal(&kexDHReply)
+
+ err = c.writePacket(packet)
+ return &kexResult{
+ H: H,
+ K: K,
+ HostKey: hostKeyBytes,
+ Signature: sig,
+ Hash: crypto.SHA1,
+	}, err
+}
+
+// ecdh performs Elliptic Curve Diffie-Hellman key exchange as
+// described in RFC 5656, section 4.
+type ecdh struct {
+ curve elliptic.Curve
+}
+
+func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
+ ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
+ if err != nil {
+ return nil, err
+ }
+
+ kexInit := kexECDHInitMsg{
+ ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y),
+ }
+
+ serialized := Marshal(&kexInit)
+ if err := c.writePacket(serialized); err != nil {
+ return nil, err
+ }
+
+ packet, err := c.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ var reply kexECDHReplyMsg
+ if err = Unmarshal(packet, &reply); err != nil {
+ return nil, err
+ }
+
+ x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey)
+ if err != nil {
+ return nil, err
+ }
+
+ // generate shared secret
+ secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes())
+
+ h := ecHash(kex.curve).New()
+ magics.write(h)
+ writeString(h, reply.HostKey)
+ writeString(h, kexInit.ClientPubKey)
+ writeString(h, reply.EphemeralPubKey)
+ K := make([]byte, intLength(secret))
+ marshalInt(K, secret)
+ h.Write(K)
+
+ return &kexResult{
+ H: h.Sum(nil),
+ K: K,
+ HostKey: reply.HostKey,
+ Signature: reply.Signature,
+ Hash: ecHash(kex.curve),
+ }, nil
+}
+
+// unmarshalECKey parses and checks an EC key.
+func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) {
+ x, y = elliptic.Unmarshal(curve, pubkey)
+ if x == nil {
+ return nil, nil, errors.New("ssh: elliptic.Unmarshal failure")
+ }
+ if !validateECPublicKey(curve, x, y) {
+ return nil, nil, errors.New("ssh: public key not on curve")
+ }
+ return x, y, nil
+}
+
+// validateECPublicKey checks that the point is a valid public key for
+// the given curve. See [SEC1], 3.2.2
+func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
+ if x.Sign() == 0 && y.Sign() == 0 {
+ return false
+ }
+
+ if x.Cmp(curve.Params().P) >= 0 {
+ return false
+ }
+
+ if y.Cmp(curve.Params().P) >= 0 {
+ return false
+ }
+
+ if !curve.IsOnCurve(x, y) {
+ return false
+ }
+
+ // We don't check if N * PubKey == 0, since
+ //
+ // - the NIST curves have cofactor = 1, so this is implicit.
+ // (We don't foresee an implementation that supports non NIST
+ // curves)
+ //
+ // - for ephemeral keys, we don't need to worry about small
+ // subgroup attacks.
+ return true
+}
+
+func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
+ packet, err := c.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ var kexECDHInit kexECDHInitMsg
+ if err = Unmarshal(packet, &kexECDHInit); err != nil {
+ return nil, err
+ }
+
+ clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey)
+ if err != nil {
+ return nil, err
+ }
+
+ // We could cache this key across multiple users/multiple
+ // connection attempts, but the benefit is small. OpenSSH
+ // generates a new key for each incoming connection.
+ ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
+ if err != nil {
+ return nil, err
+ }
+
+ hostKeyBytes := priv.PublicKey().Marshal()
+
+ serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y)
+
+ // generate shared secret
+ secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes())
+
+ h := ecHash(kex.curve).New()
+ magics.write(h)
+ writeString(h, hostKeyBytes)
+ writeString(h, kexECDHInit.ClientPubKey)
+ writeString(h, serializedEphKey)
+
+ K := make([]byte, intLength(secret))
+ marshalInt(K, secret)
+ h.Write(K)
+
+ H := h.Sum(nil)
+
+ // H is already a hash, but the hostkey signing will apply its
+ // own key-specific hash algorithm.
+ sig, err := signAndMarshal(priv, rand, H)
+ if err != nil {
+ return nil, err
+ }
+
+ reply := kexECDHReplyMsg{
+ EphemeralPubKey: serializedEphKey,
+ HostKey: hostKeyBytes,
+ Signature: sig,
+ }
+
+ serialized := Marshal(&reply)
+ if err := c.writePacket(serialized); err != nil {
+ return nil, err
+ }
+
+ return &kexResult{
+ H: H,
+ K: K,
+ HostKey: reply.HostKey,
+ Signature: sig,
+ Hash: ecHash(kex.curve),
+ }, nil
+}
+
+var kexAlgoMap = map[string]kexAlgorithm{}
+
+func init() {
+ // This is the group called diffie-hellman-group1-sha1 in RFC
+ // 4253 and Oakley Group 2 in RFC 2409.
+ p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
+ kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
+ g: new(big.Int).SetInt64(2),
+ p: p,
+ }
+
+ // This is the group called diffie-hellman-group14-sha1 in RFC
+ // 4253 and Oakley Group 14 in RFC 3526.
+ p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
+
+ kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
+ g: new(big.Int).SetInt64(2),
+ p: p,
+ }
+
+ kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
+ kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
+ kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
+ kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
+}
+
+// curve25519sha256 implements the curve25519-sha256@libssh.org key
+// agreement protocol, as described in
+// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt
+type curve25519sha256 struct{}
+
+type curve25519KeyPair struct {
+ priv [32]byte
+ pub [32]byte
+}
+
+func (kp *curve25519KeyPair) generate(rand io.Reader) error {
+ if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
+ return err
+ }
+ curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
+ return nil
+}
+
+// curve25519Zeros is just an array of 32 zero bytes so that we have something
+// convenient to compare against in order to reject curve25519 points with the
+// wrong order.
+var curve25519Zeros [32]byte
+
+func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
+ var kp curve25519KeyPair
+ if err := kp.generate(rand); err != nil {
+ return nil, err
+ }
+ if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil {
+ return nil, err
+ }
+
+ packet, err := c.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ var reply kexECDHReplyMsg
+ if err = Unmarshal(packet, &reply); err != nil {
+ return nil, err
+ }
+ if len(reply.EphemeralPubKey) != 32 {
+ return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
+ }
+
+ var servPub, secret [32]byte
+ copy(servPub[:], reply.EphemeralPubKey)
+ curve25519.ScalarMult(&secret, &kp.priv, &servPub)
+ if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
+ return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
+ }
+
+ h := crypto.SHA256.New()
+ magics.write(h)
+ writeString(h, reply.HostKey)
+ writeString(h, kp.pub[:])
+ writeString(h, reply.EphemeralPubKey)
+
+ kInt := new(big.Int).SetBytes(secret[:])
+ K := make([]byte, intLength(kInt))
+ marshalInt(K, kInt)
+ h.Write(K)
+
+ return &kexResult{
+ H: h.Sum(nil),
+ K: K,
+ HostKey: reply.HostKey,
+ Signature: reply.Signature,
+ Hash: crypto.SHA256,
+ }, nil
+}
+
+func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
+ packet, err := c.readPacket()
+ if err != nil {
+ return
+ }
+ var kexInit kexECDHInitMsg
+ if err = Unmarshal(packet, &kexInit); err != nil {
+ return
+ }
+
+ if len(kexInit.ClientPubKey) != 32 {
+ return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
+ }
+
+ var kp curve25519KeyPair
+ if err := kp.generate(rand); err != nil {
+ return nil, err
+ }
+
+ var clientPub, secret [32]byte
+ copy(clientPub[:], kexInit.ClientPubKey)
+ curve25519.ScalarMult(&secret, &kp.priv, &clientPub)
+ if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
+ return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
+ }
+
+ hostKeyBytes := priv.PublicKey().Marshal()
+
+ h := crypto.SHA256.New()
+ magics.write(h)
+ writeString(h, hostKeyBytes)
+ writeString(h, kexInit.ClientPubKey)
+ writeString(h, kp.pub[:])
+
+ kInt := new(big.Int).SetBytes(secret[:])
+ K := make([]byte, intLength(kInt))
+ marshalInt(K, kInt)
+ h.Write(K)
+
+ H := h.Sum(nil)
+
+ sig, err := signAndMarshal(priv, rand, H)
+ if err != nil {
+ return nil, err
+ }
+
+ reply := kexECDHReplyMsg{
+ EphemeralPubKey: kp.pub[:],
+ HostKey: hostKeyBytes,
+ Signature: sig,
+ }
+ if err := c.writePacket(Marshal(&reply)); err != nil {
+ return nil, err
+ }
+ return &kexResult{
+ H: H,
+ K: K,
+ HostKey: hostKeyBytes,
+ Signature: sig,
+ Hash: crypto.SHA256,
+ }, nil
+}
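Both dhGroup methods above rely on the identity (g^a)^b = (g^b)^a (mod p). A toy sketch with deliberately tiny, insecure parameters (the real groups are the primes registered in init above):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Toy parameters for illustration only.
	p := big.NewInt(23)
	g := big.NewInt(5)
	a := big.NewInt(6)  // client private value
	b := big.NewInt(15) // server private value

	A := new(big.Int).Exp(g, a, p) // client sends A = g^a mod p
	B := new(big.Int).Exp(g, b, p) // server replies B = g^b mod p

	k1 := new(big.Int).Exp(B, a, p) // client computes B^a mod p
	k2 := new(big.Int).Exp(A, b, p) // server computes A^b mod p
	fmt.Println(k1, k2, k1.Cmp(k2) == 0) // both sides derive the same K
}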
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/kex_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/kex_test.go
new file mode 100644
index 00000000000..12ca0acd31d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/kex_test.go
@@ -0,0 +1,50 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+// Key exchange tests.
+
+import (
+ "crypto/rand"
+ "reflect"
+ "testing"
+)
+
+func TestKexes(t *testing.T) {
+ type kexResultErr struct {
+ result *kexResult
+ err error
+ }
+
+ for name, kex := range kexAlgoMap {
+ a, b := memPipe()
+
+ s := make(chan kexResultErr, 1)
+ c := make(chan kexResultErr, 1)
+ var magics handshakeMagics
+ go func() {
+ r, e := kex.Client(a, rand.Reader, &magics)
+ a.Close()
+ c <- kexResultErr{r, e}
+ }()
+ go func() {
+ r, e := kex.Server(b, rand.Reader, &magics, testSigners["ecdsa"])
+ b.Close()
+ s <- kexResultErr{r, e}
+ }()
+
+ clientRes := <-c
+ serverRes := <-s
+ if clientRes.err != nil {
+ t.Errorf("client: %v", clientRes.err)
+ }
+ if serverRes.err != nil {
+ t.Errorf("server: %v", serverRes.err)
+ }
+ if !reflect.DeepEqual(clientRes.result, serverRes.result) {
+ t.Errorf("kex %q: mismatch %#v, %#v", name, clientRes.result, serverRes.result)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/keys.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/keys.go
new file mode 100644
index 00000000000..cfc970b2cd2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/keys.go
@@ -0,0 +1,720 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/asn1"
+ "encoding/base64"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "strings"
+)
+
+// These constants represent the algorithm names for key types supported by this
+// package.
+const (
+ KeyAlgoRSA = "ssh-rsa"
+ KeyAlgoDSA = "ssh-dss"
+ KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
+ KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
+ KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
+)
+
+// parsePubKey parses a public key of the given algorithm.
+// Use ParsePublicKey for keys with prepended algorithm.
+func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) {
+ switch algo {
+ case KeyAlgoRSA:
+ return parseRSA(in)
+ case KeyAlgoDSA:
+ return parseDSA(in)
+ case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
+ return parseECDSA(in)
+ case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
+ cert, err := parseCert(in, certToPrivAlgo(algo))
+ if err != nil {
+ return nil, nil, err
+ }
+ return cert, nil, nil
+ }
+ return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", err)
+}
+
+// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format
+// (see sshd(8) manual page) once the options and key type fields have been
+// removed.
+func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) {
+ in = bytes.TrimSpace(in)
+
+ i := bytes.IndexAny(in, " \t")
+ if i == -1 {
+ i = len(in)
+ }
+ base64Key := in[:i]
+
+ key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key)))
+ n, err := base64.StdEncoding.Decode(key, base64Key)
+ if err != nil {
+ return nil, "", err
+ }
+ key = key[:n]
+ out, err = ParsePublicKey(key)
+ if err != nil {
+ return nil, "", err
+ }
+ comment = string(bytes.TrimSpace(in[i:]))
+ return out, comment, nil
+}
+
+// ParseKnownHosts parses an entry in the format of the known_hosts file.
+//
+// The known_hosts format is documented in the sshd(8) manual page. This
+// function will parse a single entry from in. On successful return, marker
+// will contain the optional marker value (i.e. "cert-authority" or "revoked")
+// or else be empty, hosts will contain the hosts that this entry matches,
+// pubKey will contain the public key and comment will contain any trailing
+// comment at the end of the line. See the sshd(8) manual page for the various
+// forms that a host string can take.
+//
+// The unparsed remainder of the input will be returned in rest. This function
+// can be called repeatedly to parse multiple entries.
+//
+// If no entries were found in the input then err will be io.EOF. Otherwise a
+// non-nil err value indicates a parse error.
+func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) {
+ for len(in) > 0 {
+ end := bytes.IndexByte(in, '\n')
+ if end != -1 {
+ rest = in[end+1:]
+ in = in[:end]
+ } else {
+ rest = nil
+ }
+
+ end = bytes.IndexByte(in, '\r')
+ if end != -1 {
+ in = in[:end]
+ }
+
+ in = bytes.TrimSpace(in)
+ if len(in) == 0 || in[0] == '#' {
+ in = rest
+ continue
+ }
+
+ i := bytes.IndexAny(in, " \t")
+ if i == -1 {
+ in = rest
+ continue
+ }
+
+		// Strip out the beginning of the known_hosts line.
+ // This is either an optional marker or a (set of) hostname(s).
+ keyFields := bytes.Fields(in)
+ if len(keyFields) < 3 || len(keyFields) > 5 {
+ return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data")
+ }
+
+ // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated
+ // list of hosts
+ marker := ""
+ if keyFields[0][0] == '@' {
+ marker = string(keyFields[0][1:])
+ keyFields = keyFields[1:]
+ }
+
+ hosts := string(keyFields[0])
+		// keyFields[1] contains the key type (e.g. "ssh-rsa").
+ // However, that information is duplicated inside the
+ // base64-encoded key and so is ignored here.
+
+ key := bytes.Join(keyFields[2:], []byte(" "))
+ if pubKey, comment, err = parseAuthorizedKey(key); err != nil {
+ return "", nil, nil, "", nil, err
+ }
+
+ return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil
+ }
+
+ return "", nil, nil, "", nil, io.EOF
+}
+
+// ParseAuthorizedKey parses a public key from an authorized_keys
+// file used in OpenSSH according to the sshd(8) manual page.
+func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
+ for len(in) > 0 {
+ end := bytes.IndexByte(in, '\n')
+ if end != -1 {
+ rest = in[end+1:]
+ in = in[:end]
+ } else {
+ rest = nil
+ }
+
+ end = bytes.IndexByte(in, '\r')
+ if end != -1 {
+ in = in[:end]
+ }
+
+ in = bytes.TrimSpace(in)
+ if len(in) == 0 || in[0] == '#' {
+ in = rest
+ continue
+ }
+
+ i := bytes.IndexAny(in, " \t")
+ if i == -1 {
+ in = rest
+ continue
+ }
+
+ if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
+ return out, comment, options, rest, nil
+ }
+
+ // No key type recognised. Maybe there's an options field at
+ // the beginning.
+ var b byte
+ inQuote := false
+ var candidateOptions []string
+ optionStart := 0
+ for i, b = range in {
+ isEnd := !inQuote && (b == ' ' || b == '\t')
+ if (b == ',' && !inQuote) || isEnd {
+ if i-optionStart > 0 {
+ candidateOptions = append(candidateOptions, string(in[optionStart:i]))
+ }
+ optionStart = i + 1
+ }
+ if isEnd {
+ break
+ }
+ if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) {
+ inQuote = !inQuote
+ }
+ }
+ for i < len(in) && (in[i] == ' ' || in[i] == '\t') {
+ i++
+ }
+ if i == len(in) {
+ // Invalid line: unmatched quote
+ in = rest
+ continue
+ }
+
+ in = in[i:]
+ i = bytes.IndexAny(in, " \t")
+ if i == -1 {
+ in = rest
+ continue
+ }
+
+ if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
+ options = candidateOptions
+ return out, comment, options, rest, nil
+ }
+
+ in = rest
+ continue
+ }
+
+ return nil, "", nil, nil, errors.New("ssh: no key found")
+}
+
+// ParsePublicKey parses an SSH public key formatted for use in
+// the SSH wire protocol according to RFC 4253, section 6.6.
+func ParsePublicKey(in []byte) (out PublicKey, err error) {
+ algo, in, ok := parseString(in)
+ if !ok {
+ return nil, errShortRead
+ }
+ var rest []byte
+ out, rest, err = parsePubKey(in, string(algo))
+ if len(rest) > 0 {
+ return nil, errors.New("ssh: trailing junk in public key")
+ }
+
+ return out, err
+}
+
+// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH
+// authorized_keys file. The return value ends with newline.
+func MarshalAuthorizedKey(key PublicKey) []byte {
+ b := &bytes.Buffer{}
+ b.WriteString(key.Type())
+ b.WriteByte(' ')
+ e := base64.NewEncoder(base64.StdEncoding, b)
+ e.Write(key.Marshal())
+ e.Close()
+ b.WriteByte('\n')
+ return b.Bytes()
+}
+
+// PublicKey is an abstraction of different types of public keys.
+type PublicKey interface {
+ // Type returns the key's type, e.g. "ssh-rsa".
+ Type() string
+
+ // Marshal returns the serialized key data in SSH wire format,
+ // with the name prefix.
+ Marshal() []byte
+
+ // Verify that sig is a signature on the given data using this
+ // key. This function will hash the data appropriately first.
+ Verify(data []byte, sig *Signature) error
+}
+
+// A Signer can create signatures that verify against a public key.
+type Signer interface {
+ // PublicKey returns an associated PublicKey instance.
+ PublicKey() PublicKey
+
+ // Sign returns raw signature for the given data. This method
+ // will apply the hash specified for the keytype to the data.
+ Sign(rand io.Reader, data []byte) (*Signature, error)
+}
+
+type rsaPublicKey rsa.PublicKey
+
+func (r *rsaPublicKey) Type() string {
+ return "ssh-rsa"
+}
+
+// parseRSA parses an RSA key according to RFC 4253, section 6.6.
+func parseRSA(in []byte) (out PublicKey, rest []byte, err error) {
+ var w struct {
+ E *big.Int
+ N *big.Int
+ Rest []byte `ssh:"rest"`
+ }
+ if err := Unmarshal(in, &w); err != nil {
+ return nil, nil, err
+ }
+
+ if w.E.BitLen() > 24 {
+ return nil, nil, errors.New("ssh: exponent too large")
+ }
+ e := w.E.Int64()
+ if e < 3 || e&1 == 0 {
+ return nil, nil, errors.New("ssh: incorrect exponent")
+ }
+
+ var key rsa.PublicKey
+ key.E = int(e)
+ key.N = w.N
+ return (*rsaPublicKey)(&key), w.Rest, nil
+}
+
+func (r *rsaPublicKey) Marshal() []byte {
+ e := new(big.Int).SetInt64(int64(r.E))
+ wirekey := struct {
+ Name string
+ E *big.Int
+ N *big.Int
+ }{
+ KeyAlgoRSA,
+ e,
+ r.N,
+ }
+ return Marshal(&wirekey)
+}
+
+func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
+ if sig.Format != r.Type() {
+ return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
+ }
+ h := crypto.SHA1.New()
+ h.Write(data)
+ digest := h.Sum(nil)
+ return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob)
+}
+
+type dsaPublicKey dsa.PublicKey
+
+func (r *dsaPublicKey) Type() string {
+ return "ssh-dss"
+}
+
+// parseDSA parses an DSA key according to RFC 4253, section 6.6.
+func parseDSA(in []byte) (out PublicKey, rest []byte, err error) {
+ var w struct {
+ P, Q, G, Y *big.Int
+ Rest []byte `ssh:"rest"`
+ }
+ if err := Unmarshal(in, &w); err != nil {
+ return nil, nil, err
+ }
+
+ key := &dsaPublicKey{
+ Parameters: dsa.Parameters{
+ P: w.P,
+ Q: w.Q,
+ G: w.G,
+ },
+ Y: w.Y,
+ }
+ return key, w.Rest, nil
+}
+
+func (k *dsaPublicKey) Marshal() []byte {
+ w := struct {
+ Name string
+ P, Q, G, Y *big.Int
+ }{
+ k.Type(),
+ k.P,
+ k.Q,
+ k.G,
+ k.Y,
+ }
+
+ return Marshal(&w)
+}
+
+func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
+ if sig.Format != k.Type() {
+ return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
+ }
+ h := crypto.SHA1.New()
+ h.Write(data)
+ digest := h.Sum(nil)
+
+ // Per RFC 4253, section 6.6,
+ // The value for 'dss_signature_blob' is encoded as a string containing
+ // r, followed by s (which are 160-bit integers, without lengths or
+ // padding, unsigned, and in network byte order).
+ // For DSS purposes, sig.Blob should be exactly 40 bytes in length.
+ if len(sig.Blob) != 40 {
+ return errors.New("ssh: DSA signature parse error")
+ }
+ r := new(big.Int).SetBytes(sig.Blob[:20])
+ s := new(big.Int).SetBytes(sig.Blob[20:])
+ if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) {
+ return nil
+ }
+ return errors.New("ssh: signature did not verify")
+}
+
+type dsaPrivateKey struct {
+ *dsa.PrivateKey
+}
+
+func (k *dsaPrivateKey) PublicKey() PublicKey {
+ return (*dsaPublicKey)(&k.PrivateKey.PublicKey)
+}
+
+func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
+ h := crypto.SHA1.New()
+ h.Write(data)
+ digest := h.Sum(nil)
+ r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
+ if err != nil {
+ return nil, err
+ }
+
+ sig := make([]byte, 40)
+ rb := r.Bytes()
+ sb := s.Bytes()
+
+ copy(sig[20-len(rb):20], rb)
+ copy(sig[40-len(sb):], sb)
+
+ return &Signature{
+ Format: k.PublicKey().Type(),
+ Blob: sig,
+ }, nil
+}
+
+type ecdsaPublicKey ecdsa.PublicKey
+
+func (key *ecdsaPublicKey) Type() string {
+ return "ecdsa-sha2-" + key.nistID()
+}
+
+func (key *ecdsaPublicKey) nistID() string {
+ switch key.Params().BitSize {
+ case 256:
+ return "nistp256"
+ case 384:
+ return "nistp384"
+ case 521:
+ return "nistp521"
+ }
+ panic("ssh: unsupported ecdsa key size")
+}
+
+func supportedEllipticCurve(curve elliptic.Curve) bool {
+ return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521()
+}
+
+// ecHash returns the hash to match the given elliptic curve, see RFC
+// 5656, section 6.2.1
+func ecHash(curve elliptic.Curve) crypto.Hash {
+ bitSize := curve.Params().BitSize
+ switch {
+ case bitSize <= 256:
+ return crypto.SHA256
+ case bitSize <= 384:
+ return crypto.SHA384
+ }
+ return crypto.SHA512
+}
+
+// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1.
+func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) {
+ var w struct {
+ Curve string
+ KeyBytes []byte
+ Rest []byte `ssh:"rest"`
+ }
+
+ if err := Unmarshal(in, &w); err != nil {
+ return nil, nil, err
+ }
+
+ key := new(ecdsa.PublicKey)
+
+ switch w.Curve {
+ case "nistp256":
+ key.Curve = elliptic.P256()
+ case "nistp384":
+ key.Curve = elliptic.P384()
+ case "nistp521":
+ key.Curve = elliptic.P521()
+ default:
+ return nil, nil, errors.New("ssh: unsupported curve")
+ }
+
+ key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes)
+ if key.X == nil || key.Y == nil {
+ return nil, nil, errors.New("ssh: invalid curve point")
+ }
+ return (*ecdsaPublicKey)(key), w.Rest, nil
+}
+
+func (key *ecdsaPublicKey) Marshal() []byte {
+ // See RFC 5656, section 3.1.
+ keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y)
+ w := struct {
+ Name string
+ ID string
+ Key []byte
+ }{
+ key.Type(),
+ key.nistID(),
+ keyBytes,
+ }
+
+ return Marshal(&w)
+}
+
+func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
+ if sig.Format != key.Type() {
+ return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
+ }
+
+ h := ecHash(key.Curve).New()
+ h.Write(data)
+ digest := h.Sum(nil)
+
+ // Per RFC 5656, section 3.1.2,
+ // The ecdsa_signature_blob value has the following specific encoding:
+ // mpint r
+ // mpint s
+ var ecSig struct {
+ R *big.Int
+ S *big.Int
+ }
+
+ if err := Unmarshal(sig.Blob, &ecSig); err != nil {
+ return err
+ }
+
+ if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) {
+ return nil
+ }
+ return errors.New("ssh: signature did not verify")
+}
+
+// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey,
+// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding
+// Signer instance. ECDSA keys must use P-256, P-384 or P-521.
+func NewSignerFromKey(key interface{}) (Signer, error) {
+ switch key := key.(type) {
+ case crypto.Signer:
+ return NewSignerFromSigner(key)
+ case *dsa.PrivateKey:
+ return &dsaPrivateKey{key}, nil
+ default:
+ return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+ }
+}
+
+type wrappedSigner struct {
+ signer crypto.Signer
+ pubKey PublicKey
+}
+
+// NewSignerFromSigner takes any crypto.Signer implementation and
+// returns a corresponding Signer interface. This can be used, for
+// example, with keys kept in hardware modules.
+func NewSignerFromSigner(signer crypto.Signer) (Signer, error) {
+ pubKey, err := NewPublicKey(signer.Public())
+ if err != nil {
+ return nil, err
+ }
+
+ return &wrappedSigner{signer, pubKey}, nil
+}
+
+func (s *wrappedSigner) PublicKey() PublicKey {
+ return s.pubKey
+}
+
+func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
+ var hashFunc crypto.Hash
+
+ switch key := s.pubKey.(type) {
+ case *rsaPublicKey, *dsaPublicKey:
+ hashFunc = crypto.SHA1
+ case *ecdsaPublicKey:
+ hashFunc = ecHash(key.Curve)
+ default:
+ return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+ }
+
+ h := hashFunc.New()
+ h.Write(data)
+ digest := h.Sum(nil)
+
+ signature, err := s.signer.Sign(rand, digest, hashFunc)
+ if err != nil {
+ return nil, err
+ }
+
+ // crypto.Signer.Sign is expected to return an ASN.1-encoded signature
+ // for ECDSA and DSA, but that's not the encoding expected by SSH, so
+ // re-encode.
+ switch s.pubKey.(type) {
+ case *ecdsaPublicKey, *dsaPublicKey:
+ type asn1Signature struct {
+ R, S *big.Int
+ }
+ asn1Sig := new(asn1Signature)
+ _, err := asn1.Unmarshal(signature, asn1Sig)
+ if err != nil {
+ return nil, err
+ }
+
+ switch s.pubKey.(type) {
+ case *ecdsaPublicKey:
+ signature = Marshal(asn1Sig)
+
+ case *dsaPublicKey:
+ signature = make([]byte, 40)
+ r := asn1Sig.R.Bytes()
+ s := asn1Sig.S.Bytes()
+ copy(signature[20-len(r):20], r)
+ copy(signature[40-len(s):40], s)
+ }
+ }
+
+ return &Signature{
+ Format: s.pubKey.Type(),
+ Blob: signature,
+ }, nil
+}
+
+// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey
+// and returns a corresponding PublicKey instance. ECDSA keys must use
+// P-256, P-384 or P-521.
+func NewPublicKey(key interface{}) (PublicKey, error) {
+ switch key := key.(type) {
+ case *rsa.PublicKey:
+ return (*rsaPublicKey)(key), nil
+ case *ecdsa.PublicKey:
+ if !supportedEllipticCurve(key.Curve) {
+ return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported")
+ }
+ return (*ecdsaPublicKey)(key), nil
+ case *dsa.PublicKey:
+ return (*dsaPublicKey)(key), nil
+ default:
+ return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+ }
+}
+
+// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
+// the same keys as ParseRawPrivateKey.
+func ParsePrivateKey(pemBytes []byte) (Signer, error) {
+ key, err := ParseRawPrivateKey(pemBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewSignerFromKey(key)
+}
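+
+// A minimal usage sketch, assuming pemBytes holds one of the supported
+// PEM-encoded keys:
+//
+//    signer, err := ParsePrivateKey(pemBytes)
+//    if err != nil {
+//        // handle error
+//    }
+//    // Emit the matching authorized_keys line for the public half.
+//    line := MarshalAuthorizedKey(signer.PublicKey())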
+
+// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
+// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
+func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
+ block, _ := pem.Decode(pemBytes)
+ if block == nil {
+ return nil, errors.New("ssh: no key found")
+ }
+
+ switch block.Type {
+ case "RSA PRIVATE KEY":
+ return x509.ParsePKCS1PrivateKey(block.Bytes)
+ case "EC PRIVATE KEY":
+ return x509.ParseECPrivateKey(block.Bytes)
+ case "DSA PRIVATE KEY":
+ return ParseDSAPrivateKey(block.Bytes)
+ default:
+ return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
+ }
+}
+
+// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as
+// specified by the OpenSSL DSA man page.
+func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
+ var k struct {
+ Version int
+ P *big.Int
+ Q *big.Int
+ G *big.Int
+ Pub *big.Int
+ Priv *big.Int
+ }
+ rest, err := asn1.Unmarshal(der, &k)
+ if err != nil {
+ return nil, errors.New("ssh: failed to parse DSA key: " + err.Error())
+ }
+ if len(rest) > 0 {
+ return nil, errors.New("ssh: garbage after DSA key")
+ }
+
+ return &dsa.PrivateKey{
+ PublicKey: dsa.PublicKey{
+ Parameters: dsa.Parameters{
+ P: k.P,
+ Q: k.Q,
+ G: k.G,
+ },
+ Y: k.Pub,
+ },
+ X: k.Priv,
+ }, nil
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/keys_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/keys_test.go
new file mode 100644
index 00000000000..27569473fcf
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/keys_test.go
@@ -0,0 +1,437 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "golang.org/x/crypto/ssh/testdata"
+)
+
+func rawKey(pub PublicKey) interface{} {
+ switch k := pub.(type) {
+ case *rsaPublicKey:
+ return (*rsa.PublicKey)(k)
+ case *dsaPublicKey:
+ return (*dsa.PublicKey)(k)
+ case *ecdsaPublicKey:
+ return (*ecdsa.PublicKey)(k)
+ case *Certificate:
+ return k
+ }
+ panic("unknown key type")
+}
+
+func TestKeyMarshalParse(t *testing.T) {
+ for _, priv := range testSigners {
+ pub := priv.PublicKey()
+ roundtrip, err := ParsePublicKey(pub.Marshal())
+ if err != nil {
+ t.Errorf("ParsePublicKey(%T): %v", pub, err)
+ }
+
+ k1 := rawKey(pub)
+ k2 := rawKey(roundtrip)
+
+ if !reflect.DeepEqual(k1, k2) {
+ t.Errorf("got %#v in roundtrip, want %#v", k2, k1)
+ }
+ }
+}
+
+func TestUnsupportedCurves(t *testing.T) {
+ raw, err := ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
+ if err != nil {
+ t.Fatalf("GenerateKey: %v", err)
+ }
+
+ if _, err = NewSignerFromKey(raw); err == nil || !strings.Contains(err.Error(), "only P-256") {
+ t.Fatalf("NewPrivateKey should not succeed with P-224, got: %v", err)
+ }
+
+ if _, err = NewPublicKey(&raw.PublicKey); err == nil || !strings.Contains(err.Error(), "only P-256") {
+ t.Fatalf("NewPublicKey should not succeed with P-224, got: %v", err)
+ }
+}
+
+func TestNewPublicKey(t *testing.T) {
+ for _, k := range testSigners {
+ raw := rawKey(k.PublicKey())
+ // Skip certificates, as NewPublicKey does not support them.
+ if _, ok := raw.(*Certificate); ok {
+ continue
+ }
+ pub, err := NewPublicKey(raw)
+ if err != nil {
+ t.Errorf("NewPublicKey(%#v): %v", raw, err)
+ }
+ if !reflect.DeepEqual(k.PublicKey(), pub) {
+ t.Errorf("NewPublicKey(%#v) = %#v, want %#v", raw, pub, k.PublicKey())
+ }
+ }
+}
+
+func TestKeySignVerify(t *testing.T) {
+ for _, priv := range testSigners {
+ pub := priv.PublicKey()
+
+ data := []byte("sign me")
+ sig, err := priv.Sign(rand.Reader, data)
+ if err != nil {
+ t.Fatalf("Sign(%T): %v", priv, err)
+ }
+
+ if err := pub.Verify(data, sig); err != nil {
+ t.Errorf("publicKey.Verify(%T): %v", priv, err)
+ }
+ sig.Blob[5]++
+ if err := pub.Verify(data, sig); err == nil {
+ t.Errorf("publicKey.Verify on broken sig did not fail")
+ }
+ }
+}
+
+func TestParseRSAPrivateKey(t *testing.T) {
+ key := testPrivateKeys["rsa"]
+
+ rsaKey, ok := key.(*rsa.PrivateKey)
+ if !ok {
+ t.Fatalf("got %T, want *rsa.PrivateKey", key)
+ }
+
+ if err := rsaKey.Validate(); err != nil {
+ t.Errorf("Validate: %v", err)
+ }
+}
+
+func TestParseECPrivateKey(t *testing.T) {
+ key := testPrivateKeys["ecdsa"]
+
+ ecKey, ok := key.(*ecdsa.PrivateKey)
+ if !ok {
+ t.Fatalf("got %T, want *ecdsa.PrivateKey", ecKey)
+ }
+
+ if !validateECPublicKey(ecKey.Curve, ecKey.X, ecKey.Y) {
+ t.Fatalf("public key does not validate.")
+ }
+}
+
+func TestParseDSA(t *testing.T) {
+ // We actually exercise the ParsePrivateKey codepath here, as opposed to
+ // using the ParseRawPrivateKey+NewSignerFromKey path that testdata_test.go
+ // uses.
+ s, err := ParsePrivateKey(testdata.PEMBytes["dsa"])
+ if err != nil {
+ t.Fatalf("ParsePrivateKey returned error: %s", err)
+ }
+
+ data := []byte("sign me")
+ sig, err := s.Sign(rand.Reader, data)
+ if err != nil {
+ t.Fatalf("dsa.Sign: %v", err)
+ }
+
+ if err := s.PublicKey().Verify(data, sig); err != nil {
+ t.Errorf("Verify failed: %v", err)
+ }
+}
+
+// Tests for authorized_keys parsing.
+
+// getTestKey returns a public key, and its base64 encoding.
+func getTestKey() (PublicKey, string) {
+ k := testPublicKeys["rsa"]
+
+ b := &bytes.Buffer{}
+ e := base64.NewEncoder(base64.StdEncoding, b)
+ e.Write(k.Marshal())
+ e.Close()
+
+ return k, b.String()
+}
+
+func TestMarshalParsePublicKey(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ line := fmt.Sprintf("%s %s user@host", pub.Type(), pubSerialized)
+
+ authKeys := MarshalAuthorizedKey(pub)
+ actualFields := strings.Fields(string(authKeys))
+ if len(actualFields) == 0 {
+ t.Fatalf("failed authKeys: %v", authKeys)
+ }
+
+ // drop the comment
+ expectedFields := strings.Fields(line)[0:2]
+
+ if !reflect.DeepEqual(actualFields, expectedFields) {
+ t.Errorf("got %v, expected %v", actualFields, expectedFields)
+ }
+
+ actPub, _, _, _, err := ParseAuthorizedKey([]byte(line))
+ if err != nil {
+ t.Fatalf("cannot parse %v: %v", line, err)
+ }
+ if !reflect.DeepEqual(actPub, pub) {
+ t.Errorf("got %v, expected %v", actPub, pub)
+ }
+}
+
+type authResult struct {
+ pubKey PublicKey
+ options []string
+ comments string
+ rest string
+ ok bool
+}
+
+func testAuthorizedKeys(t *testing.T, authKeys []byte, expected []authResult) {
+ rest := authKeys
+ var values []authResult
+ for len(rest) > 0 {
+ var r authResult
+ var err error
+ r.pubKey, r.comments, r.options, rest, err = ParseAuthorizedKey(rest)
+ r.ok = (err == nil)
+ t.Log(err)
+ r.rest = string(rest)
+ values = append(values, r)
+ }
+
+ if !reflect.DeepEqual(values, expected) {
+ t.Errorf("got %#v, expected %#v", values, expected)
+ }
+}
+
+func TestAuthorizedKeyBasic(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ line := "ssh-rsa " + pubSerialized + " user@host"
+ testAuthorizedKeys(t, []byte(line),
+ []authResult{
+ {pub, nil, "user@host", "", true},
+ })
+}
+
+func TestAuth(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ authWithOptions := []string{
+ `# comments to ignore before any keys...`,
+ ``,
+ `env="HOME=/home/root",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`,
+ `# comments to ignore, along with a blank line`,
+ ``,
+ `env="HOME=/home/root2" ssh-rsa ` + pubSerialized + ` user2@host2`,
+ ``,
+ `# more comments, plus an invalid entry`,
+ `ssh-rsa data-that-will-not-parse user@host3`,
+ }
+ for _, eol := range []string{"\n", "\r\n"} {
+ authOptions := strings.Join(authWithOptions, eol)
+ rest2 := strings.Join(authWithOptions[3:], eol)
+ rest3 := strings.Join(authWithOptions[6:], eol)
+ testAuthorizedKeys(t, []byte(authOptions), []authResult{
+ {pub, []string{`env="HOME=/home/root"`, "no-port-forwarding"}, "user@host", rest2, true},
+ {pub, []string{`env="HOME=/home/root2"`}, "user2@host2", rest3, true},
+ {nil, nil, "", "", false},
+ })
+ }
+}
+
+func TestAuthWithQuotedSpaceInEnv(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ authWithQuotedSpaceInEnv := []byte(`env="HOME=/home/root dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`)
+ testAuthorizedKeys(t, []byte(authWithQuotedSpaceInEnv), []authResult{
+ {pub, []string{`env="HOME=/home/root dir"`, "no-port-forwarding"}, "user@host", "", true},
+ })
+}
+
+func TestAuthWithQuotedCommaInEnv(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ authWithQuotedCommaInEnv := []byte(`env="HOME=/home/root,dir",no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host`)
+ testAuthorizedKeys(t, []byte(authWithQuotedCommaInEnv), []authResult{
+ {pub, []string{`env="HOME=/home/root,dir"`, "no-port-forwarding"}, "user@host", "", true},
+ })
+}
+
+func TestAuthWithQuotedQuoteInEnv(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ authWithQuotedQuoteInEnv := []byte(`env="HOME=/home/\"root dir",no-port-forwarding` + "\t" + `ssh-rsa` + "\t" + pubSerialized + ` user@host`)
+ authWithDoubleQuotedQuote := []byte(`no-port-forwarding,env="HOME=/home/ \"root dir\"" ssh-rsa ` + pubSerialized + "\t" + `user@host`)
+ testAuthorizedKeys(t, []byte(authWithQuotedQuoteInEnv), []authResult{
+ {pub, []string{`env="HOME=/home/\"root dir"`, "no-port-forwarding"}, "user@host", "", true},
+ })
+
+ testAuthorizedKeys(t, []byte(authWithDoubleQuotedQuote), []authResult{
+ {pub, []string{"no-port-forwarding", `env="HOME=/home/ \"root dir\""`}, "user@host", "", true},
+ })
+}
+
+func TestAuthWithInvalidSpace(t *testing.T) {
+ _, pubSerialized := getTestKey()
+ authWithInvalidSpace := []byte(`env="HOME=/home/root dir", no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host
+#more to follow but still no valid keys`)
+ testAuthorizedKeys(t, []byte(authWithInvalidSpace), []authResult{
+ {nil, nil, "", "", false},
+ })
+}
+
+func TestAuthWithMissingQuote(t *testing.T) {
+ pub, pubSerialized := getTestKey()
+ authWithMissingQuote := []byte(`env="HOME=/home/root,no-port-forwarding ssh-rsa ` + pubSerialized + ` user@host
+env="HOME=/home/root",shared-control ssh-rsa ` + pubSerialized + ` user@host`)
+
+ testAuthorizedKeys(t, []byte(authWithMissingQuote), []authResult{
+ {pub, []string{`env="HOME=/home/root"`, `shared-control`}, "user@host", "", true},
+ })
+}
+
+func TestInvalidEntry(t *testing.T) {
+ authInvalid := []byte(`ssh-rsa`)
+ _, _, _, _, err := ParseAuthorizedKey(authInvalid)
+ if err == nil {
+ t.Errorf("got valid entry for %q", authInvalid)
+ }
+}
+
+var knownHostsParseTests = []struct {
+ input string
+ err string
+
+ marker string
+ comment string
+ hosts []string
+ rest string
+}{
+ {
+ "",
+ "EOF",
+
+ "", "", nil, "",
+ },
+ {
+ "# Just a comment",
+ "EOF",
+
+ "", "", nil, "",
+ },
+ {
+ " \t ",
+ "EOF",
+
+ "", "", nil, "",
+ },
+ {
+ "localhost ssh-rsa {RSAPUB}",
+ "",
+
+ "", "", []string{"localhost"}, "",
+ },
+ {
+ "localhost\tssh-rsa {RSAPUB}",
+ "",
+
+ "", "", []string{"localhost"}, "",
+ },
+ {
+ "localhost\tssh-rsa {RSAPUB}\tcomment comment",
+ "",
+
+ "", "comment comment", []string{"localhost"}, "",
+ },
+ {
+ "localhost\tssh-rsa {RSAPUB}\tcomment comment\n",
+ "",
+
+ "", "comment comment", []string{"localhost"}, "",
+ },
+ {
+ "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\n",
+ "",
+
+ "", "comment comment", []string{"localhost"}, "",
+ },
+ {
+ "localhost\tssh-rsa {RSAPUB}\tcomment comment\r\nnext line",
+ "",
+
+ "", "comment comment", []string{"localhost"}, "next line",
+ },
+ {
+ "localhost,[host2:123]\tssh-rsa {RSAPUB}\tcomment comment",
+ "",
+
+ "", "comment comment", []string{"localhost","[host2:123]"}, "",
+ },
+ {
+ "@marker \tlocalhost,[host2:123]\tssh-rsa {RSAPUB}",
+ "",
+
+ "marker", "", []string{"localhost","[host2:123]"}, "",
+ },
+ {
+ "@marker \tlocalhost,[host2:123]\tssh-rsa aabbccdd",
+ "short read",
+
+ "", "", nil, "",
+ },
+}
+
+func TestKnownHostsParsing(t *testing.T) {
+ rsaPub, rsaPubSerialized := getTestKey()
+
+ for i, test := range knownHostsParseTests {
+ var expectedKey PublicKey
+ const rsaKeyToken = "{RSAPUB}"
+
+ input := test.input
+ if strings.Contains(input, rsaKeyToken) {
+ expectedKey = rsaPub
+ input = strings.Replace(test.input, rsaKeyToken, rsaPubSerialized, -1)
+ }
+
+ marker, hosts, pubKey, comment, rest, err := ParseKnownHosts([]byte(input))
+ if err != nil {
+ if len(test.err) == 0 {
+ t.Errorf("#%d: unexpectedly failed with %q", i, err)
+ } else if !strings.Contains(err.Error(), test.err) {
+ t.Errorf("#%d: expected error containing %q, but got %q", i, test.err, err)
+ }
+ continue
+ } else if len(test.err) != 0 {
+ t.Errorf("#%d: succeeded but expected error including %q", i, test.err)
+ continue
+ }
+
+ if !reflect.DeepEqual(expectedKey, pubKey) {
+ t.Errorf("#%d: expected key %#v, but got %#v", i, expectedKey, pubKey)
+ }
+
+ if marker != test.marker {
+ t.Errorf("#%d: expected marker %q, but got %q", i, test.marker, marker)
+ }
+
+ if comment != test.comment {
+ t.Errorf("#%d: expected comment %q, but got %q", i, test.comment, comment)
+ }
+
+ if !reflect.DeepEqual(test.hosts, hosts) {
+ t.Errorf("#%d: expected hosts %#v, but got %#v", i, test.hosts, hosts)
+ }
+
+ if rest := string(rest); rest != test.rest {
+ t.Errorf("#%d: expected remaining input to be %q, but got %q", i, test.rest, rest)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mac.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mac.go
new file mode 100644
index 00000000000..07744ad6713
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mac.go
@@ -0,0 +1,57 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+// Message authentication support
+
+import (
+ "crypto/hmac"
+ "crypto/sha1"
+ "crypto/sha256"
+ "hash"
+)
+
+type macMode struct {
+ keySize int
+ new func(key []byte) hash.Hash
+}
+
+// truncatingMAC wraps around a hash.Hash and truncates the output digest to
+// a given size.
+type truncatingMAC struct {
+ length int
+ hmac hash.Hash
+}
+
+func (t truncatingMAC) Write(data []byte) (int, error) {
+ return t.hmac.Write(data)
+}
+
+func (t truncatingMAC) Sum(in []byte) []byte {
+ out := t.hmac.Sum(in)
+ return out[:len(in)+t.length]
+}
+
+func (t truncatingMAC) Reset() {
+ t.hmac.Reset()
+}
+
+func (t truncatingMAC) Size() int {
+ return t.length
+}
+
+func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
+
+var macModes = map[string]*macMode{
+ "hmac-sha2-256": {32, func(key []byte) hash.Hash {
+ return hmac.New(sha256.New, key)
+ }},
+ "hmac-sha1": {20, func(key []byte) hash.Hash {
+ return hmac.New(sha1.New, key)
+ }},
+ "hmac-sha1-96": {20, func(key []byte) hash.Hash {
+ return truncatingMAC{12, hmac.New(sha1.New, key)}
+ }},
+}
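+
+// A minimal usage sketch of the mode table above; the all-zero key is a
+// stand-in, since real MAC keys are derived during key exchange:
+//
+//    mode := macModes["hmac-sha1-96"]
+//    mac := mode.new(make([]byte, mode.keySize))
+//    mac.Write([]byte("sequence number and packet"))
+//    tag := mac.Sum(nil) // 12 bytes: SHA-1 output truncated to 96 bits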
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mempipe_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mempipe_test.go
new file mode 100644
index 00000000000..8697cd6140a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mempipe_test.go
@@ -0,0 +1,110 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "io"
+ "sync"
+ "testing"
+)
+
+// An in-memory packetConn. It is safe to call Close and writePacket
+// from different goroutines.
+type memTransport struct {
+ eof bool
+ pending [][]byte
+ write *memTransport
+ sync.Mutex
+ *sync.Cond
+}
+
+func (t *memTransport) readPacket() ([]byte, error) {
+ t.Lock()
+ defer t.Unlock()
+ for {
+ if len(t.pending) > 0 {
+ r := t.pending[0]
+ t.pending = t.pending[1:]
+ return r, nil
+ }
+ if t.eof {
+ return nil, io.EOF
+ }
+ t.Cond.Wait()
+ }
+}
+
+func (t *memTransport) closeSelf() error {
+ t.Lock()
+ defer t.Unlock()
+ if t.eof {
+ return io.EOF
+ }
+ t.eof = true
+ t.Cond.Broadcast()
+ return nil
+}
+
+func (t *memTransport) Close() error {
+ err := t.write.closeSelf()
+ t.closeSelf()
+ return err
+}
+
+func (t *memTransport) writePacket(p []byte) error {
+ t.write.Lock()
+ defer t.write.Unlock()
+ if t.write.eof {
+ return io.EOF
+ }
+ c := make([]byte, len(p))
+ copy(c, p)
+ t.write.pending = append(t.write.pending, c)
+ t.write.Cond.Signal()
+ return nil
+}
+
+func memPipe() (a, b packetConn) {
+ t1 := memTransport{}
+ t2 := memTransport{}
+ t1.write = &t2
+ t2.write = &t1
+ t1.Cond = sync.NewCond(&t1.Mutex)
+ t2.Cond = sync.NewCond(&t2.Mutex)
+ return &t1, &t2
+}
+
+func TestMemPipe(t *testing.T) {
+ a, b := memPipe()
+ if err := a.writePacket([]byte{42}); err != nil {
+ t.Fatalf("writePacket: %v", err)
+ }
+ if err := a.Close(); err != nil {
+ t.Fatal("Close: ", err)
+ }
+ p, err := b.readPacket()
+ if err != nil {
+ t.Fatal("readPacket: ", err)
+ }
+ if len(p) != 1 || p[0] != 42 {
+ t.Fatalf("got %v, want {42}", p)
+ }
+ p, err = b.readPacket()
+ if err != io.EOF {
+ t.Fatalf("got %v, %v, want EOF", p, err)
+ }
+}
+
+func TestDoubleClose(t *testing.T) {
+ a, _ := memPipe()
+ err := a.Close()
+ if err != nil {
+ t.Errorf("Close: %v", err)
+ }
+ err = a.Close()
+ if err != io.EOF {
+ t.Errorf("expect EOF on double close.")
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/messages.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/messages.go
new file mode 100644
index 00000000000..eaf6106698e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/messages.go
@@ -0,0 +1,725 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "reflect"
+ "strconv"
+)
+
+// These are SSH message type numbers. They are scattered around several
+// documents but many were taken from [SSH-PARAMETERS].
+const (
+ msgIgnore = 2
+ msgUnimplemented = 3
+ msgDebug = 4
+ msgNewKeys = 21
+
+ // Standard authentication messages
+ msgUserAuthSuccess = 52
+ msgUserAuthBanner = 53
+)
+
+// SSH messages:
+//
+// These structures mirror the wire format of the corresponding SSH messages.
+// They are marshaled using reflection with the marshal and unmarshal functions
+// in this file. The only wrinkle is that a final member of type []byte with a
+// ssh tag of "rest" receives the remainder of a packet when unmarshaling.
+
+// See RFC 4253, section 11.1.
+const msgDisconnect = 1
+
+// disconnectMsg is the message that signals a disconnect. It is also
+// the error type returned from mux.Wait()
+type disconnectMsg struct {
+ Reason uint32 `sshtype:"1"`
+ Message string
+ Language string
+}
+
+func (d *disconnectMsg) Error() string {
+ return fmt.Sprintf("ssh: disconnect reason %d: %s", d.Reason, d.Message)
+}
+
+// See RFC 4253, section 7.1.
+const msgKexInit = 20
+
+type kexInitMsg struct {
+ Cookie [16]byte `sshtype:"20"`
+ KexAlgos []string
+ ServerHostKeyAlgos []string
+ CiphersClientServer []string
+ CiphersServerClient []string
+ MACsClientServer []string
+ MACsServerClient []string
+ CompressionClientServer []string
+ CompressionServerClient []string
+ LanguagesClientServer []string
+ LanguagesServerClient []string
+ FirstKexFollows bool
+ Reserved uint32
+}
+
+// See RFC 4253, section 8.
+
+// Diffie-Hellman
+const msgKexDHInit = 30
+
+type kexDHInitMsg struct {
+ X *big.Int `sshtype:"30"`
+}
+
+const msgKexECDHInit = 30
+
+type kexECDHInitMsg struct {
+ ClientPubKey []byte `sshtype:"30"`
+}
+
+const msgKexECDHReply = 31
+
+type kexECDHReplyMsg struct {
+ HostKey []byte `sshtype:"31"`
+ EphemeralPubKey []byte
+ Signature []byte
+}
+
+const msgKexDHReply = 31
+
+type kexDHReplyMsg struct {
+ HostKey []byte `sshtype:"31"`
+ Y *big.Int
+ Signature []byte
+}
+
+// See RFC 4253, section 10.
+const msgServiceRequest = 5
+
+type serviceRequestMsg struct {
+ Service string `sshtype:"5"`
+}
+
+// See RFC 4253, section 10.
+const msgServiceAccept = 6
+
+type serviceAcceptMsg struct {
+ Service string `sshtype:"6"`
+}
+
+// See RFC 4252, section 5.
+const msgUserAuthRequest = 50
+
+type userAuthRequestMsg struct {
+ User string `sshtype:"50"`
+ Service string
+ Method string
+ Payload []byte `ssh:"rest"`
+}
+
+// See RFC 4252, section 5.1
+const msgUserAuthFailure = 51
+
+type userAuthFailureMsg struct {
+ Methods []string `sshtype:"51"`
+ PartialSuccess bool
+}
+
+// See RFC 4256, section 3.2
+const msgUserAuthInfoRequest = 60
+const msgUserAuthInfoResponse = 61
+
+type userAuthInfoRequestMsg struct {
+ User string `sshtype:"60"`
+ Instruction string
+ DeprecatedLanguage string
+ NumPrompts uint32
+ Prompts []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 5.1.
+const msgChannelOpen = 90
+
+type channelOpenMsg struct {
+ ChanType string `sshtype:"90"`
+ PeersId uint32
+ PeersWindow uint32
+ MaxPacketSize uint32
+ TypeSpecificData []byte `ssh:"rest"`
+}
+
+const msgChannelExtendedData = 95
+const msgChannelData = 94
+
+// See RFC 4254, section 5.1.
+const msgChannelOpenConfirm = 91
+
+type channelOpenConfirmMsg struct {
+ PeersId uint32 `sshtype:"91"`
+ MyId uint32
+ MyWindow uint32
+ MaxPacketSize uint32
+ TypeSpecificData []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 5.1.
+const msgChannelOpenFailure = 92
+
+type channelOpenFailureMsg struct {
+ PeersId uint32 `sshtype:"92"`
+ Reason RejectionReason
+ Message string
+ Language string
+}
+
+const msgChannelRequest = 98
+
+type channelRequestMsg struct {
+ PeersId uint32 `sshtype:"98"`
+ Request string
+ WantReply bool
+ RequestSpecificData []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 5.4.
+const msgChannelSuccess = 99
+
+type channelRequestSuccessMsg struct {
+ PeersId uint32 `sshtype:"99"`
+}
+
+// See RFC 4254, section 5.4.
+const msgChannelFailure = 100
+
+type channelRequestFailureMsg struct {
+ PeersId uint32 `sshtype:"100"`
+}
+
+// See RFC 4254, section 5.3
+const msgChannelClose = 97
+
+type channelCloseMsg struct {
+ PeersId uint32 `sshtype:"97"`
+}
+
+// See RFC 4254, section 5.3
+const msgChannelEOF = 96
+
+type channelEOFMsg struct {
+ PeersId uint32 `sshtype:"96"`
+}
+
+// See RFC 4254, section 4
+const msgGlobalRequest = 80
+
+type globalRequestMsg struct {
+ Type string `sshtype:"80"`
+ WantReply bool
+ Data []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 4
+const msgRequestSuccess = 81
+
+type globalRequestSuccessMsg struct {
+ Data []byte `ssh:"rest" sshtype:"81"`
+}
+
+// See RFC 4254, section 4
+const msgRequestFailure = 82
+
+type globalRequestFailureMsg struct {
+ Data []byte `ssh:"rest" sshtype:"82"`
+}
+
+// See RFC 4254, section 5.2
+const msgChannelWindowAdjust = 93
+
+type windowAdjustMsg struct {
+ PeersId uint32 `sshtype:"93"`
+ AdditionalBytes uint32
+}
+
+// See RFC 4252, section 7
+const msgUserAuthPubKeyOk = 60
+
+type userAuthPubKeyOkMsg struct {
+ Algo string `sshtype:"60"`
+ PubKey []byte
+}
+
+// typeTag returns the type byte for the given type. The type should
+// be a struct.
+func typeTag(structType reflect.Type) byte {
+ var tag byte
+ tagStr := structType.Field(0).Tag.Get("sshtype")
+ i, err := strconv.Atoi(tagStr)
+ if err == nil {
+ tag = byte(i)
+ }
+ return tag
+}
+
+func fieldError(t reflect.Type, field int, problem string) error {
+ if problem != "" {
+ problem = ": " + problem
+ }
+ return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem)
+}
+
+var errShortRead = errors.New("ssh: short read")
+
+// Unmarshal parses data in SSH wire format into a structure. The out
+// argument should be a pointer to a struct. If the first member of the
+// struct has the "sshtype" tag set to a number in decimal, the packet
+// must start with that number. In case of error, Unmarshal returns a
+// ParseError or UnexpectedMessageError.
+func Unmarshal(data []byte, out interface{}) error {
+ v := reflect.ValueOf(out).Elem()
+ structType := v.Type()
+ expectedType := typeTag(structType)
+ if len(data) == 0 {
+ return parseError(expectedType)
+ }
+ if expectedType > 0 {
+ if data[0] != expectedType {
+ return unexpectedMessageError(expectedType, data[0])
+ }
+ data = data[1:]
+ }
+
+ var ok bool
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ t := field.Type()
+ switch t.Kind() {
+ case reflect.Bool:
+ if len(data) < 1 {
+ return errShortRead
+ }
+ field.SetBool(data[0] != 0)
+ data = data[1:]
+ case reflect.Array:
+ if t.Elem().Kind() != reflect.Uint8 {
+ return fieldError(structType, i, "array of unsupported type")
+ }
+ if len(data) < t.Len() {
+ return errShortRead
+ }
+ for j, n := 0, t.Len(); j < n; j++ {
+ field.Index(j).Set(reflect.ValueOf(data[j]))
+ }
+ data = data[t.Len():]
+ case reflect.Uint64:
+ var u64 uint64
+ if u64, data, ok = parseUint64(data); !ok {
+ return errShortRead
+ }
+ field.SetUint(u64)
+ case reflect.Uint32:
+ var u32 uint32
+ if u32, data, ok = parseUint32(data); !ok {
+ return errShortRead
+ }
+ field.SetUint(uint64(u32))
+ case reflect.Uint8:
+ if len(data) < 1 {
+ return errShortRead
+ }
+ field.SetUint(uint64(data[0]))
+ data = data[1:]
+ case reflect.String:
+ var s []byte
+ if s, data, ok = parseString(data); !ok {
+ return fieldError(structType, i, "")
+ }
+ field.SetString(string(s))
+ case reflect.Slice:
+ switch t.Elem().Kind() {
+ case reflect.Uint8:
+ if structType.Field(i).Tag.Get("ssh") == "rest" {
+ field.Set(reflect.ValueOf(data))
+ data = nil
+ } else {
+ var s []byte
+ if s, data, ok = parseString(data); !ok {
+ return errShortRead
+ }
+ field.Set(reflect.ValueOf(s))
+ }
+ case reflect.String:
+ var nl []string
+ if nl, data, ok = parseNameList(data); !ok {
+ return errShortRead
+ }
+ field.Set(reflect.ValueOf(nl))
+ default:
+ return fieldError(structType, i, "slice of unsupported type")
+ }
+ case reflect.Ptr:
+ if t == bigIntType {
+ var n *big.Int
+ if n, data, ok = parseInt(data); !ok {
+ return errShortRead
+ }
+ field.Set(reflect.ValueOf(n))
+ } else {
+ return fieldError(structType, i, "pointer to unsupported type")
+ }
+ default:
+ return fieldError(structType, i, "unsupported type")
+ }
+ }
+
+ if len(data) != 0 {
+ return parseError(expectedType)
+ }
+
+ return nil
+}
+
+// Marshal serializes the message in msg to SSH wire format. The msg
+// argument should be a struct or a pointer to a struct. If the first
+// member has the "sshtype" tag set to a number in decimal, that
+// number is prepended to the result. If the last member has the
+// "ssh" tag set to "rest", its contents are appended to the output.
+func Marshal(msg interface{}) []byte {
+ out := make([]byte, 0, 64)
+ return marshalStruct(out, msg)
+}
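+
+// A round-trip sketch for a hypothetical message type 42, showing the
+// "sshtype" and "rest" tags together:
+//
+//    type pingMsg struct {
+//        Seq  uint32 `sshtype:"42"`
+//        Data []byte `ssh:"rest"`
+//    }
+//
+//    packet := Marshal(&pingMsg{Seq: 7, Data: []byte("x")})
+//    // packet[0] == 42; Seq follows as a big-endian uint32; Data is
+//    // appended verbatim because of the "rest" tag.
+//    var out pingMsg
+//    err := Unmarshal(packet, &out) // out now equals the input message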
+
+func marshalStruct(out []byte, msg interface{}) []byte {
+ v := reflect.Indirect(reflect.ValueOf(msg))
+ msgType := typeTag(v.Type())
+ if msgType > 0 {
+ out = append(out, msgType)
+ }
+
+ for i, n := 0, v.NumField(); i < n; i++ {
+ field := v.Field(i)
+ switch t := field.Type(); t.Kind() {
+ case reflect.Bool:
+ var v uint8
+ if field.Bool() {
+ v = 1
+ }
+ out = append(out, v)
+ case reflect.Array:
+ if t.Elem().Kind() != reflect.Uint8 {
+ panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface()))
+ }
+ for j, l := 0, t.Len(); j < l; j++ {
+ out = append(out, uint8(field.Index(j).Uint()))
+ }
+ case reflect.Uint32:
+ out = appendU32(out, uint32(field.Uint()))
+ case reflect.Uint64:
+ out = appendU64(out, uint64(field.Uint()))
+ case reflect.Uint8:
+ out = append(out, uint8(field.Uint()))
+ case reflect.String:
+ s := field.String()
+ out = appendInt(out, len(s))
+ out = append(out, s...)
+ case reflect.Slice:
+ switch t.Elem().Kind() {
+ case reflect.Uint8:
+ if v.Type().Field(i).Tag.Get("ssh") != "rest" {
+ out = appendInt(out, field.Len())
+ }
+ out = append(out, field.Bytes()...)
+ case reflect.String:
+ offset := len(out)
+ out = appendU32(out, 0)
+ if n := field.Len(); n > 0 {
+ for j := 0; j < n; j++ {
+ f := field.Index(j)
+ if j != 0 {
+ out = append(out, ',')
+ }
+ out = append(out, f.String()...)
+ }
+ // overwrite length value
+ binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4))
+ }
+ default:
+ panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface()))
+ }
+ case reflect.Ptr:
+ if t == bigIntType {
+ var n *big.Int
+ nValue := reflect.ValueOf(&n)
+ nValue.Elem().Set(field)
+ needed := intLength(n)
+ oldLength := len(out)
+
+ if cap(out)-len(out) < needed {
+ newOut := make([]byte, len(out), 2*(len(out)+needed))
+ copy(newOut, out)
+ out = newOut
+ }
+ out = out[:oldLength+needed]
+ marshalInt(out[oldLength:], n)
+ } else {
+ panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface()))
+ }
+ }
+ }
+
+ return out
+}
+
+var bigOne = big.NewInt(1)
+
+func parseString(in []byte) (out, rest []byte, ok bool) {
+ if len(in) < 4 {
+ return
+ }
+ length := binary.BigEndian.Uint32(in)
+ in = in[4:]
+ if uint32(len(in)) < length {
+ return
+ }
+ out = in[:length]
+ rest = in[length:]
+ ok = true
+ return
+}
+
+var (
+ comma = []byte{','}
+ emptyNameList = []string{}
+)
+
+func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
+ contents, rest, ok := parseString(in)
+ if !ok {
+ return
+ }
+ if len(contents) == 0 {
+ out = emptyNameList
+ return
+ }
+ parts := bytes.Split(contents, comma)
+ out = make([]string, len(parts))
+ for i, part := range parts {
+ out[i] = string(part)
+ }
+ return
+}
+
+func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) {
+ contents, rest, ok := parseString(in)
+ if !ok {
+ return
+ }
+ out = new(big.Int)
+
+ if len(contents) > 0 && contents[0]&0x80 == 0x80 {
+ // This is a negative number
+ notBytes := make([]byte, len(contents))
+ for i := range notBytes {
+ notBytes[i] = ^contents[i]
+ }
+ out.SetBytes(notBytes)
+ out.Add(out, bigOne)
+ out.Neg(out)
+ } else {
+ // Positive number
+ out.SetBytes(contents)
+ }
+ ok = true
+ return
+}
+
+func parseUint32(in []byte) (uint32, []byte, bool) {
+ if len(in) < 4 {
+ return 0, nil, false
+ }
+ return binary.BigEndian.Uint32(in), in[4:], true
+}
+
+func parseUint64(in []byte) (uint64, []byte, bool) {
+ if len(in) < 8 {
+ return 0, nil, false
+ }
+ return binary.BigEndian.Uint64(in), in[8:], true
+}
+
+func intLength(n *big.Int) int {
+ length := 4 /* length bytes */
+ if n.Sign() < 0 {
+ nMinus1 := new(big.Int).Neg(n)
+ nMinus1.Sub(nMinus1, bigOne)
+ bitLen := nMinus1.BitLen()
+ if bitLen%8 == 0 {
+ // The number will need 0xff padding
+ length++
+ }
+ length += (bitLen + 7) / 8
+ } else if n.Sign() == 0 {
+ // Zero is encoded as the zero-length string.
+ } else {
+ bitLen := n.BitLen()
+ if bitLen%8 == 0 {
+ // The number will need 0x00 padding
+ length++
+ }
+ length += (bitLen + 7) / 8
+ }
+
+ return length
+}
+
+func marshalUint32(to []byte, n uint32) []byte {
+ binary.BigEndian.PutUint32(to, n)
+ return to[4:]
+}
+
+func marshalUint64(to []byte, n uint64) []byte {
+ binary.BigEndian.PutUint64(to, n)
+ return to[8:]
+}
+
+func marshalInt(to []byte, n *big.Int) []byte {
+ lengthBytes := to
+ to = to[4:]
+ length := 0
+
+ if n.Sign() < 0 {
+ // A negative number has to be converted to two's-complement
+ // form. So we'll subtract 1 and invert. If the
+ // most-significant-bit isn't set then we'll need to pad the
+ // beginning with 0xff in order to keep the number negative.
+ nMinus1 := new(big.Int).Neg(n)
+ nMinus1.Sub(nMinus1, bigOne)
+ bytes := nMinus1.Bytes()
+ for i := range bytes {
+ bytes[i] ^= 0xff
+ }
+ if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+ to[0] = 0xff
+ to = to[1:]
+ length++
+ }
+ nBytes := copy(to, bytes)
+ to = to[nBytes:]
+ length += nBytes
+ } else if n.Sign() == 0 {
+ // Zero is encoded as the zero-length string.
+ } else {
+ bytes := n.Bytes()
+ if len(bytes) > 0 && bytes[0]&0x80 != 0 {
+ // We'll have to pad this with a 0x00 in order to
+ // stop it looking like a negative number.
+ to[0] = 0
+ to = to[1:]
+ length++
+ }
+ nBytes := copy(to, bytes)
+ to = to[nBytes:]
+ length += nBytes
+ }
+
+ lengthBytes[0] = byte(length >> 24)
+ lengthBytes[1] = byte(length >> 16)
+ lengthBytes[2] = byte(length >> 8)
+ lengthBytes[3] = byte(length)
+ return to
+}
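+
+// Worked examples of the resulting mpint encoding (RFC 4251, section 5):
+//
+//    value 0:           00 00 00 00                 (zero-length string)
+//    value 0x80:        00 00 00 02 00 80           (0x00 pad keeps it positive)
+//    value -0x1234:     00 00 00 02 ed cc           (two's complement)
+//    value -0xdeadbeef: 00 00 00 05 ff 21 52 41 11  (0xff pad keeps it negative)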
+
+func writeInt(w io.Writer, n *big.Int) {
+ length := intLength(n)
+ buf := make([]byte, length)
+ marshalInt(buf, n)
+ w.Write(buf)
+}
+
+func writeString(w io.Writer, s []byte) {
+ var lengthBytes [4]byte
+ lengthBytes[0] = byte(len(s) >> 24)
+ lengthBytes[1] = byte(len(s) >> 16)
+ lengthBytes[2] = byte(len(s) >> 8)
+ lengthBytes[3] = byte(len(s))
+ w.Write(lengthBytes[:])
+ w.Write(s)
+}
+
+func stringLength(n int) int {
+ return 4 + n
+}
+
+func marshalString(to []byte, s []byte) []byte {
+ to[0] = byte(len(s) >> 24)
+ to[1] = byte(len(s) >> 16)
+ to[2] = byte(len(s) >> 8)
+ to[3] = byte(len(s))
+ to = to[4:]
+ copy(to, s)
+ return to[len(s):]
+}
+
+var bigIntType = reflect.TypeOf((*big.Int)(nil))
+
+// Decode a packet into its corresponding message.
+func decode(packet []byte) (interface{}, error) {
+ var msg interface{}
+ switch packet[0] {
+ case msgDisconnect:
+ msg = new(disconnectMsg)
+ case msgServiceRequest:
+ msg = new(serviceRequestMsg)
+ case msgServiceAccept:
+ msg = new(serviceAcceptMsg)
+ case msgKexInit:
+ msg = new(kexInitMsg)
+ case msgKexDHInit:
+ msg = new(kexDHInitMsg)
+ case msgKexDHReply:
+ msg = new(kexDHReplyMsg)
+ case msgUserAuthRequest:
+ msg = new(userAuthRequestMsg)
+ case msgUserAuthFailure:
+ msg = new(userAuthFailureMsg)
+ case msgUserAuthPubKeyOk:
+ msg = new(userAuthPubKeyOkMsg)
+ case msgGlobalRequest:
+ msg = new(globalRequestMsg)
+ case msgRequestSuccess:
+ msg = new(globalRequestSuccessMsg)
+ case msgRequestFailure:
+ msg = new(globalRequestFailureMsg)
+ case msgChannelOpen:
+ msg = new(channelOpenMsg)
+ case msgChannelOpenConfirm:
+ msg = new(channelOpenConfirmMsg)
+ case msgChannelOpenFailure:
+ msg = new(channelOpenFailureMsg)
+ case msgChannelWindowAdjust:
+ msg = new(windowAdjustMsg)
+ case msgChannelEOF:
+ msg = new(channelEOFMsg)
+ case msgChannelClose:
+ msg = new(channelCloseMsg)
+ case msgChannelRequest:
+ msg = new(channelRequestMsg)
+ case msgChannelSuccess:
+ msg = new(channelRequestSuccessMsg)
+ case msgChannelFailure:
+ msg = new(channelRequestFailureMsg)
+ default:
+ return nil, unexpectedMessageError(0, packet[0])
+ }
+ if err := Unmarshal(packet, msg); err != nil {
+ return nil, err
+ }
+ return msg, nil
+}
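+
+// A small sketch of the decode/Marshal symmetry for one of the message
+// types above:
+//
+//    packet := Marshal(&channelEOFMsg{PeersId: 3})
+//    msg, err := decode(packet)
+//    // on success, msg is a *channelEOFMsg with PeersId == 3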
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/messages_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/messages_test.go
new file mode 100644
index 00000000000..955b5127f9b
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/messages_test.go
@@ -0,0 +1,254 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "math/big"
+ "math/rand"
+ "reflect"
+ "testing"
+ "testing/quick"
+)
+
+var intLengthTests = []struct {
+ val, length int
+}{
+ {0, 4 + 0},
+ {1, 4 + 1},
+ {127, 4 + 1},
+ {128, 4 + 2},
+ {-1, 4 + 1},
+}
+
+func TestIntLength(t *testing.T) {
+ for _, test := range intLengthTests {
+ v := new(big.Int).SetInt64(int64(test.val))
+ length := intLength(v)
+ if length != test.length {
+ t.Errorf("For %d, got length %d but expected %d", test.val, length, test.length)
+ }
+ }
+}
+
+type msgAllTypes struct {
+ Bool bool `sshtype:"21"`
+ Array [16]byte
+ Uint64 uint64
+ Uint32 uint32
+ Uint8 uint8
+ String string
+ Strings []string
+ Bytes []byte
+ Int *big.Int
+ Rest []byte `ssh:"rest"`
+}
+
+func (t *msgAllTypes) Generate(rand *rand.Rand, size int) reflect.Value {
+ m := &msgAllTypes{}
+ m.Bool = rand.Intn(2) == 1
+ randomBytes(m.Array[:], rand)
+ m.Uint64 = uint64(rand.Int63n(1<<63 - 1))
+ m.Uint32 = uint32(rand.Intn((1 << 31) - 1))
+ m.Uint8 = uint8(rand.Intn(1 << 8))
+ m.String = string(m.Array[:])
+ m.Strings = randomNameList(rand)
+ m.Bytes = m.Array[:]
+ m.Int = randomInt(rand)
+ m.Rest = m.Array[:]
+ return reflect.ValueOf(m)
+}
+
+func TestMarshalUnmarshal(t *testing.T) {
+ rand := rand.New(rand.NewSource(0))
+ iface := &msgAllTypes{}
+ ty := reflect.ValueOf(iface).Type()
+
+ n := 100
+ if testing.Short() {
+ n = 5
+ }
+ for j := 0; j < n; j++ {
+ v, ok := quick.Value(ty, rand)
+ if !ok {
+ t.Errorf("failed to create value")
+ break
+ }
+
+ m1 := v.Elem().Interface()
+ m2 := iface
+
+ marshaled := Marshal(m1)
+ if err := Unmarshal(marshaled, m2); err != nil {
+ t.Errorf("Unmarshal %#v: %s", m1, err)
+ break
+ }
+
+ if !reflect.DeepEqual(v.Interface(), m2) {
+ t.Errorf("got: %#v\nwant:%#v\n%x", m2, m1, marshaled)
+ break
+ }
+ }
+}
+
+func TestUnmarshalEmptyPacket(t *testing.T) {
+ var b []byte
+ var m channelRequestSuccessMsg
+ if err := Unmarshal(b, &m); err == nil {
+ t.Fatalf("unmarshal of empty slice succeeded")
+ }
+}
+
+func TestUnmarshalUnexpectedPacket(t *testing.T) {
+ type S struct {
+ I uint32 `sshtype:"43"`
+ S string
+ B bool
+ }
+
+ s := S{11, "hello", true}
+ packet := Marshal(s)
+ packet[0] = 42
+ roundtrip := S{}
+ err := Unmarshal(packet, &roundtrip)
+ if err == nil {
+ t.Fatal("expected error, not nil")
+ }
+}
+
+func TestMarshalPtr(t *testing.T) {
+ s := struct {
+ S string
+ }{"hello"}
+
+ m1 := Marshal(s)
+ m2 := Marshal(&s)
+ if !bytes.Equal(m1, m2) {
+ t.Errorf("got %q, want %q for marshaled pointer", m2, m1)
+ }
+}
+
+func TestBareMarshalUnmarshal(t *testing.T) {
+ type S struct {
+ I uint32
+ S string
+ B bool
+ }
+
+ s := S{42, "hello", true}
+ packet := Marshal(s)
+ roundtrip := S{}
+ Unmarshal(packet, &roundtrip)
+
+ if !reflect.DeepEqual(s, roundtrip) {
+ t.Errorf("got %#v, want %#v", roundtrip, s)
+ }
+}
+
+func TestBareMarshal(t *testing.T) {
+ type S2 struct {
+ I uint32
+ }
+ s := S2{42}
+ packet := Marshal(s)
+ i, rest, ok := parseUint32(packet)
+ if len(rest) > 0 || !ok {
+ t.Errorf("parseInt(%q): parse error", packet)
+ }
+ if i != s.I {
+ t.Errorf("got %d, want %d", i, s.I)
+ }
+}
+
+func TestUnmarshalShortKexInitPacket(t *testing.T) {
+ // This used to panic.
+ // Issue 11348
+ packet := []byte{0x14, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0xff, 0xff, 0xff, 0xff}
+ kim := &kexInitMsg{}
+ if err := Unmarshal(packet, kim); err == nil {
+ t.Error("truncated packet unmarshaled without error")
+ }
+}
+
+func randomBytes(out []byte, rand *rand.Rand) {
+ for i := 0; i < len(out); i++ {
+ out[i] = byte(rand.Int31())
+ }
+}
+
+func randomNameList(rand *rand.Rand) []string {
+ ret := make([]string, rand.Int31()&15)
+ for i := range ret {
+ s := make([]byte, 1+(rand.Int31()&15))
+ for j := range s {
+ s[j] = 'a' + uint8(rand.Int31()&15)
+ }
+ ret[i] = string(s)
+ }
+ return ret
+}
+
+func randomInt(rand *rand.Rand) *big.Int {
+ return new(big.Int).SetInt64(int64(int32(rand.Uint32())))
+}
+
+func (*kexInitMsg) Generate(rand *rand.Rand, size int) reflect.Value {
+ ki := &kexInitMsg{}
+ randomBytes(ki.Cookie[:], rand)
+ ki.KexAlgos = randomNameList(rand)
+ ki.ServerHostKeyAlgos = randomNameList(rand)
+ ki.CiphersClientServer = randomNameList(rand)
+ ki.CiphersServerClient = randomNameList(rand)
+ ki.MACsClientServer = randomNameList(rand)
+ ki.MACsServerClient = randomNameList(rand)
+ ki.CompressionClientServer = randomNameList(rand)
+ ki.CompressionServerClient = randomNameList(rand)
+ ki.LanguagesClientServer = randomNameList(rand)
+ ki.LanguagesServerClient = randomNameList(rand)
+ if rand.Int31()&1 == 1 {
+ ki.FirstKexFollows = true
+ }
+ return reflect.ValueOf(ki)
+}
+
+func (*kexDHInitMsg) Generate(rand *rand.Rand, size int) reflect.Value {
+ dhi := &kexDHInitMsg{}
+ dhi.X = randomInt(rand)
+ return reflect.ValueOf(dhi)
+}
+
+var (
+ _kexInitMsg = new(kexInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface()
+ _kexDHInitMsg = new(kexDHInitMsg).Generate(rand.New(rand.NewSource(0)), 10).Elem().Interface()
+
+ _kexInit = Marshal(_kexInitMsg)
+ _kexDHInit = Marshal(_kexDHInitMsg)
+)
+
+func BenchmarkMarshalKexInitMsg(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Marshal(_kexInitMsg)
+ }
+}
+
+func BenchmarkUnmarshalKexInitMsg(b *testing.B) {
+ m := new(kexInitMsg)
+ for i := 0; i < b.N; i++ {
+ Unmarshal(_kexInit, m)
+ }
+}
+
+func BenchmarkMarshalKexDHInitMsg(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Marshal(_kexDHInitMsg)
+ }
+}
+
+func BenchmarkUnmarshalKexDHInitMsg(b *testing.B) {
+ m := new(kexDHInitMsg)
+ for i := 0; i < b.N; i++ {
+ Unmarshal(_kexDHInit, m)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mux.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mux.go
new file mode 100644
index 00000000000..321880ad9a4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mux.go
@@ -0,0 +1,356 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "log"
+ "sync"
+ "sync/atomic"
+)
+
+// debugMux, if set, causes messages in the connection protocol to be
+// logged.
+const debugMux = false
+
+// chanList is a thread safe channel list.
+type chanList struct {
+ // protects concurrent access to chans
+ sync.Mutex
+
+ // chans are indexed by the local id of the channel, which the
+ // other side should send in the PeersId field.
+ chans []*channel
+
+ // This is a debugging aid: it offsets all IDs by this
+ // amount. This helps distinguish otherwise identical
+ // server/client muxes
+ offset uint32
+}
+
+// Assigns a channel ID to the given channel.
+func (c *chanList) add(ch *channel) uint32 {
+ c.Lock()
+ defer c.Unlock()
+ for i := range c.chans {
+ if c.chans[i] == nil {
+ c.chans[i] = ch
+ return uint32(i) + c.offset
+ }
+ }
+ c.chans = append(c.chans, ch)
+ return uint32(len(c.chans)-1) + c.offset
+}
+
+// getChan returns the channel for the given ID.
+func (c *chanList) getChan(id uint32) *channel {
+ id -= c.offset
+
+ c.Lock()
+ defer c.Unlock()
+ if id < uint32(len(c.chans)) {
+ return c.chans[id]
+ }
+ return nil
+}
+
+func (c *chanList) remove(id uint32) {
+ id -= c.offset
+ c.Lock()
+ if id < uint32(len(c.chans)) {
+ c.chans[id] = nil
+ }
+ c.Unlock()
+}
+
+// dropAll forgets all channels it knows, returning them in a slice.
+func (c *chanList) dropAll() []*channel {
+ c.Lock()
+ defer c.Unlock()
+ var r []*channel
+
+ for _, ch := range c.chans {
+ if ch == nil {
+ continue
+ }
+ r = append(r, ch)
+ }
+ c.chans = nil
+ return r
+}
+
+// mux represents the state for the SSH connection protocol, which
+// multiplexes many channels onto a single packet transport.
+type mux struct {
+ conn packetConn
+ chanList chanList
+
+ incomingChannels chan NewChannel
+
+ globalSentMu sync.Mutex
+ globalResponses chan interface{}
+ incomingRequests chan *Request
+
+ errCond *sync.Cond
+ err error
+}
+
+// When debugging, each new chanList instantiation has a different
+// offset.
+var globalOff uint32
+
+func (m *mux) Wait() error {
+ m.errCond.L.Lock()
+ defer m.errCond.L.Unlock()
+ for m.err == nil {
+ m.errCond.Wait()
+ }
+ return m.err
+}
+
+// newMux returns a mux that runs over the given connection.
+func newMux(p packetConn) *mux {
+ m := &mux{
+ conn: p,
+ incomingChannels: make(chan NewChannel, 16),
+ globalResponses: make(chan interface{}, 1),
+ incomingRequests: make(chan *Request, 16),
+ errCond: newCond(),
+ }
+ if debugMux {
+ m.chanList.offset = atomic.AddUint32(&globalOff, 1)
+ }
+
+ go m.loop()
+ return m
+}
+
+func (m *mux) sendMessage(msg interface{}) error {
+ p := Marshal(msg)
+ return m.conn.writePacket(p)
+}
+
+func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) {
+ if wantReply {
+ m.globalSentMu.Lock()
+ defer m.globalSentMu.Unlock()
+ }
+
+ if err := m.sendMessage(globalRequestMsg{
+ Type: name,
+ WantReply: wantReply,
+ Data: payload,
+ }); err != nil {
+ return false, nil, err
+ }
+
+ if !wantReply {
+ return false, nil, nil
+ }
+
+ msg, ok := <-m.globalResponses
+ if !ok {
+ return false, nil, io.EOF
+ }
+ switch msg := msg.(type) {
+ case *globalRequestFailureMsg:
+ return false, msg.Data, nil
+ case *globalRequestSuccessMsg:
+ return true, msg.Data, nil
+ default:
+ return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg)
+ }
+}
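+
+// A sketch of the round trip, assuming clientMux and serverMux are the
+// two ends of a connection (e.g. built with newMux over a transport
+// pair) and the server side drains incomingRequests:
+//
+//    go func() {
+//        r := <-serverMux.incomingRequests
+//        r.Reply(true, nil) // answered with msgRequestSuccess
+//    }()
+//    ok, _, err := clientMux.SendRequest("keepalive@openssh.com", true, nil)
+//    // ok reports the peer's answer; err reports transport failure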
+
+// ackRequest must be called after processing a global request that
+// has WantReply set.
+func (m *mux) ackRequest(ok bool, data []byte) error {
+ if ok {
+ return m.sendMessage(globalRequestSuccessMsg{Data: data})
+ }
+ return m.sendMessage(globalRequestFailureMsg{Data: data})
+}
+
+// TODO(hanwen): Disconnect is a transport layer message. We should
+// probably send and receive Disconnect somewhere in the transport
+// code.
+
+// Disconnect sends a disconnect message.
+func (m *mux) Disconnect(reason uint32, message string) error {
+ return m.sendMessage(disconnectMsg{
+ Reason: reason,
+ Message: message,
+ })
+}
+
+func (m *mux) Close() error {
+ return m.conn.Close()
+}
+
+// loop runs the connection machine. It will process packets until an
+// error is encountered. To synchronize on loop exit, use mux.Wait.
+func (m *mux) loop() {
+ var err error
+ for err == nil {
+ err = m.onePacket()
+ }
+
+ for _, ch := range m.chanList.dropAll() {
+ ch.close()
+ }
+
+ close(m.incomingChannels)
+ close(m.incomingRequests)
+ close(m.globalResponses)
+
+ m.conn.Close()
+
+ m.errCond.L.Lock()
+ m.err = err
+ m.errCond.Broadcast()
+ m.errCond.L.Unlock()
+
+ if debugMux {
+ log.Println("loop exit", err)
+ }
+}
+
+// onePacket reads and processes one packet.
+func (m *mux) onePacket() error {
+ packet, err := m.conn.readPacket()
+ if err != nil {
+ return err
+ }
+
+ if debugMux {
+ if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData {
+ log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet))
+ } else {
+ p, _ := decode(packet)
+ log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet))
+ }
+ }
+
+ switch packet[0] {
+ case msgNewKeys:
+ // Ignore notification of key change.
+ return nil
+ case msgDisconnect:
+ return m.handleDisconnect(packet)
+ case msgChannelOpen:
+ return m.handleChannelOpen(packet)
+ case msgGlobalRequest, msgRequestSuccess, msgRequestFailure:
+ return m.handleGlobalPacket(packet)
+ }
+
+ // assume a channel packet.
+ if len(packet) < 5 {
+ return parseError(packet[0])
+ }
+ id := binary.BigEndian.Uint32(packet[1:])
+ ch := m.chanList.getChan(id)
+ if ch == nil {
+ return fmt.Errorf("ssh: invalid channel %d", id)
+ }
+
+ return ch.handlePacket(packet)
+}
+
+func (m *mux) handleDisconnect(packet []byte) error {
+ var d disconnectMsg
+ if err := Unmarshal(packet, &d); err != nil {
+ return err
+ }
+
+ if debugMux {
+ log.Printf("caught disconnect: %v", d)
+ }
+ return &d
+}
+
+func (m *mux) handleGlobalPacket(packet []byte) error {
+ msg, err := decode(packet)
+ if err != nil {
+ return err
+ }
+
+ switch msg := msg.(type) {
+ case *globalRequestMsg:
+ m.incomingRequests <- &Request{
+ Type: msg.Type,
+ WantReply: msg.WantReply,
+ Payload: msg.Data,
+ mux: m,
+ }
+ case *globalRequestSuccessMsg, *globalRequestFailureMsg:
+ m.globalResponses <- msg
+ default:
+ panic(fmt.Sprintf("not a global message %#v", msg))
+ }
+
+ return nil
+}
+
+// handleChannelOpen schedules a channel to be Accept()ed.
+func (m *mux) handleChannelOpen(packet []byte) error {
+ var msg channelOpenMsg
+ if err := Unmarshal(packet, &msg); err != nil {
+ return err
+ }
+
+ if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
+ failMsg := channelOpenFailureMsg{
+ PeersId: msg.PeersId,
+ Reason: ConnectionFailed,
+ Message: "invalid request",
+ Language: "en_US.UTF-8",
+ }
+ return m.sendMessage(failMsg)
+ }
+
+ c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData)
+ c.remoteId = msg.PeersId
+ c.maxRemotePayload = msg.MaxPacketSize
+ c.remoteWin.add(msg.PeersWindow)
+ m.incomingChannels <- c
+ return nil
+}
+
+func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) {
+ ch, err := m.openChannel(chanType, extra)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return ch, ch.incomingRequests, nil
+}
+
+func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
+ ch := m.newChannel(chanType, channelOutbound, extra)
+
+ ch.maxIncomingPayload = channelMaxPacket
+
+ open := channelOpenMsg{
+ ChanType: chanType,
+ PeersWindow: ch.myWindow,
+ MaxPacketSize: ch.maxIncomingPayload,
+ TypeSpecificData: extra,
+ PeersId: ch.localId,
+ }
+ if err := m.sendMessage(open); err != nil {
+ return nil, err
+ }
+
+ switch msg := (<-ch.msg).(type) {
+ case *channelOpenConfirmMsg:
+ return ch, nil
+ case *channelOpenFailureMsg:
+ return nil, &OpenChannelError{msg.Reason, msg.Message}
+ default:
+ return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mux_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mux_test.go
new file mode 100644
index 00000000000..523038960f2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/mux_test.go
@@ -0,0 +1,525 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "io"
+ "io/ioutil"
+ "sync"
+ "testing"
+)
+
+func muxPair() (*mux, *mux) {
+ a, b := memPipe()
+
+ s := newMux(a)
+ c := newMux(b)
+
+ return s, c
+}
+
+// channelPair returns both ends of a channel, and the mux for the
+// 2nd channel.
+func channelPair(t *testing.T) (*channel, *channel, *mux) {
+ c, s := muxPair()
+
+ res := make(chan *channel, 1)
+ go func() {
+ newCh, ok := <-s.incomingChannels
+ if !ok {
+ t.Fatalf("No incoming channel")
+ }
+ if newCh.ChannelType() != "chan" {
+ t.Fatalf("got type %q want chan", newCh.ChannelType())
+ }
+ ch, _, err := newCh.Accept()
+ if err != nil {
+ t.Fatalf("Accept %v", err)
+ }
+ res <- ch.(*channel)
+ }()
+
+ ch, err := c.openChannel("chan", nil)
+ if err != nil {
+ t.Fatalf("OpenChannel: %v", err)
+ }
+
+ return <-res, ch, c
+}
+
+// Test that stderr and stdout can be addressed from different
+// goroutines. This is intended for use with the race detector.
+func TestMuxChannelExtendedThreadSafety(t *testing.T) {
+ writer, reader, mux := channelPair(t)
+ defer writer.Close()
+ defer reader.Close()
+ defer mux.Close()
+
+ var wr, rd sync.WaitGroup
+ magic := "hello world"
+
+ wr.Add(2)
+ go func() {
+ io.WriteString(writer, magic)
+ wr.Done()
+ }()
+ go func() {
+ io.WriteString(writer.Stderr(), magic)
+ wr.Done()
+ }()
+
+ rd.Add(2)
+ go func() {
+ c, err := ioutil.ReadAll(reader)
+ if string(c) != magic {
+ t.Fatalf("stdout read got %q, want %q (error %s)", c, magic, err)
+ }
+ rd.Done()
+ }()
+ go func() {
+ c, err := ioutil.ReadAll(reader.Stderr())
+ if string(c) != magic {
+ t.Fatalf("stderr read got %q, want %q (error %s)", c, magic, err)
+ }
+ rd.Done()
+ }()
+
+ wr.Wait()
+ writer.CloseWrite()
+ rd.Wait()
+}
+
+func TestMuxReadWrite(t *testing.T) {
+ s, c, mux := channelPair(t)
+ defer s.Close()
+ defer c.Close()
+ defer mux.Close()
+
+ magic := "hello world"
+ magicExt := "hello stderr"
+ go func() {
+ _, err := s.Write([]byte(magic))
+ if err != nil {
+ t.Fatalf("Write: %v", err)
+ }
+ _, err = s.Extended(1).Write([]byte(magicExt))
+ if err != nil {
+ t.Fatalf("Write: %v", err)
+ }
+ err = s.Close()
+ if err != nil {
+ t.Fatalf("Close: %v", err)
+ }
+ }()
+
+ var buf [1024]byte
+ n, err := c.Read(buf[:])
+ if err != nil {
+ t.Fatalf("server Read: %v", err)
+ }
+ got := string(buf[:n])
+ if got != magic {
+ t.Fatalf("server: got %q want %q", got, magic)
+ }
+
+ n, err = c.Extended(1).Read(buf[:])
+ if err != nil {
+ t.Fatalf("server Read: %v", err)
+ }
+
+ got = string(buf[:n])
+ if got != magicExt {
+ t.Fatalf("server: got %q want %q", got, magic)
+ }
+}
+
+func TestMuxChannelOverflow(t *testing.T) {
+ reader, writer, mux := channelPair(t)
+ defer reader.Close()
+ defer writer.Close()
+ defer mux.Close()
+
+ wDone := make(chan int, 1)
+ go func() {
+ if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
+ t.Errorf("could not fill window: %v", err)
+ }
+ writer.Write(make([]byte, 1))
+ wDone <- 1
+ }()
+ writer.remoteWin.waitWriterBlocked()
+
+ // Send 1 byte.
+ packet := make([]byte, 1+4+4+1)
+ packet[0] = msgChannelData
+ marshalUint32(packet[1:], writer.remoteId)
+ marshalUint32(packet[5:], uint32(1))
+ packet[9] = 42
+
+ if err := writer.mux.conn.writePacket(packet); err != nil {
+ t.Errorf("could not send packet")
+ }
+ if _, err := reader.SendRequest("hello", true, nil); err == nil {
+ t.Errorf("SendRequest succeeded.")
+ }
+ <-wDone
+}
+
+func TestMuxChannelCloseWriteUnblock(t *testing.T) {
+ reader, writer, mux := channelPair(t)
+ defer reader.Close()
+ defer writer.Close()
+ defer mux.Close()
+
+ wDone := make(chan int, 1)
+ go func() {
+ if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
+ t.Errorf("could not fill window: %v", err)
+ }
+ if _, err := writer.Write(make([]byte, 1)); err != io.EOF {
+ t.Errorf("got %v, want EOF for unblock write", err)
+ }
+ wDone <- 1
+ }()
+
+ writer.remoteWin.waitWriterBlocked()
+ reader.Close()
+ <-wDone
+}
+
+func TestMuxConnectionCloseWriteUnblock(t *testing.T) {
+ reader, writer, mux := channelPair(t)
+ defer reader.Close()
+ defer writer.Close()
+ defer mux.Close()
+
+ wDone := make(chan int, 1)
+ go func() {
+ if _, err := writer.Write(make([]byte, channelWindowSize)); err != nil {
+ t.Errorf("could not fill window: %v", err)
+ }
+ if _, err := writer.Write(make([]byte, 1)); err != io.EOF {
+ t.Errorf("got %v, want EOF for unblock write", err)
+ }
+ wDone <- 1
+ }()
+
+ writer.remoteWin.waitWriterBlocked()
+ mux.Close()
+ <-wDone
+}
+
+func TestMuxReject(t *testing.T) {
+ client, server := muxPair()
+ defer server.Close()
+ defer client.Close()
+
+ go func() {
+ ch, ok := <-server.incomingChannels
+ if !ok {
+ t.Fatalf("Accept")
+ }
+ if ch.ChannelType() != "ch" || string(ch.ExtraData()) != "extra" {
+ t.Fatalf("unexpected channel: %q, %q", ch.ChannelType(), ch.ExtraData())
+ }
+ ch.Reject(RejectionReason(42), "message")
+ }()
+
+ ch, err := client.openChannel("ch", []byte("extra"))
+ if ch != nil {
+ t.Fatal("openChannel not rejected")
+ }
+
+ ocf, ok := err.(*OpenChannelError)
+ if !ok {
+ t.Errorf("got %#v want *OpenChannelError", err)
+ } else if ocf.Reason != 42 || ocf.Message != "message" {
+ t.Errorf("got %#v, want {Reason: 42, Message: %q}", ocf, "message")
+ }
+
+ want := "ssh: rejected: unknown reason 42 (message)"
+ if err.Error() != want {
+ t.Errorf("got %q, want %q", err.Error(), want)
+ }
+}
+
+func TestMuxChannelRequest(t *testing.T) {
+ client, server, mux := channelPair(t)
+ defer server.Close()
+ defer client.Close()
+ defer mux.Close()
+
+ var received int
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ for r := range server.incomingRequests {
+ received++
+ r.Reply(r.Type == "yes", nil)
+ }
+ wg.Done()
+ }()
+ _, err := client.SendRequest("yes", false, nil)
+ if err != nil {
+ t.Fatalf("SendRequest: %v", err)
+ }
+ ok, err := client.SendRequest("yes", true, nil)
+ if err != nil {
+ t.Fatalf("SendRequest: %v", err)
+ }
+
+ if !ok {
+		t.Errorf("SendRequest(yes): %v", ok)
+	}
+
+ ok, err = client.SendRequest("no", true, nil)
+ if err != nil {
+ t.Fatalf("SendRequest: %v", err)
+ }
+ if ok {
+		t.Errorf("SendRequest(no): %v", ok)
+	}
+
+ client.Close()
+ wg.Wait()
+
+ if received != 3 {
+ t.Errorf("got %d requests, want %d", received, 3)
+ }
+}
+
+func TestMuxGlobalRequest(t *testing.T) {
+ clientMux, serverMux := muxPair()
+ defer serverMux.Close()
+ defer clientMux.Close()
+
+ var seen bool
+ go func() {
+ for r := range serverMux.incomingRequests {
+ seen = seen || r.Type == "peek"
+ if r.WantReply {
+ err := r.Reply(r.Type == "yes",
+ append([]byte(r.Type), r.Payload...))
+ if err != nil {
+ t.Errorf("AckRequest: %v", err)
+ }
+ }
+ }
+ }()
+
+ _, _, err := clientMux.SendRequest("peek", false, nil)
+ if err != nil {
+ t.Errorf("SendRequest: %v", err)
+ }
+
+ ok, data, err := clientMux.SendRequest("yes", true, []byte("a"))
+ if !ok || string(data) != "yesa" || err != nil {
+ t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v",
+ ok, data, err)
+ }
+ if ok, data, err := clientMux.SendRequest("yes", true, []byte("a")); !ok || string(data) != "yesa" || err != nil {
+ t.Errorf("SendRequest(\"yes\", true, \"a\"): %v %v %v",
+ ok, data, err)
+ }
+
+ if ok, data, err := clientMux.SendRequest("no", true, []byte("a")); ok || string(data) != "noa" || err != nil {
+ t.Errorf("SendRequest(\"no\", true, \"a\"): %v %v %v",
+ ok, data, err)
+ }
+
+ clientMux.Disconnect(0, "")
+ if !seen {
+ t.Errorf("never saw 'peek' request")
+ }
+}
+
+func TestMuxGlobalRequestUnblock(t *testing.T) {
+ clientMux, serverMux := muxPair()
+ defer serverMux.Close()
+ defer clientMux.Close()
+
+ result := make(chan error, 1)
+ go func() {
+ _, _, err := clientMux.SendRequest("hello", true, nil)
+ result <- err
+ }()
+
+ <-serverMux.incomingRequests
+ serverMux.conn.Close()
+ err := <-result
+
+ if err != io.EOF {
+		t.Errorf("want EOF, got %v", err)
+ }
+}
+
+func TestMuxChannelRequestUnblock(t *testing.T) {
+ a, b, connB := channelPair(t)
+ defer a.Close()
+ defer b.Close()
+ defer connB.Close()
+
+ result := make(chan error, 1)
+ go func() {
+ _, err := a.SendRequest("hello", true, nil)
+ result <- err
+ }()
+
+ <-b.incomingRequests
+ connB.conn.Close()
+ err := <-result
+
+ if err != io.EOF {
+ t.Errorf("want EOF, got %v", err)
+ }
+}
+
+func TestMuxDisconnect(t *testing.T) {
+ a, b := muxPair()
+ defer a.Close()
+ defer b.Close()
+
+ go func() {
+ for r := range b.incomingRequests {
+ r.Reply(true, nil)
+ }
+ }()
+
+ a.Disconnect(42, "whatever")
+ ok, _, err := a.SendRequest("hello", true, nil)
+ if ok || err == nil {
+ t.Errorf("got reply after disconnecting")
+ }
+ err = b.Wait()
+ if d, ok := err.(*disconnectMsg); !ok || d.Reason != 42 {
+ t.Errorf("got %#v, want disconnectMsg{Reason:42}", err)
+ }
+}
+
+func TestMuxCloseChannel(t *testing.T) {
+ r, w, mux := channelPair(t)
+ defer mux.Close()
+ defer r.Close()
+ defer w.Close()
+
+ result := make(chan error, 1)
+ go func() {
+ var b [1024]byte
+ _, err := r.Read(b[:])
+ result <- err
+ }()
+ if err := w.Close(); err != nil {
+ t.Errorf("w.Close: %v", err)
+ }
+
+ if _, err := w.Write([]byte("hello")); err != io.EOF {
+ t.Errorf("got err %v, want io.EOF after Close", err)
+ }
+
+ if err := <-result; err != io.EOF {
+ t.Errorf("got %v (%T), want io.EOF", err, err)
+ }
+}
+
+func TestMuxCloseWriteChannel(t *testing.T) {
+ r, w, mux := channelPair(t)
+ defer mux.Close()
+
+ result := make(chan error, 1)
+ go func() {
+ var b [1024]byte
+ _, err := r.Read(b[:])
+ result <- err
+ }()
+ if err := w.CloseWrite(); err != nil {
+ t.Errorf("w.CloseWrite: %v", err)
+ }
+
+ if _, err := w.Write([]byte("hello")); err != io.EOF {
+ t.Errorf("got err %v, want io.EOF after CloseWrite", err)
+ }
+
+ if err := <-result; err != io.EOF {
+ t.Errorf("got %v (%T), want io.EOF", err, err)
+ }
+}
+
+func TestMuxInvalidRecord(t *testing.T) {
+ a, b := muxPair()
+ defer a.Close()
+ defer b.Close()
+
+ packet := make([]byte, 1+4+4+1)
+ packet[0] = msgChannelData
+ marshalUint32(packet[1:], 29348723 /* invalid channel id */)
+ marshalUint32(packet[5:], 1)
+ packet[9] = 42
+
+ a.conn.writePacket(packet)
+ go a.SendRequest("hello", false, nil)
+ // 'a' wrote an invalid packet, so 'b' has exited.
+ req, ok := <-b.incomingRequests
+ if ok {
+ t.Errorf("got request %#v after receiving invalid packet", req)
+ }
+}
+
+func TestZeroWindowAdjust(t *testing.T) {
+ a, b, mux := channelPair(t)
+ defer a.Close()
+ defer b.Close()
+ defer mux.Close()
+
+ go func() {
+ io.WriteString(a, "hello")
+ // bogus adjust.
+ a.sendMessage(windowAdjustMsg{})
+ io.WriteString(a, "world")
+ a.Close()
+ }()
+
+ want := "helloworld"
+ c, _ := ioutil.ReadAll(b)
+ if string(c) != want {
+ t.Errorf("got %q want %q", c, want)
+ }
+}
+
+func TestMuxMaxPacketSize(t *testing.T) {
+ a, b, mux := channelPair(t)
+ defer a.Close()
+ defer b.Close()
+ defer mux.Close()
+
+ large := make([]byte, a.maxRemotePayload+1)
+ packet := make([]byte, 1+4+4+1+len(large))
+ packet[0] = msgChannelData
+ marshalUint32(packet[1:], a.remoteId)
+ marshalUint32(packet[5:], uint32(len(large)))
+ packet[9] = 42
+
+ if err := a.mux.conn.writePacket(packet); err != nil {
+ t.Errorf("could not send packet")
+ }
+
+ go a.SendRequest("hello", false, nil)
+
+ _, ok := <-b.incomingRequests
+ if ok {
+ t.Errorf("connection still alive after receiving large packet.")
+ }
+}
+
+// Don't ship code with debug=true.
+func TestDebug(t *testing.T) {
+ if debugMux {
+ t.Error("mux debug switched on")
+ }
+ if debugHandshake {
+ t.Error("handshake debug switched on")
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/server.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/server.go
new file mode 100644
index 00000000000..4781eb78050
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/server.go
@@ -0,0 +1,495 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+)
+
+// The Permissions type holds fine-grained permissions that are
+// specific to a user or a specific authentication method for a
+// user. Permissions, except for "source-address", must be enforced in
+// the server application layer, after successful authentication. The
+// Permissions are passed on in ServerConn so a server implementation
+// can honor them.
+type Permissions struct {
+ // Critical options restrict default permissions. Common
+ // restrictions are "source-address" and "force-command". If
+ // the server cannot enforce the restriction, or does not
+ // recognize it, the user should not authenticate.
+ CriticalOptions map[string]string
+
+ // Extensions are extra functionality that the server may
+ // offer on authenticated connections. Common extensions are
+ // "permit-agent-forwarding", "permit-X11-forwarding". Lack of
+ // support for an extension does not preclude authenticating a
+ // user.
+ Extensions map[string]string
+}
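+
+// A hedged illustration (editor's sketch, not part of the upstream
+// API): a PublicKeyCallback might grant an extension that the
+// application layer later checks. Here authorizedKey is an assumed,
+// previously parsed PublicKey:
+//
+//	config.PublicKeyCallback = func(conn ConnMetadata, key PublicKey) (*Permissions, error) {
+//		if bytes.Equal(key.Marshal(), authorizedKey.Marshal()) {
+//			return &Permissions{
+//				Extensions: map[string]string{"permit-agent-forwarding": ""},
+//			}, nil
+//		}
+//		return nil, fmt.Errorf("unknown public key for user %q", conn.User())
+//	}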
+
+// ServerConfig holds server specific configuration data.
+type ServerConfig struct {
+ // Config contains configuration shared between client and server.
+ Config
+
+ hostKeys []Signer
+
+ // NoClientAuth is true if clients are allowed to connect without
+ // authenticating.
+ NoClientAuth bool
+
+ // PasswordCallback, if non-nil, is called when a user
+ // attempts to authenticate using a password.
+ PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)
+
+ // PublicKeyCallback, if non-nil, is called when a client attempts public
+	// key authentication. It must return a nil error if the given
+	// public key is valid for the given user. For example, see
+	// CertChecker.Authenticate.
+ PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
+
+ // KeyboardInteractiveCallback, if non-nil, is called when
+ // keyboard-interactive authentication is selected (RFC
+ // 4256). The client object's Challenge function should be
+ // used to query the user. The callback may offer multiple
+ // Challenge rounds. To avoid information leaks, the client
+ // should be presented a challenge even if the user is
+ // unknown.
+ KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)
+
+ // AuthLogCallback, if non-nil, is called to log all authentication
+ // attempts.
+ AuthLogCallback func(conn ConnMetadata, method string, err error)
+
+ // ServerVersion is the version identification string to announce in
+ // the public handshake.
+ // If empty, a reasonable default is used.
+ // Note that RFC 4253 section 4.2 requires that this string start with
+ // "SSH-2.0-".
+ ServerVersion string
+}
+
+// AddHostKey adds a private key as a host key. If an existing host
+// key exists with the same algorithm, it is overwritten. Each server
+// config must have at least one host key.
+func (s *ServerConfig) AddHostKey(key Signer) {
+ for i, k := range s.hostKeys {
+ if k.PublicKey().Type() == key.PublicKey().Type() {
+ s.hostKeys[i] = key
+ return
+ }
+ }
+
+ s.hostKeys = append(s.hostKeys, key)
+}
+
+// cachedPubKey contains the results of querying whether a public key is
+// acceptable for a user.
+type cachedPubKey struct {
+ user string
+ pubKeyData []byte
+ result error
+ perms *Permissions
+}
+
+const maxCachedPubKeys = 16
+
+// pubKeyCache caches tests for public keys. Since SSH clients
+// will query whether a public key is acceptable before attempting to
+// authenticate with it, we end up with duplicate queries for public
+// key validity. The cache only applies to a single ServerConn.
+type pubKeyCache struct {
+ keys []cachedPubKey
+}
+
+// get returns the result for a given user/algo/key tuple.
+func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
+ for _, k := range c.keys {
+ if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) {
+ return k, true
+ }
+ }
+ return cachedPubKey{}, false
+}
+
+// add adds the given tuple to the cache.
+func (c *pubKeyCache) add(candidate cachedPubKey) {
+ if len(c.keys) < maxCachedPubKeys {
+ c.keys = append(c.keys, candidate)
+ }
+}
+
+// ServerConn is an authenticated SSH connection, as seen from the
+// server.
+type ServerConn struct {
+ Conn
+
+	// If the authentication callback that succeeded returned a
+	// non-nil Permissions pointer, it is stored here.
+ Permissions *Permissions
+}
+
+// NewServerConn starts a new SSH server with c as the underlying
+// transport. It starts with a handshake and, if the handshake is
+// unsuccessful, it closes the connection and returns an error. The
+// Request and NewChannel channels must be serviced, or the connection
+// will hang.
+func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) {
+ fullConf := *config
+ fullConf.SetDefaults()
+ s := &connection{
+ sshConn: sshConn{conn: c},
+ }
+ perms, err := s.serverHandshake(&fullConf)
+ if err != nil {
+ c.Close()
+ return nil, nil, nil, err
+ }
+ return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil
+}
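+
+// A minimal server-side usage sketch (editor's illustration; the
+// listener, config, and host key setup are assumed to exist
+// elsewhere):
+//
+//	nConn, _ := listener.Accept()
+//	sConn, chans, reqs, err := NewServerConn(nConn, config)
+//	if err != nil {
+//		log.Fatalf("handshake failed: %v", err)
+//	}
+//	defer sConn.Close()
+//	go DiscardRequests(reqs)
+//	for newCh := range chans {
+//		newCh.Reject(UnknownChannelType, "no channels supported")
+//	}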
+
+// signAndMarshal signs the data with the appropriate algorithm,
+// and serializes the result in SSH wire format.
+func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
+ sig, err := k.Sign(rand, data)
+ if err != nil {
+ return nil, err
+ }
+
+ return Marshal(sig), nil
+}
+
+// handshake performs key exchange and user authentication.
+func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) {
+ if len(config.hostKeys) == 0 {
+ return nil, errors.New("ssh: server has no host keys")
+ }
+
+ if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil {
+ return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
+ }
+
+ if config.ServerVersion != "" {
+ s.serverVersion = []byte(config.ServerVersion)
+ } else {
+ s.serverVersion = []byte(packageVersion)
+ }
+ var err error
+ s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */)
+ s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config)
+
+ if err := s.transport.requestKeyChange(); err != nil {
+ return nil, err
+ }
+
+ if packet, err := s.transport.readPacket(); err != nil {
+ return nil, err
+ } else if packet[0] != msgNewKeys {
+ return nil, unexpectedMessageError(msgNewKeys, packet[0])
+ }
+
+ // We just did the key change, so the session ID is established.
+ s.sessionID = s.transport.getSessionID()
+
+ var packet []byte
+ if packet, err = s.transport.readPacket(); err != nil {
+ return nil, err
+ }
+
+ var serviceRequest serviceRequestMsg
+ if err = Unmarshal(packet, &serviceRequest); err != nil {
+ return nil, err
+ }
+ if serviceRequest.Service != serviceUserAuth {
+ return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
+ }
+ serviceAccept := serviceAcceptMsg{
+ Service: serviceUserAuth,
+ }
+ if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil {
+ return nil, err
+ }
+
+ perms, err := s.serverAuthenticate(config)
+ if err != nil {
+ return nil, err
+ }
+ s.mux = newMux(s.transport)
+ return perms, err
+}
+
+func isAcceptableAlgo(algo string) bool {
+ switch algo {
+ case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
+ CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
+ return true
+ }
+ return false
+}
+
+func checkSourceAddress(addr net.Addr, sourceAddr string) error {
+ if addr == nil {
+ return errors.New("ssh: no address known for client, but source-address match required")
+ }
+
+ tcpAddr, ok := addr.(*net.TCPAddr)
+ if !ok {
+		return fmt.Errorf("ssh: remote address %v is not a TCP address when checking source-address match", addr)
+ }
+
+ if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
+ if bytes.Equal(allowedIP, tcpAddr.IP) {
+ return nil
+ }
+ } else {
+ _, ipNet, err := net.ParseCIDR(sourceAddr)
+ if err != nil {
+ return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
+ }
+
+ if ipNet.Contains(tcpAddr.IP) {
+ return nil
+ }
+ }
+
+ return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
+}
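+
+// For illustration (editor's note, assumed values): an authentication
+// callback can trigger this check by returning permissions such as
+//
+//	&Permissions{
+//		CriticalOptions: map[string]string{
+//			"source-address": "192.0.2.0/24",
+//		},
+//	}
+//
+// after which the public-key path below enforces the restriction,
+// accepting either a single IP address or a CIDR range.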
+
+func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
+ var err error
+ var cache pubKeyCache
+ var perms *Permissions
+
+userAuthLoop:
+ for {
+ var userAuthReq userAuthRequestMsg
+ if packet, err := s.transport.readPacket(); err != nil {
+ return nil, err
+ } else if err = Unmarshal(packet, &userAuthReq); err != nil {
+ return nil, err
+ }
+
+ if userAuthReq.Service != serviceSSH {
+ return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
+ }
+
+ s.user = userAuthReq.User
+ perms = nil
+ authErr := errors.New("no auth passed yet")
+
+ switch userAuthReq.Method {
+ case "none":
+ if config.NoClientAuth {
+ s.user = ""
+ authErr = nil
+ }
+ case "password":
+ if config.PasswordCallback == nil {
+ authErr = errors.New("ssh: password auth not configured")
+ break
+ }
+ payload := userAuthReq.Payload
+ if len(payload) < 1 || payload[0] != 0 {
+ return nil, parseError(msgUserAuthRequest)
+ }
+ payload = payload[1:]
+ password, payload, ok := parseString(payload)
+ if !ok || len(payload) > 0 {
+ return nil, parseError(msgUserAuthRequest)
+ }
+
+ perms, authErr = config.PasswordCallback(s, password)
+ case "keyboard-interactive":
+ if config.KeyboardInteractiveCallback == nil {
+				authErr = errors.New("ssh: keyboard-interactive auth not configured")
+ break
+ }
+
+ prompter := &sshClientKeyboardInteractive{s}
+ perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge)
+ case "publickey":
+ if config.PublicKeyCallback == nil {
+ authErr = errors.New("ssh: publickey auth not configured")
+ break
+ }
+ payload := userAuthReq.Payload
+ if len(payload) < 1 {
+ return nil, parseError(msgUserAuthRequest)
+ }
+ isQuery := payload[0] == 0
+ payload = payload[1:]
+ algoBytes, payload, ok := parseString(payload)
+ if !ok {
+ return nil, parseError(msgUserAuthRequest)
+ }
+ algo := string(algoBytes)
+ if !isAcceptableAlgo(algo) {
+ authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
+ break
+ }
+
+ pubKeyData, payload, ok := parseString(payload)
+ if !ok {
+ return nil, parseError(msgUserAuthRequest)
+ }
+
+ pubKey, err := ParsePublicKey(pubKeyData)
+ if err != nil {
+ return nil, err
+ }
+
+ candidate, ok := cache.get(s.user, pubKeyData)
+ if !ok {
+ candidate.user = s.user
+ candidate.pubKeyData = pubKeyData
+ candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey)
+ if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
+ candidate.result = checkSourceAddress(
+ s.RemoteAddr(),
+ candidate.perms.CriticalOptions[sourceAddressCriticalOption])
+ }
+ cache.add(candidate)
+ }
+
+ if isQuery {
+ // The client can query if the given public key
+ // would be okay.
+ if len(payload) > 0 {
+ return nil, parseError(msgUserAuthRequest)
+ }
+
+ if candidate.result == nil {
+ okMsg := userAuthPubKeyOkMsg{
+ Algo: algo,
+ PubKey: pubKeyData,
+ }
+ if err = s.transport.writePacket(Marshal(&okMsg)); err != nil {
+ return nil, err
+ }
+ continue userAuthLoop
+ }
+ authErr = candidate.result
+ } else {
+ sig, payload, ok := parseSignature(payload)
+ if !ok || len(payload) > 0 {
+ return nil, parseError(msgUserAuthRequest)
+ }
+ // Ensure the public key algo and signature algo
+ // are supported. Compare the private key
+ // algorithm name that corresponds to algo with
+ // sig.Format. This is usually the same, but
+ // for certs, the names differ.
+ if !isAcceptableAlgo(sig.Format) {
+ break
+ }
+ signedData := buildDataSignedForAuth(s.transport.getSessionID(), userAuthReq, algoBytes, pubKeyData)
+
+ if err := pubKey.Verify(signedData, sig); err != nil {
+ return nil, err
+ }
+
+ authErr = candidate.result
+ perms = candidate.perms
+ }
+ default:
+ authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method)
+ }
+
+ if config.AuthLogCallback != nil {
+ config.AuthLogCallback(s, userAuthReq.Method, authErr)
+ }
+
+ if authErr == nil {
+ break userAuthLoop
+ }
+
+ var failureMsg userAuthFailureMsg
+ if config.PasswordCallback != nil {
+ failureMsg.Methods = append(failureMsg.Methods, "password")
+ }
+ if config.PublicKeyCallback != nil {
+ failureMsg.Methods = append(failureMsg.Methods, "publickey")
+ }
+ if config.KeyboardInteractiveCallback != nil {
+ failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive")
+ }
+
+ if len(failureMsg.Methods) == 0 {
+ return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
+ }
+
+ if err = s.transport.writePacket(Marshal(&failureMsg)); err != nil {
+ return nil, err
+ }
+ }
+
+ if err = s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil {
+ return nil, err
+ }
+ return perms, nil
+}
+
+// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by
+// asking the client on the other side of a ServerConn.
+type sshClientKeyboardInteractive struct {
+ *connection
+}
+
+func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
+ if len(questions) != len(echos) {
+ return nil, errors.New("ssh: echos and questions must have equal length")
+ }
+
+ var prompts []byte
+ for i := range questions {
+ prompts = appendString(prompts, questions[i])
+ prompts = appendBool(prompts, echos[i])
+ }
+
+ if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{
+ Instruction: instruction,
+ NumPrompts: uint32(len(questions)),
+ Prompts: prompts,
+ })); err != nil {
+ return nil, err
+ }
+
+ packet, err := c.transport.readPacket()
+ if err != nil {
+ return nil, err
+ }
+ if packet[0] != msgUserAuthInfoResponse {
+ return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0])
+ }
+ packet = packet[1:]
+
+ n, packet, ok := parseUint32(packet)
+ if !ok || int(n) != len(questions) {
+ return nil, parseError(msgUserAuthInfoResponse)
+ }
+
+ for i := uint32(0); i < n; i++ {
+ ans, rest, ok := parseString(packet)
+ if !ok {
+ return nil, parseError(msgUserAuthInfoResponse)
+ }
+
+ answers = append(answers, string(ans))
+ packet = rest
+ }
+ if len(packet) != 0 {
+ return nil, errors.New("ssh: junk at end of message")
+ }
+
+ return answers, nil
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/session.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/session.go
new file mode 100644
index 00000000000..fd10cd1aaf2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/session.go
@@ -0,0 +1,605 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+// Session implements an interactive session described in
+// "RFC 4254, section 6".
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "sync"
+)
+
+type Signal string
+
+// POSIX signals as listed in RFC 4254 Section 6.10.
+const (
+ SIGABRT Signal = "ABRT"
+ SIGALRM Signal = "ALRM"
+ SIGFPE Signal = "FPE"
+ SIGHUP Signal = "HUP"
+ SIGILL Signal = "ILL"
+ SIGINT Signal = "INT"
+ SIGKILL Signal = "KILL"
+ SIGPIPE Signal = "PIPE"
+ SIGQUIT Signal = "QUIT"
+ SIGSEGV Signal = "SEGV"
+ SIGTERM Signal = "TERM"
+ SIGUSR1 Signal = "USR1"
+ SIGUSR2 Signal = "USR2"
+)
+
+var signals = map[Signal]int{
+ SIGABRT: 6,
+ SIGALRM: 14,
+ SIGFPE: 8,
+ SIGHUP: 1,
+ SIGILL: 4,
+ SIGINT: 2,
+ SIGKILL: 9,
+ SIGPIPE: 13,
+ SIGQUIT: 3,
+ SIGSEGV: 11,
+ SIGTERM: 15,
+}
+
+type TerminalModes map[uint8]uint32
+
+// POSIX terminal mode flags as listed in RFC 4254 Section 8.
+const (
+ tty_OP_END = 0
+ VINTR = 1
+ VQUIT = 2
+ VERASE = 3
+ VKILL = 4
+ VEOF = 5
+ VEOL = 6
+ VEOL2 = 7
+ VSTART = 8
+ VSTOP = 9
+ VSUSP = 10
+ VDSUSP = 11
+ VREPRINT = 12
+ VWERASE = 13
+ VLNEXT = 14
+ VFLUSH = 15
+ VSWTCH = 16
+ VSTATUS = 17
+ VDISCARD = 18
+ IGNPAR = 30
+ PARMRK = 31
+ INPCK = 32
+ ISTRIP = 33
+ INLCR = 34
+ IGNCR = 35
+ ICRNL = 36
+ IUCLC = 37
+ IXON = 38
+ IXANY = 39
+ IXOFF = 40
+ IMAXBEL = 41
+ ISIG = 50
+ ICANON = 51
+ XCASE = 52
+ ECHO = 53
+ ECHOE = 54
+ ECHOK = 55
+ ECHONL = 56
+ NOFLSH = 57
+ TOSTOP = 58
+ IEXTEN = 59
+ ECHOCTL = 60
+ ECHOKE = 61
+ PENDIN = 62
+ OPOST = 70
+ OLCUC = 71
+ ONLCR = 72
+ OCRNL = 73
+ ONOCR = 74
+ ONLRET = 75
+ CS7 = 90
+ CS8 = 91
+ PARENB = 92
+ PARODD = 93
+ TTY_OP_ISPEED = 128
+ TTY_OP_OSPEED = 129
+)
+
+// A Session represents a connection to a remote command or shell.
+type Session struct {
+ // Stdin specifies the remote process's standard input.
+ // If Stdin is nil, the remote process reads from an empty
+ // bytes.Buffer.
+ Stdin io.Reader
+
+ // Stdout and Stderr specify the remote process's standard
+ // output and error.
+ //
+ // If either is nil, Run connects the corresponding file
+ // descriptor to an instance of ioutil.Discard. There is a
+ // fixed amount of buffering that is shared for the two streams.
+ // If either blocks it may eventually cause the remote
+ // command to block.
+ Stdout io.Writer
+ Stderr io.Writer
+
+ ch Channel // the channel backing this session
+ started bool // true once Start, Run or Shell is invoked.
+ copyFuncs []func() error
+ errors chan error // one send per copyFunc
+
+ // true if pipe method is active
+ stdinpipe, stdoutpipe, stderrpipe bool
+
+ // stdinPipeWriter is non-nil if StdinPipe has not been called
+ // and Stdin was specified by the user; it is the write end of
+ // a pipe connecting Session.Stdin to the stdin channel.
+ stdinPipeWriter io.WriteCloser
+
+ exitStatus chan error
+}
+
+// SendRequest sends an out-of-band channel request on the SSH channel
+// underlying the session.
+func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
+ return s.ch.SendRequest(name, wantReply, payload)
+}
+
+func (s *Session) Close() error {
+ return s.ch.Close()
+}
+
+// RFC 4254 Section 6.4.
+type setenvRequest struct {
+ Name string
+ Value string
+}
+
+// Setenv sets an environment variable that will be applied to any
+// command executed by Shell or Run.
+func (s *Session) Setenv(name, value string) error {
+ msg := setenvRequest{
+ Name: name,
+ Value: value,
+ }
+ ok, err := s.ch.SendRequest("env", true, Marshal(&msg))
+ if err == nil && !ok {
+ err = errors.New("ssh: setenv failed")
+ }
+ return err
+}
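+
+// A one-line usage sketch (editor's illustration; session is an
+// assumed *Session):
+//
+//	if err := session.Setenv("LANG", "en_US.UTF-8"); err != nil {
+//		log.Printf("server refused env request: %v", err)
+//	}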
+
+// RFC 4254 Section 6.2.
+type ptyRequestMsg struct {
+ Term string
+ Columns uint32
+ Rows uint32
+ Width uint32
+ Height uint32
+ Modelist string
+}
+
+// RequestPty requests the association of a pty with the session on the remote host.
+func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error {
+ var tm []byte
+ for k, v := range termmodes {
+ kv := struct {
+ Key byte
+ Val uint32
+ }{k, v}
+
+ tm = append(tm, Marshal(&kv)...)
+ }
+ tm = append(tm, tty_OP_END)
+ req := ptyRequestMsg{
+ Term: term,
+ Columns: uint32(w),
+ Rows: uint32(h),
+ Width: uint32(w * 8),
+ Height: uint32(h * 8),
+ Modelist: string(tm),
+ }
+ ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req))
+ if err == nil && !ok {
+ err = errors.New("ssh: pty-req failed")
+ }
+ return err
+}
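+
+// A hedged usage sketch (editor's illustration; session is an assumed
+// *Session): request an xterm with echoing disabled and fixed line
+// speeds before starting a shell:
+//
+//	modes := TerminalModes{
+//		ECHO:          0,     // disable echoing
+//		TTY_OP_ISPEED: 14400, // input speed = 14.4 kbaud
+//		TTY_OP_OSPEED: 14400, // output speed = 14.4 kbaud
+//	}
+//	if err := session.RequestPty("xterm", 40, 80, modes); err != nil {
+//		log.Fatalf("request for pseudo terminal failed: %v", err)
+//	}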
+
+// RFC 4254 Section 6.5.
+type subsystemRequestMsg struct {
+ Subsystem string
+}
+
+// RequestSubsystem requests the association of a subsystem with the session on the remote host.
+// A subsystem is a predefined command that runs in the background when the ssh session is initiated.
+func (s *Session) RequestSubsystem(subsystem string) error {
+ msg := subsystemRequestMsg{
+ Subsystem: subsystem,
+ }
+ ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg))
+ if err == nil && !ok {
+ err = errors.New("ssh: subsystem request failed")
+ }
+ return err
+}
+
+// RFC 4254 Section 6.9.
+type signalMsg struct {
+ Signal string
+}
+
+// Signal sends the given signal to the remote process.
+// sig is one of the SIG* constants.
+func (s *Session) Signal(sig Signal) error {
+ msg := signalMsg{
+ Signal: string(sig),
+ }
+
+ _, err := s.ch.SendRequest("signal", false, Marshal(&msg))
+ return err
+}
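+
+// Illustrative use (editor's sketch; session is an assumed *Session):
+//
+//	_ = session.Signal(SIGINT) // ask the remote process to interrupt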
+
+// RFC 4254 Section 6.5.
+type execMsg struct {
+ Command string
+}
+
+// Start runs cmd on the remote host. Typically, the remote
+// server passes cmd to the shell for interpretation.
+// A Session only accepts one call to Run, Start, Shell, Output, or
+// CombinedOutput.
+func (s *Session) Start(cmd string) error {
+ if s.started {
+ return errors.New("ssh: session already started")
+ }
+ req := execMsg{
+ Command: cmd,
+ }
+
+ ok, err := s.ch.SendRequest("exec", true, Marshal(&req))
+ if err == nil && !ok {
+ err = fmt.Errorf("ssh: command %v failed", cmd)
+ }
+ if err != nil {
+ return err
+ }
+ return s.start()
+}
+
+// Run runs cmd on the remote host. Typically, the remote
+// server passes cmd to the shell for interpretation.
+// A Session only accepts one call to Run, Start, Shell, Output,
+// or CombinedOutput.
+//
+// The returned error is nil if the command runs, has no problems
+// copying stdin, stdout, and stderr, and exits with a zero exit
+// status.
+//
+// If the command fails to run or doesn't complete successfully, the
+// error is of type *ExitError. Other error types may be
+// returned for I/O problems.
+func (s *Session) Run(cmd string) error {
+ err := s.Start(cmd)
+ if err != nil {
+ return err
+ }
+ return s.Wait()
+}
+
+// Output runs cmd on the remote host and returns its standard output.
+func (s *Session) Output(cmd string) ([]byte, error) {
+ if s.Stdout != nil {
+ return nil, errors.New("ssh: Stdout already set")
+ }
+ var b bytes.Buffer
+ s.Stdout = &b
+ err := s.Run(cmd)
+ return b.Bytes(), err
+}
+
+type singleWriter struct {
+ b bytes.Buffer
+ mu sync.Mutex
+}
+
+func (w *singleWriter) Write(p []byte) (int, error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+ return w.b.Write(p)
+}
+
+// CombinedOutput runs cmd on the remote host and returns its combined
+// standard output and standard error.
+func (s *Session) CombinedOutput(cmd string) ([]byte, error) {
+ if s.Stdout != nil {
+ return nil, errors.New("ssh: Stdout already set")
+ }
+ if s.Stderr != nil {
+ return nil, errors.New("ssh: Stderr already set")
+ }
+ var b singleWriter
+ s.Stdout = &b
+ s.Stderr = &b
+ err := s.Run(cmd)
+ return b.b.Bytes(), err
+}
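+
+// A usage sketch for the helpers above (editor's illustration; client
+// is an assumed, already-connected *Client):
+//
+//	session, err := client.NewSession()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer session.Close()
+//	out, err := session.CombinedOutput("/usr/bin/whoami")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%s", out)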
+
+// Shell starts a login shell on the remote host. A Session only
+// accepts one call to Run, Start, Shell, Output, or CombinedOutput.
+func (s *Session) Shell() error {
+ if s.started {
+ return errors.New("ssh: session already started")
+ }
+
+ ok, err := s.ch.SendRequest("shell", true, nil)
+ if err == nil && !ok {
+ return errors.New("ssh: could not start shell")
+ }
+ if err != nil {
+ return err
+ }
+ return s.start()
+}
+
+func (s *Session) start() error {
+ s.started = true
+
+ type F func(*Session)
+ for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} {
+ setupFd(s)
+ }
+
+ s.errors = make(chan error, len(s.copyFuncs))
+ for _, fn := range s.copyFuncs {
+ go func(fn func() error) {
+ s.errors <- fn()
+ }(fn)
+ }
+ return nil
+}
+
+// Wait waits for the remote command to exit.
+//
+// The returned error is nil if the command runs, has no problems
+// copying stdin, stdout, and stderr, and exits with a zero exit
+// status.
+//
+// If the command fails to run or doesn't complete successfully, the
+// error is of type *ExitError. Other error types may be
+// returned for I/O problems.
+func (s *Session) Wait() error {
+ if !s.started {
+ return errors.New("ssh: session not started")
+ }
+ waitErr := <-s.exitStatus
+
+ if s.stdinPipeWriter != nil {
+ s.stdinPipeWriter.Close()
+ }
+ var copyError error
+	for range s.copyFuncs {
+ if err := <-s.errors; err != nil && copyError == nil {
+ copyError = err
+ }
+ }
+ if waitErr != nil {
+ return waitErr
+ }
+ return copyError
+}
+
+func (s *Session) wait(reqs <-chan *Request) error {
+ wm := Waitmsg{status: -1}
+ // Wait for msg channel to be closed before returning.
+ for msg := range reqs {
+ switch msg.Type {
+ case "exit-status":
+ d := msg.Payload
+ wm.status = int(d[0])<<24 | int(d[1])<<16 | int(d[2])<<8 | int(d[3])
+ case "exit-signal":
+ var sigval struct {
+ Signal string
+ CoreDumped bool
+ Error string
+ Lang string
+ }
+ if err := Unmarshal(msg.Payload, &sigval); err != nil {
+ return err
+ }
+
+ // Must sanitize strings?
+ wm.signal = sigval.Signal
+ wm.msg = sigval.Error
+ wm.lang = sigval.Lang
+ default:
+ // This handles keepalives and matches
+ // OpenSSH's behaviour.
+ if msg.WantReply {
+ msg.Reply(false, nil)
+ }
+ }
+ }
+ if wm.status == 0 {
+ return nil
+ }
+ if wm.status == -1 {
+ // exit-status was never sent from server
+ if wm.signal == "" {
+ return errors.New("wait: remote command exited without exit status or exit signal")
+ }
+ wm.status = 128
+		if sig, ok := signals[Signal(wm.signal)]; ok {
+			wm.status += sig
+ }
+ }
+ return &ExitError{wm}
+}
+
+func (s *Session) stdin() {
+ if s.stdinpipe {
+ return
+ }
+ var stdin io.Reader
+ if s.Stdin == nil {
+ stdin = new(bytes.Buffer)
+ } else {
+ r, w := io.Pipe()
+ go func() {
+ _, err := io.Copy(w, s.Stdin)
+ w.CloseWithError(err)
+ }()
+ stdin, s.stdinPipeWriter = r, w
+ }
+ s.copyFuncs = append(s.copyFuncs, func() error {
+ _, err := io.Copy(s.ch, stdin)
+ if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF {
+ err = err1
+ }
+ return err
+ })
+}
+
+func (s *Session) stdout() {
+ if s.stdoutpipe {
+ return
+ }
+ if s.Stdout == nil {
+ s.Stdout = ioutil.Discard
+ }
+ s.copyFuncs = append(s.copyFuncs, func() error {
+ _, err := io.Copy(s.Stdout, s.ch)
+ return err
+ })
+}
+
+func (s *Session) stderr() {
+ if s.stderrpipe {
+ return
+ }
+ if s.Stderr == nil {
+ s.Stderr = ioutil.Discard
+ }
+ s.copyFuncs = append(s.copyFuncs, func() error {
+ _, err := io.Copy(s.Stderr, s.ch.Stderr())
+ return err
+ })
+}
+
+// sessionStdin reroutes Close to CloseWrite.
+type sessionStdin struct {
+ io.Writer
+ ch Channel
+}
+
+func (s *sessionStdin) Close() error {
+ return s.ch.CloseWrite()
+}
+
+// StdinPipe returns a pipe that will be connected to the
+// remote command's standard input when the command starts.
+func (s *Session) StdinPipe() (io.WriteCloser, error) {
+ if s.Stdin != nil {
+ return nil, errors.New("ssh: Stdin already set")
+ }
+ if s.started {
+ return nil, errors.New("ssh: StdinPipe after process started")
+ }
+ s.stdinpipe = true
+ return &sessionStdin{s.ch, s.ch}, nil
+}
+
+// StdoutPipe returns a pipe that will be connected to the
+// remote command's standard output when the command starts.
+// There is a fixed amount of buffering that is shared between
+// stdout and stderr streams. If the StdoutPipe reader is
+// not serviced fast enough it may eventually cause the
+// remote command to block.
+func (s *Session) StdoutPipe() (io.Reader, error) {
+ if s.Stdout != nil {
+ return nil, errors.New("ssh: Stdout already set")
+ }
+ if s.started {
+ return nil, errors.New("ssh: StdoutPipe after process started")
+ }
+ s.stdoutpipe = true
+ return s.ch, nil
+}
+
+// StderrPipe returns a pipe that will be connected to the
+// remote command's standard error when the command starts.
+// There is a fixed amount of buffering that is shared between
+// stdout and stderr streams. If the StderrPipe reader is
+// not serviced fast enough it may eventually cause the
+// remote command to block.
+func (s *Session) StderrPipe() (io.Reader, error) {
+ if s.Stderr != nil {
+ return nil, errors.New("ssh: Stderr already set")
+ }
+ if s.started {
+ return nil, errors.New("ssh: StderrPipe after process started")
+ }
+ s.stderrpipe = true
+ return s.ch.Stderr(), nil
+}
+
+// newSession returns a new interactive session on the remote host.
+func newSession(ch Channel, reqs <-chan *Request) (*Session, error) {
+ s := &Session{
+ ch: ch,
+ }
+ s.exitStatus = make(chan error, 1)
+ go func() {
+ s.exitStatus <- s.wait(reqs)
+ }()
+
+ return s, nil
+}
+
+// An ExitError reports unsuccessful completion of a remote command.
+type ExitError struct {
+ Waitmsg
+}
+
+func (e *ExitError) Error() string {
+ return e.Waitmsg.String()
+}
+
+// Waitmsg stores the information about an exited remote command
+// as reported by Wait.
+type Waitmsg struct {
+ status int
+ signal string
+ msg string
+ lang string
+}
+
+// ExitStatus returns the exit status of the remote command.
+func (w Waitmsg) ExitStatus() int {
+ return w.status
+}
+
+// Signal returns the exit signal of the remote command if
+// it was terminated violently.
+func (w Waitmsg) Signal() string {
+ return w.signal
+}
+
+// Msg returns the exit message given by the remote command
+func (w Waitmsg) Msg() string {
+ return w.msg
+}
+
+// Lang returns the language tag. See RFC 3066.
+func (w Waitmsg) Lang() string {
+ return w.lang
+}
+
+func (w Waitmsg) String() string {
+ return fmt.Sprintf("Process exited with: %v. Reason was: %v (%v)", w.status, w.msg, w.signal)
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/session_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/session_test.go
new file mode 100644
index 00000000000..f7f0f7642e7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/session_test.go
@@ -0,0 +1,774 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+// Session tests.
+
+import (
+ "bytes"
+ crypto_rand "crypto/rand"
+ "errors"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "testing"
+
+ "golang.org/x/crypto/ssh/terminal"
+)
+
+type serverType func(Channel, <-chan *Request, *testing.T)
+
+// dial constructs a new test server and returns a *ClientConn.
+func dial(handler serverType, t *testing.T) *Client {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+
+ go func() {
+ defer c1.Close()
+ conf := ServerConfig{
+ NoClientAuth: true,
+ }
+ conf.AddHostKey(testSigners["rsa"])
+
+ _, chans, reqs, err := NewServerConn(c1, &conf)
+ if err != nil {
+ t.Fatalf("Unable to handshake: %v", err)
+ }
+ go DiscardRequests(reqs)
+
+ for newCh := range chans {
+ if newCh.ChannelType() != "session" {
+ newCh.Reject(UnknownChannelType, "unknown channel type")
+ continue
+ }
+
+ ch, inReqs, err := newCh.Accept()
+ if err != nil {
+ t.Errorf("Accept: %v", err)
+ continue
+ }
+ go func() {
+ handler(ch, inReqs, t)
+ }()
+ }
+ }()
+
+ config := &ClientConfig{
+ User: "testuser",
+ }
+
+ conn, chans, reqs, err := NewClientConn(c2, "", config)
+ if err != nil {
+ t.Fatalf("unable to dial remote side: %v", err)
+ }
+
+ return NewClient(conn, chans, reqs)
+}
+
+// Test a simple string is returned to session.Stdout.
+func TestSessionShell(t *testing.T) {
+ conn := dial(shellHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ stdout := new(bytes.Buffer)
+ session.Stdout = stdout
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %s", err)
+ }
+ if err := session.Wait(); err != nil {
+ t.Fatalf("Remote command did not exit cleanly: %v", err)
+ }
+ actual := stdout.String()
+ if actual != "golang" {
+ t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual)
+ }
+}
+
+// TODO(dfc) add support for Std{in,err}Pipe when the Server supports it.
+
+// Test a simple string is returned via StdoutPipe.
+func TestSessionStdoutPipe(t *testing.T) {
+ conn := dial(shellHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ stdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Fatalf("Unable to request StdoutPipe(): %v", err)
+ }
+ var buf bytes.Buffer
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ done := make(chan bool, 1)
+ go func() {
+ if _, err := io.Copy(&buf, stdout); err != nil {
+ t.Errorf("Copy of stdout failed: %v", err)
+ }
+ done <- true
+ }()
+ if err := session.Wait(); err != nil {
+ t.Fatalf("Remote command did not exit cleanly: %v", err)
+ }
+ <-done
+ actual := buf.String()
+ if actual != "golang" {
+ t.Fatalf("Remote shell did not return expected string: expected=golang, actual=%s", actual)
+ }
+}
+
+// Test that a simple string is returned via the Output helper,
+// and that stderr is discarded.
+func TestSessionOutput(t *testing.T) {
+ conn := dial(fixedOutputHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+
+ buf, err := session.Output("") // cmd is ignored by fixedOutputHandler
+ if err != nil {
+ t.Error("Remote command did not exit cleanly:", err)
+ }
+ w := "this-is-stdout."
+ g := string(buf)
+ if g != w {
+ t.Error("Remote command did not return expected string:")
+ t.Logf("want %q", w)
+ t.Logf("got %q", g)
+ }
+}
+
+// Test that both stdout and stderr are returned
+// via the CombinedOutput helper.
+func TestSessionCombinedOutput(t *testing.T) {
+ conn := dial(fixedOutputHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+
+ buf, err := session.CombinedOutput("") // cmd is ignored by fixedOutputHandler
+ if err != nil {
+ t.Error("Remote command did not exit cleanly:", err)
+ }
+ const stdout = "this-is-stdout."
+ const stderr = "this-is-stderr."
+ g := string(buf)
+ if g != stdout+stderr && g != stderr+stdout {
+ t.Error("Remote command did not return expected string:")
+ t.Logf("want %q, or %q", stdout+stderr, stderr+stdout)
+ t.Logf("got %q", g)
+ }
+}
+
+// Test non-0 exit status is returned correctly.
+func TestExitStatusNonZero(t *testing.T) {
+ conn := dial(exitStatusNonZeroHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err == nil {
+ t.Fatalf("expected command to fail but it didn't")
+ }
+ e, ok := err.(*ExitError)
+ if !ok {
+ t.Fatalf("expected *ExitError but got %T", err)
+ }
+ if e.ExitStatus() != 15 {
+ t.Fatalf("expected command to exit with 15 but got %v", e.ExitStatus())
+ }
+}
+
+// Test 0 exit status is returned correctly.
+func TestExitStatusZero(t *testing.T) {
+ conn := dial(exitStatusZeroHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err != nil {
+ t.Fatalf("expected nil but got %v", err)
+ }
+}
+
+// Test exit signal and status are both returned correctly.
+func TestExitSignalAndStatus(t *testing.T) {
+ conn := dial(exitSignalAndStatusHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err == nil {
+ t.Fatalf("expected command to fail but it didn't")
+ }
+ e, ok := err.(*ExitError)
+ if !ok {
+ t.Fatalf("expected *ExitError but got %T", err)
+ }
+ if e.Signal() != "TERM" || e.ExitStatus() != 15 {
+ t.Fatalf("expected command to exit with signal TERM and status 15 but got signal %s and status %v", e.Signal(), e.ExitStatus())
+ }
+}
+
+// Test exit signal and status are both returned correctly.
+func TestKnownExitSignalOnly(t *testing.T) {
+ conn := dial(exitSignalHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err == nil {
+ t.Fatalf("expected command to fail but it didn't")
+ }
+ e, ok := err.(*ExitError)
+ if !ok {
+ t.Fatalf("expected *ExitError but got %T", err)
+ }
+ if e.Signal() != "TERM" || e.ExitStatus() != 143 {
+ t.Fatalf("expected command to exit with signal TERM and status 143 but got signal %s and status %v", e.Signal(), e.ExitStatus())
+ }
+}
+
+// Test exit signal and status are both returned correctly.
+func TestUnknownExitSignal(t *testing.T) {
+ conn := dial(exitSignalUnknownHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err == nil {
+ t.Fatalf("expected command to fail but it didn't")
+ }
+ e, ok := err.(*ExitError)
+ if !ok {
+ t.Fatalf("expected *ExitError but got %T", err)
+ }
+ if e.Signal() != "SYS" || e.ExitStatus() != 128 {
+ t.Fatalf("expected command to exit with signal SYS and status 128 but got signal %s and status %v", e.Signal(), e.ExitStatus())
+ }
+}
+
+// Test WaitMsg is not returned if the channel closes abruptly.
+func TestExitWithoutStatusOrSignal(t *testing.T) {
+ conn := dial(exitWithoutSignalOrStatus, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("Unable to request new session: %v", err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err == nil {
+ t.Fatalf("expected command to fail but it didn't")
+ }
+ _, ok := err.(*ExitError)
+ if ok {
+ // you can't actually test for errors.errorString
+ // because it's not exported.
+ t.Fatalf("expected *errorString but got %T", err)
+ }
+}
+
+// windowTestBytes is the number of bytes that we'll send to the SSH server.
+const windowTestBytes = 16000 * 200
+
+// TestServerWindow writes random data to the server. The server is expected to echo
+// the same data back, which is compared against the original.
+func TestServerWindow(t *testing.T) {
+ origBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes))
+ io.CopyN(origBuf, crypto_rand.Reader, windowTestBytes)
+ origBytes := origBuf.Bytes()
+
+ conn := dial(echoHandler, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer session.Close()
+ result := make(chan []byte)
+
+ go func() {
+ defer close(result)
+ echoedBuf := bytes.NewBuffer(make([]byte, 0, windowTestBytes))
+ serverStdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Errorf("StdoutPipe failed: %v", err)
+ return
+ }
+ n, err := copyNRandomly("stdout", echoedBuf, serverStdout, windowTestBytes)
+ if err != nil && err != io.EOF {
+ t.Errorf("Read only %d bytes from server, expected %d: %v", n, windowTestBytes, err)
+ }
+ result <- echoedBuf.Bytes()
+ }()
+
+ serverStdin, err := session.StdinPipe()
+ if err != nil {
+ t.Fatalf("StdinPipe failed: %v", err)
+ }
+ written, err := copyNRandomly("stdin", serverStdin, origBuf, windowTestBytes)
+ if err != nil {
+ t.Fatalf("failed to copy origBuf to serverStdin: %v", err)
+ }
+ if written != windowTestBytes {
+ t.Fatalf("Wrote only %d of %d bytes to server", written, windowTestBytes)
+ }
+
+ echoedBytes := <-result
+
+ if !bytes.Equal(origBytes, echoedBytes) {
+ t.Fatalf("Echoed buffer differed from original, orig %d, echoed %d", len(origBytes), len(echoedBytes))
+ }
+}
+
+// Verify the client can handle a keepalive packet from the server.
+func TestClientHandlesKeepalives(t *testing.T) {
+ conn := dial(channelKeepaliveSender, t)
+ defer conn.Close()
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer session.Close()
+ if err := session.Shell(); err != nil {
+ t.Fatalf("Unable to execute command: %v", err)
+ }
+ err = session.Wait()
+ if err != nil {
+ t.Fatalf("expected nil but got: %v", err)
+ }
+}
+
+type exitStatusMsg struct {
+ Status uint32
+}
+
+type exitSignalMsg struct {
+ Signal string
+ CoreDumped bool
+ Errmsg string
+ Lang string
+}
+
+func handleTerminalRequests(in <-chan *Request) {
+ for req := range in {
+ ok := false
+ switch req.Type {
+ case "shell":
+ ok = true
+ if len(req.Payload) > 0 {
+ // We don't accept any commands, only the default shell.
+ ok = false
+ }
+ case "env":
+ ok = true
+ }
+ req.Reply(ok, nil)
+ }
+}
+
+func newServerShell(ch Channel, in <-chan *Request, prompt string) *terminal.Terminal {
+ term := terminal.NewTerminal(ch, prompt)
+ go handleTerminalRequests(in)
+ return term
+}
+
+func exitStatusZeroHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+	// the prompt string "> " is written back to the client's stdout
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ sendStatus(0, ch, t)
+}
+
+func exitStatusNonZeroHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ sendStatus(15, ch, t)
+}
+
+func exitSignalAndStatusHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ sendStatus(15, ch, t)
+ sendSignal("TERM", ch, t)
+}
+
+func exitSignalHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ sendSignal("TERM", ch, t)
+}
+
+func exitSignalUnknownHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ sendSignal("SYS", ch, t)
+}
+
+func exitWithoutSignalOrStatus(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+}
+
+func shellHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ // this string is returned to stdout
+ shell := newServerShell(ch, in, "golang")
+ readLine(shell, t)
+ sendStatus(0, ch, t)
+}
+
+// Ignores the command, writes fixed strings to stderr and stdout.
+// Strings are "this-is-stdout." and "this-is-stderr.".
+func fixedOutputHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ _, err := ch.Read(nil)
+
+ req, ok := <-in
+ if !ok {
+		t.Fatalf("expected channel request; Read returned: %#v", err)
+ return
+ }
+
+ // ignore request, always send some text
+ req.Reply(true, nil)
+
+ _, err = io.WriteString(ch, "this-is-stdout.")
+ if err != nil {
+ t.Fatalf("error writing on server: %v", err)
+ }
+ _, err = io.WriteString(ch.Stderr(), "this-is-stderr.")
+ if err != nil {
+ t.Fatalf("error writing on server: %v", err)
+ }
+ sendStatus(0, ch, t)
+}
+
+func readLine(shell *terminal.Terminal, t *testing.T) {
+ if _, err := shell.ReadLine(); err != nil && err != io.EOF {
+ t.Errorf("unable to read line: %v", err)
+ }
+}
+
+func sendStatus(status uint32, ch Channel, t *testing.T) {
+ msg := exitStatusMsg{
+ Status: status,
+ }
+ if _, err := ch.SendRequest("exit-status", false, Marshal(&msg)); err != nil {
+ t.Errorf("unable to send status: %v", err)
+ }
+}
+
+func sendSignal(signal string, ch Channel, t *testing.T) {
+ sig := exitSignalMsg{
+ Signal: signal,
+ CoreDumped: false,
+ Errmsg: "Process terminated",
+ Lang: "en-GB-oed",
+ }
+ if _, err := ch.SendRequest("exit-signal", false, Marshal(&sig)); err != nil {
+ t.Errorf("unable to send signal: %v", err)
+ }
+}
+
+func discardHandler(ch Channel, t *testing.T) {
+ defer ch.Close()
+ io.Copy(ioutil.Discard, ch)
+}
+
+func echoHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ if n, err := copyNRandomly("echohandler", ch, ch, windowTestBytes); err != nil {
+ t.Errorf("short write, wrote %d, expected %d: %v ", n, windowTestBytes, err)
+ }
+}
+
+// copyNRandomly copies n bytes from src to dst. It uses randomly sized
+// buffers to exercise more code paths.
+func copyNRandomly(title string, dst io.Writer, src io.Reader, n int) (int, error) {
+ var (
+ buf = make([]byte, 32*1024)
+ written int
+ remaining = n
+ )
+ for remaining > 0 {
+ l := rand.Intn(1 << 15)
+ if remaining < l {
+ l = remaining
+ }
+ nr, er := src.Read(buf[:l])
+ nw, ew := dst.Write(buf[:nr])
+ remaining -= nw
+ written += nw
+ if ew != nil {
+ return written, ew
+ }
+ if nr != nw {
+ return written, io.ErrShortWrite
+ }
+ if er != nil && er != io.EOF {
+ return written, er
+ }
+ }
+ return written, nil
+}
+
+func channelKeepaliveSender(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ shell := newServerShell(ch, in, "> ")
+ readLine(shell, t)
+ if _, err := ch.SendRequest("keepalive@openssh.com", true, nil); err != nil {
+ t.Errorf("unable to send channel keepalive request: %v", err)
+ }
+ sendStatus(0, ch, t)
+}
+
+func TestClientWriteEOF(t *testing.T) {
+ conn := dial(simpleEchoHandler, t)
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer session.Close()
+ stdin, err := session.StdinPipe()
+ if err != nil {
+ t.Fatalf("StdinPipe failed: %v", err)
+ }
+ stdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Fatalf("StdoutPipe failed: %v", err)
+ }
+
+ data := []byte(`0000`)
+ _, err = stdin.Write(data)
+ if err != nil {
+ t.Fatalf("Write failed: %v", err)
+ }
+ stdin.Close()
+
+ res, err := ioutil.ReadAll(stdout)
+ if err != nil {
+ t.Fatalf("Read failed: %v", err)
+ }
+
+ if !bytes.Equal(data, res) {
+ t.Fatalf("Read differed from write, wrote: %v, read: %v", data, res)
+ }
+}
+
+func simpleEchoHandler(ch Channel, in <-chan *Request, t *testing.T) {
+ defer ch.Close()
+ data, err := ioutil.ReadAll(ch)
+ if err != nil {
+ t.Errorf("handler read error: %v", err)
+ }
+ _, err = ch.Write(data)
+ if err != nil {
+ t.Errorf("handler write error: %v", err)
+ }
+}
+
+func TestSessionID(t *testing.T) {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ serverID := make(chan []byte, 1)
+ clientID := make(chan []byte, 1)
+
+ serverConf := &ServerConfig{
+ NoClientAuth: true,
+ }
+ serverConf.AddHostKey(testSigners["ecdsa"])
+ clientConf := &ClientConfig{
+ User: "user",
+ }
+
+ go func() {
+ conn, chans, reqs, err := NewServerConn(c1, serverConf)
+ if err != nil {
+ t.Fatalf("server handshake: %v", err)
+ }
+ serverID <- conn.SessionID()
+ go DiscardRequests(reqs)
+ for ch := range chans {
+ ch.Reject(Prohibited, "")
+ }
+ }()
+
+ go func() {
+ conn, chans, reqs, err := NewClientConn(c2, "", clientConf)
+ if err != nil {
+ t.Fatalf("client handshake: %v", err)
+ }
+ clientID <- conn.SessionID()
+ go DiscardRequests(reqs)
+ for ch := range chans {
+ ch.Reject(Prohibited, "")
+ }
+ }()
+
+ s := <-serverID
+ c := <-clientID
+	if !bytes.Equal(s, c) {
+ t.Errorf("server session ID (%x) != client session ID (%x)", s, c)
+ } else if len(s) == 0 {
+ t.Errorf("client and server SessionID were empty.")
+ }
+}
+
+type noReadConn struct {
+ readSeen bool
+ net.Conn
+}
+
+func (c *noReadConn) Close() error {
+ return nil
+}
+
+func (c *noReadConn) Read(b []byte) (int, error) {
+ c.readSeen = true
+ return 0, errors.New("noReadConn error")
+}
+
+func TestInvalidServerConfiguration(t *testing.T) {
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ serveConn := noReadConn{Conn: c1}
+ serverConf := &ServerConfig{}
+
+ NewServerConn(&serveConn, serverConf)
+ if serveConn.readSeen {
+ t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing host key")
+ }
+
+ serverConf.AddHostKey(testSigners["ecdsa"])
+
+ NewServerConn(&serveConn, serverConf)
+ if serveConn.readSeen {
+ t.Fatalf("NewServerConn attempted to Read() from Conn while configuration is missing authentication method")
+ }
+}
+
+func TestHostKeyAlgorithms(t *testing.T) {
+ serverConf := &ServerConfig{
+ NoClientAuth: true,
+ }
+ serverConf.AddHostKey(testSigners["rsa"])
+ serverConf.AddHostKey(testSigners["ecdsa"])
+
+ connect := func(clientConf *ClientConfig, want string) {
+ var alg string
+ clientConf.HostKeyCallback = func(h string, a net.Addr, key PublicKey) error {
+ alg = key.Type()
+ return nil
+ }
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ go NewServerConn(c1, serverConf)
+ _, _, _, err = NewClientConn(c2, "", clientConf)
+ if err != nil {
+ t.Fatalf("NewClientConn: %v", err)
+ }
+ if alg != want {
+ t.Errorf("selected key algorithm %s, want %s", alg, want)
+ }
+ }
+
+ // By default, we get the preferred algorithm, which is ECDSA 256.
+
+ clientConf := &ClientConfig{}
+ connect(clientConf, KeyAlgoECDSA256)
+
+ // Client asks for RSA explicitly.
+ clientConf.HostKeyAlgorithms = []string{KeyAlgoRSA}
+ connect(clientConf, KeyAlgoRSA)
+
+ c1, c2, err := netPipe()
+ if err != nil {
+ t.Fatalf("netPipe: %v", err)
+ }
+ defer c1.Close()
+ defer c2.Close()
+
+ go NewServerConn(c1, serverConf)
+ clientConf.HostKeyAlgorithms = []string{"nonexistent-hostkey-algo"}
+ _, _, _, err = NewClientConn(c2, "", clientConf)
+ if err == nil {
+ t.Fatal("succeeded connecting with unknown hostkey algorithm")
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/tcpip.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/tcpip.go
new file mode 100644
index 00000000000..6151241ff08
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/tcpip.go
@@ -0,0 +1,407 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Listen requests that the remote peer open a listening socket on
+// addr. Incoming connections will be available by calling Accept on
+// the returned net.Listener. The listener must be serviced, or the
+// SSH connection may hang.
+func (c *Client) Listen(n, addr string) (net.Listener, error) {
+ laddr, err := net.ResolveTCPAddr(n, addr)
+ if err != nil {
+ return nil, err
+ }
+ return c.ListenTCP(laddr)
+}
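+
+// A minimal caller-side sketch of remote forwarding (not part of this
+// file): it assumes an already-established *Client named "client", and
+// the address "0.0.0.0:8080" is illustrative only.
+//
+//	ln, err := client.Listen("tcp", "0.0.0.0:8080")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer ln.Close()
+//	for {
+//		c, err := ln.Accept()
+//		if err != nil {
+//			break // listener closed or transport died
+//		}
+//		go func(c net.Conn) {
+//			io.WriteString(c, "hello from the SSH client\n")
+//			c.Close()
+//		}(c)
+//	}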
+
+// Automatic port allocation is broken with OpenSSH before 6.0. See
+// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In
+// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0,
+// rather than the actual port number. This means you can never open
+// two different listeners with auto allocated ports. We work around
+// this by trying explicit ports until we succeed.
+
+const openSSHPrefix = "OpenSSH_"
+
+var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// isBrokenOpenSSHVersion returns true if the given version string
+// specifies a version of OpenSSH that is known to have a bug in port
+// forwarding.
+func isBrokenOpenSSHVersion(versionStr string) bool {
+ i := strings.Index(versionStr, openSSHPrefix)
+ if i < 0 {
+ return false
+ }
+ i += len(openSSHPrefix)
+ j := i
+ for ; j < len(versionStr); j++ {
+ if versionStr[j] < '0' || versionStr[j] > '9' {
+ break
+ }
+ }
+ version, _ := strconv.Atoi(versionStr[i:j])
+ return version < 6
+}
+
+// autoPortListenWorkaround simulates automatic port allocation by
+// trying random ports repeatedly.
+func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) {
+ var sshListener net.Listener
+ var err error
+ const tries = 10
+ for i := 0; i < tries; i++ {
+ addr := *laddr
+ addr.Port = 1024 + portRandomizer.Intn(60000)
+ sshListener, err = c.ListenTCP(&addr)
+ if err == nil {
+ laddr.Port = addr.Port
+ return sshListener, err
+ }
+ }
+ return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err)
+}
+
+// RFC 4254 7.1
+type channelForwardMsg struct {
+ addr string
+ rport uint32
+}
+
+// ListenTCP requests that the remote peer open a listening socket
+// on laddr. Incoming connections will be available by calling
+// Accept on the returned net.Listener.
+func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
+ if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
+ return c.autoPortListenWorkaround(laddr)
+ }
+
+ m := channelForwardMsg{
+ laddr.IP.String(),
+ uint32(laddr.Port),
+ }
+ // send message
+ ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m))
+ if err != nil {
+ return nil, err
+ }
+ if !ok {
+ return nil, errors.New("ssh: tcpip-forward request denied by peer")
+ }
+
+ // If the original port was 0, then the remote side will
+ // supply a real port number in the response.
+ if laddr.Port == 0 {
+ var p struct {
+ Port uint32
+ }
+ if err := Unmarshal(resp, &p); err != nil {
+ return nil, err
+ }
+ laddr.Port = int(p.Port)
+ }
+
+ // Register this forward, using the port number we obtained.
+ ch := c.forwards.add(*laddr)
+
+ return &tcpListener{laddr, c, ch}, nil
+}
+
+// forwardList stores a mapping between remote
+// forward requests and the tcpListeners.
+type forwardList struct {
+ sync.Mutex
+ entries []forwardEntry
+}
+
+// forwardEntry represents an established mapping of a laddr on a
+// remote ssh server to a channel connected to a tcpListener.
+type forwardEntry struct {
+ laddr net.TCPAddr
+ c chan forward
+}
+
+// forward represents an incoming forwarded tcpip connection. The
+// arguments to add/remove/lookup should be the address as specified in
+// the original forward-request.
+type forward struct {
+ newCh NewChannel // the ssh client channel underlying this forward
+ raddr *net.TCPAddr // the raddr of the incoming connection
+}
+
+func (l *forwardList) add(addr net.TCPAddr) chan forward {
+ l.Lock()
+ defer l.Unlock()
+ f := forwardEntry{
+ addr,
+ make(chan forward, 1),
+ }
+ l.entries = append(l.entries, f)
+ return f.c
+}
+
+// See RFC 4254, section 7.2
+type forwardedTCPPayload struct {
+ Addr string
+ Port uint32
+ OriginAddr string
+ OriginPort uint32
+}
+
+// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr.
+func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) {
+ if port == 0 || port > 65535 {
+ return nil, fmt.Errorf("ssh: port number out of range: %d", port)
+ }
+ ip := net.ParseIP(addr)
+ if ip == nil {
+ return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr)
+ }
+ return &net.TCPAddr{IP: ip, Port: int(port)}, nil
+}
+
+func (l *forwardList) handleChannels(in <-chan NewChannel) {
+ for ch := range in {
+ var payload forwardedTCPPayload
+ if err := Unmarshal(ch.ExtraData(), &payload); err != nil {
+ ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error())
+ continue
+ }
+
+ // RFC 4254 section 7.2 specifies that incoming
+ // addresses should list the address, in string
+ // format. It is implied that this should be an IP
+ // address, as it would be impossible to connect to it
+ // otherwise.
+ laddr, err := parseTCPAddr(payload.Addr, payload.Port)
+ if err != nil {
+ ch.Reject(ConnectionFailed, err.Error())
+ continue
+ }
+ raddr, err := parseTCPAddr(payload.OriginAddr, payload.OriginPort)
+ if err != nil {
+ ch.Reject(ConnectionFailed, err.Error())
+ continue
+ }
+
+ if ok := l.forward(*laddr, *raddr, ch); !ok {
+ // Section 7.2, implementations MUST reject spurious incoming
+ // connections.
+ ch.Reject(Prohibited, "no forward for address")
+ continue
+ }
+ }
+}
+
+// remove removes the forward entry, and the channel feeding its
+// listener.
+func (l *forwardList) remove(addr net.TCPAddr) {
+ l.Lock()
+ defer l.Unlock()
+ for i, f := range l.entries {
+ if addr.IP.Equal(f.laddr.IP) && addr.Port == f.laddr.Port {
+ l.entries = append(l.entries[:i], l.entries[i+1:]...)
+ close(f.c)
+ return
+ }
+ }
+}
+
+// closeAll closes and clears all forwards.
+func (l *forwardList) closeAll() {
+ l.Lock()
+ defer l.Unlock()
+ for _, f := range l.entries {
+ close(f.c)
+ }
+ l.entries = nil
+}
+
+func (l *forwardList) forward(laddr, raddr net.TCPAddr, ch NewChannel) bool {
+ l.Lock()
+ defer l.Unlock()
+ for _, f := range l.entries {
+ if laddr.IP.Equal(f.laddr.IP) && laddr.Port == f.laddr.Port {
+ f.c <- forward{ch, &raddr}
+ return true
+ }
+ }
+ return false
+}
+
+type tcpListener struct {
+ laddr *net.TCPAddr
+
+ conn *Client
+ in <-chan forward
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (l *tcpListener) Accept() (net.Conn, error) {
+ s, ok := <-l.in
+ if !ok {
+ return nil, io.EOF
+ }
+ ch, incoming, err := s.newCh.Accept()
+ if err != nil {
+ return nil, err
+ }
+ go DiscardRequests(incoming)
+
+ return &tcpChanConn{
+ Channel: ch,
+ laddr: l.laddr,
+ raddr: s.raddr,
+ }, nil
+}
+
+// Close closes the listener.
+func (l *tcpListener) Close() error {
+ m := channelForwardMsg{
+ l.laddr.IP.String(),
+ uint32(l.laddr.Port),
+ }
+
+ // this also closes the listener.
+ l.conn.forwards.remove(*l.laddr)
+ ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m))
+ if err == nil && !ok {
+ err = errors.New("ssh: cancel-tcpip-forward failed")
+ }
+ return err
+}
+
+// Addr returns the listener's network address.
+func (l *tcpListener) Addr() net.Addr {
+ return l.laddr
+}
+
+// Dial initiates a connection to the addr from the remote host.
+// The resulting connection has a zero LocalAddr() and RemoteAddr().
+func (c *Client) Dial(n, addr string) (net.Conn, error) {
+ // Parse the address into host and numeric port.
+ host, portString, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ port, err := strconv.ParseUint(portString, 10, 16)
+ if err != nil {
+ return nil, err
+ }
+ // Use a zero address for local and remote address.
+ zeroAddr := &net.TCPAddr{
+ IP: net.IPv4zero,
+ Port: 0,
+ }
+ ch, err := c.dial(net.IPv4zero.String(), 0, host, int(port))
+ if err != nil {
+ return nil, err
+ }
+ return &tcpChanConn{
+ Channel: ch,
+ laddr: zeroAddr,
+ raddr: zeroAddr,
+ }, nil
+}
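+
+// A caller-side sketch (assuming an established *Client named "client";
+// the host and port are illustrative). The returned net.Conn is tunnelled
+// over the SSH connection; note that it does not support deadlines.
+//
+//	conn, err := client.Dial("tcp", "database.internal:5432")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer conn.Close()
+//	fmt.Fprintf(conn, "ping\n")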
+
+// DialTCP connects to the remote address raddr on the network n,
+// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used
+// as the local address for the connection.
+func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) {
+ if laddr == nil {
+ laddr = &net.TCPAddr{
+ IP: net.IPv4zero,
+ Port: 0,
+ }
+ }
+ ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port)
+ if err != nil {
+ return nil, err
+ }
+ return &tcpChanConn{
+ Channel: ch,
+ laddr: laddr,
+ raddr: raddr,
+ }, nil
+}
+
+// RFC 4254 7.2
+type channelOpenDirectMsg struct {
+ raddr string
+ rport uint32
+ laddr string
+ lport uint32
+}
+
+func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) {
+ msg := channelOpenDirectMsg{
+ raddr: raddr,
+ rport: uint32(rport),
+ laddr: laddr,
+ lport: uint32(lport),
+ }
+ ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg))
+ if err != nil {
+ return nil, err
+ }
+ go DiscardRequests(in)
+ return ch, err
+}
+
+type tcpChan struct {
+ Channel // the backing channel
+}
+
+// tcpChanConn fulfills the net.Conn interface without
+// the tcpChan having to hold laddr or raddr directly.
+type tcpChanConn struct {
+ Channel
+ laddr, raddr net.Addr
+}
+
+// LocalAddr returns the local network address.
+func (t *tcpChanConn) LocalAddr() net.Addr {
+ return t.laddr
+}
+
+// RemoteAddr returns the remote network address.
+func (t *tcpChanConn) RemoteAddr() net.Addr {
+ return t.raddr
+}
+
+// SetDeadline sets the read and write deadlines associated
+// with the connection.
+func (t *tcpChanConn) SetDeadline(deadline time.Time) error {
+ if err := t.SetReadDeadline(deadline); err != nil {
+ return err
+ }
+ return t.SetWriteDeadline(deadline)
+}
+
+// SetReadDeadline exists to satisfy the net.Conn interface
+// but is not implemented by this type. It always returns an error.
+func (t *tcpChanConn) SetReadDeadline(deadline time.Time) error {
+ return errors.New("ssh: tcpChan: deadline not supported")
+}
+
+// SetWriteDeadline exists to satisfy the net.Conn interface
+// but is not implemented by this type. It always returns an error.
+func (t *tcpChanConn) SetWriteDeadline(deadline time.Time) error {
+ return errors.New("ssh: tcpChan: deadline not supported")
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/tcpip_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/tcpip_test.go
new file mode 100644
index 00000000000..f1265cb4964
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/tcpip_test.go
@@ -0,0 +1,20 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "testing"
+)
+
+func TestAutoPortListenBroken(t *testing.T) {
+ broken := "SSH-2.0-OpenSSH_5.9hh11"
+ works := "SSH-2.0-OpenSSH_6.1"
+ if !isBrokenOpenSSHVersion(broken) {
+ t.Errorf("version %q not marked as broken", broken)
+ }
+ if isBrokenOpenSSHVersion(works) {
+ t.Errorf("version %q marked as broken", works)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/terminal.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/terminal.go
new file mode 100644
index 00000000000..741eeb13f0f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/terminal.go
@@ -0,0 +1,892 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package terminal
+
+import (
+ "bytes"
+ "io"
+ "sync"
+ "unicode/utf8"
+)
+
+// EscapeCodes contains escape sequences that can be written to the terminal in
+// order to achieve different styles of text.
+type EscapeCodes struct {
+ // Foreground colors
+ Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte
+
+ // Reset all attributes
+ Reset []byte
+}
+
+var vt100EscapeCodes = EscapeCodes{
+ Black: []byte{keyEscape, '[', '3', '0', 'm'},
+ Red: []byte{keyEscape, '[', '3', '1', 'm'},
+ Green: []byte{keyEscape, '[', '3', '2', 'm'},
+ Yellow: []byte{keyEscape, '[', '3', '3', 'm'},
+ Blue: []byte{keyEscape, '[', '3', '4', 'm'},
+ Magenta: []byte{keyEscape, '[', '3', '5', 'm'},
+ Cyan: []byte{keyEscape, '[', '3', '6', 'm'},
+ White: []byte{keyEscape, '[', '3', '7', 'm'},
+
+ Reset: []byte{keyEscape, '[', '0', 'm'},
+}
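+
+// A brief sketch of using these escape codes through a Terminal (the
+// variable "term" is assumed to be a *Terminal created elsewhere):
+//
+//	red := string(term.Escape.Red)
+//	reset := string(term.Escape.Reset)
+//	fmt.Fprintln(term, red+"error:"+reset+" something failed")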
+
+// Terminal contains the state for running a VT100 terminal that is capable of
+// reading lines of input.
+type Terminal struct {
+ // AutoCompleteCallback, if non-null, is called for each keypress with
+ // the full input line and the current position of the cursor (in
+ // bytes, as an index into |line|). If it returns ok=false, the key
+ // press is processed normally. Otherwise it returns a replacement line
+ // and the new cursor position.
+ AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
+
+ // Escape contains a pointer to the escape codes for this terminal.
+ // It's always a valid pointer, although the escape codes themselves
+ // may be empty if the terminal doesn't support them.
+ Escape *EscapeCodes
+
+ // lock protects the terminal and the state in this object from
+ // concurrent processing of a key press and a Write() call.
+ lock sync.Mutex
+
+ c io.ReadWriter
+ prompt []rune
+
+ // line is the current line being entered.
+ line []rune
+ // pos is the logical position of the cursor in line
+ pos int
+ // echo is true if local echo is enabled
+ echo bool
+ // pasteActive is true iff there is a bracketed paste operation in
+ // progress.
+ pasteActive bool
+
+ // cursorX contains the current X value of the cursor where the left
+ // edge is 0. cursorY contains the row number where the first row of
+ // the current line is 0.
+ cursorX, cursorY int
+ // maxLine is the greatest value of cursorY so far.
+ maxLine int
+
+ termWidth, termHeight int
+
+ // outBuf contains the terminal data to be sent.
+ outBuf []byte
+ // remainder contains the remainder of any partial key sequences after
+ // a read. It aliases into inBuf.
+ remainder []byte
+ inBuf [256]byte
+
+ // history contains previously entered commands so that they can be
+ // accessed with the up and down keys.
+ history stRingBuffer
+ // historyIndex stores the currently accessed history entry, where zero
+ // means the immediately previous entry.
+ historyIndex int
+ // When navigating up and down the history it's possible to return to
+ // the incomplete, initial line. That value is stored in
+ // historyPending.
+ historyPending string
+}
+
+// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is
+// a local terminal, that terminal must first have been put into raw mode.
+// prompt is a string that is written at the start of each input line (i.e.
+// "> ").
+func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
+ return &Terminal{
+ Escape: &vt100EscapeCodes,
+ c: c,
+ prompt: []rune(prompt),
+ termWidth: 80,
+ termHeight: 24,
+ echo: true,
+ historyIndex: -1,
+ }
+}
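+
+// A caller-side sketch of a read loop with autocompletion, written from
+// outside this package (assumes a Unix stdin/stdout; "screen" and the
+// completion text are illustrative):
+//
+//	oldState, err := terminal.MakeRaw(0)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer terminal.Restore(0, oldState)
+//	screen := struct {
+//		io.Reader
+//		io.Writer
+//	}{os.Stdin, os.Stdout}
+//	term := terminal.NewTerminal(screen, "> ")
+//	term.AutoCompleteCallback = func(line string, pos int, key rune) (string, int, bool) {
+//		if key == '\t' {
+//			newLine := line[:pos] + "completion" + line[pos:]
+//			return newLine, pos + len("completion"), true
+//		}
+//		return "", 0, false // not handled; process the key normally
+//	}
+//	for {
+//		line, err := term.ReadLine()
+//		if err != nil {
+//			break
+//		}
+//		fmt.Fprintln(term, "echo:", line)
+//	}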
+
+const (
+ keyCtrlD = 4
+ keyCtrlU = 21
+ keyEnter = '\r'
+ keyEscape = 27
+ keyBackspace = 127
+ keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota
+ keyUp
+ keyDown
+ keyLeft
+ keyRight
+ keyAltLeft
+ keyAltRight
+ keyHome
+ keyEnd
+ keyDeleteWord
+ keyDeleteLine
+ keyClearScreen
+ keyPasteStart
+ keyPasteEnd
+)
+
+var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
+var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'}
+
+// bytesToKey tries to parse a key sequence from b. If successful, it returns
+// the key and the remainder of the input. Otherwise it returns utf8.RuneError.
+func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
+ if len(b) == 0 {
+ return utf8.RuneError, nil
+ }
+
+ if !pasteActive {
+ switch b[0] {
+ case 1: // ^A
+ return keyHome, b[1:]
+ case 5: // ^E
+ return keyEnd, b[1:]
+ case 8: // ^H
+ return keyBackspace, b[1:]
+ case 11: // ^K
+ return keyDeleteLine, b[1:]
+ case 12: // ^L
+ return keyClearScreen, b[1:]
+ case 23: // ^W
+ return keyDeleteWord, b[1:]
+ }
+ }
+
+ if b[0] != keyEscape {
+ if !utf8.FullRune(b) {
+ return utf8.RuneError, b
+ }
+ r, l := utf8.DecodeRune(b)
+ return r, b[l:]
+ }
+
+ if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' {
+ switch b[2] {
+ case 'A':
+ return keyUp, b[3:]
+ case 'B':
+ return keyDown, b[3:]
+ case 'C':
+ return keyRight, b[3:]
+ case 'D':
+ return keyLeft, b[3:]
+ case 'H':
+ return keyHome, b[3:]
+ case 'F':
+ return keyEnd, b[3:]
+ }
+ }
+
+ if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
+ switch b[5] {
+ case 'C':
+ return keyAltRight, b[6:]
+ case 'D':
+ return keyAltLeft, b[6:]
+ }
+ }
+
+ if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) {
+ return keyPasteStart, b[6:]
+ }
+
+ if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) {
+ return keyPasteEnd, b[6:]
+ }
+
+ // If we get here then we have a key that we don't recognise, or a
+ // partial sequence. It's not clear how one should find the end of a
+ // sequence without knowing them all, but it seems that [a-zA-Z~] only
+ // appears at the end of a sequence.
+ for i, c := range b[0:] {
+ if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' {
+ return keyUnknown, b[i+1:]
+ }
+ }
+
+ return utf8.RuneError, b
+}
+
+// queue appends data to the end of t.outBuf
+func (t *Terminal) queue(data []rune) {
+ t.outBuf = append(t.outBuf, []byte(string(data))...)
+}
+
+var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
+var space = []rune{' '}
+
+func isPrintable(key rune) bool {
+ isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
+ return key >= 32 && !isInSurrogateArea
+}
+
+// moveCursorToPos appends data to t.outBuf which will move the cursor to the
+// given logical position in the text.
+func (t *Terminal) moveCursorToPos(pos int) {
+ if !t.echo {
+ return
+ }
+
+ x := visualLength(t.prompt) + pos
+ y := x / t.termWidth
+ x = x % t.termWidth
+
+ up := 0
+ if y < t.cursorY {
+ up = t.cursorY - y
+ }
+
+ down := 0
+ if y > t.cursorY {
+ down = y - t.cursorY
+ }
+
+ left := 0
+ if x < t.cursorX {
+ left = t.cursorX - x
+ }
+
+ right := 0
+ if x > t.cursorX {
+ right = x - t.cursorX
+ }
+
+ t.cursorX = x
+ t.cursorY = y
+ t.move(up, down, left, right)
+}
+
+func (t *Terminal) move(up, down, left, right int) {
+ movement := make([]rune, 3*(up+down+left+right))
+ m := movement
+ for i := 0; i < up; i++ {
+ m[0] = keyEscape
+ m[1] = '['
+ m[2] = 'A'
+ m = m[3:]
+ }
+ for i := 0; i < down; i++ {
+ m[0] = keyEscape
+ m[1] = '['
+ m[2] = 'B'
+ m = m[3:]
+ }
+ for i := 0; i < left; i++ {
+ m[0] = keyEscape
+ m[1] = '['
+ m[2] = 'D'
+ m = m[3:]
+ }
+ for i := 0; i < right; i++ {
+ m[0] = keyEscape
+ m[1] = '['
+ m[2] = 'C'
+ m = m[3:]
+ }
+
+ t.queue(movement)
+}
+
+func (t *Terminal) clearLineToRight() {
+ op := []rune{keyEscape, '[', 'K'}
+ t.queue(op)
+}
+
+const maxLineLength = 4096
+
+func (t *Terminal) setLine(newLine []rune, newPos int) {
+ if t.echo {
+ t.moveCursorToPos(0)
+ t.writeLine(newLine)
+ for i := len(newLine); i < len(t.line); i++ {
+ t.writeLine(space)
+ }
+ t.moveCursorToPos(newPos)
+ }
+ t.line = newLine
+ t.pos = newPos
+}
+
+func (t *Terminal) advanceCursor(places int) {
+ t.cursorX += places
+ t.cursorY += t.cursorX / t.termWidth
+ if t.cursorY > t.maxLine {
+ t.maxLine = t.cursorY
+ }
+ t.cursorX = t.cursorX % t.termWidth
+
+ if places > 0 && t.cursorX == 0 {
+ // Normally terminals will advance the current position
+ // when writing a character. But that doesn't happen
+ // for the last character in a line. However, when
+ // writing a character (except a new line) that causes
+ // a line wrap, the position will be advanced two
+ // places.
+ //
+ // So, if we are stopping at the end of a line, we
+ // need to write a newline so that our cursor can be
+ // advanced to the next line.
+ t.outBuf = append(t.outBuf, '\n')
+ }
+}
+
+func (t *Terminal) eraseNPreviousChars(n int) {
+ if n == 0 {
+ return
+ }
+
+ if t.pos < n {
+ n = t.pos
+ }
+ t.pos -= n
+ t.moveCursorToPos(t.pos)
+
+ copy(t.line[t.pos:], t.line[n+t.pos:])
+ t.line = t.line[:len(t.line)-n]
+ if t.echo {
+ t.writeLine(t.line[t.pos:])
+ for i := 0; i < n; i++ {
+ t.queue(space)
+ }
+ t.advanceCursor(n)
+ t.moveCursorToPos(t.pos)
+ }
+}
+
+// countToLeftWord returns the number of characters from the cursor to the
+// start of the previous word.
+func (t *Terminal) countToLeftWord() int {
+ if t.pos == 0 {
+ return 0
+ }
+
+ pos := t.pos - 1
+ for pos > 0 {
+ if t.line[pos] != ' ' {
+ break
+ }
+ pos--
+ }
+ for pos > 0 {
+ if t.line[pos] == ' ' {
+ pos++
+ break
+ }
+ pos--
+ }
+
+ return t.pos - pos
+}
+
+// countToRightWord returns the number of characters from the cursor to the
+// start of the next word.
+func (t *Terminal) countToRightWord() int {
+ pos := t.pos
+ for pos < len(t.line) {
+ if t.line[pos] == ' ' {
+ break
+ }
+ pos++
+ }
+ for pos < len(t.line) {
+ if t.line[pos] != ' ' {
+ break
+ }
+ pos++
+ }
+ return pos - t.pos
+}
+
+// visualLength returns the number of visible glyphs in s.
+func visualLength(runes []rune) int {
+ inEscapeSeq := false
+ length := 0
+
+ for _, r := range runes {
+ switch {
+ case inEscapeSeq:
+ if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {
+ inEscapeSeq = false
+ }
+ case r == '\x1b':
+ inEscapeSeq = true
+ default:
+ length++
+ }
+ }
+
+ return length
+}
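+
+// A worked example (illustrative): visualLength([]rune("\x1b[31m> \x1b[0m"))
+// is 2, because both color escape sequences are skipped and only the two
+// glyphs of "> " are counted.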
+
+// handleKey processes the given key and, optionally, returns a line of text
+// that the user has entered.
+func (t *Terminal) handleKey(key rune) (line string, ok bool) {
+ if t.pasteActive && key != keyEnter {
+ t.addKeyToLine(key)
+ return
+ }
+
+ switch key {
+ case keyBackspace:
+ if t.pos == 0 {
+ return
+ }
+ t.eraseNPreviousChars(1)
+ case keyAltLeft:
+ // move left by a word.
+ t.pos -= t.countToLeftWord()
+ t.moveCursorToPos(t.pos)
+ case keyAltRight:
+ // move right by a word.
+ t.pos += t.countToRightWord()
+ t.moveCursorToPos(t.pos)
+ case keyLeft:
+ if t.pos == 0 {
+ return
+ }
+ t.pos--
+ t.moveCursorToPos(t.pos)
+ case keyRight:
+ if t.pos == len(t.line) {
+ return
+ }
+ t.pos++
+ t.moveCursorToPos(t.pos)
+ case keyHome:
+ if t.pos == 0 {
+ return
+ }
+ t.pos = 0
+ t.moveCursorToPos(t.pos)
+ case keyEnd:
+ if t.pos == len(t.line) {
+ return
+ }
+ t.pos = len(t.line)
+ t.moveCursorToPos(t.pos)
+ case keyUp:
+ entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
+ if !ok {
+ return "", false
+ }
+ if t.historyIndex == -1 {
+ t.historyPending = string(t.line)
+ }
+ t.historyIndex++
+ runes := []rune(entry)
+ t.setLine(runes, len(runes))
+ case keyDown:
+ switch t.historyIndex {
+ case -1:
+ return
+ case 0:
+ runes := []rune(t.historyPending)
+ t.setLine(runes, len(runes))
+ t.historyIndex--
+ default:
+ entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
+ if ok {
+ t.historyIndex--
+ runes := []rune(entry)
+ t.setLine(runes, len(runes))
+ }
+ }
+ case keyEnter:
+ t.moveCursorToPos(len(t.line))
+ t.queue([]rune("\r\n"))
+ line = string(t.line)
+ ok = true
+ t.line = t.line[:0]
+ t.pos = 0
+ t.cursorX = 0
+ t.cursorY = 0
+ t.maxLine = 0
+ case keyDeleteWord:
+ // Delete zero or more spaces and then one or more characters.
+ t.eraseNPreviousChars(t.countToLeftWord())
+ case keyDeleteLine:
+ // Delete everything from the current cursor position to the
+ // end of line.
+ for i := t.pos; i < len(t.line); i++ {
+ t.queue(space)
+ t.advanceCursor(1)
+ }
+ t.line = t.line[:t.pos]
+ t.moveCursorToPos(t.pos)
+ case keyCtrlD:
+ // Erase the character under the current position.
+ // The EOF case when the line is empty is handled in
+ // readLine().
+ if t.pos < len(t.line) {
+ t.pos++
+ t.eraseNPreviousChars(1)
+ }
+ case keyCtrlU:
+ t.eraseNPreviousChars(t.pos)
+ case keyClearScreen:
+ // Erases the screen and moves the cursor to the home position.
+ t.queue([]rune("\x1b[2J\x1b[H"))
+ t.queue(t.prompt)
+ t.cursorX, t.cursorY = 0, 0
+ t.advanceCursor(visualLength(t.prompt))
+ t.setLine(t.line, t.pos)
+ default:
+ if t.AutoCompleteCallback != nil {
+ prefix := string(t.line[:t.pos])
+ suffix := string(t.line[t.pos:])
+
+ t.lock.Unlock()
+ newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key)
+ t.lock.Lock()
+
+ if completeOk {
+ t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos]))
+ return
+ }
+ }
+ if !isPrintable(key) {
+ return
+ }
+ if len(t.line) == maxLineLength {
+ return
+ }
+ t.addKeyToLine(key)
+ }
+ return
+}
+
+// addKeyToLine inserts the given key at the current position in the current
+// line.
+func (t *Terminal) addKeyToLine(key rune) {
+ if len(t.line) == cap(t.line) {
+ newLine := make([]rune, len(t.line), 2*(1+len(t.line)))
+ copy(newLine, t.line)
+ t.line = newLine
+ }
+ t.line = t.line[:len(t.line)+1]
+ copy(t.line[t.pos+1:], t.line[t.pos:])
+ t.line[t.pos] = key
+ if t.echo {
+ t.writeLine(t.line[t.pos:])
+ }
+ t.pos++
+ t.moveCursorToPos(t.pos)
+}
+
+func (t *Terminal) writeLine(line []rune) {
+ for len(line) != 0 {
+ remainingOnLine := t.termWidth - t.cursorX
+ todo := len(line)
+ if todo > remainingOnLine {
+ todo = remainingOnLine
+ }
+ t.queue(line[:todo])
+ t.advanceCursor(visualLength(line[:todo]))
+ line = line[todo:]
+ }
+}
+
+func (t *Terminal) Write(buf []byte) (n int, err error) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ if t.cursorX == 0 && t.cursorY == 0 {
+ // This is the easy case: there's nothing on the screen that we
+ // have to move out of the way.
+ return t.c.Write(buf)
+ }
+
+ // We have a prompt and possibly user input on the screen. We
+ // have to clear it first.
+ t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */)
+ t.cursorX = 0
+ t.clearLineToRight()
+
+ for t.cursorY > 0 {
+ t.move(1 /* up */, 0, 0, 0)
+ t.cursorY--
+ t.clearLineToRight()
+ }
+
+ if _, err = t.c.Write(t.outBuf); err != nil {
+ return
+ }
+ t.outBuf = t.outBuf[:0]
+
+ if n, err = t.c.Write(buf); err != nil {
+ return
+ }
+
+ t.writeLine(t.prompt)
+ if t.echo {
+ t.writeLine(t.line)
+ }
+
+ t.moveCursorToPos(t.pos)
+
+ if _, err = t.c.Write(t.outBuf); err != nil {
+ return
+ }
+ t.outBuf = t.outBuf[:0]
+ return
+}
+
+// ReadPassword temporarily changes the prompt and reads a password, without
+// echo, from the terminal.
+func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ oldPrompt := t.prompt
+ t.prompt = []rune(prompt)
+ t.echo = false
+
+ line, err = t.readLine()
+
+ t.prompt = oldPrompt
+ t.echo = true
+
+ return
+}
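+
+// A short usage sketch ("term" is assumed to be a *Terminal whose
+// underlying ReadWriter is already in raw mode):
+//
+//	password, err := term.ReadPassword("Password: ")
+//	if err != nil {
+//		return err
+//	}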
+
+// ReadLine returns a line of input from the terminal.
+func (t *Terminal) ReadLine() (line string, err error) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ return t.readLine()
+}
+
+func (t *Terminal) readLine() (line string, err error) {
+ // t.lock must be held at this point
+
+ if t.cursorX == 0 && t.cursorY == 0 {
+ t.writeLine(t.prompt)
+ t.c.Write(t.outBuf)
+ t.outBuf = t.outBuf[:0]
+ }
+
+ lineIsPasted := t.pasteActive
+
+ for {
+ rest := t.remainder
+ lineOk := false
+ for !lineOk {
+ var key rune
+ key, rest = bytesToKey(rest, t.pasteActive)
+ if key == utf8.RuneError {
+ break
+ }
+ if !t.pasteActive {
+ if key == keyCtrlD {
+ if len(t.line) == 0 {
+ return "", io.EOF
+ }
+ }
+ if key == keyPasteStart {
+ t.pasteActive = true
+ if len(t.line) == 0 {
+ lineIsPasted = true
+ }
+ continue
+ }
+ } else if key == keyPasteEnd {
+ t.pasteActive = false
+ continue
+ }
+ if !t.pasteActive {
+ lineIsPasted = false
+ }
+ line, lineOk = t.handleKey(key)
+ }
+ if len(rest) > 0 {
+ n := copy(t.inBuf[:], rest)
+ t.remainder = t.inBuf[:n]
+ } else {
+ t.remainder = nil
+ }
+ t.c.Write(t.outBuf)
+ t.outBuf = t.outBuf[:0]
+ if lineOk {
+ if t.echo {
+ t.historyIndex = -1
+ t.history.Add(line)
+ }
+ if lineIsPasted {
+ err = ErrPasteIndicator
+ }
+ return
+ }
+
+ // t.remainder is a slice at the beginning of t.inBuf
+ // containing a partial key sequence
+ readBuf := t.inBuf[len(t.remainder):]
+ var n int
+
+ t.lock.Unlock()
+ n, err = t.c.Read(readBuf)
+ t.lock.Lock()
+
+ if err != nil {
+ return
+ }
+
+ t.remainder = t.inBuf[:n+len(t.remainder)]
+ }
+
+ panic("unreachable") // for Go 1.0.
+}
+
+// SetPrompt sets the prompt to be used when reading subsequent lines.
+func (t *Terminal) SetPrompt(prompt string) {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ t.prompt = []rune(prompt)
+}
+
+func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {
+ // Move cursor to column zero at the start of the line.
+ t.move(t.cursorY, 0, t.cursorX, 0)
+ t.cursorX, t.cursorY = 0, 0
+ t.clearLineToRight()
+ for t.cursorY < numPrevLines {
+ // Move down a line
+ t.move(0, 1, 0, 0)
+ t.cursorY++
+ t.clearLineToRight()
+ }
+ // Move back to beginning.
+ t.move(t.cursorY, 0, 0, 0)
+ t.cursorX, t.cursorY = 0, 0
+
+ t.queue(t.prompt)
+ t.advanceCursor(visualLength(t.prompt))
+ t.writeLine(t.line)
+ t.moveCursorToPos(t.pos)
+}
+
+func (t *Terminal) SetSize(width, height int) error {
+ t.lock.Lock()
+ defer t.lock.Unlock()
+
+ if width == 0 {
+ width = 1
+ }
+
+ oldWidth := t.termWidth
+ t.termWidth, t.termHeight = width, height
+
+ switch {
+ case width == oldWidth:
+ // If the width didn't change then nothing else needs to be
+ // done.
+ return nil
+ case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:
+// If there is nothing on the current line and no prompt has been
+// printed, there is nothing to do.
+ return nil
+ case width < oldWidth:
+ // Some terminals (e.g. xterm) will truncate lines that were
+// too long when shrinking. Others (e.g. gnome-terminal) will
+ // attempt to wrap them. For the former, repainting t.maxLine
+ // works great, but that behaviour goes badly wrong in the case
+ // of the latter because they have doubled every full line.
+
+ // We assume that we are working on a terminal that wraps lines
+ // and adjust the cursor position based on every previous line
+ // wrapping and turning into two. This causes the prompt on
+ // xterms to move upwards, which isn't great, but it avoids a
+ // huge mess with gnome-terminal.
+ if t.cursorX >= t.termWidth {
+ t.cursorX = t.termWidth - 1
+ }
+ t.cursorY *= 2
+ t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2)
+ case width > oldWidth:
+ // If the terminal expands then our position calculations will
+ // be wrong in the future because we think the cursor is
+ // |t.pos| chars into the string, but there will be a gap at
+ // the end of any wrapped line.
+ //
+ // But the position will actually be correct until we move, so
+ // we can move back to the beginning and repaint everything.
+ t.clearAndRepaintLinePlusNPrevious(t.maxLine)
+ }
+
+ _, err := t.c.Write(t.outBuf)
+ t.outBuf = t.outBuf[:0]
+ return err
+}
+
+type pasteIndicatorError struct{}
+
+func (pasteIndicatorError) Error() string {
+ return "terminal: ErrPasteIndicator not correctly handled"
+}
+
+// ErrPasteIndicator may be returned from ReadLine as the error, in addition
+// to valid line data. It indicates that bracketed paste mode is enabled and
+// that the returned line consists only of pasted data. Programs may wish to
+// interpret pasted data more literally than typed data.
+var ErrPasteIndicator = pasteIndicatorError{}
+
+// SetBracketedPasteMode requests that the terminal bracket paste operations
+// with markers. Not all terminals support this but, if it is supported, then
+// enabling this mode will stop any autocomplete callback from running due to
+// pastes. Additionally, any lines that are completely pasted will be returned
+// from ReadLine with the error set to ErrPasteIndicator.
+func (t *Terminal) SetBracketedPasteMode(on bool) {
+ if on {
+ io.WriteString(t.c, "\x1b[?2004h")
+ } else {
+ io.WriteString(t.c, "\x1b[?2004l")
+ }
+}
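+
+// A sketch of handling pasted lines ("term" is an assumed *Terminal on a
+// terminal that supports bracketed paste):
+//
+//	term.SetBracketedPasteMode(true)
+//	defer term.SetBracketedPasteMode(false)
+//	line, err := term.ReadLine()
+//	if err == ErrPasteIndicator {
+//		// The line arrived entirely via paste; treat it literally.
+//		err = nil
+//	}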
+
+// stRingBuffer is a ring buffer of strings.
+type stRingBuffer struct {
+ // entries contains max elements.
+ entries []string
+ max int
+ // head contains the index of the element most recently added to the ring.
+ head int
+ // size contains the number of elements in the ring.
+ size int
+}
+
+func (s *stRingBuffer) Add(a string) {
+ if s.entries == nil {
+ const defaultNumEntries = 100
+ s.entries = make([]string, defaultNumEntries)
+ s.max = defaultNumEntries
+ }
+
+ s.head = (s.head + 1) % s.max
+ s.entries[s.head] = a
+ if s.size < s.max {
+ s.size++
+ }
+}
+
+// NthPreviousEntry returns the value passed to the nth previous call to Add.
+// If n is zero then the immediately prior value is returned, if one, then the
+// next most recent, and so on. If such an element doesn't exist then ok is
+// false.
+func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
+ if n >= s.size {
+ return "", false
+ }
+ index := s.head - n
+ if index < 0 {
+ index += s.max
+ }
+ return s.entries[index], true
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/terminal_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/terminal_test.go
new file mode 100644
index 00000000000..a663fe41b77
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/terminal_test.go
@@ -0,0 +1,269 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package terminal
+
+import (
+ "io"
+ "testing"
+)
+
+type MockTerminal struct {
+ toSend []byte
+ bytesPerRead int
+ received []byte
+}
+
+func (c *MockTerminal) Read(data []byte) (n int, err error) {
+ n = len(data)
+ if n == 0 {
+ return
+ }
+ if n > len(c.toSend) {
+ n = len(c.toSend)
+ }
+ if n == 0 {
+ return 0, io.EOF
+ }
+ if c.bytesPerRead > 0 && n > c.bytesPerRead {
+ n = c.bytesPerRead
+ }
+ copy(data, c.toSend[:n])
+ c.toSend = c.toSend[n:]
+ return
+}
+
+func (c *MockTerminal) Write(data []byte) (n int, err error) {
+ c.received = append(c.received, data...)
+ return len(data), nil
+}
+
+func TestClose(t *testing.T) {
+ c := &MockTerminal{}
+ ss := NewTerminal(c, "> ")
+ line, err := ss.ReadLine()
+ if line != "" {
+ t.Errorf("Expected empty line but got: %s", line)
+ }
+ if err != io.EOF {
+ t.Errorf("Error should have been EOF but got: %s", err)
+ }
+}
+
+var keyPressTests = []struct {
+ in string
+ line string
+ err error
+ throwAwayLines int
+}{
+ {
+ err: io.EOF,
+ },
+ {
+ in: "\r",
+ line: "",
+ },
+ {
+ in: "foo\r",
+ line: "foo",
+ },
+ {
+ in: "a\x1b[Cb\r", // right
+ line: "ab",
+ },
+ {
+ in: "a\x1b[Db\r", // left
+ line: "ba",
+ },
+ {
+ in: "a\177b\r", // backspace
+ line: "b",
+ },
+ {
+ in: "\x1b[A\r", // up
+ },
+ {
+ in: "\x1b[B\r", // down
+ },
+ {
+ in: "line\x1b[A\x1b[B\r", // up then down
+ line: "line",
+ },
+ {
+ in: "line1\rline2\x1b[A\r", // recall previous line.
+ line: "line1",
+ throwAwayLines: 1,
+ },
+ {
+ // recall two previous lines and append.
+ in: "line1\rline2\rline3\x1b[A\x1b[Axxx\r",
+ line: "line1xxx",
+ throwAwayLines: 2,
+ },
+ {
+ // Ctrl-A to move to beginning of line followed by ^K to kill
+ // line.
+ in: "a b \001\013\r",
+ line: "",
+ },
+ {
+ // Ctrl-A to move to beginning of line, Ctrl-E to move to end,
+ // finally ^K to kill nothing.
+ in: "a b \001\005\013\r",
+ line: "a b ",
+ },
+ {
+ in: "\027\r",
+ line: "",
+ },
+ {
+ in: "a\027\r",
+ line: "",
+ },
+ {
+ in: "a \027\r",
+ line: "",
+ },
+ {
+ in: "a b\027\r",
+ line: "a ",
+ },
+ {
+ in: "a b \027\r",
+ line: "a ",
+ },
+ {
+ in: "one two thr\x1b[D\027\r",
+ line: "one two r",
+ },
+ {
+ in: "\013\r",
+ line: "",
+ },
+ {
+ in: "a\013\r",
+ line: "a",
+ },
+ {
+ in: "ab\x1b[D\013\r",
+ line: "a",
+ },
+ {
+ in: "Ξεσκεπάζω\r",
+ line: "Ξεσκεπάζω",
+ },
+ {
+ in: "£\r\x1b[A\177\r", // non-ASCII char, enter, up, backspace.
+ line: "",
+ throwAwayLines: 1,
+ },
+ {
+ in: "£\r££\x1b[A\x1b[B\177\r", // non-ASCII char, enter, 2x non-ASCII, up, down, backspace, enter.
+ line: "£",
+ throwAwayLines: 1,
+ },
+ {
+ // Ctrl-D at the end of the line should be ignored.
+ in: "a\004\r",
+ line: "a",
+ },
+ {
+ // a, b, left, Ctrl-D should erase the b.
+ in: "ab\x1b[D\004\r",
+ line: "a",
+ },
+ {
+ // a, b, c, d, left, left, ^U should erase to the beginning of
+ // the line.
+ in: "abcd\x1b[D\x1b[D\025\r",
+ line: "cd",
+ },
+ {
+ // Bracketed paste mode: control sequences should be returned
+ // verbatim in paste mode.
+ in: "abc\x1b[200~de\177f\x1b[201~\177\r",
+ line: "abcde\177",
+ },
+ {
+ // Enter in bracketed paste mode should still work.
+ in: "abc\x1b[200~d\refg\x1b[201~h\r",
+ line: "efgh",
+ throwAwayLines: 1,
+ },
+ {
+ // Lines consisting entirely of pasted data should be indicated as such.
+ in: "\x1b[200~a\r",
+ line: "a",
+ err: ErrPasteIndicator,
+ },
+}
+
+func TestKeyPresses(t *testing.T) {
+ for i, test := range keyPressTests {
+ for j := 1; j < len(test.in); j++ {
+ c := &MockTerminal{
+ toSend: []byte(test.in),
+ bytesPerRead: j,
+ }
+ ss := NewTerminal(c, "> ")
+ for k := 0; k < test.throwAwayLines; k++ {
+ _, err := ss.ReadLine()
+ if err != nil {
+ t.Errorf("Throwaway line %d from test %d resulted in error: %s", k, i, err)
+ }
+ }
+ line, err := ss.ReadLine()
+ if line != test.line {
+ t.Errorf("Line resulting from test %d (%d bytes per read) was '%s', expected '%s'", i, j, line, test.line)
+ break
+ }
+ if err != test.err {
+ t.Errorf("Error resulting from test %d (%d bytes per read) was '%v', expected '%v'", i, j, err, test.err)
+ break
+ }
+ }
+ }
+}
+
+func TestPasswordNotSaved(t *testing.T) {
+ c := &MockTerminal{
+ toSend: []byte("password\r\x1b[A\r"),
+ bytesPerRead: 1,
+ }
+ ss := NewTerminal(c, "> ")
+ pw, _ := ss.ReadPassword("> ")
+ if pw != "password" {
+ t.Fatalf("failed to read password, got %s", pw)
+ }
+ line, _ := ss.ReadLine()
+ if len(line) > 0 {
+ t.Fatalf("password was saved in history")
+ }
+}
+
+var setSizeTests = []struct {
+ width, height int
+}{
+ {40, 13},
+ {80, 24},
+ {132, 43},
+}
+
+func TestTerminalSetSize(t *testing.T) {
+ for _, setSize := range setSizeTests {
+ c := &MockTerminal{
+ toSend: []byte("password\r\x1b[A\r"),
+ bytesPerRead: 1,
+ }
+ ss := NewTerminal(c, "> ")
+ ss.SetSize(setSize.width, setSize.height)
+ pw, _ := ss.ReadPassword("Password: ")
+ if pw != "password" {
+ t.Fatalf("failed to read password, got %s", pw)
+ }
+ if string(c.received) != "Password: \r\n" {
+ t.Errorf("failed to set the temporary prompt expected %q, got %q", "Password: ", c.received)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util.go
new file mode 100644
index 00000000000..598e3df77e7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util.go
@@ -0,0 +1,128 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// oldState, err := terminal.MakeRaw(0)
+// if err != nil {
+// panic(err)
+// }
+// defer terminal.Restore(0, oldState)
+package terminal // import "golang.org/x/crypto/ssh/terminal"
+
+import (
+ "io"
+ "syscall"
+ "unsafe"
+)
+
+// State contains the state of a terminal.
+type State struct {
+ termios syscall.Termios
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+ var termios syscall.Termios
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+ var oldState State
+ if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState.termios
+ newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF
+ newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG
+ if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+ var oldState State
+ if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, state *State) error {
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
+ return err
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+ var dimensions [4]uint16
+
+ if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
+ return -1, -1, err
+ }
+ return int(dimensions[1]), int(dimensions[0]), nil
+}
+
+// ReadPassword reads a line of input from a terminal without local echo. This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+ var oldState syscall.Termios
+ if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 {
+ return nil, err
+ }
+
+ newState := oldState
+ newState.Lflag &^= syscall.ECHO
+ newState.Lflag |= syscall.ICANON | syscall.ISIG
+ newState.Iflag |= syscall.ICRNL
+ if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
+ return nil, err
+ }
+
+ defer func() {
+ syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0)
+ }()
+
+ var buf [16]byte
+ var ret []byte
+ for {
+ n, err := syscall.Read(fd, buf[:])
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ if len(ret) == 0 {
+ return nil, io.EOF
+ }
+ break
+ }
+ if buf[n-1] == '\n' {
+ n--
+ }
+ ret = append(ret, buf[:n]...)
+ if n < len(buf) {
+ break
+ }
+ }
+
+ return ret, nil
+}
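+
+// A caller-side sketch (assumes stdin is a terminal; fd 0 could also be
+// obtained as int(os.Stdin.Fd())):
+//
+//	pw, err := terminal.ReadPassword(0)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println() // the newline typed by the user was not echoed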
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_bsd.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_bsd.go
new file mode 100644
index 00000000000..9c1ffd145a7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_bsd.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package terminal
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+const ioctlWriteTermios = syscall.TIOCSETA
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_linux.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_linux.go
new file mode 100644
index 00000000000..5883b22d780
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_linux.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package terminal
+
+// These constants are declared here, rather than imported from the
+// syscall package, because some syscall implementations (for example
+// gccgo's), even on Linux, do not declare them.
+const ioctlReadTermios = 0x5401 // syscall.TCGETS
+const ioctlWriteTermios = 0x5402 // syscall.TCSETS
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_windows.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_windows.go
new file mode 100644
index 00000000000..2dd6c3d9788
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/terminal/util_windows.go
@@ -0,0 +1,174 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// oldState, err := terminal.MakeRaw(0)
+// if err != nil {
+// panic(err)
+// }
+// defer terminal.Restore(0, oldState)
+package terminal
+
+import (
+ "io"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ enableLineInput = 2
+ enableEchoInput = 4
+ enableProcessedInput = 1
+ enableWindowInput = 8
+ enableMouseInput = 16
+ enableInsertMode = 32
+ enableQuickEditMode = 64
+ enableExtendedFlags = 128
+ enableAutoPosition = 256
+ enableProcessedOutput = 1
+ enableWrapAtEolOutput = 2
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+ procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
+ procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+)
+
+type (
+ short int16
+ word uint16
+
+ coord struct {
+ x short
+ y short
+ }
+ smallRect struct {
+ left short
+ top short
+ right short
+ bottom short
+ }
+ consoleScreenBufferInfo struct {
+ size coord
+ cursorPosition coord
+ attributes word
+ window smallRect
+ maximumWindowSize coord
+ }
+)
+
+type State struct {
+ mode uint32
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+ var st uint32
+ _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ st &^= (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
+ _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ return &State{st}, nil
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+ var st uint32
+ _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ return &State{st}, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, state *State) error {
+ _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
+ return err
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+ var info consoleScreenBufferInfo
+ _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
+ if e != 0 {
+ return 0, 0, error(e)
+ }
+ return int(info.size.x), int(info.size.y), nil
+}
+
+// ReadPassword reads a line of input from a terminal without local echo. This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+ var st uint32
+ _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+ old := st
+
+ st &^= (enableEchoInput)
+ st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
+ _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
+ if e != 0 {
+ return nil, error(e)
+ }
+
+ defer func() {
+ syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
+ }()
+
+ var buf [16]byte
+ var ret []byte
+ for {
+ n, err := syscall.Read(syscall.Handle(fd), buf[:])
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ if len(ret) == 0 {
+ return nil, io.EOF
+ }
+ break
+ }
+ if buf[n-1] == '\n' {
+ n--
+ }
+ if n > 0 && buf[n-1] == '\r' {
+ n--
+ }
+ ret = append(ret, buf[:n]...)
+ if n < len(buf) {
+ break
+ }
+ }
+
+ return ret, nil
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/agent_unix_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/agent_unix_test.go
new file mode 100644
index 00000000000..f481253c9eb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/agent_unix_test.go
@@ -0,0 +1,59 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package test
+
+import (
+ "bytes"
+ "testing"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/agent"
+)
+
+func TestAgentForward(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ keyring := agent.NewKeyring()
+ if err := keyring.Add(agent.AddedKey{PrivateKey: testPrivateKeys["dsa"]}); err != nil {
+ t.Fatalf("Error adding key: %s", err)
+ }
+ if err := keyring.Add(agent.AddedKey{
+ PrivateKey: testPrivateKeys["dsa"],
+ ConfirmBeforeUse: true,
+ LifetimeSecs: 3600,
+ }); err != nil {
+ t.Fatalf("Error adding key with constraints: %s", err)
+ }
+ pub := testPublicKeys["dsa"]
+
+ sess, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("NewSession: %v", err)
+ }
+ if err := agent.RequestAgentForwarding(sess); err != nil {
+ t.Fatalf("RequestAgentForwarding: %v", err)
+ }
+
+ if err := agent.ForwardToAgent(conn, keyring); err != nil {
+ t.Fatalf("SetupForwardKeyring: %v", err)
+ }
+ out, err := sess.CombinedOutput("ssh-add -L")
+ if err != nil {
+ t.Fatalf("running ssh-add: %v, out %s", err, out)
+ }
+ key, _, _, _, err := ssh.ParseAuthorizedKey(out)
+ if err != nil {
+ t.Fatalf("ParseAuthorizedKey(%q): %v", out, err)
+ }
+
+ if !bytes.Equal(key.Marshal(), pub.Marshal()) {
+ t.Fatalf("got key %s, want %s", ssh.MarshalAuthorizedKey(key), ssh.MarshalAuthorizedKey(pub))
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/cert_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/cert_test.go
new file mode 100644
index 00000000000..364790f17d7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/cert_test.go
@@ -0,0 +1,47 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package test
+
+import (
+ "crypto/rand"
+ "testing"
+
+ "golang.org/x/crypto/ssh"
+)
+
+func TestCertLogin(t *testing.T) {
+ s := newServer(t)
+ defer s.Shutdown()
+
+ // Use a key different from the default.
+ clientKey := testSigners["dsa"]
+ caAuthKey := testSigners["ecdsa"]
+ cert := &ssh.Certificate{
+ Key: clientKey.PublicKey(),
+ ValidPrincipals: []string{username()},
+ CertType: ssh.UserCert,
+ ValidBefore: ssh.CertTimeInfinity,
+ }
+ if err := cert.SignCert(rand.Reader, caAuthKey); err != nil {
+ t.Fatalf("SetSignature: %v", err)
+ }
+
+ certSigner, err := ssh.NewCertSigner(cert, clientKey)
+ if err != nil {
+ t.Fatalf("NewCertSigner: %v", err)
+ }
+
+ conf := &ssh.ClientConfig{
+ User: username(),
+ }
+ conf.Auth = append(conf.Auth, ssh.PublicKeys(certSigner))
+ client, err := s.TryDial(conf)
+ if err != nil {
+ t.Fatalf("TryDial: %v", err)
+ }
+ client.Close()
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/doc.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/doc.go
new file mode 100644
index 00000000000..3f9b3346dfa
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This package contains integration tests for the
+// golang.org/x/crypto/ssh package.
+package test // import "golang.org/x/crypto/ssh/test"
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/forward_unix_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/forward_unix_test.go
new file mode 100644
index 00000000000..877a88cde3d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/forward_unix_test.go
@@ -0,0 +1,160 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package test
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "net"
+ "testing"
+ "time"
+)
+
+func TestPortForward(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ sshListener, err := conn.Listen("tcp", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ go func() {
+ sshConn, err := sshListener.Accept()
+ if err != nil {
+ t.Fatalf("listen.Accept failed: %v", err)
+ }
+
+ _, err = io.Copy(sshConn, sshConn)
+ if err != nil && err != io.EOF {
+ t.Fatalf("ssh client copy: %v", err)
+ }
+ sshConn.Close()
+ }()
+
+ forwardedAddr := sshListener.Addr().String()
+ tcpConn, err := net.Dial("tcp", forwardedAddr)
+ if err != nil {
+ t.Fatalf("TCP dial failed: %v", err)
+ }
+
+ readChan := make(chan []byte)
+ go func() {
+ data, _ := ioutil.ReadAll(tcpConn)
+ readChan <- data
+ }()
+
+ // Invent some data.
+ data := make([]byte, 100*1000)
+ for i := range data {
+ data[i] = byte(i % 255)
+ }
+
+ var sent []byte
+ for len(sent) < 1000*1000 {
+ // Send random sized chunks
+ m := rand.Intn(len(data))
+ n, err := tcpConn.Write(data[:m])
+ if err != nil {
+ break
+ }
+ sent = append(sent, data[:n]...)
+ }
+ if err := tcpConn.(*net.TCPConn).CloseWrite(); err != nil {
+ t.Errorf("tcpConn.CloseWrite: %v", err)
+ }
+
+ read := <-readChan
+
+ if len(sent) != len(read) {
+ t.Fatalf("got %d bytes, want %d", len(read), len(sent))
+ }
+ if !bytes.Equal(sent, read) {
+ t.Fatalf("read back data does not match")
+ }
+
+ if err := sshListener.Close(); err != nil {
+ t.Fatalf("sshListener.Close: %v", err)
+ }
+
+ // Check that the forward disappeared.
+ tcpConn, err = net.Dial("tcp", forwardedAddr)
+ if err == nil {
+ tcpConn.Close()
+ t.Errorf("still listening to %s after closing", forwardedAddr)
+ }
+}
+
+func TestAcceptClose(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+
+ sshListener, err := conn.Listen("tcp", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ quit := make(chan error, 1)
+ go func() {
+ for {
+ c, err := sshListener.Accept()
+ if err != nil {
+ quit <- err
+ break
+ }
+ c.Close()
+ }
+ }()
+ sshListener.Close()
+
+ select {
+ case <-time.After(1 * time.Second):
+ t.Errorf("timeout: listener did not close.")
+ case err := <-quit:
+ t.Logf("quit as expected (error %v)", err)
+ }
+}
+
+// Check that listeners exit if the underlying client transport dies.
+func TestPortForwardConnectionClose(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+
+ sshListener, err := conn.Listen("tcp", "localhost:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ quit := make(chan error, 1)
+ go func() {
+ for {
+ c, err := sshListener.Accept()
+ if err != nil {
+ quit <- err
+ break
+ }
+ c.Close()
+ }
+ }()
+
+ // It would be even nicer if we closed the server side, but it
+ // is more involved as the fd for that side is dup()ed.
+ server.clientConn.Close()
+
+ select {
+ case <-time.After(1 * time.Second):
+ t.Errorf("timeout: listener did not close.")
+ case err := <-quit:
+ t.Logf("quit as expected (error %v)", err)
+ }
+}
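+
+// Illustrative sketch, not part of the upstream tests: the serving
+// half of the remote-forward pattern exercised above, factored out
+// the way an application might write it. The helper name is an
+// assumption; it echoes every forwarded connection until the
+// listener is closed, exactly as TestPortForward's goroutine does.
+func echoForwarded(l net.Listener) error {
+	for {
+		c, err := l.Accept()
+		if err != nil {
+			return err // listener closed or transport died
+		}
+		go func(c net.Conn) {
+			defer c.Close()
+			io.Copy(c, c) // echo bytes back to the sender
+		}(c)
+	}
+}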
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/session_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/session_test.go
new file mode 100644
index 00000000000..c0e714ba906
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/session_test.go
@@ -0,0 +1,340 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows
+
+package test
+
+// Session functional tests.
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "strings"
+ "testing"
+
+ "golang.org/x/crypto/ssh"
+)
+
+func TestRunCommandSuccess(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+ err = session.Run("true")
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+}
+
+func TestHostKeyCheck(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+
+ conf := clientConfig()
+ hostDB := hostKeyDB()
+ conf.HostKeyCallback = hostDB.Check
+
+ // change the keys.
+ hostDB.keys[ssh.KeyAlgoRSA][25]++
+ hostDB.keys[ssh.KeyAlgoDSA][25]++
+ hostDB.keys[ssh.KeyAlgoECDSA256][25]++
+
+ conn, err := server.TryDial(conf)
+ if err == nil {
+ conn.Close()
+ t.Fatalf("dial should have failed.")
+ } else if !strings.Contains(err.Error(), "host key mismatch") {
+ t.Fatalf("'host key mismatch' not found in %v", err)
+ }
+}
+
+func TestRunCommandStdin(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+
+ r, w := io.Pipe()
+ defer r.Close()
+ defer w.Close()
+ session.Stdin = r
+
+ err = session.Run("true")
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+}
+
+func TestRunCommandStdinError(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+
+ r, w := io.Pipe()
+ defer r.Close()
+ session.Stdin = r
+ pipeErr := errors.New("closing write end of pipe")
+ w.CloseWithError(pipeErr)
+
+ err = session.Run("true")
+ if err != pipeErr {
+ t.Fatalf("expected %v, found %v", pipeErr, err)
+ }
+}
+
+func TestRunCommandFailed(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+ err = session.Run(`bash -c "kill -9 $$"`)
+ if err == nil {
+ t.Fatalf("session succeeded: %v", err)
+ }
+}
+
+func TestRunCommandWeClosed(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ err = session.Shell()
+ if err != nil {
+ t.Fatalf("shell failed: %v", err)
+ }
+ err = session.Close()
+ if err != nil {
+ t.Fatalf("shell failed: %v", err)
+ }
+}
+
+func TestFuncLargeRead(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("unable to create new session: %s", err)
+ }
+
+ stdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Fatalf("unable to acquire stdout pipe: %s", err)
+ }
+
+ err = session.Start("dd if=/dev/urandom bs=2048 count=1024")
+ if err != nil {
+ t.Fatalf("unable to execute remote command: %s", err)
+ }
+
+ buf := new(bytes.Buffer)
+ n, err := io.Copy(buf, stdout)
+ if err != nil {
+ t.Fatalf("error reading from remote stdout: %s", err)
+ }
+
+ if n != 2048*1024 {
+		t.Fatalf("Expected %d bytes but read only %d from remote command", 2048*1024, n)
+ }
+}
+
+func TestKeyChange(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conf := clientConfig()
+ hostDB := hostKeyDB()
+ conf.HostKeyCallback = hostDB.Check
+ conf.RekeyThreshold = 1024
+ conn := server.Dial(conf)
+ defer conn.Close()
+
+ for i := 0; i < 4; i++ {
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("unable to create new session: %s", err)
+ }
+
+ stdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Fatalf("unable to acquire stdout pipe: %s", err)
+ }
+
+ err = session.Start("dd if=/dev/urandom bs=1024 count=1")
+ if err != nil {
+ t.Fatalf("unable to execute remote command: %s", err)
+ }
+ buf := new(bytes.Buffer)
+ n, err := io.Copy(buf, stdout)
+ if err != nil {
+ t.Fatalf("error reading from remote stdout: %s", err)
+ }
+
+ want := int64(1024)
+ if n != want {
+ t.Fatalf("Expected %d bytes but read only %d from remote command", want, n)
+ }
+ }
+
+ if changes := hostDB.checkCount; changes < 4 {
+		t.Errorf("got %d host key checks, want at least 4", changes)
+ }
+}
+
+func TestInvalidTerminalMode(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+
+ if err = session.RequestPty("vt100", 80, 40, ssh.TerminalModes{255: 1984}); err == nil {
+ t.Fatalf("req-pty failed: successful request with invalid mode")
+ }
+}
+
+func TestValidTerminalMode(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ conn := server.Dial(clientConfig())
+ defer conn.Close()
+
+ session, err := conn.NewSession()
+ if err != nil {
+ t.Fatalf("session failed: %v", err)
+ }
+ defer session.Close()
+
+ stdout, err := session.StdoutPipe()
+ if err != nil {
+ t.Fatalf("unable to acquire stdout pipe: %s", err)
+ }
+
+ stdin, err := session.StdinPipe()
+ if err != nil {
+ t.Fatalf("unable to acquire stdin pipe: %s", err)
+ }
+
+ tm := ssh.TerminalModes{ssh.ECHO: 0}
+ if err = session.RequestPty("xterm", 80, 40, tm); err != nil {
+ t.Fatalf("req-pty failed: %s", err)
+ }
+
+ err = session.Shell()
+ if err != nil {
+ t.Fatalf("session failed: %s", err)
+ }
+
+ stdin.Write([]byte("stty -a && exit\n"))
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, stdout); err != nil {
+ t.Fatalf("reading failed: %s", err)
+ }
+
+ if sttyOutput := buf.String(); !strings.Contains(sttyOutput, "-echo ") {
+ t.Fatalf("terminal mode failure: expected -echo in stty output, got %s", sttyOutput)
+ }
+}
+
+func TestCiphers(t *testing.T) {
+ var config ssh.Config
+ config.SetDefaults()
+ cipherOrder := config.Ciphers
+	// This cipher is not in the default list; when it is commented out in
+	// cipher.go, the client falls back to the next available cipher, as per
+	// line 292.
+ cipherOrder = append(cipherOrder, "aes128-cbc")
+
+ for _, ciph := range cipherOrder {
+ server := newServer(t)
+ defer server.Shutdown()
+ conf := clientConfig()
+ conf.Ciphers = []string{ciph}
+		// Don't fail if sshd doesn't have the cipher.
+ conf.Ciphers = append(conf.Ciphers, cipherOrder...)
+ conn, err := server.TryDial(conf)
+ if err == nil {
+ conn.Close()
+ } else {
+ t.Fatalf("failed for cipher %q", ciph)
+ }
+ }
+}
+
+func TestMACs(t *testing.T) {
+ var config ssh.Config
+ config.SetDefaults()
+ macOrder := config.MACs
+
+ for _, mac := range macOrder {
+ server := newServer(t)
+ defer server.Shutdown()
+ conf := clientConfig()
+ conf.MACs = []string{mac}
+		// Don't fail if sshd doesn't have the MAC.
+ conf.MACs = append(conf.MACs, macOrder...)
+ if conn, err := server.TryDial(conf); err == nil {
+ conn.Close()
+ } else {
+ t.Fatalf("failed for MAC %q", mac)
+ }
+ }
+}
+
+func TestKeyExchanges(t *testing.T) {
+ var config ssh.Config
+ config.SetDefaults()
+ kexOrder := config.KeyExchanges
+ for _, kex := range kexOrder {
+ server := newServer(t)
+ defer server.Shutdown()
+ conf := clientConfig()
+		// Don't fail if sshd doesn't have the kex.
+ conf.KeyExchanges = append([]string{kex}, kexOrder...)
+ conn, err := server.TryDial(conf)
+ if err == nil {
+ conn.Close()
+ } else {
+ t.Errorf("failed for kex %q", kex)
+ }
+ }
+}
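+
+// Illustrative sketch, not part of the upstream tests: the three
+// tests above pin algorithms by filling in the preference lists of
+// the embedded ssh.Config; an application can do the same before
+// dialing. The helper name and the algorithm choices below are
+// assumptions for illustration and must appear in this package's
+// supported lists.
+func pinnedClientConfig(base *ssh.ClientConfig) *ssh.ClientConfig {
+	// The first entry of each list that both sides support wins.
+	base.Ciphers = []string{"aes128-ctr"}
+	base.MACs = []string{"hmac-sha1"}
+	base.KeyExchanges = []string{"diffie-hellman-group14-sha1"}
+	return base
+}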
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/tcpip_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/tcpip_test.go
new file mode 100644
index 00000000000..a2eb9358d02
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/tcpip_test.go
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !windows
+
+package test
+
+// direct-tcpip functional tests
+
+import (
+ "io"
+ "net"
+ "testing"
+)
+
+func TestDial(t *testing.T) {
+ server := newServer(t)
+ defer server.Shutdown()
+ sshConn := server.Dial(clientConfig())
+ defer sshConn.Close()
+
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatalf("Listen: %v", err)
+ }
+ defer l.Close()
+
+ go func() {
+ for {
+ c, err := l.Accept()
+ if err != nil {
+ break
+ }
+
+ io.WriteString(c, c.RemoteAddr().String())
+ c.Close()
+ }
+ }()
+
+ conn, err := sshConn.Dial("tcp", l.Addr().String())
+ if err != nil {
+ t.Fatalf("Dial: %v", err)
+ }
+ defer conn.Close()
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/test_unix_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/test_unix_test.go
new file mode 100644
index 00000000000..f1fc50b2e48
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/test_unix_test.go
@@ -0,0 +1,261 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd plan9
+
+package test
+
+// Functional test harness for Unix.
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "os"
+ "os/exec"
+ "os/user"
+ "path/filepath"
+ "testing"
+ "text/template"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/testdata"
+)
+
+const sshd_config = `
+Protocol 2
+HostKey {{.Dir}}/id_rsa
+HostKey {{.Dir}}/id_dsa
+HostKey {{.Dir}}/id_ecdsa
+Pidfile {{.Dir}}/sshd.pid
+#UsePrivilegeSeparation no
+KeyRegenerationInterval 3600
+ServerKeyBits 768
+SyslogFacility AUTH
+LogLevel DEBUG2
+LoginGraceTime 120
+PermitRootLogin no
+StrictModes no
+RSAAuthentication yes
+PubkeyAuthentication yes
+AuthorizedKeysFile {{.Dir}}/id_user.pub
+TrustedUserCAKeys {{.Dir}}/id_ecdsa.pub
+IgnoreRhosts yes
+RhostsRSAAuthentication no
+HostbasedAuthentication no
+`
+
+var configTmpl = template.Must(template.New("").Parse(sshd_config))
+
+type server struct {
+ t *testing.T
+ cleanup func() // executed during Shutdown
+ configfile string
+ cmd *exec.Cmd
+ output bytes.Buffer // holds stderr from sshd process
+
+ // Client half of the network connection.
+ clientConn net.Conn
+}
+
+func username() string {
+ var username string
+ if user, err := user.Current(); err == nil {
+ username = user.Username
+ } else {
+ // user.Current() currently requires cgo. If an error is
+ // returned attempt to get the username from the environment.
+ log.Printf("user.Current: %v; falling back on $USER", err)
+ username = os.Getenv("USER")
+ }
+ if username == "" {
+ panic("Unable to get username")
+ }
+ return username
+}
+
+type storedHostKey struct {
+ // keys map from an algorithm string to binary key data.
+ keys map[string][]byte
+
+ // checkCount counts the Check calls. Used for testing
+ // rekeying.
+ checkCount int
+}
+
+func (k *storedHostKey) Add(key ssh.PublicKey) {
+ if k.keys == nil {
+ k.keys = map[string][]byte{}
+ }
+ k.keys[key.Type()] = key.Marshal()
+}
+
+func (k *storedHostKey) Check(addr string, remote net.Addr, key ssh.PublicKey) error {
+ k.checkCount++
+ algo := key.Type()
+
+	if k.keys == nil || !bytes.Equal(key.Marshal(), k.keys[algo]) {
+ return fmt.Errorf("host key mismatch. Got %q, want %q", key, k.keys[algo])
+ }
+ return nil
+}
+
+func hostKeyDB() *storedHostKey {
+ keyChecker := &storedHostKey{}
+ keyChecker.Add(testPublicKeys["ecdsa"])
+ keyChecker.Add(testPublicKeys["rsa"])
+ keyChecker.Add(testPublicKeys["dsa"])
+ return keyChecker
+}
+
+func clientConfig() *ssh.ClientConfig {
+ config := &ssh.ClientConfig{
+ User: username(),
+ Auth: []ssh.AuthMethod{
+ ssh.PublicKeys(testSigners["user"]),
+ },
+ HostKeyCallback: hostKeyDB().Check,
+ }
+ return config
+}
+
+// unixConnection creates two halves of a connected net.UnixConn. It
+// is used for connecting the Go SSH client with sshd without opening
+// ports.
+func unixConnection() (*net.UnixConn, *net.UnixConn, error) {
+ dir, err := ioutil.TempDir("", "unixConnection")
+ if err != nil {
+ return nil, nil, err
+ }
+ defer os.Remove(dir)
+
+ addr := filepath.Join(dir, "ssh")
+ listener, err := net.Listen("unix", addr)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer listener.Close()
+ c1, err := net.Dial("unix", addr)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ c2, err := listener.Accept()
+ if err != nil {
+ c1.Close()
+ return nil, nil, err
+ }
+
+ return c1.(*net.UnixConn), c2.(*net.UnixConn), nil
+}
+
+func (s *server) TryDial(config *ssh.ClientConfig) (*ssh.Client, error) {
+ sshd, err := exec.LookPath("sshd")
+ if err != nil {
+ s.t.Skipf("skipping test: %v", err)
+ }
+
+ c1, c2, err := unixConnection()
+ if err != nil {
+ s.t.Fatalf("unixConnection: %v", err)
+ }
+
+ s.cmd = exec.Command(sshd, "-f", s.configfile, "-i", "-e")
+ f, err := c2.File()
+ if err != nil {
+ s.t.Fatalf("UnixConn.File: %v", err)
+ }
+ defer f.Close()
+ s.cmd.Stdin = f
+ s.cmd.Stdout = f
+ s.cmd.Stderr = &s.output
+ if err := s.cmd.Start(); err != nil {
+ s.t.Fail()
+ s.Shutdown()
+ s.t.Fatalf("s.cmd.Start: %v", err)
+ }
+ s.clientConn = c1
+ conn, chans, reqs, err := ssh.NewClientConn(c1, "", config)
+ if err != nil {
+ return nil, err
+ }
+ return ssh.NewClient(conn, chans, reqs), nil
+}
+
+func (s *server) Dial(config *ssh.ClientConfig) *ssh.Client {
+ conn, err := s.TryDial(config)
+ if err != nil {
+ s.t.Fail()
+ s.Shutdown()
+ s.t.Fatalf("ssh.Client: %v", err)
+ }
+ return conn
+}
+
+func (s *server) Shutdown() {
+ if s.cmd != nil && s.cmd.Process != nil {
+ // Don't check for errors; if it fails it's most
+ // likely "os: process already finished", and we don't
+ // care about that. Use os.Interrupt, so child
+ // processes are killed too.
+ s.cmd.Process.Signal(os.Interrupt)
+ s.cmd.Wait()
+ }
+ if s.t.Failed() {
+ // log any output from sshd process
+ s.t.Logf("sshd: %s", s.output.String())
+ }
+ s.cleanup()
+}
+
+func writeFile(path string, contents []byte) {
+ f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600)
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+ if _, err := f.Write(contents); err != nil {
+ panic(err)
+ }
+}
+
+// newServer returns a new mock ssh server.
+func newServer(t *testing.T) *server {
+ if testing.Short() {
+ t.Skip("skipping test due to -short")
+ }
+ dir, err := ioutil.TempDir("", "sshtest")
+ if err != nil {
+ t.Fatal(err)
+ }
+ f, err := os.Create(filepath.Join(dir, "sshd_config"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = configTmpl.Execute(f, map[string]string{
+ "Dir": dir,
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ f.Close()
+
+ for k, v := range testdata.PEMBytes {
+ filename := "id_" + k
+ writeFile(filepath.Join(dir, filename), v)
+ writeFile(filepath.Join(dir, filename+".pub"), ssh.MarshalAuthorizedKey(testPublicKeys[k]))
+ }
+
+ return &server{
+ t: t,
+ configfile: f.Name(),
+ cleanup: func() {
+ if err := os.RemoveAll(dir); err != nil {
+ t.Error(err)
+ }
+ },
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/testdata_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/testdata_test.go
new file mode 100644
index 00000000000..ae48c7516cb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/test/testdata_test.go
@@ -0,0 +1,64 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places:
+// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
+// instances.
+
+package test
+
+import (
+ "crypto/rand"
+ "fmt"
+
+ "golang.org/x/crypto/ssh"
+ "golang.org/x/crypto/ssh/testdata"
+)
+
+var (
+ testPrivateKeys map[string]interface{}
+ testSigners map[string]ssh.Signer
+ testPublicKeys map[string]ssh.PublicKey
+)
+
+func init() {
+ var err error
+
+ n := len(testdata.PEMBytes)
+ testPrivateKeys = make(map[string]interface{}, n)
+ testSigners = make(map[string]ssh.Signer, n)
+ testPublicKeys = make(map[string]ssh.PublicKey, n)
+ for t, k := range testdata.PEMBytes {
+ testPrivateKeys[t], err = ssh.ParseRawPrivateKey(k)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
+ }
+ testSigners[t], err = ssh.NewSignerFromKey(testPrivateKeys[t])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
+ }
+ testPublicKeys[t] = testSigners[t].PublicKey()
+ }
+
+ // Create a cert and sign it for use in tests.
+ testCert := &ssh.Certificate{
+ Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
+ ValidAfter: 0, // unix epoch
+ ValidBefore: ssh.CertTimeInfinity, // The end of currently representable time.
+ Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ Key: testPublicKeys["ecdsa"],
+ SignatureKey: testPublicKeys["rsa"],
+ Permissions: ssh.Permissions{
+ CriticalOptions: map[string]string{},
+ Extensions: map[string]string{},
+ },
+ }
+ testCert.SignCert(rand.Reader, testSigners["rsa"])
+ testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
+ testSigners["cert"], err = ssh.NewCertSigner(testCert, testSigners["ecdsa"])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata/doc.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata/doc.go
new file mode 100644
index 00000000000..fcae47ca687
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package testdata contains test data shared between the various subpackages
+// of the golang.org/x/crypto/ssh package. Under no circumstances should this
+// data be used for production code.
+package testdata // import "golang.org/x/crypto/ssh/testdata"
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata/keys.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata/keys.go
new file mode 100644
index 00000000000..5ff1c0e0358
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata/keys.go
@@ -0,0 +1,43 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package testdata
+
+var PEMBytes = map[string][]byte{
+ "dsa": []byte(`-----BEGIN DSA PRIVATE KEY-----
+MIIBuwIBAAKBgQD6PDSEyXiI9jfNs97WuM46MSDCYlOqWw80ajN16AohtBncs1YB
+lHk//dQOvCYOsYaE+gNix2jtoRjwXhDsc25/IqQbU1ahb7mB8/rsaILRGIbA5WH3
+EgFtJmXFovDz3if6F6TzvhFpHgJRmLYVR8cqsezL3hEZOvvs2iH7MorkxwIVAJHD
+nD82+lxh2fb4PMsIiaXudAsBAoGAQRf7Q/iaPRn43ZquUhd6WwvirqUj+tkIu6eV
+2nZWYmXLlqFQKEy4Tejl7Wkyzr2OSYvbXLzo7TNxLKoWor6ips0phYPPMyXld14r
+juhT24CrhOzuLMhDduMDi032wDIZG4Y+K7ElU8Oufn8Sj5Wge8r6ANmmVgmFfynr
+FhdYCngCgYEA3ucGJ93/Mx4q4eKRDxcWD3QzWyqpbRVRRV1Vmih9Ha/qC994nJFz
+DQIdjxDIT2Rk2AGzMqFEB68Zc3O+Wcsmz5eWWzEwFxaTwOGWTyDqsDRLm3fD+QYj
+nOwuxb0Kce+gWI8voWcqC9cyRm09jGzu2Ab3Bhtpg8JJ8L7gS3MRZK4CFEx4UAfY
+Fmsr0W6fHB9nhS4/UXM8
+-----END DSA PRIVATE KEY-----
+`),
+ "ecdsa": []byte(`-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEINGWx0zo6fhJ/0EAfrPzVFyFC9s18lBt3cRoEDhS3ARooAoGCCqGSM49
+AwEHoUQDQgAEi9Hdw6KvZcWxfg2IDhA7UkpDtzzt6ZqJXSsFdLd+Kx4S3Sx4cVO+
+6/ZOXRnPmNAlLUqjShUsUBBngG0u2fqEqA==
+-----END EC PRIVATE KEY-----
+`),
+ "rsa": []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIBOwIBAAJBALdGZxkXDAjsYk10ihwU6Id2KeILz1TAJuoq4tOgDWxEEGeTrcld
+r/ZwVaFzjWzxaf6zQIJbfaSEAhqD5yo72+sCAwEAAQJBAK8PEVU23Wj8mV0QjwcJ
+tZ4GcTUYQL7cF4+ezTCE9a1NrGnCP2RuQkHEKxuTVrxXt+6OF15/1/fuXnxKjmJC
+nxkCIQDaXvPPBi0c7vAxGwNY9726x01/dNbHCE0CBtcotobxpwIhANbbQbh3JHVW
+2haQh4fAG5mhesZKAGcxTyv4mQ7uMSQdAiAj+4dzMpJWdSzQ+qGHlHMIBvVHLkqB
+y2VdEyF7DPCZewIhAI7GOI/6LDIFOvtPo6Bj2nNmyQ1HU6k/LRtNIXi4c9NJAiAr
+rrxx26itVhJmcvoUhOjwuzSlP2bE5VHAvkGB352YBg==
+-----END RSA PRIVATE KEY-----
+`),
+ "user": []byte(`-----BEGIN EC PRIVATE KEY-----
+MHcCAQEEILYCAeq8f7V4vSSypRw7pxy8yz3V5W4qg8kSC3zJhqpQoAoGCCqGSM49
+AwEHoUQDQgAEYcO2xNKiRUYOLEHM7VYAp57HNyKbOdYtHD83Z4hzNPVC4tM5mdGD
+PLL8IEwvYu2wq+lpXfGQnNMbzYf9gspG0w==
+-----END EC PRIVATE KEY-----
+`),
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata_test.go
new file mode 100644
index 00000000000..f2828c1b5fb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/testdata_test.go
@@ -0,0 +1,63 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// IMPLEMENTOR NOTE: To avoid a package loop, this file is in three places:
+// ssh/, ssh/agent, and ssh/test/. It should be kept in sync across all three
+// instances.
+
+package ssh
+
+import (
+ "crypto/rand"
+ "fmt"
+
+ "golang.org/x/crypto/ssh/testdata"
+)
+
+var (
+ testPrivateKeys map[string]interface{}
+ testSigners map[string]Signer
+ testPublicKeys map[string]PublicKey
+)
+
+func init() {
+ var err error
+
+ n := len(testdata.PEMBytes)
+ testPrivateKeys = make(map[string]interface{}, n)
+ testSigners = make(map[string]Signer, n)
+ testPublicKeys = make(map[string]PublicKey, n)
+ for t, k := range testdata.PEMBytes {
+ testPrivateKeys[t], err = ParseRawPrivateKey(k)
+ if err != nil {
+ panic(fmt.Sprintf("Unable to parse test key %s: %v", t, err))
+ }
+ testSigners[t], err = NewSignerFromKey(testPrivateKeys[t])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create signer for test key %s: %v", t, err))
+ }
+ testPublicKeys[t] = testSigners[t].PublicKey()
+ }
+
+ // Create a cert and sign it for use in tests.
+ testCert := &Certificate{
+ Nonce: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ ValidPrincipals: []string{"gopher1", "gopher2"}, // increases test coverage
+ ValidAfter: 0, // unix epoch
+ ValidBefore: CertTimeInfinity, // The end of currently representable time.
+ Reserved: []byte{}, // To pass reflect.DeepEqual after marshal & parse, this must be non-nil
+ Key: testPublicKeys["ecdsa"],
+ SignatureKey: testPublicKeys["rsa"],
+ Permissions: Permissions{
+ CriticalOptions: map[string]string{},
+ Extensions: map[string]string{},
+ },
+ }
+ testCert.SignCert(rand.Reader, testSigners["rsa"])
+ testPrivateKeys["cert"] = testPrivateKeys["ecdsa"]
+ testSigners["cert"], err = NewCertSigner(testCert, testSigners["ecdsa"])
+ if err != nil {
+ panic(fmt.Sprintf("Unable to create certificate signer: %v", err))
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/transport.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/transport.go
new file mode 100644
index 00000000000..8351d378e7e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/transport.go
@@ -0,0 +1,332 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bufio"
+ "errors"
+ "io"
+)
+
+const (
+ gcmCipherID = "aes128-gcm@openssh.com"
+ aes128cbcID = "aes128-cbc"
+)
+
+// packetConn represents a transport that implements packet based
+// operations.
+type packetConn interface {
+ // Encrypt and send a packet of data to the remote peer.
+ writePacket(packet []byte) error
+
+ // Read a packet from the connection
+ readPacket() ([]byte, error)
+
+ // Close closes the write-side of the connection.
+ Close() error
+}
+
+// transport is the keyingTransport that implements the SSH packet
+// protocol.
+type transport struct {
+ reader connectionState
+ writer connectionState
+
+ bufReader *bufio.Reader
+ bufWriter *bufio.Writer
+ rand io.Reader
+
+ io.Closer
+
+ // Initial H used for the session ID. Once assigned this does
+ // not change, even during subsequent key exchanges.
+ sessionID []byte
+}
+
+// getSessionID returns the ID of the SSH connection. The return value
+// should not be modified.
+func (t *transport) getSessionID() []byte {
+ if t.sessionID == nil {
+ panic("session ID not set yet")
+ }
+ return t.sessionID
+}
+
+// packetCipher represents a combination of SSH encryption/MAC
+// protocol. A single instance should be used for one direction only.
+type packetCipher interface {
+ // writePacket encrypts the packet and writes it to w. The
+ // contents of the packet are generally scrambled.
+ writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error
+
+ // readPacket reads and decrypts a packet of data. The
+ // returned packet may be overwritten by future calls of
+ // readPacket.
+ readPacket(seqnum uint32, r io.Reader) ([]byte, error)
+}
+
+// connectionState represents one side (read or write) of the
+// connection. This is necessary because each direction has its own
+// keys, and can even have its own algorithms.
+type connectionState struct {
+ packetCipher
+ seqNum uint32
+ dir direction
+ pendingKeyChange chan packetCipher
+}
+
+// prepareKeyChange sets up key material for a keychange. The key changes in
+// both directions are triggered by reading and writing a msgNewKey packet
+// respectively.
+func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error {
+ if t.sessionID == nil {
+ t.sessionID = kexResult.H
+ }
+
+ kexResult.SessionID = t.sessionID
+
+ if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil {
+ return err
+ } else {
+ t.reader.pendingKeyChange <- ciph
+ }
+
+ if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil {
+ return err
+ } else {
+ t.writer.pendingKeyChange <- ciph
+ }
+
+ return nil
+}
+
+// Read and decrypt next packet.
+func (t *transport) readPacket() ([]byte, error) {
+ return t.reader.readPacket(t.bufReader)
+}
+
+func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
+ packet, err := s.packetCipher.readPacket(s.seqNum, r)
+ s.seqNum++
+ if err == nil && len(packet) == 0 {
+ err = errors.New("ssh: zero length packet")
+ }
+
+ if len(packet) > 0 && packet[0] == msgNewKeys {
+ select {
+ case cipher := <-s.pendingKeyChange:
+ s.packetCipher = cipher
+ default:
+ return nil, errors.New("ssh: got bogus newkeys message.")
+ }
+ }
+
+ // The packet may point to an internal buffer, so copy the
+ // packet out here.
+ fresh := make([]byte, len(packet))
+ copy(fresh, packet)
+
+ return fresh, err
+}
+
+func (t *transport) writePacket(packet []byte) error {
+ return t.writer.writePacket(t.bufWriter, t.rand, packet)
+}
+
+func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
+ changeKeys := len(packet) > 0 && packet[0] == msgNewKeys
+
+ err := s.packetCipher.writePacket(s.seqNum, w, rand, packet)
+ if err != nil {
+ return err
+ }
+ if err = w.Flush(); err != nil {
+ return err
+ }
+ s.seqNum++
+ if changeKeys {
+ select {
+ case cipher := <-s.pendingKeyChange:
+ s.packetCipher = cipher
+ default:
+ panic("ssh: no key material for msgNewKeys")
+ }
+ }
+ return err
+}
+
+func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport {
+ t := &transport{
+ bufReader: bufio.NewReader(rwc),
+ bufWriter: bufio.NewWriter(rwc),
+ rand: rand,
+ reader: connectionState{
+ packetCipher: &streamPacketCipher{cipher: noneCipher{}},
+ pendingKeyChange: make(chan packetCipher, 1),
+ },
+ writer: connectionState{
+ packetCipher: &streamPacketCipher{cipher: noneCipher{}},
+ pendingKeyChange: make(chan packetCipher, 1),
+ },
+ Closer: rwc,
+ }
+ if isClient {
+ t.reader.dir = serverKeys
+ t.writer.dir = clientKeys
+ } else {
+ t.reader.dir = clientKeys
+ t.writer.dir = serverKeys
+ }
+
+ return t
+}
+
+type direction struct {
+ ivTag []byte
+ keyTag []byte
+ macKeyTag []byte
+}
+
+var (
+ serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}}
+ clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
+)
+
+// generateKeys generates key material for IV, MAC and encryption.
+func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) {
+ cipherMode := cipherModes[algs.Cipher]
+ macMode := macModes[algs.MAC]
+
+ iv = make([]byte, cipherMode.ivSize)
+ key = make([]byte, cipherMode.keySize)
+ macKey = make([]byte, macMode.keySize)
+
+ generateKeyMaterial(iv, d.ivTag, kex)
+ generateKeyMaterial(key, d.keyTag, kex)
+ generateKeyMaterial(macKey, d.macKeyTag, kex)
+ return
+}
+
+// newPacketCipher returns a packetCipher keyed from kex.K, kex.H and the
+// session ID, as described in RFC 4253, section 6.4. d should either be
+// serverKeys (to set up server->client keys) or clientKeys (for
+// client->server keys).
+func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
+ iv, key, macKey := generateKeys(d, algs, kex)
+
+ if algs.Cipher == gcmCipherID {
+ return newGCMCipher(iv, key, macKey)
+ }
+
+ if algs.Cipher == aes128cbcID {
+ return newAESCBCCipher(iv, key, macKey, algs)
+ }
+
+ c := &streamPacketCipher{
+ mac: macModes[algs.MAC].new(macKey),
+ }
+ c.macResult = make([]byte, c.mac.Size())
+
+ var err error
+ c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv)
+ if err != nil {
+ return nil, err
+ }
+
+ return c, nil
+}
+
+// generateKeyMaterial fills out with key material derived from tag, K, H and
+// the session ID, as specified in RFC 4253, section 7.2.
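+// Per that section, the first digest is HASH(K || H || tag || session_id),
+// and each subsequent digest is HASH(K || H || digests-so-far); the
+// concatenated digests are truncated to len(out).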
+func generateKeyMaterial(out, tag []byte, r *kexResult) {
+ var digestsSoFar []byte
+
+ h := r.Hash.New()
+ for len(out) > 0 {
+ h.Reset()
+ h.Write(r.K)
+ h.Write(r.H)
+
+ if len(digestsSoFar) == 0 {
+ h.Write(tag)
+ h.Write(r.SessionID)
+ } else {
+ h.Write(digestsSoFar)
+ }
+
+ digest := h.Sum(nil)
+ n := copy(out, digest)
+ out = out[n:]
+ if len(out) > 0 {
+ digestsSoFar = append(digestsSoFar, digest...)
+ }
+ }
+}
+
+const packageVersion = "SSH-2.0-Go"
+
+// Sends and receives a version line. The versionLine string should
+// be US ASCII, start with "SSH-2.0-", and should not include a
+// newline. exchangeVersions returns the other side's version line.
+func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) {
+ // Contrary to the RFC, we do not ignore lines that don't
+ // start with "SSH-2.0-" to make the library usable with
+ // nonconforming servers.
+ for _, c := range versionLine {
+		// The spec disallows non-US-ASCII chars and specifically
+		// forbids null chars; we reject all control characters here.
+ if c < 32 {
+ return nil, errors.New("ssh: junk character in version line")
+ }
+ }
+ if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil {
+ return
+ }
+
+ them, err = readVersion(rw)
+ return them, err
+}
+
+// maxVersionStringBytes is the maximum number of bytes that we'll
+// accept as a version string. RFC 4253 section 4.2 limits this at 255
+// chars
+const maxVersionStringBytes = 255
+
+// Read version string as specified by RFC 4253, section 4.2.
+func readVersion(r io.Reader) ([]byte, error) {
+ versionString := make([]byte, 0, 64)
+ var ok bool
+ var buf [1]byte
+
+ for len(versionString) < maxVersionStringBytes {
+ _, err := io.ReadFull(r, buf[:])
+ if err != nil {
+ return nil, err
+ }
+ // The RFC says that the version should be terminated with \r\n
+ // but several SSH servers actually only send a \n.
+ if buf[0] == '\n' {
+ ok = true
+ break
+ }
+
+		// Non-ASCII chars are disallowed by the spec, but we are
+		// lenient and accept them, since Go doesn't use
+		// null-terminated strings.
+
+ // The RFC allows a comment after a space, however,
+ // all of it (version and comments) goes into the
+ // session hash.
+ versionString = append(versionString, buf[0])
+ }
+
+ if !ok {
+ return nil, errors.New("ssh: overflow reading version string")
+ }
+
+ // There might be a '\r' on the end which we should remove.
+ if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' {
+ versionString = versionString[:len(versionString)-1]
+ }
+ return versionString, nil
+}
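+
+// Illustrative sketch, not part of the upstream file: how a caller is
+// expected to combine the two helpers above. The handshake code
+// performs the equivalent of this when a connection is set up; the
+// function name here is hypothetical, for illustration only.
+func greet(rw io.ReadWriter) (them []byte, err error) {
+	// Send our version line and collect the peer's, per RFC 4253
+	// section 4.2.
+	return exchangeVersions(rw, []byte(packageVersion))
+}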
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/transport_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/transport_test.go
new file mode 100644
index 00000000000..92d83abf93f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/ssh/transport_test.go
@@ -0,0 +1,109 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/binary"
+ "strings"
+ "testing"
+)
+
+func TestReadVersion(t *testing.T) {
+ longversion := strings.Repeat("SSH-2.0-bla", 50)[:253]
+ cases := map[string]string{
+ "SSH-2.0-bla\r\n": "SSH-2.0-bla",
+ "SSH-2.0-bla\n": "SSH-2.0-bla",
+ longversion + "\r\n": longversion,
+ }
+
+ for in, want := range cases {
+ result, err := readVersion(bytes.NewBufferString(in))
+ if err != nil {
+ t.Errorf("readVersion(%q): %s", in, err)
+ }
+ got := string(result)
+ if got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+ }
+}
+
+func TestReadVersionError(t *testing.T) {
+ longversion := strings.Repeat("SSH-2.0-bla", 50)[:253]
+ cases := []string{
+ longversion + "too-long\r\n",
+ }
+ for _, in := range cases {
+ if _, err := readVersion(bytes.NewBufferString(in)); err == nil {
+ t.Errorf("readVersion(%q) should have failed", in)
+ }
+ }
+}
+
+func TestExchangeVersionsBasic(t *testing.T) {
+ v := "SSH-2.0-bla"
+ buf := bytes.NewBufferString(v + "\r\n")
+ them, err := exchangeVersions(buf, []byte("xyz"))
+ if err != nil {
+ t.Errorf("exchangeVersions: %v", err)
+ }
+
+ if want := "SSH-2.0-bla"; string(them) != want {
+ t.Errorf("got %q want %q for our version", them, want)
+ }
+}
+
+func TestExchangeVersions(t *testing.T) {
+ cases := []string{
+ "not\x000allowed",
+ "not allowed\n",
+ }
+ for _, c := range cases {
+ buf := bytes.NewBufferString("SSH-2.0-bla\r\n")
+ if _, err := exchangeVersions(buf, []byte(c)); err == nil {
+ t.Errorf("exchangeVersions(%q): should have failed", c)
+ }
+ }
+}
+
+type closerBuffer struct {
+ bytes.Buffer
+}
+
+func (b *closerBuffer) Close() error {
+ return nil
+}
+
+func TestTransportMaxPacketWrite(t *testing.T) {
+ buf := &closerBuffer{}
+ tr := newTransport(buf, rand.Reader, true)
+ huge := make([]byte, maxPacket+1)
+ err := tr.writePacket(huge)
+ if err == nil {
+ t.Errorf("transport accepted write for a huge packet.")
+ }
+}
+
+func TestTransportMaxPacketReader(t *testing.T) {
+ var header [5]byte
+ huge := make([]byte, maxPacket+128)
+ binary.BigEndian.PutUint32(header[0:], uint32(len(huge)))
+ // padding.
+ header[4] = 0
+
+ buf := &closerBuffer{}
+ buf.Write(header[:])
+ buf.Write(huge)
+
+ tr := newTransport(buf, rand.Reader, true)
+ _, err := tr.readPacket()
+ if err == nil {
+ t.Errorf("transport succeeded reading huge packet.")
+ } else if !strings.Contains(err.Error(), "large") {
+ t.Errorf("got %q, should mention %q", err.Error(), "large")
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/tea/cipher.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/tea/cipher.go
new file mode 100644
index 00000000000..9c13d12a22c
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/tea/cipher.go
@@ -0,0 +1,109 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tea implements the TEA algorithm, as defined in Needham and
+// Wheeler's 1994 technical report, “TEA, a Tiny Encryption Algorithm”. See
+// http://www.cix.co.uk/~klockstone/tea.pdf for details.
+package tea
+
+import (
+ "crypto/cipher"
+ "encoding/binary"
+ "errors"
+)
+
+const (
+ // BlockSize is the size of a TEA block, in bytes.
+ BlockSize = 8
+
+ // KeySize is the size of a TEA key, in bytes.
+ KeySize = 16
+
+ // delta is the TEA key schedule constant.
+ delta = 0x9e3779b9
+
+ // numRounds is the standard number of rounds in TEA.
+ numRounds = 64
+)
+
+// tea is an instance of the TEA cipher with a particular key.
+type tea struct {
+ key [16]byte
+ rounds int
+}
+
+// NewCipher returns an instance of the TEA cipher with the standard number of
+// rounds. The key argument must be 16 bytes long.
+func NewCipher(key []byte) (cipher.Block, error) {
+ return NewCipherWithRounds(key, numRounds)
+}
+
+// NewCipherWithRounds returns an instance of the TEA cipher with a given
+// number of rounds, which must be even. The key argument must be 16 bytes
+// long.
+func NewCipherWithRounds(key []byte, rounds int) (cipher.Block, error) {
+ if len(key) != 16 {
+ return nil, errors.New("tea: incorrect key size")
+ }
+
+ if rounds&1 != 0 {
+ return nil, errors.New("tea: odd number of rounds specified")
+ }
+
+ c := &tea{
+ rounds: rounds,
+ }
+ copy(c.key[:], key)
+
+ return c, nil
+}
+
+// BlockSize returns the TEA block size, which is eight bytes. It is necessary
+// to satisfy the Block interface in the package "crypto/cipher".
+func (*tea) BlockSize() int {
+ return BlockSize
+}
+
+// Encrypt encrypts the 8 byte buffer src using the key in t and stores the
+// result in dst. Note that for amounts of data larger than a block, it is not
+// safe to just call Encrypt on successive blocks; instead, use an encryption
+// mode like CBC (see crypto/cipher/cbc.go).
+func (t *tea) Encrypt(dst, src []byte) {
+ e := binary.BigEndian
+ v0, v1 := e.Uint32(src), e.Uint32(src[4:])
+ k0, k1, k2, k3 := e.Uint32(t.key[0:]), e.Uint32(t.key[4:]), e.Uint32(t.key[8:]), e.Uint32(t.key[12:])
+
+ sum := uint32(0)
+ delta := uint32(delta)
+
+ for i := 0; i < t.rounds/2; i++ {
+ sum += delta
+ v0 += ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1)
+ v1 += ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3)
+ }
+
+ e.PutUint32(dst, v0)
+ e.PutUint32(dst[4:], v1)
+}
+
+// Decrypt decrypts the 8 byte buffer src using the key in t and stores the
+// result in dst.
+func (t *tea) Decrypt(dst, src []byte) {
+ e := binary.BigEndian
+ v0, v1 := e.Uint32(src), e.Uint32(src[4:])
+ k0, k1, k2, k3 := e.Uint32(t.key[0:]), e.Uint32(t.key[4:]), e.Uint32(t.key[8:]), e.Uint32(t.key[12:])
+
+ delta := uint32(delta)
+ sum := delta * uint32(t.rounds/2) // in general, sum = delta * n
+
+ for i := 0; i < t.rounds/2; i++ {
+ v1 -= ((v0 << 4) + k2) ^ (v0 + sum) ^ ((v0 >> 5) + k3)
+ v0 -= ((v1 << 4) + k0) ^ (v1 + sum) ^ ((v1 >> 5) + k1)
+ sum -= delta
+ }
+
+ e.PutUint32(dst, v0)
+ e.PutUint32(dst[4:], v1)
+}
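+
+// Illustrative sketch, not part of the upstream file: Encrypt and
+// Decrypt are single-block primitives (see the notes above), so
+// longer inputs should go through a mode from crypto/cipher. This
+// hypothetical helper applies CBC; key must be KeySize bytes, iv
+// must be BlockSize bytes, and plaintext a multiple of BlockSize.
+func encryptCBC(key, iv, plaintext []byte) ([]byte, error) {
+	c, err := NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+	dst := make([]byte, len(plaintext))
+	cipher.NewCBCEncrypter(c, iv).CryptBlocks(dst, plaintext)
+	return dst, nil
+}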
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/tea/tea_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/tea/tea_test.go
new file mode 100644
index 00000000000..eb98d1e0e03
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/tea/tea_test.go
@@ -0,0 +1,93 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tea
+
+import (
+ "bytes"
+ "testing"
+)
+
+// A sample test key for when we just want to initialize a cipher
+var testKey = []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF}
+
+// Test that the block size for tea is correct
+func TestBlocksize(t *testing.T) {
+ c, err := NewCipher(testKey)
+ if err != nil {
+ t.Fatalf("NewCipher returned error: %s", err)
+ }
+
+ if result := c.BlockSize(); result != BlockSize {
+ t.Errorf("cipher.BlockSize returned %d, but expected %d", result, BlockSize)
+ }
+}
+
+// Test that invalid key sizes return an error
+func TestInvalidKeySize(t *testing.T) {
+ var key [KeySize + 1]byte
+
+ if _, err := NewCipher(key[:]); err == nil {
+ t.Errorf("invalid key size %d didn't result in an error.", len(key))
+ }
+
+ if _, err := NewCipher(key[:KeySize-1]); err == nil {
+ t.Errorf("invalid key size %d didn't result in an error.", KeySize-1)
+ }
+}
+
+// Test Vectors
+type teaTest struct {
+ rounds int
+ key []byte
+ plaintext []byte
+ ciphertext []byte
+}
+
+var teaTests = []teaTest{
+ // These were sourced from https://github.com/froydnj/ironclad/blob/master/testing/test-vectors/tea.testvec
+ {
+ numRounds,
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x41, 0xea, 0x3a, 0x0a, 0x94, 0xba, 0xa9, 0x40},
+ },
+ {
+ numRounds,
+ []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ []byte{0x31, 0x9b, 0xbe, 0xfb, 0x01, 0x6a, 0xbd, 0xb2},
+ },
+ {
+ 16,
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0xed, 0x28, 0x5d, 0xa1, 0x45, 0x5b, 0x33, 0xc1},
+ },
+}
+
+// Test encryption
+func TestCipherEncrypt(t *testing.T) {
+ // Test encryption with standard 64 rounds
+ for i, test := range teaTests {
+ c, err := NewCipherWithRounds(test.key, test.rounds)
+ if err != nil {
+ t.Fatalf("#%d: NewCipher returned error: %s", i, err)
+ }
+
+ var ciphertext [BlockSize]byte
+ c.Encrypt(ciphertext[:], test.plaintext)
+
+ if !bytes.Equal(ciphertext[:], test.ciphertext) {
+ t.Errorf("#%d: incorrect ciphertext. Got %x, wanted %x", i, ciphertext, test.ciphertext)
+ }
+
+ var plaintext2 [BlockSize]byte
+ c.Decrypt(plaintext2[:], ciphertext[:])
+
+ if !bytes.Equal(plaintext2[:], test.plaintext) {
+ t.Errorf("#%d: incorrect plaintext. Got %x, wanted %x", i, plaintext2, test.plaintext)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/twofish/twofish.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/twofish/twofish.go
new file mode 100644
index 00000000000..376fa0ec2d9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/twofish/twofish.go
@@ -0,0 +1,342 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package twofish implements Bruce Schneier's Twofish encryption algorithm.
+package twofish // import "golang.org/x/crypto/twofish"
+
+// Twofish is defined in http://www.schneier.com/paper-twofish-paper.pdf [TWOFISH]
+
+// This code is a port of the LibTom C implementation.
+// See http://libtom.org/?page=features&newsitems=5&whatfile=crypt.
+// LibTomCrypt is free for all purposes under the public domain.
+// It was heavily inspired by the Go blowfish package.
+
+import "strconv"
+
+// BlockSize is the constant block size of Twofish.
+const BlockSize = 16
+
+const mdsPolynomial = 0x169 // x^8 + x^6 + x^5 + x^3 + 1, see [TWOFISH] 4.2
+const rsPolynomial = 0x14d // x^8 + x^6 + x^3 + x^2 + 1, see [TWOFISH] 4.3
+
+// A Cipher is an instance of Twofish encryption using a particular key.
+type Cipher struct {
+ s [4][256]uint32
+ k [40]uint32
+}
+
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+ return "crypto/twofish: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a Cipher.
+// The key argument should be the Twofish key, 16, 24 or 32 bytes.
+func NewCipher(key []byte) (*Cipher, error) {
+ keylen := len(key)
+
+ if keylen != 16 && keylen != 24 && keylen != 32 {
+ return nil, KeySizeError(keylen)
+ }
+
+	// k is the number of 64-bit words in the key.
+ k := keylen / 8
+
+ // Create the S[..] words
+ var S [4 * 4]byte
+ for i := 0; i < k; i++ {
+ // Computes [y0 y1 y2 y3] = rs . [x0 x1 x2 x3 x4 x5 x6 x7]
+ for j, rsRow := range rs {
+ for k, rsVal := range rsRow {
+ S[4*i+j] ^= gfMult(key[8*i+k], rsVal, rsPolynomial)
+ }
+ }
+ }
+
+ // Calculate subkeys
+ c := new(Cipher)
+ var tmp [4]byte
+ for i := byte(0); i < 20; i++ {
+ // A = h(p * 2x, Me)
+ for j := range tmp {
+ tmp[j] = 2 * i
+ }
+ A := h(tmp[:], key, 0)
+
+ // B = rolc(h(p * (2x + 1), Mo), 8)
+ for j := range tmp {
+ tmp[j] = 2*i + 1
+ }
+ B := h(tmp[:], key, 1)
+ B = rol(B, 8)
+
+ c.k[2*i] = A + B
+
+ // K[2i+1] = (A + 2B) <<< 9
+ c.k[2*i+1] = rol(2*B+A, 9)
+ }
+
+ // Calculate sboxes
+ switch k {
+ case 2:
+ for i := range c.s[0] {
+ c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][byte(i)]^S[0]]^S[4]], 0)
+ c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][byte(i)]^S[1]]^S[5]], 1)
+ c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][byte(i)]^S[2]]^S[6]], 2)
+ c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][byte(i)]^S[3]]^S[7]], 3)
+ }
+ case 3:
+ for i := range c.s[0] {
+ c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]], 0)
+ c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[1]]^S[5]]^S[9]], 1)
+ c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]], 2)
+ c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[3]]^S[7]]^S[11]], 3)
+ }
+ default:
+ for i := range c.s[0] {
+ c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]]^S[12]], 0)
+ c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[1]]^S[5]]^S[9]]^S[13]], 1)
+ c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]]^S[14]], 2)
+ c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][sbox[1][byte(i)]^S[3]]^S[7]]^S[11]]^S[15]], 3)
+ }
+ }
+
+ return c, nil
+}
+
+// BlockSize returns the Twofish block size, 16 bytes.
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// store32l stores src in dst in little-endian form.
+func store32l(dst []byte, src uint32) {
+ dst[0] = byte(src)
+ dst[1] = byte(src >> 8)
+ dst[2] = byte(src >> 16)
+ dst[3] = byte(src >> 24)
+}
+
+// load32l reads a little-endian uint32 from src.
+func load32l(src []byte) uint32 {
+ return uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24
+}
+
+// rol returns x after a left circular rotation of y bits.
+func rol(x, y uint32) uint32 {
+ return (x << (y & 31)) | (x >> (32 - (y & 31)))
+}
+
+// ror returns x after a right circular rotation of y bits.
+func ror(x, y uint32) uint32 {
+ return (x >> (y & 31)) | (x << (32 - (y & 31)))
+}
+
+// The RS matrix. See [TWOFISH] 4.3
+var rs = [4][8]byte{
+ {0x01, 0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E},
+ {0xA4, 0x56, 0x82, 0xF3, 0x1E, 0xC6, 0x68, 0xE5},
+ {0x02, 0xA1, 0xFC, 0xC1, 0x47, 0xAE, 0x3D, 0x19},
+ {0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E, 0x03},
+}
+
+// sbox tables
+var sbox = [2][256]byte{
+ {
+ 0xa9, 0x67, 0xb3, 0xe8, 0x04, 0xfd, 0xa3, 0x76, 0x9a, 0x92, 0x80, 0x78, 0xe4, 0xdd, 0xd1, 0x38,
+ 0x0d, 0xc6, 0x35, 0x98, 0x18, 0xf7, 0xec, 0x6c, 0x43, 0x75, 0x37, 0x26, 0xfa, 0x13, 0x94, 0x48,
+ 0xf2, 0xd0, 0x8b, 0x30, 0x84, 0x54, 0xdf, 0x23, 0x19, 0x5b, 0x3d, 0x59, 0xf3, 0xae, 0xa2, 0x82,
+ 0x63, 0x01, 0x83, 0x2e, 0xd9, 0x51, 0x9b, 0x7c, 0xa6, 0xeb, 0xa5, 0xbe, 0x16, 0x0c, 0xe3, 0x61,
+ 0xc0, 0x8c, 0x3a, 0xf5, 0x73, 0x2c, 0x25, 0x0b, 0xbb, 0x4e, 0x89, 0x6b, 0x53, 0x6a, 0xb4, 0xf1,
+ 0xe1, 0xe6, 0xbd, 0x45, 0xe2, 0xf4, 0xb6, 0x66, 0xcc, 0x95, 0x03, 0x56, 0xd4, 0x1c, 0x1e, 0xd7,
+ 0xfb, 0xc3, 0x8e, 0xb5, 0xe9, 0xcf, 0xbf, 0xba, 0xea, 0x77, 0x39, 0xaf, 0x33, 0xc9, 0x62, 0x71,
+ 0x81, 0x79, 0x09, 0xad, 0x24, 0xcd, 0xf9, 0xd8, 0xe5, 0xc5, 0xb9, 0x4d, 0x44, 0x08, 0x86, 0xe7,
+ 0xa1, 0x1d, 0xaa, 0xed, 0x06, 0x70, 0xb2, 0xd2, 0x41, 0x7b, 0xa0, 0x11, 0x31, 0xc2, 0x27, 0x90,
+ 0x20, 0xf6, 0x60, 0xff, 0x96, 0x5c, 0xb1, 0xab, 0x9e, 0x9c, 0x52, 0x1b, 0x5f, 0x93, 0x0a, 0xef,
+ 0x91, 0x85, 0x49, 0xee, 0x2d, 0x4f, 0x8f, 0x3b, 0x47, 0x87, 0x6d, 0x46, 0xd6, 0x3e, 0x69, 0x64,
+ 0x2a, 0xce, 0xcb, 0x2f, 0xfc, 0x97, 0x05, 0x7a, 0xac, 0x7f, 0xd5, 0x1a, 0x4b, 0x0e, 0xa7, 0x5a,
+ 0x28, 0x14, 0x3f, 0x29, 0x88, 0x3c, 0x4c, 0x02, 0xb8, 0xda, 0xb0, 0x17, 0x55, 0x1f, 0x8a, 0x7d,
+ 0x57, 0xc7, 0x8d, 0x74, 0xb7, 0xc4, 0x9f, 0x72, 0x7e, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34,
+ 0x6e, 0x50, 0xde, 0x68, 0x65, 0xbc, 0xdb, 0xf8, 0xc8, 0xa8, 0x2b, 0x40, 0xdc, 0xfe, 0x32, 0xa4,
+ 0xca, 0x10, 0x21, 0xf0, 0xd3, 0x5d, 0x0f, 0x00, 0x6f, 0x9d, 0x36, 0x42, 0x4a, 0x5e, 0xc1, 0xe0,
+ },
+ {
+ 0x75, 0xf3, 0xc6, 0xf4, 0xdb, 0x7b, 0xfb, 0xc8, 0x4a, 0xd3, 0xe6, 0x6b, 0x45, 0x7d, 0xe8, 0x4b,
+ 0xd6, 0x32, 0xd8, 0xfd, 0x37, 0x71, 0xf1, 0xe1, 0x30, 0x0f, 0xf8, 0x1b, 0x87, 0xfa, 0x06, 0x3f,
+ 0x5e, 0xba, 0xae, 0x5b, 0x8a, 0x00, 0xbc, 0x9d, 0x6d, 0xc1, 0xb1, 0x0e, 0x80, 0x5d, 0xd2, 0xd5,
+ 0xa0, 0x84, 0x07, 0x14, 0xb5, 0x90, 0x2c, 0xa3, 0xb2, 0x73, 0x4c, 0x54, 0x92, 0x74, 0x36, 0x51,
+ 0x38, 0xb0, 0xbd, 0x5a, 0xfc, 0x60, 0x62, 0x96, 0x6c, 0x42, 0xf7, 0x10, 0x7c, 0x28, 0x27, 0x8c,
+ 0x13, 0x95, 0x9c, 0xc7, 0x24, 0x46, 0x3b, 0x70, 0xca, 0xe3, 0x85, 0xcb, 0x11, 0xd0, 0x93, 0xb8,
+ 0xa6, 0x83, 0x20, 0xff, 0x9f, 0x77, 0xc3, 0xcc, 0x03, 0x6f, 0x08, 0xbf, 0x40, 0xe7, 0x2b, 0xe2,
+ 0x79, 0x0c, 0xaa, 0x82, 0x41, 0x3a, 0xea, 0xb9, 0xe4, 0x9a, 0xa4, 0x97, 0x7e, 0xda, 0x7a, 0x17,
+ 0x66, 0x94, 0xa1, 0x1d, 0x3d, 0xf0, 0xde, 0xb3, 0x0b, 0x72, 0xa7, 0x1c, 0xef, 0xd1, 0x53, 0x3e,
+ 0x8f, 0x33, 0x26, 0x5f, 0xec, 0x76, 0x2a, 0x49, 0x81, 0x88, 0xee, 0x21, 0xc4, 0x1a, 0xeb, 0xd9,
+ 0xc5, 0x39, 0x99, 0xcd, 0xad, 0x31, 0x8b, 0x01, 0x18, 0x23, 0xdd, 0x1f, 0x4e, 0x2d, 0xf9, 0x48,
+ 0x4f, 0xf2, 0x65, 0x8e, 0x78, 0x5c, 0x58, 0x19, 0x8d, 0xe5, 0x98, 0x57, 0x67, 0x7f, 0x05, 0x64,
+ 0xaf, 0x63, 0xb6, 0xfe, 0xf5, 0xb7, 0x3c, 0xa5, 0xce, 0xe9, 0x68, 0x44, 0xe0, 0x4d, 0x43, 0x69,
+ 0x29, 0x2e, 0xac, 0x15, 0x59, 0xa8, 0x0a, 0x9e, 0x6e, 0x47, 0xdf, 0x34, 0x35, 0x6a, 0xcf, 0xdc,
+ 0x22, 0xc9, 0xc0, 0x9b, 0x89, 0xd4, 0xed, 0xab, 0x12, 0xa2, 0x0d, 0x52, 0xbb, 0x02, 0x2f, 0xa9,
+ 0xd7, 0x61, 0x1e, 0xb4, 0x50, 0x04, 0xf6, 0xc2, 0x16, 0x25, 0x86, 0x56, 0x55, 0x09, 0xbe, 0x91,
+ },
+}
+
+// gfMult returns a·b in GF(2^8)/p
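+// The multiplication is carry-less: each iteration XORs B into the
+// result when the low bit of a is set, and reduces B modulo p when
+// its top bit overflows, selecting both via table indexing rather
+// than branches.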
+func gfMult(a, b byte, p uint32) byte {
+ B := [2]uint32{0, uint32(b)}
+ P := [2]uint32{0, p}
+ var result uint32
+
+ // branchless GF multiplier
+ for i := 0; i < 7; i++ {
+ result ^= B[a&1]
+ a >>= 1
+ B[1] = P[B[1]>>7] ^ (B[1] << 1)
+ }
+ result ^= B[a&1]
+ return byte(result)
+}
+
+// mdsColumnMult calculates y{col} where [y0 y1 y2 y3] = MDS · [x0 x1 x2 x3]
+func mdsColumnMult(in byte, col int) uint32 {
+ mul01 := in
+ mul5B := gfMult(in, 0x5B, mdsPolynomial)
+ mulEF := gfMult(in, 0xEF, mdsPolynomial)
+
+ switch col {
+ case 0:
+ return uint32(mul01) | uint32(mul5B)<<8 | uint32(mulEF)<<16 | uint32(mulEF)<<24
+ case 1:
+ return uint32(mulEF) | uint32(mulEF)<<8 | uint32(mul5B)<<16 | uint32(mul01)<<24
+ case 2:
+ return uint32(mul5B) | uint32(mulEF)<<8 | uint32(mul01)<<16 | uint32(mulEF)<<24
+ case 3:
+ return uint32(mul5B) | uint32(mul01)<<8 | uint32(mulEF)<<16 | uint32(mul5B)<<24
+ }
+
+ panic("unreachable")
+}
+
+// h implements the S-box generation function. See [TWOFISH] 4.3.5
+func h(in, key []byte, offset int) uint32 {
+ var y [4]byte
+ for x := range y {
+ y[x] = in[x]
+ }
+ switch len(key) / 8 {
+ case 4:
+ y[0] = sbox[1][y[0]] ^ key[4*(6+offset)+0]
+ y[1] = sbox[0][y[1]] ^ key[4*(6+offset)+1]
+ y[2] = sbox[0][y[2]] ^ key[4*(6+offset)+2]
+ y[3] = sbox[1][y[3]] ^ key[4*(6+offset)+3]
+ fallthrough
+ case 3:
+ y[0] = sbox[1][y[0]] ^ key[4*(4+offset)+0]
+ y[1] = sbox[1][y[1]] ^ key[4*(4+offset)+1]
+ y[2] = sbox[0][y[2]] ^ key[4*(4+offset)+2]
+ y[3] = sbox[0][y[3]] ^ key[4*(4+offset)+3]
+ fallthrough
+ case 2:
+ y[0] = sbox[1][sbox[0][sbox[0][y[0]]^key[4*(2+offset)+0]]^key[4*(0+offset)+0]]
+ y[1] = sbox[0][sbox[0][sbox[1][y[1]]^key[4*(2+offset)+1]]^key[4*(0+offset)+1]]
+ y[2] = sbox[1][sbox[1][sbox[0][y[2]]^key[4*(2+offset)+2]]^key[4*(0+offset)+2]]
+ y[3] = sbox[0][sbox[1][sbox[1][y[3]]^key[4*(2+offset)+3]]^key[4*(0+offset)+3]]
+ }
+ // [y0 y1 y2 y3] = MDS . [x0 x1 x2 x3]
+ var mdsMult uint32
+ for i := range y {
+ mdsMult ^= mdsColumnMult(y[i], i)
+ }
+ return mdsMult
+}
+
+// Encrypt encrypts a 16-byte block from src to dst, which may overlap.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
+func (c *Cipher) Encrypt(dst, src []byte) {
+ S1 := c.s[0]
+ S2 := c.s[1]
+ S3 := c.s[2]
+ S4 := c.s[3]
+
+ // Load input
+ ia := load32l(src[0:4])
+ ib := load32l(src[4:8])
+ ic := load32l(src[8:12])
+ id := load32l(src[12:16])
+
+ // Pre-whitening
+ ia ^= c.k[0]
+ ib ^= c.k[1]
+ ic ^= c.k[2]
+ id ^= c.k[3]
+
+ for i := 0; i < 8; i++ {
+ k := c.k[8+i*4 : 12+i*4]
+ t2 := S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)]
+ t1 := S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2
+ ic = ror(ic^(t1+k[0]), 1)
+ id = rol(id, 1) ^ (t2 + t1 + k[1])
+
+ t2 = S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)]
+ t1 = S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2
+ ia = ror(ia^(t1+k[2]), 1)
+ ib = rol(ib, 1) ^ (t2 + t1 + k[3])
+ }
+
+ // Output with "undo last swap"
+ ta := ic ^ c.k[4]
+ tb := id ^ c.k[5]
+ tc := ia ^ c.k[6]
+ td := ib ^ c.k[7]
+
+ store32l(dst[0:4], ta)
+ store32l(dst[4:8], tb)
+ store32l(dst[8:12], tc)
+ store32l(dst[12:16], td)
+}
+
+// Decrypt decrypts a 16-byte block from src to dst, which may overlap.
+func (c *Cipher) Decrypt(dst, src []byte) {
+ S1 := c.s[0]
+ S2 := c.s[1]
+ S3 := c.s[2]
+ S4 := c.s[3]
+
+ // Load input
+ ta := load32l(src[0:4])
+ tb := load32l(src[4:8])
+ tc := load32l(src[8:12])
+ td := load32l(src[12:16])
+
+	// Reverse the "undo last swap" step from Encrypt
+ ia := tc ^ c.k[6]
+ ib := td ^ c.k[7]
+ ic := ta ^ c.k[4]
+ id := tb ^ c.k[5]
+
+ for i := 8; i > 0; i-- {
+ k := c.k[4+i*4 : 8+i*4]
+ t2 := S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)]
+ t1 := S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2
+ ia = rol(ia, 1) ^ (t1 + k[2])
+ ib = ror(ib^(t2+t1+k[3]), 1)
+
+ t2 = S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)]
+ t1 = S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2
+ ic = rol(ic, 1) ^ (t1 + k[0])
+ id = ror(id^(t2+t1+k[1]), 1)
+ }
+
+ // Undo pre-whitening
+ ia ^= c.k[0]
+ ib ^= c.k[1]
+ ic ^= c.k[2]
+ id ^= c.k[3]
+
+ store32l(dst[0:4], ia)
+ store32l(dst[4:8], ib)
+ store32l(dst[8:12], ic)
+ store32l(dst[12:16], id)
+}
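+
+// Illustrative sketch, not part of the upstream file: a single-block
+// round trip through the cipher. The helper name and the 16-byte key
+// are arbitrary assumptions; 24- and 32-byte keys select the longer
+// key schedules in NewCipher.
+func roundTripBlock() bool {
+	key := []byte("0123456789abcdef") // 16 bytes picks the k=2 path
+	c, err := NewCipher(key)
+	if err != nil {
+		return false
+	}
+	src := make([]byte, BlockSize) // one all-zero 16-byte block
+	dst := make([]byte, BlockSize)
+	c.Encrypt(dst, src)
+	out := make([]byte, BlockSize)
+	c.Decrypt(out, dst)
+	for i := range out {
+		if out[i] != src[i] {
+			return false // round trip failed
+		}
+	}
+	return true
+}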
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/twofish/twofish_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/twofish/twofish_test.go
new file mode 100644
index 00000000000..303081f3f28
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/twofish/twofish_test.go
@@ -0,0 +1,129 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package twofish
+
+import (
+ "bytes"
+ "testing"
+)
+
+var qbox = [2][4][16]byte{
+ {
+ {0x8, 0x1, 0x7, 0xD, 0x6, 0xF, 0x3, 0x2, 0x0, 0xB, 0x5, 0x9, 0xE, 0xC, 0xA, 0x4},
+ {0xE, 0xC, 0xB, 0x8, 0x1, 0x2, 0x3, 0x5, 0xF, 0x4, 0xA, 0x6, 0x7, 0x0, 0x9, 0xD},
+ {0xB, 0xA, 0x5, 0xE, 0x6, 0xD, 0x9, 0x0, 0xC, 0x8, 0xF, 0x3, 0x2, 0x4, 0x7, 0x1},
+ {0xD, 0x7, 0xF, 0x4, 0x1, 0x2, 0x6, 0xE, 0x9, 0xB, 0x3, 0x0, 0x8, 0x5, 0xC, 0xA},
+ },
+ {
+ {0x2, 0x8, 0xB, 0xD, 0xF, 0x7, 0x6, 0xE, 0x3, 0x1, 0x9, 0x4, 0x0, 0xA, 0xC, 0x5},
+ {0x1, 0xE, 0x2, 0xB, 0x4, 0xC, 0x3, 0x7, 0x6, 0xD, 0xA, 0x5, 0xF, 0x9, 0x0, 0x8},
+ {0x4, 0xC, 0x7, 0x5, 0x1, 0x6, 0x9, 0xA, 0x0, 0xE, 0xD, 0x8, 0x2, 0xB, 0x3, 0xF},
+ {0xB, 0x9, 0x5, 0x1, 0xC, 0x3, 0xD, 0xE, 0x6, 0x4, 0x7, 0xF, 0x2, 0x0, 0x8, 0xA},
+ },
+}
+
+// genSbox generates the variable sbox
+func genSbox(qi int, x byte) byte {
+ a0, b0 := x/16, x%16
+ for i := 0; i < 2; i++ {
+ a1 := a0 ^ b0
+ b1 := (a0 ^ ((b0 << 3) | (b0 >> 1)) ^ (a0 << 3)) & 15
+ a0 = qbox[qi][2*i][a1]
+ b0 = qbox[qi][2*i+1][b1]
+ }
+ return (b0 << 4) + a0
+}
+
+func TestSbox(t *testing.T) {
+ for n := range sbox {
+ for m := range sbox[n] {
+ if genSbox(n, byte(m)) != sbox[n][m] {
+ t.Errorf("#%d|%d: sbox value = %d want %d", n, m, sbox[n][m], genSbox(n, byte(m)))
+ }
+ }
+ }
+}
+
+var testVectors = []struct {
+ key []byte
+ dec []byte
+ enc []byte
+}{
+ // These tests are extracted from LibTom
+ {
+ []byte{0x9F, 0x58, 0x9F, 0x5C, 0xF6, 0x12, 0x2C, 0x32, 0xB6, 0xBF, 0xEC, 0x2F, 0x2A, 0xE8, 0xC3, 0x5A},
+ []byte{0xD4, 0x91, 0xDB, 0x16, 0xE7, 0xB1, 0xC3, 0x9E, 0x86, 0xCB, 0x08, 0x6B, 0x78, 0x9F, 0x54, 0x19},
+ []byte{0x01, 0x9F, 0x98, 0x09, 0xDE, 0x17, 0x11, 0x85, 0x8F, 0xAA, 0xC3, 0xA3, 0xBA, 0x20, 0xFB, 0xC3},
+ },
+ {
+ []byte{0x88, 0xB2, 0xB2, 0x70, 0x6B, 0x10, 0x5E, 0x36, 0xB4, 0x46, 0xBB, 0x6D, 0x73, 0x1A, 0x1E, 0x88,
+ 0xEF, 0xA7, 0x1F, 0x78, 0x89, 0x65, 0xBD, 0x44},
+ []byte{0x39, 0xDA, 0x69, 0xD6, 0xBA, 0x49, 0x97, 0xD5, 0x85, 0xB6, 0xDC, 0x07, 0x3C, 0xA3, 0x41, 0xB2},
+ []byte{0x18, 0x2B, 0x02, 0xD8, 0x14, 0x97, 0xEA, 0x45, 0xF9, 0xDA, 0xAC, 0xDC, 0x29, 0x19, 0x3A, 0x65},
+ },
+ {
+ []byte{0xD4, 0x3B, 0xB7, 0x55, 0x6E, 0xA3, 0x2E, 0x46, 0xF2, 0xA2, 0x82, 0xB7, 0xD4, 0x5B, 0x4E, 0x0D,
+ 0x57, 0xFF, 0x73, 0x9D, 0x4D, 0xC9, 0x2C, 0x1B, 0xD7, 0xFC, 0x01, 0x70, 0x0C, 0xC8, 0x21, 0x6F},
+ []byte{0x90, 0xAF, 0xE9, 0x1B, 0xB2, 0x88, 0x54, 0x4F, 0x2C, 0x32, 0xDC, 0x23, 0x9B, 0x26, 0x35, 0xE6},
+ []byte{0x6C, 0xB4, 0x56, 0x1C, 0x40, 0xBF, 0x0A, 0x97, 0x05, 0x93, 0x1C, 0xB6, 0xD4, 0x08, 0xE7, 0xFA},
+ },
+	// These tests are derived from http://www.schneier.com/code/ecb_ival.txt
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x9F, 0x58, 0x9F, 0x5C, 0xF6, 0x12, 0x2C, 0x32, 0xB6, 0xBF, 0xEC, 0x2F, 0x2A, 0xE8, 0xC3, 0x5A},
+ },
+ {
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ },
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0xCF, 0xD1, 0xD2, 0xE5, 0xA9, 0xBE, 0x9C, 0xDF, 0x50, 0x1F, 0x13, 0xB8, 0x92, 0xBD, 0x22, 0x48},
+ },
+ {
+ []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF,
+ },
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x37, 0x52, 0x7B, 0xE0, 0x05, 0x23, 0x34, 0xB8, 0x9F, 0x0C, 0xFC, 0xCA, 0xE8, 0x7C, 0xFA, 0x20},
+ },
+}
+
+func TestCipher(t *testing.T) {
+ for n, tt := range testVectors {
+		// Check that the plaintext (dec) encrypts to the given
+		// ciphertext (enc) under the given key, and that enc can be
+		// decrypted back into dec.
+ c, err := NewCipher(tt.key)
+ if err != nil {
+ t.Errorf("#%d: NewCipher: %v", n, err)
+ return
+ }
+
+ buf := make([]byte, 16)
+ c.Encrypt(buf, tt.dec)
+ if !bytes.Equal(buf, tt.enc) {
+ t.Errorf("#%d: encrypt = %x want %x", n, buf, tt.enc)
+ }
+ c.Decrypt(buf, tt.enc)
+ if !bytes.Equal(buf, tt.dec) {
+ t.Errorf("#%d: decrypt = %x want %x", n, buf, tt.dec)
+ }
+
+ // Test that 16 zero bytes, encrypted 1000 times then decrypted
+ // 1000 times results in zero bytes again.
+ zero := make([]byte, 16)
+ buf = make([]byte, 16)
+ for i := 0; i < 1000; i++ {
+ c.Encrypt(buf, buf)
+ }
+ for i := 0; i < 1000; i++ {
+ c.Decrypt(buf, buf)
+ }
+ if !bytes.Equal(buf, zero) {
+ t.Errorf("#%d: encrypt/decrypt 1000: have %x want %x", n, buf, zero)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/block.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/block.go
new file mode 100644
index 00000000000..bf5d245992d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/block.go
@@ -0,0 +1,66 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ Implementation adapted from Needham and Wheeler's paper:
+ http://www.cix.co.uk/~klockstone/xtea.pdf
+
+	A precalculated lookup table is used during encryption/decryption for values that are based purely on the key.
+*/
+
+package xtea
+
+// XTEA is based on 64 rounds.
+const numRounds = 64
+
+// blockToUint32 reads an 8-byte slice into two uint32s.
+// The block is treated as big endian.
+func blockToUint32(src []byte) (uint32, uint32) {
+ r0 := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
+ r1 := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
+ return r0, r1
+}
+
+// uint32ToBlock writes two uint32s into an 8-byte data block.
+// Values are written as big endian.
+func uint32ToBlock(v0, v1 uint32, dst []byte) {
+ dst[0] = byte(v0 >> 24)
+ dst[1] = byte(v0 >> 16)
+ dst[2] = byte(v0 >> 8)
+ dst[3] = byte(v0)
+ dst[4] = byte(v1 >> 24)
+ dst[5] = byte(v1 >> 16)
+ dst[6] = byte(v1 >> 8)
+ dst[7] = byte(v1 >> 0)
+}
+
+// encryptBlock encrypts a single 8-byte block using XTEA.
+func encryptBlock(c *Cipher, dst, src []byte) {
+ v0, v1 := blockToUint32(src)
+
+ // Two rounds of XTEA applied per loop
+ for i := 0; i < numRounds; {
+ v0 += ((v1<<4 ^ v1>>5) + v1) ^ c.table[i]
+ i++
+ v1 += ((v0<<4 ^ v0>>5) + v0) ^ c.table[i]
+ i++
+ }
+
+ uint32ToBlock(v0, v1, dst)
+}
+
+// decryptBlock decrypts a single 8-byte block using XTEA.
+func decryptBlock(c *Cipher, dst, src []byte) {
+ v0, v1 := blockToUint32(src)
+
+ // Two rounds of XTEA applied per loop
+ for i := numRounds; i > 0; {
+ i--
+ v1 -= ((v0<<4 ^ v0>>5) + v0) ^ c.table[i]
+ i--
+ v0 -= ((v1<<4 ^ v1>>5) + v1) ^ c.table[i]
+ }
+
+ uint32ToBlock(v0, v1, dst)
+}
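
For reference, a table-free restatement of the loop above (an editor's sketch, not part of the diff): each iteration performs the two half-rounds that encryptBlock reads out of c.table, with the key schedule computed inline instead of precalculated. The key and expected output come from the first vector in xtea_test.go further down.

    package main

    import "fmt"

    // encryptDirect mirrors encryptBlock with the key schedule inlined:
    // c.table[2j] is sum+k[sum&3] and c.table[2j+1] is sum'+k[(sum'>>11)&3]
    // where sum' = sum + delta, exactly as initCipher precomputes them.
    func encryptDirect(k [4]uint32, v0, v1 uint32) (uint32, uint32) {
        const delta = 0x9E3779B9
        var sum uint32
        for i := 0; i < 32; i++ { // 32 pairs = the package's 64 rounds
            v0 += ((v1<<4 ^ v1>>5) + v1) ^ (sum + k[sum&3])
            sum += delta
            v1 += ((v0<<4 ^ v0>>5) + v0) ^ (sum + k[(sum>>11)&3])
        }
        return v0, v1
    }

    func main() {
        // Key 000102...0f and plaintext "ABCDEFGH" (0x4142...48), loaded
        // big-endian as in blockToUint32 above.
        k := [4]uint32{0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F}
        v0, v1 := encryptDirect(k, 0x41424344, 0x45464748)
        fmt.Printf("%08x%08x\n", v0, v1) // 497df3d072612cb5, matching xtea_test.go
    }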
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/cipher.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/cipher.go
new file mode 100644
index 00000000000..108b4263559
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/cipher.go
@@ -0,0 +1,82 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xtea implements XTEA encryption, as defined in Needham and Wheeler's
+// 1997 technical report, "Tea extensions."
+package xtea // import "golang.org/x/crypto/xtea"
+
+// For details, see http://www.cix.co.uk/~klockstone/xtea.pdf
+
+import "strconv"
+
+// BlockSize is the XTEA block size in bytes.
+const BlockSize = 8
+
+// A Cipher is an instance of an XTEA cipher using a particular key.
+// table contains a series of precalculated values that are used each round.
+type Cipher struct {
+ table [64]uint32
+}
+
+// A KeySizeError reports an attempt to use a key of invalid length.
+type KeySizeError int
+
+func (k KeySizeError) Error() string {
+ return "crypto/xtea: invalid key size " + strconv.Itoa(int(k))
+}
+
+// NewCipher creates and returns a new Cipher.
+// The key argument should be the XTEA key.
+// XTEA only supports 128 bit (16 byte) keys.
+func NewCipher(key []byte) (*Cipher, error) {
+	if len(key) != 16 {
+		return nil, KeySizeError(len(key))
+	}
+
+ c := new(Cipher)
+ initCipher(c, key)
+
+ return c, nil
+}
+
+// BlockSize returns the XTEA block size, 8 bytes.
+// It is necessary to satisfy the Block interface in the
+// package "crypto/cipher".
+func (c *Cipher) BlockSize() int { return BlockSize }
+
+// Encrypt encrypts the 8-byte buffer src using the key and stores the result in dst.
+// Note that for amounts of data larger than a block,
+// it is not safe to just call Encrypt on successive blocks;
+// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go).
+func (c *Cipher) Encrypt(dst, src []byte) { encryptBlock(c, dst, src) }
+
+// Decrypt decrypts the 8-byte buffer src using the cipher's key and stores the result in dst.
+func (c *Cipher) Decrypt(dst, src []byte) { decryptBlock(c, dst, src) }
+
+// initCipher initializes the cipher context by creating a lookup table
+// of precalculated values that are based on the key.
+func initCipher(c *Cipher, key []byte) {
+ // Load the key into four uint32s
+ var k [4]uint32
+ for i := 0; i < len(k); i++ {
+ j := i << 2 // Multiply by 4
+ k[i] = uint32(key[j+0])<<24 | uint32(key[j+1])<<16 | uint32(key[j+2])<<8 | uint32(key[j+3])
+ }
+
+ // Precalculate the table
+ const delta = 0x9E3779B9
+ var sum uint32 = 0
+
+ // Two rounds of XTEA applied per loop
+ for i := 0; i < numRounds; {
+ c.table[i] = sum + k[sum&3]
+ i++
+ sum += delta
+ c.table[i] = sum + k[(sum>>11)&3]
+ i++
+ }
+}
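
Tying the two files together, a short usage sketch (an editor's addition, not part of the diff) that follows the package's own advice to use a chaining mode for anything longer than one 8-byte block:

    package main

    import (
        "crypto/cipher"
        "fmt"

        "golang.org/x/crypto/xtea"
    )

    func main() {
        block, err := xtea.NewCipher([]byte("0123456789abcdef")) // exactly 16 bytes
        if err != nil {
            panic(err)
        }

        // A zero IV keeps the sketch deterministic; real code needs a random IV.
        iv := make([]byte, xtea.BlockSize)
        plaintext := []byte("16 bytes of text") // must be a multiple of 8 bytes

        ciphertext := make([]byte, len(plaintext))
        cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, plaintext)

        decrypted := make([]byte, len(ciphertext))
        cipher.NewCBCDecrypter(block, iv).CryptBlocks(decrypted, ciphertext)
        fmt.Printf("%s\n", decrypted) // 16 bytes of text
    }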
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/xtea_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/xtea_test.go
new file mode 100644
index 00000000000..be711bf5af0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/xtea/xtea_test.go
@@ -0,0 +1,229 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xtea
+
+import (
+ "testing"
+)
+
+// A sample test key for when we just want to initialize a cipher
+var testKey = []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF}
+
+// Test that the block size for XTEA is correct
+func TestBlocksize(t *testing.T) {
+ if BlockSize != 8 {
+ t.Errorf("BlockSize constant - expected 8, got %d", BlockSize)
+ return
+ }
+
+ c, err := NewCipher(testKey)
+ if err != nil {
+ t.Errorf("NewCipher(%d bytes) = %s", len(testKey), err)
+ return
+ }
+
+ result := c.BlockSize()
+ if result != 8 {
+ t.Errorf("BlockSize function - expected 8, got %d", result)
+ return
+ }
+}
+
+// A series of test values to confirm that the Cipher.table array was initialized correctly
+var testTable = []uint32{
+ 0x00112233, 0x6B1568B8, 0xE28CE030, 0xC5089E2D, 0xC5089E2D, 0x1EFBD3A2, 0xA7845C2A, 0x78EF0917,
+ 0x78EF0917, 0x172682D0, 0x5B6AC714, 0x822AC955, 0x3DE68511, 0xDC1DFECA, 0x2062430E, 0x3611343F,
+ 0xF1CCEFFB, 0x900469B4, 0xD448ADF8, 0x2E3BE36D, 0xB6C46BF5, 0x994029F2, 0x994029F2, 0xF3335F67,
+ 0x6AAAD6DF, 0x4D2694DC, 0x4D2694DC, 0xEB5E0E95, 0x2FA252D9, 0x4551440A, 0x121E10D6, 0xB0558A8F,
+ 0xE388BDC3, 0x0A48C004, 0xC6047BC0, 0x643BF579, 0xA88039BD, 0x02736F32, 0x8AFBF7BA, 0x5C66A4A7,
+ 0x5C66A4A7, 0xC76AEB2C, 0x3EE262A4, 0x215E20A1, 0x215E20A1, 0x7B515616, 0x03D9DE9E, 0x1988CFCF,
+ 0xD5448B8B, 0x737C0544, 0xB7C04988, 0xDE804BC9, 0x9A3C0785, 0x3873813E, 0x7CB7C582, 0xD6AAFAF7,
+ 0x4E22726F, 0x309E306C, 0x309E306C, 0x8A9165E1, 0x1319EE69, 0xF595AC66, 0xF595AC66, 0x4F88E1DB,
+}
+
+// Test that the cipher context is initialized correctly
+func TestCipherInit(t *testing.T) {
+ c, err := NewCipher(testKey)
+ if err != nil {
+ t.Errorf("NewCipher(%d bytes) = %s", len(testKey), err)
+ return
+ }
+
+ for i := 0; i < len(c.table); i++ {
+ if c.table[i] != testTable[i] {
+ t.Errorf("NewCipher() failed to initialize Cipher.table[%d] correctly. Expected %08X, got %08X", i, testTable[i], c.table[i])
+ break
+ }
+ }
+}
+
+// Test that invalid key sizes return an error
+func TestInvalidKeySize(t *testing.T) {
+ // Test a long key
+ key := []byte{
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF,
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF,
+ }
+
+ _, err := NewCipher(key)
+ if err == nil {
+ t.Errorf("Invalid key size %d didn't result in an error.", len(key))
+ }
+
+ // Test a short key
+ key = []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77}
+
+ _, err = NewCipher(key)
+ if err == nil {
+ t.Errorf("Invalid key size %d didn't result in an error.", len(key))
+ }
+}
+
+// Test that we can correctly decrypt some bytes we have encrypted
+func TestEncodeDecode(t *testing.T) {
+ original := []byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF}
+ input := original
+ output := make([]byte, BlockSize)
+
+ c, err := NewCipher(testKey)
+ if err != nil {
+ t.Errorf("NewCipher(%d bytes) = %s", len(testKey), err)
+ return
+ }
+
+ // Encrypt the input block
+ c.Encrypt(output, input)
+
+ // Check that the output does not match the input
+ differs := false
+ for i := 0; i < len(input); i++ {
+ if output[i] != input[i] {
+ differs = true
+ break
+ }
+ }
+	if !differs {
+ t.Error("Cipher.Encrypt: Failed to encrypt the input block.")
+ return
+ }
+
+ // Decrypt the block we just encrypted
+ input = output
+ output = make([]byte, BlockSize)
+ c.Decrypt(output, input)
+
+ // Check that the output from decrypt matches our initial input
+ for i := 0; i < len(input); i++ {
+ if output[i] != original[i] {
+ t.Errorf("Decrypted byte %d differed. Expected %02X, got %02X\n", i, original[i], output[i])
+ return
+ }
+ }
+}
+
+// Test Vectors
+type CryptTest struct {
+ key []byte
+ plainText []byte
+ cipherText []byte
+}
+
+var CryptTests = []CryptTest{
+ // These were sourced from http://www.freemedialibrary.com/index.php/XTEA_test_vectors
+ {
+ []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},
+ []byte{0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48},
+ []byte{0x49, 0x7d, 0xf3, 0xd0, 0x72, 0x61, 0x2c, 0xb5},
+ },
+ {
+ []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},
+ []byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41},
+ []byte{0xe7, 0x8f, 0x2d, 0x13, 0x74, 0x43, 0x41, 0xd8},
+ },
+ {
+ []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f},
+ []byte{0x5a, 0x5b, 0x6e, 0x27, 0x89, 0x48, 0xd7, 0x7f},
+ []byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41},
+ },
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48},
+ []byte{0xa0, 0x39, 0x05, 0x89, 0xf8, 0xb8, 0xef, 0xa5},
+ },
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41},
+ []byte{0xed, 0x23, 0x37, 0x5a, 0x82, 0x1a, 0x8c, 0x2d},
+ },
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x70, 0xe1, 0x22, 0x5d, 0x6e, 0x4e, 0x76, 0x55},
+ []byte{0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41},
+ },
+
+ // These vectors are from http://wiki.secondlife.com/wiki/XTEA_Strong_Encryption_Implementation#Bouncy_Castle_C.23_API
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0xDE, 0xE9, 0xD4, 0xD8, 0xF7, 0x13, 0x1E, 0xD9},
+ },
+ {
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
+ []byte{0x06, 0x5C, 0x1B, 0x89, 0x75, 0xC6, 0xA8, 0x16},
+ },
+ {
+ []byte{0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78, 0x23, 0x45, 0x67, 0x89, 0x34, 0x56, 0x78, 0x9A},
+ []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ []byte{0x1F, 0xF9, 0xA0, 0x26, 0x1A, 0xC6, 0x42, 0x64},
+ },
+ {
+ []byte{0x01, 0x23, 0x45, 0x67, 0x12, 0x34, 0x56, 0x78, 0x23, 0x45, 0x67, 0x89, 0x34, 0x56, 0x78, 0x9A},
+ []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08},
+ []byte{0x8C, 0x67, 0x15, 0x5B, 0x2E, 0xF9, 0x1E, 0xAD},
+ },
+}
+
+// Test encryption
+func TestCipherEncrypt(t *testing.T) {
+ for i, tt := range CryptTests {
+ c, err := NewCipher(tt.key)
+ if err != nil {
+ t.Errorf("NewCipher(%d bytes), vector %d = %s", len(tt.key), i, err)
+ continue
+ }
+
+ out := make([]byte, len(tt.plainText))
+ c.Encrypt(out, tt.plainText)
+
+ for j := 0; j < len(out); j++ {
+ if out[j] != tt.cipherText[j] {
+ t.Errorf("Cipher.Encrypt %d: out[%d] = %02X, expected %02X", i, j, out[j], tt.cipherText[j])
+ break
+ }
+ }
+ }
+}
+
+// Test decryption
+func TestCipherDecrypt(t *testing.T) {
+ for i, tt := range CryptTests {
+ c, err := NewCipher(tt.key)
+ if err != nil {
+ t.Errorf("NewCipher(%d bytes), vector %d = %s", len(tt.key), i, err)
+ continue
+ }
+
+ out := make([]byte, len(tt.cipherText))
+ c.Decrypt(out, tt.cipherText)
+
+ for j := 0; j < len(out); j++ {
+ if out[j] != tt.plainText[j] {
+ t.Errorf("Cipher.Decrypt %d: out[%d] = %02X, expected %02X", i, j, out[j], tt.plainText[j])
+ break
+ }
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/xts/xts.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/xts/xts.go
new file mode 100644
index 00000000000..c9a283b2e1d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/xts/xts.go
@@ -0,0 +1,138 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package xts implements the XTS cipher mode as specified in IEEE P1619/D16.
+//
+// XTS mode is typically used for disk encryption, which presents a number of
+// novel problems that make more common modes inapplicable. The disk is
+// conceptually an array of sectors and we must be able to encrypt and decrypt
+// a sector in isolation. However, an attacker must not be able to transpose
+// two sectors of plaintext by transposing their ciphertext.
+//
+// XTS wraps a block cipher with Rogaway's XEX mode in order to build a
+// tweakable block cipher. This allows each sector to have a unique tweak,
+// effectively creating a unique key for each sector.
+//
+// XTS does not provide any authentication. An attacker can manipulate the
+// ciphertext and randomise a block (16 bytes) of the plaintext.
+//
+// (Note: this package does not implement ciphertext-stealing so sectors must
+// be a multiple of 16 bytes.)
+package xts // import "golang.org/x/crypto/xts"
+
+import (
+ "crypto/cipher"
+ "errors"
+)
+
+// Cipher contains an expanded key structure. It doesn't contain mutable state
+// and therefore can be used concurrently.
+type Cipher struct {
+ k1, k2 cipher.Block
+}
+
+// blockSize is the block size that the underlying cipher must have. XTS is
+// only defined for 16-byte ciphers.
+const blockSize = 16
+
+// NewCipher creates a Cipher given a function for creating the underlying
+// block cipher (which must have a block size of 16 bytes). The key must be
+// twice the length of the underlying cipher's key.
+func NewCipher(cipherFunc func([]byte) (cipher.Block, error), key []byte) (c *Cipher, err error) {
+ c = new(Cipher)
+ if c.k1, err = cipherFunc(key[:len(key)/2]); err != nil {
+ return
+ }
+ c.k2, err = cipherFunc(key[len(key)/2:])
+
+ if c.k1.BlockSize() != blockSize {
+ err = errors.New("xts: cipher does not have a block size of 16")
+ }
+
+ return
+}
+
+// Encrypt encrypts a sector of plaintext and puts the result into ciphertext.
+// Plaintext and ciphertext may be the same slice but should not overlap.
+// Sectors must be a multiple of 16 bytes and less than 2²⁴ bytes.
+func (c *Cipher) Encrypt(ciphertext, plaintext []byte, sectorNum uint64) {
+ if len(ciphertext) < len(plaintext) {
+ panic("xts: ciphertext is smaller than plaintext")
+ }
+ if len(plaintext)%blockSize != 0 {
+ panic("xts: plaintext is not a multiple of the block size")
+ }
+
+ var tweak [blockSize]byte
+ for i := 0; i < 8; i++ {
+ tweak[i] = byte(sectorNum)
+ sectorNum >>= 8
+ }
+
+ c.k2.Encrypt(tweak[:], tweak[:])
+
+ for i := 0; i < len(plaintext); i += blockSize {
+ for j := 0; j < blockSize; j++ {
+ ciphertext[i+j] = plaintext[i+j] ^ tweak[j]
+ }
+ c.k1.Encrypt(ciphertext[i:], ciphertext[i:])
+ for j := 0; j < blockSize; j++ {
+ ciphertext[i+j] ^= tweak[j]
+ }
+
+ mul2(&tweak)
+ }
+}
+
+// Decrypt decrypts a sector of ciphertext and puts the result into plaintext.
+// Plaintext and ciphertext may be the same slice but should not overlap.
+// Sectors must be a multiple of 16 bytes and less than 2²⁴ bytes.
+func (c *Cipher) Decrypt(plaintext, ciphertext []byte, sectorNum uint64) {
+ if len(plaintext) < len(ciphertext) {
+ panic("xts: plaintext is smaller than ciphertext")
+ }
+ if len(ciphertext)%blockSize != 0 {
+ panic("xts: ciphertext is not a multiple of the block size")
+ }
+
+ var tweak [blockSize]byte
+ for i := 0; i < 8; i++ {
+ tweak[i] = byte(sectorNum)
+ sectorNum >>= 8
+ }
+
+ c.k2.Encrypt(tweak[:], tweak[:])
+
+ for i := 0; i < len(plaintext); i += blockSize {
+ for j := 0; j < blockSize; j++ {
+ plaintext[i+j] = ciphertext[i+j] ^ tweak[j]
+ }
+ c.k1.Decrypt(plaintext[i:], plaintext[i:])
+ for j := 0; j < blockSize; j++ {
+ plaintext[i+j] ^= tweak[j]
+ }
+
+ mul2(&tweak)
+ }
+}
+
+// mul2 multiplies tweak by 2 in GF(2¹²⁸) with an irreducible polynomial of
+// x¹²⁸ + x⁷ + x² + x + 1.
+func mul2(tweak *[blockSize]byte) {
+ var carryIn byte
+ for j := range tweak {
+ carryOut := tweak[j] >> 7
+ tweak[j] = (tweak[j] << 1) + carryIn
+ carryIn = carryOut
+ }
+ if carryIn != 0 {
+ // If we have a carry bit then we need to subtract a multiple
+ // of the irreducible polynomial (x¹²⁸ + x⁷ + x² + x + 1).
+	// By dropping the carry bit, we're subtracting the x¹²⁸ term
+ // so all that remains is to subtract x⁷ + x² + x + 1.
+ // Subtraction (and addition) in this representation is just
+ // XOR.
+ tweak[0] ^= 1<<7 | 1<<2 | 1<<1 | 1
+ }
+}
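
A worked example of the doubling step (an editor's addition, not part of the diff): when the top bit of the last byte is set, the left shift overflows x¹²⁸, and the reduction folds the overflow back in by XORing 0x87, the byte encoding of x⁷ + x² + x + 1, into the first byte.

    package main

    import "fmt"

    // mul2 is reproduced verbatim from xts.go above for the demonstration.
    func mul2(tweak *[16]byte) {
        var carryIn byte
        for j := range tweak {
            carryOut := tweak[j] >> 7
            tweak[j] = (tweak[j] << 1) + carryIn
            carryIn = carryOut
        }
        if carryIn != 0 {
            tweak[0] ^= 1<<7 | 1<<2 | 1<<1 | 1 // 0x87
        }
    }

    func main() {
        var t [16]byte
        t[15] = 0x80 // the x¹²⁷ term: doubling it overflows and triggers the reduction
        mul2(&t)
        fmt.Printf("% x\n", t) // 87 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
    }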
diff --git a/src/mongo/gotools/vendor/src/golang.org/x/crypto/xts/xts_test.go b/src/mongo/gotools/vendor/src/golang.org/x/crypto/xts/xts_test.go
new file mode 100644
index 00000000000..7a5e9fadd60
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/golang.org/x/crypto/xts/xts_test.go
@@ -0,0 +1,85 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package xts
+
+import (
+ "bytes"
+ "crypto/aes"
+ "encoding/hex"
+ "testing"
+)
+
+// These test vectors have been taken from IEEE P1619/D16, Annex B.
+var xtsTestVectors = []struct {
+ key string
+ sector uint64
+ plaintext string
+ ciphertext string
+}{
+ {
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0,
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "917cf69ebd68b2ec9b9fe9a3eadda692cd43d2f59598ed858c02c2652fbf922e",
+ }, {
+ "1111111111111111111111111111111122222222222222222222222222222222",
+ 0x3333333333,
+ "4444444444444444444444444444444444444444444444444444444444444444",
+ "c454185e6a16936e39334038acef838bfb186fff7480adc4289382ecd6d394f0",
+ }, {
+ "fffefdfcfbfaf9f8f7f6f5f4f3f2f1f022222222222222222222222222222222",
+ 0x3333333333,
+ "4444444444444444444444444444444444444444444444444444444444444444",
+ "af85336b597afc1a900b2eb21ec949d292df4c047e0b21532186a5971a227a89",
+ }, {
+ "2718281828459045235360287471352631415926535897932384626433832795",
+ 0,
+ "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
+ "27a7479befa1d476489f308cd4cfa6e2a96e4bbe3208ff25287dd3819616e89cc78cf7f5e543445f8333d8fa7f56000005279fa5d8b5e4ad40e736ddb4d35412328063fd2aab53e5ea1e0a9f332500a5df9487d07a5c92cc512c8866c7e860ce93fdf166a24912b422976146ae20ce846bb7dc9ba94a767aaef20c0d61ad02655ea92dc4c4e41a8952c651d33174be51a10c421110e6d81588ede82103a252d8a750e8768defffed9122810aaeb99f9172af82b604dc4b8e51bcb08235a6f4341332e4ca60482a4ba1a03b3e65008fc5da76b70bf1690db4eae29c5f1badd03c5ccf2a55d705ddcd86d449511ceb7ec30bf12b1fa35b913f9f747a8afd1b130e94bff94effd01a91735ca1726acd0b197c4e5b03393697e126826fb6bbde8ecc1e08298516e2c9ed03ff3c1b7860f6de76d4cecd94c8119855ef5297ca67e9f3e7ff72b1e99785ca0a7e7720c5b36dc6d72cac9574c8cbbc2f801e23e56fd344b07f22154beba0f08ce8891e643ed995c94d9a69c9f1b5f499027a78572aeebd74d20cc39881c213ee770b1010e4bea718846977ae119f7a023ab58cca0ad752afe656bb3c17256a9f6e9bf19fdd5a38fc82bbe872c5539edb609ef4f79c203ebb140f2e583cb2ad15b4aa5b655016a8449277dbd477ef2c8d6c017db738b18deb4a427d1923ce3ff262735779a418f20a282df920147beabe421ee5319d0568",
+ }, {
+ "2718281828459045235360287471352631415926535897932384626433832795",
+ 1,
+ "27a7479befa1d476489f308cd4cfa6e2a96e4bbe3208ff25287dd3819616e89cc78cf7f5e543445f8333d8fa7f56000005279fa5d8b5e4ad40e736ddb4d35412328063fd2aab53e5ea1e0a9f332500a5df9487d07a5c92cc512c8866c7e860ce93fdf166a24912b422976146ae20ce846bb7dc9ba94a767aaef20c0d61ad02655ea92dc4c4e41a8952c651d33174be51a10c421110e6d81588ede82103a252d8a750e8768defffed9122810aaeb99f9172af82b604dc4b8e51bcb08235a6f4341332e4ca60482a4ba1a03b3e65008fc5da76b70bf1690db4eae29c5f1badd03c5ccf2a55d705ddcd86d449511ceb7ec30bf12b1fa35b913f9f747a8afd1b130e94bff94effd01a91735ca1726acd0b197c4e5b03393697e126826fb6bbde8ecc1e08298516e2c9ed03ff3c1b7860f6de76d4cecd94c8119855ef5297ca67e9f3e7ff72b1e99785ca0a7e7720c5b36dc6d72cac9574c8cbbc2f801e23e56fd344b07f22154beba0f08ce8891e643ed995c94d9a69c9f1b5f499027a78572aeebd74d20cc39881c213ee770b1010e4bea718846977ae119f7a023ab58cca0ad752afe656bb3c17256a9f6e9bf19fdd5a38fc82bbe872c5539edb609ef4f79c203ebb140f2e583cb2ad15b4aa5b655016a8449277dbd477ef2c8d6c017db738b18deb4a427d1923ce3ff262735779a418f20a282df920147beabe421ee5319d0568",
+ "264d3ca8512194fec312c8c9891f279fefdd608d0c027b60483a3fa811d65ee59d52d9e40ec5672d81532b38b6b089ce951f0f9c35590b8b978d175213f329bb1c2fd30f2f7f30492a61a532a79f51d36f5e31a7c9a12c286082ff7d2394d18f783e1a8e72c722caaaa52d8f065657d2631fd25bfd8e5baad6e527d763517501c68c5edc3cdd55435c532d7125c8614deed9adaa3acade5888b87bef641c4c994c8091b5bcd387f3963fb5bc37aa922fbfe3df4e5b915e6eb514717bdd2a74079a5073f5c4bfd46adf7d282e7a393a52579d11a028da4d9cd9c77124f9648ee383b1ac763930e7162a8d37f350b2f74b8472cf09902063c6b32e8c2d9290cefbd7346d1c779a0df50edcde4531da07b099c638e83a755944df2aef1aa31752fd323dcb710fb4bfbb9d22b925bc3577e1b8949e729a90bbafeacf7f7879e7b1147e28ba0bae940db795a61b15ecf4df8db07b824bb062802cc98a9545bb2aaeed77cb3fc6db15dcd7d80d7d5bc406c4970a3478ada8899b329198eb61c193fb6275aa8ca340344a75a862aebe92eee1ce032fd950b47d7704a3876923b4ad62844bf4a09c4dbe8b4397184b7471360c9564880aedddb9baa4af2e75394b08cd32ff479c57a07d3eab5d54de5f9738b8d27f27a9f0ab11799d7b7ffefb2704c95c6ad12c39f1e867a4b7b1d7818a4b753dfd2a89ccb45e001a03a867b187f225dd",
+ }, {
+ "27182818284590452353602874713526624977572470936999595749669676273141592653589793238462643383279502884197169399375105820974944592",
+ 0xff,
+ "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
+ "1c3b3a102f770386e4836c99e370cf9bea00803f5e482357a4ae12d414a3e63b5d31e276f8fe4a8d66b317f9ac683f44680a86ac35adfc3345befecb4bb188fd5776926c49a3095eb108fd1098baec70aaa66999a72a82f27d848b21d4a741b0c5cd4d5fff9dac89aeba122961d03a757123e9870f8acf1000020887891429ca2a3e7a7d7df7b10355165c8b9a6d0a7de8b062c4500dc4cd120c0f7418dae3d0b5781c34803fa75421c790dfe1de1834f280d7667b327f6c8cd7557e12ac3a0f93ec05c52e0493ef31a12d3d9260f79a289d6a379bc70c50841473d1a8cc81ec583e9645e07b8d9670655ba5bbcfecc6dc3966380ad8fecb17b6ba02469a020a84e18e8f84252070c13e9f1f289be54fbc481457778f616015e1327a02b140f1505eb309326d68378f8374595c849d84f4c333ec4423885143cb47bd71c5edae9be69a2ffeceb1bec9de244fbe15992b11b77c040f12bd8f6a975a44a0f90c29a9abc3d4d893927284c58754cce294529f8614dcd2aba991925fedc4ae74ffac6e333b93eb4aff0479da9a410e4450e0dd7ae4c6e2910900575da401fc07059f645e8b7e9bfdef33943054ff84011493c27b3429eaedb4ed5376441a77ed43851ad77f16f541dfd269d50d6a5f14fb0aab1cbb4c1550be97f7ab4066193c4caa773dad38014bd2092fa755c824bb5e54c4f36ffda9fcea70b9c6e693e148c151",
+ },
+}
+
+func fromHex(s string) []byte {
+ ret, err := hex.DecodeString(s)
+ if err != nil {
+ panic("xts: invalid hex in test")
+ }
+ return ret
+}
+
+func TestXTS(t *testing.T) {
+ for i, test := range xtsTestVectors {
+ c, err := NewCipher(aes.NewCipher, fromHex(test.key))
+ if err != nil {
+ t.Errorf("#%d: failed to create cipher: %s", i, err)
+ continue
+ }
+ plaintext := fromHex(test.plaintext)
+ ciphertext := make([]byte, len(plaintext))
+ c.Encrypt(ciphertext, plaintext, test.sector)
+
+ expectedCiphertext := fromHex(test.ciphertext)
+ if !bytes.Equal(ciphertext, expectedCiphertext) {
+ t.Errorf("#%d: encrypted failed, got: %x, want: %x", i, ciphertext, expectedCiphertext)
+ continue
+ }
+
+ decrypted := make([]byte, len(ciphertext))
+ c.Decrypt(decrypted, ciphertext, test.sector)
+ if !bytes.Equal(decrypted, plaintext) {
+ t.Errorf("#%d: decryption failed, got: %x, want: %x", i, decrypted, plaintext)
+ }
+ }
+}
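
To complement the vector tests above, a sector-oriented usage sketch (an editor's addition, not part of the diff) showing the tweak-per-sector design from the package comment: one cipher encrypts every sector, and the sector number alone differentiates the ciphertexts.

    package main

    import (
        "crypto/aes"
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/xts"
    )

    func main() {
        // AES-128 under XTS needs a double-length (32-byte) key:
        // half for the data cipher (k1), half for the tweak cipher (k2).
        key := make([]byte, 32)
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }

        c, err := xts.NewCipher(aes.NewCipher, key)
        if err != nil {
            panic(err)
        }

        sector := make([]byte, 512) // one disk sector; a multiple of 16 bytes
        copy(sector, "sector contents")

        encrypted := make([]byte, len(sector))
        c.Encrypt(encrypted, sector, 42) // the sector number selects the tweak

        decrypted := make([]byte, len(encrypted))
        c.Decrypt(decrypted, encrypted, 42) // decrypting needs the same sector number
        fmt.Printf("%s\n", decrypted[:15]) // sector contents
    }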
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/.travis.yml b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/.travis.yml
new file mode 100644
index 00000000000..45b38cf13fb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/.travis.yml
@@ -0,0 +1,45 @@
+language: go
+
+go_import_path: gopkg.in/mgo.v2
+
+addons:
+ apt:
+ packages:
+
+env:
+ global:
+ - BUCKET=https://niemeyer.s3.amazonaws.com
+ matrix:
+ - GO=1.4.1 MONGODB=x86_64-2.2.7
+ - GO=1.4.1 MONGODB=x86_64-2.4.14
+ - GO=1.4.1 MONGODB=x86_64-2.6.11
+ - GO=1.4.1 MONGODB=x86_64-3.0.9
+ - GO=1.4.1 MONGODB=x86_64-3.2.3-nojournal
+ - GO=1.5.3 MONGODB=x86_64-3.0.9
+ - GO=1.6 MONGODB=x86_64-3.0.9
+
+install:
+ - eval "$(gimme $GO)"
+
+ - wget $BUCKET/mongodb-linux-$MONGODB.tgz
+ - tar xzvf mongodb-linux-$MONGODB.tgz
+ - export PATH=$PWD/mongodb-linux-$MONGODB/bin:$PATH
+
+ - wget $BUCKET/daemontools.tar.gz
+ - tar xzvf daemontools.tar.gz
+ - export PATH=$PWD/daemontools:$PATH
+
+ - go get gopkg.in/check.v1
+ - go get gopkg.in/yaml.v2
+ - go get gopkg.in/tomb.v2
+
+before_script:
+ - export NOIPV6=1
+ - make startdb
+
+script:
+ - (cd bson && go test -check.v)
+ - go test -check.v -fast
+ - (cd txn && go test -check.v)
+
+# vim:sw=4:ts=4:et
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/LICENSE b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/LICENSE
new file mode 100644
index 00000000000..770c7672b45
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/LICENSE
@@ -0,0 +1,25 @@
+mgo - MongoDB driver for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/Makefile b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/Makefile
new file mode 100644
index 00000000000..d1027d45090
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/Makefile
@@ -0,0 +1,5 @@
+startdb:
+ @harness/setup.sh start
+
+stopdb:
+ @harness/setup.sh stop
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/README.md b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/README.md
new file mode 100644
index 00000000000..f4e452c04e3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/README.md
@@ -0,0 +1,4 @@
+The MongoDB driver for Go
+-------------------------
+
+Please go to [http://labix.org/mgo](http://labix.org/mgo) for all project details.
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/auth.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/auth.go
new file mode 100644
index 00000000000..dc26e52f583
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/auth.go
@@ -0,0 +1,467 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "sync"
+
+ "gopkg.in/mgo.v2/bson"
+ "gopkg.in/mgo.v2/internal/scram"
+)
+
+type authCmd struct {
+ Authenticate int
+
+ Nonce string
+ User string
+ Key string
+}
+
+type startSaslCmd struct {
+ StartSASL int `bson:"startSasl"`
+}
+
+type authResult struct {
+ ErrMsg string
+ Ok bool
+}
+
+type getNonceCmd struct {
+ GetNonce int
+}
+
+type getNonceResult struct {
+ Nonce string
+ Err string "$err"
+ Code int
+}
+
+type logoutCmd struct {
+ Logout int
+}
+
+type saslCmd struct {
+ Start int `bson:"saslStart,omitempty"`
+ Continue int `bson:"saslContinue,omitempty"`
+ ConversationId int `bson:"conversationId,omitempty"`
+ Mechanism string `bson:"mechanism,omitempty"`
+ Payload []byte
+}
+
+type saslResult struct {
+ Ok bool `bson:"ok"`
+ NotOk bool `bson:"code"` // Server <= 2.3.2 returns ok=1 & code>0 on errors (WTF?)
+ Done bool
+
+ ConversationId int `bson:"conversationId"`
+ Payload []byte
+ ErrMsg string
+}
+
+type saslStepper interface {
+ Step(serverData []byte) (clientData []byte, done bool, err error)
+ Close()
+}
+
+func (socket *mongoSocket) getNonce() (nonce string, err error) {
+ socket.Lock()
+ for socket.cachedNonce == "" && socket.dead == nil {
+ debugf("Socket %p to %s: waiting for nonce", socket, socket.addr)
+ socket.gotNonce.Wait()
+ }
+ if socket.cachedNonce == "mongos" {
+ socket.Unlock()
+ return "", errors.New("Can't authenticate with mongos; see http://j.mp/mongos-auth")
+ }
+ debugf("Socket %p to %s: got nonce", socket, socket.addr)
+ nonce, err = socket.cachedNonce, socket.dead
+ socket.cachedNonce = ""
+ socket.Unlock()
+ if err != nil {
+ nonce = ""
+ }
+ return
+}
+
+func (socket *mongoSocket) resetNonce() {
+ debugf("Socket %p to %s: requesting a new nonce", socket, socket.addr)
+ op := &queryOp{}
+ op.query = &getNonceCmd{GetNonce: 1}
+ op.collection = "admin.$cmd"
+ op.limit = -1
+ op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
+ if err != nil {
+ socket.kill(errors.New("getNonce: "+err.Error()), true)
+ return
+ }
+ result := &getNonceResult{}
+ err = bson.Unmarshal(docData, &result)
+ if err != nil {
+ socket.kill(errors.New("Failed to unmarshal nonce: "+err.Error()), true)
+ return
+ }
+ debugf("Socket %p to %s: nonce unmarshalled: %#v", socket, socket.addr, result)
+ if result.Code == 13390 {
+ // mongos doesn't yet support auth (see http://j.mp/mongos-auth)
+ result.Nonce = "mongos"
+ } else if result.Nonce == "" {
+ var msg string
+ if result.Err != "" {
+ msg = fmt.Sprintf("Got an empty nonce: %s (%d)", result.Err, result.Code)
+ } else {
+ msg = "Got an empty nonce"
+ }
+ socket.kill(errors.New(msg), true)
+ return
+ }
+ socket.Lock()
+ if socket.cachedNonce != "" {
+ socket.Unlock()
+ panic("resetNonce: nonce already cached")
+ }
+ socket.cachedNonce = result.Nonce
+ socket.gotNonce.Signal()
+ socket.Unlock()
+ }
+ err := socket.Query(op)
+ if err != nil {
+ socket.kill(errors.New("resetNonce: "+err.Error()), true)
+ }
+}
+
+func (socket *mongoSocket) Login(cred Credential) error {
+ socket.Lock()
+ if cred.Mechanism == "" && socket.serverInfo.MaxWireVersion >= 3 {
+ cred.Mechanism = "SCRAM-SHA-1"
+ }
+ for _, sockCred := range socket.creds {
+ if sockCred == cred {
+ debugf("Socket %p to %s: login: db=%q user=%q (already logged in)", socket, socket.addr, cred.Source, cred.Username)
+ socket.Unlock()
+ return nil
+ }
+ }
+ if socket.dropLogout(cred) {
+ debugf("Socket %p to %s: login: db=%q user=%q (cached)", socket, socket.addr, cred.Source, cred.Username)
+ socket.creds = append(socket.creds, cred)
+ socket.Unlock()
+ return nil
+ }
+ socket.Unlock()
+
+ debugf("Socket %p to %s: login: db=%q user=%q", socket, socket.addr, cred.Source, cred.Username)
+
+ var err error
+ switch cred.Mechanism {
+ case "", "MONGODB-CR", "MONGO-CR": // Name changed to MONGODB-CR in SERVER-8501.
+ err = socket.loginClassic(cred)
+ case "PLAIN":
+ err = socket.loginPlain(cred)
+ case "MONGODB-X509":
+ err = socket.loginX509(cred)
+ default:
+ // Try SASL for everything else, if it is available.
+ err = socket.loginSASL(cred)
+ }
+
+ if err != nil {
+ debugf("Socket %p to %s: login error: %s", socket, socket.addr, err)
+ } else {
+ debugf("Socket %p to %s: login successful", socket, socket.addr)
+ }
+ return err
+}
+
+func (socket *mongoSocket) loginClassic(cred Credential) error {
+ // Note that this only works properly because this function is
+ // synchronous, which means the nonce won't get reset while we're
+ // using it and any other login requests will block waiting for a
+ // new nonce provided in the defer call below.
+ nonce, err := socket.getNonce()
+ if err != nil {
+ return err
+ }
+ defer socket.resetNonce()
+
+ psum := md5.New()
+ psum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
+
+ ksum := md5.New()
+ ksum.Write([]byte(nonce + cred.Username))
+ ksum.Write([]byte(hex.EncodeToString(psum.Sum(nil))))
+
+ key := hex.EncodeToString(ksum.Sum(nil))
+
+ cmd := authCmd{Authenticate: 1, User: cred.Username, Nonce: nonce, Key: key}
+ res := authResult{}
+ return socket.loginRun(cred.Source, &cmd, &res, func() error {
+ if !res.Ok {
+ return errors.New(res.ErrMsg)
+ }
+ socket.Lock()
+ socket.dropAuth(cred.Source)
+ socket.creds = append(socket.creds, cred)
+ socket.Unlock()
+ return nil
+ })
+}
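+
+// mongodbCRKeySketch is an illustrative restatement (an editor's addition,
+// not part of the upstream file) of the MONGODB-CR digest that loginClassic
+// computes inline above:
+//
+//	key = md5hex(nonce + username + md5hex(username + ":mongo:" + password))
+//
+// It uses only the crypto/md5 and encoding/hex imports already present.
+func mongodbCRKeySketch(nonce, username, password string) string {
+	psum := md5.Sum([]byte(username + ":mongo:" + password))
+	ksum := md5.Sum([]byte(nonce + username + hex.EncodeToString(psum[:])))
+	return hex.EncodeToString(ksum[:])
+}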
+
+type authX509Cmd struct {
+ Authenticate int
+ User string
+ Mechanism string
+}
+
+func (socket *mongoSocket) loginX509(cred Credential) error {
+ cmd := authX509Cmd{Authenticate: 1, User: cred.Username, Mechanism: "MONGODB-X509"}
+ res := authResult{}
+ return socket.loginRun(cred.Source, &cmd, &res, func() error {
+ if !res.Ok {
+ return errors.New(res.ErrMsg)
+ }
+ socket.Lock()
+ socket.dropAuth(cred.Source)
+ socket.creds = append(socket.creds, cred)
+ socket.Unlock()
+ return nil
+ })
+}
+
+func (socket *mongoSocket) loginPlain(cred Credential) error {
+ cmd := saslCmd{Start: 1, Mechanism: "PLAIN", Payload: []byte("\x00" + cred.Username + "\x00" + cred.Password)}
+ res := authResult{}
+ return socket.loginRun(cred.Source, &cmd, &res, func() error {
+ if !res.Ok {
+ return errors.New(res.ErrMsg)
+ }
+ socket.Lock()
+ socket.dropAuth(cred.Source)
+ socket.creds = append(socket.creds, cred)
+ socket.Unlock()
+ return nil
+ })
+}
+
+func (socket *mongoSocket) loginSASL(cred Credential) error {
+ var sasl saslStepper
+ var err error
+ if cred.Mechanism == "SCRAM-SHA-1" {
+ // SCRAM is handled without external libraries.
+ sasl = saslNewScram(cred)
+ } else if len(cred.ServiceHost) > 0 {
+ sasl, err = saslNew(cred, cred.ServiceHost)
+ } else {
+ sasl, err = saslNew(cred, socket.Server().Addr)
+ }
+ if err != nil {
+ return err
+ }
+ defer sasl.Close()
+
+	// The goal of this logic is to carry a locked socket until the local
+	// SASL step confirms the auth is valid; the socket needs to be locked
+	// so that concurrent actions cannot leave it in an auth state that
+	// doesn't reflect the operations that actually took place (as a
+	// simple case, imagine a login=>logout pair reordered into
+	// logout=>login).
+ //
+ // The logic below works because the lock func isn't called concurrently.
+ locked := false
+ lock := func(b bool) {
+ if locked != b {
+ locked = b
+ if b {
+ socket.Lock()
+ } else {
+ socket.Unlock()
+ }
+ }
+ }
+
+ lock(true)
+ defer lock(false)
+
+ start := 1
+ cmd := saslCmd{}
+ res := saslResult{}
+ for {
+ payload, done, err := sasl.Step(res.Payload)
+ if err != nil {
+ return err
+ }
+ if done && res.Done {
+ socket.dropAuth(cred.Source)
+ socket.creds = append(socket.creds, cred)
+ break
+ }
+ lock(false)
+
+ cmd = saslCmd{
+ Start: start,
+ Continue: 1 - start,
+ ConversationId: res.ConversationId,
+ Mechanism: cred.Mechanism,
+ Payload: payload,
+ }
+ start = 0
+ err = socket.loginRun(cred.Source, &cmd, &res, func() error {
+ // See the comment on lock for why this is necessary.
+ lock(true)
+ if !res.Ok || res.NotOk {
+ return fmt.Errorf("server returned error on SASL authentication step: %s", res.ErrMsg)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ if done && res.Done {
+ socket.dropAuth(cred.Source)
+ socket.creds = append(socket.creds, cred)
+ break
+ }
+ }
+
+ return nil
+}
+
+func saslNewScram(cred Credential) *saslScram {
+ credsum := md5.New()
+ credsum.Write([]byte(cred.Username + ":mongo:" + cred.Password))
+ client := scram.NewClient(sha1.New, cred.Username, hex.EncodeToString(credsum.Sum(nil)))
+ return &saslScram{cred: cred, client: client}
+}
+
+type saslScram struct {
+ cred Credential
+ client *scram.Client
+}
+
+func (s *saslScram) Close() {}
+
+func (s *saslScram) Step(serverData []byte) (clientData []byte, done bool, err error) {
+ more := s.client.Step(serverData)
+ return s.client.Out(), !more, s.client.Err()
+}
+
+func (socket *mongoSocket) loginRun(db string, query, result interface{}, f func() error) error {
+ var mutex sync.Mutex
+ var replyErr error
+ mutex.Lock()
+
+ op := queryOp{}
+ op.query = query
+ op.collection = db + ".$cmd"
+ op.limit = -1
+ op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
+ defer mutex.Unlock()
+
+ if err != nil {
+ replyErr = err
+ return
+ }
+
+ err = bson.Unmarshal(docData, result)
+ if err != nil {
+ replyErr = err
+ } else {
+ // Must handle this within the read loop for the socket, so
+ // that concurrent login requests are properly ordered.
+ replyErr = f()
+ }
+ }
+
+ err := socket.Query(&op)
+ if err != nil {
+ return err
+ }
+ mutex.Lock() // Wait.
+ return replyErr
+}
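+
+// latchSketch isolates the synchronization idiom used by loginRun above (an
+// editor's addition, not part of the upstream file): a plain sync.Mutex acts
+// as a one-shot latch, locked before the async query is issued, unlocked by
+// the reply callback, and locked again to block until the reply is handled.
+func latchSketch() {
+	var mutex sync.Mutex
+	mutex.Lock() // latch closed before the async operation starts
+	go func() {
+		// This goroutine stands in for the socket read loop calling replyFunc.
+		mutex.Unlock() // the reply opens the latch
+	}()
+	mutex.Lock() // Wait, exactly as loginRun does.
+}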
+
+func (socket *mongoSocket) Logout(db string) {
+ socket.Lock()
+ cred, found := socket.dropAuth(db)
+ if found {
+ debugf("Socket %p to %s: logout: db=%q (flagged)", socket, socket.addr, db)
+ socket.logout = append(socket.logout, cred)
+ }
+ socket.Unlock()
+}
+
+func (socket *mongoSocket) LogoutAll() {
+ socket.Lock()
+ if l := len(socket.creds); l > 0 {
+ debugf("Socket %p to %s: logout all (flagged %d)", socket, socket.addr, l)
+ socket.logout = append(socket.logout, socket.creds...)
+ socket.creds = socket.creds[0:0]
+ }
+ socket.Unlock()
+}
+
+func (socket *mongoSocket) flushLogout() (ops []interface{}) {
+ socket.Lock()
+ if l := len(socket.logout); l > 0 {
+ debugf("Socket %p to %s: logout all (flushing %d)", socket, socket.addr, l)
+ for i := 0; i != l; i++ {
+ op := queryOp{}
+ op.query = &logoutCmd{1}
+ op.collection = socket.logout[i].Source + ".$cmd"
+ op.limit = -1
+ ops = append(ops, &op)
+ }
+ socket.logout = socket.logout[0:0]
+ }
+ socket.Unlock()
+ return
+}
+
+func (socket *mongoSocket) dropAuth(db string) (cred Credential, found bool) {
+ for i, sockCred := range socket.creds {
+ if sockCred.Source == db {
+ copy(socket.creds[i:], socket.creds[i+1:])
+ socket.creds = socket.creds[:len(socket.creds)-1]
+ return sockCred, true
+ }
+ }
+ return cred, false
+}
+
+func (socket *mongoSocket) dropLogout(cred Credential) (found bool) {
+ for i, sockCred := range socket.logout {
+ if sockCred == cred {
+ copy(socket.logout[i:], socket.logout[i+1:])
+ socket.logout = socket.logout[:len(socket.logout)-1]
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/auth_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/auth_test.go
new file mode 100644
index 00000000000..9952734757e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/auth_test.go
@@ -0,0 +1,1180 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+ "crypto/tls"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/url"
+ "os"
+ "runtime"
+ "sync"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/mgo.v2"
+)
+
+func (s *S) TestAuthLoginDatabase(c *C) {
+ // Test both with a normal database and with an authenticated shard.
+ for _, addr := range []string{"localhost:40002", "localhost:40203"} {
+ session, err := mgo.Dial(addr)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+
+ admindb := session.DB("admin")
+
+ err = admindb.Login("root", "wrong")
+ c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+ }
+}
+
+func (s *S) TestAuthLoginSession(c *C) {
+ // Test both with a normal database and with an authenticated shard.
+ for _, addr := range []string{"localhost:40002", "localhost:40203"} {
+ session, err := mgo.Dial(addr)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+
+ cred := mgo.Credential{
+ Username: "root",
+ Password: "wrong",
+ }
+ err = session.Login(&cred)
+ c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+
+ cred.Password = "rapadura"
+
+ err = session.Login(&cred)
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+ }
+}
+
+func (s *S) TestAuthLoginLogout(c *C) {
+ // Test both with a normal database and with an authenticated shard.
+ for _, addr := range []string{"localhost:40002", "localhost:40203"} {
+ session, err := mgo.Dial(addr)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ admindb.Logout()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+
+ // Must have dropped auth from the session too.
+ session = session.Copy()
+ defer session.Close()
+
+ coll = session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+ }
+}
+
+func (s *S) TestAuthLoginLogoutAll(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ session.LogoutAll()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+
+ // Must have dropped auth from the session too.
+ session = session.Copy()
+ defer session.Close()
+
+ coll = session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+}
+
+func (s *S) TestAuthUpsertUserErrors(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ mydb := session.DB("mydb")
+
+ err = mydb.UpsertUser(&mgo.User{})
+ c.Assert(err, ErrorMatches, "user has no Username")
+
+ err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", UserSource: "source"})
+ c.Assert(err, ErrorMatches, "user has both Password/PasswordHash and UserSource set")
+
+ err = mydb.UpsertUser(&mgo.User{Username: "user", Password: "pass", OtherDBRoles: map[string][]mgo.Role{"db": nil}})
+ c.Assert(err, ErrorMatches, "user with OtherDBRoles is only supported in the admin or \\$external databases")
+}
+
+func (s *S) TestAuthUpsertUser(c *C) {
+ if !s.versionAtLeast(2, 4) {
+ c.Skip("UpsertUser only works on 2.4+")
+ }
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ mydb := session.DB("mydb")
+
+ ruser := &mgo.User{
+ Username: "myruser",
+ Password: "mypass",
+ Roles: []mgo.Role{mgo.RoleRead},
+ }
+ rwuser := &mgo.User{
+ Username: "myrwuser",
+ Password: "mypass",
+ Roles: []mgo.Role{mgo.RoleReadWrite},
+ }
+
+ err = mydb.UpsertUser(ruser)
+ c.Assert(err, IsNil)
+ err = mydb.UpsertUser(rwuser)
+ c.Assert(err, IsNil)
+
+ err = mydb.Login("myruser", "mypass")
+ c.Assert(err, IsNil)
+
+ admindb.Logout()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+ err = mydb.Login("myrwuser", "mypass")
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ myotherdb := session.DB("myotherdb")
+
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ // Test UserSource.
+ rwuserother := &mgo.User{
+ Username: "myrwuser",
+ UserSource: "mydb",
+ Roles: []mgo.Role{mgo.RoleRead},
+ }
+
+ err = myotherdb.UpsertUser(rwuserother)
+ if s.versionAtLeast(2, 6) {
+ c.Assert(err, ErrorMatches, `MongoDB 2.6\+ does not support the UserSource setting`)
+ return
+ }
+ c.Assert(err, IsNil)
+
+ admindb.Logout()
+
+ // Test indirection via UserSource: we can't write to it, because
+ // the roles for myrwuser are different there.
+ othercoll := myotherdb.C("myothercoll")
+ err = othercoll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+ // Reading works, though.
+ err = othercoll.Find(nil).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ // Can't login directly into the database using UserSource, though.
+ err = myotherdb.Login("myrwuser", "mypass")
+ c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+}
+
+func (s *S) TestAuthUpsertUserOtherDBRoles(c *C) {
+ if !s.versionAtLeast(2, 4) {
+ c.Skip("UpsertUser only works on 2.4+")
+ }
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ ruser := &mgo.User{
+ Username: "myruser",
+ Password: "mypass",
+ OtherDBRoles: map[string][]mgo.Role{"mydb": []mgo.Role{mgo.RoleRead}},
+ }
+
+ err = admindb.UpsertUser(ruser)
+ c.Assert(err, IsNil)
+ defer admindb.RemoveUser("myruser")
+
+ admindb.Logout()
+ err = admindb.Login("myruser", "mypass")
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+ err = coll.Find(nil).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+func (s *S) TestAuthUpsertUserUpdates(c *C) {
+ if !s.versionAtLeast(2, 4) {
+ c.Skip("UpsertUser only works on 2.4+")
+ }
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ mydb := session.DB("mydb")
+
+ // Insert a user that can read.
+ user := &mgo.User{
+ Username: "myruser",
+ Password: "mypass",
+ Roles: []mgo.Role{mgo.RoleRead},
+ }
+ err = mydb.UpsertUser(user)
+ c.Assert(err, IsNil)
+
+ // Now update the user password.
+ user = &mgo.User{
+ Username: "myruser",
+ Password: "mynewpass",
+ }
+ err = mydb.UpsertUser(user)
+ c.Assert(err, IsNil)
+
+ // Login with the new user.
+ usession, err := mgo.Dial("myruser:mynewpass@localhost:40002/mydb")
+ c.Assert(err, IsNil)
+ defer usession.Close()
+
+ // Can read, but not write.
+ err = usession.DB("mydb").C("mycoll").Find(nil).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+ err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+ // Update the user role.
+ user = &mgo.User{
+ Username: "myruser",
+ Roles: []mgo.Role{mgo.RoleReadWrite},
+ }
+ err = mydb.UpsertUser(user)
+ c.Assert(err, IsNil)
+
+ // Dial again to ensure the password hasn't changed.
+ usession, err = mgo.Dial("myruser:mynewpass@localhost:40002/mydb")
+ c.Assert(err, IsNil)
+ defer usession.Close()
+
+ // Now it can write.
+ err = usession.DB("mydb").C("mycoll").Insert(M{"ok": 1})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthAddUser(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ mydb := session.DB("mydb")
+ err = mydb.AddUser("myruser", "mypass", true)
+ c.Assert(err, IsNil)
+ err = mydb.AddUser("mywuser", "mypass", false)
+ c.Assert(err, IsNil)
+
+ err = mydb.Login("myruser", "mypass")
+ c.Assert(err, IsNil)
+
+ admindb.Logout()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+ err = mydb.Login("mywuser", "mypass")
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthAddUserReplaces(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ mydb := session.DB("mydb")
+ err = mydb.AddUser("myuser", "myoldpass", false)
+ c.Assert(err, IsNil)
+ err = mydb.AddUser("myuser", "mynewpass", true)
+ c.Assert(err, IsNil)
+
+ admindb.Logout()
+
+ err = mydb.Login("myuser", "myoldpass")
+ c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+ err = mydb.Login("myuser", "mynewpass")
+ c.Assert(err, IsNil)
+
+ // ReadOnly flag was changed too.
+ err = mydb.C("mycoll").Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+}
+
+func (s *S) TestAuthRemoveUser(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ mydb := session.DB("mydb")
+ err = mydb.AddUser("myuser", "mypass", true)
+ c.Assert(err, IsNil)
+ err = mydb.RemoveUser("myuser")
+ c.Assert(err, IsNil)
+ err = mydb.RemoveUser("myuser")
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = mydb.Login("myuser", "mypass")
+ c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+}
+
+func (s *S) TestAuthLoginTwiceDoesNothing(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ oldStats := mgo.GetStats()
+
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ newStats := mgo.GetStats()
+ c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
+}
+
+func (s *S) TestAuthLoginLogoutLoginDoesNothing(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ oldStats := mgo.GetStats()
+
+ admindb.Logout()
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ newStats := mgo.GetStats()
+ c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
+}
+
+func (s *S) TestAuthLoginSwitchUser(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ err = admindb.Login("reader", "rapadura")
+ c.Assert(err, IsNil)
+
+ // Can't write.
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+ // But can read.
+ result := struct{ N int }{}
+ err = coll.Find(nil).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 1)
+}
+
+func (s *S) TestAuthLoginChangePassword(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ mydb := session.DB("mydb")
+ err = mydb.AddUser("myuser", "myoldpass", false)
+ c.Assert(err, IsNil)
+
+ err = mydb.Login("myuser", "myoldpass")
+ c.Assert(err, IsNil)
+
+ err = mydb.AddUser("myuser", "mynewpass", true)
+ c.Assert(err, IsNil)
+
+ err = mydb.Login("myuser", "mynewpass")
+ c.Assert(err, IsNil)
+
+ admindb.Logout()
+
+ // The second login must be in effect, which means read-only.
+ err = mydb.C("mycoll").Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+}
+
+func (s *S) TestAuthLoginCachingWithSessionRefresh(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ session.Refresh()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthLoginCachingWithSessionCopy(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ session = session.Copy()
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthLoginCachingWithSessionClone(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ session = session.Clone()
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthLoginCachingWithNewSession(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ session = session.New()
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|need to login|not authorized .*")
+}
+
+func (s *S) TestAuthLoginCachingAcrossPool(c *C) {
+ // Logins are cached even when the connection goes back
+ // into the pool.
+
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ // Add another user to test the logout case at the same time.
+ mydb := session.DB("mydb")
+ err = mydb.AddUser("myuser", "mypass", false)
+ c.Assert(err, IsNil)
+
+ err = mydb.Login("myuser", "mypass")
+ c.Assert(err, IsNil)
+
+ // Logout root explicitly, to test both cases.
+ admindb.Logout()
+
+ // Give socket back to pool.
+ session.Refresh()
+
+ // Brand new session, should use socket from the pool.
+ other := session.New()
+ defer other.Close()
+
+ oldStats := mgo.GetStats()
+
+ err = other.DB("admin").Login("root", "rapadura")
+ c.Assert(err, IsNil)
+ err = other.DB("mydb").Login("myuser", "mypass")
+ c.Assert(err, IsNil)
+
+ // Both logins were cached, so no ops.
+ newStats := mgo.GetStats()
+ c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
+
+ // And they actually worked.
+ err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ other.DB("admin").Logout()
+
+ err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthLoginCachingAcrossPoolWithLogout(c *C) {
+ // Now verify that logouts are properly flushed if they
+ // are not revalidated after leaving the pool.
+
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ // Add another user to test the logout case at the same time.
+ mydb := session.DB("mydb")
+ err = mydb.AddUser("myuser", "mypass", true)
+ c.Assert(err, IsNil)
+
+ err = mydb.Login("myuser", "mypass")
+ c.Assert(err, IsNil)
+
+ // Just some data to query later.
+ err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ // Give socket back to pool.
+ session.Refresh()
+
+ // Brand new session, should use socket from the pool.
+ other := session.New()
+ defer other.Close()
+
+ oldStats := mgo.GetStats()
+
+ err = other.DB("mydb").Login("myuser", "mypass")
+ c.Assert(err, IsNil)
+
+ // Login was cached, so no ops.
+ newStats := mgo.GetStats()
+ c.Assert(newStats.SentOps, Equals, oldStats.SentOps)
+
+ // Can't write, since root was implicitly logged out when the
+ // socket went back into the pool and was not revalidated.
+ err = other.DB("mydb").C("mycoll").Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+ // But can read due to the revalidated myuser login.
+ result := struct{ N int }{}
+ err = other.DB("mydb").C("mycoll").Find(nil).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 1)
+}
+
+func (s *S) TestAuthEventual(c *C) {
+ // Eventual sessions don't keep sockets around, so they are
+ // an interesting test case.
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ admindb := session.DB("admin")
+ err = admindb.Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ var wg sync.WaitGroup
+ wg.Add(20)
+
+ for i := 0; i != 10; i++ {
+ go func() {
+ defer wg.Done()
+ var result struct{ N int }
+ err := session.DB("mydb").C("mycoll").Find(nil).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 1)
+ }()
+ }
+
+ for i := 0; i != 10; i++ {
+ go func() {
+ defer wg.Done()
+ err := session.DB("mydb").C("mycoll").Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+ }()
+ }
+
+ wg.Wait()
+}
+
+func (s *S) TestAuthURL(c *C) {
+ session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthURLWrongCredentials(c *C) {
+ session, err := mgo.Dial("mongodb://root:wrong@localhost:40002/")
+ if session != nil {
+ session.Close()
+ }
+ c.Assert(err, ErrorMatches, "auth fail(s|ed)|.*Authentication failed.")
+ c.Assert(session, IsNil)
+}
+
+func (s *S) TestAuthURLWithNewSession(c *C) {
+ // When authentication is in the URL, the new session will
+ // actually carry it on as well, even if logged out explicitly.
+ session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002/")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.DB("admin").Logout()
+
+ // Do it twice to ensure it passes the needed data on.
+ session = session.New()
+ defer session.Close()
+ session = session.New()
+ defer session.Close()
+
+ err = session.DB("mydb").C("mycoll").Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestAuthURLWithDatabase(c *C) {
+ session, err := mgo.Dial("mongodb://root:rapadura@localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ mydb := session.DB("mydb")
+ err = mydb.AddUser("myruser", "mypass", true)
+ c.Assert(err, IsNil)
+
+ // Test once with database, and once with source.
+ for i := 0; i < 2; i++ {
+ var url string
+ if i == 0 {
+ url = "mongodb://myruser:mypass@localhost:40002/mydb"
+ } else {
+ url = "mongodb://myruser:mypass@localhost:40002/admin?authSource=mydb"
+ }
+ usession, err := mgo.Dial(url)
+ c.Assert(err, IsNil)
+ defer usession.Close()
+
+ ucoll := usession.DB("mydb").C("mycoll")
+ err = ucoll.FindId(0).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+ err = ucoll.Insert(M{"n": 1})
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+ }
+}
+
+func (s *S) TestDefaultDatabase(c *C) {
+ tests := []struct{ url, db string }{
+ {"mongodb://root:rapadura@localhost:40002", "test"},
+ {"mongodb://root:rapadura@localhost:40002/admin", "admin"},
+ {"mongodb://localhost:40001", "test"},
+ {"mongodb://localhost:40001/", "test"},
+ {"mongodb://localhost:40001/mydb", "mydb"},
+ }
+
+ for _, test := range tests {
+ session, err := mgo.Dial(test.url)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ c.Logf("test: %#v", test)
+ c.Assert(session.DB("").Name, Equals, test.db)
+
+ scopy := session.Copy()
+ c.Check(scopy.DB("").Name, Equals, test.db)
+ scopy.Close()
+ }
+}
+
+func (s *S) TestAuthDirect(c *C) {
+ // Direct connections must work to the master and slaves.
+ for _, port := range []string{"40031", "40032", "40033"} {
+ url := fmt.Sprintf("mongodb://root:rapadura@localhost:%s/?connect=direct", port)
+ session, err := mgo.Dial(url)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Monotonic, true)
+
+ var result struct{}
+ err = session.DB("mydb").C("mycoll").Find(nil).One(&result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+ }
+}
+
+func (s *S) TestAuthDirectWithLogin(c *C) {
+ // Direct connections must work to the master and slaves.
+ for _, port := range []string{"40031", "40032", "40033"} {
+ url := fmt.Sprintf("mongodb://localhost:%s/?connect=direct", port)
+ session, err := mgo.Dial(url)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Monotonic, true)
+ session.SetSyncTimeout(3 * time.Second)
+
+ err = session.DB("admin").Login("root", "rapadura")
+ c.Assert(err, IsNil)
+
+ var result struct{}
+ err = session.DB("mydb").C("mycoll").Find(nil).One(&result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+ }
+}
+
+func (s *S) TestAuthScramSha1Cred(c *C) {
+ if !s.versionAtLeast(2, 7, 7) {
+ c.Skip("SCRAM-SHA-1 tests depend on 2.7.7")
+ }
+ cred := &mgo.Credential{
+ Username: "root",
+ Password: "rapadura",
+ Mechanism: "SCRAM-SHA-1",
+ Source: "admin",
+ }
+ host := "localhost:40002"
+ c.Logf("Connecting to %s...", host)
+ session, err := mgo.Dial(host)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ mycoll := session.DB("admin").C("mycoll")
+
+ c.Logf("Connected! Testing the need for authentication...")
+ err = mycoll.Find(nil).One(nil)
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+ c.Logf("Authenticating...")
+ err = session.Login(cred)
+ c.Assert(err, IsNil)
+ c.Logf("Authenticated!")
+
+ c.Logf("Connected! Testing the need for authentication...")
+ err = mycoll.Find(nil).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+func (s *S) TestAuthScramSha1URL(c *C) {
+ if !s.versionAtLeast(2, 7, 7) {
+ c.Skip("SCRAM-SHA-1 tests depend on 2.7.7")
+ }
+ host := "localhost:40002"
+ c.Logf("Connecting to %s...", host)
+ session, err := mgo.Dial(fmt.Sprintf("root:rapadura@%s?authMechanism=SCRAM-SHA-1", host))
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ mycoll := session.DB("admin").C("mycoll")
+
+ c.Logf("Connected! Testing the need for authentication...")
+ err = mycoll.Find(nil).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+func (s *S) TestAuthX509Cred(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ binfo, err := session.BuildInfo()
+ c.Assert(err, IsNil)
+ if binfo.OpenSSLVersion == "" {
+ c.Skip("server does not support SSL")
+ }
+
+ clientCertPEM, err := ioutil.ReadFile("harness/certs/client.pem")
+ c.Assert(err, IsNil)
+
+ clientCert, err := tls.X509KeyPair(clientCertPEM, clientCertPEM)
+ c.Assert(err, IsNil)
+
+ tlsConfig := &tls.Config{
+ // Isolating tests to client certs, don't care about server validation.
+ InsecureSkipVerify: true,
+ Certificates: []tls.Certificate{clientCert},
+ }
+
+ var host = "localhost:40003"
+ c.Logf("Connecting to %s...", host)
+ session, err = mgo.DialWithInfo(&mgo.DialInfo{
+ Addrs: []string{host},
+ DialServer: func(addr *mgo.ServerAddr) (net.Conn, error) {
+ return tls.Dial("tcp", addr.String(), tlsConfig)
+ },
+ })
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ err = session.Login(&mgo.Credential{Username: "root", Password: "rapadura"})
+ c.Assert(err, IsNil)
+
+ // This needs to be kept in sync with client.pem
+ x509Subject := "CN=localhost,OU=Client,O=MGO,L=MGO,ST=MGO,C=GO"
+
+ externalDB := session.DB("$external")
+ x509User := mgo.User{
+ Username: x509Subject,
+ OtherDBRoles: map[string][]mgo.Role{"admin": []mgo.Role{mgo.RoleRoot}},
+ }
+ err = externalDB.UpsertUser(&x509User)
+ c.Assert(err, IsNil)
+
+ session.LogoutAll()
+
+ c.Logf("Connected! Ensuring authentication is required...")
+ names, err := session.DatabaseNames()
+ c.Assert(err, ErrorMatches, "not authorized .*")
+
+ cred := &mgo.Credential{
+ Username: x509Subject,
+ Mechanism: "MONGODB-X509",
+ Source: "$external",
+ }
+
+ c.Logf("Authenticating...")
+ err = session.Login(cred)
+ c.Assert(err, IsNil)
+ c.Logf("Authenticated!")
+
+ names, err = session.DatabaseNames()
+ c.Assert(err, IsNil)
+ c.Assert(len(names) > 0, Equals, true)
+}
+
+var (
+ plainFlag = flag.String("plain", "", "Host to test PLAIN authentication against (depends on custom environment)")
+ plainUser = "einstein"
+ plainPass = "password"
+)
+
+func (s *S) TestAuthPlainCred(c *C) {
+ if *plainFlag == "" {
+ c.Skip("no -plain")
+ }
+ cred := &mgo.Credential{
+ Username: plainUser,
+ Password: plainPass,
+ Source: "$external",
+ Mechanism: "PLAIN",
+ }
+ c.Logf("Connecting to %s...", *plainFlag)
+ session, err := mgo.Dial(*plainFlag)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ records := session.DB("records").C("records")
+
+ c.Logf("Connected! Testing the need for authentication...")
+ err = records.Find(nil).One(nil)
+ c.Assert(err, ErrorMatches, "unauthorized|not authorized .*")
+
+ c.Logf("Authenticating...")
+ err = session.Login(cred)
+ c.Assert(err, IsNil)
+ c.Logf("Authenticated!")
+
+ c.Logf("Connected! Testing the need for authentication...")
+ err = records.Find(nil).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+func (s *S) TestAuthPlainURL(c *C) {
+ if *plainFlag == "" {
+ c.Skip("no -plain")
+ }
+ c.Logf("Connecting to %s...", *plainFlag)
+ session, err := mgo.Dial(fmt.Sprintf("%s:%s@%s?authMechanism=PLAIN", url.QueryEscape(plainUser), url.QueryEscape(plainPass), *plainFlag))
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ c.Logf("Connected! Testing the need for authentication...")
+ err = session.DB("records").C("records").Find(nil).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+var (
+ kerberosFlag = flag.Bool("kerberos", false, "Test Kerberos authentication (depends on custom environment)")
+ kerberosHost = "ldaptest.10gen.cc"
+ kerberosUser = "drivers@LDAPTEST.10GEN.CC"
+
+ winKerberosPasswordEnv = "MGO_KERBEROS_PASSWORD"
+)
+
+// Kerberos has its own suite because it talks to a remote server
+// that is prepared to authenticate against a kerberos deployment.
+type KerberosSuite struct{}
+
+var _ = Suite(&KerberosSuite{})
+
+func (kerberosSuite *KerberosSuite) SetUpSuite(c *C) {
+ mgo.SetDebug(true)
+ mgo.SetStats(true)
+}
+
+func (kerberosSuite *KerberosSuite) TearDownSuite(c *C) {
+ mgo.SetDebug(false)
+ mgo.SetStats(false)
+}
+
+func (kerberosSuite *KerberosSuite) SetUpTest(c *C) {
+ mgo.SetLogger((*cLogger)(c))
+ mgo.ResetStats()
+}
+
+func (kerberosSuite *KerberosSuite) TearDownTest(c *C) {
+ mgo.SetLogger(nil)
+}
+
+func (kerberosSuite *KerberosSuite) TestAuthKerberosCred(c *C) {
+ if !*kerberosFlag {
+ c.Skip("no -kerberos")
+ }
+ cred := &mgo.Credential{
+ Username: kerberosUser,
+ Mechanism: "GSSAPI",
+ }
+ windowsAppendPasswordToCredential(cred)
+ c.Logf("Connecting to %s...", kerberosHost)
+ session, err := mgo.Dial(kerberosHost)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ c.Logf("Connected! Testing the need for authentication...")
+ n, err := session.DB("kerberos").C("test").Find(M{}).Count()
+ c.Assert(err, ErrorMatches, ".*authorized.*")
+
+ c.Logf("Authenticating...")
+ err = session.Login(cred)
+ c.Assert(err, IsNil)
+ c.Logf("Authenticated!")
+
+ n, err = session.DB("kerberos").C("test").Find(M{}).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 1)
+}
+
+func (kerberosSuite *KerberosSuite) TestAuthKerberosURL(c *C) {
+ if !*kerberosFlag {
+ c.Skip("no -kerberos")
+ }
+ c.Logf("Connecting to %s...", kerberosHost)
+ connectUri := url.QueryEscape(kerberosUser) + "@" + kerberosHost + "?authMechanism=GSSAPI"
+ if runtime.GOOS == "windows" {
+ connectUri = url.QueryEscape(kerberosUser) + ":" + url.QueryEscape(getWindowsKerberosPassword()) + "@" + kerberosHost + "?authMechanism=GSSAPI"
+ }
+ session, err := mgo.Dial(connectUri)
+ c.Assert(err, IsNil)
+ defer session.Close()
+ n, err := session.DB("kerberos").C("test").Find(M{}).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 1)
+}
+
+func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceName(c *C) {
+ if !*kerberosFlag {
+ c.Skip("no -kerberos")
+ }
+
+ wrongServiceName := "wrong"
+ rightServiceName := "mongodb"
+
+ cred := &mgo.Credential{
+ Username: kerberosUser,
+ Mechanism: "GSSAPI",
+ Service: wrongServiceName,
+ }
+ windowsAppendPasswordToCredential(cred)
+
+ c.Logf("Connecting to %s...", kerberosHost)
+ session, err := mgo.Dial(kerberosHost)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ c.Logf("Authenticating with incorrect service name...")
+ err = session.Login(cred)
+ c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*")
+
+ cred.Service = rightServiceName
+ c.Logf("Authenticating with correct service name...")
+ err = session.Login(cred)
+ c.Assert(err, IsNil)
+ c.Logf("Authenticated!")
+
+ n, err := session.DB("kerberos").C("test").Find(M{}).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 1)
+}
+
+func (kerberosSuite *KerberosSuite) TestAuthKerberosServiceHost(c *C) {
+ if !*kerberosFlag {
+ c.Skip("no -kerberos")
+ }
+
+ wrongServiceHost := "eggs.bacon.tk"
+ rightServiceHost := kerberosHost
+
+ cred := &mgo.Credential{
+ Username: kerberosUser,
+ Mechanism: "GSSAPI",
+ ServiceHost: wrongServiceHost,
+ }
+ windowsAppendPasswordToCredential(cred)
+
+ c.Logf("Connecting to %s...", kerberosHost)
+ session, err := mgo.Dial(kerberosHost)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ c.Logf("Authenticating with incorrect service host...")
+ err = session.Login(cred)
+ c.Assert(err, ErrorMatches, ".*@LDAPTEST.10GEN.CC not found.*")
+
+ cred.ServiceHost = rightServiceHost
+ c.Logf("Authenticating with correct service host...")
+ err = session.Login(cred)
+ c.Assert(err, IsNil)
+ c.Logf("Authenticated!")
+
+ n, err := session.DB("kerberos").C("test").Find(M{}).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 1)
+}
+
+// No kinit on SSPI-style Kerberos, so we need to provide a password. In order
+// to avoid inlining the password, require it to be set as an environment
+// variable, for instance: `SET MGO_KERBEROS_PASSWORD=this_isnt_the_password`
+func getWindowsKerberosPassword() string {
+ pw := os.Getenv(winKerberosPasswordEnv)
+ if pw == "" {
+ panic(fmt.Sprintf("Need to set %v environment variable to run Kerberos tests on Windows", winKerberosPasswordEnv))
+ }
+ return pw
+}
+
+func windowsAppendPasswordToCredential(cred *mgo.Credential) {
+ if runtime.GOOS == "windows" {
+ cred.Password = getWindowsKerberosPassword()
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/LICENSE b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/LICENSE
new file mode 100644
index 00000000000..890326017b8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/LICENSE
@@ -0,0 +1,25 @@
+BSON library for Go
+
+Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/bson.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/bson.go
new file mode 100644
index 00000000000..7fb7f8cae48
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/bson.go
@@ -0,0 +1,738 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package bson is an implementation of the BSON specification for Go:
+//
+// http://bsonspec.org
+//
+// It was created as part of the mgo MongoDB driver for Go, but is standalone
+// and may be used on its own without the driver.
+package bson
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/rand"
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// --------------------------------------------------------------------------
+// The public API.
+
+// A value implementing the bson.Getter interface will have its GetBSON
+// method called when the given value has to be marshalled, and the result
+// of this method will be marshaled in place of the actual object.
+//
+// If GetBSON returns a non-nil error, the marshalling procedure
+// will stop and error out with the provided value.
+type Getter interface {
+ GetBSON() (interface{}, error)
+}
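+
+// For illustration only (hypothetical type, not part of this package),
+// a Getter can substitute a surrogate value at marshalling time:
+//
+//     type Celsius float64
+//
+//     func (c Celsius) GetBSON() (interface{}, error) {
+//         return float64(c), nil
+//     }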
+
+// A value implementing the bson.Setter interface will receive the BSON
+// value via the SetBSON method during unmarshaling, and the object
+// itself will not be changed as usual.
+//
+// If setting the value works, the method should return nil or alternatively
+// bson.SetZero to set the respective field to its zero value (nil for
+// pointer types). If SetBSON returns a value of type bson.TypeError, the
+// BSON value will be omitted from a map or slice being decoded and the
+// unmarshalling will continue. If it returns any other non-nil error, the
+// unmarshalling procedure will stop and error out with the provided value.
+//
+// This interface is generally useful in pointer receivers, since the method
+// will want to change the receiver. A struct field whose type implements
+// the Setter interface doesn't have to be a pointer, though.
+//
+// Unlike the usual behavior, unmarshalling onto a value that implements a
+// Setter interface will NOT reset the value to its zero state. This allows
+// the value to decide by itself how to be unmarshalled.
+//
+// For example:
+//
+// type MyString string
+//
+// func (s *MyString) SetBSON(raw bson.Raw) error {
+// return raw.Unmarshal(s)
+// }
+//
+type Setter interface {
+ SetBSON(raw Raw) error
+}
+
+// SetZero may be returned from a SetBSON method to have the value set to
+// its respective zero value. When used in pointer values, this will set the
+// field to nil rather than to the pre-allocated value.
+var SetZero = errors.New("set to zero")
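+
+// Illustrative sketch, a variation of the MyString example above: a
+// SetBSON method may return SetZero to map an empty stored value to the
+// field's zero value:
+//
+//     func (s *MyString) SetBSON(raw bson.Raw) error {
+//         var v string
+//         if err := raw.Unmarshal(&v); err != nil {
+//             return err
+//         }
+//         if v == "" {
+//             return bson.SetZero
+//         }
+//         *s = MyString(v)
+//         return nil
+//     }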
+
+// M is a convenient alias for map[string]interface{}, useful for
+// dealing with BSON in a native way. For instance:
+//
+// bson.M{"a": 1, "b": true}
+//
+// There's no special handling for this type in addition to what's done anyway
+// for an equivalent map type. Elements in the map will be dumped in an
+// undefined order. See also the bson.D type for an ordered alternative.
+type M map[string]interface{}
+
+// D represents a BSON document containing ordered elements. For example:
+//
+// bson.D{{"a", 1}, {"b", true}}
+//
+// In some situations, such as when creating indexes for MongoDB, the order in
+// which the elements are defined is important. If the order is not important,
+// using a map is generally more comfortable. See bson.M and bson.RawD.
+type D []DocElem
+
+// DocElem is an element of the bson.D document representation.
+type DocElem struct {
+ Name string
+ Value interface{}
+}
+
+// Map returns a map out of the ordered element name/value pairs in d.
+func (d D) Map() (m M) {
+ m = make(M, len(d))
+ for _, item := range d {
+ m[item.Name] = item.Value
+ }
+ return m
+}
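+
+// For example (illustrative):
+//
+//     d := bson.D{{"a", 1}, {"b", true}}
+//     m := d.Map() // bson.M{"a": 1, "b": true}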
+
+// The Raw type represents raw unprocessed BSON documents and elements.
+// Kind is the kind of element as defined per the BSON specification, and
+// Data is the raw unprocessed data for the respective element.
+// Using this type it is possible to unmarshal or marshal values partially.
+//
+// Relevant documentation:
+//
+// http://bsonspec.org/#/specification
+//
+type Raw struct {
+ Kind byte
+ Data []byte
+}
+
+// RawD represents a BSON document containing raw unprocessed elements.
+// This low-level representation may be useful when lazily processing
+// documents of uncertain content, or when manipulating the raw content
+// documents in general.
+type RawD []RawDocElem
+
+// RawDocElem is an element of the RawD document representation. See the
+// RawD type.
+type RawDocElem struct {
+ Name string
+ Value Raw
+}
+
+// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
+// long. MongoDB documents have such an id stored in their "_id" field
+// by default.
+//
+// http://www.mongodb.org/display/DOCS/Object+IDs
+type ObjectId string
+
+// ObjectIdHex returns an ObjectId from the provided hex representation.
+// Calling this function with an invalid hex representation will
+// cause a runtime panic. See the IsObjectIdHex function.
+func ObjectIdHex(s string) ObjectId {
+ d, err := hex.DecodeString(s)
+ if err != nil || len(d) != 12 {
+ panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
+ }
+ return ObjectId(d)
+}
+
+// IsObjectIdHex returns whether s is a valid hex representation of
+// an ObjectId. See the ObjectIdHex function.
+func IsObjectIdHex(s string) bool {
+ if len(s) != 24 {
+ return false
+ }
+ _, err := hex.DecodeString(s)
+ return err == nil
+}
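+
+// For illustration: guarding ObjectIdHex with IsObjectIdHex avoids the
+// runtime panic on malformed input:
+//
+//     if bson.IsObjectIdHex(s) {
+//         id := bson.ObjectIdHex(s) // safe: s is well-formed
+//         _ = id
+//     }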
+
+// objectIdCounter is atomically incremented when generating a new ObjectId
+// with the NewObjectId function. It's used as the counter component of an id.
+var objectIdCounter uint32 = readRandomUint32()
+
+// readRandomUint32 returns a cryptographically random uint32, used as the
+// initial objectIdCounter value.
+func readRandomUint32() uint32 {
+ var b [4]byte
+ _, err := io.ReadFull(rand.Reader, b[:])
+ if err != nil {
+ panic(fmt.Errorf("cannot read random object id: %v", err))
+ }
+ return binary.LittleEndian.Uint32(b[:])
+}
+
+// machineId stores machine id generated once and used in subsequent calls
+// to NewObjectId function.
+var machineId = readMachineId()
+var processId = os.Getpid()
+
+// readMachineId generates and returns a machine id.
+// If the hostname cannot be read it falls back to random bytes, and panics
+// only if reading random data fails as well.
+func readMachineId() []byte {
+ var sum [3]byte
+ id := sum[:]
+ hostname, err1 := os.Hostname()
+ if err1 != nil {
+ _, err2 := io.ReadFull(rand.Reader, id)
+ if err2 != nil {
+ panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
+ }
+ return id
+ }
+ hw := md5.New()
+ hw.Write([]byte(hostname))
+ copy(id, hw.Sum(nil))
+ return id
+}
+
+// NewObjectId returns a new unique ObjectId.
+func NewObjectId() ObjectId {
+ var b [12]byte
+ // Timestamp, 4 bytes, big endian
+ binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
+ // Machine, first 3 bytes of md5(hostname)
+ b[4] = machineId[0]
+ b[5] = machineId[1]
+ b[6] = machineId[2]
+ // Pid, 2 bytes, specs don't specify endianness, but we use big endian.
+ b[7] = byte(processId >> 8)
+ b[8] = byte(processId)
+ // Increment, 3 bytes, big endian
+ i := atomic.AddUint32(&objectIdCounter, 1)
+ b[9] = byte(i >> 16)
+ b[10] = byte(i >> 8)
+ b[11] = byte(i)
+ return ObjectId(b[:])
+}
+
+// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
+// with the provided number of seconds from epoch UTC, and all other parts
+// filled with zeroes. It's not safe to insert a document with an id generated
+// by this method; it is useful only for queries to find documents with ids
+// generated before or after the specified timestamp.
+func NewObjectIdWithTime(t time.Time) ObjectId {
+ var b [12]byte
+ binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
+ return ObjectId(string(b[:]))
+}
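+
+// For example (illustrative; coll is a hypothetical mgo collection), such
+// ids select documents created after a given time:
+//
+//     coll.Find(bson.M{"_id": bson.M{"$gt": bson.NewObjectIdWithTime(t)}})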
+
+// String returns a hex string representation of the id.
+// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
+func (id ObjectId) String() string {
+ return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
+}
+
+// Hex returns a hex representation of the ObjectId.
+func (id ObjectId) Hex() string {
+ return hex.EncodeToString([]byte(id))
+}
+
+// MarshalJSON implements the json.Marshaler interface for bson.ObjectId.
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+ return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
+}
+
+var nullBytes = []byte("null")
+
+// UnmarshalJSON implements the json.Unmarshaler interface for *bson.ObjectId.
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+ if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
+ var v struct {
+ Id json.RawMessage `json:"$oid"`
+ Func struct {
+ Id json.RawMessage
+ } `json:"$oidFunc"`
+ }
+ err := jdec(data, &v)
+ if err == nil {
+ if len(v.Id) > 0 {
+ data = []byte(v.Id)
+ } else {
+ data = []byte(v.Func.Id)
+ }
+ }
+ }
+ if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
+ *id = ""
+ return nil
+ }
+ if len(data) != 26 || data[0] != '"' || data[25] != '"' {
+ return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s", string(data)))
+ }
+ var buf [12]byte
+ _, err := hex.Decode(buf[:], data[1:25])
+ if err != nil {
+ return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s (%s)", string(data), err))
+ }
+ *id = ObjectId(string(buf[:]))
+ return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface for bson.ObjectId.
+func (id ObjectId) MarshalText() ([]byte, error) {
+ return []byte(fmt.Sprintf("%x", string(id))), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface for *bson.ObjectId.
+func (id *ObjectId) UnmarshalText(data []byte) error {
+ if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
+ *id = ""
+ return nil
+ }
+ if len(data) != 24 {
+ return fmt.Errorf("invalid ObjectId: %s", data)
+ }
+ var buf [12]byte
+ _, err := hex.Decode(buf[:], data[:])
+ if err != nil {
+ return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
+ }
+ *id = ObjectId(string(buf[:]))
+ return nil
+}
+
+// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
+func (id ObjectId) Valid() bool {
+ return len(id) == 12
+}
+
+// byteSlice returns byte slice of id from start to end.
+// Calling this function with an invalid id will cause a runtime panic.
+func (id ObjectId) byteSlice(start, end int) []byte {
+ if len(id) != 12 {
+ panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
+ }
+ return []byte(string(id)[start:end])
+}
+
+// Time returns the timestamp part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Time() time.Time {
+ // First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
+ secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
+ return time.Unix(secs, 0)
+}
+
+// Machine returns the 3-byte machine id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Machine() []byte {
+ return id.byteSlice(4, 7)
+}
+
+// Pid returns the process id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Pid() uint16 {
+ return binary.BigEndian.Uint16(id.byteSlice(7, 9))
+}
+
+// Counter returns the incrementing value part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Counter() int32 {
+ b := id.byteSlice(9, 12)
+ // Counter is stored as big-endian 3-byte value
+ return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
+}
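+
+// Illustrative use of the accessors above on a freshly generated id:
+//
+//     id := bson.NewObjectId()
+//     id.Time()    // embedded timestamp, second precision
+//     id.Machine() // 3-byte machine id (md5 of the hostname)
+//     id.Pid()     // process id of the generating process
+//     id.Counter() // per-process incrementing counter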
+
+// The Symbol type is similar to a string and is used in languages with a
+// distinct symbol type.
+type Symbol string
+
+// Now returns the current time with millisecond precision. MongoDB stores
+// timestamps with the same precision, so a Time returned from this method
+// will not change after a roundtrip to the database. That's the only reason
+// why this function exists. Using the time.Now function also works fine
+// otherwise.
+func Now() time.Time {
+ return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
+}
+
+// MongoTimestamp is a special internal type used by MongoDB that for some
+// strange reason has its own datatype defined in BSON.
+type MongoTimestamp int64
+
+type orderKey int64
+
+// MaxKey is a special value that compares higher than all other possible BSON
+// values in a MongoDB database.
+var MaxKey = orderKey(1<<63 - 1)
+
+// MinKey is a special value that compares lower than all other possible BSON
+// values in a MongoDB database.
+var MinKey = orderKey(-1 << 63)
+
+type undefined struct{}
+
+// Undefined represents the undefined BSON value.
+var Undefined undefined
+
+// Binary is a representation for non-standard binary values. Any kind should
+// work, but the following are known as of this writing:
+//
+// 0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
+// 0x01 - Function (!?)
+// 0x02 - Obsolete generic.
+// 0x03 - UUID
+// 0x05 - MD5
+// 0x80 - User defined.
+//
+type Binary struct {
+ Kind byte
+ Data []byte
+}
+
+// RegEx represents a regular expression. The Options field may contain
+// individual characters defining the way in which the pattern should be
+// applied, and must be sorted. Valid options as of this writing are 'i' for
+// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
+// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
+// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
+// unicode. The value of the Options parameter is not verified before being
+// marshaled into the BSON format.
+type RegEx struct {
+ Pattern string
+ Options string
+}
+
+// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
+// will be marshaled as a mapping from identifiers to values that may be
+// used when evaluating the provided Code.
+type JavaScript struct {
+ Code string
+ Scope interface{}
+}
+
+// DBPointer refers to a document id in a namespace.
+//
+// This type is deprecated in the BSON specification and should not be used
+// except for backwards compatibility with ancient applications.
+type DBPointer struct {
+ Namespace string
+ Id ObjectId
+}
+
+const initialBufferSize = 64
+
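+// handleErr recovers a panic raised during marshalling or unmarshalling
+// and stores it in *err. String and error panic values become the returned
+// error; runtime errors, externalPanic values, and any other panic kinds
+// are re-raised.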
+func handleErr(err *error) {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ } else if _, ok := r.(externalPanic); ok {
+ panic(r)
+ } else if s, ok := r.(string); ok {
+ *err = errors.New(s)
+ } else if e, ok := r.(error); ok {
+ *err = e
+ } else {
+ panic(r)
+ }
+ }
+}
+
+// Marshal serializes the in value, which may be a map or a struct value.
+// In the case of struct values, only exported fields will be serialized,
+// and the order of serialized fields will match that of the struct itself.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+// "[<key>][,<flag1>[,<flag2>]]"
+//
+// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+//
+// minsize Marshal an int64 value as an int32, if that's feasible
+// while preserving the numeric value.
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the bson keys of other struct fields.
+//
+// Some examples:
+//
+// type T struct {
+// A bool
+// B int "myb"
+// C string "myc,omitempty"
+// D string `bson:",omitempty" json:"jsonkey"`
+// E int64 ",minsize"
+// F int64 "myf,omitempty,minsize"
+// }
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := &encoder{make([]byte, 0, initialBufferSize)}
+ e.addDoc(reflect.ValueOf(in))
+ return e.out, nil
+}
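+
+// Illustrative roundtrip (hypothetical document, not part of this package):
+//
+//     data, err := bson.Marshal(bson.M{"name": "Grace", "age": 25})
+//     if err == nil {
+//         var doc struct {
+//             Name string
+//             Age  int
+//         }
+//         err = bson.Unmarshal(data, &doc) // doc.Name == "Grace"
+//     }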
+
+// Unmarshal deserializes data from in into the out value. The out value
+// must be a map, a pointer to a struct, or a pointer to a bson.D value.
+// In the case of struct values, only exported fields will be deserialized.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+// "[<key>][,<flag1>[,<flag2>]]"
+//
+// `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported during unmarshal (see the
+// Marshal method for other flags):
+//
+// inline Inline the field, which must be a struct or a map.
+// Inlined structs are handled as if their fields were part
+// of the outer struct. An inlined map causes keys that do
+// not match any other struct field to be inserted in the
+// map rather than being discarded as usual.
+//
+// The target field or element types of out may not necessarily match
+// the BSON values of the provided data. The following conversions are
+// made automatically:
+//
+// - Numeric types are converted if at least the integer part of the
+// value would be preserved correctly
+// - Bools are converted to numeric types as 1 or 0
+// - Numeric types are converted to bools as true if not 0 or false otherwise
+// - Binary and string BSON data is converted to a string, array or byte slice
+//
+// If the value would not fit the type and cannot be converted, it's
+// silently skipped.
+//
+// Pointer values are initialized when necessary.
+func Unmarshal(in []byte, out interface{}) (err error) {
+ if raw, ok := out.(*Raw); ok {
+ raw.Kind = 3
+ raw.Data = in
+ return nil
+ }
+ defer handleErr(&err)
+ v := reflect.ValueOf(out)
+ switch v.Kind() {
+ case reflect.Ptr:
+ fallthrough
+ case reflect.Map:
+ d := newDecoder(in)
+ d.readDocTo(v)
+ case reflect.Struct:
+ return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
+ default:
+ return errors.New("Unmarshal needs a map or a pointer to a struct.")
+ }
+ return nil
+}
+
+// Unmarshal deserializes raw into the out value. If the out value type
+// is not compatible with raw, a *bson.TypeError is returned.
+//
+// See the Unmarshal function documentation for more details on the
+// unmarshalling process.
+func (raw Raw) Unmarshal(out interface{}) (err error) {
+ defer handleErr(&err)
+ v := reflect.ValueOf(out)
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ fallthrough
+ case reflect.Map:
+ d := newDecoder(raw.Data)
+ good := d.readElemTo(v, raw.Kind)
+ if !good {
+ return &TypeError{v.Type(), raw.Kind}
+ }
+ case reflect.Struct:
+ return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
+ default:
+ return errors.New("Raw Unmarshal needs a map or a valid pointer.")
+ }
+ return nil
+}
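+
+// Illustrative lazy decoding (data and pingBody are hypothetical): a
+// bson.Raw field defers processing until the content kind is known:
+//
+//     var msg struct {
+//         Kind string
+//         Body bson.Raw
+//     }
+//     err := bson.Unmarshal(data, &msg)
+//     if err == nil && msg.Kind == "ping" {
+//         var body pingBody
+//         err = msg.Body.Unmarshal(&body)
+//     }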
+
+type TypeError struct {
+ Type reflect.Type
+ Kind byte
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+ InlineMap int
+ Zero reflect.Value
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ MinSize bool
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var structMapMutex sync.RWMutex
+
+type externalPanic string
+
+func (e externalPanic) String() string {
+ return string(e)
+}
+
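+// getStructInfo returns the field layout for struct type st, parsing the
+// "bson" field tags (omitempty, minsize, inline) and flattening inlined
+// structs. Results are cached in structMap under structMapMutex.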
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ structMapMutex.RLock()
+ sinfo, found := structMap[st]
+ structMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("bson")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "minsize":
+ info.MinSize = true
+ case "inline":
+ inline = true
+ default:
+ msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ panic(externalPanic(msg))
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ panic("Option ,inline needs a struct value or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+ sinfo = &structInfo{
+ fieldsMap,
+ fieldsList,
+ inlineMap,
+ reflect.New(st).Elem(),
+ }
+ structMapMutex.Lock()
+ structMap[st] = sinfo
+ structMapMutex.Unlock()
+ return sinfo, nil
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/bson_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/bson_test.go
new file mode 100644
index 00000000000..37451f9fdc2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/bson_test.go
@@ -0,0 +1,1832 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson_test
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "net/url"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/mgo.v2/bson"
+ "gopkg.in/yaml.v2"
+)
+
+func TestAll(t *testing.T) {
+ TestingT(t)
+}
+
+type S struct{}
+
+var _ = Suite(&S{})
+
+// Wrap up the document elements contained in data, prepending the int32
+// length of the whole document, and appending the '\x00' value closing it.
+func wrapInDoc(data string) string {
+ result := make([]byte, len(data)+5)
+ binary.LittleEndian.PutUint32(result, uint32(len(result)))
+ copy(result[4:], []byte(data))
+ return string(result)
+}
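+
+// For example (illustrative): wrapInDoc("\x08_\x00\x01") yields the 9-byte
+// document "\x09\x00\x00\x00\x08_\x00\x01\x00", i.e. {"_": true}.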
+
+func makeZeroDoc(value interface{}) (zero interface{}) {
+ v := reflect.ValueOf(value)
+ t := v.Type()
+ switch t.Kind() {
+ case reflect.Map:
+ mv := reflect.MakeMap(t)
+ zero = mv.Interface()
+ case reflect.Ptr:
+ pv := reflect.New(v.Type().Elem())
+ zero = pv.Interface()
+ case reflect.Slice, reflect.Int, reflect.Int64, reflect.Struct:
+ zero = reflect.New(t).Interface()
+ default:
+ panic("unsupported doc type: " + t.Name())
+ }
+ return zero
+}
+
+func testUnmarshal(c *C, data string, obj interface{}) {
+ zero := makeZeroDoc(obj)
+ err := bson.Unmarshal([]byte(data), zero)
+ c.Assert(err, IsNil)
+ c.Assert(zero, DeepEquals, obj)
+}
+
+type testItemType struct {
+ obj interface{}
+ data string
+}
+
+// --------------------------------------------------------------------------
+// Samples from bsonspec.org:
+
+var sampleItems = []testItemType{
+ {bson.M{"hello": "world"},
+ "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"},
+ {bson.M{"BSON": []interface{}{"awesome", float64(5.05), 1986}},
+ "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" +
+ "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"},
+}
+
+func (s *S) TestMarshalSampleItems(c *C) {
+ for i, item := range sampleItems {
+ data, err := bson.Marshal(item.obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, item.data, Commentf("Failed on item %d", i))
+ }
+}
+
+func (s *S) TestUnmarshalSampleItems(c *C) {
+ for i, item := range sampleItems {
+ value := bson.M{}
+ err := bson.Unmarshal([]byte(item.data), value)
+ c.Assert(err, IsNil)
+ c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d", i))
+ }
+}
+
+// --------------------------------------------------------------------------
+// Every type, ordered by the type flag. These are not wrapped with the
+// length and last \x00 from the document. wrapInDoc() computes them.
+// Note that all of them should be supported as two-way conversions.
+
+var allItems = []testItemType{
+ {bson.M{},
+ ""},
+ {bson.M{"_": float64(5.05)},
+ "\x01_\x00333333\x14@"},
+ {bson.M{"_": "yo"},
+ "\x02_\x00\x03\x00\x00\x00yo\x00"},
+ {bson.M{"_": bson.M{"a": true}},
+ "\x03_\x00\x09\x00\x00\x00\x08a\x00\x01\x00"},
+ {bson.M{"_": []interface{}{true, false}},
+ "\x04_\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"},
+ {bson.M{"_": []byte("yo")},
+ "\x05_\x00\x02\x00\x00\x00\x00yo"},
+ {bson.M{"_": bson.Binary{0x80, []byte("udef")}},
+ "\x05_\x00\x04\x00\x00\x00\x80udef"},
+ {bson.M{"_": bson.Undefined}, // Obsolete, but still seen in the wild.
+ "\x06_\x00"},
+ {bson.M{"_": bson.ObjectId("0123456789ab")},
+ "\x07_\x000123456789ab"},
+ {bson.M{"_": bson.DBPointer{"testnamespace", bson.ObjectId("0123456789ab")}},
+ "\x0C_\x00\x0e\x00\x00\x00testnamespace\x000123456789ab"},
+ {bson.M{"_": false},
+ "\x08_\x00\x00"},
+ {bson.M{"_": true},
+ "\x08_\x00\x01"},
+ {bson.M{"_": time.Unix(0, 258e6)}, // Note the NS <=> MS conversion.
+ "\x09_\x00\x02\x01\x00\x00\x00\x00\x00\x00"},
+ {bson.M{"_": nil},
+ "\x0A_\x00"},
+ {bson.M{"_": bson.RegEx{"ab", "cd"}},
+ "\x0B_\x00ab\x00cd\x00"},
+ {bson.M{"_": bson.JavaScript{"code", nil}},
+ "\x0D_\x00\x05\x00\x00\x00code\x00"},
+ {bson.M{"_": bson.Symbol("sym")},
+ "\x0E_\x00\x04\x00\x00\x00sym\x00"},
+ {bson.M{"_": bson.JavaScript{"code", bson.M{"": nil}}},
+ "\x0F_\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" +
+ "\x07\x00\x00\x00\x0A\x00\x00"},
+ {bson.M{"_": 258},
+ "\x10_\x00\x02\x01\x00\x00"},
+ {bson.M{"_": bson.MongoTimestamp(258)},
+ "\x11_\x00\x02\x01\x00\x00\x00\x00\x00\x00"},
+ {bson.M{"_": int64(258)},
+ "\x12_\x00\x02\x01\x00\x00\x00\x00\x00\x00"},
+ {bson.M{"_": int64(258 << 32)},
+ "\x12_\x00\x00\x00\x00\x00\x02\x01\x00\x00"},
+ {bson.M{"_": bson.MaxKey},
+ "\x7F_\x00"},
+ {bson.M{"_": bson.MinKey},
+ "\xFF_\x00"},
+}
+
+func (s *S) TestMarshalAllItems(c *C) {
+ for i, item := range allItems {
+ data, err := bson.Marshal(item.obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, wrapInDoc(item.data), Commentf("Failed on item %d: %#v", i, item))
+ }
+}
+
+func (s *S) TestUnmarshalAllItems(c *C) {
+ for i, item := range allItems {
+ value := bson.M{}
+ err := bson.Unmarshal([]byte(wrapInDoc(item.data)), value)
+ c.Assert(err, IsNil)
+ c.Assert(value, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item))
+ }
+}
+
+func (s *S) TestUnmarshalRawAllItems(c *C) {
+ for i, item := range allItems {
+ if len(item.data) == 0 {
+ continue
+ }
+ value := item.obj.(bson.M)["_"]
+ if value == nil {
+ continue
+ }
+ pv := reflect.New(reflect.ValueOf(value).Type())
+ raw := bson.Raw{item.data[0], []byte(item.data[3:])}
+ c.Logf("Unmarshal raw: %#v, %#v", raw, pv.Interface())
+ err := raw.Unmarshal(pv.Interface())
+ c.Assert(err, IsNil)
+ c.Assert(pv.Elem().Interface(), DeepEquals, value, Commentf("Failed on item %d: %#v", i, item))
+ }
+}
+
+func (s *S) TestUnmarshalRawIncompatible(c *C) {
+ raw := bson.Raw{0x08, []byte{0x01}} // true
+ err := raw.Unmarshal(&struct{}{})
+ c.Assert(err, ErrorMatches, "BSON kind 0x08 isn't compatible with type struct \\{\\}")
+}
+
+func (s *S) TestUnmarshalZeroesStruct(c *C) {
+ data, err := bson.Marshal(bson.M{"b": 2})
+ c.Assert(err, IsNil)
+ type T struct{ A, B int }
+ v := T{A: 1}
+ err = bson.Unmarshal(data, &v)
+ c.Assert(err, IsNil)
+ c.Assert(v.A, Equals, 0)
+ c.Assert(v.B, Equals, 2)
+}
+
+func (s *S) TestUnmarshalZeroesMap(c *C) {
+ data, err := bson.Marshal(bson.M{"b": 2})
+ c.Assert(err, IsNil)
+ m := bson.M{"a": 1}
+ err = bson.Unmarshal(data, &m)
+ c.Assert(err, IsNil)
+ c.Assert(m, DeepEquals, bson.M{"b": 2})
+}
+
+func (s *S) TestUnmarshalNonNilInterface(c *C) {
+ data, err := bson.Marshal(bson.M{"b": 2})
+ c.Assert(err, IsNil)
+ m := bson.M{"a": 1}
+ var i interface{}
+ i = m
+ err = bson.Unmarshal(data, &i)
+ c.Assert(err, IsNil)
+ c.Assert(i, DeepEquals, bson.M{"b": 2})
+ c.Assert(m, DeepEquals, bson.M{"a": 1})
+}
+
+// --------------------------------------------------------------------------
+// Some one-way marshaling operations which would unmarshal differently.
+
+var oneWayMarshalItems = []testItemType{
+ // These are being passed as pointers, and will unmarshal as values.
+ {bson.M{"": &bson.Binary{0x02, []byte("old")}},
+ "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"},
+ {bson.M{"": &bson.Binary{0x80, []byte("udef")}},
+ "\x05\x00\x04\x00\x00\x00\x80udef"},
+ {bson.M{"": &bson.RegEx{"ab", "cd"}},
+ "\x0B\x00ab\x00cd\x00"},
+ {bson.M{"": &bson.JavaScript{"code", nil}},
+ "\x0D\x00\x05\x00\x00\x00code\x00"},
+ {bson.M{"": &bson.JavaScript{"code", bson.M{"": nil}}},
+ "\x0F\x00\x14\x00\x00\x00\x05\x00\x00\x00code\x00" +
+ "\x07\x00\x00\x00\x0A\x00\x00"},
+
+ // There's no float32 type in BSON. Will encode as a float64.
+ {bson.M{"": float32(5.05)},
+ "\x01\x00\x00\x00\x00@33\x14@"},
+
+ // The array will be unmarshaled as a slice instead.
+ {bson.M{"": [2]bool{true, false}},
+ "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"},
+
+ // The typed slice will be unmarshaled as []interface{}.
+ {bson.M{"": []bool{true, false}},
+ "\x04\x00\r\x00\x00\x00\x080\x00\x01\x081\x00\x00\x00"},
+
+ // Will unmarshal as a []byte.
+ {bson.M{"": bson.Binary{0x00, []byte("yo")}},
+ "\x05\x00\x02\x00\x00\x00\x00yo"},
+ {bson.M{"": bson.Binary{0x02, []byte("old")}},
+ "\x05\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"},
+
+	// There is no way to preserve the type information here. We might encode
+	// this as a zero value, but then pointer fields in structs couldn't
+	// distinguish between unset and set to the zero value.
+ {bson.M{"": (*byte)(nil)},
+ "\x0A\x00"},
+
+	// There are no int types smaller than int32 in BSON. This could be encoded
+	// as a char, but it would still be ambiguous, take more space, and be
+	// awkward to use in Go when loaded without typing information.
+ {bson.M{"": byte(8)},
+ "\x10\x00\x08\x00\x00\x00"},
+
+ // There are no unsigned types in BSON. Will unmarshal as int32 or int64.
+ {bson.M{"": uint32(258)},
+ "\x10\x00\x02\x01\x00\x00"},
+ {bson.M{"": uint64(258)},
+ "\x12\x00\x02\x01\x00\x00\x00\x00\x00\x00"},
+ {bson.M{"": uint64(258 << 32)},
+ "\x12\x00\x00\x00\x00\x00\x02\x01\x00\x00"},
+
+ // This will unmarshal as int.
+ {bson.M{"": int32(258)},
+ "\x10\x00\x02\x01\x00\x00"},
+
+ // That's a special case. The unsigned value is too large for an int32,
+ // so an int64 is used instead.
+ {bson.M{"": uint32(1<<32 - 1)},
+ "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"},
+ {bson.M{"": uint(1<<32 - 1)},
+ "\x12\x00\xFF\xFF\xFF\xFF\x00\x00\x00\x00"},
+}
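+
+// Illustrative sketch (not part of the original suite, name is hypothetical):
+// a one-way item comes back as a different Go type than it was encoded from,
+// using the suite and helpers already defined in this file.
+func (s *S) TestOneWayUintSketch(c *C) {
+	data, err := bson.Marshal(bson.M{"v": uint32(1<<32 - 1)})
+	c.Assert(err, IsNil)
+	m := bson.M{}
+	err = bson.Unmarshal(data, m)
+	c.Assert(err, IsNil)
+	// The value was too large for an int32, so it was encoded as an int64
+	// and decodes as one.
+	c.Assert(m["v"], Equals, int64(1<<32-1))
+}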
+
+func (s *S) TestOneWayMarshalItems(c *C) {
+ for i, item := range oneWayMarshalItems {
+ data, err := bson.Marshal(item.obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, wrapInDoc(item.data),
+ Commentf("Failed on item %d", i))
+ }
+}
+
+// --------------------------------------------------------------------------
+// Two-way tests for user-defined structures using the samples
+// from bsonspec.org.
+
+type specSample1 struct {
+ Hello string
+}
+
+type specSample2 struct {
+ BSON []interface{} "BSON"
+}
+
+var structSampleItems = []testItemType{
+ {&specSample1{"world"},
+ "\x16\x00\x00\x00\x02hello\x00\x06\x00\x00\x00world\x00\x00"},
+ {&specSample2{[]interface{}{"awesome", float64(5.05), 1986}},
+ "1\x00\x00\x00\x04BSON\x00&\x00\x00\x00\x020\x00\x08\x00\x00\x00" +
+ "awesome\x00\x011\x00333333\x14@\x102\x00\xc2\x07\x00\x00\x00\x00"},
+}
+
+func (s *S) TestMarshalStructSampleItems(c *C) {
+ for i, item := range structSampleItems {
+ data, err := bson.Marshal(item.obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, item.data,
+ Commentf("Failed on item %d", i))
+ }
+}
+
+func (s *S) TestUnmarshalStructSampleItems(c *C) {
+ for _, item := range structSampleItems {
+ testUnmarshal(c, item.data, item.obj)
+ }
+}
+
+func (s *S) Test64bitInt(c *C) {
+ var i int64 = (1 << 31)
+ if int(i) > 0 {
+ data, err := bson.Marshal(bson.M{"i": int(i)})
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, wrapInDoc("\x12i\x00\x00\x00\x00\x80\x00\x00\x00\x00"))
+
+ var result struct{ I int }
+ err = bson.Unmarshal(data, &result)
+ c.Assert(err, IsNil)
+ c.Assert(int64(result.I), Equals, i)
+ }
+}
+
+// --------------------------------------------------------------------------
+// Generic two-way struct marshaling tests.
+
+var bytevar = byte(8)
+var byteptr = &bytevar
+
+var structItems = []testItemType{
+ {&struct{ Ptr *byte }{nil},
+ "\x0Aptr\x00"},
+ {&struct{ Ptr *byte }{&bytevar},
+ "\x10ptr\x00\x08\x00\x00\x00"},
+ {&struct{ Ptr **byte }{&byteptr},
+ "\x10ptr\x00\x08\x00\x00\x00"},
+ {&struct{ Byte byte }{8},
+ "\x10byte\x00\x08\x00\x00\x00"},
+ {&struct{ Byte byte }{0},
+ "\x10byte\x00\x00\x00\x00\x00"},
+ {&struct {
+ V byte "Tag"
+ }{8},
+ "\x10Tag\x00\x08\x00\x00\x00"},
+ {&struct {
+ V *struct {
+ Byte byte
+ }
+ }{&struct{ Byte byte }{8}},
+ "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"},
+ {&struct{ priv byte }{}, ""},
+
+	// The order of the dumped fields should match their order in the struct.
+ {&struct{ A, C, B, D, F, E *byte }{},
+ "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x0Ae\x00"},
+
+ {&struct{ V bson.Raw }{bson.Raw{0x03, []byte("\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00")}},
+ "\x03v\x00" + "\x0f\x00\x00\x00\x10byte\x00\b\x00\x00\x00\x00"},
+ {&struct{ V bson.Raw }{bson.Raw{0x10, []byte("\x00\x00\x00\x00")}},
+ "\x10v\x00" + "\x00\x00\x00\x00"},
+
+ // Byte arrays.
+ {&struct{ V [2]byte }{[2]byte{'y', 'o'}},
+ "\x05v\x00\x02\x00\x00\x00\x00yo"},
+}
+
+func (s *S) TestMarshalStructItems(c *C) {
+ for i, item := range structItems {
+ data, err := bson.Marshal(item.obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, wrapInDoc(item.data),
+ Commentf("Failed on item %d", i))
+ }
+}
+
+func (s *S) TestUnmarshalStructItems(c *C) {
+ for _, item := range structItems {
+ testUnmarshal(c, wrapInDoc(item.data), item.obj)
+ }
+}
+
+func (s *S) TestUnmarshalRawStructItems(c *C) {
+ for i, item := range structItems {
+ raw := bson.Raw{0x03, []byte(wrapInDoc(item.data))}
+ zero := makeZeroDoc(item.obj)
+ err := raw.Unmarshal(zero)
+ c.Assert(err, IsNil)
+ c.Assert(zero, DeepEquals, item.obj, Commentf("Failed on item %d: %#v", i, item))
+ }
+}
+
+func (s *S) TestUnmarshalRawNil(c *C) {
+ // Regression test: shouldn't try to nil out the pointer itself,
+ // as it's not settable.
+ raw := bson.Raw{0x0A, []byte{}}
+ err := raw.Unmarshal(&struct{}{})
+ c.Assert(err, IsNil)
+}
+
+// --------------------------------------------------------------------------
+// One-way marshaling tests.
+
+type dOnIface struct {
+ D interface{}
+}
+
+type ignoreField struct {
+ Before string
+ Ignore string `bson:"-"`
+ After string
+}
+
+var marshalItems = []testItemType{
+	// Ordered document dump. Will unmarshal as a map by default.
+ {bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}},
+ "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"},
+ {MyD{{"a", nil}, {"c", nil}, {"b", nil}, {"d", nil}, {"f", nil}, {"e", true}},
+ "\x0Aa\x00\x0Ac\x00\x0Ab\x00\x0Ad\x00\x0Af\x00\x08e\x00\x01"},
+ {&dOnIface{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}},
+ "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")},
+
+ {bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}},
+ "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"},
+ {MyRawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}},
+ "\x0Aa\x00" + "\x0Ac\x00" + "\x08b\x00\x01"},
+ {&dOnIface{bson.RawD{{"a", bson.Raw{0x0A, nil}}, {"c", bson.Raw{0x0A, nil}}, {"b", bson.Raw{0x08, []byte{0x01}}}}},
+ "\x03d\x00" + wrapInDoc("\x0Aa\x00"+"\x0Ac\x00"+"\x08b\x00\x01")},
+
+ {&ignoreField{"before", "ignore", "after"},
+ "\x02before\x00\a\x00\x00\x00before\x00\x02after\x00\x06\x00\x00\x00after\x00"},
+
+	// Marshaling a Raw document passes its bytes through verbatim.
+ {bson.Raw{0x03, []byte(wrapInDoc("anything"))},
+ "anything"},
+ {bson.Raw{Data: []byte(wrapInDoc("anything"))},
+ "anything"},
+}
+
+func (s *S) TestMarshalOneWayItems(c *C) {
+ for _, item := range marshalItems {
+ data, err := bson.Marshal(item.obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, wrapInDoc(item.data))
+ }
+}
+
+// --------------------------------------------------------------------------
+// One-way unmarshaling tests.
+
+var unmarshalItems = []testItemType{
+ // Field is private. Should not attempt to unmarshal it.
+ {&struct{ priv byte }{},
+ "\x10priv\x00\x08\x00\x00\x00"},
+
+ // Wrong casing. Field names are lowercased.
+ {&struct{ Byte byte }{},
+ "\x10Byte\x00\x08\x00\x00\x00"},
+
+ // Ignore non-existing field.
+ {&struct{ Byte byte }{9},
+ "\x10boot\x00\x08\x00\x00\x00" + "\x10byte\x00\x09\x00\x00\x00"},
+
+	// Do not unmarshal into the ignored field.
+ {&ignoreField{"before", "", "after"},
+ "\x02before\x00\a\x00\x00\x00before\x00" +
+ "\x02-\x00\a\x00\x00\x00ignore\x00" +
+ "\x02after\x00\x06\x00\x00\x00after\x00"},
+
+ // Ignore unsuitable types silently.
+ {map[string]string{"str": "s"},
+ "\x02str\x00\x02\x00\x00\x00s\x00" + "\x10int\x00\x01\x00\x00\x00"},
+ {map[string][]int{"array": []int{5, 9}},
+ "\x04array\x00" + wrapInDoc("\x100\x00\x05\x00\x00\x00"+"\x021\x00\x02\x00\x00\x00s\x00"+"\x102\x00\x09\x00\x00\x00")},
+
+ // Wrong type. Shouldn't init pointer.
+ {&struct{ Str *byte }{},
+ "\x02str\x00\x02\x00\x00\x00s\x00"},
+ {&struct{ Str *struct{ Str string } }{},
+ "\x02str\x00\x02\x00\x00\x00s\x00"},
+
+ // Ordered document.
+ {&struct{ bson.D }{bson.D{{"a", nil}, {"c", nil}, {"b", nil}, {"d", true}}},
+ "\x03d\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x0Ab\x00\x08d\x00\x01")},
+
+ // Raw document.
+ {&bson.Raw{0x03, []byte(wrapInDoc("\x10byte\x00\x08\x00\x00\x00"))},
+ "\x10byte\x00\x08\x00\x00\x00"},
+
+ // RawD document.
+ {&struct{ bson.RawD }{bson.RawD{{"a", bson.Raw{0x0A, []byte{}}}, {"c", bson.Raw{0x0A, []byte{}}}, {"b", bson.Raw{0x08, []byte{0x01}}}}},
+ "\x03rawd\x00" + wrapInDoc("\x0Aa\x00\x0Ac\x00\x08b\x00\x01")},
+
+ // Decode old binary.
+ {bson.M{"_": []byte("old")},
+ "\x05_\x00\x07\x00\x00\x00\x02\x03\x00\x00\x00old"},
+
+ // Decode old binary without length. According to the spec, this shouldn't happen.
+ {bson.M{"_": []byte("old")},
+ "\x05_\x00\x03\x00\x00\x00\x02old"},
+
+	// Decode a doc within a doc into a slice within a doc; shouldn't error.
+ {&struct{ Foo []string }{},
+ "\x03\x66\x6f\x6f\x00\x05\x00\x00\x00\x00"},
+}
+
+func (s *S) TestUnmarshalOneWayItems(c *C) {
+ for _, item := range unmarshalItems {
+ testUnmarshal(c, wrapInDoc(item.data), item.obj)
+ }
+}
+
+func (s *S) TestUnmarshalNilInStruct(c *C) {
+ // Nil is the default value, so we need to ensure it's indeed being set.
+ b := byte(1)
+ v := &struct{ Ptr *byte }{&b}
+ err := bson.Unmarshal([]byte(wrapInDoc("\x0Aptr\x00")), v)
+ c.Assert(err, IsNil)
+ c.Assert(v, DeepEquals, &struct{ Ptr *byte }{nil})
+}
+
+// --------------------------------------------------------------------------
+// Marshaling error cases.
+
+type structWithDupKeys struct {
+ Name byte
+	Other byte "name" // The tag takes precedence, so this clashes with Name.
+}
+
+var marshalErrorItems = []testItemType{
+ {bson.M{"": uint64(1 << 63)},
+ "BSON has no uint64 type, and value is too large to fit correctly in an int64"},
+ {bson.M{"": bson.ObjectId("tooshort")},
+ "ObjectIDs must be exactly 12 bytes long \\(got 8\\)"},
+ {int64(123),
+ "Can't marshal int64 as a BSON document"},
+ {bson.M{"": 1i},
+ "Can't marshal complex128 in a BSON document"},
+ {&structWithDupKeys{},
+ "Duplicated key 'name' in struct bson_test.structWithDupKeys"},
+ {bson.Raw{0xA, []byte{}},
+ "Attempted to marshal Raw kind 10 as a document"},
+ {bson.Raw{0x3, []byte{}},
+ "Attempted to marshal empty Raw document"},
+ {bson.M{"w": bson.Raw{0x3, []byte{}}},
+ "Attempted to marshal empty Raw document"},
+ {&inlineCantPtr{&struct{ A, B int }{1, 2}},
+ "Option ,inline needs a struct value or map field"},
+ {&inlineDupName{1, struct{ A, B int }{2, 3}},
+ "Duplicated key 'a' in struct bson_test.inlineDupName"},
+ {&inlineDupMap{},
+ "Multiple ,inline maps in struct bson_test.inlineDupMap"},
+ {&inlineBadKeyMap{},
+ "Option ,inline needs a map with string keys in struct bson_test.inlineBadKeyMap"},
+ {&inlineMap{A: 1, M: map[string]interface{}{"a": 1}},
+ `Can't have key "a" in inlined map; conflicts with struct field`},
+}
+
+func (s *S) TestMarshalErrorItems(c *C) {
+ for _, item := range marshalErrorItems {
+ data, err := bson.Marshal(item.obj)
+ c.Assert(err, ErrorMatches, item.data)
+ c.Assert(data, IsNil)
+ }
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling error cases.
+
+type unmarshalErrorType struct {
+ obj interface{}
+ data string
+ error string
+}
+
+var unmarshalErrorItems = []unmarshalErrorType{
+ // Tag name conflicts with existing parameter.
+ {&structWithDupKeys{},
+ "\x10name\x00\x08\x00\x00\x00",
+ "Duplicated key 'name' in struct bson_test.structWithDupKeys"},
+
+ // Non-string map key.
+ {map[int]interface{}{},
+ "\x10name\x00\x08\x00\x00\x00",
+ "BSON map must have string keys. Got: map\\[int\\]interface \\{\\}"},
+
+ {nil,
+ "\xEEname\x00",
+ "Unknown element kind \\(0xEE\\)"},
+
+ {struct{ Name bool }{},
+ "\x10name\x00\x08\x00\x00\x00",
+ "Unmarshal can't deal with struct values. Use a pointer."},
+
+ {123,
+ "\x10name\x00\x08\x00\x00\x00",
+ "Unmarshal needs a map or a pointer to a struct."},
+
+ {nil,
+ "\x08\x62\x00\x02",
+ "encoded boolean must be 1 or 0, found 2"},
+}
+
+func (s *S) TestUnmarshalErrorItems(c *C) {
+ for _, item := range unmarshalErrorItems {
+ data := []byte(wrapInDoc(item.data))
+ var value interface{}
+ switch reflect.ValueOf(item.obj).Kind() {
+ case reflect.Map, reflect.Ptr:
+ value = makeZeroDoc(item.obj)
+ case reflect.Invalid:
+ value = bson.M{}
+ default:
+ value = item.obj
+ }
+ err := bson.Unmarshal(data, value)
+ c.Assert(err, ErrorMatches, item.error)
+ }
+}
+
+type unmarshalRawErrorType struct {
+ obj interface{}
+ raw bson.Raw
+ error string
+}
+
+var unmarshalRawErrorItems = []unmarshalRawErrorType{
+ // Tag name conflicts with existing parameter.
+ {&structWithDupKeys{},
+ bson.Raw{0x03, []byte("\x10byte\x00\x08\x00\x00\x00")},
+ "Duplicated key 'name' in struct bson_test.structWithDupKeys"},
+
+ {&struct{}{},
+ bson.Raw{0xEE, []byte{}},
+ "Unknown element kind \\(0xEE\\)"},
+
+ {struct{ Name bool }{},
+ bson.Raw{0x10, []byte("\x08\x00\x00\x00")},
+ "Raw Unmarshal can't deal with struct values. Use a pointer."},
+
+ {123,
+ bson.Raw{0x10, []byte("\x08\x00\x00\x00")},
+ "Raw Unmarshal needs a map or a valid pointer."},
+}
+
+func (s *S) TestUnmarshalRawErrorItems(c *C) {
+ for i, item := range unmarshalRawErrorItems {
+ err := item.raw.Unmarshal(item.obj)
+ c.Assert(err, ErrorMatches, item.error, Commentf("Failed on item %d: %#v\n", i, item))
+ }
+}
+
+var corruptedData = []string{
+ "\x04\x00\x00\x00\x00", // Document shorter than minimum
+ "\x06\x00\x00\x00\x00", // Not enough data
+ "\x05\x00\x00", // Broken length
+ "\x05\x00\x00\x00\xff", // Corrupted termination
+ "\x0A\x00\x00\x00\x0Aooop\x00", // Unfinished C string
+
+ // Array end past end of string (s[2]=0x07 is correct)
+ wrapInDoc("\x04\x00\x09\x00\x00\x00\x0A\x00\x00"),
+
+ // Array end within string, but past acceptable.
+ wrapInDoc("\x04\x00\x08\x00\x00\x00\x0A\x00\x00"),
+
+ // Document end within string, but past acceptable.
+ wrapInDoc("\x03\x00\x08\x00\x00\x00\x0A\x00\x00"),
+
+ // String with corrupted end.
+ wrapInDoc("\x02\x00\x03\x00\x00\x00yo\xFF"),
+
+ // String with negative length (issue #116).
+ "\x0c\x00\x00\x00\x02x\x00\xff\xff\xff\xff\x00",
+
+ // String with zero length (must include trailing '\x00')
+ "\x0c\x00\x00\x00\x02x\x00\x00\x00\x00\x00\x00",
+
+ // Binary with negative length.
+ "\r\x00\x00\x00\x05x\x00\xff\xff\xff\xff\x00\x00",
+}
+
+func (s *S) TestUnmarshalMapDocumentTooShort(c *C) {
+ for _, data := range corruptedData {
+ err := bson.Unmarshal([]byte(data), bson.M{})
+ c.Assert(err, ErrorMatches, "Document is corrupted")
+
+ err = bson.Unmarshal([]byte(data), &struct{}{})
+ c.Assert(err, ErrorMatches, "Document is corrupted")
+ }
+}
+
+// --------------------------------------------------------------------------
+// Setter test cases.
+
+var setterResult = map[string]error{}
+
+type setterType struct {
+ received interface{}
+}
+
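+// SetBSON implements the bson.Setter interface: the decoder hands each raw
+// element to the value, which may return bson.SetZero to zero the field, a
+// *bson.TypeError to have the element omitted from maps and slices, or any
+// other error to abort decoding. (A summary of the mgo contract as exercised
+// by the tests below.)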
+func (o *setterType) SetBSON(raw bson.Raw) error {
+ err := raw.Unmarshal(&o.received)
+ if err != nil {
+		panic("The panic: " + err.Error())
+ }
+ if s, ok := o.received.(string); ok {
+ if result, ok := setterResult[s]; ok {
+ return result
+ }
+ }
+ return nil
+}
+
+type ptrSetterDoc struct {
+ Field *setterType "_"
+}
+
+type valSetterDoc struct {
+ Field setterType "_"
+}
+
+func (s *S) TestUnmarshalAllItemsWithPtrSetter(c *C) {
+ for _, item := range allItems {
+ for i := 0; i != 2; i++ {
+ var field *setterType
+ if i == 0 {
+ obj := &ptrSetterDoc{}
+ err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj)
+ c.Assert(err, IsNil)
+ field = obj.Field
+ } else {
+ obj := &valSetterDoc{}
+ err := bson.Unmarshal([]byte(wrapInDoc(item.data)), obj)
+ c.Assert(err, IsNil)
+ field = &obj.Field
+ }
+ if item.data == "" {
+ // Nothing to unmarshal. Should be untouched.
+ if i == 0 {
+ c.Assert(field, IsNil)
+ } else {
+ c.Assert(field.received, IsNil)
+ }
+ } else {
+ expected := item.obj.(bson.M)["_"]
+ c.Assert(field, NotNil, Commentf("Pointer not initialized (%#v)", expected))
+ c.Assert(field.received, DeepEquals, expected)
+ }
+ }
+ }
+}
+
+func (s *S) TestUnmarshalWholeDocumentWithSetter(c *C) {
+ obj := &setterType{}
+ err := bson.Unmarshal([]byte(sampleItems[0].data), obj)
+ c.Assert(err, IsNil)
+ c.Assert(obj.received, DeepEquals, bson.M{"hello": "world"})
+}
+
+func (s *S) TestUnmarshalSetterOmits(c *C) {
+ setterResult["2"] = &bson.TypeError{}
+ setterResult["4"] = &bson.TypeError{}
+ defer func() {
+ delete(setterResult, "2")
+ delete(setterResult, "4")
+ }()
+
+ m := map[string]*setterType{}
+ data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" +
+ "\x02def\x00\x02\x00\x00\x002\x00" +
+ "\x02ghi\x00\x02\x00\x00\x003\x00" +
+ "\x02jkl\x00\x02\x00\x00\x004\x00")
+ err := bson.Unmarshal([]byte(data), m)
+ c.Assert(err, IsNil)
+ c.Assert(m["abc"], NotNil)
+ c.Assert(m["def"], IsNil)
+ c.Assert(m["ghi"], NotNil)
+ c.Assert(m["jkl"], IsNil)
+
+ c.Assert(m["abc"].received, Equals, "1")
+ c.Assert(m["ghi"].received, Equals, "3")
+}
+
+func (s *S) TestUnmarshalSetterErrors(c *C) {
+ boom := errors.New("BOOM")
+ setterResult["2"] = boom
+ defer delete(setterResult, "2")
+
+ m := map[string]*setterType{}
+ data := wrapInDoc("\x02abc\x00\x02\x00\x00\x001\x00" +
+ "\x02def\x00\x02\x00\x00\x002\x00" +
+ "\x02ghi\x00\x02\x00\x00\x003\x00")
+ err := bson.Unmarshal([]byte(data), m)
+ c.Assert(err, Equals, boom)
+ c.Assert(m["abc"], NotNil)
+ c.Assert(m["def"], IsNil)
+ c.Assert(m["ghi"], IsNil)
+
+ c.Assert(m["abc"].received, Equals, "1")
+}
+
+func (s *S) TestDMap(c *C) {
+ d := bson.D{{"a", 1}, {"b", 2}}
+ c.Assert(d.Map(), DeepEquals, bson.M{"a": 1, "b": 2})
+}
+
+func (s *S) TestUnmarshalSetterSetZero(c *C) {
+ setterResult["foo"] = bson.SetZero
+	defer delete(setterResult, "foo")
+
+ data, err := bson.Marshal(bson.M{"field": "foo"})
+ c.Assert(err, IsNil)
+
+ m := map[string]*setterType{}
+ err = bson.Unmarshal([]byte(data), m)
+ c.Assert(err, IsNil)
+
+ value, ok := m["field"]
+ c.Assert(ok, Equals, true)
+ c.Assert(value, IsNil)
+}
+
+// --------------------------------------------------------------------------
+// Getter test cases.
+
+type typeWithGetter struct {
+ result interface{}
+ err error
+}
+
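+// GetBSON implements the bson.Getter interface: Marshal substitutes the
+// returned value for the receiver, which is why a nil *typeWithGetter can
+// still marshal a placeholder string below.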
+func (t *typeWithGetter) GetBSON() (interface{}, error) {
+ if t == nil {
+ return "<value is nil>", nil
+ }
+ return t.result, t.err
+}
+
+type docWithGetterField struct {
+ Field *typeWithGetter "_"
+}
+
+func (s *S) TestMarshalAllItemsWithGetter(c *C) {
+ for i, item := range allItems {
+ if item.data == "" {
+ continue
+ }
+ obj := &docWithGetterField{}
+ obj.Field = &typeWithGetter{result: item.obj.(bson.M)["_"]}
+ data, err := bson.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, wrapInDoc(item.data),
+ Commentf("Failed on item #%d", i))
+ }
+}
+
+func (s *S) TestMarshalWholeDocumentWithGetter(c *C) {
+ obj := &typeWithGetter{result: sampleItems[0].obj}
+ data, err := bson.Marshal(obj)
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, sampleItems[0].data)
+}
+
+func (s *S) TestGetterErrors(c *C) {
+ e := errors.New("oops")
+
+ obj1 := &docWithGetterField{}
+ obj1.Field = &typeWithGetter{sampleItems[0].obj, e}
+ data, err := bson.Marshal(obj1)
+ c.Assert(err, ErrorMatches, "oops")
+ c.Assert(data, IsNil)
+
+ obj2 := &typeWithGetter{sampleItems[0].obj, e}
+ data, err = bson.Marshal(obj2)
+ c.Assert(err, ErrorMatches, "oops")
+ c.Assert(data, IsNil)
+}
+
+type intGetter int64
+
+func (t intGetter) GetBSON() (interface{}, error) {
+ return int64(t), nil
+}
+
+type typeWithIntGetter struct {
+ V intGetter ",minsize"
+}
+
+func (s *S) TestMarshalShortWithGetter(c *C) {
+ obj := typeWithIntGetter{42}
+ data, err := bson.Marshal(obj)
+ c.Assert(err, IsNil)
+ m := bson.M{}
+ err = bson.Unmarshal(data, m)
+ c.Assert(err, IsNil)
+ c.Assert(m["v"], Equals, 42)
+}
+
+func (s *S) TestMarshalWithGetterNil(c *C) {
+ obj := docWithGetterField{}
+ data, err := bson.Marshal(obj)
+ c.Assert(err, IsNil)
+ m := bson.M{}
+ err = bson.Unmarshal(data, m)
+ c.Assert(err, IsNil)
+ c.Assert(m, DeepEquals, bson.M{"_": "<value is nil>"})
+}
+
+// --------------------------------------------------------------------------
+// Cross-type conversion tests.
+
+type crossTypeItem struct {
+ obj1 interface{}
+ obj2 interface{}
+}
+
+type condStr struct {
+ V string ",omitempty"
+}
+type condStrNS struct {
+ V string `a:"A" bson:",omitempty" b:"B"`
+}
+type condBool struct {
+ V bool ",omitempty"
+}
+type condInt struct {
+ V int ",omitempty"
+}
+type condUInt struct {
+ V uint ",omitempty"
+}
+type condFloat struct {
+ V float64 ",omitempty"
+}
+type condIface struct {
+ V interface{} ",omitempty"
+}
+type condPtr struct {
+ V *bool ",omitempty"
+}
+type condSlice struct {
+ V []string ",omitempty"
+}
+type condMap struct {
+ V map[string]int ",omitempty"
+}
+type namedCondStr struct {
+ V string "myv,omitempty"
+}
+type condTime struct {
+ V time.Time ",omitempty"
+}
+type condStruct struct {
+ V struct{ A []int } ",omitempty"
+}
+type condRaw struct {
+ V bson.Raw ",omitempty"
+}
+
+type shortInt struct {
+ V int64 ",minsize"
+}
+type shortUint struct {
+ V uint64 ",minsize"
+}
+type shortIface struct {
+ V interface{} ",minsize"
+}
+type shortPtr struct {
+ V *int64 ",minsize"
+}
+type shortNonEmptyInt struct {
+ V int64 ",minsize,omitempty"
+}
+
+type inlineInt struct {
+ V struct{ A, B int } ",inline"
+}
+type inlineCantPtr struct {
+ V *struct{ A, B int } ",inline"
+}
+type inlineDupName struct {
+ A int
+ V struct{ A, B int } ",inline"
+}
+type inlineMap struct {
+ A int
+ M map[string]interface{} ",inline"
+}
+type inlineMapInt struct {
+ A int
+ M map[string]int ",inline"
+}
+type inlineMapMyM struct {
+ A int
+ M MyM ",inline"
+}
+type inlineDupMap struct {
+ M1 map[string]interface{} ",inline"
+ M2 map[string]interface{} ",inline"
+}
+type inlineBadKeyMap struct {
+ M map[int]int ",inline"
+}
+type inlineUnexported struct {
+ M map[string]interface{} ",inline"
+ unexported ",inline"
+}
+type unexported struct {
+ A int
+}
+
+type getterSetterD bson.D
+
+func (s getterSetterD) GetBSON() (interface{}, error) {
+ if len(s) == 0 {
+ return bson.D{}, nil
+ }
+ return bson.D(s[:len(s)-1]), nil
+}
+
+func (s *getterSetterD) SetBSON(raw bson.Raw) error {
+ var doc bson.D
+ err := raw.Unmarshal(&doc)
+ doc = append(doc, bson.DocElem{"suffix", true})
+ *s = getterSetterD(doc)
+ return err
+}
+
+type getterSetterInt int
+
+func (i getterSetterInt) GetBSON() (interface{}, error) {
+ return bson.D{{"a", int(i)}}, nil
+}
+
+func (i *getterSetterInt) SetBSON(raw bson.Raw) error {
+ var doc struct{ A int }
+ err := raw.Unmarshal(&doc)
+ *i = getterSetterInt(doc.A)
+ return err
+}
+
+type ifaceType interface {
+ Hello()
+}
+
+type ifaceSlice []ifaceType
+
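+// SetBSON turns the stored length back into a slice of nil interfaces,
+// pairing with GetBSON below to round-trip a type BSON cannot represent
+// directly.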
+func (s *ifaceSlice) SetBSON(raw bson.Raw) error {
+ var ns []int
+ if err := raw.Unmarshal(&ns); err != nil {
+ return err
+ }
+ *s = make(ifaceSlice, ns[0])
+ return nil
+}
+
+func (s ifaceSlice) GetBSON() (interface{}, error) {
+ return []int{len(s)}, nil
+}
+
+type (
+ MyString string
+ MyBytes []byte
+ MyBool bool
+ MyD []bson.DocElem
+ MyRawD []bson.RawDocElem
+ MyM map[string]interface{}
+)
+
+var (
+ truevar = true
+ falsevar = false
+
+ int64var = int64(42)
+ int64ptr = &int64var
+ intvar = int(42)
+ intptr = &intvar
+
+ gsintvar = getterSetterInt(42)
+)
+
+func parseURL(s string) *url.URL {
+ u, err := url.Parse(s)
+ if err != nil {
+ panic(err)
+ }
+ return u
+}
+
+// That's a pretty fun test. It will dump the first item, generate a zero
+// value equivalent to the second one, load the dumped data onto it, and then
+// verify that the resulting value is deep-equal to the untouched second value.
+// Then, it will do the same in the *opposite* direction!
+var twoWayCrossItems = []crossTypeItem{
+ // int<=>int
+ {&struct{ I int }{42}, &struct{ I int8 }{42}},
+ {&struct{ I int }{42}, &struct{ I int32 }{42}},
+ {&struct{ I int }{42}, &struct{ I int64 }{42}},
+ {&struct{ I int8 }{42}, &struct{ I int32 }{42}},
+ {&struct{ I int8 }{42}, &struct{ I int64 }{42}},
+ {&struct{ I int32 }{42}, &struct{ I int64 }{42}},
+
+ // uint<=>uint
+ {&struct{ I uint }{42}, &struct{ I uint8 }{42}},
+ {&struct{ I uint }{42}, &struct{ I uint32 }{42}},
+ {&struct{ I uint }{42}, &struct{ I uint64 }{42}},
+ {&struct{ I uint8 }{42}, &struct{ I uint32 }{42}},
+ {&struct{ I uint8 }{42}, &struct{ I uint64 }{42}},
+ {&struct{ I uint32 }{42}, &struct{ I uint64 }{42}},
+
+ // float32<=>float64
+ {&struct{ I float32 }{42}, &struct{ I float64 }{42}},
+
+ // int<=>uint
+ {&struct{ I uint }{42}, &struct{ I int }{42}},
+ {&struct{ I uint }{42}, &struct{ I int8 }{42}},
+ {&struct{ I uint }{42}, &struct{ I int32 }{42}},
+ {&struct{ I uint }{42}, &struct{ I int64 }{42}},
+ {&struct{ I uint8 }{42}, &struct{ I int }{42}},
+ {&struct{ I uint8 }{42}, &struct{ I int8 }{42}},
+ {&struct{ I uint8 }{42}, &struct{ I int32 }{42}},
+ {&struct{ I uint8 }{42}, &struct{ I int64 }{42}},
+ {&struct{ I uint32 }{42}, &struct{ I int }{42}},
+ {&struct{ I uint32 }{42}, &struct{ I int8 }{42}},
+ {&struct{ I uint32 }{42}, &struct{ I int32 }{42}},
+ {&struct{ I uint32 }{42}, &struct{ I int64 }{42}},
+ {&struct{ I uint64 }{42}, &struct{ I int }{42}},
+ {&struct{ I uint64 }{42}, &struct{ I int8 }{42}},
+ {&struct{ I uint64 }{42}, &struct{ I int32 }{42}},
+ {&struct{ I uint64 }{42}, &struct{ I int64 }{42}},
+
+ // int <=> float
+ {&struct{ I int }{42}, &struct{ I float64 }{42}},
+
+ // int <=> bool
+ {&struct{ I int }{1}, &struct{ I bool }{true}},
+ {&struct{ I int }{0}, &struct{ I bool }{false}},
+
+ // uint <=> float64
+ {&struct{ I uint }{42}, &struct{ I float64 }{42}},
+
+ // uint <=> bool
+ {&struct{ I uint }{1}, &struct{ I bool }{true}},
+ {&struct{ I uint }{0}, &struct{ I bool }{false}},
+
+ // float64 <=> bool
+ {&struct{ I float64 }{1}, &struct{ I bool }{true}},
+ {&struct{ I float64 }{0}, &struct{ I bool }{false}},
+
+ // string <=> string and string <=> []byte
+ {&struct{ S []byte }{[]byte("abc")}, &struct{ S string }{"abc"}},
+ {&struct{ S []byte }{[]byte("def")}, &struct{ S bson.Symbol }{"def"}},
+ {&struct{ S string }{"ghi"}, &struct{ S bson.Symbol }{"ghi"}},
+
+ // map <=> struct
+ {&struct {
+ A struct {
+ B, C int
+ }
+ }{struct{ B, C int }{1, 2}},
+ map[string]map[string]int{"a": map[string]int{"b": 1, "c": 2}}},
+
+ {&struct{ A bson.Symbol }{"abc"}, map[string]string{"a": "abc"}},
+ {&struct{ A bson.Symbol }{"abc"}, map[string][]byte{"a": []byte("abc")}},
+ {&struct{ A []byte }{[]byte("abc")}, map[string]string{"a": "abc"}},
+ {&struct{ A uint }{42}, map[string]int{"a": 42}},
+ {&struct{ A uint }{42}, map[string]float64{"a": 42}},
+ {&struct{ A uint }{1}, map[string]bool{"a": true}},
+ {&struct{ A int }{42}, map[string]uint{"a": 42}},
+ {&struct{ A int }{42}, map[string]float64{"a": 42}},
+ {&struct{ A int }{1}, map[string]bool{"a": true}},
+ {&struct{ A float64 }{42}, map[string]float32{"a": 42}},
+ {&struct{ A float64 }{42}, map[string]int{"a": 42}},
+ {&struct{ A float64 }{42}, map[string]uint{"a": 42}},
+ {&struct{ A float64 }{1}, map[string]bool{"a": true}},
+ {&struct{ A bool }{true}, map[string]int{"a": 1}},
+ {&struct{ A bool }{true}, map[string]uint{"a": 1}},
+ {&struct{ A bool }{true}, map[string]float64{"a": 1}},
+ {&struct{ A **byte }{&byteptr}, map[string]byte{"a": 8}},
+
+ // url.URL <=> string
+ {&struct{ URL *url.URL }{parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}},
+ {&struct{ URL url.URL }{*parseURL("h://e.c/p")}, map[string]string{"url": "h://e.c/p"}},
+
+ // Slices
+ {&struct{ S []int }{[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}},
+ {&struct{ S *[]int }{&[]int{1, 2, 3}}, map[string][]int{"s": []int{1, 2, 3}}},
+
+ // Conditionals
+ {&condBool{true}, map[string]bool{"v": true}},
+ {&condBool{}, map[string]bool{}},
+ {&condInt{1}, map[string]int{"v": 1}},
+ {&condInt{}, map[string]int{}},
+ {&condUInt{1}, map[string]uint{"v": 1}},
+ {&condUInt{}, map[string]uint{}},
+ {&condFloat{}, map[string]int{}},
+ {&condStr{"yo"}, map[string]string{"v": "yo"}},
+ {&condStr{}, map[string]string{}},
+ {&condStrNS{"yo"}, map[string]string{"v": "yo"}},
+ {&condStrNS{}, map[string]string{}},
+ {&condSlice{[]string{"yo"}}, map[string][]string{"v": []string{"yo"}}},
+ {&condSlice{}, map[string][]string{}},
+ {&condMap{map[string]int{"k": 1}}, bson.M{"v": bson.M{"k": 1}}},
+ {&condMap{}, map[string][]string{}},
+ {&condIface{"yo"}, map[string]string{"v": "yo"}},
+ {&condIface{""}, map[string]string{"v": ""}},
+ {&condIface{}, map[string]string{}},
+ {&condPtr{&truevar}, map[string]bool{"v": true}},
+ {&condPtr{&falsevar}, map[string]bool{"v": false}},
+ {&condPtr{}, map[string]string{}},
+
+ {&condTime{time.Unix(123456789, 123e6)}, map[string]time.Time{"v": time.Unix(123456789, 123e6)}},
+ {&condTime{}, map[string]string{}},
+
+ {&condStruct{struct{ A []int }{[]int{1}}}, bson.M{"v": bson.M{"a": []interface{}{1}}}},
+ {&condStruct{struct{ A []int }{}}, bson.M{}},
+
+ {&condRaw{bson.Raw{Kind: 0x0A, Data: []byte{}}}, bson.M{"v": nil}},
+ {&condRaw{bson.Raw{Kind: 0x00}}, bson.M{}},
+
+ {&namedCondStr{"yo"}, map[string]string{"myv": "yo"}},
+ {&namedCondStr{}, map[string]string{}},
+
+ {&shortInt{1}, map[string]interface{}{"v": 1}},
+ {&shortInt{1 << 30}, map[string]interface{}{"v": 1 << 30}},
+ {&shortInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}},
+ {&shortUint{1 << 30}, map[string]interface{}{"v": 1 << 30}},
+ {&shortUint{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}},
+ {&shortIface{int64(1) << 31}, map[string]interface{}{"v": int64(1 << 31)}},
+ {&shortPtr{int64ptr}, map[string]interface{}{"v": intvar}},
+
+ {&shortNonEmptyInt{1}, map[string]interface{}{"v": 1}},
+ {&shortNonEmptyInt{1 << 31}, map[string]interface{}{"v": int64(1 << 31)}},
+ {&shortNonEmptyInt{}, map[string]interface{}{}},
+
+ {&inlineInt{struct{ A, B int }{1, 2}}, map[string]interface{}{"a": 1, "b": 2}},
+ {&inlineMap{A: 1, M: map[string]interface{}{"b": 2}}, map[string]interface{}{"a": 1, "b": 2}},
+ {&inlineMap{A: 1, M: nil}, map[string]interface{}{"a": 1}},
+ {&inlineMapInt{A: 1, M: map[string]int{"b": 2}}, map[string]int{"a": 1, "b": 2}},
+ {&inlineMapInt{A: 1, M: nil}, map[string]int{"a": 1}},
+ {&inlineMapMyM{A: 1, M: MyM{"b": MyM{"c": 3}}}, map[string]interface{}{"a": 1, "b": map[string]interface{}{"c": 3}}},
+ {&inlineUnexported{M: map[string]interface{}{"b": 1}, unexported: unexported{A: 2}}, map[string]interface{}{"b": 1, "a": 2}},
+
+ // []byte <=> Binary
+ {&struct{ B []byte }{[]byte("abc")}, map[string]bson.Binary{"b": bson.Binary{Data: []byte("abc")}}},
+
+ // []byte <=> MyBytes
+ {&struct{ B MyBytes }{[]byte("abc")}, map[string]string{"b": "abc"}},
+ {&struct{ B MyBytes }{[]byte{}}, map[string]string{"b": ""}},
+ {&struct{ B MyBytes }{}, map[string]bool{}},
+ {&struct{ B []byte }{[]byte("abc")}, map[string]MyBytes{"b": []byte("abc")}},
+
+ // bool <=> MyBool
+ {&struct{ B MyBool }{true}, map[string]bool{"b": true}},
+ {&struct{ B MyBool }{}, map[string]bool{"b": false}},
+ {&struct{ B MyBool }{}, map[string]string{}},
+ {&struct{ B bool }{}, map[string]MyBool{"b": false}},
+
+ // arrays
+ {&struct{ V [2]int }{[...]int{1, 2}}, map[string][2]int{"v": [2]int{1, 2}}},
+ {&struct{ V [2]byte }{[...]byte{1, 2}}, map[string][2]byte{"v": [2]byte{1, 2}}},
+
+ // zero time
+ {&struct{ V time.Time }{}, map[string]interface{}{"v": time.Time{}}},
+
+ // zero time + 1 second + 1 millisecond; overflows int64 as nanoseconds
+ {&struct{ V time.Time }{time.Unix(-62135596799, 1e6).Local()},
+ map[string]interface{}{"v": time.Unix(-62135596799, 1e6).Local()}},
+
+ // bson.D <=> []DocElem
+ {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}},
+ {&bson.D{{"a", bson.D{{"b", 1}, {"c", 2}}}}, &MyD{{"a", MyD{{"b", 1}, {"c", 2}}}}},
+ {&struct{ V MyD }{MyD{{"a", 1}}}, &bson.D{{"v", bson.D{{"a", 1}}}}},
+
+ // bson.RawD <=> []RawDocElem
+ {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}},
+ {&bson.RawD{{"a", bson.Raw{0x08, []byte{0x01}}}}, &MyRawD{{"a", bson.Raw{0x08, []byte{0x01}}}}},
+
+ // bson.M <=> map
+ {bson.M{"a": bson.M{"b": 1, "c": 2}}, MyM{"a": MyM{"b": 1, "c": 2}}},
+ {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}},
+
+ // bson.M <=> map[MyString]
+ {bson.M{"a": bson.M{"b": 1, "c": 2}}, map[MyString]interface{}{"a": map[MyString]interface{}{"b": 1, "c": 2}}},
+
+ // json.Number <=> int64, float64
+ {&struct{ N json.Number }{"5"}, map[string]interface{}{"n": int64(5)}},
+ {&struct{ N json.Number }{"5.05"}, map[string]interface{}{"n": 5.05}},
+ {&struct{ N json.Number }{"9223372036854776000"}, map[string]interface{}{"n": float64(1 << 63)}},
+
+ // bson.D <=> non-struct getter/setter
+ {&bson.D{{"a", 1}}, &getterSetterD{{"a", 1}, {"suffix", true}}},
+ {&bson.D{{"a", 42}}, &gsintvar},
+
+ // Interface slice setter.
+ {&struct{ V ifaceSlice }{ifaceSlice{nil, nil, nil}}, bson.M{"v": []interface{}{3}}},
+}
+
+// Same thing, but only one way (obj1 => obj2).
+var oneWayCrossItems = []crossTypeItem{
+ // map <=> struct
+ {map[string]interface{}{"a": 1, "b": "2", "c": 3}, map[string]int{"a": 1, "c": 3}},
+
+ // inline map elides badly typed values
+ {map[string]interface{}{"a": 1, "b": "2", "c": 3}, &inlineMapInt{A: 1, M: map[string]int{"c": 3}}},
+
+ // Can't decode int into struct.
+ {bson.M{"a": bson.M{"b": 2}}, &struct{ A bool }{}},
+
+	// Would get decoded into an int32 too in the opposite direction.
+ {&shortIface{int64(1) << 30}, map[string]interface{}{"v": 1 << 30}},
+
+ // Ensure omitempty on struct with private fields works properly.
+ {&struct {
+ V struct{ v time.Time } ",omitempty"
+ }{}, map[string]interface{}{}},
+
+ // Attempt to marshal slice into RawD (issue #120).
+ {bson.M{"x": []int{1, 2, 3}}, &struct{ X bson.RawD }{}},
+}
+
+func testCrossPair(c *C, dump interface{}, load interface{}) {
+ c.Logf("Dump: %#v", dump)
+ c.Logf("Load: %#v", load)
+ zero := makeZeroDoc(load)
+ data, err := bson.Marshal(dump)
+ c.Assert(err, IsNil)
+ c.Logf("Dumped: %#v", string(data))
+ err = bson.Unmarshal(data, zero)
+ c.Assert(err, IsNil)
+ c.Logf("Loaded: %#v", zero)
+ c.Assert(zero, DeepEquals, load)
+}
+
+func (s *S) TestTwoWayCrossPairs(c *C) {
+ for _, item := range twoWayCrossItems {
+ testCrossPair(c, item.obj1, item.obj2)
+ testCrossPair(c, item.obj2, item.obj1)
+ }
+}
+
+func (s *S) TestOneWayCrossPairs(c *C) {
+ for _, item := range oneWayCrossItems {
+ testCrossPair(c, item.obj1, item.obj2)
+ }
+}
+
+// --------------------------------------------------------------------------
+// ObjectId hex representation test.
+
+func (s *S) TestObjectIdHex(c *C) {
+ id := bson.ObjectIdHex("4d88e15b60f486e428412dc9")
+ c.Assert(id.String(), Equals, `ObjectIdHex("4d88e15b60f486e428412dc9")`)
+ c.Assert(id.Hex(), Equals, "4d88e15b60f486e428412dc9")
+}
+
+func (s *S) TestIsObjectIdHex(c *C) {
+ test := []struct {
+ id string
+ valid bool
+ }{
+ {"4d88e15b60f486e428412dc9", true},
+ {"4d88e15b60f486e428412dc", false},
+ {"4d88e15b60f486e428412dc9e", false},
+ {"4d88e15b60f486e428412dcx", false},
+ }
+ for _, t := range test {
+ c.Assert(bson.IsObjectIdHex(t.id), Equals, t.valid)
+ }
+}
+
+// --------------------------------------------------------------------------
+// ObjectId parts extraction tests.
+
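+// An ObjectId packs a 4-byte big-endian Unix timestamp, a 3-byte machine id,
+// a 2-byte process id, and a 3-byte counter, in that order; the fields below
+// mirror that layout.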
+type objectIdParts struct {
+ id bson.ObjectId
+ timestamp int64
+ machine []byte
+ pid uint16
+ counter int32
+}
+
+var objectIds = []objectIdParts{
+ objectIdParts{
+ bson.ObjectIdHex("4d88e15b60f486e428412dc9"),
+ 1300816219,
+ []byte{0x60, 0xf4, 0x86},
+ 0xe428,
+ 4271561,
+ },
+ objectIdParts{
+ bson.ObjectIdHex("000000000000000000000000"),
+ 0,
+ []byte{0x00, 0x00, 0x00},
+ 0x0000,
+ 0,
+ },
+ objectIdParts{
+ bson.ObjectIdHex("00000000aabbccddee000001"),
+ 0,
+ []byte{0xaa, 0xbb, 0xcc},
+ 0xddee,
+ 1,
+ },
+}
+
+func (s *S) TestObjectIdPartsExtraction(c *C) {
+ for i, v := range objectIds {
+ t := time.Unix(v.timestamp, 0)
+ c.Assert(v.id.Time(), Equals, t, Commentf("#%d Wrong timestamp value", i))
+ c.Assert(v.id.Machine(), DeepEquals, v.machine, Commentf("#%d Wrong machine id value", i))
+ c.Assert(v.id.Pid(), Equals, v.pid, Commentf("#%d Wrong pid value", i))
+ c.Assert(v.id.Counter(), Equals, v.counter, Commentf("#%d Wrong counter value", i))
+ }
+}
+
+func (s *S) TestNow(c *C) {
+ before := time.Now()
+	time.Sleep(time.Millisecond)
+ now := bson.Now()
+	time.Sleep(time.Millisecond)
+ after := time.Now()
+ c.Assert(now.After(before) && now.Before(after), Equals, true, Commentf("now=%s, before=%s, after=%s", now, before, after))
+}
+
+// --------------------------------------------------------------------------
+// ObjectId generation tests.
+
+func (s *S) TestNewObjectId(c *C) {
+ // Generate 10 ids
+ ids := make([]bson.ObjectId, 10)
+ for i := 0; i < 10; i++ {
+ ids[i] = bson.NewObjectId()
+ }
+ for i := 1; i < 10; i++ {
+ prevId := ids[i-1]
+ id := ids[i]
+ // Test for uniqueness among all other 9 generated ids
+ for j, tid := range ids {
+ if j != i {
+ c.Assert(id, Not(Equals), tid, Commentf("Generated ObjectId is not unique"))
+ }
+ }
+ // Check that timestamp was incremented and is within 30 seconds of the previous one
+ secs := id.Time().Sub(prevId.Time()).Seconds()
+ c.Assert((secs >= 0 && secs <= 30), Equals, true, Commentf("Wrong timestamp in generated ObjectId"))
+ // Check that machine ids are the same
+ c.Assert(id.Machine(), DeepEquals, prevId.Machine())
+ // Check that pids are the same
+ c.Assert(id.Pid(), Equals, prevId.Pid())
+ // Test for proper increment
+ delta := int(id.Counter() - prevId.Counter())
+ c.Assert(delta, Equals, 1, Commentf("Wrong increment in generated ObjectId"))
+ }
+}
+
+func (s *S) TestNewObjectIdWithTime(c *C) {
+ t := time.Unix(12345678, 0)
+ id := bson.NewObjectIdWithTime(t)
+ c.Assert(id.Time(), Equals, t)
+ c.Assert(id.Machine(), DeepEquals, []byte{0x00, 0x00, 0x00})
+ c.Assert(int(id.Pid()), Equals, 0)
+ c.Assert(int(id.Counter()), Equals, 0)
+}
+
+// --------------------------------------------------------------------------
+// ObjectId JSON marshaling.
+
+type jsonType struct {
+ Id bson.ObjectId
+}
+
+var jsonIdTests = []struct {
+ value jsonType
+ json string
+ marshal bool
+ unmarshal bool
+ error string
+}{{
+ value: jsonType{Id: bson.ObjectIdHex("4d88e15b60f486e428412dc9")},
+ json: `{"Id":"4d88e15b60f486e428412dc9"}`,
+ marshal: true,
+ unmarshal: true,
+}, {
+ value: jsonType{},
+ json: `{"Id":""}`,
+ marshal: true,
+ unmarshal: true,
+}, {
+ value: jsonType{},
+ json: `{"Id":null}`,
+ marshal: false,
+ unmarshal: true,
+}, {
+ json: `{"Id":"4d88e15b60f486e428412dc9A"}`,
+ error: `invalid ObjectId in JSON: "4d88e15b60f486e428412dc9A"`,
+ marshal: false,
+ unmarshal: true,
+}, {
+ json: `{"Id":"4d88e15b60f486e428412dcZ"}`,
+ error: `invalid ObjectId in JSON: "4d88e15b60f486e428412dcZ" .*`,
+ marshal: false,
+ unmarshal: true,
+}}
+
+func (s *S) TestObjectIdJSONMarshaling(c *C) {
+ for _, test := range jsonIdTests {
+ if test.marshal {
+ data, err := json.Marshal(&test.value)
+ if test.error == "" {
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, test.json)
+ } else {
+ c.Assert(err, ErrorMatches, test.error)
+ }
+ }
+
+ if test.unmarshal {
+ var value jsonType
+ err := json.Unmarshal([]byte(test.json), &value)
+ if test.error == "" {
+ c.Assert(err, IsNil)
+ c.Assert(value, DeepEquals, test.value)
+ } else {
+ c.Assert(err, ErrorMatches, test.error)
+ }
+ }
+ }
+}
+
+// --------------------------------------------------------------------------
+// Spec tests
+
+type specTest struct {
+ Description string
+ Documents []struct {
+ Decoded map[string]interface{}
+ Encoded string
+ DecodeOnly bool `yaml:"decodeOnly"`
+ Error interface{}
+ }
+}
+
+func (s *S) TestSpecTests(c *C) {
+ for _, data := range specTests {
+ var test specTest
+ err := yaml.Unmarshal([]byte(data), &test)
+ c.Assert(err, IsNil)
+
+ c.Logf("Running spec test set %q", test.Description)
+
+ for _, doc := range test.Documents {
+ if doc.Error != nil {
+ continue
+ }
+ c.Logf("Ensuring %q decodes as %v", doc.Encoded, doc.Decoded)
+ var decoded map[string]interface{}
+ encoded, err := hex.DecodeString(doc.Encoded)
+ c.Assert(err, IsNil)
+ err = bson.Unmarshal(encoded, &decoded)
+ c.Assert(err, IsNil)
+ c.Assert(decoded, DeepEquals, doc.Decoded)
+ }
+
+ for _, doc := range test.Documents {
+ if doc.DecodeOnly || doc.Error != nil {
+ continue
+ }
+ c.Logf("Ensuring %v encodes as %q", doc.Decoded, doc.Encoded)
+ encoded, err := bson.Marshal(doc.Decoded)
+ c.Assert(err, IsNil)
+ c.Assert(strings.ToUpper(hex.EncodeToString(encoded)), Equals, doc.Encoded)
+ }
+
+ for _, doc := range test.Documents {
+ if doc.Error == nil {
+ continue
+ }
+ c.Logf("Ensuring %q errors when decoded: %s", doc.Encoded, doc.Error)
+ var decoded map[string]interface{}
+ encoded, err := hex.DecodeString(doc.Encoded)
+ c.Assert(err, IsNil)
+ err = bson.Unmarshal(encoded, &decoded)
+ c.Assert(err, NotNil)
+ c.Logf("Failed with: %v", err)
+ }
+ }
+}
+
+// --------------------------------------------------------------------------
+// ObjectId encoding.TextMarshaler and encoding.TextUnmarshaler.
+
+var textIdTests = []struct {
+ value bson.ObjectId
+ text string
+ marshal bool
+ unmarshal bool
+ error string
+}{{
+ value: bson.ObjectIdHex("4d88e15b60f486e428412dc9"),
+ text: "4d88e15b60f486e428412dc9",
+ marshal: true,
+ unmarshal: true,
+}, {
+ text: "",
+ marshal: true,
+ unmarshal: true,
+}, {
+ text: "4d88e15b60f486e428412dc9A",
+ marshal: false,
+ unmarshal: true,
+ error: `invalid ObjectId: 4d88e15b60f486e428412dc9A`,
+}, {
+ text: "4d88e15b60f486e428412dcZ",
+ marshal: false,
+ unmarshal: true,
+ error: `invalid ObjectId: 4d88e15b60f486e428412dcZ .*`,
+}}
+
+func (s *S) TestObjectIdTextMarshaling(c *C) {
+ for _, test := range textIdTests {
+ if test.marshal {
+ data, err := test.value.MarshalText()
+ if test.error == "" {
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, test.text)
+ } else {
+ c.Assert(err, ErrorMatches, test.error)
+ }
+ }
+
+ if test.unmarshal {
+ err := test.value.UnmarshalText([]byte(test.text))
+ if test.error == "" {
+ c.Assert(err, IsNil)
+ if test.value != "" {
+ value := bson.ObjectIdHex(test.text)
+ c.Assert(value, DeepEquals, test.value)
+ }
+ } else {
+ c.Assert(err, ErrorMatches, test.error)
+ }
+ }
+ }
+}
+
+// --------------------------------------------------------------------------
+// ObjectId XML marshaling.
+
+type xmlType struct {
+ Id bson.ObjectId
+}
+
+var xmlIdTests = []struct {
+ value xmlType
+ xml string
+ marshal bool
+ unmarshal bool
+ error string
+}{{
+ value: xmlType{Id: bson.ObjectIdHex("4d88e15b60f486e428412dc9")},
+ xml: "<xmlType><Id>4d88e15b60f486e428412dc9</Id></xmlType>",
+ marshal: true,
+ unmarshal: true,
+}, {
+ value: xmlType{},
+ xml: "<xmlType><Id></Id></xmlType>",
+ marshal: true,
+ unmarshal: true,
+}, {
+ xml: "<xmlType><Id>4d88e15b60f486e428412dc9A</Id></xmlType>",
+ marshal: false,
+ unmarshal: true,
+ error: `invalid ObjectId: 4d88e15b60f486e428412dc9A`,
+}, {
+ xml: "<xmlType><Id>4d88e15b60f486e428412dcZ</Id></xmlType>",
+ marshal: false,
+ unmarshal: true,
+ error: `invalid ObjectId: 4d88e15b60f486e428412dcZ .*`,
+}}
+
+func (s *S) TestObjectIdXMLMarshaling(c *C) {
+ for _, test := range xmlIdTests {
+ if test.marshal {
+ data, err := xml.Marshal(&test.value)
+ if test.error == "" {
+ c.Assert(err, IsNil)
+ c.Assert(string(data), Equals, test.xml)
+ } else {
+ c.Assert(err, ErrorMatches, test.error)
+ }
+ }
+
+ if test.unmarshal {
+ var value xmlType
+ err := xml.Unmarshal([]byte(test.xml), &value)
+ if test.error == "" {
+ c.Assert(err, IsNil)
+ c.Assert(value, DeepEquals, test.value)
+ } else {
+ c.Assert(err, ErrorMatches, test.error)
+ }
+ }
+ }
+}
+
+// --------------------------------------------------------------------------
+// Some simple benchmarks.
+
+type BenchT struct {
+ A, B, C, D, E, F string
+}
+
+type BenchRawT struct {
+ A string
+ B int
+ C bson.M
+ D []float64
+}
+
+func (s *S) BenchmarkUnmarshalStruct(c *C) {
+ v := BenchT{A: "A", D: "D", E: "E"}
+ data, err := bson.Marshal(&v)
+ if err != nil {
+ panic(err)
+ }
+ c.ResetTimer()
+ for i := 0; i < c.N; i++ {
+ err = bson.Unmarshal(data, &v)
+ }
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (s *S) BenchmarkUnmarshalMap(c *C) {
+ m := bson.M{"a": "a", "d": "d", "e": "e"}
+ data, err := bson.Marshal(&m)
+ if err != nil {
+ panic(err)
+ }
+ c.ResetTimer()
+ for i := 0; i < c.N; i++ {
+ err = bson.Unmarshal(data, &m)
+ }
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (s *S) BenchmarkUnmarshalRaw(c *C) {
+ var err error
+ m := BenchRawT{
+ A: "test_string",
+ B: 123,
+ C: bson.M{
+ "subdoc_int": 12312,
+ "subdoc_doc": bson.M{"1": 1},
+ },
+ D: []float64{0.0, 1.3333, -99.9997, 3.1415},
+ }
+ data, err := bson.Marshal(&m)
+ if err != nil {
+ panic(err)
+ }
+ raw := bson.Raw{}
+ c.ResetTimer()
+ for i := 0; i < c.N; i++ {
+ err = bson.Unmarshal(data, &raw)
+ }
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (s *S) BenchmarkNewObjectId(c *C) {
+ for i := 0; i < c.N; i++ {
+ bson.NewObjectId()
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decimal.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decimal.go
new file mode 100644
index 00000000000..3d2f7002037
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decimal.go
@@ -0,0 +1,310 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package bson
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Decimal128 holds decimal128 BSON values.
+type Decimal128 struct {
+ h, l uint64
+}
+
+func (d Decimal128) String() string {
+ var pos int // positive sign
+ var e int // exponent
+ var h, l uint64 // significand high/low
+
+ if d.h>>63&1 == 0 {
+ pos = 1
+ }
+
+ switch d.h >> 58 & (1<<5 - 1) {
+ case 0x1F:
+ return "NaN"
+ case 0x1E:
+ return "-Inf"[pos:]
+ }
+
+ l = d.l
+ if d.h>>61&3 == 3 {
+ // Bits: 1*sign 2*ignored 14*exponent 111*significand.
+ // Implicit 0b100 prefix in significand.
+ e = int(d.h>>47&(1<<14-1)) - 6176
+ //h = 4<<47 | d.h&(1<<47-1)
+ // Spec says all of these values are out of range.
+ h, l = 0, 0
+ } else {
+ // Bits: 1*sign 14*exponent 113*significand
+ e = int(d.h>>49&(1<<14-1)) - 6176
+ h = d.h & (1<<49 - 1)
+ }
+
+	// This case would be handled by the logic below, but it's trivial and
+	// common enough to short-circuit here.
+ if h == 0 && l == 0 && e == 0 {
+ return "-0"[pos:]
+ }
+
+ var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
+ var last = len(repr)
+ var i = len(repr)
+ var dot = len(repr) + e
+ var rem uint32
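+	// Digits are produced least-significant first: each divmod call strips the
+	// low nine decimal digits off the 128-bit significand, and the inner loop
+	// writes them right-to-left into repr, placing the dot (or switching to
+	// scientific notation) according to the exponent.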
+Loop:
+ for d9 := 0; d9 < 5; d9++ {
+ h, l, rem = divmod(h, l, 1e9)
+ for d1 := 0; d1 < 9; d1++ {
+ // Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
+ if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
+ e += len(repr) - i
+ i--
+ repr[i] = '.'
+ last = i - 1
+ dot = len(repr) // Unmark.
+ }
+ c := '0' + byte(rem%10)
+ rem /= 10
+ i--
+ repr[i] = c
+ // Handle "0E+3", "1E+3", etc.
+ if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
+ last = i
+ break Loop
+ }
+ if c != '0' {
+ last = i
+ }
+			// Break early. The loop works without this, but once the value
+			// is exhausted there is no point iterating further.
+ if dot > i && l == 0 && h == 0 && rem == 0 {
+ break Loop
+ }
+ }
+ }
+ repr[last-1] = '-'
+ last--
+
+ if e > 0 {
+ return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
+ }
+ if e < 0 {
+ return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
+ }
+ return string(repr[last+pos:])
+}
+
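+// divmod does schoolbook long division of the 128-bit value h<<64|l by a
+// 32-bit divisor, one 32-bit limb at a time, feeding each partial remainder
+// into the next limb's dividend.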
+func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
+ div64 := uint64(div)
+ a := h >> 32
+ aq := a / div64
+ ar := a % div64
+ b := ar<<32 + h&(1<<32-1)
+ bq := b / div64
+ br := b % div64
+ c := br<<32 + l>>32
+ cq := c / div64
+ cr := c % div64
+ d := cr<<32 + l&(1<<32-1)
+ dq := d / div64
+ dr := d % div64
+ return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
+}
+
+var dNaN = Decimal128{0x1F << 58, 0}
+var dPosInf = Decimal128{0x1E << 58, 0}
+var dNegInf = Decimal128{0x3E << 58, 0}
+
+func dErr(s string) (Decimal128, error) {
+ return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
+}
+
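+// ParseDecimal128 parses s as a decimal128 value: an optional sign, up to 34
+// significant digits with an optional dot, an optional E/e exponent, or the
+// special values NaN and (-)Inf/Infinity in any letter case.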
+func ParseDecimal128(s string) (Decimal128, error) {
+ orig := s
+ if s == "" {
+ return dErr(orig)
+ }
+ neg := s[0] == '-'
+ if neg || s[0] == '+' {
+ s = s[1:]
+ }
+
+ if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
+ if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
+ return dNaN, nil
+ }
+ if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
+ if neg {
+ return dNegInf, nil
+ }
+ return dPosInf, nil
+ }
+ return dErr(orig)
+ }
+
+ var h, l uint64
+ var e int
+
+ var add, ovr uint32
+ var mul uint32 = 1
+ var dot = -1
+ var digits = 0
+ var i = 0
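+	// Digits are accumulated in batches: add collects up to nine digits while
+	// mul tracks their scale, and muladd flushes each full batch into the
+	// 128-bit significand h/l.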
+ for i < len(s) {
+ c := s[i]
+ if mul == 1e9 {
+ h, l, ovr = muladd(h, l, mul, add)
+ mul, add = 1, 0
+ if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+ return dErr(orig)
+ }
+ }
+ if c >= '0' && c <= '9' {
+ i++
+ if c > '0' || digits > 0 {
+ digits++
+ }
+ if digits > 34 {
+ if c == '0' {
+ // Exact rounding.
+ e++
+ continue
+ }
+ return dErr(orig)
+ }
+ mul *= 10
+ add *= 10
+ add += uint32(c - '0')
+ continue
+ }
+ if c == '.' {
+ i++
+ if dot >= 0 || i == 1 && len(s) == 1 {
+ return dErr(orig)
+ }
+ if i == len(s) {
+ break
+ }
+ if s[i] < '0' || s[i] > '9' || e > 0 {
+ return dErr(orig)
+ }
+ dot = i
+ continue
+ }
+ break
+ }
+ if i == 0 {
+ return dErr(orig)
+ }
+ if mul > 1 {
+ h, l, ovr = muladd(h, l, mul, add)
+ if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+ return dErr(orig)
+ }
+ }
+ if dot >= 0 {
+ e += dot - i
+ }
+ if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
+ i++
+ eneg := s[i] == '-'
+ if eneg || s[i] == '+' {
+ i++
+ if i == len(s) {
+ return dErr(orig)
+ }
+ }
+ n := 0
+ for i < len(s) && n < 1e4 {
+ c := s[i]
+ i++
+ if c < '0' || c > '9' {
+ return dErr(orig)
+ }
+ n *= 10
+ n += int(c - '0')
+ }
+ if eneg {
+ n = -n
+ }
+ e += n
+ for e < -6176 {
+ // Subnormal.
+ var div uint32 = 1
+ for div < 1e9 && e < -6176 {
+ div *= 10
+ e++
+ }
+ var rem uint32
+ h, l, rem = divmod(h, l, div)
+ if rem > 0 {
+ return dErr(orig)
+ }
+ }
+ for e > 6111 {
+ // Clamped.
+ var mul uint32 = 1
+ for mul < 1e9 && e > 6111 {
+ mul *= 10
+ e--
+ }
+ h, l, ovr = muladd(h, l, mul, 0)
+ if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+ return dErr(orig)
+ }
+ }
+ if e < -6176 || e > 6111 {
+ return dErr(orig)
+ }
+ }
+
+ if i < len(s) {
+ return dErr(orig)
+ }
+
+ h |= uint64(e+6176) & uint64(1<<14-1) << 49
+ if neg {
+ h |= 1 << 63
+ }
+ return Decimal128{h, l}, nil
+}
+
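+// muladd computes (h<<64|l)*mul + add across four 32-bit limbs, returning the
+// 128-bit result and any bits carried out of the top limb as overflow.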
+func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
+ mul64 := uint64(mul)
+ a := mul64 * (l & (1<<32 - 1))
+ b := a>>32 + mul64*(l>>32)
+ c := b>>32 + mul64*(h&(1<<32-1))
+ d := c>>32 + mul64*(h>>32)
+
+ a = a&(1<<32-1) + uint64(add)
+ b = b&(1<<32-1) + a>>32
+ c = c&(1<<32-1) + b>>32
+ d = d&(1<<32-1) + c>>32
+
+ return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decimal_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decimal_test.go
new file mode 100644
index 00000000000..a29728094ec
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decimal_test.go
@@ -0,0 +1,4109 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package bson_test
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "gopkg.in/mgo.v2/bson"
+
+ . "gopkg.in/check.v1"
+)
+
+// --------------------------------------------------------------------------
+// Decimal tests
+
+type decimalTests struct {
+ Valid []struct {
+ Description string `json:"description"`
+ BSON string `json:"bson"`
+ CanonicalBSON string `json:"canonical_bson"`
+ ExtJSON string `json:"extjson"`
+ CanonicalExtJSON string `json:"canonical_extjson"`
+ Lossy bool `json:"lossy"`
+ } `json:"valid"`
+
+ ParseErrors []struct {
+ Description string `json:"description"`
+ String string `json:"string"`
+ } `json:"parseErrors"`
+}
+
+func extJSONRepr(s string) string {
+ var value struct {
+ D struct {
+ Repr string `json:"$numberDecimal"`
+ } `json:"d"`
+ }
+ err := json.Unmarshal([]byte(s), &value)
+ if err != nil {
+ panic(err)
+ }
+ return value.D.Repr
+}
+
+func (s *S) TestDecimalTests(c *C) {
+	// Go conventionally spells the infinities "Inf"/"-Inf" (math/big, for
+	// one, won't parse "Infinity"); both spellings conform to the spec.
+ goStr := func(s string) string {
+ switch s {
+ case "Infinity":
+ return "Inf"
+ case "-Infinity":
+ return "-Inf"
+ }
+ return s
+ }
+
+ for _, testEntry := range decimalTestsJSON {
+ testFile := testEntry.file
+
+ var tests decimalTests
+ err := json.Unmarshal([]byte(testEntry.json), &tests)
+ c.Assert(err, IsNil)
+
+ for _, test := range tests.Valid {
+ c.Logf("Running %s test: %s", testFile, test.Description)
+
+ test.BSON = strings.ToLower(test.BSON)
+
+ // Unmarshal value from BSON data.
+			bsonData, err := hex.DecodeString(test.BSON)
+			c.Assert(err, IsNil)
+			var bsonValue struct{ D interface{} }
+			err = bson.Unmarshal(bsonData, &bsonValue)
+ c.Assert(err, IsNil)
+ dec128, ok := bsonValue.D.(bson.Decimal128)
+ c.Assert(ok, Equals, true)
+
+ // Extract ExtJSON representations (canonical and not).
+ extjRepr := extJSONRepr(test.ExtJSON)
+ cextjRepr := extjRepr
+ if test.CanonicalExtJSON != "" {
+ cextjRepr = extJSONRepr(test.CanonicalExtJSON)
+ }
+
+ wantRepr := goStr(cextjRepr)
+
+ // Generate canonical representation.
+ c.Assert(dec128.String(), Equals, wantRepr)
+
+ // Parse original canonical representation.
+ parsed, err := bson.ParseDecimal128(cextjRepr)
+ c.Assert(err, IsNil)
+ c.Assert(parsed.String(), Equals, wantRepr)
+
+ // Parse non-canonical representation.
+ parsed, err = bson.ParseDecimal128(extjRepr)
+ c.Assert(err, IsNil)
+ c.Assert(parsed.String(), Equals, wantRepr)
+
+ // Parse Go canonical representation (Inf vs. Infinity).
+ parsed, err = bson.ParseDecimal128(wantRepr)
+ c.Assert(err, IsNil)
+ c.Assert(parsed.String(), Equals, wantRepr)
+
+ // Marshal original value back into BSON data.
+ data, err := bson.Marshal(bsonValue)
+ c.Assert(err, IsNil)
+ c.Assert(hex.EncodeToString(data), Equals, test.BSON)
+
+ if test.Lossy {
+ continue
+ }
+
+ // Marshal the parsed canonical representation.
+ var parsedValue struct{ D interface{} }
+ parsedValue.D = parsed
+ data, err = bson.Marshal(parsedValue)
+ c.Assert(err, IsNil)
+ c.Assert(hex.EncodeToString(data), Equals, test.BSON)
+ }
+
+ for _, test := range tests.ParseErrors {
+ c.Logf("Running %s parse error test: %s (string %q)", testFile, test.Description, test.String)
+
+ _, err := bson.ParseDecimal128(test.String)
+ quoted := regexp.QuoteMeta(fmt.Sprintf("%q", test.String))
+ c.Assert(err, ErrorMatches, `cannot parse `+quoted+` as a decimal128`)
+ }
+ }
+}
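+
+// A minimal standalone sketch of the API exercised above, outside the
+// gocheck harness (it assumes only bson.ParseDecimal128 and
+// Decimal128.String, both used throughout this file):
+//
+//	d, err := bson.ParseDecimal128("-100E-10")
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(d) // Decimal128 is a Stringer; prints the canonical "-1.00E-8"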
+
+const decBenchNum = "9.999999999999999999999999999999999E+6144"
+
+func (s *S) BenchmarkDecimal128String(c *C) {
+ d, err := bson.ParseDecimal128(decBenchNum)
+ c.Assert(err, IsNil)
+ c.Assert(d.String(), Equals, decBenchNum)
+
+ c.ResetTimer()
+ for i := 0; i < c.N; i++ {
+ d.String()
+ }
+}
+
+func (s *S) BenchmarkDecimal128Parse(c *C) {
+ var err error
+ c.ResetTimer()
+ for i := 0; i < c.N; i++ {
+ _, err = bson.ParseDecimal128(decBenchNum)
+ }
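+ // Check only the final iteration's error, outside the timed loop.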
+ if err != nil {
+ panic(err)
+ }
+}
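+
+// Note: under gopkg.in/check.v1, the benchmarks above run only when the
+// benchmark flag is passed, e.g. `go test -check.b`.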
+
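+// decimalTestsJSON embeds the decimal128 corpus files verbatim, keyed by
+// their upstream file names, so the tests need no external fixtures.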
+var decimalTestsJSON = []struct{ file, json string }{
+ {"decimal128-1.json", `
+{
+ "description": "Decimal128",
+ "bson_type": "0x13",
+ "test_key": "d",
+ "valid": [
+ {
+ "description": "Special - Canonical NaN",
+ "bson": "180000001364000000000000000000000000000000007C00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}"
+ },
+ {
+ "description": "Special - Negative NaN",
+ "bson": "18000000136400000000000000000000000000000000FC00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}",
+ "lossy": true
+ },
+ {
+ "description": "Special - Negative NaN",
+ "bson": "18000000136400000000000000000000000000000000FC00",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-NaN\"}}",
+ "lossy": true
+ },
+ {
+ "description": "Special - Canonical SNaN",
+ "bson": "180000001364000000000000000000000000000000007E00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}",
+ "lossy": true
+ },
+ {
+ "description": "Special - Negative SNaN",
+ "bson": "18000000136400000000000000000000000000000000FE00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}",
+ "lossy": true
+ },
+ {
+ "description": "Special - NaN with a payload",
+ "bson": "180000001364001200000000000000000000000000007E00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}",
+ "lossy": true
+ },
+ {
+ "description": "Special - Canonical Positive Infinity",
+ "bson": "180000001364000000000000000000000000000000007800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+ },
+ {
+ "description": "Special - Canonical Negative Infinity",
+ "bson": "18000000136400000000000000000000000000000000F800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+ },
+ {
+ "description": "Special - Invalid representation treated as 0",
+ "bson": "180000001364000000000000000000000000000000106C00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}",
+ "lossy": true
+ },
+ {
+ "description": "Special - Invalid representation treated as -0",
+ "bson": "18000000136400DCBA9876543210DEADBEEF00000010EC00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}",
+ "lossy": true
+ },
+ {
+ "description": "Special - Invalid representation treated as 0E3",
+ "bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF116C00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}",
+ "lossy": true
+ },
+ {
+ "description": "Regular - Adjusted Exponent Limit",
+ "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF22F00",
+ "extjson": "{\"d\": { \"$numberDecimal\": \"0.000001234567890123456789012345678901234\" }}"
+ },
+ {
+ "description": "Regular - Smallest",
+ "bson": "18000000136400D204000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001234\"}}"
+ },
+ {
+ "description": "Regular - Smallest with Trailing Zeros",
+ "bson": "1800000013640040EF5A07000000000000000000002A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00123400000\"}}"
+ },
+ {
+ "description": "Regular - 0.1",
+ "bson": "1800000013640001000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1\"}}"
+ },
+ {
+ "description": "Regular - 0.1234567890123456789012345678901234",
+ "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFC2F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1234567890123456789012345678901234\"}}"
+ },
+ {
+ "description": "Regular - 0",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "Regular - -0",
+ "bson": "18000000136400000000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+ },
+ {
+ "description": "Regular - -0.0",
+ "bson": "1800000013640000000000000000000000000000003EB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}"
+ },
+ {
+ "description": "Regular - 2",
+ "bson": "180000001364000200000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"2\"}}"
+ },
+ {
+ "description": "Regular - 2.000",
+ "bson": "18000000136400D0070000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"2.000\"}}"
+ },
+ {
+ "description": "Regular - Largest",
+ "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}"
+ },
+ {
+ "description": "Scientific - Tiniest",
+ "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED010000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E-6143\"}}"
+ },
+ {
+ "description": "Scientific - Tiny",
+ "bson": "180000001364000100000000000000000000000000000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}"
+ },
+ {
+ "description": "Scientific - Negative Tiny",
+ "bson": "180000001364000100000000000000000000000000008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}"
+ },
+ {
+ "description": "Scientific - Adjusted Exponent Limit",
+ "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF02F00",
+ "extjson": "{\"d\": { \"$numberDecimal\": \"1.234567890123456789012345678901234E-7\" }}"
+ },
+ {
+ "description": "Scientific - Fractional",
+ "bson": "1800000013640064000000000000000000000000002CB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}"
+ },
+ {
+ "description": "Scientific - 0 with Exponent",
+ "bson": "180000001364000000000000000000000000000000205F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6000\"}}"
+ },
+ {
+ "description": "Scientific - 0 with Negative Exponent",
+ "bson": "1800000013640000000000000000000000000000007A2B00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-611\"}}"
+ },
+ {
+ "description": "Scientific - No Decimal with Signed Exponent",
+ "bson": "180000001364000100000000000000000000000000463000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}"
+ },
+ {
+ "description": "Scientific - Trailing Zero",
+ "bson": "180000001364001A04000000000000000000000000423000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.050E+4\"}}"
+ },
+ {
+ "description": "Scientific - With Decimal",
+ "bson": "180000001364006900000000000000000000000000423000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.05E+3\"}}"
+ },
+ {
+ "description": "Scientific - Full",
+ "bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"5192296858534827628530496329220095\"}}"
+ },
+ {
+ "description": "Scientific - Large",
+ "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}"
+ },
+ {
+ "description": "Scientific - Largest",
+ "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - Exponent Normalization",
+ "bson": "1800000013640064000000000000000000000000002CB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-100E-10\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - Unsigned Positive Exponent",
+ "bson": "180000001364000100000000000000000000000000463000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - Lowercase Exponent Identifier",
+ "bson": "180000001364000100000000000000000000000000463000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - Long Significand with Exponent",
+ "bson": "1800000013640079D9E0F9763ADA429D0200000000583000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345689012345789012345E+12\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.2345689012345789012345E+34\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - Positive Sign",
+ "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+1234567890123456789012345678901234\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - Long Decimal String",
+ "bson": "180000001364000100000000000000000000000000722800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \".000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-999\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - nan",
+ "bson": "180000001364000000000000000000000000000000007C00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"nan\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - nAn",
+ "bson": "180000001364000000000000000000000000000000007C00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"nAn\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - +infinity",
+ "bson": "180000001364000000000000000000000000000000007800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+infinity\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - infinity",
+ "bson": "180000001364000000000000000000000000000000007800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"infinity\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - infiniTY",
+ "bson": "180000001364000000000000000000000000000000007800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"infiniTY\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - inf",
+ "bson": "180000001364000000000000000000000000000000007800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"inf\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - inF",
+ "bson": "180000001364000000000000000000000000000000007800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"inF\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - -infinity",
+ "bson": "18000000136400000000000000000000000000000000F800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infinity\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - -infiniTy",
+ "bson": "18000000136400000000000000000000000000000000F800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-infiniTy\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - -Inf",
+ "bson": "18000000136400000000000000000000000000000000F800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - -inf",
+ "bson": "18000000136400000000000000000000000000000000F800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inf\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+ },
+ {
+ "description": "Non-Canonical Parsing - -inF",
+ "bson": "18000000136400000000000000000000000000000000F800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-inF\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+ },
+ {
+ "description": "Rounded Subnormal number",
+ "bson": "180000001364000100000000000000000000000000000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E-6177\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}"
+ },
+ {
+ "description": "Clamped",
+ "bson": "180000001364000a00000000000000000000000000fe5f00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E6112\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}"
+ },
+ {
+ "description": "Exact rounding",
+ "bson": "18000000136400000000000a5bc138938d44c64d31cc3700",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+999\"}}"
+ }
+ ]
+}
+`},
+
+ {"decimal128-2.json", `
+{
+ "description": "Decimal128",
+ "bson_type": "0x13",
+ "test_key": "d",
+ "valid": [
+ {
+ "description": "[decq021] Normality",
+ "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C40B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234567890123456789012345678901234\"}}"
+ },
+ {
+ "description": "[decq823] values around [u]int32 edges (zeros done earlier)",
+ "bson": "18000000136400010000800000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483649\"}}"
+ },
+ {
+ "description": "[decq822] values around [u]int32 edges (zeros done earlier)",
+ "bson": "18000000136400000000800000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483648\"}}"
+ },
+ {
+ "description": "[decq821] values around [u]int32 edges (zeros done earlier)",
+ "bson": "18000000136400FFFFFF7F0000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483647\"}}"
+ },
+ {
+ "description": "[decq820] values around [u]int32 edges (zeros done earlier)",
+ "bson": "18000000136400FEFFFF7F0000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483646\"}}"
+ },
+ {
+ "description": "[decq152] fold-downs (more below)",
+ "bson": "18000000136400393000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-12345\"}}"
+ },
+ {
+ "description": "[decq154] fold-downs (more below)",
+ "bson": "18000000136400D20400000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234\"}}"
+ },
+ {
+ "description": "[decq006] derivative canonical plain strings",
+ "bson": "18000000136400EE0200000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-750\"}}"
+ },
+ {
+ "description": "[decq164] fold-downs (more below)",
+ "bson": "1800000013640039300000000000000000000000003CB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-123.45\"}}"
+ },
+ {
+ "description": "[decq156] fold-downs (more below)",
+ "bson": "180000001364007B0000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-123\"}}"
+ },
+ {
+ "description": "[decq008] derivative canonical plain strings",
+ "bson": "18000000136400EE020000000000000000000000003EB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-75.0\"}}"
+ },
+ {
+ "description": "[decq158] fold-downs (more below)",
+ "bson": "180000001364000C0000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-12\"}}"
+ },
+ {
+ "description": "[decq122] Nmax and similar",
+ "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFFDF00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999999999999999999999999999999999E+6144\"}}"
+ },
+ {
+ "description": "[decq002] (mostly derived from the Strawman 4 document and examples)",
+ "bson": "18000000136400EE020000000000000000000000003CB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50\"}}"
+ },
+ {
+ "description": "[decq004] derivative canonical plain strings",
+ "bson": "18000000136400EE0200000000000000000000000042B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E+3\"}}"
+ },
+ {
+ "description": "[decq018] derivative canonical plain strings",
+ "bson": "18000000136400EE020000000000000000000000002EB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E-7\"}}"
+ },
+ {
+ "description": "[decq125] Nmax and similar",
+ "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFEDF00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.234567890123456789012345678901234E+6144\"}}"
+ },
+ {
+ "description": "[decq131] fold-downs (more below)",
+ "bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}"
+ },
+ {
+ "description": "[decq162] fold-downs (more below)",
+ "bson": "180000001364007B000000000000000000000000003CB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.23\"}}"
+ },
+ {
+ "description": "[decq176] Nmin and below",
+ "bson": "18000000136400010000000A5BC138938D44C64D31008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000001E-6143\"}}"
+ },
+ {
+ "description": "[decq174] Nmin and below",
+ "bson": "18000000136400000000000A5BC138938D44C64D31008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E-6143\"}}"
+ },
+ {
+ "description": "[decq133] fold-downs (more below)",
+ "bson": "18000000136400000000000A5BC138938D44C64D31FEDF00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E+6144\"}}"
+ },
+ {
+ "description": "[decq160] fold-downs (more below)",
+ "bson": "18000000136400010000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}"
+ },
+ {
+ "description": "[decq172] Nmin and below",
+ "bson": "180000001364000100000000000000000000000000428000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6143\"}}"
+ },
+ {
+ "description": "[decq010] derivative canonical plain strings",
+ "bson": "18000000136400EE020000000000000000000000003AB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.750\"}}"
+ },
+ {
+ "description": "[decq012] derivative canonical plain strings",
+ "bson": "18000000136400EE0200000000000000000000000038B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0750\"}}"
+ },
+ {
+ "description": "[decq014] derivative canonical plain strings",
+ "bson": "18000000136400EE0200000000000000000000000034B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000750\"}}"
+ },
+ {
+ "description": "[decq016] derivative canonical plain strings",
+ "bson": "18000000136400EE0200000000000000000000000030B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000750\"}}"
+ },
+ {
+ "description": "[decq404] zeros",
+ "bson": "180000001364000000000000000000000000000000000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}"
+ },
+ {
+ "description": "[decq424] negative zeros",
+ "bson": "180000001364000000000000000000000000000000008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}"
+ },
+ {
+ "description": "[decq407] zeros",
+ "bson": "1800000013640000000000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+ },
+ {
+ "description": "[decq427] negative zeros",
+ "bson": "1800000013640000000000000000000000000000003CB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}"
+ },
+ {
+ "description": "[decq409] zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[decq428] negative zeros",
+ "bson": "18000000136400000000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+ },
+ {
+ "description": "[decq700] Selected DPD codes",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[decq406] zeros",
+ "bson": "1800000013640000000000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+ },
+ {
+ "description": "[decq426] negative zeros",
+ "bson": "1800000013640000000000000000000000000000003CB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}"
+ },
+ {
+ "description": "[decq410] zeros",
+ "bson": "180000001364000000000000000000000000000000463000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}"
+ },
+ {
+ "description": "[decq431] negative zeros",
+ "bson": "18000000136400000000000000000000000000000046B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+3\"}}"
+ },
+ {
+ "description": "[decq419] clamped zeros...",
+ "bson": "180000001364000000000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}"
+ },
+ {
+ "description": "[decq432] negative zeros",
+ "bson": "180000001364000000000000000000000000000000FEDF00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}"
+ },
+ {
+ "description": "[decq405] zeros",
+ "bson": "180000001364000000000000000000000000000000000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}"
+ },
+ {
+ "description": "[decq425] negative zeros",
+ "bson": "180000001364000000000000000000000000000000008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}"
+ },
+ {
+ "description": "[decq508] Specials",
+ "bson": "180000001364000000000000000000000000000000007800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}"
+ },
+ {
+ "description": "[decq528] Specials",
+ "bson": "18000000136400000000000000000000000000000000F800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}"
+ },
+ {
+ "description": "[decq541] Specials",
+ "bson": "180000001364000000000000000000000000000000007C00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}"
+ },
+ {
+ "description": "[decq074] Nmin and below",
+ "bson": "18000000136400000000000A5BC138938D44C64D31000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E-6143\"}}"
+ },
+ {
+ "description": "[decq602] fold-down full sequence",
+ "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}"
+ },
+ {
+ "description": "[decq604] fold-down full sequence",
+ "bson": "180000001364000000000081EFAC855B416D2DEE04FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}"
+ },
+ {
+ "description": "[decq606] fold-down full sequence",
+ "bson": "1800000013640000000080264B91C02220BE377E00FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}"
+ },
+ {
+ "description": "[decq608] fold-down full sequence",
+ "bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}"
+ },
+ {
+ "description": "[decq610] fold-down full sequence",
+ "bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}"
+ },
+ {
+ "description": "[decq612] fold-down full sequence",
+ "bson": "18000000136400000000106102253E5ECE4F200000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}"
+ },
+ {
+ "description": "[decq614] fold-down full sequence",
+ "bson": "18000000136400000000E83C80D09F3C2E3B030000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}"
+ },
+ {
+ "description": "[decq616] fold-down full sequence",
+ "bson": "18000000136400000000E4D20CC8DCD2B752000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}"
+ },
+ {
+ "description": "[decq618] fold-down full sequence",
+ "bson": "180000001364000000004A48011416954508000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}"
+ },
+ {
+ "description": "[decq620] fold-down full sequence",
+ "bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}"
+ },
+ {
+ "description": "[decq622] fold-down full sequence",
+ "bson": "18000000136400000080F64AE1C7022D1500000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}"
+ },
+ {
+ "description": "[decq624] fold-down full sequence",
+ "bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}"
+ },
+ {
+ "description": "[decq626] fold-down full sequence",
+ "bson": "180000001364000000A0DEC5ADC935360000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000E+6132\"}}"
+ },
+ {
+ "description": "[decq628] fold-down full sequence",
+ "bson": "18000000136400000010632D5EC76B050000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}"
+ },
+ {
+ "description": "[decq630] fold-down full sequence",
+ "bson": "180000001364000000E8890423C78A000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}"
+ },
+ {
+ "description": "[decq632] fold-down full sequence",
+ "bson": "18000000136400000064A7B3B6E00D000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}"
+ },
+ {
+ "description": "[decq634] fold-down full sequence",
+ "bson": "1800000013640000008A5D78456301000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}"
+ },
+ {
+ "description": "[decq636] fold-down full sequence",
+ "bson": "180000001364000000C16FF2862300000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}"
+ },
+ {
+ "description": "[decq638] fold-down full sequence",
+ "bson": "180000001364000080C6A47E8D0300000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}"
+ },
+ {
+ "description": "[decq640] fold-down full sequence",
+ "bson": "1800000013640000407A10F35A0000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000E+6125\"}}"
+ },
+ {
+ "description": "[decq642] fold-down full sequence",
+ "bson": "1800000013640000A0724E18090000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}"
+ },
+ {
+ "description": "[decq644] fold-down full sequence",
+ "bson": "180000001364000010A5D4E8000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000E+6123\"}}"
+ },
+ {
+ "description": "[decq646] fold-down full sequence",
+ "bson": "1800000013640000E8764817000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}"
+ },
+ {
+ "description": "[decq648] fold-down full sequence",
+ "bson": "1800000013640000E40B5402000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}"
+ },
+ {
+ "description": "[decq650] fold-down full sequence",
+ "bson": "1800000013640000CA9A3B00000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}"
+ },
+ {
+ "description": "[decq652] fold-down full sequence",
+ "bson": "1800000013640000E1F50500000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}"
+ },
+ {
+ "description": "[decq654] fold-down full sequence",
+ "bson": "180000001364008096980000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}"
+ },
+ {
+ "description": "[decq656] fold-down full sequence",
+ "bson": "1800000013640040420F0000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}"
+ },
+ {
+ "description": "[decq658] fold-down full sequence",
+ "bson": "18000000136400A086010000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}"
+ },
+ {
+ "description": "[decq660] fold-down full sequence",
+ "bson": "180000001364001027000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}"
+ },
+ {
+ "description": "[decq662] fold-down full sequence",
+ "bson": "18000000136400E803000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}"
+ },
+ {
+ "description": "[decq664] fold-down full sequence",
+ "bson": "180000001364006400000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}"
+ },
+ {
+ "description": "[decq666] fold-down full sequence",
+ "bson": "180000001364000A00000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}"
+ },
+ {
+ "description": "[decq060] fold-downs (more below)",
+ "bson": "180000001364000100000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}"
+ },
+ {
+ "description": "[decq670] fold-down full sequence",
+ "bson": "180000001364000100000000000000000000000000FC5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6110\"}}"
+ },
+ {
+ "description": "[decq668] fold-down full sequence",
+ "bson": "180000001364000100000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6111\"}}"
+ },
+ {
+ "description": "[decq072] Nmin and below",
+ "bson": "180000001364000100000000000000000000000000420000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6143\"}}"
+ },
+ {
+ "description": "[decq076] Nmin and below",
+ "bson": "18000000136400010000000A5BC138938D44C64D31000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000001E-6143\"}}"
+ },
+ {
+ "description": "[decq036] fold-downs (more below)",
+ "bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}"
+ },
+ {
+ "description": "[decq062] fold-downs (more below)",
+ "bson": "180000001364007B000000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23\"}}"
+ },
+ {
+ "description": "[decq034] Nmax and similar",
+ "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234567890123456789012345678901234E+6144\"}}"
+ },
+ {
+ "description": "[decq441] exponent lengths",
+ "bson": "180000001364000700000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}"
+ },
+ {
+ "description": "[decq449] exponent lengths",
+ "bson": "1800000013640007000000000000000000000000001E5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5999\"}}"
+ },
+ {
+ "description": "[decq447] exponent lengths",
+ "bson": "1800000013640007000000000000000000000000000E3800",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+999\"}}"
+ },
+ {
+ "description": "[decq445] exponent lengths",
+ "bson": "180000001364000700000000000000000000000000063100",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+99\"}}"
+ },
+ {
+ "description": "[decq443] exponent lengths",
+ "bson": "180000001364000700000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}"
+ },
+ {
+ "description": "[decq842] VG testcase",
+ "bson": "180000001364000000FED83F4E7C9FE4E269E38A5BCD1700",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7.049000000000010795488000000000000E-3097\"}}"
+ },
+ {
+ "description": "[decq841] VG testcase",
+ "bson": "180000001364000000203B9DB5056F000000000000002400",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"8.000000000000000000E-1550\"}}"
+ },
+ {
+ "description": "[decq840] VG testcase",
+ "bson": "180000001364003C17258419D710C42F0000000000002400",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"8.81125000000001349436E-1548\"}}"
+ },
+ {
+ "description": "[decq701] Selected DPD codes",
+ "bson": "180000001364000900000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"9\"}}"
+ },
+ {
+ "description": "[decq032] Nmax and similar",
+ "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}"
+ },
+ {
+ "description": "[decq702] Selected DPD codes",
+ "bson": "180000001364000A00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}"
+ },
+ {
+ "description": "[decq057] fold-downs (more below)",
+ "bson": "180000001364000C00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}"
+ },
+ {
+ "description": "[decq703] Selected DPD codes",
+ "bson": "180000001364001300000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"19\"}}"
+ },
+ {
+ "description": "[decq704] Selected DPD codes",
+ "bson": "180000001364001400000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"20\"}}"
+ },
+ {
+ "description": "[decq705] Selected DPD codes",
+ "bson": "180000001364001D00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"29\"}}"
+ },
+ {
+ "description": "[decq706] Selected DPD codes",
+ "bson": "180000001364001E00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"30\"}}"
+ },
+ {
+ "description": "[decq707] Selected DPD codes",
+ "bson": "180000001364002700000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"39\"}}"
+ },
+ {
+ "description": "[decq708] Selected DPD codes",
+ "bson": "180000001364002800000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"40\"}}"
+ },
+ {
+ "description": "[decq709] Selected DPD codes",
+ "bson": "180000001364003100000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"49\"}}"
+ },
+ {
+ "description": "[decq710] Selected DPD codes",
+ "bson": "180000001364003200000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"50\"}}"
+ },
+ {
+ "description": "[decq711] Selected DPD codes",
+ "bson": "180000001364003B00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"59\"}}"
+ },
+ {
+ "description": "[decq712] Selected DPD codes",
+ "bson": "180000001364003C00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"60\"}}"
+ },
+ {
+ "description": "[decq713] Selected DPD codes",
+ "bson": "180000001364004500000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"69\"}}"
+ },
+ {
+ "description": "[decq714] Selected DPD codes",
+ "bson": "180000001364004600000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"70\"}}"
+ },
+ {
+ "description": "[decq715] Selected DPD codes",
+ "bson": "180000001364004700000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"71\"}}"
+ },
+ {
+ "description": "[decq716] Selected DPD codes",
+ "bson": "180000001364004800000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"72\"}}"
+ },
+ {
+ "description": "[decq717] Selected DPD codes",
+ "bson": "180000001364004900000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"73\"}}"
+ },
+ {
+ "description": "[decq718] Selected DPD codes",
+ "bson": "180000001364004A00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"74\"}}"
+ },
+ {
+ "description": "[decq719] Selected DPD codes",
+ "bson": "180000001364004B00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"75\"}}"
+ },
+ {
+ "description": "[decq720] Selected DPD codes",
+ "bson": "180000001364004C00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"76\"}}"
+ },
+ {
+ "description": "[decq721] Selected DPD codes",
+ "bson": "180000001364004D00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"77\"}}"
+ },
+ {
+ "description": "[decq722] Selected DPD codes",
+ "bson": "180000001364004E00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"78\"}}"
+ },
+ {
+ "description": "[decq723] Selected DPD codes",
+ "bson": "180000001364004F00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"79\"}}"
+ },
+ {
+ "description": "[decq056] fold-downs (more below)",
+ "bson": "180000001364007B00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"123\"}}"
+ },
+ {
+ "description": "[decq064] fold-downs (more below)",
+ "bson": "1800000013640039300000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"123.45\"}}"
+ },
+ {
+ "description": "[decq732] Selected DPD codes",
+ "bson": "180000001364000802000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"520\"}}"
+ },
+ {
+ "description": "[decq733] Selected DPD codes",
+ "bson": "180000001364000902000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"521\"}}"
+ },
+ {
+ "description": "[decq740] DPD: one of each of the huffman groups",
+ "bson": "180000001364000903000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"777\"}}"
+ },
+ {
+ "description": "[decq741] DPD: one of each of the huffman groups",
+ "bson": "180000001364000A03000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"778\"}}"
+ },
+ {
+ "description": "[decq742] DPD: one of each of the huffman groups",
+ "bson": "180000001364001303000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"787\"}}"
+ },
+ {
+ "description": "[decq746] DPD: one of each of the huffman groups",
+ "bson": "180000001364001F03000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"799\"}}"
+ },
+ {
+ "description": "[decq743] DPD: one of each of the huffman groups",
+ "bson": "180000001364006D03000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"877\"}}"
+ },
+ {
+ "description": "[decq753] DPD all-highs cases (includes the 24 redundant codes)",
+ "bson": "180000001364007803000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"888\"}}"
+ },
+ {
+ "description": "[decq754] DPD all-highs cases (includes the 24 redundant codes)",
+ "bson": "180000001364007903000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"889\"}}"
+ },
+ {
+ "description": "[decq760] DPD all-highs cases (includes the 24 redundant codes)",
+ "bson": "180000001364008203000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"898\"}}"
+ },
+ {
+ "description": "[decq764] DPD all-highs cases (includes the 24 redundant codes)",
+ "bson": "180000001364008303000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"899\"}}"
+ },
+ {
+ "description": "[decq745] DPD: one of each of the huffman groups",
+ "bson": "18000000136400D303000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"979\"}}"
+ },
+ {
+ "description": "[decq770] DPD all-highs cases (includes the 24 redundant codes)",
+ "bson": "18000000136400DC03000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"988\"}}"
+ },
+ {
+ "description": "[decq774] DPD all-highs cases (includes the 24 redundant codes)",
+ "bson": "18000000136400DD03000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"989\"}}"
+ },
+ {
+ "description": "[decq730] Selected DPD codes",
+ "bson": "18000000136400E203000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"994\"}}"
+ },
+ {
+ "description": "[decq731] Selected DPD codes",
+ "bson": "18000000136400E303000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"995\"}}"
+ },
+ {
+ "description": "[decq744] DPD: one of each of the huffman groups",
+ "bson": "18000000136400E503000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"997\"}}"
+ },
+ {
+ "description": "[decq780] DPD all-highs cases (includes the 24 redundant codes)",
+ "bson": "18000000136400E603000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"998\"}}"
+ },
+ {
+ "description": "[decq787] DPD all-highs cases (includes the 24 redundant codes)",
+ "bson": "18000000136400E703000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"999\"}}"
+ },
+ {
+ "description": "[decq053] fold-downs (more below)",
+ "bson": "18000000136400D204000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234\"}}"
+ },
+ {
+ "description": "[decq052] fold-downs (more below)",
+ "bson": "180000001364003930000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345\"}}"
+ },
+ {
+ "description": "[decq792] Miscellaneous (testers' queries, etc.)",
+ "bson": "180000001364003075000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"30000\"}}"
+ },
+ {
+ "description": "[decq793] Miscellaneous (testers' queries, etc.)",
+ "bson": "1800000013640090940D0000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"890000\"}}"
+ },
+ {
+ "description": "[decq824] values around [u]int32 edges (zeros done earlier)",
+ "bson": "18000000136400FEFFFF7F00000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483646\"}}"
+ },
+ {
+ "description": "[decq825] values around [u]int32 edges (zeros done earlier)",
+ "bson": "18000000136400FFFFFF7F00000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483647\"}}"
+ },
+ {
+ "description": "[decq826] values around [u]int32 edges (zeros done earlier)",
+ "bson": "180000001364000000008000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483648\"}}"
+ },
+ {
+ "description": "[decq827] values around [u]int32 edges (zeros done earlier)",
+ "bson": "180000001364000100008000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483649\"}}"
+ },
+ {
+ "description": "[decq828] values around [u]int32 edges (zeros done earlier)",
+ "bson": "18000000136400FEFFFFFF00000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967294\"}}"
+ },
+ {
+ "description": "[decq829] values around [u]int32 edges (zeros done earlier)",
+ "bson": "18000000136400FFFFFFFF00000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967295\"}}"
+ },
+ {
+ "description": "[decq830] values around [u]int32 edges (zeros done earlier)",
+ "bson": "180000001364000000000001000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967296\"}}"
+ },
+ {
+ "description": "[decq831] values around [u]int32 edges (zeros done earlier)",
+ "bson": "180000001364000100000001000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967297\"}}"
+ },
+ {
+ "description": "[decq022] Normality",
+ "bson": "18000000136400C7711CC7B548F377DC80A131C836403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1111111111111111111111111111111111\"}}"
+ },
+ {
+ "description": "[decq020] Normality",
+ "bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}"
+ },
+ {
+ "description": "[decq550] Specials",
+ "bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED413000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"9999999999999999999999999999999999\"}}"
+ }
+ ]
+}
+`},
+
+ {"decimal128-3.json", `
+{
+ "description": "Decimal128",
+ "bson_type": "0x13",
+ "test_key": "d",
+ "valid": [
+ {
+ "description": "[basx066] strings without E cannot generate E in result",
+ "bson": "18000000136400185C0ACE0000000000000000000038B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-00345678.5432\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}"
+ },
+ {
+ "description": "[basx065] strings without E cannot generate E in result",
+ "bson": "18000000136400185C0ACE0000000000000000000038B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0345678.5432\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}"
+ },
+ {
+ "description": "[basx064] strings without E cannot generate E in result",
+ "bson": "18000000136400185C0ACE0000000000000000000038B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}"
+ },
+ {
+ "description": "[basx041] strings without E cannot generate E in result",
+ "bson": "180000001364004C0000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-76\"}}"
+ },
+ {
+ "description": "[basx027] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364000F270000000000000000000000003AB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999\"}}"
+ },
+ {
+ "description": "[basx026] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364009F230000000000000000000000003AB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.119\"}}"
+ },
+ {
+ "description": "[basx025] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364008F030000000000000000000000003CB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.11\"}}"
+ },
+ {
+ "description": "[basx024] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364005B000000000000000000000000003EB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.1\"}}"
+ },
+ {
+ "description": "[dqbsr531] negatives (Rounded)",
+ "bson": "1800000013640099761CC7B548F377DC80A131C836FEAF00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.1111111111111111111111111111123450\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.111111111111111111111111111112345\"}}"
+ },
+ {
+ "description": "[basx022] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364000A000000000000000000000000003EB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0\"}}"
+ },
+ {
+ "description": "[basx021] conform to rules and exponent will be in permitted range).",
+ "bson": "18000000136400010000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}"
+ },
+ {
+ "description": "[basx601] Zeros",
+ "bson": "1800000013640000000000000000000000000000002E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}"
+ },
+ {
+ "description": "[basx622] Zeros",
+ "bson": "1800000013640000000000000000000000000000002EB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-9\"}}"
+ },
+ {
+ "description": "[basx602] Zeros",
+ "bson": "180000001364000000000000000000000000000000303000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}"
+ },
+ {
+ "description": "[basx621] Zeros",
+ "bson": "18000000136400000000000000000000000000000030B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8\"}}"
+ },
+ {
+ "description": "[basx603] Zeros",
+ "bson": "180000001364000000000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}"
+ },
+ {
+ "description": "[basx620] Zeros",
+ "bson": "18000000136400000000000000000000000000000032B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-7\"}}"
+ },
+ {
+ "description": "[basx604] Zeros",
+ "bson": "180000001364000000000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}"
+ },
+ {
+ "description": "[basx619] Zeros",
+ "bson": "18000000136400000000000000000000000000000034B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}"
+ },
+ {
+ "description": "[basx605] Zeros",
+ "bson": "180000001364000000000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}"
+ },
+ {
+ "description": "[basx618] Zeros",
+ "bson": "18000000136400000000000000000000000000000036B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}"
+ },
+ {
+ "description": "[basx680] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"000000.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx606] Zeros",
+ "bson": "180000001364000000000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}"
+ },
+ {
+ "description": "[basx617] Zeros",
+ "bson": "18000000136400000000000000000000000000000038B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}"
+ },
+ {
+ "description": "[basx681] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"00000.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx686] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+00000.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx687] Zeros",
+ "bson": "18000000136400000000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-00000.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+ },
+ {
+ "description": "[basx019] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640000000000000000000000000000003CB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-00.00\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}"
+ },
+ {
+ "description": "[basx607] Zeros",
+ "bson": "1800000013640000000000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}"
+ },
+ {
+ "description": "[basx616] Zeros",
+ "bson": "1800000013640000000000000000000000000000003AB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}"
+ },
+ {
+ "description": "[basx682] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0000.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx155] Numbers with E",
+ "bson": "1800000013640000000000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000e+0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}"
+ },
+ {
+ "description": "[basx130] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}"
+ },
+ {
+ "description": "[basx290] some more negative zeros [systematic tests below]",
+ "bson": "18000000136400000000000000000000000000000038B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}"
+ },
+ {
+ "description": "[basx131] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}"
+ },
+ {
+ "description": "[basx291] some more negative zeros [systematic tests below]",
+ "bson": "18000000136400000000000000000000000000000036B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}"
+ },
+ {
+ "description": "[basx132] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}"
+ },
+ {
+ "description": "[basx292] some more negative zeros [systematic tests below]",
+ "bson": "18000000136400000000000000000000000000000034B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}"
+ },
+ {
+ "description": "[basx133] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}"
+ },
+ {
+ "description": "[basx293] some more negative zeros [systematic tests below]",
+ "bson": "18000000136400000000000000000000000000000032B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-7\"}}"
+ },
+ {
+ "description": "[basx608] Zeros",
+ "bson": "1800000013640000000000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+ },
+ {
+ "description": "[basx615] Zeros",
+ "bson": "1800000013640000000000000000000000000000003CB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}"
+ },
+ {
+ "description": "[basx683] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"000.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx630] Zeros",
+ "bson": "1800000013640000000000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+ },
+ {
+ "description": "[basx670] Zeros",
+ "bson": "1800000013640000000000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+ },
+ {
+ "description": "[basx631] Zeros",
+ "bson": "1800000013640000000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}"
+ },
+ {
+ "description": "[basx671] Zeros",
+ "bson": "1800000013640000000000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}"
+ },
+ {
+ "description": "[basx134] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}"
+ },
+ {
+ "description": "[basx294] some more negative zeros [systematic tests below]",
+ "bson": "18000000136400000000000000000000000000000038B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}"
+ },
+ {
+ "description": "[basx632] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx672] Zeros",
+ "bson": "180000001364000000000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}"
+ },
+ {
+ "description": "[basx135] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}"
+ },
+ {
+ "description": "[basx295] some more negative zeros [systematic tests below]",
+ "bson": "18000000136400000000000000000000000000000036B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}"
+ },
+ {
+ "description": "[basx633] Zeros",
+ "bson": "180000001364000000000000000000000000000000423000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}"
+ },
+ {
+ "description": "[basx673] Zeros",
+ "bson": "180000001364000000000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}"
+ },
+ {
+ "description": "[basx136] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}"
+ },
+ {
+ "description": "[basx674] Zeros",
+ "bson": "180000001364000000000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}"
+ },
+ {
+ "description": "[basx634] Zeros",
+ "bson": "180000001364000000000000000000000000000000443000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}"
+ },
+ {
+ "description": "[basx137] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}"
+ },
+ {
+ "description": "[basx635] Zeros",
+ "bson": "180000001364000000000000000000000000000000463000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+5\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}"
+ },
+ {
+ "description": "[basx675] Zeros",
+ "bson": "180000001364000000000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}"
+ },
+ {
+ "description": "[basx636] Zeros",
+ "bson": "180000001364000000000000000000000000000000483000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+6\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}"
+ },
+ {
+ "description": "[basx676] Zeros",
+ "bson": "180000001364000000000000000000000000000000303000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-6\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}"
+ },
+ {
+ "description": "[basx637] Zeros",
+ "bson": "1800000013640000000000000000000000000000004A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+7\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}"
+ },
+ {
+ "description": "[basx677] Zeros",
+ "bson": "1800000013640000000000000000000000000000002E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-7\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}"
+ },
+ {
+ "description": "[basx638] Zeros",
+ "bson": "1800000013640000000000000000000000000000004C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}"
+ },
+ {
+ "description": "[basx678] Zeros",
+ "bson": "1800000013640000000000000000000000000000002C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-10\"}}"
+ },
+ {
+ "description": "[basx149] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"000E+9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+ },
+ {
+ "description": "[basx639] Zeros",
+ "bson": "1800000013640000000000000000000000000000004E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}"
+ },
+ {
+ "description": "[basx679] Zeros",
+ "bson": "1800000013640000000000000000000000000000002A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-11\"}}"
+ },
+ {
+ "description": "[basx063] strings without E cannot generate E in result",
+ "bson": "18000000136400185C0ACE00000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+00345678.5432\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}"
+ },
+ {
+ "description": "[basx018] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640000000000000000000000000000003EB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}"
+ },
+ {
+ "description": "[basx609] Zeros",
+ "bson": "1800000013640000000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}"
+ },
+ {
+ "description": "[basx614] Zeros",
+ "bson": "1800000013640000000000000000000000000000003EB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}"
+ },
+ {
+ "description": "[basx684] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"00.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx640] Zeros",
+ "bson": "1800000013640000000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}"
+ },
+ {
+ "description": "[basx660] Zeros",
+ "bson": "1800000013640000000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}"
+ },
+ {
+ "description": "[basx641] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx661] Zeros",
+ "bson": "1800000013640000000000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}"
+ },
+ {
+ "description": "[basx296] some more negative zeros [systematic tests below]",
+ "bson": "1800000013640000000000000000000000000000003AB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}"
+ },
+ {
+ "description": "[basx642] Zeros",
+ "bson": "180000001364000000000000000000000000000000423000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}"
+ },
+ {
+ "description": "[basx662] Zeros",
+ "bson": "1800000013640000000000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}"
+ },
+ {
+ "description": "[basx297] some more negative zeros [systematic tests below]",
+ "bson": "18000000136400000000000000000000000000000038B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}"
+ },
+ {
+ "description": "[basx643] Zeros",
+ "bson": "180000001364000000000000000000000000000000443000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}"
+ },
+ {
+ "description": "[basx663] Zeros",
+ "bson": "180000001364000000000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}"
+ },
+ {
+ "description": "[basx644] Zeros",
+ "bson": "180000001364000000000000000000000000000000463000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}"
+ },
+ {
+ "description": "[basx664] Zeros",
+ "bson": "180000001364000000000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}"
+ },
+ {
+ "description": "[basx645] Zeros",
+ "bson": "180000001364000000000000000000000000000000483000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+5\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}"
+ },
+ {
+ "description": "[basx665] Zeros",
+ "bson": "180000001364000000000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-5\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}"
+ },
+ {
+ "description": "[basx646] Zeros",
+ "bson": "1800000013640000000000000000000000000000004A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+6\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}"
+ },
+ {
+ "description": "[basx666] Zeros",
+ "bson": "180000001364000000000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-6\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}"
+ },
+ {
+ "description": "[basx647] Zeros",
+ "bson": "1800000013640000000000000000000000000000004C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+7\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}"
+ },
+ {
+ "description": "[basx667] Zeros",
+ "bson": "180000001364000000000000000000000000000000303000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-7\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}"
+ },
+ {
+ "description": "[basx648] Zeros",
+ "bson": "1800000013640000000000000000000000000000004E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}"
+ },
+ {
+ "description": "[basx668] Zeros",
+ "bson": "1800000013640000000000000000000000000000002E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}"
+ },
+ {
+ "description": "[basx160] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"00E+9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+ },
+ {
+ "description": "[basx161] Numbers with E",
+ "bson": "1800000013640000000000000000000000000000002E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"00E-9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}"
+ },
+ {
+ "description": "[basx649] Zeros",
+ "bson": "180000001364000000000000000000000000000000503000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}"
+ },
+ {
+ "description": "[basx669] Zeros",
+ "bson": "1800000013640000000000000000000000000000002C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-10\"}}"
+ },
+ {
+ "description": "[basx062] strings without E cannot generate E in result",
+ "bson": "18000000136400185C0ACE00000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0345678.5432\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}"
+ },
+ {
+ "description": "[basx001] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx017] conform to rules and exponent will be in permitted range).",
+ "bson": "18000000136400000000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+ },
+ {
+ "description": "[basx611] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx613] Zeros",
+ "bson": "18000000136400000000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+ },
+ {
+ "description": "[basx685] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx688] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx689] Zeros",
+ "bson": "18000000136400000000000000000000000000000040B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}"
+ },
+ {
+ "description": "[basx650] Zeros",
+ "bson": "180000001364000000000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}"
+ },
+ {
+ "description": "[basx651] Zeros",
+ "bson": "180000001364000000000000000000000000000000423000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}"
+ },
+ {
+ "description": "[basx298] some more negative zeros [systematic tests below]",
+ "bson": "1800000013640000000000000000000000000000003CB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}"
+ },
+ {
+ "description": "[basx652] Zeros",
+ "bson": "180000001364000000000000000000000000000000443000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}"
+ },
+ {
+ "description": "[basx299] some more negative zeros [systematic tests below]",
+ "bson": "1800000013640000000000000000000000000000003AB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}"
+ },
+ {
+ "description": "[basx653] Zeros",
+ "bson": "180000001364000000000000000000000000000000463000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}"
+ },
+ {
+ "description": "[basx654] Zeros",
+ "bson": "180000001364000000000000000000000000000000483000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}"
+ },
+ {
+ "description": "[basx655] Zeros",
+ "bson": "1800000013640000000000000000000000000000004A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}"
+ },
+ {
+ "description": "[basx656] Zeros",
+ "bson": "1800000013640000000000000000000000000000004C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}"
+ },
+ {
+ "description": "[basx657] Zeros",
+ "bson": "1800000013640000000000000000000000000000004E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}"
+ },
+ {
+ "description": "[basx658] Zeros",
+ "bson": "180000001364000000000000000000000000000000503000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}"
+ },
+ {
+ "description": "[basx138] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0E+9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+ },
+ {
+ "description": "[basx139] Numbers with E",
+ "bson": "18000000136400000000000000000000000000000052B000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+9\"}}"
+ },
+ {
+ "description": "[basx144] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+ },
+ {
+ "description": "[basx154] Numbers with E",
+ "bson": "180000001364000000000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+ },
+ {
+ "description": "[basx659] Zeros",
+ "bson": "180000001364000000000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}"
+ },
+ {
+ "description": "[basx042] strings without E cannot generate E in result",
+ "bson": "18000000136400FC040000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+12.76\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}"
+ },
+ {
+ "description": "[basx143] Numbers with E",
+ "bson": "180000001364000100000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+1E+009\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+ },
+ {
+ "description": "[basx061] strings without E cannot generate E in result",
+ "bson": "18000000136400185C0ACE00000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+345678.5432\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}"
+ },
+ {
+ "description": "[basx036] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640015CD5B0700000000000000000000203000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000123456789\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-8\"}}"
+ },
+ {
+ "description": "[basx035] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640015CD5B0700000000000000000000223000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000123456789\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-7\"}}"
+ },
+ {
+ "description": "[basx034] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640015CD5B0700000000000000000000243000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000123456789\"}}"
+ },
+ {
+ "description": "[basx053] strings without E cannot generate E in result",
+ "bson": "180000001364003200000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}"
+ },
+ {
+ "description": "[basx033] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640015CD5B0700000000000000000000263000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000123456789\"}}"
+ },
+ {
+ "description": "[basx016] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364000C000000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.012\"}}"
+ },
+ {
+ "description": "[basx015] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364007B000000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123\"}}"
+ },
+ {
+ "description": "[basx037] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640078DF0D8648700000000000000000223000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012344\"}}"
+ },
+ {
+ "description": "[basx038] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640079DF0D8648700000000000000000223000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012345\"}}"
+ },
+ {
+ "description": "[basx250] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+ },
+ {
+ "description": "[basx257] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+ },
+ {
+ "description": "[basx256] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}"
+ },
+ {
+ "description": "[basx258] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+ },
+ {
+ "description": "[basx251] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000103000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-20\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-21\"}}"
+ },
+ {
+ "description": "[basx263] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000603000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+20\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+19\"}}"
+ },
+ {
+ "description": "[basx255] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}"
+ },
+ {
+ "description": "[basx259] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+ },
+ {
+ "description": "[basx254] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}"
+ },
+ {
+ "description": "[basx260] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+ },
+ {
+ "description": "[basx253] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000303000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}"
+ },
+ {
+ "description": "[basx261] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+ },
+ {
+ "description": "[basx252] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000283000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-9\"}}"
+ },
+ {
+ "description": "[basx262] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000483000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}"
+ },
+ {
+ "description": "[basx159] Numbers with E",
+ "bson": "1800000013640049000000000000000000000000002E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.73e-7\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7.3E-8\"}}"
+ },
+ {
+ "description": "[basx004] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640064000000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00\"}}"
+ },
+ {
+ "description": "[basx003] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364000A000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0\"}}"
+ },
+ {
+ "description": "[basx002] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364000100000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}"
+ },
+ {
+ "description": "[basx148] Numbers with E",
+ "bson": "180000001364000100000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+009\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+ },
+ {
+ "description": "[basx153] Numbers with E",
+ "bson": "180000001364000100000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E009\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+ },
+ {
+ "description": "[basx141] Numbers with E",
+ "bson": "180000001364000100000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+09\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+ },
+ {
+ "description": "[basx146] Numbers with E",
+ "bson": "180000001364000100000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+09\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+ },
+ {
+ "description": "[basx151] Numbers with E",
+ "bson": "180000001364000100000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e09\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+ },
+ {
+ "description": "[basx142] Numbers with E",
+ "bson": "180000001364000100000000000000000000000000F43000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}"
+ },
+ {
+ "description": "[basx147] Numbers with E",
+ "bson": "180000001364000100000000000000000000000000F43000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+90\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}"
+ },
+ {
+ "description": "[basx152] Numbers with E",
+ "bson": "180000001364000100000000000000000000000000F43000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E90\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}"
+ },
+ {
+ "description": "[basx140] Numbers with E",
+ "bson": "180000001364000100000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+ },
+ {
+ "description": "[basx150] Numbers with E",
+ "bson": "180000001364000100000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}"
+ },
+ {
+ "description": "[basx014] conform to rules and exponent will be in permitted range).",
+ "bson": "18000000136400D2040000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234\"}}"
+ },
+ {
+ "description": "[basx170] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+ },
+ {
+ "description": "[basx177] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+ },
+ {
+ "description": "[basx176] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+ },
+ {
+ "description": "[basx178] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+ },
+ {
+ "description": "[basx171] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000123000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-20\"}}"
+ },
+ {
+ "description": "[basx183] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000623000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+20\"}}"
+ },
+ {
+ "description": "[basx175] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}"
+ },
+ {
+ "description": "[basx179] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+ },
+ {
+ "description": "[basx174] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}"
+ },
+ {
+ "description": "[basx180] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+ },
+ {
+ "description": "[basx173] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}"
+ },
+ {
+ "description": "[basx181] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000423000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}"
+ },
+ {
+ "description": "[basx172] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000002A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-8\"}}"
+ },
+ {
+ "description": "[basx182] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000004A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+8\"}}"
+ },
+ {
+ "description": "[basx157] Numbers with E",
+ "bson": "180000001364000400000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"4E+9\"}}"
+ },
+ {
+ "description": "[basx067] examples",
+ "bson": "180000001364000500000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-6\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000005\"}}"
+ },
+ {
+ "description": "[basx069] examples",
+ "bson": "180000001364000500000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-7\"}}"
+ },
+ {
+ "description": "[basx385] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}"
+ },
+ {
+ "description": "[basx365] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000543000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E10\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+10\"}}"
+ },
+ {
+ "description": "[basx405] Engineering notation tests",
+ "bson": "1800000013640007000000000000000000000000002C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-10\"}}"
+ },
+ {
+ "description": "[basx363] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000563000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E11\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+11\"}}"
+ },
+ {
+ "description": "[basx407] Engineering notation tests",
+ "bson": "1800000013640007000000000000000000000000002A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-11\"}}"
+ },
+ {
+ "description": "[basx361] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000583000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E12\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+12\"}}"
+ },
+ {
+ "description": "[basx409] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000283000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-12\"}}"
+ },
+ {
+ "description": "[basx411] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000263000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-13\"}}"
+ },
+ {
+ "description": "[basx383] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000423000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+1\"}}"
+ },
+ {
+ "description": "[basx387] Engineering notation tests",
+ "bson": "1800000013640007000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.7\"}}"
+ },
+ {
+ "description": "[basx381] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000443000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+2\"}}"
+ },
+ {
+ "description": "[basx389] Engineering notation tests",
+ "bson": "1800000013640007000000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.07\"}}"
+ },
+ {
+ "description": "[basx379] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000463000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+3\"}}"
+ },
+ {
+ "description": "[basx391] Engineering notation tests",
+ "bson": "1800000013640007000000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.007\"}}"
+ },
+ {
+ "description": "[basx377] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000483000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+4\"}}"
+ },
+ {
+ "description": "[basx393] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0007\"}}"
+ },
+ {
+ "description": "[basx375] Engineering notation tests",
+ "bson": "1800000013640007000000000000000000000000004A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E5\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5\"}}"
+ },
+ {
+ "description": "[basx395] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-5\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00007\"}}"
+ },
+ {
+ "description": "[basx373] Engineering notation tests",
+ "bson": "1800000013640007000000000000000000000000004C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E6\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+6\"}}"
+ },
+ {
+ "description": "[basx397] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-6\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000007\"}}"
+ },
+ {
+ "description": "[basx371] Engineering notation tests",
+ "bson": "1800000013640007000000000000000000000000004E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E7\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+7\"}}"
+ },
+ {
+ "description": "[basx399] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-7\"}}"
+ },
+ {
+ "description": "[basx369] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000503000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+8\"}}"
+ },
+ {
+ "description": "[basx401] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000303000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-8\"}}"
+ },
+ {
+ "description": "[basx367] Engineering notation tests",
+ "bson": "180000001364000700000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}"
+ },
+ {
+ "description": "[basx403] Engineering notation tests",
+ "bson": "1800000013640007000000000000000000000000002E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-9\"}}"
+ },
+ {
+ "description": "[basx007] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640064000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.0\"}}"
+ },
+ {
+ "description": "[basx005] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364000A00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}"
+ },
+ {
+ "description": "[basx165] Numbers with E",
+ "bson": "180000001364000A00000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+009\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}"
+ },
+ {
+ "description": "[basx163] Numbers with E",
+ "bson": "180000001364000A00000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+09\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}"
+ },
+ {
+ "description": "[basx325] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}"
+ },
+ {
+ "description": "[basx305] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000543000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e10\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+11\"}}"
+ },
+ {
+ "description": "[basx345] Engineering notation tests",
+ "bson": "180000001364000A000000000000000000000000002C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-10\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-9\"}}"
+ },
+ {
+ "description": "[basx303] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000563000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e11\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+12\"}}"
+ },
+ {
+ "description": "[basx347] Engineering notation tests",
+ "bson": "180000001364000A000000000000000000000000002A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-11\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-10\"}}"
+ },
+ {
+ "description": "[basx301] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000583000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e12\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+13\"}}"
+ },
+ {
+ "description": "[basx349] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000283000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-12\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-11\"}}"
+ },
+ {
+ "description": "[basx351] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000263000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-13\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-12\"}}"
+ },
+ {
+ "description": "[basx323] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000423000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+2\"}}"
+ },
+ {
+ "description": "[basx327] Engineering notation tests",
+ "bson": "180000001364000A000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0\"}}"
+ },
+ {
+ "description": "[basx321] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000443000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+3\"}}"
+ },
+ {
+ "description": "[basx329] Engineering notation tests",
+ "bson": "180000001364000A000000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.10\"}}"
+ },
+ {
+ "description": "[basx319] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000463000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+4\"}}"
+ },
+ {
+ "description": "[basx331] Engineering notation tests",
+ "bson": "180000001364000A000000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.010\"}}"
+ },
+ {
+ "description": "[basx317] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000483000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+5\"}}"
+ },
+ {
+ "description": "[basx333] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0010\"}}"
+ },
+ {
+ "description": "[basx315] Engineering notation tests",
+ "bson": "180000001364000A000000000000000000000000004A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e5\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6\"}}"
+ },
+ {
+ "description": "[basx335] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-5\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00010\"}}"
+ },
+ {
+ "description": "[basx313] Engineering notation tests",
+ "bson": "180000001364000A000000000000000000000000004C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e6\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+7\"}}"
+ },
+ {
+ "description": "[basx337] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-6\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000010\"}}"
+ },
+ {
+ "description": "[basx311] Engineering notation tests",
+ "bson": "180000001364000A000000000000000000000000004E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e7\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+8\"}}"
+ },
+ {
+ "description": "[basx339] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-7\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000010\"}}"
+ },
+ {
+ "description": "[basx309] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000503000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+9\"}}"
+ },
+ {
+ "description": "[basx341] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000303000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-7\"}}"
+ },
+ {
+ "description": "[basx164] Numbers with E",
+ "bson": "180000001364000A00000000000000000000000000F43000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e+90\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+91\"}}"
+ },
+ {
+ "description": "[basx162] Numbers with E",
+ "bson": "180000001364000A00000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}"
+ },
+ {
+ "description": "[basx307] Engineering notation tests",
+ "bson": "180000001364000A00000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}"
+ },
+ {
+ "description": "[basx343] Engineering notation tests",
+ "bson": "180000001364000A000000000000000000000000002E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-8\"}}"
+ },
+ {
+ "description": "[basx008] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640065000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.1\"}}"
+ },
+ {
+ "description": "[basx009] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640068000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.4\"}}"
+ },
+ {
+ "description": "[basx010] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640069000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.5\"}}"
+ },
+ {
+ "description": "[basx011] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364006A000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.6\"}}"
+ },
+ {
+ "description": "[basx012] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364006D000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"10.9\"}}"
+ },
+ {
+ "description": "[basx013] conform to rules and exponent will be in permitted range).",
+ "bson": "180000001364006E000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"11.0\"}}"
+ },
+ {
+ "description": "[basx040] strings without E cannot generate E in result",
+ "bson": "180000001364000C00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}"
+ },
+ {
+ "description": "[basx190] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+ },
+ {
+ "description": "[basx197] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+ },
+ {
+ "description": "[basx196] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+ },
+ {
+ "description": "[basx198] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+ },
+ {
+ "description": "[basx191] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000143000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-20\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-19\"}}"
+ },
+ {
+ "description": "[basx203] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000643000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+20\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+21\"}}"
+ },
+ {
+ "description": "[basx195] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+ },
+ {
+ "description": "[basx199] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+ },
+ {
+ "description": "[basx194] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}"
+ },
+ {
+ "description": "[basx200] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000423000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}"
+ },
+ {
+ "description": "[basx193] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}"
+ },
+ {
+ "description": "[basx201] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000443000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}"
+ },
+ {
+ "description": "[basx192] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000002C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-7\"}}"
+ },
+ {
+ "description": "[basx202] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000004C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+9\"}}"
+ },
+ {
+ "description": "[basx044] strings without E cannot generate E in result",
+ "bson": "18000000136400FC040000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"012.76\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}"
+ },
+ {
+ "description": "[basx042] strings without E cannot generate E in result",
+ "bson": "18000000136400FC040000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}"
+ },
+ {
+ "description": "[basx046] strings without E cannot generate E in result",
+ "bson": "180000001364001100000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"17.\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"17\"}}"
+ },
+ {
+ "description": "[basx049] strings without E cannot generate E in result",
+ "bson": "180000001364002C00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0044\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}"
+ },
+ {
+ "description": "[basx048] strings without E cannot generate E in result",
+ "bson": "180000001364002C00000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"044\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}"
+ },
+ {
+ "description": "[basx158] Numbers with E",
+ "bson": "180000001364002C00000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"44E+9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4.4E+10\"}}"
+ },
+ {
+ "description": "[basx068] examples",
+ "bson": "180000001364003200000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"50E-7\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}"
+ },
+ {
+ "description": "[basx169] Numbers with E",
+ "bson": "180000001364006400000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+009\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}"
+ },
+ {
+ "description": "[basx167] Numbers with E",
+ "bson": "180000001364006400000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+09\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}"
+ },
+ {
+ "description": "[basx168] Numbers with E",
+ "bson": "180000001364006400000000000000000000000000F43000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"100E+90\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+92\"}}"
+ },
+ {
+ "description": "[basx166] Numbers with E",
+ "bson": "180000001364006400000000000000000000000000523000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+9\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}"
+ },
+ {
+ "description": "[basx210] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+ },
+ {
+ "description": "[basx217] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+ },
+ {
+ "description": "[basx216] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+ },
+ {
+ "description": "[basx218] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+ },
+ {
+ "description": "[basx211] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000163000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-20\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-18\"}}"
+ },
+ {
+ "description": "[basx223] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000663000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+20\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+22\"}}"
+ },
+ {
+ "description": "[basx215] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+ },
+ {
+ "description": "[basx219] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000423000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}"
+ },
+ {
+ "description": "[basx214] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+ },
+ {
+ "description": "[basx220] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000443000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}"
+ },
+ {
+ "description": "[basx213] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}"
+ },
+ {
+ "description": "[basx221] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000463000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}"
+ },
+ {
+ "description": "[basx212] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000002E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000001265\"}}"
+ },
+ {
+ "description": "[basx222] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000004E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+10\"}}"
+ },
+ {
+ "description": "[basx006] conform to rules and exponent will be in permitted range).",
+ "bson": "18000000136400E803000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1000\"}}"
+ },
+ {
+ "description": "[basx230] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+ },
+ {
+ "description": "[basx237] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}"
+ },
+ {
+ "description": "[basx236] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}"
+ },
+ {
+ "description": "[basx238] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000423000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+1\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}"
+ },
+ {
+ "description": "[basx231] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000183000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-20\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-17\"}}"
+ },
+ {
+ "description": "[basx243] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000683000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+20\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+23\"}}"
+ },
+ {
+ "description": "[basx235] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}"
+ },
+ {
+ "description": "[basx239] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000443000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+2\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}"
+ },
+ {
+ "description": "[basx234] Numbers with E",
+ "bson": "18000000136400F1040000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}"
+ },
+ {
+ "description": "[basx240] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000463000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+3\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}"
+ },
+ {
+ "description": "[basx233] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}"
+ },
+ {
+ "description": "[basx241] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000483000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+4\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}"
+ },
+ {
+ "description": "[basx232] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000303000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}"
+ },
+ {
+ "description": "[basx242] Numbers with E",
+ "bson": "18000000136400F104000000000000000000000000503000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+8\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+11\"}}"
+ },
+ {
+ "description": "[basx060] strings without E cannot generate E in result",
+ "bson": "18000000136400185C0ACE00000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}"
+ },
+ {
+ "description": "[basx059] strings without E cannot generate E in result",
+ "bson": "18000000136400F198670C08000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0345678.54321\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.54321\"}}"
+ },
+ {
+ "description": "[basx058] strings without E cannot generate E in result",
+ "bson": "180000001364006AF90B7C50000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.543210\"}}"
+ },
+ {
+ "description": "[basx057] strings without E cannot generate E in result",
+ "bson": "180000001364006A19562522020000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"2345678.543210\"}}"
+ },
+ {
+ "description": "[basx056] strings without E cannot generate E in result",
+ "bson": "180000001364006AB9C8733A0B0000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"12345678.543210\"}}"
+ },
+ {
+ "description": "[basx031] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640040AF0D8648700000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.000000\"}}"
+ },
+ {
+ "description": "[basx030] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640080910F8648700000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.123456\"}}"
+ },
+ {
+ "description": "[basx032] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640080910F8648700000000000000000403000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789123456\"}}"
+ }
+ ]
+}
+`},
+
+ {"decimal128-4.json", `
+{
+ "description": "Decimal128",
+ "bson_type": "0x13",
+ "test_key": "d",
+ "valid": [
+ {
+ "description": "[basx023] conform to rules and exponent will be in permitted range).",
+ "bson": "1800000013640001000000000000000000000000003EB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.1\"}}"
+ },
+
+ {
+ "description": "[basx045] strings without E cannot generate E in result",
+ "bson": "1800000013640003000000000000000000000000003A3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.003\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.003\"}}"
+ },
+ {
+ "description": "[basx610] Zeros",
+ "bson": "1800000013640000000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \".0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}"
+ },
+ {
+ "description": "[basx612] Zeros",
+ "bson": "1800000013640000000000000000000000000000003EB000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-.0\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}"
+ },
+ {
+ "description": "[basx043] strings without E cannot generate E in result",
+ "bson": "18000000136400FC040000000000000000000000003C3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"+12.76\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}"
+ },
+ {
+ "description": "[basx055] strings without E cannot generate E in result",
+ "bson": "180000001364000500000000000000000000000000303000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000005\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-8\"}}"
+ },
+ {
+ "description": "[basx054] strings without E cannot generate E in result",
+ "bson": "180000001364000500000000000000000000000000323000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000005\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-7\"}}"
+ },
+ {
+ "description": "[basx052] strings without E cannot generate E in result",
+ "bson": "180000001364000500000000000000000000000000343000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000005\"}}"
+ },
+ {
+ "description": "[basx051] strings without E cannot generate E in result",
+ "bson": "180000001364000500000000000000000000000000363000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"00.00005\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00005\"}}"
+ },
+ {
+ "description": "[basx050] strings without E cannot generate E in result",
+ "bson": "180000001364000500000000000000000000000000383000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0005\"}}"
+ },
+ {
+ "description": "[basx047] strings without E cannot generate E in result",
+ "bson": "1800000013640005000000000000000000000000003E3000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \".5\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.5\"}}"
+ },
+ {
+ "description": "[dqbsr431] check rounding modes heeded (Rounded)",
+ "bson": "1800000013640099761CC7B548F377DC80A131C836FE2F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.1111111111111111111111111111123450\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.111111111111111111111111111112345\"}}"
+ },
+ {
+ "description": "OK2",
+ "bson": "18000000136400000000000A5BC138938D44C64D31FC2F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \".100000000000000000000000000000000000000000000000000000000000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1000000000000000000000000000000000\"}}"
+ }
+ ],
+ "parseErrors": [
+ {
+ "description": "[basx564] Near-specials (Conversion_syntax)",
+ "string": "Infi"
+ },
+ {
+ "description": "[basx565] Near-specials (Conversion_syntax)",
+ "string": "Infin"
+ },
+ {
+ "description": "[basx566] Near-specials (Conversion_syntax)",
+ "string": "Infini"
+ },
+ {
+ "description": "[basx567] Near-specials (Conversion_syntax)",
+ "string": "Infinit"
+ },
+ {
+ "description": "[basx568] Near-specials (Conversion_syntax)",
+ "string": "-Infinit"
+ },
+ {
+ "description": "[basx590] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": ".Infinity"
+ },
+ {
+ "description": "[basx562] Near-specials (Conversion_syntax)",
+ "string": "NaNq"
+ },
+ {
+ "description": "[basx563] Near-specials (Conversion_syntax)",
+ "string": "NaNs"
+ },
+ {
+ "description": "[dqbas939] overflow results at different rounding modes (Overflow & Inexact & Rounded)",
+ "string": "-7e10000"
+ },
+ {
+ "description": "[dqbsr534] negatives (Rounded & Inexact)",
+ "string": "-1.11111111111111111111111111111234650"
+ },
+ {
+ "description": "[dqbsr535] negatives (Rounded & Inexact)",
+ "string": "-1.11111111111111111111111111111234551"
+ },
+ {
+ "description": "[dqbsr533] negatives (Rounded & Inexact)",
+ "string": "-1.11111111111111111111111111111234550"
+ },
+ {
+ "description": "[dqbsr532] negatives (Rounded & Inexact)",
+ "string": "-1.11111111111111111111111111111234549"
+ },
+ {
+ "description": "[dqbsr432] check rounding modes heeded (Rounded & Inexact)",
+ "string": "1.11111111111111111111111111111234549"
+ },
+ {
+ "description": "[dqbsr433] check rounding modes heeded (Rounded & Inexact)",
+ "string": "1.11111111111111111111111111111234550"
+ },
+ {
+ "description": "[dqbsr435] check rounding modes heeded (Rounded & Inexact)",
+ "string": "1.11111111111111111111111111111234551"
+ },
+ {
+ "description": "[dqbsr434] check rounding modes heeded (Rounded & Inexact)",
+ "string": "1.11111111111111111111111111111234650"
+ },
+ {
+ "description": "[dqbas938] overflow results at different rounding modes (Overflow & Inexact & Rounded)",
+ "string": "7e10000"
+ },
+ {
+ "description": "Inexact rounding#1",
+ "string": "100000000000000000000000000000000000000000000000000000000001"
+ },
+ {
+ "description": "Inexact rounding#2",
+ "string": "1E-6177"
+ }
+ ]
+}
+`},
+
+ {"decimal128-5.json", `
+{
+ "description": "Decimal128",
+ "bson_type": "0x13",
+ "test_key": "d",
+ "valid": [
+ {
+ "description": "[decq035] fold-downs (more below) (Clamped)",
+ "bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23E+6144\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}"
+ },
+ {
+ "description": "[decq037] fold-downs (more below) (Clamped)",
+ "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}"
+ },
+ {
+ "description": "[decq077] Nmin and below (Subnormal)",
+ "bson": "180000001364000000000081EFAC855B416D2DEE04000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.100000000000000000000000000000000E-6143\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}"
+ },
+ {
+ "description": "[decq078] Nmin and below (Subnormal)",
+ "bson": "180000001364000000000081EFAC855B416D2DEE04000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}"
+ },
+ {
+ "description": "[decq079] Nmin and below (Subnormal)",
+ "bson": "180000001364000A00000000000000000000000000000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000010E-6143\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}"
+ },
+ {
+ "description": "[decq080] Nmin and below (Subnormal)",
+ "bson": "180000001364000A00000000000000000000000000000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}"
+ },
+ {
+ "description": "[decq081] Nmin and below (Subnormal)",
+ "bson": "180000001364000100000000000000000000000000020000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000000000000000000000000001E-6143\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}"
+ },
+ {
+ "description": "[decq082] Nmin and below (Subnormal)",
+ "bson": "180000001364000100000000000000000000000000020000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}"
+ },
+ {
+ "description": "[decq083] Nmin and below (Subnormal)",
+ "bson": "180000001364000100000000000000000000000000000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000001E-6143\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}"
+ },
+ {
+ "description": "[decq084] Nmin and below (Subnormal)",
+ "bson": "180000001364000100000000000000000000000000000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}"
+ },
+ {
+ "description": "[decq090] underflows cannot be tested for simple copies, check edge cases (Subnormal)",
+ "bson": "180000001364000100000000000000000000000000000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1e-6176\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}"
+ },
+ {
+ "description": "[decq100] underflows cannot be tested for simple copies, check edge cases (Subnormal)",
+ "bson": "18000000136400FFFFFFFF095BC138938D44C64D31000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"999999999999999999999999999999999e-6176\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.99999999999999999999999999999999E-6144\"}}"
+ },
+ {
+ "description": "[decq130] fold-downs (more below) (Clamped)",
+ "bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.23E+6144\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}"
+ },
+ {
+ "description": "[decq132] fold-downs (more below) (Clamped)",
+ "bson": "18000000136400000000000A5BC138938D44C64D31FEDF00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E+6144\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E+6144\"}}"
+ },
+ {
+ "description": "[decq177] Nmin and below (Subnormal)",
+ "bson": "180000001364000000000081EFAC855B416D2DEE04008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.100000000000000000000000000000000E-6143\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}"
+ },
+ {
+ "description": "[decq178] Nmin and below (Subnormal)",
+ "bson": "180000001364000000000081EFAC855B416D2DEE04008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}"
+ },
+ {
+ "description": "[decq179] Nmin and below (Subnormal)",
+ "bson": "180000001364000A00000000000000000000000000008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000010E-6143\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}"
+ },
+ {
+ "description": "[decq180] Nmin and below (Subnormal)",
+ "bson": "180000001364000A00000000000000000000000000008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}"
+ },
+ {
+ "description": "[decq181] Nmin and below (Subnormal)",
+ "bson": "180000001364000100000000000000000000000000028000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000000000000000000000000001E-6143\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}"
+ },
+ {
+ "description": "[decq182] Nmin and below (Subnormal)",
+ "bson": "180000001364000100000000000000000000000000028000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}"
+ },
+ {
+ "description": "[decq183] Nmin and below (Subnormal)",
+ "bson": "180000001364000100000000000000000000000000008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000001E-6143\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}"
+ },
+ {
+ "description": "[decq184] Nmin and below (Subnormal)",
+ "bson": "180000001364000100000000000000000000000000008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}"
+ },
+ {
+ "description": "[decq190] underflow edge cases (Subnormal)",
+ "bson": "180000001364000100000000000000000000000000008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-1e-6176\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}"
+ },
+ {
+ "description": "[decq200] underflow edge cases (Subnormal)",
+ "bson": "18000000136400FFFFFFFF095BC138938D44C64D31008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-999999999999999999999999999999999e-6176\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.99999999999999999999999999999999E-6144\"}}"
+ },
+ {
+ "description": "[decq400] zeros (Clamped)",
+ "bson": "180000001364000000000000000000000000000000000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}"
+ },
+ {
+ "description": "[decq401] zeros (Clamped)",
+ "bson": "180000001364000000000000000000000000000000000000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6177\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}"
+ },
+ {
+ "description": "[decq414] clamped zeros... (Clamped)",
+ "bson": "180000001364000000000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6112\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}"
+ },
+ {
+ "description": "[decq416] clamped zeros... (Clamped)",
+ "bson": "180000001364000000000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6144\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}"
+ },
+ {
+ "description": "[decq418] clamped zeros... (Clamped)",
+ "bson": "180000001364000000000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}"
+ },
+ {
+ "description": "[decq420] negative zeros (Clamped)",
+ "bson": "180000001364000000000000000000000000000000008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}"
+ },
+ {
+ "description": "[decq421] negative zeros (Clamped)",
+ "bson": "180000001364000000000000000000000000000000008000",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6177\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}"
+ },
+ {
+ "description": "[decq434] clamped zeros... (Clamped)",
+ "bson": "180000001364000000000000000000000000000000FEDF00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6112\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}"
+ },
+ {
+ "description": "[decq436] clamped zeros... (Clamped)",
+ "bson": "180000001364000000000000000000000000000000FEDF00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6144\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}"
+ },
+ {
+ "description": "[decq438] clamped zeros... (Clamped)",
+ "bson": "180000001364000000000000000000000000000000FEDF00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+8000\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}"
+ },
+ {
+ "description": "[decq601] fold-down full sequence (Clamped)",
+ "bson": "18000000136400000000000A5BC138938D44C64D31FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}"
+ },
+ {
+ "description": "[decq603] fold-down full sequence (Clamped)",
+ "bson": "180000001364000000000081EFAC855B416D2DEE04FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6143\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}"
+ },
+ {
+ "description": "[decq605] fold-down full sequence (Clamped)",
+ "bson": "1800000013640000000080264B91C02220BE377E00FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6142\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}"
+ },
+ {
+ "description": "[decq607] fold-down full sequence (Clamped)",
+ "bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6141\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}"
+ },
+ {
+ "description": "[decq609] fold-down full sequence (Clamped)",
+ "bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6140\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}"
+ },
+ {
+ "description": "[decq611] fold-down full sequence (Clamped)",
+ "bson": "18000000136400000000106102253E5ECE4F200000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6139\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}"
+ },
+ {
+ "description": "[decq613] fold-down full sequence (Clamped)",
+ "bson": "18000000136400000000E83C80D09F3C2E3B030000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6138\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}"
+ },
+ {
+ "description": "[decq615] fold-down full sequence (Clamped)",
+ "bson": "18000000136400000000E4D20CC8DCD2B752000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6137\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}"
+ },
+ {
+ "description": "[decq617] fold-down full sequence (Clamped)",
+ "bson": "180000001364000000004A48011416954508000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6136\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}"
+ },
+ {
+ "description": "[decq619] fold-down full sequence (Clamped)",
+ "bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6135\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}"
+ },
+ {
+ "description": "[decq621] fold-down full sequence (Clamped)",
+ "bson": "18000000136400000080F64AE1C7022D1500000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6134\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}"
+ },
+ {
+ "description": "[decq623] fold-down full sequence (Clamped)",
+ "bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6133\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}"
+ },
+ {
+ "description": "[decq625] fold-down full sequence (Clamped)",
+ "bson": "180000001364000000A0DEC5ADC935360000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6132\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000E+6132\"}}"
+ },
+ {
+ "description": "[decq627] fold-down full sequence (Clamped)",
+ "bson": "18000000136400000010632D5EC76B050000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6131\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}"
+ },
+ {
+ "description": "[decq629] fold-down full sequence (Clamped)",
+ "bson": "180000001364000000E8890423C78A000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6130\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}"
+ },
+ {
+ "description": "[decq631] fold-down full sequence (Clamped)",
+ "bson": "18000000136400000064A7B3B6E00D000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6129\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}"
+ },
+ {
+ "description": "[decq633] fold-down full sequence (Clamped)",
+ "bson": "1800000013640000008A5D78456301000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6128\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}"
+ },
+ {
+ "description": "[decq635] fold-down full sequence (Clamped)",
+ "bson": "180000001364000000C16FF2862300000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6127\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}"
+ },
+ {
+ "description": "[decq637] fold-down full sequence (Clamped)",
+ "bson": "180000001364000080C6A47E8D0300000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6126\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}"
+ },
+ {
+ "description": "[decq639] fold-down full sequence (Clamped)",
+ "bson": "1800000013640000407A10F35A0000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6125\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000E+6125\"}}"
+ },
+ {
+ "description": "[decq641] fold-down full sequence (Clamped)",
+ "bson": "1800000013640000A0724E18090000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6124\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}"
+ },
+ {
+ "description": "[decq643] fold-down full sequence (Clamped)",
+ "bson": "180000001364000010A5D4E8000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6123\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000E+6123\"}}"
+ },
+ {
+ "description": "[decq645] fold-down full sequence (Clamped)",
+ "bson": "1800000013640000E8764817000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6122\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}"
+ },
+ {
+ "description": "[decq647] fold-down full sequence (Clamped)",
+ "bson": "1800000013640000E40B5402000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6121\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}"
+ },
+ {
+ "description": "[decq649] fold-down full sequence (Clamped)",
+ "bson": "1800000013640000CA9A3B00000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6120\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}"
+ },
+ {
+ "description": "[decq651] fold-down full sequence (Clamped)",
+ "bson": "1800000013640000E1F50500000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6119\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}"
+ },
+ {
+ "description": "[decq653] fold-down full sequence (Clamped)",
+ "bson": "180000001364008096980000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6118\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}"
+ },
+ {
+ "description": "[decq655] fold-down full sequence (Clamped)",
+ "bson": "1800000013640040420F0000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6117\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}"
+ },
+ {
+ "description": "[decq657] fold-down full sequence (Clamped)",
+ "bson": "18000000136400A086010000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6116\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}"
+ },
+ {
+ "description": "[decq659] fold-down full sequence (Clamped)",
+ "bson": "180000001364001027000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6115\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}"
+ },
+ {
+ "description": "[decq661] fold-down full sequence (Clamped)",
+ "bson": "18000000136400E803000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6114\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}"
+ },
+ {
+ "description": "[decq663] fold-down full sequence (Clamped)",
+ "bson": "180000001364006400000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6113\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}"
+ },
+ {
+ "description": "[decq665] fold-down full sequence (Clamped)",
+ "bson": "180000001364000A00000000000000000000000000FE5F00",
+ "extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6112\"}}",
+ "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}"
+ }
+ ]
+}
+`},
+
+ {"decimal128-6.json", `
+{
+ "description": "Decimal128",
+ "bson_type": "0x13",
+ "test_key": "d",
+ "parseErrors": [
+ {
+ "description": "Incomplete Exponent",
+ "string": "1e"
+ },
+ {
+ "description": "Exponent at the beginning",
+ "string": "E01"
+ },
+ {
+ "description": "Just a decimal place",
+ "string": "."
+ },
+ {
+ "description": "2 decimal places",
+ "string": "..3"
+ },
+ {
+ "description": "2 decimal places",
+ "string": ".13.3"
+ },
+ {
+ "description": "2 decimal places",
+ "string": "1..3"
+ },
+ {
+ "description": "2 decimal places",
+ "string": "1.3.4"
+ },
+ {
+ "description": "2 decimal places",
+ "string": "1.34."
+ },
+ {
+ "description": "Decimal with no digits",
+ "string": ".e"
+ },
+ {
+ "description": "2 signs",
+ "string": "+-32.4"
+ },
+ {
+ "description": "2 signs",
+ "string": "-+32.4"
+ },
+ {
+ "description": "2 negative signs",
+ "string": "--32.4"
+ },
+ {
+ "description": "2 negative signs",
+ "string": "-32.-4"
+ },
+ {
+ "description": "End in negative sign",
+ "string": "32.0-"
+ },
+ {
+ "description": "2 negative signs",
+ "string": "32.4E--21"
+ },
+ {
+ "description": "2 negative signs",
+ "string": "32.4E-2-1"
+ },
+ {
+ "description": "2 signs",
+ "string": "32.4E+-21"
+ },
+ {
+ "description": "Empty string",
+ "string": ""
+ },
+ {
+ "description": "leading white space positive number",
+ "string": " 1"
+ },
+ {
+ "description": "leading white space negative number",
+ "string": " -1"
+ },
+ {
+ "description": "trailing white space",
+ "string": "1 "
+ },
+ {
+ "description": "Invalid",
+ "string": "E"
+ },
+ {
+ "description": "Invalid",
+ "string": "invalid"
+ },
+ {
+ "description": "Invalid",
+ "string": "i"
+ },
+ {
+ "description": "Invalid",
+ "string": "in"
+ },
+ {
+ "description": "Invalid",
+ "string": "-in"
+ },
+ {
+ "description": "Invalid",
+ "string": "Na"
+ },
+ {
+ "description": "Invalid",
+ "string": "-Na"
+ },
+ {
+ "description": "Invalid",
+ "string": "1.23abc"
+ },
+ {
+ "description": "Invalid",
+ "string": "1.23abcE+02"
+ },
+ {
+ "description": "Invalid",
+ "string": "1.23E+0aabs2"
+ }
+ ]
+}
+`},
+
+ {"decimal128-7.json", `
+{
+ "description": "Decimal128",
+ "bson_type": "0x13",
+ "test_key": "d",
+ "parseErrors": [
+ {
+ "description": "[basx572] Near-specials (Conversion_syntax)",
+ "string": "-9Inf"
+ },
+ {
+ "description": "[basx516] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "-1-"
+ },
+ {
+ "description": "[basx533] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "0000.."
+ },
+ {
+ "description": "[basx534] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": ".0000."
+ },
+ {
+ "description": "[basx535] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "00..00"
+ },
+ {
+ "description": "[basx569] Near-specials (Conversion_syntax)",
+ "string": "0Inf"
+ },
+ {
+ "description": "[basx571] Near-specials (Conversion_syntax)",
+ "string": "-0Inf"
+ },
+ {
+ "description": "[basx575] Near-specials (Conversion_syntax)",
+ "string": "0sNaN"
+ },
+ {
+ "description": "[basx503] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "++1"
+ },
+ {
+ "description": "[basx504] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "--1"
+ },
+ {
+ "description": "[basx505] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "-+1"
+ },
+ {
+ "description": "[basx506] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "+-1"
+ },
+ {
+ "description": "[basx510] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": " +1"
+ },
+ {
+ "description": "[basx513] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": " + 1"
+ },
+ {
+ "description": "[basx514] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": " - 1"
+ },
+ {
+ "description": "[basx501] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "."
+ },
+ {
+ "description": "[basx502] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": ".."
+ },
+ {
+ "description": "[basx519] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": ""
+ },
+ {
+ "description": "[basx525] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "e100"
+ },
+ {
+ "description": "[basx549] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "e+1"
+ },
+ {
+ "description": "[basx577] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": ".e+1"
+ },
+ {
+ "description": "[basx578] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": "+.e+1"
+ },
+ {
+ "description": "[basx581] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": "E+1"
+ },
+ {
+ "description": "[basx582] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": ".E+1"
+ },
+ {
+ "description": "[basx583] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": "+.E+1"
+ },
+ {
+ "description": "[basx579] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": "-.e+"
+ },
+ {
+ "description": "[basx580] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": "-.e"
+ },
+ {
+ "description": "[basx584] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": "-.E+"
+ },
+ {
+ "description": "[basx585] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": "-.E"
+ },
+ {
+ "description": "[basx589] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": "+.Inf"
+ },
+ {
+ "description": "[basx586] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": ".NaN"
+ },
+ {
+ "description": "[basx587] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": "-.NaN"
+ },
+ {
+ "description": "[basx545] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "ONE"
+ },
+ {
+ "description": "[basx561] Near-specials (Conversion_syntax)",
+ "string": "qNaN"
+ },
+ {
+ "description": "[basx573] Near-specials (Conversion_syntax)",
+ "string": "-sNa"
+ },
+ {
+ "description": "[basx588] some baddies with dots and Es and dots and specials (Conversion_syntax)",
+ "string": "+.sNaN"
+ },
+ {
+ "description": "[basx544] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "ten"
+ },
+ {
+ "description": "[basx527] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "u0b65"
+ },
+ {
+ "description": "[basx526] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "u0e5a"
+ },
+ {
+ "description": "[basx515] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "x"
+ },
+ {
+ "description": "[basx574] Near-specials (Conversion_syntax)",
+ "string": "xNaN"
+ },
+ {
+ "description": "[basx530] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": ".123.5"
+ },
+ {
+ "description": "[basx500] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1..2"
+ },
+ {
+ "description": "[basx542] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1e1.0"
+ },
+ {
+ "description": "[basx553] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1E+1.2.3"
+ },
+ {
+ "description": "[basx543] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1e123e"
+ },
+ {
+ "description": "[basx552] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1E+1.2"
+ },
+ {
+ "description": "[basx546] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1e.1"
+ },
+ {
+ "description": "[basx547] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1e1."
+ },
+ {
+ "description": "[basx554] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1E++1"
+ },
+ {
+ "description": "[basx555] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1E--1"
+ },
+ {
+ "description": "[basx556] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1E+-1"
+ },
+ {
+ "description": "[basx557] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1E-+1"
+ },
+ {
+ "description": "[basx558] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1E'1"
+ },
+ {
+ "description": "[basx559] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1E\"1"
+ },
+ {
+ "description": "[basx520] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1e-"
+ },
+ {
+ "description": "[basx560] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1E"
+ },
+ {
+ "description": "[basx548] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1ee"
+ },
+ {
+ "description": "[basx551] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1.2.1"
+ },
+ {
+ "description": "[basx550] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1.23.4"
+ },
+ {
+ "description": "[basx529] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "1.34.5"
+ },
+ {
+ "description": "[basx531] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "01.35."
+ },
+ {
+ "description": "[basx532] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "01.35-"
+ },
+ {
+ "description": "[basx518] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "3+"
+ },
+ {
+ "description": "[basx521] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "7e99999a"
+ },
+ {
+ "description": "[basx570] Near-specials (Conversion_syntax)",
+ "string": "9Inf"
+ },
+ {
+ "description": "[basx512] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "12 "
+ },
+ {
+ "description": "[basx517] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "12-"
+ },
+ {
+ "description": "[basx507] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "12e"
+ },
+ {
+ "description": "[basx508] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "12e++"
+ },
+ {
+ "description": "[basx509] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "12f4"
+ },
+ {
+ "description": "[basx536] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "111e*123"
+ },
+ {
+ "description": "[basx537] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "111e123-"
+ },
+ {
+ "description": "[basx540] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "111e1*23"
+ },
+ {
+ "description": "[basx538] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "111e+12+"
+ },
+ {
+ "description": "[basx539] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "111e1-3-"
+ },
+ {
+ "description": "[basx541] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "111E1e+3"
+ },
+ {
+ "description": "[basx528] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "123,65"
+ },
+ {
+ "description": "[basx523] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "7e12356789012x"
+ },
+ {
+ "description": "[basx522] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)",
+ "string": "7e123567890x"
+ }
+ ]
+}
+`},
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decode.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decode.go
new file mode 100644
index 00000000000..7c2d8416afe
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/decode.go
@@ -0,0 +1,849 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+ "fmt"
+ "math"
+ "net/url"
+ "reflect"
+ "strconv"
+ "sync"
+ "time"
+)
+
+type decoder struct {
+ in []byte
+ i int
+ docType reflect.Type
+}
+
+var typeM = reflect.TypeOf(M{})
+
+func newDecoder(in []byte) *decoder {
+ return &decoder{in, 0, typeM}
+}
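+
+// A minimal sketch of how this decoder is driven (unmarshalSketch is a
+// hypothetical name; the package's real entry point, Unmarshal, additionally
+// checks the target's kind and recovers the panics below into errors):
+//
+//	func unmarshalSketch(data []byte, out interface{}) {
+//		d := newDecoder(data)
+//		d.readDocTo(reflect.ValueOf(out))
+//	}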
+
+// --------------------------------------------------------------------------
+// Some helper functions.
+
+func corrupted() {
+ panic("Document is corrupted")
+}
+
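+// settableValueOf copies i into a freshly allocated element so the returned
+// reflect.Value is settable; reflect.ValueOf(i) alone would not be.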
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of documents.
+
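+// Cached classification of whether a type implements Setter: not yet probed,
+// not implemented, implemented by the type itself, or only by its address.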
+const (
+ setterUnknown = iota
+ setterNone
+ setterType
+ setterAddr
+)
+
+var setterStyles map[reflect.Type]int
+var setterIface reflect.Type
+var setterMutex sync.RWMutex
+
+func init() {
+ var iface Setter
+ setterIface = reflect.TypeOf(&iface).Elem()
+ setterStyles = make(map[reflect.Type]int)
+}
+
+func setterStyle(outt reflect.Type) int {
+ setterMutex.RLock()
+ style := setterStyles[outt]
+ setterMutex.RUnlock()
+ if style == setterUnknown {
+ setterMutex.Lock()
+ defer setterMutex.Unlock()
+ if outt.Implements(setterIface) {
+ setterStyles[outt] = setterType
+ } else if reflect.PtrTo(outt).Implements(setterIface) {
+ setterStyles[outt] = setterAddr
+ } else {
+ setterStyles[outt] = setterNone
+ }
+ style = setterStyles[outt]
+ }
+ return style
+}
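+
+// Hedged illustration: a type that the probe above classifies as setterAddr,
+// since only *upperString implements Setter (upperString and the strings
+// import are assumed names for this example, not part of this file):
+//
+//	type upperString string
+//
+//	func (s *upperString) SetBSON(raw Raw) error {
+//		var v string
+//		if err := raw.Unmarshal(&v); err != nil {
+//			return err
+//		}
+//		*s = upperString(strings.ToUpper(v))
+//		return nil
+//	}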
+
+func getSetter(outt reflect.Type, out reflect.Value) Setter {
+ style := setterStyle(outt)
+ if style == setterNone {
+ return nil
+ }
+ if style == setterAddr {
+ if !out.CanAddr() {
+ return nil
+ }
+ out = out.Addr()
+ } else if outt.Kind() == reflect.Ptr && out.IsNil() {
+ out.Set(reflect.New(outt.Elem()))
+ }
+ return out.Interface().(Setter)
+}
+
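+// clearMap empties m in place, so unmarshalling into an existing non-empty
+// map reuses it rather than replacing it.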
+func clearMap(m reflect.Value) {
+ var none reflect.Value
+ for _, k := range m.MapKeys() {
+ m.SetMapIndex(k, none)
+ }
+}
+
+func (d *decoder) readDocTo(out reflect.Value) {
+ var elemType reflect.Type
+ outt := out.Type()
+ outk := outt.Kind()
+
+ for {
+ if outk == reflect.Ptr && out.IsNil() {
+ out.Set(reflect.New(outt.Elem()))
+ }
+ if setter := getSetter(outt, out); setter != nil {
+ var raw Raw
+ d.readDocTo(reflect.ValueOf(&raw))
+ err := setter.SetBSON(raw)
+ if _, ok := err.(*TypeError); err != nil && !ok {
+ panic(err)
+ }
+ return
+ }
+ if outk == reflect.Ptr {
+ out = out.Elem()
+ outt = out.Type()
+ outk = out.Kind()
+ continue
+ }
+ break
+ }
+
+ var fieldsMap map[string]fieldInfo
+ var inlineMap reflect.Value
+ start := d.i
+
+ origout := out
+ if outk == reflect.Interface {
+ if d.docType.Kind() == reflect.Map {
+ mv := reflect.MakeMap(d.docType)
+ out.Set(mv)
+ out = mv
+ } else {
+ dv := reflect.New(d.docType).Elem()
+ out.Set(dv)
+ out = dv
+ }
+ outt = out.Type()
+ outk = outt.Kind()
+ }
+
+ docType := d.docType
+ keyType := typeString
+ convertKey := false
+ switch outk {
+ case reflect.Map:
+ keyType = outt.Key()
+ if keyType.Kind() != reflect.String {
+ panic("BSON map must have string keys. Got: " + outt.String())
+ }
+ if keyType != typeString {
+ convertKey = true
+ }
+ elemType = outt.Elem()
+ if elemType == typeIface {
+ d.docType = outt
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(out.Type()))
+ } else if out.Len() > 0 {
+ clearMap(out)
+ }
+ case reflect.Struct:
+ if outt != typeRaw {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+ fieldsMap = sinfo.FieldsMap
+ out.Set(sinfo.Zero)
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ if !inlineMap.IsNil() && inlineMap.Len() > 0 {
+ clearMap(inlineMap)
+ }
+ elemType = inlineMap.Type().Elem()
+ if elemType == typeIface {
+ d.docType = inlineMap.Type()
+ }
+ }
+ }
+ case reflect.Slice:
+ switch outt.Elem() {
+ case typeDocElem:
+ origout.Set(d.readDocElems(outt))
+ return
+ case typeRawDocElem:
+ origout.Set(d.readRawDocElems(outt))
+ return
+ }
+ fallthrough
+ default:
+ panic("Unsupported document type for unmarshalling: " + out.Type().String())
+ }
+
+ end := int(d.readInt32())
+ end += d.i - 4
+ if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+ corrupted()
+ }
+ for d.in[d.i] != '\x00' {
+ kind := d.readByte()
+ name := d.readCStr()
+ if d.i >= end {
+ corrupted()
+ }
+
+ switch outk {
+ case reflect.Map:
+ e := reflect.New(elemType).Elem()
+ if d.readElemTo(e, kind) {
+ k := reflect.ValueOf(name)
+ if convertKey {
+ k = k.Convert(keyType)
+ }
+ out.SetMapIndex(k, e)
+ }
+ case reflect.Struct:
+ if outt == typeRaw {
+ d.dropElem(kind)
+ } else {
+ if info, ok := fieldsMap[name]; ok {
+ if info.Inline == nil {
+ d.readElemTo(out.Field(info.Num), kind)
+ } else {
+ d.readElemTo(out.FieldByIndex(info.Inline), kind)
+ }
+ } else if inlineMap.IsValid() {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ e := reflect.New(elemType).Elem()
+ if d.readElemTo(e, kind) {
+ inlineMap.SetMapIndex(reflect.ValueOf(name), e)
+ }
+ } else {
+ d.dropElem(kind)
+ }
+ }
+ case reflect.Slice:
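+			// Unreachable: DocElem/RawDocElem slices returned above,
+			// and any other slice type already panicked.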
+ }
+
+ if d.i >= end {
+ corrupted()
+ }
+ }
+ d.i++ // '\x00'
+ if d.i != end {
+ corrupted()
+ }
+ d.docType = docType
+
+ if outt == typeRaw {
+ out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
+ }
+}
+
+func (d *decoder) readArrayDocTo(out reflect.Value) {
+ end := int(d.readInt32())
+ end += d.i - 4
+ if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+ corrupted()
+ }
+ i := 0
+ l := out.Len()
+ for d.in[d.i] != '\x00' {
+ if i >= l {
+ panic("Length mismatch on array field")
+ }
+ kind := d.readByte()
+ for d.i < end && d.in[d.i] != '\x00' {
+ d.i++
+ }
+ if d.i >= end {
+ corrupted()
+ }
+ d.i++
+ d.readElemTo(out.Index(i), kind)
+ if d.i >= end {
+ corrupted()
+ }
+ i++
+ }
+ if i != l {
+ panic("Length mismatch on array field")
+ }
+ d.i++ // '\x00'
+ if d.i != end {
+ corrupted()
+ }
+}
+
+func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
+ tmp := make([]reflect.Value, 0, 8)
+ elemType := t.Elem()
+ if elemType == typeRawDocElem {
+ d.dropElem(0x04)
+ return reflect.Zero(t).Interface()
+ }
+
+ end := int(d.readInt32())
+ end += d.i - 4
+ if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+ corrupted()
+ }
+ for d.in[d.i] != '\x00' {
+ kind := d.readByte()
+ for d.i < end && d.in[d.i] != '\x00' {
+ d.i++
+ }
+ if d.i >= end {
+ corrupted()
+ }
+ d.i++
+ e := reflect.New(elemType).Elem()
+ if d.readElemTo(e, kind) {
+ tmp = append(tmp, e)
+ }
+ if d.i >= end {
+ corrupted()
+ }
+ }
+ d.i++ // '\x00'
+ if d.i != end {
+ corrupted()
+ }
+
+ n := len(tmp)
+ slice := reflect.MakeSlice(t, n, n)
+ for i := 0; i != n; i++ {
+ slice.Index(i).Set(tmp[i])
+ }
+ return slice.Interface()
+}
+
+var typeSlice = reflect.TypeOf([]interface{}{})
+var typeIface = typeSlice.Elem()
+
+func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
+ docType := d.docType
+ d.docType = typ
+ slice := make([]DocElem, 0, 8)
+ d.readDocWith(func(kind byte, name string) {
+ e := DocElem{Name: name}
+ v := reflect.ValueOf(&e.Value)
+ if d.readElemTo(v.Elem(), kind) {
+ slice = append(slice, e)
+ }
+ })
+ slicev := reflect.New(typ).Elem()
+ slicev.Set(reflect.ValueOf(slice))
+ d.docType = docType
+ return slicev
+}
+
+func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
+ docType := d.docType
+ d.docType = typ
+ slice := make([]RawDocElem, 0, 8)
+ d.readDocWith(func(kind byte, name string) {
+ e := RawDocElem{Name: name}
+ v := reflect.ValueOf(&e.Value)
+ if d.readElemTo(v.Elem(), kind) {
+ slice = append(slice, e)
+ }
+ })
+ slicev := reflect.New(typ).Elem()
+ slicev.Set(reflect.ValueOf(slice))
+ d.docType = docType
+ return slicev
+}
+
+func (d *decoder) readDocWith(f func(kind byte, name string)) {
+ end := int(d.readInt32())
+ end += d.i - 4
+ if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+ corrupted()
+ }
+ for d.in[d.i] != '\x00' {
+ kind := d.readByte()
+ name := d.readCStr()
+ if d.i >= end {
+ corrupted()
+ }
+ f(kind, name)
+ if d.i >= end {
+ corrupted()
+ }
+ }
+ d.i++ // '\x00'
+ if d.i != end {
+ corrupted()
+ }
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of individual elements within a document.
+
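+// blackHole is a reusable settable value that dropElem decodes into when an
+// element must be consumed from the input but its result is discarded.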
+var blackHole = settableValueOf(struct{}{})
+
+func (d *decoder) dropElem(kind byte) {
+ d.readElemTo(blackHole, kind)
+}
+
+// Attempt to decode an element from the document and put it into out.
+// If the types are not compatible, the returned good value will be
+// false and out will be unchanged.
+func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
+
+ start := d.i
+
+ if kind == 0x03 {
+ // Delegate unmarshaling of documents.
+ outt := out.Type()
+ outk := out.Kind()
+ switch outk {
+ case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
+ d.readDocTo(out)
+ return true
+ }
+ if setterStyle(outt) != setterNone {
+ d.readDocTo(out)
+ return true
+ }
+ if outk == reflect.Slice {
+ switch outt.Elem() {
+ case typeDocElem:
+ out.Set(d.readDocElems(outt))
+ case typeRawDocElem:
+ out.Set(d.readRawDocElems(outt))
+ default:
+ d.readDocTo(blackHole)
+ }
+ return true
+ }
+ d.readDocTo(blackHole)
+ return true
+ }
+
+ var in interface{}
+
+ switch kind {
+ case 0x01: // Float64
+ in = d.readFloat64()
+ case 0x02: // UTF-8 string
+ in = d.readStr()
+ case 0x03: // Document
+ panic("Can't happen. Handled above.")
+ case 0x04: // Array
+ outt := out.Type()
+ if setterStyle(outt) != setterNone {
+ // Skip the value so its data is handed to the setter below.
+ d.dropElem(kind)
+ break
+ }
+ for outt.Kind() == reflect.Ptr {
+ outt = outt.Elem()
+ }
+ switch outt.Kind() {
+ case reflect.Array:
+ d.readArrayDocTo(out)
+ return true
+ case reflect.Slice:
+ in = d.readSliceDoc(outt)
+ default:
+ in = d.readSliceDoc(typeSlice)
+ }
+ case 0x05: // Binary
+ b := d.readBinary()
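+		// Generic (0x00) and old (0x02) binary subtypes unwrap to a
+		// plain []byte; other subtypes keep the Binary wrapper so the
+		// kind survives.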
+ if b.Kind == 0x00 || b.Kind == 0x02 {
+ in = b.Data
+ } else {
+ in = b
+ }
+ case 0x06: // Undefined (obsolete, but still seen in the wild)
+ in = Undefined
+ case 0x07: // ObjectId
+ in = ObjectId(d.readBytes(12))
+ case 0x08: // Bool
+ in = d.readBool()
+	case 0x09: // UTC datetime
+		// Stored as milliseconds since the Unix epoch.
+ i := d.readInt64()
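+		// -62135596800000 is the millisecond offset of Go's zero
+		// time.Time (January 1, year 1 UTC), so it round-trips exactly.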
+ if i == -62135596800000 {
+ in = time.Time{} // In UTC for convenience.
+ } else {
+ in = time.Unix(i/1e3, i%1e3*1e6)
+ }
+ case 0x0A: // Nil
+ in = nil
+ case 0x0B: // RegEx
+ in = d.readRegEx()
+ case 0x0C:
+ in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
+ case 0x0D: // JavaScript without scope
+ in = JavaScript{Code: d.readStr()}
+ case 0x0E: // Symbol
+ in = Symbol(d.readStr())
+ case 0x0F: // JavaScript with scope
+ d.i += 4 // Skip length
+ js := JavaScript{d.readStr(), make(M)}
+ d.readDocTo(reflect.ValueOf(js.Scope))
+ in = js
+ case 0x10: // Int32
+ in = int(d.readInt32())
+ case 0x11: // Mongo-specific timestamp
+ in = MongoTimestamp(d.readInt64())
+ case 0x12: // Int64
+ in = d.readInt64()
+ case 0x13: // Decimal128
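+		// The 128 bits are stored little-endian: low word first.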
+ in = Decimal128{
+ l: uint64(d.readInt64()),
+ h: uint64(d.readInt64()),
+ }
+ case 0x7F: // Max key
+ in = MaxKey
+ case 0xFF: // Min key
+ in = MinKey
+ default:
+ panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
+ }
+
+ outt := out.Type()
+
+ if outt == typeRaw {
+ out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
+ return true
+ }
+
+ if setter := getSetter(outt, out); setter != nil {
+ err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
+ if err == SetZero {
+ out.Set(reflect.Zero(outt))
+ return true
+ }
+ if err == nil {
+ return true
+ }
+ if _, ok := err.(*TypeError); !ok {
+ panic(err)
+ }
+ return false
+ }
+
+ if in == nil {
+ out.Set(reflect.Zero(outt))
+ return true
+ }
+
+ outk := outt.Kind()
+
+ // Dereference and initialize pointer if necessary.
+ first := true
+ for outk == reflect.Ptr {
+ if !out.IsNil() {
+ out = out.Elem()
+ } else {
+ elem := reflect.New(outt.Elem())
+ if first {
+ // Only set if value is compatible.
+ first = false
+ defer func(out, elem reflect.Value) {
+ if good {
+ out.Set(elem)
+ }
+ }(out, elem)
+ } else {
+ out.Set(elem)
+ }
+ out = elem
+ }
+ outt = out.Type()
+ outk = outt.Kind()
+ }
+
+ inv := reflect.ValueOf(in)
+ if outt == inv.Type() {
+ out.Set(inv)
+ return true
+ }
+
+ switch outk {
+ case reflect.Interface:
+ out.Set(inv)
+ return true
+ case reflect.String:
+ switch inv.Kind() {
+ case reflect.String:
+ out.SetString(inv.String())
+ return true
+ case reflect.Slice:
+ if b, ok := in.([]byte); ok {
+ out.SetString(string(b))
+ return true
+ }
+ case reflect.Int, reflect.Int64:
+ if outt == typeJSONNumber {
+ out.SetString(strconv.FormatInt(inv.Int(), 10))
+ return true
+ }
+ case reflect.Float64:
+ if outt == typeJSONNumber {
+ out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
+ return true
+ }
+ }
+ case reflect.Slice, reflect.Array:
+		// Remember, array (0x04) slices are built with the correct
+		// element type. If we are here, it must be a cross-kind BSON
+		// conversion (e.g. a 0x02 string or 0x05 binary into a byte slice).
+ if outt.Elem().Kind() != reflect.Uint8 {
+ break
+ }
+ switch inv.Kind() {
+ case reflect.String:
+ slice := []byte(inv.String())
+ out.Set(reflect.ValueOf(slice))
+ return true
+ case reflect.Slice:
+ switch outt.Kind() {
+ case reflect.Array:
+ reflect.Copy(out, inv)
+ case reflect.Slice:
+ out.SetBytes(inv.Bytes())
+ }
+ return true
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch inv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ out.SetInt(inv.Int())
+ return true
+ case reflect.Float32, reflect.Float64:
+ out.SetInt(int64(inv.Float()))
+ return true
+ case reflect.Bool:
+ if inv.Bool() {
+ out.SetInt(1)
+ } else {
+ out.SetInt(0)
+ }
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ panic("can't happen: no uint types in BSON (!?)")
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch inv.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ out.SetUint(uint64(inv.Int()))
+ return true
+ case reflect.Float32, reflect.Float64:
+ out.SetUint(uint64(inv.Float()))
+ return true
+ case reflect.Bool:
+ if inv.Bool() {
+ out.SetUint(1)
+ } else {
+ out.SetUint(0)
+ }
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ panic("Can't happen. No uint types in BSON.")
+ }
+ case reflect.Float32, reflect.Float64:
+ switch inv.Kind() {
+ case reflect.Float32, reflect.Float64:
+ out.SetFloat(inv.Float())
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ out.SetFloat(float64(inv.Int()))
+ return true
+ case reflect.Bool:
+ if inv.Bool() {
+ out.SetFloat(1)
+ } else {
+ out.SetFloat(0)
+ }
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ panic("Can't happen. No uint types in BSON?")
+ }
+ case reflect.Bool:
+ switch inv.Kind() {
+ case reflect.Bool:
+ out.SetBool(inv.Bool())
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ out.SetBool(inv.Int() != 0)
+ return true
+ case reflect.Float32, reflect.Float64:
+ out.SetBool(inv.Float() != 0)
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ panic("Can't happen. No uint types in BSON?")
+ }
+ case reflect.Struct:
+ if outt == typeURL && inv.Kind() == reflect.String {
+ u, err := url.Parse(inv.String())
+ if err != nil {
+ panic(err)
+ }
+ out.Set(reflect.ValueOf(u).Elem())
+ return true
+ }
+ if outt == typeBinary {
+ if b, ok := in.([]byte); ok {
+ out.Set(reflect.ValueOf(Binary{Data: b}))
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// --------------------------------------------------------------------------
+// Parsers of basic types.
+
+func (d *decoder) readRegEx() RegEx {
+ re := RegEx{}
+ re.Pattern = d.readCStr()
+ re.Options = d.readCStr()
+ return re
+}
+
+func (d *decoder) readBinary() Binary {
+ l := d.readInt32()
+ b := Binary{}
+ b.Kind = d.readByte()
+ b.Data = d.readBytes(l)
+ if b.Kind == 0x02 && len(b.Data) >= 4 {
+ // Obsolete subtype 0x02 prefixes the payload with a redundant length; skip past it.
+ b.Data = b.Data[4:]
+ }
+ return b
+}
+
+func (d *decoder) readStr() string {
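+ // A BSON string is an int32 byte length (which counts the trailing NUL) followed by the bytes and a NUL terminator.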
+ l := d.readInt32()
+ b := d.readBytes(l - 1)
+ if d.readByte() != '\x00' {
+ corrupted()
+ }
+ return string(b)
+}
+
+func (d *decoder) readCStr() string {
+ start := d.i
+ end := start
+ l := len(d.in)
+ for ; end != l; end++ {
+ if d.in[end] == '\x00' {
+ break
+ }
+ }
+ d.i = end + 1
+ if d.i > l {
+ corrupted()
+ }
+ return string(d.in[start:end])
+}
+
+func (d *decoder) readBool() bool {
+ b := d.readByte()
+ if b == 0 {
+ return false
+ }
+ if b == 1 {
+ return true
+ }
+ panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
+}
+
+func (d *decoder) readFloat64() float64 {
+ return math.Float64frombits(uint64(d.readInt64()))
+}
+
+func (d *decoder) readInt32() int32 {
+ b := d.readBytes(4)
+ return int32((uint32(b[0]) << 0) |
+ (uint32(b[1]) << 8) |
+ (uint32(b[2]) << 16) |
+ (uint32(b[3]) << 24))
+}
+
+func (d *decoder) readInt64() int64 {
+ b := d.readBytes(8)
+ return int64((uint64(b[0]) << 0) |
+ (uint64(b[1]) << 8) |
+ (uint64(b[2]) << 16) |
+ (uint64(b[3]) << 24) |
+ (uint64(b[4]) << 32) |
+ (uint64(b[5]) << 40) |
+ (uint64(b[6]) << 48) |
+ (uint64(b[7]) << 56))
+}
+
+func (d *decoder) readByte() byte {
+ i := d.i
+ d.i++
+ if d.i > len(d.in) {
+ corrupted()
+ }
+ return d.in[i]
+}
+
+func (d *decoder) readBytes(length int32) []byte {
+ if length < 0 {
+ corrupted()
+ }
+ start := d.i
+ d.i += int(length)
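+ // d.i < start can only happen if the offset overflowed, i.e. the length was absurdly large.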
+ if d.i < start || d.i > len(d.in) {
+ corrupted()
+ }
+ return d.in[start : start+int(length)]
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/encode.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/encode.go
new file mode 100644
index 00000000000..add39e865dd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/encode.go
@@ -0,0 +1,514 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "net/url"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// --------------------------------------------------------------------------
+// Some internal infrastructure.
+
+var (
+ typeBinary = reflect.TypeOf(Binary{})
+ typeObjectId = reflect.TypeOf(ObjectId(""))
+ typeDBPointer = reflect.TypeOf(DBPointer{"", ObjectId("")})
+ typeSymbol = reflect.TypeOf(Symbol(""))
+ typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
+ typeOrderKey = reflect.TypeOf(MinKey)
+ typeDocElem = reflect.TypeOf(DocElem{})
+ typeRawDocElem = reflect.TypeOf(RawDocElem{})
+ typeRaw = reflect.TypeOf(Raw{})
+ typeURL = reflect.TypeOf(url.URL{})
+ typeTime = reflect.TypeOf(time.Time{})
+ typeString = reflect.TypeOf("")
+ typeJSONNumber = reflect.TypeOf(json.Number(""))
+)
+
+const itoaCacheSize = 32
+
+var itoaCache []string
+
+func init() {
+ itoaCache = make([]string, itoaCacheSize)
+ for i := 0; i != itoaCacheSize; i++ {
+ itoaCache[i] = strconv.Itoa(i)
+ }
+}
+
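+// itoa returns the decimal string for i, serving small values from a
+// cache since they are used constantly as array keys.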
+func itoa(i int) string {
+ if i < itoaCacheSize {
+ return itoaCache[i]
+ }
+ return strconv.Itoa(i)
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of the document value itself.
+
+type encoder struct {
+ out []byte
+}
+
+func (e *encoder) addDoc(v reflect.Value) {
+ for {
+ if vi, ok := v.Interface().(Getter); ok {
+ getv, err := vi.GetBSON()
+ if err != nil {
+ panic(err)
+ }
+ v = reflect.ValueOf(getv)
+ continue
+ }
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+
+ if v.Type() == typeRaw {
+ raw := v.Interface().(Raw)
+ if raw.Kind != 0x03 && raw.Kind != 0x00 {
+ panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
+ }
+ if len(raw.Data) == 0 {
+ panic("Attempted to marshal empty Raw document")
+ }
+ e.addBytes(raw.Data...)
+ return
+ }
+
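+ // Reserve room for the int32 document length; it is patched in below,
+ // once the body and the terminating NUL are written.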
+ start := e.reserveInt32()
+
+ switch v.Kind() {
+ case reflect.Map:
+ e.addMap(v)
+ case reflect.Struct:
+ e.addStruct(v)
+ case reflect.Array, reflect.Slice:
+ e.addSlice(v)
+ default:
+ panic("Can't marshal " + v.Type().String() + " as a BSON document")
+ }
+
+ e.addBytes(0)
+ e.setInt32(start, int32(len(e.out)-start))
+}
+
+func (e *encoder) addMap(v reflect.Value) {
+ for _, k := range v.MapKeys() {
+ e.addElem(k.String(), v.MapIndex(k), false)
+ }
+}
+
+func (e *encoder) addStruct(v reflect.Value) {
+ sinfo, err := getStructInfo(v.Type())
+ if err != nil {
+ panic(err)
+ }
+ var value reflect.Value
+ if sinfo.InlineMap >= 0 {
+ m := v.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ for _, k := range m.MapKeys() {
+ ks := k.String()
+ if _, found := sinfo.FieldsMap[ks]; found {
+ panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
+ }
+ e.addElem(ks, m.MapIndex(k), false)
+ }
+ }
+ }
+ for _, info := range sinfo.FieldsList {
+ if info.Inline == nil {
+ value = v.Field(info.Num)
+ } else {
+ value = v.FieldByIndex(info.Inline)
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.addElem(info.Key, value, info.MinSize)
+ }
+}
+
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Ptr, reflect.Interface:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ if vt == typeTime {
+ return v.Interface().(time.Time).IsZero()
+ }
+ for i := 0; i < v.NumField(); i++ {
+ if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func (e *encoder) addSlice(v reflect.Value) {
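+ // bson.D, bson.RawD, and []DocElem slices are ordered documents, so
+ // their elements are emitted by name; other slices become arrays
+ // keyed "0", "1", and so on.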
+ vi := v.Interface()
+ if d, ok := vi.(D); ok {
+ for _, elem := range d {
+ e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+ }
+ return
+ }
+ if d, ok := vi.(RawD); ok {
+ for _, elem := range d {
+ e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+ }
+ return
+ }
+ l := v.Len()
+ et := v.Type().Elem()
+ if et == typeDocElem {
+ for i := 0; i < l; i++ {
+ elem := v.Index(i).Interface().(DocElem)
+ e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+ }
+ return
+ }
+ if et == typeRawDocElem {
+ for i := 0; i < l; i++ {
+ elem := v.Index(i).Interface().(RawDocElem)
+ e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+ }
+ return
+ }
+ for i := 0; i < l; i++ {
+ e.addElem(itoa(i), v.Index(i), false)
+ }
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of elements in a document.
+
+func (e *encoder) addElemName(kind byte, name string) {
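+ // An element is a one-byte kind, the NUL-terminated element name, and then the value bytes.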
+ e.addBytes(kind)
+ e.addBytes([]byte(name)...)
+ e.addBytes(0)
+}
+
+func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
+
+ if !v.IsValid() {
+ e.addElemName(0x0A, name)
+ return
+ }
+
+ if getter, ok := v.Interface().(Getter); ok {
+ getv, err := getter.GetBSON()
+ if err != nil {
+ panic(err)
+ }
+ e.addElem(name, reflect.ValueOf(getv), minSize)
+ return
+ }
+
+ switch v.Kind() {
+
+ case reflect.Interface:
+ e.addElem(name, v.Elem(), minSize)
+
+ case reflect.Ptr:
+ e.addElem(name, v.Elem(), minSize)
+
+ case reflect.String:
+ s := v.String()
+ switch v.Type() {
+ case typeObjectId:
+ if len(s) != 12 {
+ panic("ObjectIDs must be exactly 12 bytes long (got " +
+ strconv.Itoa(len(s)) + ")")
+ }
+ e.addElemName(0x07, name)
+ e.addBytes([]byte(s)...)
+ case typeSymbol:
+ e.addElemName(0x0E, name)
+ e.addStr(s)
+ case typeJSONNumber:
+ n := v.Interface().(json.Number)
+ if i, err := n.Int64(); err == nil {
+ e.addElemName(0x12, name)
+ e.addInt64(i)
+ } else if f, err := n.Float64(); err == nil {
+ e.addElemName(0x01, name)
+ e.addFloat64(f)
+ } else {
+ panic("failed to convert json.Number to a number: " + s)
+ }
+ default:
+ e.addElemName(0x02, name)
+ e.addStr(s)
+ }
+
+ case reflect.Float32, reflect.Float64:
+ e.addElemName(0x01, name)
+ e.addFloat64(v.Float())
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ u := v.Uint()
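+ // BSON has no unsigned types: encode as int32 when the value fits
+ // and the type or minSize hint allows it, otherwise as int64.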
+ if int64(u) < 0 {
+ panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
+ } else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
+ e.addElemName(0x10, name)
+ e.addInt32(int32(u))
+ } else {
+ e.addElemName(0x12, name)
+ e.addInt64(int64(u))
+ }
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ switch v.Type() {
+ case typeMongoTimestamp:
+ e.addElemName(0x11, name)
+ e.addInt64(v.Int())
+
+ case typeOrderKey:
+ if v.Int() == int64(MaxKey) {
+ e.addElemName(0x7F, name)
+ } else {
+ e.addElemName(0xFF, name)
+ }
+
+ default:
+ i := v.Int()
+ if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
+ // It fits into an int32, encode as such.
+ e.addElemName(0x10, name)
+ e.addInt32(int32(i))
+ } else {
+ e.addElemName(0x12, name)
+ e.addInt64(i)
+ }
+ }
+
+ case reflect.Bool:
+ e.addElemName(0x08, name)
+ if v.Bool() {
+ e.addBytes(1)
+ } else {
+ e.addBytes(0)
+ }
+
+ case reflect.Map:
+ e.addElemName(0x03, name)
+ e.addDoc(v)
+
+ case reflect.Slice:
+ vt := v.Type()
+ et := vt.Elem()
+ if et.Kind() == reflect.Uint8 {
+ e.addElemName(0x05, name)
+ e.addBinary(0x00, v.Bytes())
+ } else if et == typeDocElem || et == typeRawDocElem {
+ e.addElemName(0x03, name)
+ e.addDoc(v)
+ } else {
+ e.addElemName(0x04, name)
+ e.addDoc(v)
+ }
+
+ case reflect.Array:
+ et := v.Type().Elem()
+ if et.Kind() == reflect.Uint8 {
+ e.addElemName(0x05, name)
+ if v.CanAddr() {
+ e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
+ } else {
+ n := v.Len()
+ e.addInt32(int32(n))
+ e.addBytes(0x00)
+ for i := 0; i < n; i++ {
+ el := v.Index(i)
+ e.addBytes(byte(el.Uint()))
+ }
+ }
+ } else {
+ e.addElemName(0x04, name)
+ e.addDoc(v)
+ }
+
+ case reflect.Struct:
+ switch s := v.Interface().(type) {
+
+ case Raw:
+ kind := s.Kind
+ if kind == 0x00 {
+ kind = 0x03
+ }
+ if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
+ panic("Attempted to marshal empty Raw document")
+ }
+ e.addElemName(kind, name)
+ e.addBytes(s.Data...)
+
+ case Binary:
+ e.addElemName(0x05, name)
+ e.addBinary(s.Kind, s.Data)
+
+ case Decimal128:
+ e.addElemName(0x13, name)
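+ // Decimal128 is written as its low 64 bits followed by its high 64 bits, each little-endian.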
+ e.addInt64(int64(s.l))
+ e.addInt64(int64(s.h))
+
+ case DBPointer:
+ e.addElemName(0x0C, name)
+ e.addStr(s.Namespace)
+ if len(s.Id) != 12 {
+ panic("ObjectIDs must be exactly 12 bytes long (got " +
+ strconv.Itoa(len(s.Id)) + ")")
+ }
+ e.addBytes([]byte(s.Id)...)
+
+ case RegEx:
+ e.addElemName(0x0B, name)
+ e.addCStr(s.Pattern)
+ e.addCStr(s.Options)
+
+ case JavaScript:
+ if s.Scope == nil {
+ e.addElemName(0x0D, name)
+ e.addStr(s.Code)
+ } else {
+ e.addElemName(0x0F, name)
+ start := e.reserveInt32()
+ e.addStr(s.Code)
+ e.addDoc(reflect.ValueOf(s.Scope))
+ e.setInt32(start, int32(len(e.out)-start))
+ }
+
+ case time.Time:
+ // BSON stores datetimes (kind 0x09) as milliseconds since the Unix epoch.
+ e.addElemName(0x09, name)
+ e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
+
+ case url.URL:
+ e.addElemName(0x02, name)
+ e.addStr(s.String())
+
+ case undefined:
+ e.addElemName(0x06, name)
+
+ default:
+ e.addElemName(0x03, name)
+ e.addDoc(v)
+ }
+
+ default:
+ panic("Can't marshal " + v.Type().String() + " in a BSON document")
+ }
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of base types.
+
+func (e *encoder) addBinary(subtype byte, v []byte) {
+ if subtype == 0x02 {
+ // Obsolete subtype 0x02 stores a redundant inner length before the payload.
+ e.addInt32(int32(len(v) + 4))
+ e.addBytes(subtype)
+ e.addInt32(int32(len(v)))
+ } else {
+ e.addInt32(int32(len(v)))
+ e.addBytes(subtype)
+ }
+ e.addBytes(v...)
+}
+
+func (e *encoder) addStr(v string) {
+ e.addInt32(int32(len(v) + 1))
+ e.addCStr(v)
+}
+
+func (e *encoder) addCStr(v string) {
+ e.addBytes([]byte(v)...)
+ e.addBytes(0)
+}
+
+func (e *encoder) reserveInt32() (pos int) {
+ pos = len(e.out)
+ e.addBytes(0, 0, 0, 0)
+ return pos
+}
+
+func (e *encoder) setInt32(pos int, v int32) {
+ e.out[pos+0] = byte(v)
+ e.out[pos+1] = byte(v >> 8)
+ e.out[pos+2] = byte(v >> 16)
+ e.out[pos+3] = byte(v >> 24)
+}
+
+func (e *encoder) addInt32(v int32) {
+ u := uint32(v)
+ e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
+}
+
+func (e *encoder) addInt64(v int64) {
+ u := uint64(v)
+ e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
+ byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
+}
+
+func (e *encoder) addFloat64(v float64) {
+ e.addInt64(int64(math.Float64bits(v)))
+}
+
+func (e *encoder) addBytes(v ...byte) {
+ e.out = append(e.out, v...)
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/json.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/json.go
new file mode 100644
index 00000000000..09df8260a53
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/json.go
@@ -0,0 +1,380 @@
+package bson
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "gopkg.in/mgo.v2/internal/json"
+ "strconv"
+ "time"
+)
+
+// UnmarshalJSON unmarshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
+func UnmarshalJSON(data []byte, value interface{}) error {
+ d := json.NewDecoder(bytes.NewBuffer(data))
+ d.Extend(&jsonExt)
+ return d.Decode(value)
+}
+
+// MarshalJSON marshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
+func MarshalJSON(value interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ e := json.NewEncoder(&buf)
+ e.Extend(&jsonExt)
+ err := e.Encode(value)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// jdec is used internally by the JSON decoding functions
+// so they can unmarshal function-call syntax without falling
+// into endless recursion through the keyed-object handlers.
+func jdec(data []byte, value interface{}) error {
+ d := json.NewDecoder(bytes.NewBuffer(data))
+ d.Extend(&funcExt)
+ return d.Decode(value)
+}
+
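+// jsonExt implements the full extended JSON syntax. funcExt recognizes
+// only the shell-style function calls (ObjectId(...), ISODate(...), and
+// so on) and is folded into jsonExt at the end of init.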
+var jsonExt json.Extension
+var funcExt json.Extension
+
+// TODO
+// - Shell regular expressions ("/regexp/opts")
+
+func init() {
+ jsonExt.DecodeUnquotedKeys(true)
+ jsonExt.DecodeTrailingCommas(true)
+
+ funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
+ jsonExt.DecodeKeyed("$binary", jdecBinary)
+ jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
+ jsonExt.EncodeType([]byte(nil), jencBinarySlice)
+ jsonExt.EncodeType(Binary{}, jencBinaryType)
+
+ funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
+ funcExt.DecodeFunc("new Date", "$dateFunc", "S")
+ jsonExt.DecodeKeyed("$date", jdecDate)
+ jsonExt.DecodeKeyed("$dateFunc", jdecDate)
+ jsonExt.EncodeType(time.Time{}, jencDate)
+
+ funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
+ jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
+ jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)
+
+ funcExt.DecodeConst("undefined", Undefined)
+
+ jsonExt.DecodeKeyed("$regex", jdecRegEx)
+ jsonExt.EncodeType(RegEx{}, jencRegEx)
+
+ funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
+ jsonExt.DecodeKeyed("$oid", jdecObjectId)
+ jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
+ jsonExt.EncodeType(ObjectId(""), jencObjectId)
+
+ funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
+ jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)
+
+ funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
+ jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
+ jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
+ jsonExt.EncodeType(int64(0), jencNumberLong)
+ jsonExt.EncodeType(int(0), jencInt)
+
+ funcExt.DecodeConst("MinKey", MinKey)
+ funcExt.DecodeConst("MaxKey", MaxKey)
+ jsonExt.DecodeKeyed("$minKey", jdecMinKey)
+ jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
+ jsonExt.EncodeType(orderKey(0), jencMinMaxKey)
+
+ jsonExt.DecodeKeyed("$undefined", jdecUndefined)
+ jsonExt.EncodeType(Undefined, jencUndefined)
+
+ jsonExt.Extend(&funcExt)
+}
+
+func fbytes(format string, args ...interface{}) []byte {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, format, args...)
+ return buf.Bytes()
+}
+
+func jdecBinary(data []byte) (interface{}, error) {
+ var v struct {
+ Binary []byte `json:"$binary"`
+ Type string `json:"$type"`
+ Func struct {
+ Binary []byte `json:"$binary"`
+ Type int64 `json:"$type"`
+ } `json:"$binaryFunc"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+
+ var binData []byte
+ var binKind int64
+ if v.Type == "" && v.Binary == nil {
+ binData = v.Func.Binary
+ binKind = v.Func.Type
+ } else if v.Type == "" {
+ return v.Binary, nil
+ } else {
+ binData = v.Binary
+ binKind, err = strconv.ParseInt(v.Type, 0, 64)
+ if err != nil {
+ binKind = -1
+ }
+ }
+
+ if binKind == 0 {
+ return binData, nil
+ }
+ if binKind < 0 || binKind > 255 {
+ return nil, fmt.Errorf("invalid type in binary object: %s", data)
+ }
+
+ return Binary{Kind: byte(binKind), Data: binData}, nil
+}
+
+func jencBinarySlice(v interface{}) ([]byte, error) {
+ in := v.([]byte)
+ out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
+ base64.StdEncoding.Encode(out, in)
+ return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
+}
+
+func jencBinaryType(v interface{}) ([]byte, error) {
+ in := v.(Binary)
+ out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
+ base64.StdEncoding.Encode(out, in.Data)
+ return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
+}
+
+const jdateFormat = "2006-01-02T15:04:05.999Z"
+
+func jdecDate(data []byte) (interface{}, error) {
+ var v struct {
+ S string `json:"$date"`
+ Func struct {
+ S string
+ } `json:"$dateFunc"`
+ }
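+ // The error is deliberately ignored: if the string form is absent, the numeric form below is tried instead.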
+ _ = jdec(data, &v)
+ if v.S == "" {
+ v.S = v.Func.S
+ }
+ if v.S != "" {
+ for _, format := range []string{jdateFormat, "2006-01-02"} {
+ t, err := time.Parse(format, v.S)
+ if err == nil {
+ return t, nil
+ }
+ }
+ return nil, fmt.Errorf("cannot parse date: %q", v.S)
+ }
+
+ var vn struct {
+ Date struct {
+ N int64 `json:"$numberLong,string"`
+ } `json:"$date"`
+ Func struct {
+ S int64
+ } `json:"$dateFunc"`
+ }
+ err := jdec(data, &vn)
+ if err != nil {
+ return nil, fmt.Errorf("cannot parse date: %q", data)
+ }
+ n := vn.Date.N
+ if n == 0 {
+ n = vn.Func.S
+ }
+ return time.Unix(n/1000, n%1000*1e6).UTC(), nil
+}
+
+func jencDate(v interface{}) ([]byte, error) {
+ t := v.(time.Time)
+ return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
+}
+
+func jdecTimestamp(data []byte) (interface{}, error) {
+ var v struct {
+ Func struct {
+ T int32 `json:"t"`
+ I int32 `json:"i"`
+ } `json:"$timestamp"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
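+ // A MongoTimestamp packs the seconds into the high 32 bits and the increment ordinal into the low 32 bits.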
+ return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
+}
+
+func jencTimestamp(v interface{}) ([]byte, error) {
+ ts := uint64(v.(MongoTimestamp))
+ return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
+}
+
+func jdecRegEx(data []byte) (interface{}, error) {
+ var v struct {
+ Regex string `json:"$regex"`
+ Options string `json:"$options"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ return RegEx{v.Regex, v.Options}, nil
+}
+
+func jencRegEx(v interface{}) ([]byte, error) {
+ re := v.(RegEx)
+ type regex struct {
+ Regex string `json:"$regex"`
+ Options string `json:"$options"`
+ }
+ return json.Marshal(regex{re.Pattern, re.Options})
+}
+
+func jdecObjectId(data []byte) (interface{}, error) {
+ var v struct {
+ Id string `json:"$oid"`
+ Func struct {
+ Id string
+ } `json:"$oidFunc"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ if v.Id == "" {
+ v.Id = v.Func.Id
+ }
+ return ObjectIdHex(v.Id), nil
+}
+
+func jencObjectId(v interface{}) ([]byte, error) {
+ return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
+}
+
+func jdecDBRef(data []byte) (interface{}, error) {
+ // TODO Support unmarshaling $ref and $id into the input value.
+ var v struct {
+ Obj map[string]interface{} `json:"$dbrefFunc"`
+ }
+ // TODO Fix this. Must not be required.
+ v.Obj = make(map[string]interface{})
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ return v.Obj, nil
+}
+
+func jdecNumberLong(data []byte) (interface{}, error) {
+ var v struct {
+ N int64 `json:"$numberLong,string"`
+ Func struct {
+ N int64 `json:",string"`
+ } `json:"$numberLongFunc"`
+ }
+ var vn struct {
+ N int64 `json:"$numberLong"`
+ Func struct {
+ N int64
+ } `json:"$numberLongFunc"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ err = jdec(data, &vn)
+ v.N = vn.N
+ v.Func.N = vn.Func.N
+ }
+ if err != nil {
+ return nil, err
+ }
+ if v.N != 0 {
+ return v.N, nil
+ }
+ return v.Func.N, nil
+}
+
+func jencNumberLong(v interface{}) ([]byte, error) {
+ n := v.(int64)
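+ // Values above 2^53 cannot be represented exactly by a JSON double, so they are emitted as a quoted string.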
+ f := `{"$numberLong":"%d"}`
+ if n <= 1<<53 {
+ f = `{"$numberLong":%d}`
+ }
+ return fbytes(f, n), nil
+}
+
+func jencInt(v interface{}) ([]byte, error) {
+ n := v.(int)
+ f := `{"$numberLong":"%d"}`
+ if int64(n) <= 1<<53 {
+ f = `%d`
+ }
+ return fbytes(f, n), nil
+}
+
+func jdecMinKey(data []byte) (interface{}, error) {
+ var v struct {
+ N int64 `json:"$minKey"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ if v.N != 1 {
+ return nil, fmt.Errorf("invalid $minKey object: %s", data)
+ }
+ return MinKey, nil
+}
+
+func jdecMaxKey(data []byte) (interface{}, error) {
+ var v struct {
+ N int64 `json:"$maxKey"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ if v.N != 1 {
+ return nil, fmt.Errorf("invalid $maxKey object: %s", data)
+ }
+ return MaxKey, nil
+}
+
+func jencMinMaxKey(v interface{}) ([]byte, error) {
+ switch v.(orderKey) {
+ case MinKey:
+ return []byte(`{"$minKey":1}`), nil
+ case MaxKey:
+ return []byte(`{"$maxKey":1}`), nil
+ }
+ panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
+}
+
+func jdecUndefined(data []byte) (interface{}, error) {
+ var v struct {
+ B bool `json:"$undefined"`
+ }
+ err := jdec(data, &v)
+ if err != nil {
+ return nil, err
+ }
+ if !v.B {
+ return nil, fmt.Errorf("invalid $undefined object: %s", data)
+ }
+ return Undefined, nil
+}
+
+func jencUndefined(v interface{}) ([]byte, error) {
+ return []byte(`{"$undefined":true}`), nil
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/json_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/json_test.go
new file mode 100644
index 00000000000..866f51c34e3
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/json_test.go
@@ -0,0 +1,184 @@
+package bson_test
+
+import (
+ "gopkg.in/mgo.v2/bson"
+
+ . "gopkg.in/check.v1"
+ "reflect"
+ "strings"
+ "time"
+)
+
+type jsonTest struct {
+ a interface{} // value encoded into JSON (optional)
+ b string // JSON expected as output of <a>, and used as input to <c>
+ c interface{} // value expected from decoding <b>; defaults to <a>
+ e string // error string, if decoding <b> should fail
+}
+
+var jsonTests = []jsonTest{
+ // $binary
+ {
+ a: []byte("foo"),
+ b: `{"$binary":"Zm9v","$type":"0x0"}`,
+ }, {
+ a: bson.Binary{Kind: 2, Data: []byte("foo")},
+ b: `{"$binary":"Zm9v","$type":"0x2"}`,
+ }, {
+ b: `BinData(2,"Zm9v")`,
+ c: bson.Binary{Kind: 2, Data: []byte("foo")},
+ },
+
+ // $date
+ {
+ a: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
+ b: `{"$date":"2016-05-15T01:02:03.004Z"}`,
+ }, {
+ b: `{"$date": {"$numberLong": "1002"}}`,
+ c: time.Date(1970, 1, 1, 0, 0, 1, 2e6, time.UTC),
+ }, {
+ b: `ISODate("2016-05-15T01:02:03.004Z")`,
+ c: time.Date(2016, 5, 15, 1, 2, 3, 4000000, time.UTC),
+ }, {
+ b: `new Date(1000)`,
+ c: time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC),
+ }, {
+ b: `new Date("2016-05-15")`,
+ c: time.Date(2016, 5, 15, 0, 0, 0, 0, time.UTC),
+ },
+
+ // $timestamp
+ {
+ a: bson.MongoTimestamp(4294967298),
+ b: `{"$timestamp":{"t":1,"i":2}}`,
+ }, {
+ b: `Timestamp(1, 2)`,
+ c: bson.MongoTimestamp(4294967298),
+ },
+
+ // $regex
+ {
+ a: bson.RegEx{"pattern", "options"},
+ b: `{"$regex":"pattern","$options":"options"}`,
+ },
+
+ // $oid
+ {
+ a: bson.ObjectIdHex("0123456789abcdef01234567"),
+ b: `{"$oid":"0123456789abcdef01234567"}`,
+ }, {
+ b: `ObjectId("0123456789abcdef01234567")`,
+ c: bson.ObjectIdHex("0123456789abcdef01234567"),
+ },
+
+ // $ref (no special type)
+ {
+ b: `DBRef("name", "id")`,
+ c: map[string]interface{}{"$ref": "name", "$id": "id"},
+ },
+
+ // $numberLong
+ {
+ a: 123,
+ b: `123`,
+ }, {
+ a: int64(9007199254740992),
+ b: `{"$numberLong":9007199254740992}`,
+ }, {
+ a: int64(1<<53 + 1),
+ b: `{"$numberLong":"9007199254740993"}`,
+ }, {
+ a: 1<<53 + 1,
+ b: `{"$numberLong":"9007199254740993"}`,
+ c: int64(9007199254740993),
+ }, {
+ b: `NumberLong(9007199254740992)`,
+ c: int64(1 << 53),
+ }, {
+ b: `NumberLong("9007199254740993")`,
+ c: int64(1<<53 + 1),
+ },
+
+ // $minKey, $maxKey
+ {
+ a: bson.MinKey,
+ b: `{"$minKey":1}`,
+ }, {
+ a: bson.MaxKey,
+ b: `{"$maxKey":1}`,
+ }, {
+ b: `MinKey`,
+ c: bson.MinKey,
+ }, {
+ b: `MaxKey`,
+ c: bson.MaxKey,
+ }, {
+ b: `{"$minKey":0}`,
+ e: `invalid $minKey object: {"$minKey":0}`,
+ }, {
+ b: `{"$maxKey":0}`,
+ e: `invalid $maxKey object: {"$maxKey":0}`,
+ },
+
+ {
+ a: bson.Undefined,
+ b: `{"$undefined":true}`,
+ }, {
+ b: `undefined`,
+ c: bson.Undefined,
+ }, {
+ b: `{"v": undefined}`,
+ c: struct{ V interface{} }{bson.Undefined},
+ },
+
+ // Unquoted keys and trailing commas
+ {
+ b: `{$foo: ["bar",],}`,
+ c: map[string]interface{}{"$foo": []interface{}{"bar"}},
+ },
+}
+
+func (s *S) TestJSON(c *C) {
+ for i, item := range jsonTests {
+ c.Logf("------------ (#%d)", i)
+ c.Logf("A: %#v", item.a)
+ c.Logf("B: %#v", item.b)
+
+ if item.c == nil {
+ item.c = item.a
+ } else {
+ c.Logf("C: %#v", item.c)
+ }
+ if item.e != "" {
+ c.Logf("E: %s", item.e)
+ }
+
+ if item.a != nil {
+ data, err := bson.MarshalJSON(item.a)
+ c.Assert(err, IsNil)
+ c.Logf("Dumped: %#v", string(data))
+ c.Assert(strings.TrimSuffix(string(data), "\n"), Equals, item.b)
+ }
+
+ var zero interface{}
+ if item.c == nil {
+ zero = &struct{}{}
+ } else {
+ zero = reflect.New(reflect.TypeOf(item.c)).Interface()
+ }
+ err := bson.UnmarshalJSON([]byte(item.b), zero)
+ if item.e != "" {
+ c.Assert(err, NotNil)
+ c.Assert(err.Error(), Equals, item.e)
+ continue
+ }
+ c.Assert(err, IsNil)
+ zerov := reflect.ValueOf(zero)
+ value := zerov.Interface()
+ if zerov.Kind() == reflect.Ptr {
+ value = zerov.Elem().Interface()
+ }
+ c.Logf("Loaded: %#v", value)
+ c.Assert(value, DeepEquals, item.c)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/specdata/update.sh b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/specdata/update.sh
new file mode 100755
index 00000000000..1efd3d3b66d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/specdata/update.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+set -e
+
+if [ ! -d specifications ]; then
+ git clone -b bson git@github.com:jyemin/specifications
+fi
+
+TESTFILE="../specdata_test.go"
+
+cat <<END > $TESTFILE
+package bson_test
+
+var specTests = []string{
+END
+
+for file in specifications/source/bson/tests/*.yml; do
+ (
+ echo '`'
+ cat $file
+ echo -n '`,'
+ ) >> $TESTFILE
+done
+
+echo '}' >> $TESTFILE
+
+gofmt -w $TESTFILE
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/specdata_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/specdata_test.go
new file mode 100644
index 00000000000..513f9b209c7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bson/specdata_test.go
@@ -0,0 +1,241 @@
+package bson_test
+
+var specTests = []string{
+ `
+---
+description: "Array type"
+documents:
+ -
+ decoded:
+ a : []
+ encoded: 0D000000046100050000000000
+ -
+ decoded:
+ a: [10]
+ encoded: 140000000461000C0000001030000A0000000000
+ -
+ # Decode an array that uses an empty string as the key
+ decodeOnly : true
+ decoded:
+ a: [10]
+ encoded: 130000000461000B00000010000A0000000000
+ -
+ # Decode an array that uses a non-numeric string as the key
+ decodeOnly : true
+ decoded:
+ a: [10]
+ encoded: 150000000461000D000000106162000A0000000000
+
+
+`, `
+---
+description: "Boolean type"
+documents:
+ -
+ encoded: "090000000862000100"
+ decoded: { "b" : true }
+ -
+ encoded: "090000000862000000"
+ decoded: { "b" : false }
+
+
+ `, `
+---
+description: "Corrupted BSON"
+documents:
+ -
+ encoded: "09000000016600"
+ error: "truncated double"
+ -
+ encoded: "09000000026600"
+ error: "truncated string"
+ -
+ encoded: "09000000036600"
+ error: "truncated document"
+ -
+ encoded: "09000000046600"
+ error: "truncated array"
+ -
+ encoded: "09000000056600"
+ error: "truncated binary"
+ -
+ encoded: "09000000076600"
+ error: "truncated objectid"
+ -
+ encoded: "09000000086600"
+ error: "truncated boolean"
+ -
+ encoded: "09000000096600"
+ error: "truncated date"
+ -
+ encoded: "090000000b6600"
+ error: "truncated regex"
+ -
+ encoded: "090000000c6600"
+ error: "truncated db pointer"
+ -
+ encoded: "0C0000000d6600"
+ error: "truncated javascript"
+ -
+ encoded: "0C0000000e6600"
+ error: "truncated symbol"
+ -
+ encoded: "0C0000000f6600"
+ error: "truncated javascript with scope"
+ -
+ encoded: "0C000000106600"
+ error: "truncated int32"
+ -
+ encoded: "0C000000116600"
+ error: "truncated timestamp"
+ -
+ encoded: "0C000000126600"
+ error: "truncated int64"
+ -
+ encoded: "0400000000"
+ error: basic
+ -
+ encoded: "0500000001"
+ error: basic
+ -
+ encoded: "05000000"
+ error: basic
+ -
+ encoded: "0700000002610078563412"
+ error: basic
+ -
+ encoded: "090000001061000500"
+ error: basic
+ -
+ encoded: "00000000000000000000"
+ error: basic
+ -
+ encoded: "1300000002666f6f00040000006261720000"
+ error: "basic"
+ -
+ encoded: "1800000003666f6f000f0000001062617200ffffff7f0000"
+ error: basic
+ -
+ encoded: "1500000003666f6f000c0000000862617200010000"
+ error: basic
+ -
+ encoded: "1c00000003666f6f001200000002626172000500000062617a000000"
+ error: basic
+ -
+ encoded: "1000000002610004000000616263ff00"
+ error: string is not null-terminated
+ -
+ encoded: "0c0000000200000000000000"
+ error: bad_string_length
+ -
+ encoded: "120000000200ffffffff666f6f6261720000"
+ error: bad_string_length
+ -
+ encoded: "0c0000000e00000000000000"
+ error: bad_string_length
+ -
+ encoded: "120000000e00ffffffff666f6f6261720000"
+ error: bad_string_length
+ -
+ encoded: "180000000c00fa5bd841d6585d9900"
+ error: ""
+ -
+ encoded: "1e0000000c00ffffffff666f6f626172005259b56afa5bd841d6585d9900"
+ error: bad_string_length
+ -
+ encoded: "0c0000000d00000000000000"
+ error: bad_string_length
+ -
+ encoded: "0c0000000d00ffffffff0000"
+ error: bad_string_length
+ -
+ encoded: "1c0000000f001500000000000000000c000000020001000000000000"
+ error: bad_string_length
+ -
+ encoded: "1c0000000f0015000000ffffffff000c000000020001000000000000"
+ error: bad_string_length
+ -
+ encoded: "1c0000000f001500000001000000000c000000020000000000000000"
+ error: bad_string_length
+ -
+ encoded: "1c0000000f001500000001000000000c0000000200ffffffff000000"
+ error: bad_string_length
+ -
+ encoded: "0E00000008616263646566676869707172737475"
+ error: "Run-on CString"
+ -
+ encoded: "0100000000"
+ error: "An object size that's too small to even include the object size, but is correctly encoded, along with a correct EOO (and no data)"
+ -
+ encoded: "1a0000000e74657374000c00000068656c6c6f20776f726c6400000500000000"
+ error: "One object, but with object size listed smaller than it is in the data"
+ -
+ encoded: "05000000"
+ error: "One object, missing the EOO at the end"
+ -
+ encoded: "0500000001"
+ error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01"
+ -
+ encoded: "05000000ff"
+ error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff"
+ -
+ encoded: "0500000070"
+ error: "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70"
+ -
+ encoded: "07000000000000"
+ error: "Invalid BSON type low range"
+ -
+ encoded: "07000000800000"
+ error: "Invalid BSON type high range"
+ -
+ encoded: "090000000862000200"
+ error: "Invalid boolean value of 2"
+ -
+ encoded: "09000000086200ff00"
+ error: "Invalid boolean value of -1"
+ `, `
+---
+description: "Int32 type"
+documents:
+ -
+ decoded:
+ i: -2147483648
+ encoded: 0C0000001069000000008000
+ -
+ decoded:
+ i: 2147483647
+ encoded: 0C000000106900FFFFFF7F00
+ -
+ decoded:
+ i: -1
+ encoded: 0C000000106900FFFFFFFF00
+ -
+ decoded:
+ i: 0
+ encoded: 0C0000001069000000000000
+ -
+ decoded:
+ i: 1
+ encoded: 0C0000001069000100000000
+
+`, `
+---
+description: "String type"
+documents:
+ -
+ decoded:
+ s : ""
+ encoded: 0D000000027300010000000000
+ -
+ decoded:
+ s: "a"
+ encoded: 0E00000002730002000000610000
+ -
+ decoded:
+ s: "This is a string"
+ encoded: 1D0000000273001100000054686973206973206120737472696E670000
+ -
+ decoded:
+ s: "κόσμε"
+ encoded: 180000000273000C000000CEBAE1BDB9CF83CEBCCEB50000
+`}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bulk.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bulk.go
new file mode 100644
index 00000000000..072a5206ac2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bulk.go
@@ -0,0 +1,351 @@
+package mgo
+
+import (
+ "bytes"
+ "sort"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
+// Bulk represents an operation that can be prepared with several
+// orthogonal changes before being delivered to the server.
+//
+// MongoDB servers older than version 2.6 do not have proper support for bulk
+// operations, so the driver maps its API onto whatever functionality those
+// releases do offer. In particular, on those releases updates and removals
+// are sent individually, and inserts are sent in bulk but with suboptimal
+// error reporting compared to more recent versions of the server.
+// See the documentation of BulkErrorCase for details.
+//
+// Relevant documentation:
+//
+// http://blog.mongodb.org/post/84922794768/mongodbs-new-bulk-api
+//
+type Bulk struct {
+ c *Collection
+ opcount int
+ actions []bulkAction
+ ordered bool
+}
+
+type bulkOp int
+
+const (
+ bulkInsert bulkOp = iota + 1
+ bulkUpdate
+ bulkUpdateAll
+ bulkRemove
+)
+
+type bulkAction struct {
+ op bulkOp
+ docs []interface{}
+ idxs []int
+}
+
+type bulkUpdateOp []interface{}
+type bulkDeleteOp []interface{}
+
+// BulkResult holds the results for a bulk operation.
+type BulkResult struct {
+ Matched int
+ Modified int // Available only for MongoDB 2.6+
+
+ // Be conservative while we understand exactly how to report these
+ // results in a useful and convenient way, and also how to emulate
+ // them with prior servers.
+ private bool
+}
+
+// BulkError holds an error returned from running a Bulk operation.
+// Individual errors may be obtained and inspected via the Cases method.
+type BulkError struct {
+ ecases []BulkErrorCase
+}
+
+func (e *BulkError) Error() string {
+ if len(e.ecases) == 0 {
+ return "invalid BulkError instance: no errors"
+ }
+ if len(e.ecases) == 1 {
+ return e.ecases[0].Err.Error()
+ }
+ msgs := make([]string, 0, len(e.ecases))
+ seen := make(map[string]bool)
+ for _, ecase := range e.ecases {
+ msg := ecase.Err.Error()
+ if !seen[msg] {
+ seen[msg] = true
+ msgs = append(msgs, msg)
+ }
+ }
+ if len(msgs) == 1 {
+ return msgs[0]
+ }
+ var buf bytes.Buffer
+ buf.WriteString("multiple errors in bulk operation:\n")
+ for _, msg := range msgs {
+ buf.WriteString(" - ")
+ buf.WriteString(msg)
+ buf.WriteByte('\n')
+ }
+ return buf.String()
+}
+
+type bulkErrorCases []BulkErrorCase
+
+func (slice bulkErrorCases) Len() int { return len(slice) }
+func (slice bulkErrorCases) Less(i, j int) bool { return slice[i].Index < slice[j].Index }
+func (slice bulkErrorCases) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] }
+
+// BulkErrorCase holds an individual error found while attempting a single change
+// within a bulk operation, and the position in which it was enqueued.
+//
+// MongoDB servers older than version 2.6 do not have proper support for bulk
+// operations, so the driver maps its API onto whatever functionality those
+// releases do offer. In particular, only the last error is reported for
+// bulk inserts, without any positional information, so the Index field is
+// set to -1 in those cases.
+type BulkErrorCase struct {
+ Index int // Position of operation that failed, or -1 if unknown.
+ Err error
+}
+
+// Cases returns all individual errors found while attempting the requested changes.
+//
+// See the documentation of BulkErrorCase for limitations in older MongoDB releases.
+func (e *BulkError) Cases() []BulkErrorCase {
+ return e.ecases
+}
+
+// Bulk returns a value to prepare the execution of a bulk operation.
+func (c *Collection) Bulk() *Bulk {
+ return &Bulk{c: c, ordered: true}
+}
+
+// Unordered puts the bulk operation in unordered mode.
+//
+// In unordered mode the individual operations may be sent
+// out of order, which means later operations may proceed
+// even if earlier ones have failed.
+func (b *Bulk) Unordered() {
+ b.ordered = false
+}
+
+func (b *Bulk) action(op bulkOp, opcount int) *bulkAction {
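+ // Try to append to the most recent action when the op matches; in
+ // unordered mode any earlier action of the same op may be reused.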
+ var action *bulkAction
+ if len(b.actions) > 0 && b.actions[len(b.actions)-1].op == op {
+ action = &b.actions[len(b.actions)-1]
+ } else if !b.ordered {
+ for i := range b.actions {
+ if b.actions[i].op == op {
+ action = &b.actions[i]
+ break
+ }
+ }
+ }
+ if action == nil {
+ b.actions = append(b.actions, bulkAction{op: op})
+ action = &b.actions[len(b.actions)-1]
+ }
+ for i := 0; i < opcount; i++ {
+ action.idxs = append(action.idxs, b.opcount)
+ b.opcount++
+ }
+ return action
+}
+
+// Insert queues up the provided documents for insertion.
+func (b *Bulk) Insert(docs ...interface{}) {
+ action := b.action(bulkInsert, len(docs))
+ action.docs = append(action.docs, docs...)
+}
+
+// Remove queues up the provided selectors for removing matching documents.
+// Each selector removes at most one matching document.
+func (b *Bulk) Remove(selectors ...interface{}) {
+ action := b.action(bulkRemove, len(selectors))
+ for _, selector := range selectors {
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &deleteOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Flags: 1,
+ Limit: 1,
+ })
+ }
+}
+
+// RemoveAll queues up the provided selectors for removing all matching documents.
+// Each selector removes every document it matches.
+func (b *Bulk) RemoveAll(selectors ...interface{}) {
+ action := b.action(bulkRemove, len(selectors))
+ for _, selector := range selectors {
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &deleteOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Flags: 0,
+ Limit: 0,
+ })
+ }
+}
+
+// Update queues up the provided pairs of updating instructions.
+// The first element of each pair selects the documents to be
+// updated, and the second element defines how to update them.
+// Each pair updates at most one matching document.
+func (b *Bulk) Update(pairs ...interface{}) {
+ if len(pairs)%2 != 0 {
+ panic("Bulk.Update requires an even number of parameters")
+ }
+ action := b.action(bulkUpdate, len(pairs)/2)
+ for i := 0; i < len(pairs); i += 2 {
+ selector := pairs[i]
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &updateOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Update: pairs[i+1],
+ })
+ }
+}
+
+// UpdateAll queues up the provided pairs of updating instructions.
+// The first element of each pair selects the documents to be
+// updated, and the second element defines how to update them.
+// Each pair updates all documents matching its selector.
+func (b *Bulk) UpdateAll(pairs ...interface{}) {
+ if len(pairs)%2 != 0 {
+ panic("Bulk.UpdateAll requires an even number of parameters")
+ }
+ action := b.action(bulkUpdate, len(pairs)/2)
+ for i := 0; i < len(pairs); i += 2 {
+ selector := pairs[i]
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &updateOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Update: pairs[i+1],
+ Flags: 2,
+ Multi: true,
+ })
+ }
+}
+
+// Upsert queues up the provided pairs of upserting instructions.
+// The first element of each pair selects the documents to be
+// updated, and the second element defines how to update them.
+// Each pair updates at most one matching document, inserting a
+// new document when no match is found.
+func (b *Bulk) Upsert(pairs ...interface{}) {
+ if len(pairs)%2 != 0 {
+ panic("Bulk.Update requires an even number of parameters")
+ }
+ action := b.action(bulkUpdate, len(pairs)/2)
+ for i := 0; i < len(pairs); i += 2 {
+ selector := pairs[i]
+ if selector == nil {
+ selector = bson.D{}
+ }
+ action.docs = append(action.docs, &updateOp{
+ Collection: b.c.FullName,
+ Selector: selector,
+ Update: pairs[i+1],
+ Flags: 1,
+ Upsert: true,
+ })
+ }
+}
+
+// Run runs all the operations queued up.
+//
+// If an error is reported on an unordered bulk operation, the error value may
+// be an aggregation of all issues observed. As an exception, Insert
+// operations running on MongoDB versions prior to 2.6 report only the
+// last error, due to a limitation in the wire protocol.
+func (b *Bulk) Run() (*BulkResult, error) {
+ var result BulkResult
+ var berr BulkError
+ var failed bool
+ for i := range b.actions {
+ action := &b.actions[i]
+ var ok bool
+ switch action.op {
+ case bulkInsert:
+ ok = b.runInsert(action, &result, &berr)
+ case bulkUpdate:
+ ok = b.runUpdate(action, &result, &berr)
+ case bulkRemove:
+ ok = b.runRemove(action, &result, &berr)
+ default:
+ panic("unknown bulk operation")
+ }
+ if !ok {
+ failed = true
+ if b.ordered {
+ break
+ }
+ }
+ }
+ if failed {
+ sort.Sort(bulkErrorCases(berr.ecases))
+ return nil, &berr
+ }
+ return &result, nil
+}
+
+func (b *Bulk) runInsert(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+ op := &insertOp{b.c.FullName, action.docs, 0}
+ if !b.ordered {
+ op.flags = 1 // ContinueOnError
+ }
+ lerr, err := b.c.writeOp(op, b.ordered)
+ return b.checkSuccess(action, berr, lerr, err)
+}
+
+func (b *Bulk) runUpdate(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+ lerr, err := b.c.writeOp(bulkUpdateOp(action.docs), b.ordered)
+ if lerr != nil {
+ result.Matched += lerr.N
+ result.Modified += lerr.modified
+ }
+ return b.checkSuccess(action, berr, lerr, err)
+}
+
+func (b *Bulk) runRemove(action *bulkAction, result *BulkResult, berr *BulkError) bool {
+ lerr, err := b.c.writeOp(bulkDeleteOp(action.docs), b.ordered)
+ if lerr != nil {
+ result.Matched += lerr.N
+ result.Modified += lerr.modified
+ }
+ return b.checkSuccess(action, berr, lerr, err)
+}
+
+func (b *Bulk) checkSuccess(action *bulkAction, berr *BulkError, lerr *LastError, err error) bool {
+ if lerr != nil && len(lerr.ecases) > 0 {
+ for i := 0; i < len(lerr.ecases); i++ {
+ // Map back from the local error index into the visible one.
+ ecase := lerr.ecases[i]
+ idx := ecase.Index
+ if idx >= 0 {
+ idx = action.idxs[idx]
+ }
+ berr.ecases = append(berr.ecases, BulkErrorCase{idx, ecase.Err})
+ }
+ return false
+ } else if err != nil {
+ for i := 0; i < len(action.idxs); i++ {
+ berr.ecases = append(berr.ecases, BulkErrorCase{action.idxs[i], err})
+ }
+ return false
+ }
+ return true
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bulk_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bulk_test.go
new file mode 100644
index 00000000000..cb280bbfa40
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/bulk_test.go
@@ -0,0 +1,504 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2015 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+ . "gopkg.in/check.v1"
+ "gopkg.in/mgo.v2"
+)
+
+func (s *S) TestBulkInsert(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ bulk := coll.Bulk()
+ bulk.Insert(M{"n": 1})
+ bulk.Insert(M{"n": 2}, M{"n": 3})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
+}
+
+func (s *S) TestBulkInsertError(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ bulk := coll.Bulk()
+ bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
+ _, err = bulk.Run()
+ c.Assert(err, ErrorMatches, ".*duplicate key.*")
+ c.Assert(mgo.IsDup(err), Equals, true)
+
+ type doc struct {
+ N int `_id`
+ }
+ var res []doc
+ err = coll.Find(nil).Sort("_id").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{1}, {2}})
+}
+
+func (s *S) TestBulkInsertErrorUnordered(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ bulk := coll.Bulk()
+ bulk.Unordered()
+ bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2}, M{"_id": 3})
+ _, err = bulk.Run()
+ c.Assert(err, ErrorMatches, ".*duplicate key.*")
+
+ type doc struct {
+ N int `_id`
+ }
+ var res []doc
+ err = coll.Find(nil).Sort("_id").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{1}, {2}, {3}})
+}
+
+func (s *S) TestBulkInsertErrorUnorderedSplitBatch(c *C) {
+ // The server has a batch limit of 1000 documents when using write commands.
+ // This artificial limit did not exist with the old wire protocol, so to
+// avoid compatibility issues the implementation internally splits batches
+// into the proper size and delivers them one by one. This test ensures that
+ // the behavior of unordered (that is, continue on error) remains correct
+ // when errors happen and there are batches left.
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ bulk := coll.Bulk()
+ bulk.Unordered()
+
+ const total = 4096
+ type doc struct {
+ Id int `_id`
+ }
+ docs := make([]interface{}, total)
+ for i := 0; i < total; i++ {
+ docs[i] = doc{i}
+ }
+ docs[1] = doc{0}
+ bulk.Insert(docs...)
+ _, err = bulk.Run()
+ c.Assert(err, ErrorMatches, ".*duplicate key.*")
+
+ n, err := coll.Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, total-1)
+
+ var res doc
+ err = coll.FindId(1500).One(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res.Id, Equals, 1500)
+}
+
+func (s *S) TestBulkErrorString(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ // If it's just the same string multiple times, join it into a single message.
+ bulk := coll.Bulk()
+ bulk.Unordered()
+ bulk.Insert(M{"_id": 1}, M{"_id": 2}, M{"_id": 2})
+ _, err = bulk.Run()
+ c.Assert(err, ErrorMatches, ".*duplicate key.*")
+ c.Assert(err, Not(ErrorMatches), ".*duplicate key.*duplicate key")
+ c.Assert(mgo.IsDup(err), Equals, true)
+
+ // With matching errors but different messages, present them all.
+ bulk = coll.Bulk()
+ bulk.Unordered()
+ bulk.Insert(M{"_id": "dupone"}, M{"_id": "dupone"}, M{"_id": "duptwo"}, M{"_id": "duptwo"})
+ _, err = bulk.Run()
+ if s.versionAtLeast(2, 6) {
+ c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n( - .*duplicate.*\n){2}$")
+ c.Assert(err, ErrorMatches, "(?s).*dupone.*")
+ c.Assert(err, ErrorMatches, "(?s).*duptwo.*")
+ } else {
+ // Wire protocol query doesn't return all errors.
+ c.Assert(err, ErrorMatches, ".*duplicate.*")
+ }
+ c.Assert(mgo.IsDup(err), Equals, true)
+
+ // With mixed errors, present them all.
+ bulk = coll.Bulk()
+ bulk.Unordered()
+ bulk.Insert(M{"_id": 1}, M{"_id": []int{2}})
+ _, err = bulk.Run()
+ if s.versionAtLeast(2, 6) {
+ c.Assert(err, ErrorMatches, "multiple errors in bulk operation:\n - .*duplicate.*\n - .*array.*\n$")
+ } else {
+ // Wire protocol query doesn't return all errors.
+ c.Assert(err, ErrorMatches, ".*array.*")
+ }
+ c.Assert(mgo.IsDup(err), Equals, false)
+}
+
+func (s *S) TestBulkErrorCases_2_6(c *C) {
+ if !s.versionAtLeast(2, 6) {
+ c.Skip("2.4- has poor bulk reporting")
+ }
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ bulk := coll.Bulk()
+ bulk.Unordered()
+
+ // There's a limit of 1000 operations per command, so
+ // this forces the more complex indexing logic to act.
+ for i := 0; i < 1010; i++ {
+ switch i {
+ case 3, 14:
+ bulk.Insert(M{"_id": "dupone"})
+ case 5, 106:
+ bulk.Update(M{"_id": i - 1}, M{"$set": M{"_id": 4}})
+ case 7, 1008:
+ bulk.Insert(M{"_id": "duptwo"})
+ default:
+ bulk.Insert(M{"_id": i})
+ }
+ }
+
+ _, err = bulk.Run()
+ ecases := err.(*mgo.BulkError).Cases()
+
+ c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*")
+ c.Check(ecases[0].Index, Equals, 14)
+ c.Check(ecases[1].Err, ErrorMatches, ".*update.*_id.*")
+ c.Check(ecases[1].Index, Equals, 106)
+ c.Check(ecases[2].Err, ErrorMatches, ".*duplicate.*duptwo.*")
+ c.Check(ecases[2].Index, Equals, 1008)
+}
+
+func (s *S) TestBulkErrorCases_2_4(c *C) {
+ if s.versionAtLeast(2, 6) {
+ c.Skip("2.6+ has better reporting")
+ }
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ bulk := coll.Bulk()
+ bulk.Unordered()
+
+ // There's a limit of 1000 operations per command, so
+ // this forces the more complex indexing logic to act.
+ for i := 0; i < 1010; i++ {
+ switch i {
+ case 3, 14:
+ bulk.Insert(M{"_id": "dupone"})
+ case 5:
+ bulk.Update(M{"_id": i - 1}, M{"$set": M{"n": 4}})
+ case 106:
+ bulk.Update(M{"_id": i - 1}, M{"$bogus": M{"n": 4}})
+ case 7, 1008:
+ bulk.Insert(M{"_id": "duptwo"})
+ default:
+ bulk.Insert(M{"_id": i})
+ }
+ }
+
+ _, err = bulk.Run()
+ ecases := err.(*mgo.BulkError).Cases()
+
+ c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*duptwo.*")
+ c.Check(ecases[0].Index, Equals, -1)
+ c.Check(ecases[1].Err, ErrorMatches, `.*\$bogus.*`)
+ c.Check(ecases[1].Index, Equals, 106)
+}
+
+func (s *S) TestBulkErrorCasesOrdered(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ bulk := coll.Bulk()
+
+ // There's a limit of 1000 operations per command, so
+ // this forces the more complex indexing logic to act.
+ for i := 0; i < 20; i++ {
+ switch i {
+ case 3, 14:
+ bulk.Insert(M{"_id": "dupone"})
+ case 7, 17:
+ bulk.Insert(M{"_id": "duptwo"})
+ default:
+ bulk.Insert(M{"_id": i})
+ }
+ }
+
+ _, err = bulk.Run()
+ ecases := err.(*mgo.BulkError).Cases()
+
+ c.Check(ecases[0].Err, ErrorMatches, ".*duplicate.*dupone.*")
+ if s.versionAtLeast(2, 6) {
+ c.Check(ecases[0].Index, Equals, 14)
+ } else {
+ c.Check(ecases[0].Index, Equals, -1)
+ }
+ c.Check(ecases, HasLen, 1)
+}
+
+func (s *S) TestBulkUpdate(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.Update(M{"n": 1}, M{"$set": M{"n": 1}})
+ bulk.Update(M{"n": 2}, M{"$set": M{"n": 20}})
+ bulk.Update(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
+ bulk.Update(M{"n": 1}, M{"$set": M{"n": 10}}, M{"n": 3}, M{"$set": M{"n": 30}})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r.Matched, Equals, 4)
+ if s.versionAtLeast(2, 6) {
+ c.Assert(r.Modified, Equals, 3)
+ }
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{10}, {20}, {30}})
+}
+
+func (s *S) TestBulkUpdateError(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.Update(
+ M{"n": 1}, M{"$set": M{"n": 10}},
+ M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
+ M{"n": 3}, M{"$set": M{"n": 30}},
+ )
+ r, err := bulk.Run()
+ c.Assert(err, ErrorMatches, ".*_id.*")
+ c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{2}, {3}, {10}})
+}
+
+func (s *S) TestBulkUpdateErrorUnordered(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.Unordered()
+ bulk.Update(
+ M{"n": 1}, M{"$set": M{"n": 10}},
+ M{"n": 2}, M{"$set": M{"n": 20, "_id": 20}},
+ M{"n": 3}, M{"$set": M{"n": 30}},
+ )
+ r, err := bulk.Run()
+ c.Assert(err, ErrorMatches, ".*_id.*")
+ c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{2}, {10}, {30}})
+}
+
+func (s *S) TestBulkUpdateAll(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.UpdateAll(M{"n": 1}, M{"$set": M{"n": 10}})
+ bulk.UpdateAll(M{"n": 2}, M{"$set": M{"n": 2}}) // Won't change.
+ bulk.UpdateAll(M{"n": 5}, M{"$set": M{"n": 50}}) // Won't match.
+ bulk.UpdateAll(M{}, M{"$inc": M{"n": 1}}, M{"n": 11}, M{"$set": M{"n": 5}})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r.Matched, Equals, 6)
+ if s.versionAtLeast(2, 6) {
+ c.Assert(r.Modified, Equals, 5)
+ }
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{3}, {4}, {5}})
+}
+
+func (s *S) TestBulkMixedUnordered(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ // Abuse undefined behavior to ensure the desired implementation is in place.
+ bulk := coll.Bulk()
+ bulk.Unordered()
+ bulk.Insert(M{"n": 1})
+ bulk.Update(M{"n": 2}, M{"$inc": M{"n": 1}})
+ bulk.Insert(M{"n": 2})
+ bulk.Update(M{"n": 3}, M{"$inc": M{"n": 1}})
+ bulk.Update(M{"n": 1}, M{"$inc": M{"n": 1}})
+ bulk.Insert(M{"n": 3})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r.Matched, Equals, 3)
+ if s.versionAtLeast(2, 6) {
+ c.Assert(r.Modified, Equals, 3)
+ }
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{2}, {3}, {4}})
+}
+
+func (s *S) TestBulkUpsert(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.Upsert(M{"n": 2}, M{"$set": M{"n": 20}})
+ bulk.Upsert(M{"n": 4}, M{"$set": M{"n": 40}}, M{"n": 3}, M{"$set": M{"n": 30}})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r, FitsTypeOf, &mgo.BulkResult{})
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{1}, {20}, {30}, {40}})
+}
+
+func (s *S) TestBulkRemove(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.Remove(M{"n": 1})
+ bulk.Remove(M{"n": 2}, M{"n": 4})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r.Matched, Equals, 3)
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{3}, {4}})
+}
+
+func (s *S) TestBulkRemoveAll(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1}, M{"n": 2}, M{"n": 3}, M{"n": 4}, M{"n": 4})
+ c.Assert(err, IsNil)
+
+ bulk := coll.Bulk()
+ bulk.RemoveAll(M{"n": 1})
+ bulk.RemoveAll(M{"n": 2}, M{"n": 4})
+ r, err := bulk.Run()
+ c.Assert(err, IsNil)
+ c.Assert(r.Matched, Equals, 4)
+
+ type doc struct{ N int }
+ var res []doc
+ err = coll.Find(nil).Sort("n").All(&res)
+ c.Assert(err, IsNil)
+ c.Assert(res, DeepEquals, []doc{{3}})
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/cluster.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/cluster.go
new file mode 100644
index 00000000000..c3bf8b01375
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/cluster.go
@@ -0,0 +1,682 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
+// ---------------------------------------------------------------------------
+// Mongo cluster encapsulation.
+//
+// A cluster enables communication with one or more servers participating
+// in a mongo cluster. This works with individual servers, a replica set,
+// a replica pair, one or multiple mongos routers, etc.
+
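+// mongoCluster tracks the known topology and hands out sockets to sessions.
+// A hedged sketch of its lifecycle, using only names defined in this file
+// (someDialer stands in for a real dialer and is hypothetical):
+//
+//	cluster := newCluster(seeds, false, false, someDialer, "rs1")
+//	// ... sessions obtain connections via cluster.AcquireSocket ...
+//	cluster.Release() // closes all servers once the last reference is gone
+//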
+type mongoCluster struct {
+ sync.RWMutex
+ serverSynced sync.Cond
+ userSeeds []string
+ dynaSeeds []string
+ servers mongoServers
+ masters mongoServers
+ references int
+ syncing bool
+ direct bool
+ failFast bool
+ syncCount uint
+ setName string
+ cachedIndex map[string]bool
+ sync chan bool
+ dial dialer
+}
+
+func newCluster(userSeeds []string, direct, failFast bool, dial dialer, setName string) *mongoCluster {
+ cluster := &mongoCluster{
+ userSeeds: userSeeds,
+ references: 1,
+ direct: direct,
+ failFast: failFast,
+ dial: dial,
+ setName: setName,
+ }
+ cluster.serverSynced.L = cluster.RWMutex.RLocker()
+ cluster.sync = make(chan bool, 1)
+ stats.cluster(+1)
+ go cluster.syncServersLoop()
+ return cluster
+}
+
+// Acquire increases the reference count for the cluster.
+func (cluster *mongoCluster) Acquire() {
+ cluster.Lock()
+ cluster.references++
+ debugf("Cluster %p acquired (refs=%d)", cluster, cluster.references)
+ cluster.Unlock()
+}
+
+// Release decreases the reference count for the cluster. Once
+// it reaches zero, all servers will be closed.
+func (cluster *mongoCluster) Release() {
+ cluster.Lock()
+ if cluster.references == 0 {
+ panic("cluster.Release() with references == 0")
+ }
+ cluster.references--
+ debugf("Cluster %p released (refs=%d)", cluster, cluster.references)
+ if cluster.references == 0 {
+ for _, server := range cluster.servers.Slice() {
+ server.Close()
+ }
+ // Wake up the sync loop so it can die.
+ cluster.syncServers()
+ stats.cluster(-1)
+ }
+ cluster.Unlock()
+}
+
+func (cluster *mongoCluster) LiveServers() (servers []string) {
+ cluster.RLock()
+ for _, serv := range cluster.servers.Slice() {
+ servers = append(servers, serv.Addr)
+ }
+ cluster.RUnlock()
+ return servers
+}
+
+func (cluster *mongoCluster) removeServer(server *mongoServer) {
+ cluster.Lock()
+ cluster.masters.Remove(server)
+ other := cluster.servers.Remove(server)
+ cluster.Unlock()
+ if other != nil {
+ other.Close()
+ log("Removed server ", server.Addr, " from cluster.")
+ }
+ server.Close()
+}
+
+type isMasterResult struct {
+ IsMaster bool
+ Secondary bool
+ Primary string
+ Hosts []string
+ Passives []string
+ Tags bson.D
+ Msg string
+ SetName string `bson:"setName"`
+ MaxWireVersion int `bson:"maxWireVersion"`
+}
+
+func (cluster *mongoCluster) isMaster(socket *mongoSocket, result *isMasterResult) error {
+	// Monotonic lets it talk to a slave and still hold the socket.
+ session := newSession(Monotonic, cluster, 10*time.Second)
+ session.setSocket(socket)
+ err := session.Run("ismaster", result)
+ session.Close()
+ return err
+}
+
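+// possibleTimeout matches errors that can report whether they were caused
+// by a timeout. Notably net.Error (and thus errors like *net.OpError
+// returned by the net package) satisfies it, which is what the retry loop
+// in syncServer relies on.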
+type possibleTimeout interface {
+ Timeout() bool
+}
+
+var syncSocketTimeout = 5 * time.Second
+
+func (cluster *mongoCluster) syncServer(server *mongoServer) (info *mongoServerInfo, hosts []string, err error) {
+ var syncTimeout time.Duration
+ if raceDetector {
+ // This variable is only ever touched by tests.
+ globalMutex.Lock()
+ syncTimeout = syncSocketTimeout
+ globalMutex.Unlock()
+ } else {
+ syncTimeout = syncSocketTimeout
+ }
+
+ addr := server.Addr
+ log("SYNC Processing ", addr, "...")
+
+ // Retry a few times to avoid knocking a server down for a hiccup.
+ var result isMasterResult
+ var tryerr error
+ for retry := 0; ; retry++ {
+ if retry == 3 || retry == 1 && cluster.failFast {
+ return nil, nil, tryerr
+ }
+ if retry > 0 {
+ // Don't abuse the server needlessly if there's something actually wrong.
+ if err, ok := tryerr.(possibleTimeout); ok && err.Timeout() {
+ // Give a chance for waiters to timeout as well.
+ cluster.serverSynced.Broadcast()
+ }
+ time.Sleep(syncShortDelay)
+ }
+
+ // It's not clear what would be a good timeout here. Is it
+ // better to wait longer or to retry?
+ socket, _, err := server.AcquireSocket(0, syncTimeout)
+ if err != nil {
+ tryerr = err
+ logf("SYNC Failed to get socket to %s: %v", addr, err)
+ continue
+ }
+ err = cluster.isMaster(socket, &result)
+ socket.Release()
+ if err != nil {
+ tryerr = err
+ logf("SYNC Command 'ismaster' to %s failed: %v", addr, err)
+ continue
+ }
+ debugf("SYNC Result of 'ismaster' from %s: %#v", addr, result)
+ break
+ }
+
+ if cluster.setName != "" && result.SetName != cluster.setName {
+ logf("SYNC Server %s is not a member of replica set %q", addr, cluster.setName)
+ return nil, nil, fmt.Errorf("server %s is not a member of replica set %q", addr, cluster.setName)
+ }
+
+ if result.IsMaster {
+ debugf("SYNC %s is a master.", addr)
+ if !server.info.Master {
+ // Made an incorrect assumption above, so fix stats.
+ stats.conn(-1, false)
+ stats.conn(+1, true)
+ }
+ } else if result.Secondary {
+ debugf("SYNC %s is a slave.", addr)
+ } else if cluster.direct {
+ logf("SYNC %s in unknown state. Pretending it's a slave due to direct connection.", addr)
+ } else {
+ logf("SYNC %s is neither a master nor a slave.", addr)
+ // Let stats track it as whatever was known before.
+ return nil, nil, errors.New(addr + " is not a master nor slave")
+ }
+
+ info = &mongoServerInfo{
+ Master: result.IsMaster,
+ Mongos: result.Msg == "isdbgrid",
+ Tags: result.Tags,
+ SetName: result.SetName,
+ MaxWireVersion: result.MaxWireVersion,
+ }
+
+ hosts = make([]string, 0, 1+len(result.Hosts)+len(result.Passives))
+ if result.Primary != "" {
+ // First in the list to speed up master discovery.
+ hosts = append(hosts, result.Primary)
+ }
+ hosts = append(hosts, result.Hosts...)
+ hosts = append(hosts, result.Passives...)
+
+ debugf("SYNC %s knows about the following peers: %#v", addr, hosts)
+ return info, hosts, nil
+}
+
+type syncKind bool
+
+const (
+ completeSync syncKind = true
+ partialSync syncKind = false
+)
+
+func (cluster *mongoCluster) addServer(server *mongoServer, info *mongoServerInfo, syncKind syncKind) {
+ cluster.Lock()
+ current := cluster.servers.Search(server.ResolvedAddr)
+ if current == nil {
+ if syncKind == partialSync {
+ cluster.Unlock()
+ server.Close()
+ log("SYNC Discarding unknown server ", server.Addr, " due to partial sync.")
+ return
+ }
+ cluster.servers.Add(server)
+ if info.Master {
+ cluster.masters.Add(server)
+ log("SYNC Adding ", server.Addr, " to cluster as a master.")
+ } else {
+ log("SYNC Adding ", server.Addr, " to cluster as a slave.")
+ }
+ } else {
+ if server != current {
+ panic("addServer attempting to add duplicated server")
+ }
+ if server.Info().Master != info.Master {
+ if info.Master {
+ log("SYNC Server ", server.Addr, " is now a master.")
+ cluster.masters.Add(server)
+ } else {
+ log("SYNC Server ", server.Addr, " is now a slave.")
+ cluster.masters.Remove(server)
+ }
+ }
+ }
+ server.SetInfo(info)
+ debugf("SYNC Broadcasting availability of server %s", server.Addr)
+ cluster.serverSynced.Broadcast()
+ cluster.Unlock()
+}
+
+func (cluster *mongoCluster) getKnownAddrs() []string {
+ cluster.RLock()
+ max := len(cluster.userSeeds) + len(cluster.dynaSeeds) + cluster.servers.Len()
+ seen := make(map[string]bool, max)
+ known := make([]string, 0, max)
+
+ add := func(addr string) {
+ if _, found := seen[addr]; !found {
+ seen[addr] = true
+ known = append(known, addr)
+ }
+ }
+
+ for _, addr := range cluster.userSeeds {
+ add(addr)
+ }
+ for _, addr := range cluster.dynaSeeds {
+ add(addr)
+ }
+ for _, serv := range cluster.servers.Slice() {
+ add(serv.Addr)
+ }
+ cluster.RUnlock()
+
+ return known
+}
+
+// syncServers injects a value into the cluster.sync channel to force
+// an iteration of the syncServersLoop function.
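+// The channel has a buffer of one, so the non-blocking send coalesces
+// bursts of requests: at most one wake-up is ever queued and redundant
+// requests are simply dropped. The same idiom in isolation (a minimal
+// sketch):
+//
+//	wake := make(chan bool, 1)
+//	select {
+//	case wake <- true: // wake-up queued
+//	default: // one already pending; nothing to do
+//	}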
+func (cluster *mongoCluster) syncServers() {
+ select {
+ case cluster.sync <- true:
+ default:
+ }
+}
+
+// How long to wait for a checkup of the cluster topology if nothing
+// else kicks a synchronization before that.
+const syncServersDelay = 30 * time.Second
+const syncShortDelay = 500 * time.Millisecond
+
+// syncServersLoop loops while the cluster is alive to keep its idea of
+// the server topology up-to-date. It must be called just once from
+// newCluster. The loop iterates once syncServersDelay has passed, or
+// if somebody injects a value into the cluster.sync channel to force a
+// synchronization. A loop iteration will contact all servers in
+// parallel, ask them about known peers and their own role within the
+// cluster, and then attempt to do the same with all the peers
+// retrieved.
+func (cluster *mongoCluster) syncServersLoop() {
+ for {
+ debugf("SYNC Cluster %p is starting a sync loop iteration.", cluster)
+
+ cluster.Lock()
+ if cluster.references == 0 {
+ cluster.Unlock()
+ break
+ }
+ cluster.references++ // Keep alive while syncing.
+ direct := cluster.direct
+ cluster.Unlock()
+
+ cluster.syncServersIteration(direct)
+
+ // We just synchronized, so consume any outstanding requests.
+ select {
+ case <-cluster.sync:
+ default:
+ }
+
+ cluster.Release()
+
+ // Hold off before allowing another sync. No point in
+ // burning CPU looking for down servers.
+ if !cluster.failFast {
+ time.Sleep(syncShortDelay)
+ }
+
+ cluster.Lock()
+ if cluster.references == 0 {
+ cluster.Unlock()
+ break
+ }
+ cluster.syncCount++
+ // Poke all waiters so they have a chance to timeout or
+ // restart syncing if they wish to.
+ cluster.serverSynced.Broadcast()
+ // Check if we have to restart immediately either way.
+ restart := !direct && cluster.masters.Empty() || cluster.servers.Empty()
+ cluster.Unlock()
+
+ if restart {
+ log("SYNC No masters found. Will synchronize again.")
+ time.Sleep(syncShortDelay)
+ continue
+ }
+
+ debugf("SYNC Cluster %p waiting for next requested or scheduled sync.", cluster)
+
+ // Hold off until somebody explicitly requests a synchronization
+ // or it's time to check for a cluster topology change again.
+ select {
+ case <-cluster.sync:
+ case <-time.After(syncServersDelay):
+ }
+ }
+ debugf("SYNC Cluster %p is stopping its sync loop.", cluster)
+}
+
+func (cluster *mongoCluster) server(addr string, tcpaddr *net.TCPAddr) *mongoServer {
+ cluster.RLock()
+ server := cluster.servers.Search(tcpaddr.String())
+ cluster.RUnlock()
+ if server != nil {
+ return server
+ }
+ return newServer(addr, tcpaddr, cluster.sync, cluster.dial)
+}
+
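+// resolveAddr turns addr into a *net.TCPAddr while bounding how long name
+// resolution may take. It dials UDP rather than TCP because a UDP "dial"
+// resolves the name without sending any packets. A hypothetical call (the
+// host name is illustrative only):
+//
+//	tcpaddr, err := resolveAddr("db.example.com:27017")
+//	if err == nil {
+//		debug("resolved to ", tcpaddr.String())
+//	}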
+func resolveAddr(addr string) (*net.TCPAddr, error) {
+ // Simple cases that do not need actual resolution. Works with IPv4 and v6.
+ if host, port, err := net.SplitHostPort(addr); err == nil {
+ if port, _ := strconv.Atoi(port); port > 0 {
+ zone := ""
+ if i := strings.LastIndex(host, "%"); i >= 0 {
+ zone = host[i+1:]
+ host = host[:i]
+ }
+ ip := net.ParseIP(host)
+ if ip != nil {
+ return &net.TCPAddr{IP: ip, Port: port, Zone: zone}, nil
+ }
+ }
+ }
+
+ // Attempt to resolve IPv4 and v6 concurrently.
+ addrChan := make(chan *net.TCPAddr, 2)
+ for _, network := range []string{"udp4", "udp6"} {
+ network := network
+ go func() {
+ // The unfortunate UDP dialing hack allows having a timeout on address resolution.
+ conn, err := net.DialTimeout(network, addr, 10*time.Second)
+ if err != nil {
+ addrChan <- nil
+ } else {
+ addrChan <- (*net.TCPAddr)(conn.RemoteAddr().(*net.UDPAddr))
+ conn.Close()
+ }
+ }()
+ }
+
+ // Wait for the result of IPv4 and v6 resolution. Use IPv4 if available.
+ tcpaddr := <-addrChan
+ if tcpaddr == nil || len(tcpaddr.IP) != 4 {
+ var timeout <-chan time.Time
+ if tcpaddr != nil {
+ // Don't wait too long if an IPv6 address is known.
+ timeout = time.After(50 * time.Millisecond)
+ }
+ select {
+ case <-timeout:
+ case tcpaddr2 := <-addrChan:
+ if tcpaddr == nil || tcpaddr2 != nil {
+ // It's an IPv4 address or the only known address. Use it.
+ tcpaddr = tcpaddr2
+ }
+ }
+ }
+
+ if tcpaddr == nil {
+ log("SYNC Failed to resolve server address: ", addr)
+ return nil, errors.New("failed to resolve server address: " + addr)
+ }
+ if tcpaddr.String() != addr {
+ debug("SYNC Address ", addr, " resolved as ", tcpaddr.String())
+ }
+ return tcpaddr, nil
+}
+
+type pendingAdd struct {
+ server *mongoServer
+ info *mongoServerInfo
+}
+
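+// syncServersIteration fans out one goroutine per known address. Servers
+// reported only by non-masters are parked in notYetAdded; if any master is
+// reached (a complete sync) those pending entries are discarded as stale,
+// otherwise (a partial sync) they are added anyway so the cluster keeps
+// whatever topology information it could gather.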
+func (cluster *mongoCluster) syncServersIteration(direct bool) {
+ log("SYNC Starting full topology synchronization...")
+
+ var wg sync.WaitGroup
+ var m sync.Mutex
+ notYetAdded := make(map[string]pendingAdd)
+ addIfFound := make(map[string]bool)
+ seen := make(map[string]bool)
+ syncKind := partialSync
+
+ var spawnSync func(addr string, byMaster bool)
+ spawnSync = func(addr string, byMaster bool) {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ tcpaddr, err := resolveAddr(addr)
+ if err != nil {
+ log("SYNC Failed to start sync of ", addr, ": ", err.Error())
+ return
+ }
+ resolvedAddr := tcpaddr.String()
+
+ m.Lock()
+ if byMaster {
+ if pending, ok := notYetAdded[resolvedAddr]; ok {
+ delete(notYetAdded, resolvedAddr)
+ m.Unlock()
+ cluster.addServer(pending.server, pending.info, completeSync)
+ return
+ }
+ addIfFound[resolvedAddr] = true
+ }
+ if seen[resolvedAddr] {
+ m.Unlock()
+ return
+ }
+ seen[resolvedAddr] = true
+ m.Unlock()
+
+ server := cluster.server(addr, tcpaddr)
+ info, hosts, err := cluster.syncServer(server)
+ if err != nil {
+ cluster.removeServer(server)
+ return
+ }
+
+ m.Lock()
+ add := direct || info.Master || addIfFound[resolvedAddr]
+ if add {
+ syncKind = completeSync
+ } else {
+ notYetAdded[resolvedAddr] = pendingAdd{server, info}
+ }
+ m.Unlock()
+ if add {
+ cluster.addServer(server, info, completeSync)
+ }
+ if !direct {
+ for _, addr := range hosts {
+ spawnSync(addr, info.Master)
+ }
+ }
+ }()
+ }
+
+ knownAddrs := cluster.getKnownAddrs()
+ for _, addr := range knownAddrs {
+ spawnSync(addr, false)
+ }
+ wg.Wait()
+
+ if syncKind == completeSync {
+ logf("SYNC Synchronization was complete (got data from primary).")
+ for _, pending := range notYetAdded {
+ cluster.removeServer(pending.server)
+ }
+ } else {
+ logf("SYNC Synchronization was partial (cannot talk to primary).")
+ for _, pending := range notYetAdded {
+ cluster.addServer(pending.server, pending.info, partialSync)
+ }
+ }
+
+ cluster.Lock()
+ mastersLen := cluster.masters.Len()
+ logf("SYNC Synchronization completed: %d master(s) and %d slave(s) alive.", mastersLen, cluster.servers.Len()-mastersLen)
+
+ // Update dynamic seeds, but only if we have any good servers. Otherwise,
+ // leave them alone for better chances of a successful sync in the future.
+ if syncKind == completeSync {
+ dynaSeeds := make([]string, cluster.servers.Len())
+ for i, server := range cluster.servers.Slice() {
+ dynaSeeds[i] = server.Addr
+ }
+ cluster.dynaSeeds = dynaSeeds
+ debugf("SYNC New dynamic seeds: %#v\n", dynaSeeds)
+ }
+ cluster.Unlock()
+}
+
+// AcquireSocket returns a socket to a server in the cluster. If slaveOk is
+// true, it will attempt to return a socket to a slave server. If it is
+// false, the socket will necessarily be to a master server.
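+//
+// A hedged example of a secondary read, with timeouts and poolLimit as
+// configured by the caller (all names are defined in this package):
+//
+//	sock, err := cluster.AcquireSocket(Secondary, true, syncTimeout, socketTimeout, nil, poolLimit)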
+func (cluster *mongoCluster) AcquireSocket(mode Mode, slaveOk bool, syncTimeout time.Duration, socketTimeout time.Duration, serverTags []bson.D, poolLimit int) (s *mongoSocket, err error) {
+ var started time.Time
+ var syncCount uint
+ warnedLimit := false
+ for {
+ cluster.RLock()
+ for {
+ mastersLen := cluster.masters.Len()
+ slavesLen := cluster.servers.Len() - mastersLen
+ debugf("Cluster has %d known masters and %d known slaves.", mastersLen, slavesLen)
+ if mastersLen > 0 && !(slaveOk && mode == Secondary) || slavesLen > 0 && slaveOk {
+ break
+ }
+ if mastersLen > 0 && mode == Secondary && cluster.masters.HasMongos() {
+ break
+ }
+ if started.IsZero() {
+ // Initialize after fast path above.
+ started = time.Now()
+ syncCount = cluster.syncCount
+ } else if syncTimeout != 0 && started.Before(time.Now().Add(-syncTimeout)) || cluster.failFast && cluster.syncCount != syncCount {
+ cluster.RUnlock()
+ return nil, errors.New("no reachable servers")
+ }
+ log("Waiting for servers to synchronize...")
+ cluster.syncServers()
+
+ // Remember: this will release and reacquire the lock.
+ cluster.serverSynced.Wait()
+ }
+
+ var server *mongoServer
+ if slaveOk {
+ server = cluster.servers.BestFit(mode, serverTags)
+ } else {
+ server = cluster.masters.BestFit(mode, nil)
+ }
+ cluster.RUnlock()
+
+ if server == nil {
+ // Must have failed the requested tags. Sleep to avoid spinning.
+ time.Sleep(1e8)
+ continue
+ }
+
+ s, abended, err := server.AcquireSocket(poolLimit, socketTimeout)
+ if err == errPoolLimit {
+ if !warnedLimit {
+ warnedLimit = true
+ log("WARNING: Per-server connection limit reached.")
+ }
+ time.Sleep(100 * time.Millisecond)
+ continue
+ }
+ if err != nil {
+ cluster.removeServer(server)
+ cluster.syncServers()
+ continue
+ }
+ if abended && !slaveOk {
+ var result isMasterResult
+ err := cluster.isMaster(s, &result)
+ if err != nil || !result.IsMaster {
+ logf("Cannot confirm server %s as master (%v)", server.Addr, err)
+ s.Release()
+ cluster.syncServers()
+ time.Sleep(100 * time.Millisecond)
+ continue
+ }
+ }
+ return s, nil
+ }
+ panic("unreached")
+}
+
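+// CacheIndex records whether the index identified by cacheKey is known to
+// exist, letting index-ensuring helpers skip redundant round trips. The
+// key format is opaque to the cluster; a hedged sketch of the intended use:
+//
+//	cluster.CacheIndex(cacheKey, true)
+//	if cluster.HasCachedIndex(cacheKey) {
+//		// safe to skip re-creating the index
+//	}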
+func (cluster *mongoCluster) CacheIndex(cacheKey string, exists bool) {
+ cluster.Lock()
+ if cluster.cachedIndex == nil {
+ cluster.cachedIndex = make(map[string]bool)
+ }
+ if exists {
+ cluster.cachedIndex[cacheKey] = true
+ } else {
+ delete(cluster.cachedIndex, cacheKey)
+ }
+ cluster.Unlock()
+}
+
+func (cluster *mongoCluster) HasCachedIndex(cacheKey string) (result bool) {
+ cluster.RLock()
+ if cluster.cachedIndex != nil {
+ result = cluster.cachedIndex[cacheKey]
+ }
+ cluster.RUnlock()
+ return
+}
+
+func (cluster *mongoCluster) ResetIndexCache() {
+ cluster.Lock()
+ cluster.cachedIndex = make(map[string]bool)
+ cluster.Unlock()
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/cluster_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/cluster_test.go
new file mode 100644
index 00000000000..54ec8676226
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/cluster_test.go
@@ -0,0 +1,2090 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+)
+
+func (s *S) TestNewSession(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Do a dummy operation to wait for connection.
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+
+	// Tweak safety and query settings to ensure the new session copies them.
+ session.SetSafe(nil)
+ session.SetBatch(-1)
+ other := session.New()
+ defer other.Close()
+ session.SetSafe(&mgo.Safe{})
+
+	// The new session was created while the original was unsafe, so no errors.
+ otherColl := other.DB("mydb").C("mycoll")
+ err = otherColl.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+
+ // Original session was made safe again.
+ err = coll.Insert(M{"_id": 1})
+ c.Assert(err, NotNil)
+
+ // With New(), each session has its own socket now.
+ stats := mgo.GetStats()
+ c.Assert(stats.MasterConns, Equals, 2)
+ c.Assert(stats.SocketsInUse, Equals, 2)
+
+ // Ensure query parameters were cloned.
+ err = otherColl.Insert(M{"_id": 2})
+ c.Assert(err, IsNil)
+
+ // Ping the database to ensure the nonce has been received already.
+ c.Assert(other.Ping(), IsNil)
+
+ mgo.ResetStats()
+
+ iter := otherColl.Find(M{}).Iter()
+ c.Assert(err, IsNil)
+
+ m := M{}
+ ok := iter.Next(m)
+ c.Assert(ok, Equals, true)
+ err = iter.Close()
+ c.Assert(err, IsNil)
+
+ // If Batch(-1) is in effect, a single document must have been received.
+ stats = mgo.GetStats()
+ c.Assert(stats.ReceivedDocs, Equals, 1)
+}
+
+func (s *S) TestCloneSession(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Do a dummy operation to wait for connection.
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+
+	// Tweak safety and query settings to ensure the clone copies them.
+ session.SetSafe(nil)
+ session.SetBatch(-1)
+ clone := session.Clone()
+ defer clone.Close()
+ session.SetSafe(&mgo.Safe{})
+
+ // Clone was copied while session was unsafe, so no errors.
+ cloneColl := clone.DB("mydb").C("mycoll")
+ err = cloneColl.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+
+ // Original session was made safe again.
+ err = coll.Insert(M{"_id": 1})
+ c.Assert(err, NotNil)
+
+ // With Clone(), same socket is shared between sessions now.
+ stats := mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 1)
+ c.Assert(stats.SocketRefs, Equals, 2)
+
+ // Refreshing one of them should let the original socket go,
+ // while preserving the safety settings.
+ clone.Refresh()
+ err = cloneColl.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+
+ // Must have used another connection now.
+ stats = mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 2)
+ c.Assert(stats.SocketRefs, Equals, 2)
+
+ // Ensure query parameters were cloned.
+ err = cloneColl.Insert(M{"_id": 2})
+ c.Assert(err, IsNil)
+
+ // Ping the database to ensure the nonce has been received already.
+ c.Assert(clone.Ping(), IsNil)
+
+ mgo.ResetStats()
+
+ iter := cloneColl.Find(M{}).Iter()
+ c.Assert(err, IsNil)
+
+ m := M{}
+ ok := iter.Next(m)
+ c.Assert(ok, Equals, true)
+ err = iter.Close()
+ c.Assert(err, IsNil)
+
+ // If Batch(-1) is in effect, a single document must have been received.
+ stats = mgo.GetStats()
+ c.Assert(stats.ReceivedDocs, Equals, 1)
+}
+
+func (s *S) TestModeStrong(c *C) {
+ session, err := mgo.Dial("localhost:40012")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Monotonic, false)
+ session.SetMode(mgo.Strong, false)
+
+ c.Assert(session.Mode(), Equals, mgo.Strong)
+
+ result := M{}
+ cmd := session.DB("admin").C("$cmd")
+ err = cmd.Find(M{"ismaster": 1}).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result["ismaster"], Equals, true)
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ // Wait since the sync also uses sockets.
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for cluster sync to finish...")
+ time.Sleep(5e8)
+ }
+
+ stats := mgo.GetStats()
+ c.Assert(stats.MasterConns, Equals, 1)
+ c.Assert(stats.SlaveConns, Equals, 2)
+ c.Assert(stats.SocketsInUse, Equals, 1)
+
+ session.SetMode(mgo.Strong, true)
+
+ stats = mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestModeMonotonic(c *C) {
+ // Must necessarily connect to a slave, otherwise the
+ // master connection will be available first.
+ session, err := mgo.Dial("localhost:40012")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Monotonic, false)
+
+ c.Assert(session.Mode(), Equals, mgo.Monotonic)
+
+ var result struct{ IsMaster bool }
+ cmd := session.DB("admin").C("$cmd")
+ err = cmd.Find(M{"ismaster": 1}).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.IsMaster, Equals, false)
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ err = cmd.Find(M{"ismaster": 1}).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.IsMaster, Equals, true)
+
+ // Wait since the sync also uses sockets.
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for cluster sync to finish...")
+ time.Sleep(5e8)
+ }
+
+ stats := mgo.GetStats()
+ c.Assert(stats.MasterConns, Equals, 1)
+ c.Assert(stats.SlaveConns, Equals, 2)
+ c.Assert(stats.SocketsInUse, Equals, 2)
+
+ session.SetMode(mgo.Monotonic, true)
+
+ stats = mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestModeMonotonicAfterStrong(c *C) {
+ // Test that a strong session shifting to a monotonic
+ // one preserves the socket untouched.
+
+ session, err := mgo.Dial("localhost:40012")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Insert something to force a connection to the master.
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ session.SetMode(mgo.Monotonic, false)
+
+ // Wait since the sync also uses sockets.
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for cluster sync to finish...")
+ time.Sleep(5e8)
+ }
+
+ // Master socket should still be reserved.
+ stats := mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 1)
+
+ // Confirm it's the master even though it's Monotonic by now.
+ result := M{}
+ cmd := session.DB("admin").C("$cmd")
+ err = cmd.Find(M{"ismaster": 1}).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result["ismaster"], Equals, true)
+}
+
+func (s *S) TestModeStrongAfterMonotonic(c *C) {
+ // Test that shifting from Monotonic to Strong while
+ // using a slave socket will keep the socket reserved
+// until the master socket is needed, so that no
+// switchover occurs unless it's actually necessary.
+
+ // Must necessarily connect to a slave, otherwise the
+ // master connection will be available first.
+ session, err := mgo.Dial("localhost:40012")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Monotonic, false)
+
+ // Ensure we're talking to a slave, and reserve the socket.
+ result := M{}
+ err = session.Run("ismaster", &result)
+ c.Assert(err, IsNil)
+ c.Assert(result["ismaster"], Equals, false)
+
+ // Switch to a Strong session.
+ session.SetMode(mgo.Strong, false)
+
+ // Wait since the sync also uses sockets.
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for cluster sync to finish...")
+ time.Sleep(5e8)
+ }
+
+ // Slave socket should still be reserved.
+ stats := mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 1)
+
+ // But any operation will switch it to the master.
+ result = M{}
+ err = session.Run("ismaster", &result)
+ c.Assert(err, IsNil)
+ c.Assert(result["ismaster"], Equals, true)
+}
+
+func (s *S) TestModeMonotonicWriteOnIteration(c *C) {
+ // Must necessarily connect to a slave, otherwise the
+ // master connection will be available first.
+ session, err := mgo.Dial("localhost:40012")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Monotonic, false)
+
+ c.Assert(session.Mode(), Equals, mgo.Monotonic)
+
+ coll1 := session.DB("mydb").C("mycoll1")
+ coll2 := session.DB("mydb").C("mycoll2")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ err := coll1.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ // Release master so we can grab a slave again.
+ session.Refresh()
+
+ // Wait until synchronization is done.
+ for {
+ n, err := coll1.Count()
+ c.Assert(err, IsNil)
+ if n == len(ns) {
+ break
+ }
+ }
+
+ iter := coll1.Find(nil).Batch(2).Iter()
+ i := 0
+ m := M{}
+ for iter.Next(&m) {
+ i++
+ if i > 3 {
+ err := coll2.Insert(M{"n": 47 + i})
+ c.Assert(err, IsNil)
+ }
+ }
+ c.Assert(i, Equals, len(ns))
+}
+
+func (s *S) TestModeEventual(c *C) {
+ // Must necessarily connect to a slave, otherwise the
+ // master connection will be available first.
+ session, err := mgo.Dial("localhost:40012")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Eventual, false)
+
+ c.Assert(session.Mode(), Equals, mgo.Eventual)
+
+ result := M{}
+ err = session.Run("ismaster", &result)
+ c.Assert(err, IsNil)
+ c.Assert(result["ismaster"], Equals, false)
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ result = M{}
+ err = session.Run("ismaster", &result)
+ c.Assert(err, IsNil)
+ c.Assert(result["ismaster"], Equals, false)
+
+ // Wait since the sync also uses sockets.
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for cluster sync to finish...")
+ time.Sleep(5e8)
+ }
+
+ stats := mgo.GetStats()
+ c.Assert(stats.MasterConns, Equals, 1)
+ c.Assert(stats.SlaveConns, Equals, 2)
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestModeEventualAfterStrong(c *C) {
+ // Test that a strong session shifting to an eventual
+ // one preserves the socket untouched.
+
+ session, err := mgo.Dial("localhost:40012")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Insert something to force a connection to the master.
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ session.SetMode(mgo.Eventual, false)
+
+ // Wait since the sync also uses sockets.
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for cluster sync to finish...")
+ time.Sleep(5e8)
+ }
+
+ // Master socket should still be reserved.
+ stats := mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 1)
+
+ // Confirm it's the master even though it's Eventual by now.
+ result := M{}
+ cmd := session.DB("admin").C("$cmd")
+ err = cmd.Find(M{"ismaster": 1}).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result["ismaster"], Equals, true)
+
+ session.SetMode(mgo.Eventual, true)
+
+ stats = mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestModeStrongFallover(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40021")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // With strong consistency, this will open a socket to the master.
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+
+ // Kill the master.
+ host := result.Host
+ s.Stop(host)
+
+ // This must fail, since the connection was broken.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ // With strong consistency, it fails again until reset.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ session.Refresh()
+
+ // Now we should be able to talk to the new master.
+ // Increase the timeout since this may take quite a while.
+ session.SetSyncTimeout(3 * time.Minute)
+
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Host, Not(Equals), host)
+
+ // Insert some data to confirm it's indeed a master.
+ err = session.DB("mydb").C("mycoll").Insert(M{"n": 42})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestModePrimaryHiccup(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40021")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // With strong consistency, this will open a socket to the master.
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+
+ // Establish a few extra sessions to create spare sockets to
+	// the master. This slightly increases the chances of getting an
+ // incorrect cached socket.
+ var sessions []*mgo.Session
+ for i := 0; i < 20; i++ {
+ sessions = append(sessions, session.Copy())
+ err = sessions[len(sessions)-1].Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ }
+ for i := range sessions {
+ sessions[i].Close()
+ }
+
+	// Kill the master, but bring it back immediately.
+ host := result.Host
+ s.Stop(host)
+ s.StartAll()
+
+ // This must fail, since the connection was broken.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ // With strong consistency, it fails again until reset.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ session.Refresh()
+
+ // Now we should be able to talk to the new master.
+ // Increase the timeout since this may take quite a while.
+ session.SetSyncTimeout(3 * time.Minute)
+
+ // Insert some data to confirm it's indeed a master.
+ err = session.DB("mydb").C("mycoll").Insert(M{"n": 42})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestModeMonotonicFallover(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40021")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Monotonic, true)
+
+ // Insert something to force a switch to the master.
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ // Wait a bit for this to be synchronized to slaves.
+ time.Sleep(3 * time.Second)
+
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+
+ // Kill the master.
+ host := result.Host
+ s.Stop(host)
+
+ // This must fail, since the connection was broken.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ // With monotonic consistency, it fails again until reset.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ session.Refresh()
+
+ // Now we should be able to talk to the new master.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Host, Not(Equals), host)
+}
+
+func (s *S) TestModeMonotonicWithSlaveFallover(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40021")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ ssresult := &struct{ Host string }{}
+ imresult := &struct{ IsMaster bool }{}
+
+	// Figure out the master while still using the strong session.
+ err = session.Run("serverStatus", ssresult)
+ c.Assert(err, IsNil)
+ err = session.Run("isMaster", imresult)
+ c.Assert(err, IsNil)
+ master := ssresult.Host
+ c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))
+
+	// Create a new monotonic session with an explicit address to ensure
+ // a slave is synchronized before the master, otherwise a connection
+ // with the master may be used below for lack of other options.
+ var addr string
+ switch {
+ case strings.HasSuffix(ssresult.Host, ":40021"):
+ addr = "localhost:40022"
+ case strings.HasSuffix(ssresult.Host, ":40022"):
+ addr = "localhost:40021"
+ case strings.HasSuffix(ssresult.Host, ":40023"):
+ addr = "localhost:40021"
+ default:
+ c.Fatal("Unknown host: ", ssresult.Host)
+ }
+
+ session, err = mgo.Dial(addr)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Monotonic, true)
+
+ // Check the address of the socket associated with the monotonic session.
+ c.Log("Running serverStatus and isMaster with monotonic session")
+ err = session.Run("serverStatus", ssresult)
+ c.Assert(err, IsNil)
+ err = session.Run("isMaster", imresult)
+ c.Assert(err, IsNil)
+ slave := ssresult.Host
+ c.Assert(imresult.IsMaster, Equals, false, Commentf("%s is not a slave", slave))
+
+ c.Assert(master, Not(Equals), slave)
+
+ // Kill the master.
+ s.Stop(master)
+
+ // Session must still be good, since we were talking to a slave.
+ err = session.Run("serverStatus", ssresult)
+ c.Assert(err, IsNil)
+
+ c.Assert(ssresult.Host, Equals, slave,
+ Commentf("Monotonic session moved from %s to %s", slave, ssresult.Host))
+
+ // If we try to insert something, it'll have to hold until the new
+ // master is available to move the connection, and work correctly.
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ // Must now be talking to the new master.
+ err = session.Run("serverStatus", ssresult)
+ c.Assert(err, IsNil)
+ err = session.Run("isMaster", imresult)
+ c.Assert(err, IsNil)
+ c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))
+
+ // ... which is not the old one, since it's still dead.
+ c.Assert(ssresult.Host, Not(Equals), master)
+}
+
+func (s *S) TestModeEventualFallover(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40021")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ master := result.Host
+
+ session.SetMode(mgo.Eventual, true)
+
+ // Should connect to the master when needed.
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ // Wait a bit for this to be synchronized to slaves.
+ time.Sleep(3 * time.Second)
+
+ // Kill the master.
+ s.Stop(master)
+
+ // Should still work, with the new master now.
+ coll = session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Host, Not(Equals), master)
+}
+
+func (s *S) TestModeSecondaryJustPrimary(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Secondary, true)
+
+ err = session.Ping()
+ c.Assert(err, ErrorMatches, "no reachable servers")
+}
+
+func (s *S) TestModeSecondaryPreferredJustPrimary(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.SecondaryPreferred, true)
+
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestModeSecondaryPreferredFallover(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Ensure secondaries are available for being picked up.
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for cluster sync to finish...")
+ time.Sleep(5e8)
+ }
+
+ session.SetMode(mgo.SecondaryPreferred, true)
+
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(supvName(result.Host), Not(Equals), "rs1a")
+ secondary := result.Host
+
+ // Should connect to the primary when needed.
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ // Wait a bit for this to be synchronized to slaves.
+ time.Sleep(3 * time.Second)
+
+ // Kill the primary.
+ s.Stop("localhost:40011")
+
+ // It can still talk to the selected secondary.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Host, Equals, secondary)
+
+ // But cannot speak to the primary until reset.
+ coll = session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, Equals, io.EOF)
+
+ session.Refresh()
+
+ // Can still talk to a secondary.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(supvName(result.Host), Not(Equals), "rs1a")
+
+ s.StartAll()
+
+ // Should now be able to talk to the primary again.
+ coll = session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestModePrimaryPreferredFallover(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.PrimaryPreferred, true)
+
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(supvName(result.Host), Equals, "rs1a")
+
+ // Kill the primary.
+ s.Stop("localhost:40011")
+
+ // Should now fail as there was a primary socket in use already.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ // Refresh so the reserved primary socket goes away.
+ session.Refresh()
+
+ // Should be able to talk to the secondary.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+
+ s.StartAll()
+
+ // Should wait for the new primary to become available.
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ // And should use the new primary in general, as it is preferred.
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(supvName(result.Host), Equals, "rs1a")
+}
+
+func (s *S) TestModePrimaryFallover(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetSyncTimeout(3 * time.Second)
+
+ session.SetMode(mgo.Primary, true)
+
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(supvName(result.Host), Equals, "rs1a")
+
+ // Kill the primary.
+ s.Stop("localhost:40011")
+
+ session.Refresh()
+
+ err = session.Ping()
+ c.Assert(err, ErrorMatches, "no reachable servers")
+}
+
+func (s *S) TestModeSecondary(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Secondary, true)
+
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(supvName(result.Host), Not(Equals), "rs1a")
+ secondary := result.Host
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Host, Equals, secondary)
+}
+
+func (s *S) TestPreserveSocketCountOnSync(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ stats := mgo.GetStats()
+ for stats.SocketsAlive != 3 {
+ c.Logf("Waiting for all connections to be established (sockets alive currently %d)...", stats.SocketsAlive)
+ stats = mgo.GetStats()
+ time.Sleep(5e8)
+ }
+
+ c.Assert(stats.SocketsAlive, Equals, 3)
+
+ // Kill the master (with rs1, 'a' is always the master).
+ s.Stop("localhost:40011")
+
+ // Wait for the logic to run for a bit and bring it back.
+ startedAll := make(chan bool)
+ go func() {
+ time.Sleep(5e9)
+ s.StartAll()
+ startedAll <- true
+ }()
+
+ // Do not allow the test to return before the goroutine above is done.
+ defer func() {
+ <-startedAll
+ }()
+
+ // Do an action to kick the resync logic in, and also to
+ // wait until the cluster recognizes the server is back.
+ result := struct{ Ok bool }{}
+ err = session.Run("getLastError", &result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Ok, Equals, true)
+
+ for i := 0; i != 20; i++ {
+ stats = mgo.GetStats()
+ if stats.SocketsAlive == 3 {
+ break
+ }
+ c.Logf("Waiting for 3 sockets alive, have %d", stats.SocketsAlive)
+ time.Sleep(5e8)
+ }
+
+ // Ensure the number of sockets is preserved after syncing.
+ stats = mgo.GetStats()
+ c.Assert(stats.SocketsAlive, Equals, 3)
+ c.Assert(stats.SocketsInUse, Equals, 1)
+ c.Assert(stats.SocketRefs, Equals, 1)
+}
+
+// Connect to the master of a deployment with a single server,
+// run an insert, and then ensure the insert worked and that a
+// single connection was established.
+func (s *S) TestTopologySyncWithSingleMaster(c *C) {
+ // Use hostname here rather than IP, to make things trickier.
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1, "b": 2})
+ c.Assert(err, IsNil)
+
+ // One connection used for discovery. Master socket recycled for
+ // insert. Socket is reserved after insert.
+ stats := mgo.GetStats()
+ c.Assert(stats.MasterConns, Equals, 1)
+ c.Assert(stats.SlaveConns, Equals, 0)
+ c.Assert(stats.SocketsInUse, Equals, 1)
+
+ // Refresh session and socket must be released.
+ session.Refresh()
+ stats = mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestTopologySyncWithSlaveSeed(c *C) {
+ // That's supposed to be a slave. Must run discovery
+	// and find the master to insert successfully.
+ session, err := mgo.Dial("localhost:40012")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ coll.Insert(M{"a": 1, "b": 2})
+
+ result := struct{ Ok bool }{}
+ err = session.Run("getLastError", &result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Ok, Equals, true)
+
+ // One connection to each during discovery. Master
+ // socket recycled for insert.
+ stats := mgo.GetStats()
+ c.Assert(stats.MasterConns, Equals, 1)
+ c.Assert(stats.SlaveConns, Equals, 2)
+
+	// Only one socket reference remains alive: the master socket owned
+	// by the session above.
+ c.Assert(stats.SocketsInUse, Equals, 1)
+
+ // Refresh it, and it must be gone.
+ session.Refresh()
+ stats = mgo.GetStats()
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestSyncTimeout(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ s.Stop("localhost:40001")
+
+ timeout := 3 * time.Second
+ session.SetSyncTimeout(timeout)
+ started := time.Now()
+
+ // Do something.
+ result := struct{ Ok bool }{}
+ err = session.Run("getLastError", &result)
+ c.Assert(err, ErrorMatches, "no reachable servers")
+ c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
+ c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true)
+}
+
+func (s *S) TestDialWithTimeout(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ timeout := 2 * time.Second
+ started := time.Now()
+
+ // 40009 isn't used by the test servers.
+ session, err := mgo.DialWithTimeout("localhost:40009", timeout)
+ if session != nil {
+ session.Close()
+ }
+ c.Assert(err, ErrorMatches, "no reachable servers")
+ c.Assert(session, IsNil)
+ c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
+ c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true)
+}
+
+func (s *S) TestSocketTimeout(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ s.Freeze("localhost:40001")
+
+ timeout := 3 * time.Second
+ session.SetSocketTimeout(timeout)
+ started := time.Now()
+
+ // Do something.
+ result := struct{ Ok bool }{}
+ err = session.Run("getLastError", &result)
+ c.Assert(err, ErrorMatches, ".*: i/o timeout")
+ c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
+ c.Assert(started.After(time.Now().Add(-timeout*2)), Equals, true)
+}
+
+func (s *S) TestSocketTimeoutOnDial(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ timeout := 1 * time.Second
+
+ defer mgo.HackSyncSocketTimeout(timeout)()
+
+ s.Freeze("localhost:40001")
+
+ started := time.Now()
+
+ session, err := mgo.DialWithTimeout("localhost:40001", timeout)
+ c.Assert(err, ErrorMatches, "no reachable servers")
+ c.Assert(session, IsNil)
+
+ c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
+ c.Assert(started.After(time.Now().Add(-20*time.Second)), Equals, true)
+}
+
+func (s *S) TestSocketTimeoutOnInactiveSocket(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ timeout := 2 * time.Second
+ session.SetSocketTimeout(timeout)
+
+ // Do something that relies on the timeout and works.
+ c.Assert(session.Ping(), IsNil)
+
+ // Freeze and wait for the timeout to go by.
+ s.Freeze("localhost:40001")
+ time.Sleep(timeout + 500*time.Millisecond)
+ s.Thaw("localhost:40001")
+
+ // Do something again. The timeout above should not have killed
+ // the socket as there was nothing to be done.
+ c.Assert(session.Ping(), IsNil)
+}
+
+func (s *S) TestDialWithReplicaSetName(c *C) {
+ seedLists := [][]string{
+ // rs1 primary and rs2 primary
+ []string{"localhost:40011", "localhost:40021"},
+ // rs1 primary and rs2 secondary
+ []string{"localhost:40011", "localhost:40022"},
+ // rs1 secondary and rs2 primary
+ []string{"localhost:40012", "localhost:40021"},
+ // rs1 secondary and rs2 secondary
+ []string{"localhost:40012", "localhost:40022"},
+ }
+
+ rs2Members := []string{":40021", ":40022", ":40023"}
+
+ verifySyncedServers := func(session *mgo.Session, numServers int) {
+ // wait for the server(s) to be synced
+ for len(session.LiveServers()) != numServers {
+ c.Log("Waiting for cluster sync to finish...")
+ time.Sleep(5e8)
+ }
+
+ // ensure none of the rs2 set members are communicated with
+ for _, addr := range session.LiveServers() {
+ for _, rs2Member := range rs2Members {
+ c.Assert(strings.HasSuffix(addr, rs2Member), Equals, false)
+ }
+ }
+ }
+
+ // only communication with rs1 members is expected
+ for _, seedList := range seedLists {
+ info := mgo.DialInfo{
+ Addrs: seedList,
+ Timeout: 5 * time.Second,
+ ReplicaSetName: "rs1",
+ }
+
+ session, err := mgo.DialWithInfo(&info)
+ c.Assert(err, IsNil)
+ verifySyncedServers(session, 3)
+ session.Close()
+
+ info.Direct = true
+ session, err = mgo.DialWithInfo(&info)
+ c.Assert(err, IsNil)
+ verifySyncedServers(session, 1)
+ session.Close()
+
+ connectionUrl := fmt.Sprintf("mongodb://%v/?replicaSet=rs1", strings.Join(seedList, ","))
+ session, err = mgo.Dial(connectionUrl)
+ c.Assert(err, IsNil)
+ verifySyncedServers(session, 3)
+ session.Close()
+
+ connectionUrl += "&connect=direct"
+ session, err = mgo.Dial(connectionUrl)
+ c.Assert(err, IsNil)
+ verifySyncedServers(session, 1)
+ session.Close()
+ }
+
+}
+
+func (s *S) TestDirect(c *C) {
+ session, err := mgo.Dial("localhost:40012?connect=direct")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // We know that server is a slave.
+ session.SetMode(mgo.Monotonic, true)
+
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true)
+
+ stats := mgo.GetStats()
+ c.Assert(stats.SocketsAlive, Equals, 1)
+ c.Assert(stats.SocketsInUse, Equals, 1)
+ c.Assert(stats.SocketRefs, Equals, 1)
+
+ // We've got no master, so it'll time out.
+ session.SetSyncTimeout(5e8 * time.Nanosecond)
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"test": 1})
+ c.Assert(err, ErrorMatches, "no reachable servers")
+
+ // Writing to the local database is okay.
+ coll = session.DB("local").C("mycoll")
+ defer coll.RemoveAll(nil)
+ id := bson.NewObjectId()
+ err = coll.Insert(M{"_id": id})
+ c.Assert(err, IsNil)
+
+ // Data was stored in the right server.
+ n, err := coll.Find(M{"_id": id}).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 1)
+
+ // Server hasn't changed.
+ result.Host = ""
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(strings.HasSuffix(result.Host, ":40012"), Equals, true)
+}
+
+func (s *S) TestDirectToUnknownStateMember(c *C) {
+ session, err := mgo.Dial("localhost:40041?connect=direct")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Monotonic, true)
+
+ result := &struct{ Host string }{}
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true)
+
+ // We've got no master, so it'll time out.
+ session.SetSyncTimeout(5e8 * time.Nanosecond)
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"test": 1})
+ c.Assert(err, ErrorMatches, "no reachable servers")
+
+ // Slave is still reachable.
+ result.Host = ""
+ err = session.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+ c.Assert(strings.HasSuffix(result.Host, ":40041"), Equals, true)
+}
+
+func (s *S) TestFailFast(c *C) {
+ info := mgo.DialInfo{
+ Addrs: []string{"localhost:99999"},
+ Timeout: 5 * time.Second,
+ FailFast: true,
+ }
+
+ started := time.Now()
+
+ _, err := mgo.DialWithInfo(&info)
+ c.Assert(err, ErrorMatches, "no reachable servers")
+
+ c.Assert(started.After(time.Now().Add(-time.Second)), Equals, true)
+}
+
+func (s *S) countQueries(c *C, server string) (n int) {
+ defer func() { c.Logf("Queries for %q: %d", server, n) }()
+ session, err := mgo.Dial(server + "?connect=direct")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ session.SetMode(mgo.Monotonic, true)
+ var result struct {
+ OpCounters struct {
+ Query int
+ }
+ Metrics struct {
+ Commands struct{ Find struct{ Total int } }
+ }
+ }
+ err = session.Run("serverStatus", &result)
+ c.Assert(err, IsNil)
+ if s.versionAtLeast(3, 2) {
+ return result.Metrics.Commands.Find.Total
+ }
+ return result.OpCounters.Query
+}
+
+func (s *S) countCommands(c *C, server, commandName string) (n int) {
+ defer func() { c.Logf("Queries for %q: %d", server, n) }()
+ session, err := mgo.Dial(server + "?connect=direct")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ session.SetMode(mgo.Monotonic, true)
+ var result struct {
+ Metrics struct {
+ Commands map[string]struct{ Total int }
+ }
+ }
+ err = session.Run("serverStatus", &result)
+ c.Assert(err, IsNil)
+ return result.Metrics.Commands[commandName].Total
+}
+
+func (s *S) TestMonotonicSlaveOkFlagWithMongos(c *C) {
+ session, err := mgo.Dial("localhost:40021")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ ssresult := &struct{ Host string }{}
+ imresult := &struct{ IsMaster bool }{}
+
+ // Figure out the master while still using the strong session.
+ err = session.Run("serverStatus", ssresult)
+ c.Assert(err, IsNil)
+ err = session.Run("isMaster", imresult)
+ c.Assert(err, IsNil)
+ master := ssresult.Host
+ c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))
+
+ // Ensure mongos is aware of the current topology.
+ s.Stop(":40201")
+ s.StartAll()
+
+ mongos, err := mgo.Dial("localhost:40202")
+ c.Assert(err, IsNil)
+ defer mongos.Close()
+
+ // Insert some data as otherwise 3.2+ doesn't seem to run the query at all.
+ err = mongos.DB("mydb").C("mycoll").Insert(bson.M{"n": 1})
+ c.Assert(err, IsNil)
+
+ // Wait until all servers see the data.
+ for _, addr := range []string{"localhost:40021", "localhost:40022", "localhost:40023"} {
+ session, err := mgo.Dial(addr + "?connect=direct")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ session.SetMode(mgo.Monotonic, true)
+ for i := 300; i >= 0; i-- {
+ n, err := session.DB("mydb").C("mycoll").Find(nil).Count()
+ c.Assert(err, IsNil)
+ if n == 1 {
+ break
+ }
+ if i == 0 {
+ c.Fatalf("Inserted data never reached " + addr)
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+
+ // Collect op counters for everyone.
+ q21a := s.countQueries(c, "localhost:40021")
+ q22a := s.countQueries(c, "localhost:40022")
+ q23a := s.countQueries(c, "localhost:40023")
+
+ // Do a SlaveOk query through MongoS
+
+ mongos.SetMode(mgo.Monotonic, true)
+
+ coll := mongos.DB("mydb").C("mycoll")
+ var result struct{ N int }
+ for i := 0; i != 5; i++ {
+ err = coll.Find(nil).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 1)
+ }
+
+ // Collect op counters for everyone again.
+ q21b := s.countQueries(c, "localhost:40021")
+ q22b := s.countQueries(c, "localhost:40022")
+ q23b := s.countQueries(c, "localhost:40023")
+
+ var masterDelta, slaveDelta int
+ switch hostPort(master) {
+ case "40021":
+ masterDelta = q21b - q21a
+ slaveDelta = (q22b - q22a) + (q23b - q23a)
+ case "40022":
+ masterDelta = q22b - q22a
+ slaveDelta = (q21b - q21a) + (q23b - q23a)
+ case "40023":
+ masterDelta = q23b - q23a
+ slaveDelta = (q21b - q21a) + (q22b - q22a)
+ default:
+ c.Fatal("Uh?")
+ }
+
+ c.Check(masterDelta, Equals, 0) // Just the counting itself.
+ c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above.
+}
+
+func (s *S) TestSecondaryModeWithMongos(c *C) {
+ session, err := mgo.Dial("localhost:40021")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ ssresult := &struct{ Host string }{}
+ imresult := &struct{ IsMaster bool }{}
+
+ // Figure out the master while still using the strong session.
+ err = session.Run("serverStatus", ssresult)
+ c.Assert(err, IsNil)
+ err = session.Run("isMaster", imresult)
+ c.Assert(err, IsNil)
+ master := ssresult.Host
+ c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))
+
+ // Ensure mongos is aware of the current topology.
+ s.Stop(":40201")
+ s.StartAll()
+
+ mongos, err := mgo.Dial("localhost:40202")
+ c.Assert(err, IsNil)
+ defer mongos.Close()
+
+ mongos.SetSyncTimeout(5 * time.Second)
+
+ // Insert some data as otherwise 3.2+ doesn't seem to run the query at all.
+ err = mongos.DB("mydb").C("mycoll").Insert(bson.M{"n": 1})
+ c.Assert(err, IsNil)
+
+ // Wait until all servers see the data.
+ for _, addr := range []string{"localhost:40021", "localhost:40022", "localhost:40023"} {
+ session, err := mgo.Dial(addr + "?connect=direct")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ session.SetMode(mgo.Monotonic, true)
+ for i := 300; i >= 0; i-- {
+ n, err := session.DB("mydb").C("mycoll").Find(nil).Count()
+ c.Assert(err, IsNil)
+ if n == 1 {
+ break
+ }
+ if i == 0 {
+ c.Fatalf("Inserted data never reached " + addr)
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ }
+
+ // Collect op counters for everyone.
+ q21a := s.countQueries(c, "localhost:40021")
+ q22a := s.countQueries(c, "localhost:40022")
+ q23a := s.countQueries(c, "localhost:40023")
+
+ // Do a Secondary query through MongoS
+
+ mongos.SetMode(mgo.Secondary, true)
+
+ coll := mongos.DB("mydb").C("mycoll")
+ var result struct{ N int }
+ for i := 0; i != 5; i++ {
+ err = coll.Find(nil).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 1)
+ }
+
+ // Collect op counters for everyone again.
+ q21b := s.countQueries(c, "localhost:40021")
+ q22b := s.countQueries(c, "localhost:40022")
+ q23b := s.countQueries(c, "localhost:40023")
+
+ var masterDelta, slaveDelta int
+ switch hostPort(master) {
+ case "40021":
+ masterDelta = q21b - q21a
+ slaveDelta = (q22b - q22a) + (q23b - q23a)
+ case "40022":
+ masterDelta = q22b - q22a
+ slaveDelta = (q21b - q21a) + (q23b - q23a)
+ case "40023":
+ masterDelta = q23b - q23a
+ slaveDelta = (q21b - q21a) + (q22b - q22a)
+ default:
+ c.Fatal("Uh?")
+ }
+
+ c.Check(masterDelta, Equals, 0) // Just the counting itself.
+ c.Check(slaveDelta, Equals, 5) // The counting for both, plus 5 queries above.
+}
+
+func (s *S) TestSecondaryModeWithMongosInsert(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40202")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Secondary, true)
+ session.SetSyncTimeout(4 * time.Second)
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ var result struct{ A int }
+ coll.Find(nil).One(&result)
+ c.Assert(result.A, Equals, 1)
+}
+
+func (s *S) TestRemovalOfClusterMember(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ master, err := mgo.Dial("localhost:40021")
+ c.Assert(err, IsNil)
+ defer master.Close()
+
+ // Wait for cluster to fully sync up.
+ for i := 0; i < 10; i++ {
+ if len(master.LiveServers()) == 3 {
+ break
+ }
+ time.Sleep(5e8)
+ }
+ if len(master.LiveServers()) != 3 {
+ c.Fatalf("Test started with bad cluster state: %v", master.LiveServers())
+ }
+
+ result := &struct {
+ IsMaster bool
+ Me string
+ }{}
+ slave := master.Copy()
+ slave.SetMode(mgo.Monotonic, true) // Monotonic can hold a non-master socket persistently.
+ err = slave.Run("isMaster", result)
+ c.Assert(err, IsNil)
+ c.Assert(result.IsMaster, Equals, false)
+ slaveAddr := result.Me
+
+ defer func() {
+ config := map[string]string{
+ "40021": `{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}}`,
+ "40022": `{_id: 2, host: "127.0.0.1:40022", priority: 0, tags: {rs2: "b"}}`,
+ "40023": `{_id: 3, host: "127.0.0.1:40023", priority: 0, tags: {rs2: "c"}}`,
+ }
+ master.Refresh()
+ master.Run(bson.D{{"$eval", `rs.add(` + config[hostPort(slaveAddr)] + `)`}}, nil)
+ master.Close()
+ slave.Close()
+
+ // Ensure suite syncs up with the changes before next test.
+ s.Stop(":40201")
+ s.StartAll()
+ time.Sleep(8 * time.Second)
+ // TODO Find a better way to find out when mongos is fully aware that all
+ // servers are up. Without that, follow-up tests that depend on mongos will
+ // break due to their expectation of things being in a working state.
+ }()
+
+ c.Logf("========== Removing slave: %s ==========", slaveAddr)
+
+ master.Run(bson.D{{"$eval", `rs.remove("` + slaveAddr + `")`}}, nil)
+
+ master.Refresh()
+
+ // Give the cluster a moment to catch up by doing a roundtrip to the master.
+ err = master.Ping()
+ c.Assert(err, IsNil)
+
+ time.Sleep(3e9)
+
+ // This must fail since the slave has been taken off the cluster.
+ err = slave.Ping()
+ c.Assert(err, NotNil)
+
+ for i := 0; i < 15; i++ {
+ if len(master.LiveServers()) == 2 {
+ break
+ }
+ time.Sleep(time.Second)
+ }
+ live := master.LiveServers()
+ if len(live) != 2 {
+ c.Errorf("Removed server still considered live: %#s", live)
+ }
+
+ c.Log("========== Test succeeded. ==========")
+}
+
+func (s *S) TestPoolLimitSimple(c *C) {
+ for test := 0; test < 2; test++ {
+ var session *mgo.Session
+ var err error
+ if test == 0 {
+ session, err = mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ session.SetPoolLimit(1)
+ } else {
+ session, err = mgo.Dial("localhost:40001?maxPoolSize=1")
+ c.Assert(err, IsNil)
+ }
+ defer session.Close()
+
+ // Put one socket in use.
+ c.Assert(session.Ping(), IsNil)
+
+ done := make(chan time.Duration)
+
+ // Now block trying to get another one due to the pool limit.
+ go func() {
+ copy := session.Copy()
+ defer copy.Close()
+ started := time.Now()
+ c.Check(copy.Ping(), IsNil)
+ done <- time.Now().Sub(started)
+ }()
+
+ time.Sleep(300 * time.Millisecond)
+
+ // Put the one socket back in the pool, freeing it for the copy.
+ session.Refresh()
+ delay := <-done
+ c.Assert(delay > 300*time.Millisecond, Equals, true, Commentf("Delay: %s", delay))
+ }
+}
+
+func (s *S) TestPoolLimitMany(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ stats := mgo.GetStats()
+ for stats.SocketsAlive != 3 {
+ c.Logf("Waiting for all connections to be established (sockets alive currently %d)...", stats.SocketsAlive)
+ stats = mgo.GetStats()
+ time.Sleep(5e8)
+ }
+
+ const poolLimit = 64
+ session.SetPoolLimit(poolLimit)
+
+ // Consume the whole limit for the master.
+ var master []*mgo.Session
+ for i := 0; i < poolLimit; i++ {
+ s := session.Copy()
+ defer s.Close()
+ c.Assert(s.Ping(), IsNil)
+ master = append(master, s)
+ }
+
+ before := time.Now()
+ go func() {
+ time.Sleep(3e9)
+ master[0].Refresh()
+ }()
+
+ // Then, a single ping must block, since it would need another
+ // connection to the master, over the limit. Once the goroutine
+ // above releases its socket, it should move on.
+ session.Ping()
+ delay := time.Now().Sub(before)
+ c.Assert(delay > 3e9, Equals, true)
+ c.Assert(delay < 6e9, Equals, true)
+}
+
+func (s *S) TestSetModeEventualIterBug(c *C) {
+ session1, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session1.Close()
+
+ session1.SetMode(mgo.Eventual, false)
+
+ coll1 := session1.DB("mydb").C("mycoll")
+
+ const N = 100
+ for i := 0; i < N; i++ {
+ err = coll1.Insert(M{"_id": i})
+ c.Assert(err, IsNil)
+ }
+
+ c.Logf("Waiting until secondary syncs")
+ for {
+ n, err := coll1.Count()
+ c.Assert(err, IsNil)
+ if n == N {
+ c.Logf("Found all")
+ break
+ }
+ }
+
+ session2, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session2.Close()
+
+ session2.SetMode(mgo.Eventual, false)
+
+ coll2 := session2.DB("mydb").C("mycoll")
+
+ i := 0
+ iter := coll2.Find(nil).Batch(10).Iter()
+ var result struct{}
+ for iter.Next(&result) {
+ i++
+ }
+ c.Assert(iter.Close(), Equals, nil)
+ c.Assert(i, Equals, N)
+}
+
+func (s *S) TestCustomDialOld(c *C) {
+ dials := make(chan bool, 16)
+ dial := func(addr net.Addr) (net.Conn, error) {
+ tcpaddr, ok := addr.(*net.TCPAddr)
+ if !ok {
+ return nil, fmt.Errorf("unexpected address type: %T", addr)
+ }
+ dials <- true
+ return net.DialTCP("tcp", nil, tcpaddr)
+ }
+ info := mgo.DialInfo{
+ Addrs: []string{"localhost:40012"},
+ Dial: dial,
+ }
+
+ // Use hostname here rather than IP, to make things trickier.
+ session, err := mgo.DialWithInfo(&info)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ const N = 3
+ for i := 0; i < N; i++ {
+ select {
+ case <-dials:
+ case <-time.After(5 * time.Second):
+ c.Fatalf("expected %d dials, got %d", N, i)
+ }
+ }
+ select {
+ case <-dials:
+ c.Fatalf("got more dials than expected")
+ case <-time.After(100 * time.Millisecond):
+ }
+}
+
+func (s *S) TestCustomDialNew(c *C) {
+ dials := make(chan bool, 16)
+ dial := func(addr *mgo.ServerAddr) (net.Conn, error) {
+ dials <- true
+ if addr.TCPAddr().Port == 40012 {
+ c.Check(addr.String(), Equals, "localhost:40012")
+ }
+ return net.DialTCP("tcp", nil, addr.TCPAddr())
+ }
+ info := mgo.DialInfo{
+ Addrs: []string{"localhost:40012"},
+ DialServer: dial,
+ }
+
+ // Use hostname here rather than IP, to make things trickier.
+ session, err := mgo.DialWithInfo(&info)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ const N = 3
+ for i := 0; i < N; i++ {
+ select {
+ case <-dials:
+ case <-time.After(5 * time.Second):
+ c.Fatalf("expected %d dials, got %d", N, i)
+ }
+ }
+ select {
+ case <-dials:
+ c.Fatalf("got more dials than expected")
+ case <-time.After(100 * time.Millisecond):
+ }
+}
+
+func (s *S) TestPrimaryShutdownOnAuthShard(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ // Dial the shard.
+ session, err := mgo.Dial("localhost:40203")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Login and insert something to make it more realistic.
+ session.DB("admin").Login("root", "rapadura")
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(bson.M{"n": 1})
+ c.Assert(err, IsNil)
+
+ // Dial the replica set to figure the master out.
+ rs, err := mgo.Dial("root:rapadura@localhost:40031")
+ c.Assert(err, IsNil)
+ defer rs.Close()
+
+ // With strong consistency, this will open a socket to the master.
+ result := &struct{ Host string }{}
+ err = rs.Run("serverStatus", result)
+ c.Assert(err, IsNil)
+
+ // Kill the master.
+ host := result.Host
+ s.Stop(host)
+
+ // This must fail, since the connection was broken.
+ err = rs.Run("serverStatus", result)
+ c.Assert(err, Equals, io.EOF)
+
+ // This won't work because the master just died.
+ err = coll.Insert(bson.M{"n": 2})
+ c.Assert(err, NotNil)
+
+ // Refresh session and wait for re-election.
+ session.Refresh()
+ for i := 0; i < 60; i++ {
+ err = coll.Insert(bson.M{"n": 3})
+ if err == nil {
+ break
+ }
+ c.Logf("Waiting for replica set to elect a new master. Last error: %v", err)
+ time.Sleep(500 * time.Millisecond)
+ }
+ c.Assert(err, IsNil)
+
+ count, err := coll.Count()
+ c.Assert(err, IsNil)
+ c.Assert(count > 1, Equals, true)
+}
+
+func (s *S) TestNearestSecondary(c *C) {
+ defer mgo.HackPingDelay(300 * time.Millisecond)()
+
+ rs1a := "127.0.0.1:40011"
+ rs1b := "127.0.0.1:40012"
+ rs1c := "127.0.0.1:40013"
+ s.Freeze(rs1b)
+
+ session, err := mgo.Dial(rs1a)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Wait for the sync up to run through the first couple of servers.
+ for len(session.LiveServers()) != 2 {
+ c.Log("Waiting for two servers to be alive...")
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ // Extra delay to ensure the third server gets penalized.
+ time.Sleep(500 * time.Millisecond)
+
+ // Release third server.
+ s.Thaw(rs1b)
+
+ // Wait for it to come up.
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for all servers to be alive...")
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ session.SetMode(mgo.Monotonic, true)
+ var result struct{ Host string }
+
+ // See which slave picks the line, several times to avoid chance.
+ for i := 0; i < 10; i++ {
+ session.Refresh()
+ err = session.Run("serverStatus", &result)
+ c.Assert(err, IsNil)
+ c.Assert(hostPort(result.Host), Equals, hostPort(rs1c))
+ }
+
+ if *fast {
+ // Don't hold back for several seconds.
+ return
+ }
+
+ // Now hold the other server for long enough to penalize it.
+ s.Freeze(rs1c)
+ time.Sleep(5 * time.Second)
+ s.Thaw(rs1c)
+
+ // Wait for the ping to be processed.
+ time.Sleep(500 * time.Millisecond)
+
+ // Repeating the test should now pick the former server consistently.
+ for i := 0; i < 10; i++ {
+ session.Refresh()
+ err = session.Run("serverStatus", &result)
+ c.Assert(err, IsNil)
+ c.Assert(hostPort(result.Host), Equals, hostPort(rs1b))
+ }
+}
+
+func (s *S) TestNearestServer(c *C) {
+ defer mgo.HackPingDelay(300 * time.Millisecond)()
+
+ rs1a := "127.0.0.1:40011"
+ rs1b := "127.0.0.1:40012"
+ rs1c := "127.0.0.1:40013"
+
+ session, err := mgo.Dial(rs1a)
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ s.Freeze(rs1a)
+ s.Freeze(rs1b)
+
+ // Extra delay to ensure the first two servers get penalized.
+ time.Sleep(500 * time.Millisecond)
+
+ // Release them.
+ s.Thaw(rs1a)
+ s.Thaw(rs1b)
+
+ // Wait for everyone to come up.
+ for len(session.LiveServers()) != 3 {
+ c.Log("Waiting for all servers to be alive...")
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ session.SetMode(mgo.Nearest, true)
+ var result struct{ Host string }
+
+ // See which server picks the line, several times to avoid chance.
+ for i := 0; i < 10; i++ {
+ session.Refresh()
+ err = session.Run("serverStatus", &result)
+ c.Assert(err, IsNil)
+ c.Assert(hostPort(result.Host), Equals, hostPort(rs1c))
+ }
+
+ if *fast {
+ // Don't hold back for several seconds.
+ return
+ }
+
+ // Now hold the two secondaries for long enough to penalize them.
+ s.Freeze(rs1b)
+ s.Freeze(rs1c)
+ time.Sleep(5 * time.Second)
+ s.Thaw(rs1b)
+ s.Thaw(rs1c)
+
+ // Wait for the ping to be processed.
+ time.Sleep(500 * time.Millisecond)
+
+ // Repeating the test should now pick the primary server consistently.
+ for i := 0; i < 10; i++ {
+ session.Refresh()
+ err = session.Run("serverStatus", &result)
+ c.Assert(err, IsNil)
+ c.Assert(hostPort(result.Host), Equals, hostPort(rs1a))
+ }
+}
+
+func (s *S) TestConnectCloseConcurrency(c *C) {
+ restore := mgo.HackPingDelay(500 * time.Millisecond)
+ defer restore()
+ var wg sync.WaitGroup
+ const n = 500
+ wg.Add(n)
+ for i := 0; i < n; i++ {
+ go func() {
+ defer wg.Done()
+ session, err := mgo.Dial("localhost:40001")
+ if err != nil {
+ c.Fatal(err)
+ }
+ time.Sleep(1)
+ session.Close()
+ }()
+ }
+ wg.Wait()
+}
+
+func (s *S) TestSelectServers(c *C) {
+ if !s.versionAtLeast(2, 2) {
+ c.Skip("read preferences introduced in 2.2")
+ }
+
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetMode(mgo.Eventual, true)
+
+ var result struct{ Host string }
+
+ session.Refresh()
+ session.SelectServers(bson.D{{"rs1", "b"}})
+ err = session.Run("serverStatus", &result)
+ c.Assert(err, IsNil)
+ c.Assert(hostPort(result.Host), Equals, "40012")
+
+ session.Refresh()
+ session.SelectServers(bson.D{{"rs1", "c"}})
+ err = session.Run("serverStatus", &result)
+ c.Assert(err, IsNil)
+ c.Assert(hostPort(result.Host), Equals, "40013")
+}
+
+func (s *S) TestSelectServersWithMongos(c *C) {
+ if !s.versionAtLeast(2, 2) {
+ c.Skip("read preferences introduced in 2.2")
+ }
+
+ session, err := mgo.Dial("localhost:40021")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ ssresult := &struct{ Host string }{}
+ imresult := &struct{ IsMaster bool }{}
+
+ // Figure out the master while still using the strong session.
+ err = session.Run("serverStatus", ssresult)
+ c.Assert(err, IsNil)
+ err = session.Run("isMaster", imresult)
+ c.Assert(err, IsNil)
+ master := ssresult.Host
+ c.Assert(imresult.IsMaster, Equals, true, Commentf("%s is not the master", master))
+
+ var slave1, slave2 string
+ switch hostPort(master) {
+ case "40021":
+ slave1, slave2 = "b", "c"
+ case "40022":
+ slave1, slave2 = "a", "c"
+ case "40023":
+ slave1, slave2 = "a", "b"
+ }
+
+ // Collect op counters for everyone.
+ q21a := s.countQueries(c, "localhost:40021")
+ q22a := s.countQueries(c, "localhost:40022")
+ q23a := s.countQueries(c, "localhost:40023")
+
+ // Do a SlaveOk query through MongoS
+ mongos, err := mgo.Dial("localhost:40202")
+ c.Assert(err, IsNil)
+ defer mongos.Close()
+
+ mongos.SetMode(mgo.Monotonic, true)
+
+ mongos.Refresh()
+ mongos.SelectServers(bson.D{{"rs2", slave1}})
+ coll := mongos.DB("mydb").C("mycoll")
+ result := &struct{}{}
+ for i := 0; i != 5; i++ {
+ err := coll.Find(nil).One(result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+ }
+
+ mongos.Refresh()
+ mongos.SelectServers(bson.D{{"rs2", slave2}})
+ coll = mongos.DB("mydb").C("mycoll")
+ for i := 0; i != 7; i++ {
+ err := coll.Find(nil).One(result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+ }
+
+ // Collect op counters for everyone again.
+ q21b := s.countQueries(c, "localhost:40021")
+ q22b := s.countQueries(c, "localhost:40022")
+ q23b := s.countQueries(c, "localhost:40023")
+
+ switch hostPort(master) {
+ case "40021":
+ c.Check(q21b-q21a, Equals, 0)
+ c.Check(q22b-q22a, Equals, 5)
+ c.Check(q23b-q23a, Equals, 7)
+ case "40022":
+ c.Check(q21b-q21a, Equals, 5)
+ c.Check(q22b-q22a, Equals, 0)
+ c.Check(q23b-q23a, Equals, 7)
+ case "40023":
+ c.Check(q21b-q21a, Equals, 5)
+ c.Check(q22b-q22a, Equals, 7)
+ c.Check(q23b-q23a, Equals, 0)
+ default:
+ c.Fatal("Uh?")
+ }
+}
+
+func (s *S) TestDoNotFallbackToMonotonic(c *C) {
+ // There was a bug at some point where some functions were
+ // falling back to Monotonic mode. This test ensures all listIndexes
+ // commands go to the primary, as should happen since the session is
+ // in Strong mode.
+ if !s.versionAtLeast(3, 0) {
+ c.Skip("command-counting logic depends on 3.0+")
+ }
+
+ session, err := mgo.Dial("localhost:40012")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ for i := 0; i < 15; i++ {
+ q11a := s.countCommands(c, "localhost:40011", "listIndexes")
+ q12a := s.countCommands(c, "localhost:40012", "listIndexes")
+ q13a := s.countCommands(c, "localhost:40013", "listIndexes")
+
+ _, err := session.DB("local").C("system.indexes").Indexes()
+ c.Assert(err, IsNil)
+
+ q11b := s.countCommands(c, "localhost:40011", "listIndexes")
+ q12b := s.countCommands(c, "localhost:40012", "listIndexes")
+ q13b := s.countCommands(c, "localhost:40013", "listIndexes")
+
+ c.Assert(q11b, Equals, q11a+1)
+ c.Assert(q12b, Equals, q12a)
+ c.Assert(q13b, Equals, q13a)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/dbserver.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/dbserver.go
new file mode 100644
index 00000000000..16b7b58417a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/dbserver.go
@@ -0,0 +1,196 @@
+package dbtest
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+ "os"
+ "os/exec"
+ "strconv"
+ "time"
+
+ "gopkg.in/mgo.v2"
+ "gopkg.in/tomb.v2"
+)
+
+// DBServer controls a MongoDB server process to be used within test suites.
+//
+// The test server is started when Session is called the first time and should
+// remain running for the duration of all tests, with the Wipe method being
+// called between tests (before each of them) to clear stored data. After all tests
+// are done, the Stop method should be called to stop the test server.
+//
+// Before the DBServer is used the SetPath method must be called to define
+// the location for the database files to be stored.
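+//
+// A typical lifecycle looks like this (an illustrative sketch; the
+// directory path is hypothetical):
+//
+//     var server DBServer
+//     server.SetPath("/tmp/dbtest")  // must be set before first use
+//     session := server.Session()    // the first call starts mongod
+//     // ... run a test ...
+//     session.Close()
+//     server.Wipe()                  // between tests, clears stored data
+//     server.Stop()                  // once, after all tests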
+type DBServer struct {
+ session *mgo.Session
+ output bytes.Buffer
+ server *exec.Cmd
+ dbpath string
+ host string
+ tomb tomb.Tomb
+}
+
+// SetPath defines the path to the directory where the database files will be
+// stored if the server is started. The directory path itself is not created
+// or removed by the test helper.
+func (dbs *DBServer) SetPath(dbpath string) {
+ dbs.dbpath = dbpath
+}
+
+func (dbs *DBServer) start() {
+ if dbs.server != nil {
+ panic("DBServer already started")
+ }
+ if dbs.dbpath == "" {
+ panic("DBServer.SetPath must be called before using the server")
+ }
+ mgo.SetStats(true)
+ l, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ panic("unable to listen on a local address: " + err.Error())
+ }
+ addr := l.Addr().(*net.TCPAddr)
+ l.Close()
+ dbs.host = addr.String()
+
+ args := []string{
+ "--dbpath", dbs.dbpath,
+ "--bind_ip", "127.0.0.1",
+ "--port", strconv.Itoa(addr.Port),
+ "--nssize", "1",
+ "--noprealloc",
+ "--smallfiles",
+ "--nojournal",
+ }
+ dbs.tomb = tomb.Tomb{}
+ dbs.server = exec.Command("mongod", args...)
+ dbs.server.Stdout = &dbs.output
+ dbs.server.Stderr = &dbs.output
+ err = dbs.server.Start()
+ if err != nil {
+ panic(err)
+ }
+ dbs.tomb.Go(dbs.monitor)
+ dbs.Wipe()
+}
+
+func (dbs *DBServer) monitor() error {
+ dbs.server.Process.Wait()
+ if dbs.tomb.Alive() {
+ // Present some debugging information.
+ fmt.Fprintf(os.Stderr, "---- mongod process died unexpectedly:\n")
+ fmt.Fprintf(os.Stderr, "%s", dbs.output.Bytes())
+ fmt.Fprintf(os.Stderr, "---- mongod processes running right now:\n")
+ cmd := exec.Command("/bin/sh", "-c", "ps auxw | grep mongod")
+ cmd.Stdout = os.Stderr
+ cmd.Stderr = os.Stderr
+ cmd.Run()
+ fmt.Fprintf(os.Stderr, "----------------------------------------\n")
+
+ panic("mongod process died unexpectedly")
+ }
+ return nil
+}
+
+// Stop stops the test server process, if it is running.
+//
+// It's okay to call Stop multiple times. After the test server is
+// stopped it cannot be restarted.
+//
+// All database sessions must be closed before or while the Stop method
+// is running. Otherwise Stop will panic after a timeout informing that
+// there is a session leak.
+func (dbs *DBServer) Stop() {
+ if dbs.session != nil {
+ dbs.checkSessions()
+ if dbs.session != nil {
+ dbs.session.Close()
+ dbs.session = nil
+ }
+ }
+ if dbs.server != nil {
+ dbs.tomb.Kill(nil)
+ dbs.server.Process.Signal(os.Interrupt)
+ select {
+ case <-dbs.tomb.Dead():
+ case <-time.After(5 * time.Second):
+ panic("timeout waiting for mongod process to die")
+ }
+ dbs.server = nil
+ }
+}
+
+// Session returns a new session to the server. The returned session
+// must be closed after the test is done with it.
+//
+// The first Session obtained from a DBServer will start it.
+func (dbs *DBServer) Session() *mgo.Session {
+ if dbs.server == nil {
+ dbs.start()
+ }
+ if dbs.session == nil {
+ mgo.ResetStats()
+ var err error
+ dbs.session, err = mgo.Dial(dbs.host + "/test")
+ if err != nil {
+ panic(err)
+ }
+ }
+ return dbs.session.Copy()
+}
+
+// checkSessions ensures all mgo sessions opened were properly closed.
+// For slightly faster tests, it may be disabled by setting the
+// environment variable CHECK_SESSIONS to 0.
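+// For example: CHECK_SESSIONS=0 go test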
+func (dbs *DBServer) checkSessions() {
+ if check := os.Getenv("CHECK_SESSIONS"); check == "0" || dbs.server == nil || dbs.session == nil {
+ return
+ }
+ dbs.session.Close()
+ dbs.session = nil
+ for i := 0; i < 100; i++ {
+ stats := mgo.GetStats()
+ if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
+ return
+ }
+ time.Sleep(100 * time.Millisecond)
+ }
+ panic("There are mgo sessions still alive.")
+}
+
+// Wipe drops all created databases and their data.
+//
+// The MongoDB server remains running if it was previously running,
+// or stopped if it was previously stopped.
+//
+// All database sessions must be closed before or while the Wipe method
+// is running. Otherwise Wipe will panic after a timeout informing that
+// there is a session leak.
+func (dbs *DBServer) Wipe() {
+ if dbs.server == nil || dbs.session == nil {
+ return
+ }
+ dbs.checkSessions()
+ sessionUnset := dbs.session == nil
+ session := dbs.Session()
+ defer session.Close()
+ if sessionUnset {
+ dbs.session.Close()
+ dbs.session = nil
+ }
+ names, err := session.DatabaseNames()
+ if err != nil {
+ panic(err)
+ }
+ for _, name := range names {
+ switch name {
+ case "admin", "local", "config":
+ default:
+ err = session.DB(name).DropDatabase()
+ if err != nil {
+ panic(err)
+ }
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/dbserver_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/dbserver_test.go
new file mode 100644
index 00000000000..79812fde34f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/dbserver_test.go
@@ -0,0 +1,108 @@
+package dbtest_test
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ . "gopkg.in/check.v1"
+
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/dbtest"
+)
+
+type M map[string]interface{}
+
+func TestAll(t *testing.T) {
+ TestingT(t)
+}
+
+type S struct {
+ oldCheckSessions string
+}
+
+var _ = Suite(&S{})
+
+func (s *S) SetUpTest(c *C) {
+ s.oldCheckSessions = os.Getenv("CHECK_SESSIONS")
+ os.Setenv("CHECK_SESSIONS", "")
+}
+
+func (s *S) TearDownTest(c *C) {
+ os.Setenv("CHECK_SESSIONS", s.oldCheckSessions)
+}
+
+func (s *S) TestWipeData(c *C) {
+ var server dbtest.DBServer
+ server.SetPath(c.MkDir())
+ defer server.Stop()
+
+ session := server.Session()
+ err := session.DB("mydb").C("mycoll").Insert(M{"a": 1})
+ session.Close()
+ c.Assert(err, IsNil)
+
+ server.Wipe()
+
+ session = server.Session()
+ names, err := session.DatabaseNames()
+ session.Close()
+ c.Assert(err, IsNil)
+ for _, name := range names {
+ if name != "local" && name != "admin" {
+ c.Fatalf("Wipe should have removed this database: %s", name)
+ }
+ }
+}
+
+func (s *S) TestStop(c *C) {
+ var server dbtest.DBServer
+ server.SetPath(c.MkDir())
+ defer server.Stop()
+
+ // Server should not be running.
+ process := server.ProcessTest()
+ c.Assert(process, IsNil)
+
+ session := server.Session()
+ addr := session.LiveServers()[0]
+ session.Close()
+
+ // Server should be running now.
+ process = server.ProcessTest()
+ p, err := os.FindProcess(process.Pid)
+ c.Assert(err, IsNil)
+ p.Release()
+
+ server.Stop()
+
+ // Server should not be running anymore.
+ session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond)
+ if session != nil {
+ session.Close()
+ c.Fatalf("Stop did not stop the server")
+ }
+}
+
+func (s *S) TestCheckSessions(c *C) {
+ var server dbtest.DBServer
+ server.SetPath(c.MkDir())
+ defer server.Stop()
+
+ session := server.Session()
+ defer session.Close()
+ c.Assert(server.Wipe, PanicMatches, "There are mgo sessions still alive.")
+}
+
+func (s *S) TestCheckSessionsDisabled(c *C) {
+ var server dbtest.DBServer
+ server.SetPath(c.MkDir())
+ defer server.Stop()
+
+ os.Setenv("CHECK_SESSIONS", "0")
+
+ // Should not panic, although it looks to Wipe like this session will leak.
+ session := server.Session()
+ defer session.Close()
+ server.Wipe()
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/export_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/export_test.go
new file mode 100644
index 00000000000..65f1cb02388
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/dbtest/export_test.go
@@ -0,0 +1,12 @@
+package dbtest
+
+import (
+ "os"
+)
+
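+// ProcessTest exposes the underlying mongod process handle for use in
+// tests, or returns nil if the server has not been started.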
+func (dbs *DBServer) ProcessTest() *os.Process {
+ if dbs.server == nil {
+ return nil
+ }
+ return dbs.server.Process
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/doc.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/doc.go
new file mode 100644
index 00000000000..859fd9b8df9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/doc.go
@@ -0,0 +1,31 @@
+// Package mgo offers a rich MongoDB driver for Go.
+//
+// Details about the mgo project (pronounced as "mango") are found
+// in its web page:
+//
+// http://labix.org/mgo
+//
+// Usage of the driver revolves around the concept of sessions. To
+// get started, obtain a session using the Dial function:
+//
+// session, err := mgo.Dial(url)
+//
+// This will establish one or more connections with the cluster of
+// servers defined by the url parameter. From then on, the cluster
+// may be queried with multiple consistency rules (see SetMode) and
+// documents retrieved with statements such as:
+//
+// c := session.DB(database).C(collection)
+// err := c.Find(query).One(&result)
+//
+// New sessions are typically created by calling session.Copy on the
+// initial session obtained at dial time. These new sessions will share
+// the same cluster information and connection pool, and may be easily
+// handed into other methods and functions for organizing logic.
+// Every session created must have its Close method called at the end
+// of its life time, so its resources may be put back in the pool or
+// collected, depending on the case.
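+//
+// For instance (an illustrative sketch):
+//
+//     copied := session.Copy()
+//     defer copied.Close()
+//     err := copied.DB(database).C(collection).Find(query).One(&result)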
+//
+// For more details, see the documentation for the types and methods.
+//
+package mgo
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/export_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/export_test.go
new file mode 100644
index 00000000000..690f84d3835
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/export_test.go
@@ -0,0 +1,33 @@
+package mgo
+
+import (
+ "time"
+)
+
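+// HackPingDelay temporarily overrides the delay between cluster pings and
+// returns a function that restores the previous value. Tests typically use
+// it as: defer HackPingDelay(300 * time.Millisecond)()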
+func HackPingDelay(newDelay time.Duration) (restore func()) {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+
+ oldDelay := pingDelay
+ restore = func() {
+ globalMutex.Lock()
+ pingDelay = oldDelay
+ globalMutex.Unlock()
+ }
+ pingDelay = newDelay
+ return
+}
+
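+// HackSyncSocketTimeout temporarily overrides the socket timeout used while
+// syncing with the cluster and returns a function that restores the
+// previous value.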
+func HackSyncSocketTimeout(newTimeout time.Duration) (restore func()) {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+
+ oldTimeout := syncSocketTimeout
+ restore = func() {
+ globalMutex.Lock()
+ syncSocketTimeout = oldTimeout
+ globalMutex.Unlock()
+ }
+ syncSocketTimeout = newTimeout
+ return
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/gridfs.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/gridfs.go
new file mode 100644
index 00000000000..421472095cf
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/gridfs.go
@@ -0,0 +1,761 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "errors"
+ "hash"
+ "io"
+ "os"
+ "sync"
+ "time"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
+type GridFS struct {
+ Files *Collection
+ Chunks *Collection
+}
+
+type gfsFileMode int
+
+const (
+ gfsClosed gfsFileMode = 0
+ gfsReading gfsFileMode = 1
+ gfsWriting gfsFileMode = 2
+)
+
+type GridFile struct {
+ m sync.Mutex
+ c sync.Cond
+ gfs *GridFS
+ mode gfsFileMode
+ err error
+
+ chunk int
+ offset int64
+
+ wpending int
+ wbuf []byte
+ wsum hash.Hash
+
+ rbuf []byte
+ rcache *gfsCachedChunk
+
+ doc gfsFile
+}
+
+type gfsFile struct {
+ Id interface{} "_id"
+ ChunkSize int "chunkSize"
+ UploadDate time.Time "uploadDate"
+ Length int64 ",minsize"
+ MD5 string
+ Filename string ",omitempty"
+ ContentType string "contentType,omitempty"
+ Metadata *bson.Raw ",omitempty"
+}
+
+type gfsChunk struct {
+ Id interface{} "_id"
+ FilesId interface{} "files_id"
+ N int
+ Data []byte
+}
+
+type gfsCachedChunk struct {
+ wait sync.Mutex
+ n int
+ data []byte
+ err error
+}
+
+func newGridFS(db *Database, prefix string) *GridFS {
+ return &GridFS{db.C(prefix + ".files"), db.C(prefix + ".chunks")}
+}
+
+func (gfs *GridFS) newFile() *GridFile {
+ file := &GridFile{gfs: gfs}
+ file.c.L = &file.m
+ //runtime.SetFinalizer(file, finalizeFile)
+ return file
+}
+
+func finalizeFile(file *GridFile) {
+ file.Close()
+}
+
+// Create creates a new file with the provided name in the GridFS. If the file
+// name already exists, a new version will be inserted with an up-to-date
+// uploadDate that will cause it to be atomically visible to the Open and
+// OpenId methods. If the file name is not important, an empty name may be
+// provided and the file Id used instead.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// A simple example inserting a new file:
+//
+// func check(err error) {
+// if err != nil {
+// panic(err)
+// }
+// }
+// file, err := db.GridFS("fs").Create("myfile.txt")
+// check(err)
+// n, err := file.Write([]byte("Hello world!"))
+// check(err)
+// err = file.Close()
+// check(err)
+// fmt.Printf("%d bytes written\n", n)
+//
+// The io.Writer interface is implemented by *GridFile and may be used to
+// help with file creation. For example:
+//
+// file, err := db.GridFS("fs").Create("myfile.txt")
+// check(err)
+// messages, err := os.Open("/var/log/messages")
+// check(err)
+// defer messages.Close()
+// _, err = io.Copy(file, messages)
+// check(err)
+// err = file.Close()
+// check(err)
+//
+func (gfs *GridFS) Create(name string) (file *GridFile, err error) {
+ file = gfs.newFile()
+ file.mode = gfsWriting
+ file.wsum = md5.New()
+ file.doc = gfsFile{Id: bson.NewObjectId(), ChunkSize: 255 * 1024, Filename: name}
+ return
+}
+
+// OpenId returns the file with the provided id, for reading.
+// If the file isn't found, err will be set to mgo.ErrNotFound.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// The following example will print the first 8192 bytes from the file:
+//
+// func check(err error) {
+// if err != nil {
+// panic(err)
+// }
+// }
+// file, err := db.GridFS("fs").OpenId(objid)
+// check(err)
+// b := make([]byte, 8192)
+// n, err := file.Read(b)
+// check(err)
+// fmt.Println(string(b))
+// check(err)
+// err = file.Close()
+// check(err)
+// fmt.Printf("%d bytes read\n", n)
+//
+// The io.Reader interface is implemented by *GridFile and may be used to
+// deal with it. As an example, the following snippet will dump the whole
+// file into the standard output:
+//
+// file, err := db.GridFS("fs").OpenId(objid)
+// check(err)
+// _, err = io.Copy(os.Stdout, file)
+// check(err)
+// err = file.Close()
+// check(err)
+//
+func (gfs *GridFS) OpenId(id interface{}) (file *GridFile, err error) {
+ var doc gfsFile
+ err = gfs.Files.Find(bson.M{"_id": id}).One(&doc)
+ if err != nil {
+ return
+ }
+ file = gfs.newFile()
+ file.mode = gfsReading
+ file.doc = doc
+ return
+}
+
+// Open returns the most recently uploaded file with the provided
+// name, for reading. If the file isn't found, err will be set
+// to mgo.ErrNotFound.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+//
+// The following example will print the first 8192 bytes from the file:
+//
+// file, err := db.GridFS("fs").Open("myfile.txt")
+// check(err)
+// b := make([]byte, 8192)
+// n, err := file.Read(b)
+// check(err)
+// fmt.Println(string(b))
+// check(err)
+// err = file.Close()
+// check(err)
+// fmt.Printf("%d bytes read\n", n)
+//
+// The io.Reader interface is implemented by *GridFile and may be used to
+// deal with it. As an example, the following snippet will dump the whole
+// file into the standard output:
+//
+// file, err := db.GridFS("fs").Open("myfile.txt")
+// check(err)
+// _, err = io.Copy(os.Stdout, file)
+// check(err)
+// err = file.Close()
+// check(err)
+//
+func (gfs *GridFS) Open(name string) (file *GridFile, err error) {
+ var doc gfsFile
+ err = gfs.Files.Find(bson.M{"filename": name}).Sort("-uploadDate").One(&doc)
+ if err != nil {
+ return
+ }
+ file = gfs.newFile()
+ file.mode = gfsReading
+ file.doc = doc
+ return
+}
+
+// OpenNext opens the next file from iter for reading, sets *file to it,
+// and returns true on success. If no more documents are available
+// on iter or an error occurred, *file is set to nil and the result is false.
+// Errors will be available via iter.Err().
+//
+// The iter parameter must be an iterator on the GridFS files collection.
+// Using the GridFS.Find method is an easy way to obtain such an iterator,
+// but any iterator on the collection will work.
+//
+// If the provided *file is non-nil, OpenNext will close it before attempting
+// to iterate to the next element. This means that in a loop one only
+// has to worry about closing files when breaking out of the loop early
+// (break, return, or panic).
+//
+// For example:
+//
+// gfs := db.GridFS("fs")
+// query := gfs.Find(nil).Sort("filename")
+// iter := query.Iter()
+// var f *mgo.GridFile
+// for gfs.OpenNext(iter, &f) {
+// fmt.Printf("Filename: %s\n", f.Name())
+// }
+// if iter.Close() != nil {
+// panic(iter.Close())
+// }
+//
+func (gfs *GridFS) OpenNext(iter *Iter, file **GridFile) bool {
+ if *file != nil {
+ // Ignoring the error here shouldn't be a big deal
+ // as we're reading the file and the loop iteration
+ // for this file is finished.
+ _ = (*file).Close()
+ }
+ var doc gfsFile
+ if !iter.Next(&doc) {
+ *file = nil
+ return false
+ }
+ f := gfs.newFile()
+ f.mode = gfsReading
+ f.doc = doc
+ *file = f
+ return true
+}
+
+// Find runs query on GridFS's files collection and returns
+// the resulting Query.
+//
+// This logic:
+//
+// gfs := db.GridFS("fs")
+// iter := gfs.Find(nil).Iter()
+//
+// Is equivalent to:
+//
+// files := db.C("fs" + ".files")
+// iter := files.Find(nil).Iter()
+//
+func (gfs *GridFS) Find(query interface{}) *Query {
+ return gfs.Files.Find(query)
+}
+
+// RemoveId deletes the file with the provided id from the GridFS.
+func (gfs *GridFS) RemoveId(id interface{}) error {
+ err := gfs.Files.Remove(bson.M{"_id": id})
+ if err != nil {
+ return err
+ }
+ _, err = gfs.Chunks.RemoveAll(bson.D{{"files_id", id}})
+ return err
+}
+
+type gfsDocId struct {
+ Id interface{} "_id"
+}
+
+// Remove deletes all files with the provided name from the GridFS.
+func (gfs *GridFS) Remove(name string) (err error) {
+ iter := gfs.Files.Find(bson.M{"filename": name}).Select(bson.M{"_id": 1}).Iter()
+ var doc gfsDocId
+ for iter.Next(&doc) {
+ if e := gfs.RemoveId(doc.Id); e != nil {
+ err = e
+ }
+ }
+ if err == nil {
+ err = iter.Close()
+ }
+ return err
+}
+
+func (file *GridFile) assertMode(mode gfsFileMode) {
+ switch file.mode {
+ case mode:
+ return
+ case gfsWriting:
+ panic("GridFile is open for writing")
+ case gfsReading:
+ panic("GridFile is open for reading")
+ case gfsClosed:
+ panic("GridFile is closed")
+ default:
+ panic("internal error: missing GridFile mode")
+ }
+}
+
+// SetChunkSize sets size of saved chunks. Once the file is written to, it
+// will be split in blocks of that size and each block saved into an
+// independent chunk document. The default chunk size is 255 KB.
+//
+// It is a runtime error to call this function once the file has started
+// being written to.
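+//
+// For example (an illustrative size):
+//
+//     file.SetChunkSize(1024 * 1024) // split the file into 1 MB chunks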
+func (file *GridFile) SetChunkSize(bytes int) {
+ file.assertMode(gfsWriting)
+ debugf("GridFile %p: setting chunk size to %d", file, bytes)
+ file.m.Lock()
+ file.doc.ChunkSize = bytes
+ file.m.Unlock()
+}
+
+// Id returns the current file Id.
+func (file *GridFile) Id() interface{} {
+ return file.doc.Id
+}
+
+// SetId changes the current file Id.
+//
+// It is a runtime error to call this function once the file has started
+// being written to, or when the file is not open for writing.
+func (file *GridFile) SetId(id interface{}) {
+ file.assertMode(gfsWriting)
+ file.m.Lock()
+ file.doc.Id = id
+ file.m.Unlock()
+}
+
+// Name returns the optional file name. An empty string will be returned
+// in case it is unset.
+func (file *GridFile) Name() string {
+ return file.doc.Filename
+}
+
+// SetName changes the optional file name. An empty string may be used to
+// unset it.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetName(name string) {
+ file.assertMode(gfsWriting)
+ file.m.Lock()
+ file.doc.Filename = name
+ file.m.Unlock()
+}
+
+// ContentType returns the optional file content type. An empty string will be
+// returned in case it is unset.
+func (file *GridFile) ContentType() string {
+ return file.doc.ContentType
+}
+
+// SetContentType changes the optional file content type. An empty string may be
+// used to unset it.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetContentType(ctype string) {
+ file.assertMode(gfsWriting)
+ file.m.Lock()
+ file.doc.ContentType = ctype
+ file.m.Unlock()
+}
+
+// GetMeta unmarshals the optional "metadata" field associated with the
+// file into the result parameter. The meaning of keys under that field
+// is user-defined. For example:
+//
+// result := struct{ INode int }{}
+// err = file.GetMeta(&result)
+// if err != nil {
+// panic(err)
+// }
+// fmt.Printf("inode: %d\n", result.INode)
+//
+func (file *GridFile) GetMeta(result interface{}) (err error) {
+ file.m.Lock()
+ if file.doc.Metadata != nil {
+ err = bson.Unmarshal(file.doc.Metadata.Data, result)
+ }
+ file.m.Unlock()
+ return
+}
+
+// SetMeta changes the optional "metadata" field associated with the
+// file. The meaning of keys under that field is user-defined.
+// For example:
+//
+// file.SetMeta(bson.M{"inode": inode})
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetMeta(metadata interface{}) {
+ file.assertMode(gfsWriting)
+ data, err := bson.Marshal(metadata)
+ file.m.Lock()
+ if err != nil && file.err == nil {
+ file.err = err
+ } else {
+ file.doc.Metadata = &bson.Raw{Data: data}
+ }
+ file.m.Unlock()
+}
+
+// Size returns the file size in bytes.
+func (file *GridFile) Size() (bytes int64) {
+ file.m.Lock()
+ bytes = file.doc.Length
+ file.m.Unlock()
+ return
+}
+
+// MD5 returns the file MD5 as a hex-encoded string.
+func (file *GridFile) MD5() (md5 string) {
+ return file.doc.MD5
+}
+
+// UploadDate returns the file upload time.
+func (file *GridFile) UploadDate() time.Time {
+ return file.doc.UploadDate
+}
+
+// SetUploadDate changes the file upload time.
+//
+// It is a runtime error to call this function when the file is not open
+// for writing.
+func (file *GridFile) SetUploadDate(t time.Time) {
+ file.assertMode(gfsWriting)
+ file.m.Lock()
+ file.doc.UploadDate = t
+ file.m.Unlock()
+}
+
+// Close flushes any pending changes in case the file is being written
+// to, waits for any background operations to finish, and closes the file.
+//
+// It's important to Close files whether they are being written to
+// or read from, and to check the err result to ensure the operation
+// completed successfully.
+func (file *GridFile) Close() (err error) {
+ file.m.Lock()
+ defer file.m.Unlock()
+ if file.mode == gfsWriting {
+ if len(file.wbuf) > 0 && file.err == nil {
+ file.insertChunk(file.wbuf)
+ file.wbuf = file.wbuf[0:0]
+ }
+ file.completeWrite()
+ } else if file.mode == gfsReading && file.rcache != nil {
+ file.rcache.wait.Lock()
+ file.rcache = nil
+ }
+ file.mode = gfsClosed
+ debugf("GridFile %p: closed", file)
+ return file.err
+}
+
+func (file *GridFile) completeWrite() {
+ for file.wpending > 0 {
+ debugf("GridFile %p: waiting for %d pending chunks to complete file write", file, file.wpending)
+ file.c.Wait()
+ }
+ if file.err == nil {
+ hexsum := hex.EncodeToString(file.wsum.Sum(nil))
+ if file.doc.UploadDate.IsZero() {
+ file.doc.UploadDate = bson.Now()
+ }
+ file.doc.MD5 = hexsum
+ file.err = file.gfs.Files.Insert(file.doc)
+ }
+ if file.err != nil {
+ file.gfs.Chunks.RemoveAll(bson.D{{"files_id", file.doc.Id}})
+ }
+ if file.err == nil {
+ index := Index{
+ Key: []string{"files_id", "n"},
+ Unique: true,
+ }
+ file.err = file.gfs.Chunks.EnsureIndex(index)
+ }
+}
+
+// Abort cancels an in-progress write, preventing the file from being
+// automatically created and ensuring previously written chunks are
+// removed when the file is closed.
+//
+// It is a runtime error to call Abort when the file was not opened
+// for writing.
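+//
+// A sketch of typical use (somethingWentWrong is a hypothetical condition,
+// and check is the helper from the Create example):
+//
+//     file, err := db.GridFS("fs").Create("partial.bin")
+//     check(err)
+//     if somethingWentWrong {
+//         file.Abort()
+//     }
+//     err = file.Close() // after Abort, Close reports "write aborted"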
+func (file *GridFile) Abort() {
+ if file.mode != gfsWriting {
+ panic("file.Abort must be called on file opened for writing")
+ }
+ file.err = errors.New("write aborted")
+}
+
+// Write writes the provided data to the file and returns the
+// number of bytes written and an error in case something
+// went wrong.
+//
+// The file will internally cache the data so that all but the last
+// chunk sent to the database have the size defined by SetChunkSize.
+// This also means that errors may be deferred until a future call
+// to Write or Close.
+//
+// The parameters and behavior of this function turn the file
+// into an io.Writer.
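+//
+// Because errors may be deferred, the error returned by Close must be
+// checked as well (a sketch, using the check helper from the Create
+// example above):
+//
+//     _, err := file.Write(data)
+//     check(err)
+//     err = file.Close()
+//     check(err)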
+func (file *GridFile) Write(data []byte) (n int, err error) {
+ file.assertMode(gfsWriting)
+ file.m.Lock()
+ debugf("GridFile %p: writing %d bytes", file, len(data))
+ defer file.m.Unlock()
+
+ if file.err != nil {
+ return 0, file.err
+ }
+
+ n = len(data)
+ file.doc.Length += int64(n)
+ chunkSize := file.doc.ChunkSize
+
+ if len(file.wbuf)+len(data) < chunkSize {
+ file.wbuf = append(file.wbuf, data...)
+ return
+ }
+
+ // First, flush file.wbuf complementing with data.
+ if len(file.wbuf) > 0 {
+ missing := chunkSize - len(file.wbuf)
+ if missing > len(data) {
+ missing = len(data)
+ }
+ file.wbuf = append(file.wbuf, data[:missing]...)
+ data = data[missing:]
+ file.insertChunk(file.wbuf)
+ file.wbuf = file.wbuf[0:0]
+ }
+
+ // Then, flush all chunks from data without copying.
+ for len(data) > chunkSize {
+ size := chunkSize
+ if size > len(data) {
+ size = len(data)
+ }
+ file.insertChunk(data[:size])
+ data = data[size:]
+ }
+
+ // And append the rest for a future call.
+ file.wbuf = append(file.wbuf, data...)
+
+ return n, file.err
+}
+
+func (file *GridFile) insertChunk(data []byte) {
+ n := file.chunk
+ file.chunk++
+ debugf("GridFile %p: adding to checksum: %q", file, string(data))
+ file.wsum.Write(data)
+
+ for file.doc.ChunkSize*file.wpending >= 1024*1024 {
+ // Hold on... we've got a megabyte pending.
+ file.c.Wait()
+ if file.err != nil {
+ return
+ }
+ }
+
+ file.wpending++
+
+ debugf("GridFile %p: inserting chunk %d with %d bytes", file, n, len(data))
+
+ // We may not own the memory of data, so rather than
+ // simply copying it, we'll marshal the document ahead of time.
+ data, err := bson.Marshal(gfsChunk{bson.NewObjectId(), file.doc.Id, n, data})
+ if err != nil {
+ file.err = err
+ return
+ }
+
+ go func() {
+ err := file.gfs.Chunks.Insert(bson.Raw{Data: data})
+ file.m.Lock()
+ file.wpending--
+ if err != nil && file.err == nil {
+ file.err = err
+ }
+ file.c.Broadcast()
+ file.m.Unlock()
+ }()
+}
+
+// Seek sets the offset for the next Read or Write on file to
+// offset, interpreted according to whence: 0 means relative to
+// the origin of the file, 1 means relative to the current offset,
+// and 2 means relative to the end. It returns the new offset and
+// an error, if any.
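+//
+// For example, the size of the file may be obtained without reading any
+// chunk data (a sketch):
+//
+//     size, err := file.Seek(0, os.SEEK_END)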
+func (file *GridFile) Seek(offset int64, whence int) (pos int64, err error) {
+ file.m.Lock()
+ debugf("GridFile %p: seeking for %s (whence=%d)", file, offset, whence)
+ defer file.m.Unlock()
+ switch whence {
+ case os.SEEK_SET:
+ case os.SEEK_CUR:
+ offset += file.offset
+ case os.SEEK_END:
+ offset += file.doc.Length
+ default:
+ panic("unsupported whence value")
+ }
+ if offset > file.doc.Length {
+ return file.offset, errors.New("seek past end of file")
+ }
+ if offset == file.doc.Length {
+ // If we're seeking to the end of the file,
+ // no need to read anything. This enables
+ // a client to find the size of the file using only the
+ // io.ReadSeeker interface with low overhead.
+ file.offset = offset
+ return file.offset, nil
+ }
+ chunk := int(offset / int64(file.doc.ChunkSize))
+ if chunk+1 == file.chunk && offset >= file.offset {
+ file.rbuf = file.rbuf[int(offset-file.offset):]
+ file.offset = offset
+ return file.offset, nil
+ }
+ file.offset = offset
+ file.chunk = chunk
+ file.rbuf = nil
+ file.rbuf, err = file.getChunk()
+ if err == nil {
+ file.rbuf = file.rbuf[int(file.offset-int64(chunk)*int64(file.doc.ChunkSize)):]
+ }
+ return file.offset, err
+}
+
+// Read reads into b the next available data from the file and
+// returns the number of bytes written and an error in case
+// something wrong happened. At the end of the file, n will
+// be zero and err will be set to io.EOF.
+//
+// The parameters and behavior of this function turn the file
+// into an io.Reader.
+func (file *GridFile) Read(b []byte) (n int, err error) {
+ file.assertMode(gfsReading)
+ file.m.Lock()
+ debugf("GridFile %p: reading at offset %d into buffer of length %d", file, file.offset, len(b))
+ defer file.m.Unlock()
+ if file.offset == file.doc.Length {
+ return 0, io.EOF
+ }
+ for err == nil {
+ i := copy(b, file.rbuf)
+ n += i
+ file.offset += int64(i)
+ file.rbuf = file.rbuf[i:]
+ if i == len(b) || file.offset == file.doc.Length {
+ break
+ }
+ b = b[i:]
+ file.rbuf, err = file.getChunk()
+ }
+ return n, err
+}
+
+func (file *GridFile) getChunk() (data []byte, err error) {
+ cache := file.rcache
+ file.rcache = nil
+ if cache != nil && cache.n == file.chunk {
+ debugf("GridFile %p: Getting chunk %d from cache", file, file.chunk)
+ cache.wait.Lock()
+ data, err = cache.data, cache.err
+ } else {
+ debugf("GridFile %p: Fetching chunk %d", file, file.chunk)
+ var doc gfsChunk
+ err = file.gfs.Chunks.Find(bson.D{{"files_id", file.doc.Id}, {"n", file.chunk}}).One(&doc)
+ data = doc.Data
+ }
+ file.chunk++
+ if int64(file.chunk)*int64(file.doc.ChunkSize) < file.doc.Length {
+ // Read the next one in background.
+ cache = &gfsCachedChunk{n: file.chunk}
+ cache.wait.Lock()
+ debugf("GridFile %p: Scheduling chunk %d for background caching", file, file.chunk)
+ // Clone the session to avoid having it closed in between.
+ chunks := file.gfs.Chunks
+ session := chunks.Database.Session.Clone()
+ go func(id interface{}, n int) {
+ defer session.Close()
+ chunks = chunks.With(session)
+ var doc gfsChunk
+ cache.err = chunks.Find(bson.D{{"files_id", id}, {"n", n}}).One(&doc)
+ cache.data = doc.Data
+ cache.wait.Unlock()
+ }(file.doc.Id, file.chunk)
+ file.rcache = cache
+ }
+ debugf("Returning err: %#v", err)
+ return
+}
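Together, Seek and Read make GridFile satisfy io.ReadSeeker, so a client can learn a stored file's size without fetching a single chunk and then rewind to stream its contents. A minimal sketch of that pattern (not part of the diff; the server address is reused from the tests below, while "mydb" and "myfile.txt" are hypothetical names):

    package main

    import (
        "fmt"
        "io"
        "os"

        "gopkg.in/mgo.v2"
    )

    func main() {
        session, err := mgo.Dial("localhost:40011")
        if err != nil {
            panic(err)
        }
        defer session.Close()

        // "mydb" and "myfile.txt" are assumed to exist for this sketch.
        file, err := session.DB("mydb").GridFS("fs").Open("myfile.txt")
        if err != nil {
            panic(err)
        }
        defer file.Close()

        // Seeking to the end is answered from the fs.files length field;
        // the fast path in Seek above never touches fs.chunks.
        size, err := file.Seek(0, os.SEEK_END)
        if err != nil {
            panic(err)
        }

        // Rewind and read; Read refills its buffer from fs.chunks on demand.
        if _, err := file.Seek(0, os.SEEK_SET); err != nil {
            panic(err)
        }
        buf := make([]byte, 16)
        n, err := file.Read(buf)
        if err != nil && err != io.EOF {
            panic(err)
        }
        fmt.Printf("size=%d, first %d bytes: %q\n", size, n, buf[:n])
    }
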
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/gridfs_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/gridfs_test.go
new file mode 100644
index 00000000000..5a6ed555950
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/gridfs_test.go
@@ -0,0 +1,708 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+ "io"
+ "os"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+)
+
+func (s *S) TestGridFSCreate(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ before := bson.Now()
+
+ gfs := db.GridFS("fs")
+ file, err := gfs.Create("")
+ c.Assert(err, IsNil)
+
+ n, err := file.Write([]byte("some data"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 9)
+
+ err = file.Close()
+ c.Assert(err, IsNil)
+
+ after := bson.Now()
+
+ // Check the file information.
+ result := M{}
+ err = db.C("fs.files").Find(nil).One(result)
+ c.Assert(err, IsNil)
+
+ fileId, ok := result["_id"].(bson.ObjectId)
+ c.Assert(ok, Equals, true)
+ c.Assert(fileId.Valid(), Equals, true)
+ result["_id"] = "<id>"
+
+ ud, ok := result["uploadDate"].(time.Time)
+ c.Assert(ok, Equals, true)
+ c.Assert(ud.After(before) && ud.Before(after), Equals, true)
+ result["uploadDate"] = "<timestamp>"
+
+ expected := M{
+ "_id": "<id>",
+ "length": 9,
+ "chunkSize": 255 * 1024,
+ "uploadDate": "<timestamp>",
+ "md5": "1e50210a0202497fb79bc38b6ade6c34",
+ }
+ c.Assert(result, DeepEquals, expected)
+
+ // Check the chunk.
+ result = M{}
+ err = db.C("fs.chunks").Find(nil).One(result)
+ c.Assert(err, IsNil)
+
+ chunkId, ok := result["_id"].(bson.ObjectId)
+ c.Assert(ok, Equals, true)
+ c.Assert(chunkId.Valid(), Equals, true)
+ result["_id"] = "<id>"
+
+ expected = M{
+ "_id": "<id>",
+ "files_id": fileId,
+ "n": 0,
+ "data": []byte("some data"),
+ }
+ c.Assert(result, DeepEquals, expected)
+
+ // Check that an index was created.
+ indexes, err := db.C("fs.chunks").Indexes()
+ c.Assert(err, IsNil)
+ c.Assert(len(indexes), Equals, 2)
+ c.Assert(indexes[1].Key, DeepEquals, []string{"files_id", "n"})
+}
+
+func (s *S) TestGridFSFileDetails(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+
+ file, err := gfs.Create("myfile1.txt")
+ c.Assert(err, IsNil)
+
+ n, err := file.Write([]byte("some"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 4)
+
+ c.Assert(file.Size(), Equals, int64(4))
+
+ n, err = file.Write([]byte(" data"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 5)
+
+ c.Assert(file.Size(), Equals, int64(9))
+
+ id, _ := file.Id().(bson.ObjectId)
+ c.Assert(id.Valid(), Equals, true)
+ c.Assert(file.Name(), Equals, "myfile1.txt")
+ c.Assert(file.ContentType(), Equals, "")
+
+ var info interface{}
+ err = file.GetMeta(&info)
+ c.Assert(err, IsNil)
+ c.Assert(info, IsNil)
+
+ file.SetId("myid")
+ file.SetName("myfile2.txt")
+ file.SetContentType("text/plain")
+ file.SetMeta(M{"any": "thing"})
+
+ c.Assert(file.Id(), Equals, "myid")
+ c.Assert(file.Name(), Equals, "myfile2.txt")
+ c.Assert(file.ContentType(), Equals, "text/plain")
+
+ err = file.GetMeta(&info)
+ c.Assert(err, IsNil)
+ c.Assert(info, DeepEquals, bson.M{"any": "thing"})
+
+ err = file.Close()
+ c.Assert(err, IsNil)
+
+ c.Assert(file.MD5(), Equals, "1e50210a0202497fb79bc38b6ade6c34")
+
+ ud := file.UploadDate()
+ now := time.Now()
+ c.Assert(ud.Before(now), Equals, true)
+ c.Assert(ud.After(now.Add(-3*time.Second)), Equals, true)
+
+ result := M{}
+ err = db.C("fs.files").Find(nil).One(result)
+ c.Assert(err, IsNil)
+
+ result["uploadDate"] = "<timestamp>"
+
+ expected := M{
+ "_id": "myid",
+ "length": 9,
+ "chunkSize": 255 * 1024,
+ "uploadDate": "<timestamp>",
+ "md5": "1e50210a0202497fb79bc38b6ade6c34",
+ "filename": "myfile2.txt",
+ "contentType": "text/plain",
+ "metadata": M{"any": "thing"},
+ }
+ c.Assert(result, DeepEquals, expected)
+}
+
+func (s *S) TestGridFSSetUploadDate(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+ file, err := gfs.Create("")
+ c.Assert(err, IsNil)
+
+ t := time.Date(2014, 1, 1, 1, 1, 1, 0, time.Local)
+ file.SetUploadDate(t)
+
+ err = file.Close()
+ c.Assert(err, IsNil)
+
+ // Check the file information.
+ result := M{}
+ err = db.C("fs.files").Find(nil).One(result)
+ c.Assert(err, IsNil)
+
+ ud := result["uploadDate"].(time.Time)
+ if !ud.Equal(t) {
+ c.Fatalf("want upload date %s, got %s", t, ud)
+ }
+}
+
+func (s *S) TestGridFSCreateWithChunking(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+
+ file, err := gfs.Create("")
+ c.Assert(err, IsNil)
+
+ file.SetChunkSize(5)
+
+ // Smaller than the chunk size.
+ n, err := file.Write([]byte("abc"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 3)
+
+ // Boundary in the middle.
+ n, err = file.Write([]byte("defg"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 4)
+
+ // Boundary at the end.
+ n, err = file.Write([]byte("hij"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 3)
+
+ // Larger than the chunk size, with 3 chunks.
+ n, err = file.Write([]byte("klmnopqrstuv"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 12)
+
+ err = file.Close()
+ c.Assert(err, IsNil)
+
+ // Check the file information.
+ result := M{}
+ err = db.C("fs.files").Find(nil).One(result)
+ c.Assert(err, IsNil)
+
+ fileId, _ := result["_id"].(bson.ObjectId)
+ c.Assert(fileId.Valid(), Equals, true)
+ result["_id"] = "<id>"
+ result["uploadDate"] = "<timestamp>"
+
+ expected := M{
+ "_id": "<id>",
+ "length": 22,
+ "chunkSize": 5,
+ "uploadDate": "<timestamp>",
+ "md5": "44a66044834cbe55040089cabfc102d5",
+ }
+ c.Assert(result, DeepEquals, expected)
+
+ // Check the chunks.
+ iter := db.C("fs.chunks").Find(nil).Sort("n").Iter()
+ dataChunks := []string{"abcde", "fghij", "klmno", "pqrst", "uv"}
+ for i := 0; ; i++ {
+ result = M{}
+ if !iter.Next(result) {
+ if i != 5 {
+ c.Fatalf("Expected 5 chunks, got %d", i)
+ }
+ break
+ }
+ c.Assert(iter.Close(), IsNil)
+
+ result["_id"] = "<id>"
+
+ expected = M{
+ "_id": "<id>",
+ "files_id": fileId,
+ "n": i,
+ "data": []byte(dataChunks[i]),
+ }
+ c.Assert(result, DeepEquals, expected)
+ }
+}
+
+func (s *S) TestGridFSAbort(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+ file, err := gfs.Create("")
+ c.Assert(err, IsNil)
+
+ file.SetChunkSize(5)
+
+ n, err := file.Write([]byte("some data"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 9)
+
+ var count int
+ for i := 0; i < 10; i++ {
+ count, err = db.C("fs.chunks").Count()
+ if count > 0 || err != nil {
+ break
+ }
+ }
+ c.Assert(err, IsNil)
+ c.Assert(count, Equals, 1)
+
+ file.Abort()
+
+ err = file.Close()
+ c.Assert(err, ErrorMatches, "write aborted")
+
+ count, err = db.C("fs.chunks").Count()
+ c.Assert(err, IsNil)
+ c.Assert(count, Equals, 0)
+}
+
+func (s *S) TestGridFSCloseConflict(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ db.C("fs.files").EnsureIndex(mgo.Index{Key: []string{"filename"}, Unique: true})
+
+	// Insert a conflicting filename up front so closing the new file below fails with a duplicate key error.
+ err = db.C("fs.files").Insert(M{"filename": "foo.txt"})
+ c.Assert(err, IsNil)
+
+ gfs := db.GridFS("fs")
+ file, err := gfs.Create("foo.txt")
+ c.Assert(err, IsNil)
+
+ _, err = file.Write([]byte("some data"))
+ c.Assert(err, IsNil)
+
+ err = file.Close()
+ c.Assert(mgo.IsDup(err), Equals, true)
+
+ count, err := db.C("fs.chunks").Count()
+ c.Assert(err, IsNil)
+ c.Assert(count, Equals, 0)
+}
+
+func (s *S) TestGridFSOpenNotFound(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+ file, err := gfs.OpenId("non-existent")
+ c.Assert(err == mgo.ErrNotFound, Equals, true)
+ c.Assert(file, IsNil)
+
+ file, err = gfs.Open("non-existent")
+ c.Assert(err == mgo.ErrNotFound, Equals, true)
+ c.Assert(file, IsNil)
+}
+
+func (s *S) TestGridFSReadAll(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+ file, err := gfs.Create("")
+ c.Assert(err, IsNil)
+ id := file.Id()
+
+ file.SetChunkSize(5)
+
+ n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 22)
+
+ err = file.Close()
+ c.Assert(err, IsNil)
+
+ file, err = gfs.OpenId(id)
+ c.Assert(err, IsNil)
+
+ b := make([]byte, 30)
+ n, err = file.Read(b)
+ c.Assert(n, Equals, 22)
+ c.Assert(err, IsNil)
+
+ n, err = file.Read(b)
+ c.Assert(n, Equals, 0)
+ c.Assert(err == io.EOF, Equals, true)
+
+ err = file.Close()
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestGridFSReadChunking(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+
+ file, err := gfs.Create("")
+ c.Assert(err, IsNil)
+
+ id := file.Id()
+
+ file.SetChunkSize(5)
+
+ n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 22)
+
+ err = file.Close()
+ c.Assert(err, IsNil)
+
+ file, err = gfs.OpenId(id)
+ c.Assert(err, IsNil)
+
+ b := make([]byte, 30)
+
+ // Smaller than the chunk size.
+ n, err = file.Read(b[:3])
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 3)
+ c.Assert(b[:3], DeepEquals, []byte("abc"))
+
+ // Boundary in the middle.
+ n, err = file.Read(b[:4])
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 4)
+ c.Assert(b[:4], DeepEquals, []byte("defg"))
+
+ // Boundary at the end.
+ n, err = file.Read(b[:3])
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 3)
+ c.Assert(b[:3], DeepEquals, []byte("hij"))
+
+ // Larger than the chunk size, with 3 chunks.
+ n, err = file.Read(b)
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 12)
+ c.Assert(b[:12], DeepEquals, []byte("klmnopqrstuv"))
+
+ n, err = file.Read(b)
+ c.Assert(n, Equals, 0)
+ c.Assert(err == io.EOF, Equals, true)
+
+ err = file.Close()
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestGridFSOpen(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+
+ file, err := gfs.Create("myfile.txt")
+ c.Assert(err, IsNil)
+ file.Write([]byte{'1'})
+ file.Close()
+
+ file, err = gfs.Create("myfile.txt")
+ c.Assert(err, IsNil)
+ file.Write([]byte{'2'})
+ file.Close()
+
+ file, err = gfs.Open("myfile.txt")
+ c.Assert(err, IsNil)
+ defer file.Close()
+
+ var b [1]byte
+
+ _, err = file.Read(b[:])
+ c.Assert(err, IsNil)
+ c.Assert(string(b[:]), Equals, "2")
+}
+
+func (s *S) TestGridFSSeek(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+ file, err := gfs.Create("")
+ c.Assert(err, IsNil)
+ id := file.Id()
+
+ file.SetChunkSize(5)
+
+ n, err := file.Write([]byte("abcdefghijklmnopqrstuv"))
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 22)
+
+ err = file.Close()
+ c.Assert(err, IsNil)
+
+ b := make([]byte, 5)
+
+ file, err = gfs.OpenId(id)
+ c.Assert(err, IsNil)
+
+ o, err := file.Seek(3, os.SEEK_SET)
+ c.Assert(err, IsNil)
+ c.Assert(o, Equals, int64(3))
+ _, err = file.Read(b)
+ c.Assert(err, IsNil)
+ c.Assert(b, DeepEquals, []byte("defgh"))
+
+ o, err = file.Seek(5, os.SEEK_CUR)
+ c.Assert(err, IsNil)
+ c.Assert(o, Equals, int64(13))
+ _, err = file.Read(b)
+ c.Assert(err, IsNil)
+ c.Assert(b, DeepEquals, []byte("nopqr"))
+
+ o, err = file.Seek(0, os.SEEK_END)
+ c.Assert(err, IsNil)
+ c.Assert(o, Equals, int64(22))
+ n, err = file.Read(b)
+ c.Assert(err, Equals, io.EOF)
+ c.Assert(n, Equals, 0)
+
+ o, err = file.Seek(-10, os.SEEK_END)
+ c.Assert(err, IsNil)
+ c.Assert(o, Equals, int64(12))
+ _, err = file.Read(b)
+ c.Assert(err, IsNil)
+ c.Assert(b, DeepEquals, []byte("mnopq"))
+
+ o, err = file.Seek(8, os.SEEK_SET)
+ c.Assert(err, IsNil)
+ c.Assert(o, Equals, int64(8))
+ _, err = file.Read(b)
+ c.Assert(err, IsNil)
+ c.Assert(b, DeepEquals, []byte("ijklm"))
+
+ // Trivial seek forward within same chunk. Already
+ // got the data, shouldn't touch the database.
+ sent := mgo.GetStats().SentOps
+ o, err = file.Seek(1, os.SEEK_CUR)
+ c.Assert(err, IsNil)
+ c.Assert(o, Equals, int64(14))
+ c.Assert(mgo.GetStats().SentOps, Equals, sent)
+ _, err = file.Read(b)
+ c.Assert(err, IsNil)
+ c.Assert(b, DeepEquals, []byte("opqrs"))
+
+ // Try seeking past end of file.
+ file.Seek(3, os.SEEK_SET)
+ o, err = file.Seek(23, os.SEEK_SET)
+ c.Assert(err, ErrorMatches, "seek past end of file")
+ c.Assert(o, Equals, int64(3))
+}
+
+func (s *S) TestGridFSRemoveId(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+
+ file, err := gfs.Create("myfile.txt")
+ c.Assert(err, IsNil)
+ file.Write([]byte{'1'})
+ file.Close()
+
+ file, err = gfs.Create("myfile.txt")
+ c.Assert(err, IsNil)
+ file.Write([]byte{'2'})
+ id := file.Id()
+ file.Close()
+
+ err = gfs.RemoveId(id)
+ c.Assert(err, IsNil)
+
+ file, err = gfs.Open("myfile.txt")
+ c.Assert(err, IsNil)
+ defer file.Close()
+
+ var b [1]byte
+
+ _, err = file.Read(b[:])
+ c.Assert(err, IsNil)
+ c.Assert(string(b[:]), Equals, "1")
+
+ n, err := db.C("fs.chunks").Find(M{"files_id": id}).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 0)
+}
+
+func (s *S) TestGridFSRemove(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+
+ file, err := gfs.Create("myfile.txt")
+ c.Assert(err, IsNil)
+ file.Write([]byte{'1'})
+ file.Close()
+
+ file, err = gfs.Create("myfile.txt")
+ c.Assert(err, IsNil)
+ file.Write([]byte{'2'})
+ file.Close()
+
+ err = gfs.Remove("myfile.txt")
+ c.Assert(err, IsNil)
+
+ _, err = gfs.Open("myfile.txt")
+ c.Assert(err == mgo.ErrNotFound, Equals, true)
+
+ n, err := db.C("fs.chunks").Find(nil).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 0)
+}
+
+func (s *S) TestGridFSOpenNext(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+
+ gfs := db.GridFS("fs")
+
+ file, err := gfs.Create("myfile1.txt")
+ c.Assert(err, IsNil)
+ file.Write([]byte{'1'})
+ file.Close()
+
+ file, err = gfs.Create("myfile2.txt")
+ c.Assert(err, IsNil)
+ file.Write([]byte{'2'})
+ file.Close()
+
+ var f *mgo.GridFile
+ var b [1]byte
+
+ iter := gfs.Find(nil).Sort("-filename").Iter()
+
+ ok := gfs.OpenNext(iter, &f)
+ c.Assert(ok, Equals, true)
+ c.Check(f.Name(), Equals, "myfile2.txt")
+
+ _, err = f.Read(b[:])
+ c.Assert(err, IsNil)
+ c.Assert(string(b[:]), Equals, "2")
+
+ ok = gfs.OpenNext(iter, &f)
+ c.Assert(ok, Equals, true)
+ c.Check(f.Name(), Equals, "myfile1.txt")
+
+ _, err = f.Read(b[:])
+ c.Assert(err, IsNil)
+ c.Assert(string(b[:]), Equals, "1")
+
+ ok = gfs.OpenNext(iter, &f)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Close(), IsNil)
+ c.Assert(f, IsNil)
+
+ // Do it again with a more restrictive query to make sure
+ // it's actually taken into account.
+ iter = gfs.Find(bson.M{"filename": "myfile1.txt"}).Iter()
+
+ ok = gfs.OpenNext(iter, &f)
+ c.Assert(ok, Equals, true)
+ c.Check(f.Name(), Equals, "myfile1.txt")
+
+ ok = gfs.OpenNext(iter, &f)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Close(), IsNil)
+ c.Assert(f, IsNil)
+}
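As TestGridFSOpen and TestGridFSOpenNext above exercise, Create never overwrites: a second file stored under an existing name becomes a newer version, Open returns the most recently uploaded version, and OpenNext walks whatever fs.files query the caller built. A hedged sketch of listing every stored file with that API (assumes the fmt and gopkg.in/mgo.v2 imports shown):

    import (
        "fmt"

        "gopkg.in/mgo.v2"
    )

    // listFiles prints one line per file stored under the GridFS prefix.
    // OpenNext closes the previously opened file before opening the next,
    // so no explicit Close is needed inside the loop.
    func listFiles(gfs *mgo.GridFS) error {
        var file *mgo.GridFile
        iter := gfs.Find(nil).Sort("-uploadDate").Iter()
        for gfs.OpenNext(iter, &file) {
            fmt.Printf("%s (%d bytes)\n", file.Name(), file.Size())
        }
        return iter.Close()
    }
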
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.crt b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.crt
new file mode 100644
index 00000000000..6143d925472
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.crt
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV
+BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl
+cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw
+OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH
+DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls
+b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H
+4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ
+616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I
+AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd
+7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO
+Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx
+l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5
+CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW
+DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47
+PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR
+OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI
+/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r
+z3A=
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.key b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.key
new file mode 100644
index 00000000000..892db714f91
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7
+wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ
+r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ
+Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI
+KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5
+Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu
+La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq
+KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv
+bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f
+Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA
+Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp
+QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo
+DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl
+QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F
+Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ
++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F
+jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB
+K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy
+HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP
+Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E
+xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB
+28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z
+ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ
+4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo
+I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk=
+-----END RSA PRIVATE KEY-----
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.pem b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.pem
new file mode 100644
index 00000000000..93aed3556e9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.pem
@@ -0,0 +1,57 @@
+To regenerate the key:
+
+ openssl req -newkey rsa:2048 -new -x509 -days 36500 -nodes -out server.crt -keyout server.key
+ cat server.key server.crt > server.pem
+ openssl genrsa -out client.key 2048
+ openssl req -key client.key -new -out client.req
+ openssl x509 -req -in client.req -CA server.crt -CAkey server.key -days 36500 -CAserial file.srl -out client.crt
+ cat client.key client.crt > client.pem
+
+-----BEGIN RSA PRIVATE KEY-----
+MIIEogIBAAKCAQEAtFIkIZk/h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7
+wQidZwLul+cyDfPRDzzo3za4GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJ
+r4f/tItg0riOEBbLslQDzNTtCAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJ
+Q6DYEQgCa2BTIWq0Uw3WO20M3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AI
+KBhAZwa7vND0RaRYqpO9kyZFzh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5
+Hx+ftNTXnl/69TnxG44BP8M88ZfDWlpzwpsTXwIDAQABAoIBADzCjOAxZkHfuZyu
+La0wTHXpkEfXdJ6ltagq5WY7P6MlOYwcRoK152vlhgXzZl9jL6ely4YjRwec0swq
+KdwezpV4fOGVPmuTuw45bx47HEnr/49ZQ4p9FgF9EYQPofbz53FQc/NaMACJcogv
+bn+osniw+VMFrOVNmGLiZ5p3Smk8zfXE7GRHO8CL5hpWLWO/aK236yytbfWOjM2f
+Pr76ICb26TPRNzYaYUEThU6DtgdLU8pLnJ6QKKaDsjn+zqQzRa+Nvc0c0K8gvWwA
+Afq7t0325+uMSwfpLgCOFldcaZQ5uvteJ0CAVRq1MvStnSHBmMzPlgS+NzsDm6lp
+QH5+rIkCgYEA5j3jrWsv7TueTNbk8Hr/Zwywc+fA2Ex0pBURBHlHyc6ahSXWSCqo
+DtvRGX0GDoK1lCfaIf1qb/DLlGaoHpkEeqcNhXQ+hHs+bZAxfbfBY9+ikit5ZTtl
+QN1tIlhaiyLDnwhkpi/hMw1tiouxJUf84Io61z0sCL4hyZSPCpjn0H0CgYEAyH6F
+Mwl+bCD3VDL/Dr5WSoOr2B/M3bF5SfvdStwy2IPcDJ716je1Ud/2qFCnKGgqvWhJ
++HU15c7CjAWo7/pXq2/pEMD8fDKTYww4Hr4p6duEA7DpbOGkwcUX8u3eknxUWT9F
+jOSbTCvAxuDOC1K3AElyMxVVTNUrFFe8M84R9gsCgYBXmb6RkdG3WlKde7m5gaLB
+K4PLZabq5RQQBe/mmtpkfxYtiLrh1FEC7kG9h+MRDExX5V3KRugDVUOv3+shUSjy
+HbM4ToUm1NloyE78PTj4bfMl2CKlEJcyucy3H5S7kWuKi5/31wnA6d/+sa2huKUP
+Lai7kgu5+9VRJBPUfV7d5QKBgCnhk/13TDtWH5QtGu5/gBMMskbxTaA5xHZZ8H4E
+xXJJCRxx0Dje7jduK145itF8AQGT2W/XPC0HJciOHh4TE2EyfWMMjTF8dyFHmimB
+28uIGWmT+Q7Pi9UWUMxkOAwtgIksGGE4F+CvexOQPjpLSwL6VKqrGCh2lwsm0J+Z
+ulLFAoGAKlC93c6XEj1A31c1+usdEhUe9BrmTqtSYLYpDNpeMLdZ3VctrAZuOQPZ
+4A4gkkQkqqwZGBYYSEqwqiLU6MsBdHPPZ9u3JXLLOQuh1xGeaKylvHj7qx6iT0Xo
+I+FkJ6/3JeMgOina/+wlzD4oyQpqR4Mnh+TuLkDfQTgY+Lg0WPk=
+-----END RSA PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDLjCCAhYCAQcwDQYJKoZIhvcNAQELBQAwXDELMAkGA1UEBhMCR08xDDAKBgNV
+BAgMA01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNl
+cnZlcjESMBAGA1UEAwwJbG9jYWxob3N0MCAXDTE1MDkyOTA4NDAzMFoYDzIxMTUw
+OTA1MDg0MDMwWjBcMQswCQYDVQQGEwJHTzEMMAoGA1UECAwDTUdPMQwwCgYDVQQH
+DANNR08xDDAKBgNVBAoMA01HTzEPMA0GA1UECwwGQ2xpZW50MRIwEAYDVQQDDAls
+b2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0UiQhmT+H
+4IIqrn8SMESDzvcl3rwImwUoRIHlmXkovCIZCbvBCJ1nAu6X5zIN89EPPOjfNrgZ
+616wPgVV/YEQXp+D7+jTAsE5s8JepRXFdecResmvh/+0i2DSuI4QFsuyVAPM1O0I
+AQ5EKgr0weZZmsX6lhPD4uYehV4DxDE0i/8aTAlDoNgRCAJrYFMharRTDdY7bQzd
+7ZYab/pK/3DSmOKxl/AFJ8Enmcj9w1bsvy0fgAgoGEBnBru80PRFpFiqk72TJkXO
+Hx7zcYFpegtKPbAreTCModaCnjP//fskCp4XJrkfH5+01NeeX/r1OfEbjgE/wzzx
+l8NaWnPCmxNfAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAFwYpje3dCLDOIHYjd+5
+CpFOEb+bJsS4ryqm/NblTjIhCLo58hNpMsBqdJHRbHAFRCOE8fvY8yiWtdHeFZcW
+DgVRAXfHONLtN7faZaZQnhy/YzOhLfC/8dUMB0gQA8KXhBCPZqQmexE28AfkEO47
+PwICAxIWINfjm5VnFMkA3b7bDNLHon/pev2m7HqVQ3pRUJQNK3XgFOdDgRrnuXpR
+OKAfHORHVGTh1gf1DVwc0oM+0gnkSiJ1VG0n5pE3zhZ24fmZxu6JQ6X515W7APQI
+/nKVH+f1Fo+ustyTNLt8Bwxi1XmwT7IXwnkVSE9Ff6VejppXRF01V0aaWsa3kU3r
+z3A=
+-----END CERTIFICATE-----
+
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.req b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.req
new file mode 100644
index 00000000000..e44feb4e867
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/client.req
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICoTCCAYkCAQAwXDELMAkGA1UEBhMCR08xDDAKBgNVBAgMA01HTzEMMAoGA1UE
+BwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBkNsaWVudDESMBAGA1UEAwwJ
+bG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtFIkIZk/
+h+CCKq5/EjBEg873Jd68CJsFKESB5Zl5KLwiGQm7wQidZwLul+cyDfPRDzzo3za4
+GetesD4FVf2BEF6fg+/o0wLBObPCXqUVxXXnEXrJr4f/tItg0riOEBbLslQDzNTt
+CAEORCoK9MHmWZrF+pYTw+LmHoVeA8QxNIv/GkwJQ6DYEQgCa2BTIWq0Uw3WO20M
+3e2WGm/6Sv9w0pjisZfwBSfBJ5nI/cNW7L8tH4AIKBhAZwa7vND0RaRYqpO9kyZF
+zh8e83GBaXoLSj2wK3kwjKHWgp4z//37JAqeFya5Hx+ftNTXnl/69TnxG44BP8M8
+8ZfDWlpzwpsTXwIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAKbOFblIscxlXalV
+sEGNm2oz380RN2QoLhN6nKtAiv0jWm6iKhdAhOIQIeaRPhUP3cyi8bcBvLdMeQ3d
+ZYIByB55/R0VSP1vs4qkXJCQegHcpMpyuIzsMV8p3Q4lxzGKyKtPA6Bb5c49p8Sk
+ncD+LL4ymrMEia4cBPsHL9hhFOm4gqDacbU8+ETLTpuoSvUZiw7OwngqhE2r+kMv
+KDweq5TOPeb+ftKzQKrrfB+XVdBoTKYw6CwARpogbc0/7mvottVcJ/0yAgC1fBbM
+vupkohkXwKfjxKl6nKNL3R2GkzHQOh91hglAx5zyybKQn2YMM328Vk4X6csBg+pg
+tb1s0MA=
+-----END CERTIFICATE REQUEST-----
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.crt b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.crt
new file mode 100644
index 00000000000..4515f559294
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.crt
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
+BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP
+MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw
+ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM
+A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl
+cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm
+6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK
+IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5
+GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji
+fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP
+JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd
+OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu
+2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG
+TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw
+nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s
+UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C
+W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL
+yQ==
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.key b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.key
new file mode 100644
index 00000000000..082d093e922
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.key
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB
+Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk
+mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi
+xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb
+YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R
+ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs
+uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9
+wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu
+MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi
+wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby
+yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk
+eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3
+ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC
+tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB
+xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6
+MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9
+Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3
+IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q
+Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl
+QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z
+GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do
+4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1
+ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7
+1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt
+9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk
+SruEA1+5bfBRMW0P+h7Qfe4=
+-----END PRIVATE KEY-----
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.pem b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.pem
new file mode 100644
index 00000000000..487b92d66b8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/certs/server.pem
@@ -0,0 +1,50 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD9PlbW9OwAX7aB
+Nc/UkrKCMztP/YFceIlzoNEpWOWyFO09i4LeulN10Obp3zp3XstYSj5PZsJPgzNk
+mFIYC6f2l4W96F0SVEyvnvGzuPlXVBiPBp0xMGQtC4ogCDpwhI3Uo9TOlRNQqxYi
+xvH3uwDS3TCIQ+J9E5vud9IwhVCx3P9z0uVjZQ1gj7kaJTzyIMaDbCt2xrdT6XYb
+YpLH/24TdzmIWSLpt16q4uJaYFnqvF+hot7iCTUg2OJ8qyw2yfaLe4niLhOavc9R
+ziTHHSYwq5Yrcd2VCwyq2mr74dCYdK+w+tuByOX0fI8mIcOygn7g7ltu1wTnWhBs
+uHVtkCFjAgMBAAECggEASRAfRc1L+Z+jrAu2doIMdnwJdL6S//bW0UFolyFKw+I9
+wC/sBg6D3c3zkS4SVDZJPKPO7mGbVg1oWnGH3eAfCYoV0ACmOY+QwGp/GXcYmRVu
+MHWcDIEFpelaZHt7QNM9iEfsMd3YwMFblZUIYozVZADk66uKQMPTjS2Muur7qRSi
+wuVfSmsVZ5afH3B1Tr96BbmPsHrXLjvNpjO44k2wrnnSPQjUL7+YiZPvtnNW8Fby
+yuo2uoAyjg3+68PYZftOvvNneMsv1uyGlUs6Bk+DVWaqofIztWFdFZyXbHnK2PTk
+eGQt5EsL+RwIck5eoqd5vSE+KyzhhydL0zcpngVQoQKBgQD/Yelvholbz5NQtSy3
+ZoiW1y7hL1BKzvVNHuAMKJ5WOnj5szhjhKxt/wZ+hk0qcAmlV9WAPbf4izbEwPRC
+tnMBQzf1uBxqqbLL6WZ4YAyGrcX3UrT7GXsGfVT4zJjz7oYSw8aPircecw5V4exB
+xa4NF+ki8IycXSkHwvW2R56fRwKBgQD92xpxXtte/rUnmENbQmr0aKg7JEfMoih6
+MdX+f6mfgjMmqj+L4jPTI8/ql8HEy13SQS1534aDSHO+nBqBK5aHUCRMIgSLnTP9
+Xyx9Ngg03SZIkPfykqxQmnZgWkTPMhYS+K1Ao9FGVs8W5jVi7veyAdhHptAcxhP3
+IuxvrxVTBQKBgQCluMPiu0snaOwP04HRAZhhSgIB3tIbuXE1OnPpb/JPwmH+p25Q
+Jig+uN9d+4jXoRyhTv4c2fAoOS6xPwVCxWKbzyLhMTg/fx+ncy4rryhxvRJaDDGl
+QEO1Ul9xlFMs9/vI8YJIY5uxBrimwpStmbn4hSukoLSeQ1X802bfglpMwQKBgD8z
+GTY4Y20XBIrDAaHquy32EEwJEEcF6AXj+l7N8bDgfVOW9xMgUb6zH8RL29Xeu5Do
+4SWCXL66fvZpbr/R1jwB28eIgJExpgvicfUKSqi+lhVi4hfmJDg8/FOopZDf61b1
+ykxZfHSCkDQnRAtJaylKBEpyYUWImtfgPfTgJfLxAoGAc8A/Tl2h/DsdTA+cA5d7
+1e0l64m13ObruSWRczyru4hy8Yq6E/K2rOFw8cYCcFpy24NqNlk+2iXPLRpWm2zt
+9R497zAPvhK/bfPXjvm0j/VjB44lvRTC9hby/RRMHy9UJk4o/UQaD+1IodxZovvk
+SruEA1+5bfBRMW0P+h7Qfe4=
+-----END PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDjTCCAnWgAwIBAgIJAMW+wDfcdzC+MA0GCSqGSIb3DQEBCwUAMFwxCzAJBgNV
+BAYTAkdPMQwwCgYDVQQIDANNR08xDDAKBgNVBAcMA01HTzEMMAoGA1UECgwDTUdP
+MQ8wDQYDVQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDAgFw0xNTA5Mjkw
+ODM0MTBaGA8yMTE1MDkwNTA4MzQxMFowXDELMAkGA1UEBhMCR08xDDAKBgNVBAgM
+A01HTzEMMAoGA1UEBwwDTUdPMQwwCgYDVQQKDANNR08xDzANBgNVBAsMBlNlcnZl
+cjESMBAGA1UEAwwJbG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEA/T5W1vTsAF+2gTXP1JKygjM7T/2BXHiJc6DRKVjlshTtPYuC3rpTddDm
+6d86d17LWEo+T2bCT4MzZJhSGAun9peFvehdElRMr57xs7j5V1QYjwadMTBkLQuK
+IAg6cISN1KPUzpUTUKsWIsbx97sA0t0wiEPifROb7nfSMIVQsdz/c9LlY2UNYI+5
+GiU88iDGg2wrdsa3U+l2G2KSx/9uE3c5iFki6bdequLiWmBZ6rxfoaLe4gk1INji
+fKssNsn2i3uJ4i4Tmr3PUc4kxx0mMKuWK3HdlQsMqtpq++HQmHSvsPrbgcjl9HyP
+JiHDsoJ+4O5bbtcE51oQbLh1bZAhYwIDAQABo1AwTjAdBgNVHQ4EFgQUhku/u9Kd
+OAc1L0OR649vCCuQT+0wHwYDVR0jBBgwFoAUhku/u9KdOAc1L0OR649vCCuQT+0w
+DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAw7Bgw3hlWXWSZjLhnSOu
+2mW/UJ2Sj31unHngmgtXwW/04cyzoULb+qmzPe/Z06QMgGIsku1jFBcu0JabQtUG
+TyalpfW77tfnvz238CYdImYwE9ZcIGuZGfhs6ySFN9XpW43B8YM7R8wTNPvOcSPw
+nfjqU6kueN4TTspQg9cKhDss5DcMTIdgJgLbITXhIsrCu6GlKOgtX3HrdMGpQX7s
+UoMXtZVG8pK32vxKWGTZ6DPqESeKjjq74NbYnB3H5U/kDU2dt7LF90C/Umdr9y+C
+W2OJb1WBrf6RTcbt8D6d7P9kOfLPOtyn/cbaA/pfXBMQMHqr7XNXzjnaNU+jB7hL
+yQ==
+-----END CERTIFICATE-----
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/.env b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/.env
new file mode 100644
index 00000000000..96ee89e947d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/.env
@@ -0,0 +1,57 @@
+
+set -e
+
+MONGOVERSION=$(mongod --version | sed -n 's/.*v\([0-9]\+\.[0-9]\+\)\..*/\1/p')
+MONGOMAJOR=$(echo $MONGOVERSION | sed 's/\([0-9]\+\)\..*/\1/')
+MONGOMINOR=$(echo $MONGOVERSION | sed 's/[0-9]\+\.\([0-9]\+\)/\1/')
+
+versionAtLeast() {
+ TESTMAJOR="$1"
+ TESTMINOR="$2"
+ if [ "$MONGOMAJOR" -gt "$TESTMAJOR" ]; then
+ return 0
+ fi
+ if [ "$MONGOMAJOR" -lt "$TESTMAJOR" ]; then
+ return 100
+ fi
+ if [ "$MONGOMINOR" -ge "$TESTMINOR" ]; then
+ return 0
+ fi
+ return 100
+}
+
+COMMONDOPTSNOIP="
+ --nohttpinterface
+ --noprealloc
+ --nojournal
+ --smallfiles
+ --nssize=1
+ --oplogSize=1
+ --dbpath ./db
+ "
+COMMONDOPTS="
+ $COMMONDOPTSNOIP
+ --bind_ip=127.0.0.1
+ "
+COMMONCOPTS="
+ $COMMONDOPTS
+ "
+COMMONSOPTS="
+ --chunkSize 1
+ --bind_ip=127.0.0.1
+ "
+
+if versionAtLeast 3 2; then
+ # 3.2 doesn't like --nojournal on config servers.
+ #COMMONCOPTS="$(echo "$COMMONCOPTS" | sed '/--nojournal/d')"
+ # Using a hacked version of MongoDB 3.2 for now.
+
+ # Go back to MMAPv1 so it's not super sluggish. :-(
+ COMMONDOPTSNOIP="--storageEngine=mmapv1 $COMMONDOPTSNOIP"
+ COMMONDOPTS="--storageEngine=mmapv1 $COMMONDOPTS"
+ COMMONCOPTS="--storageEngine=mmapv1 $COMMONCOPTS"
+fi
+
+if [ "$TRAVIS" = true ]; then
+ set -x
+fi
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/journal/tempLatencyTest b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/journal/tempLatencyTest
new file mode 100644
index 00000000000..52972ec9e05
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/journal/tempLatencyTest
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/mongod.lock b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/mongod.lock
new file mode 100755
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/db/mongod.lock
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/run
new file mode 100755
index 00000000000..ad6bddd040d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg1/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONCOPTS \
+ --port 40101 \
+ --configsvr
+
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/run
new file mode 100755
index 00000000000..07d159ef538
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg2/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONCOPTS \
+ --port 40102 \
+ --configsvr
+
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/run
new file mode 100755
index 00000000000..bd812fa3e39
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/cfg3/run
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONCOPTS \
+ --port 40103 \
+ --configsvr \
+ --auth \
+ --keyFile=../../certs/keyfile
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/run
new file mode 100755
index 00000000000..b6636d195ed
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db1/run
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+. ../.env
+
+if [ x$NOIPV6 = x1 ]; then
+ BINDIP="127.0.0.1"
+else
+ BINDIP="127.0.0.1,::1"
+fi
+
+exec mongod $COMMONDOPTSNOIP \
+ --shardsvr \
+ --bind_ip=$BINDIP \
+ --port 40001 \
+ --ipv6
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/run
new file mode 100755
index 00000000000..5c7b1aa502a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db2/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --port 40002 \
+ --auth
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/run
new file mode 100755
index 00000000000..539da5fb211
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/db3/run
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --port 40003 \
+ --auth \
+ --sslMode preferSSL \
+ --sslCAFile ../../certs/server.pem \
+ --sslPEMKeyFile ../../certs/server.pem
+
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/run
new file mode 100755
index 00000000000..9de773041b7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1a/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --replSet rs1 \
+ --port 40011
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/run
new file mode 100755
index 00000000000..dae593e1231
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1b/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --replSet rs1 \
+ --port 40012
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/run
new file mode 100755
index 00000000000..c28cdc35d89
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs1c/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --replSet rs1 \
+ --port 40013
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/run
new file mode 100755
index 00000000000..2c77ab1ab04
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2a/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --replSet rs2 \
+ --port 40021
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/run
new file mode 100755
index 00000000000..57bcfce1580
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2b/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --replSet rs2 \
+ --port 40022
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/run
new file mode 100755
index 00000000000..a71222705ce
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs2c/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --replSet rs2 \
+ --port 40023
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/run
new file mode 100755
index 00000000000..002fbaf8e3e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3a/run
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --replSet rs3 \
+ --port 40031 \
+ --keyFile=../../certs/keyfile
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/run
new file mode 100755
index 00000000000..69825843ea1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3b/run
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --replSet rs3 \
+ --port 40032 \
+ --keyFile=../../certs/keyfile
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/run
new file mode 100755
index 00000000000..97b32c92762
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs3c/run
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --replSet rs3 \
+ --port 40033 \
+ --keyFile=../../certs/keyfile
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/db/.empty b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/db/.empty
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/db/.empty
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/run
new file mode 100755
index 00000000000..c2f2d556341
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/rs4a/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongod $COMMONDOPTS \
+ --shardsvr \
+ --replSet rs4 \
+ --port 40041
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s1/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s1/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s1/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s1/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s1/run
new file mode 100755
index 00000000000..0e31d2c9486
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s1/run
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongos $COMMONSOPTS \
+ --port 40201 \
+ --configdb 127.0.0.1:40101
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s2/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s2/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s2/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s2/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s2/run
new file mode 100755
index 00000000000..3b5c67d5880
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s2/run
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongos $COMMONSOPTS \
+ --port 40202 \
+ --configdb 127.0.0.1:40102
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s3/log/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s3/log/run
new file mode 100755
index 00000000000..e9d4404ba4f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s3/log/run
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec cat - > log.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s3/run b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s3/run
new file mode 100755
index 00000000000..fde6e479ba2
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/daemons/s3/run
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+. ../.env
+
+exec mongos $COMMONSOPTS \
+ --port 40203 \
+ --configdb 127.0.0.1:40103 \
+ --keyFile=../../certs/keyfile
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/dropall.js b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/dropall.js
new file mode 100644
index 00000000000..7fa39d112e1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/dropall.js
@@ -0,0 +1,66 @@
+
+var ports = [40001, 40002, 40011, 40012, 40013, 40021, 40022, 40023, 40041, 40101, 40102, 40103, 40201, 40202, 40203]
+var auth = [40002, 40103, 40203, 40031]
+var db1 = new Mongo("localhost:40001")
+
+if (db1.getDB("admin").serverBuildInfo().OpenSSLVersion) {
+ ports.push(40003)
+ auth.push(40003)
+}
+
+for (var i in ports) {
+ var port = ports[i]
+ var server = "localhost:" + port
+ var mongo = new Mongo("localhost:" + port)
+ var admin = mongo.getDB("admin")
+
+ for (var j in auth) {
+ if (auth[j] == port) {
+ admin.auth("root", "rapadura")
+ admin.system.users.find().forEach(function(u) {
+ if (u.user == "root" || u.user == "reader") {
+ return;
+ }
+ if (typeof admin.dropUser == "function") {
+ mongo.getDB(u.db).dropUser(u.user);
+ } else {
+ admin.removeUser(u.user);
+ }
+ })
+ break
+ }
+ }
+ var result = admin.runCommand({"listDatabases": 1})
+ for (var j = 0; j != 100; j++) {
+ if (typeof result.databases != "undefined" || notMaster(result)) {
+ break
+ }
+ result = admin.runCommand({"listDatabases": 1})
+ }
+ if (notMaster(result)) {
+ continue
+ }
+ if (typeof result.databases == "undefined") {
+ print("Could not list databases. Command result:")
+ print(JSON.stringify(result))
+ quit(12)
+ }
+ var dbs = result.databases
+ for (var j = 0; j != dbs.length; j++) {
+ var db = dbs[j]
+ switch (db.name) {
+ case "admin":
+ case "local":
+ case "config":
+ break
+ default:
+ mongo.getDB(db.name).dropDatabase()
+ }
+ }
+}
+
+function notMaster(result) {
+    return typeof result.errmsg != "undefined" && (result.errmsg.indexOf("not master") >= 0 || result.errmsg.indexOf("no master found") >= 0)
+}
+
+// vim:ts=4:sw=4:et
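
dropall.js above is the harness's reset script: it visits every mongod/mongos
port, authenticates on the auth-enabled nodes, retries listDatabases until the
node reports a master, and drops every database except admin, local, and
config. A plausible invocation, mirroring how setup.sh runs its sibling
init.js (the exact path is an assumption, not shown in this commit):

    mongo --nodb harness/mongojs/dropall.js
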
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/init.js b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/init.js
new file mode 100644
index 00000000000..ceb75a5e4a0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/init.js
@@ -0,0 +1,132 @@
+//var settings = {heartbeatSleep: 0.05, heartbeatTimeout: 0.5}
+var settings = {};
+
+// We know the master of the first set (pri=1), but not of the second.
+var rs1cfg = {_id: "rs1",
+ members: [{_id: 1, host: "127.0.0.1:40011", priority: 1, tags: {rs1: "a"}},
+ {_id: 2, host: "127.0.0.1:40012", priority: 0, tags: {rs1: "b"}},
+ {_id: 3, host: "127.0.0.1:40013", priority: 0, tags: {rs1: "c"}}],
+ settings: settings}
+var rs2cfg = {_id: "rs2",
+ members: [{_id: 1, host: "127.0.0.1:40021", priority: 1, tags: {rs2: "a"}},
+ {_id: 2, host: "127.0.0.1:40022", priority: 1, tags: {rs2: "b"}},
+ {_id: 3, host: "127.0.0.1:40023", priority: 1, tags: {rs2: "c"}}],
+ settings: settings}
+var rs3cfg = {_id: "rs3",
+ members: [{_id: 1, host: "127.0.0.1:40031", priority: 1, tags: {rs3: "a"}},
+ {_id: 2, host: "127.0.0.1:40032", priority: 1, tags: {rs3: "b"}},
+ {_id: 3, host: "127.0.0.1:40033", priority: 1, tags: {rs3: "c"}}],
+ settings: settings}
+
+for (var i = 0; i != 60; i++) {
+ try {
+ db1 = new Mongo("127.0.0.1:40001").getDB("admin")
+ db2 = new Mongo("127.0.0.1:40002").getDB("admin")
+ rs1a = new Mongo("127.0.0.1:40011").getDB("admin")
+ rs2a = new Mongo("127.0.0.1:40021").getDB("admin")
+ rs3a = new Mongo("127.0.0.1:40031").getDB("admin")
+ break
+ } catch(err) {
+ print("Can't connect yet...")
+ }
+ sleep(1000)
+}
+
+function hasSSL() {
+ return Boolean(db1.serverBuildInfo().OpenSSLVersion)
+}
+
+rs1a.runCommand({replSetInitiate: rs1cfg})
+rs2a.runCommand({replSetInitiate: rs2cfg})
+rs3a.runCommand({replSetInitiate: rs3cfg})
+
+function configShards() {
+ cfg1 = new Mongo("127.0.0.1:40201").getDB("admin")
+ cfg1.runCommand({addshard: "127.0.0.1:40001"})
+ cfg1.runCommand({addshard: "rs1/127.0.0.1:40011"})
+
+ cfg2 = new Mongo("127.0.0.1:40202").getDB("admin")
+ cfg2.runCommand({addshard: "rs2/127.0.0.1:40021"})
+
+ cfg3 = new Mongo("127.0.0.1:40203").getDB("admin")
+ cfg3.runCommand({addshard: "rs3/127.0.0.1:40031"})
+}
+
+function configAuth() {
+ var addrs = ["127.0.0.1:40002", "127.0.0.1:40203", "127.0.0.1:40031"]
+ if (hasSSL()) {
+ addrs.push("127.0.0.1:40003")
+ }
+ for (var i in addrs) {
+ print("Configuring auth for", addrs[i])
+ var db = new Mongo(addrs[i]).getDB("admin")
+ var v = db.serverBuildInfo().versionArray
+ var timedOut = false
+ if (v < [2, 5]) {
+ db.addUser("root", "rapadura")
+ } else {
+ try {
+ db.createUser({user: "root", pwd: "rapadura", roles: ["root"]})
+ } catch (err) {
+ // 3.2 consistently fails replication of creds on 40031 (config server)
+ print("createUser command returned an error: " + err)
+ if (String(err).indexOf("timed out") >= 0) {
+ timedOut = true;
+ }
+ }
+ }
+ for (var i = 0; i < 60; i++) {
+ var ok = db.auth("root", "rapadura")
+ if (ok || !timedOut) {
+ break
+ }
+ sleep(1000);
+ }
+ if (v >= [2, 6]) {
+ db.createUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]})
+ } else if (v >= [2, 4]) {
+ db.addUser({user: "reader", pwd: "rapadura", roles: ["readAnyDatabase"]})
+ } else {
+ db.addUser("reader", "rapadura", true)
+ }
+ }
+}
+
+function countHealthy(rs) {
+ var status = rs.runCommand({replSetGetStatus: 1})
+ var count = 0
+ var primary = 0
+ if (typeof status.members != "undefined") {
+ for (var i = 0; i != status.members.length; i++) {
+ var m = status.members[i]
+ if (m.health == 1 && (m.state == 1 || m.state == 2)) {
+ count += 1
+ if (m.state == 1) {
+ primary = 1
+ }
+ }
+ }
+ }
+ if (primary == 0) {
+ count = 0
+ }
+ return count
+}
+
+var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length
+
+for (var i = 0; i != 60; i++) {
+ var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a)
+ print("Replica sets have", count, "healthy nodes.")
+ if (count == totalRSMembers) {
+ configShards()
+ configAuth()
+ quit(0)
+ }
+ sleep(1000)
+}
+
+print("Replica sets didn't sync up properly.")
+quit(12)
+
+// vim:ts=4:sw=4:et
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/wait.js b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/wait.js
new file mode 100644
index 00000000000..2735d0e56e5
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/mongojs/wait.js
@@ -0,0 +1,67 @@
+// We know the master of the first set (pri=1), but not of the second.
+var settings = {}
+var rs1cfg = {_id: "rs1",
+ members: [{_id: 1, host: "127.0.0.1:40011", priority: 1},
+ {_id: 2, host: "127.0.0.1:40012", priority: 0},
+ {_id: 3, host: "127.0.0.1:40013", priority: 0}]}
+var rs2cfg = {_id: "rs2",
+ members: [{_id: 1, host: "127.0.0.1:40021", priority: 1},
+ {_id: 2, host: "127.0.0.1:40022", priority: 1},
+ {_id: 3, host: "127.0.0.1:40023", priority: 0}]}
+var rs3cfg = {_id: "rs3",
+ members: [{_id: 1, host: "127.0.0.1:40031", priority: 1},
+ {_id: 2, host: "127.0.0.1:40032", priority: 1},
+ {_id: 3, host: "127.0.0.1:40033", priority: 1}],
+ settings: settings}
+
+for (var i = 0; i != 60; i++) {
+ try {
+ rs1a = new Mongo("127.0.0.1:40011").getDB("admin")
+ rs2a = new Mongo("127.0.0.1:40021").getDB("admin")
+ rs3a = new Mongo("127.0.0.1:40031").getDB("admin")
+ rs3a.auth("root", "rapadura")
+ db1 = new Mongo("127.0.0.1:40001").getDB("admin")
+ db2 = new Mongo("127.0.0.1:40002").getDB("admin")
+ break
+ } catch(err) {
+ print("Can't connect yet...")
+ }
+ sleep(1000)
+}
+
+function countHealthy(rs) {
+ var status = rs.runCommand({replSetGetStatus: 1})
+ var count = 0
+ var primary = 0
+ if (typeof status.members != "undefined") {
+ for (var i = 0; i != status.members.length; i++) {
+ var m = status.members[i]
+ if (m.health == 1 && (m.state == 1 || m.state == 2)) {
+ count += 1
+ if (m.state == 1) {
+ primary = 1
+ }
+ }
+ }
+ }
+ if (primary == 0) {
+ count = 0
+ }
+ return count
+}
+
+var totalRSMembers = rs1cfg.members.length + rs2cfg.members.length + rs3cfg.members.length
+
+for (var i = 0; i != 90; i++) {
+ var count = countHealthy(rs1a) + countHealthy(rs2a) + countHealthy(rs3a)
+ print("Replica sets have", count, "healthy nodes.")
+ if (count == totalRSMembers) {
+ quit(0)
+ }
+ sleep(1000)
+}
+
+print("Replica sets didn't sync up properly.")
+quit(12)
+
+// vim:ts=4:sw=4:et
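
wait.js performs only the health-wait half of init.js: it reconnects until the
daemons accept connections, then polls replSetGetStatus until every member of
the three replica sets is healthy and each set has a primary, exiting 0 on
success and 12 after 90 seconds. A plausible invocation, again modeled on how
setup.sh calls init.js (the path is an assumption):

    mongo --nodb harness/mongojs/wait.js && echo "replica sets ready"
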
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/setup.sh b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/setup.sh
new file mode 100755
index 00000000000..e5db78a783d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/harness/setup.sh
@@ -0,0 +1,96 @@
+#!/bin/sh -e
+
+LINE="---------------"
+
+start() {
+ if [ -d _harness ]; then
+ echo "Daemon setup already in place, stop it first."
+ exit 1
+ fi
+ mkdir -p _harness
+ cd _harness
+ cp -a ../harness/daemons .
+ cp -a ../harness/certs .
+ echo keyfile > certs/keyfile
+ chmod 600 certs/keyfile
+ if ! mongod --help | grep -q -- --ssl; then
+ rm -rf daemons/db3
+ fi
+ COUNT=$(ls daemons | wc -l)
+ echo "Running daemons..."
+ svscan daemons &
+ SVSCANPID=$!
+ echo $SVSCANPID > svscan.pid
+ if ! kill -0 $SVSCANPID; then
+ echo "Cannot execute svscan."
+ exit 1
+ fi
+ echo "Starting $COUNT processes..."
+ for i in $(seq 30); do
+ UP=$(svstat daemons/* | grep ' up ' | grep -v ' [0-3] seconds' | wc -l)
+ echo "$UP processes up..."
+ if [ x$COUNT = x$UP ]; then
+ echo "Running setup.js with mongo..."
+ mongo --nodb ../harness/mongojs/init.js
+ exit 0
+ fi
+ sleep 1
+ done
+ echo "Failed to start processes. svstat _harness/daemons/* output:"
+ echo $LINE
+ svstat daemons/*
+ echo $LINE
+ for DAEMON in daemons/*; do
+        if svstat $DAEMON | grep ' up ' | grep -q ' [0-3] seconds'; then
+ echo "Logs for _harness/$DAEMON:"
+ echo $LINE
+ cat $DAEMON/log/log.txt
+ echo $LINE
+ fi
+ done
+ exit 1
+}
+
+stop() {
+ if [ -d _harness ]; then
+ cd _harness
+ if [ -f svscan.pid ]; then
+ kill -9 $(cat svscan.pid) 2> /dev/null || true
+ svc -dx daemons/* daemons/*/log > /dev/null 2>&1 || true
+ COUNT=$(ls daemons | wc -l)
+ echo "Shutting down $COUNT processes..."
+ while true; do
+ DOWN=$(svstat daemons/* | grep 'supervise not running' | wc -l)
+ echo "$DOWN processes down..."
+ if [ x$DOWN = x$COUNT ]; then
+ break
+ fi
+ sleep 1
+ done
+ rm svscan.pid
+ echo "Done."
+ fi
+ cd ..
+ rm -rf _harness
+ fi
+}
+
+
+if [ ! -f suite_test.go ]; then
+ echo "This script must be run from within the source directory."
+ exit 1
+fi
+
+case "$1" in
+
+ start)
+ start $2
+ ;;
+
+ stop)
+ stop $2
+ ;;
+
+esac
+
+# vim:ts=4:sw=4:et
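
setup.sh is the entry point for the daemontools-based harness: "start" copies
the daemons/ and certs/ trees into a scratch _harness/ directory, launches
svscan over them, waits for all services to report up, and then runs init.js;
"stop" kills svscan, brings the services down, and removes _harness/. Given
the suite_test.go guard above, it is meant to be run from the mgo source root:

    harness/setup.sh start    # boot the daemons, then configure shards/auth
    harness/setup.sh stop     # tear everything down and remove _harness/
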
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/LICENSE b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/LICENSE
new file mode 100644
index 00000000000..74487567632
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/bench_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/bench_test.go
new file mode 100644
index 00000000000..cd7380b1efb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/bench_test.go
@@ -0,0 +1,223 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Large data benchmark.
+// The JSON data is a summary of agl's changes in the
+// go, webkit, and chromium open source projects.
+// We benchmark converting between the JSON form
+// and in-memory data structures.
+
+package json
+
+import (
+ "bytes"
+ "compress/gzip"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+type codeResponse struct {
+ Tree *codeNode `json:"tree"`
+ Username string `json:"username"`
+}
+
+type codeNode struct {
+ Name string `json:"name"`
+ Kids []*codeNode `json:"kids"`
+ CLWeight float64 `json:"cl_weight"`
+ Touches int `json:"touches"`
+ MinT int64 `json:"min_t"`
+ MaxT int64 `json:"max_t"`
+ MeanT int64 `json:"mean_t"`
+}
+
+var codeJSON []byte
+var codeStruct codeResponse
+
+func codeInit() {
+ f, err := os.Open("testdata/code.json.gz")
+ if err != nil {
+ panic(err)
+ }
+ defer f.Close()
+ gz, err := gzip.NewReader(f)
+ if err != nil {
+ panic(err)
+ }
+ data, err := ioutil.ReadAll(gz)
+ if err != nil {
+ panic(err)
+ }
+
+ codeJSON = data
+
+ if err := Unmarshal(codeJSON, &codeStruct); err != nil {
+ panic("unmarshal code.json: " + err.Error())
+ }
+
+ if data, err = Marshal(&codeStruct); err != nil {
+ panic("marshal code.json: " + err.Error())
+ }
+
+ if !bytes.Equal(data, codeJSON) {
+ println("different lengths", len(data), len(codeJSON))
+ for i := 0; i < len(data) && i < len(codeJSON); i++ {
+ if data[i] != codeJSON[i] {
+ println("re-marshal: changed at byte", i)
+ println("orig: ", string(codeJSON[i-10:i+10]))
+ println("new: ", string(data[i-10:i+10]))
+ break
+ }
+ }
+ panic("re-marshal code.json: different result")
+ }
+}
+
+func BenchmarkCodeEncoder(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ enc := NewEncoder(ioutil.Discard)
+ for i := 0; i < b.N; i++ {
+ if err := enc.Encode(&codeStruct); err != nil {
+ b.Fatal("Encode:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeMarshal(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ for i := 0; i < b.N; i++ {
+ if _, err := Marshal(&codeStruct); err != nil {
+ b.Fatal("Marshal:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeDecoder(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ var buf bytes.Buffer
+ dec := NewDecoder(&buf)
+ var r codeResponse
+ for i := 0; i < b.N; i++ {
+ buf.Write(codeJSON)
+ // hide EOF
+ buf.WriteByte('\n')
+ buf.WriteByte('\n')
+ buf.WriteByte('\n')
+ if err := dec.Decode(&r); err != nil {
+ b.Fatal("Decode:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkDecoderStream(b *testing.B) {
+ b.StopTimer()
+ var buf bytes.Buffer
+ dec := NewDecoder(&buf)
+ buf.WriteString(`"` + strings.Repeat("x", 1000000) + `"` + "\n\n\n")
+ var x interface{}
+ if err := dec.Decode(&x); err != nil {
+ b.Fatal("Decode:", err)
+ }
+ ones := strings.Repeat(" 1\n", 300000) + "\n\n\n"
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ if i%300000 == 0 {
+ buf.WriteString(ones)
+ }
+ x = nil
+ if err := dec.Decode(&x); err != nil || x != 1.0 {
+ b.Fatalf("Decode: %v after %d", err, i)
+ }
+ }
+}
+
+func BenchmarkCodeUnmarshal(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ for i := 0; i < b.N; i++ {
+ var r codeResponse
+ if err := Unmarshal(codeJSON, &r); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+ b.SetBytes(int64(len(codeJSON)))
+}
+
+func BenchmarkCodeUnmarshalReuse(b *testing.B) {
+ if codeJSON == nil {
+ b.StopTimer()
+ codeInit()
+ b.StartTimer()
+ }
+ var r codeResponse
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(codeJSON, &r); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalString(b *testing.B) {
+ data := []byte(`"hello, world"`)
+ var s string
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &s); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalFloat64(b *testing.B) {
+ var f float64
+ data := []byte(`3.14`)
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &f); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkUnmarshalInt64(b *testing.B) {
+ var x int64
+ data := []byte(`3`)
+
+ for i := 0; i < b.N; i++ {
+ if err := Unmarshal(data, &x); err != nil {
+ b.Fatal("Unmarshal:", err)
+ }
+ }
+}
+
+func BenchmarkIssue10335(b *testing.B) {
+ b.ReportAllocs()
+ var s struct{}
+ j := []byte(`{"a":{ }}`)
+ for n := 0; n < b.N; n++ {
+ if err := Unmarshal(j, &s); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
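
The benchmarks above exercise the vendored decoder and encoder against the
usual testdata/code.json.gz corpus (codeInit panics if the file is missing or
if a marshal round-trip diverges). Assuming a standard Go toolchain, they can
be run from this package directory with something like:

    go test -run NONE -bench . -benchmem
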
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/decode.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/decode.go
new file mode 100644
index 00000000000..ce7c7d2493d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/decode.go
@@ -0,0 +1,1685 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strconv"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
+// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
+// reuses the existing map, keeping existing entries. Unmarshal then stores key-
+// value pairs from the JSON object into the map. The map's key type must
+// either be a string or implement encoding.TextUnmarshaler.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by types
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ d.value(rv)
+ return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+ // This function implements the JSON numbers grammar.
+ // See https://tools.ietf.org/html/rfc7159#section-6
+ // and http://json.org/number.gif
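+    //
+    // For example, "0", "-9.5", and "2e10" satisfy this grammar,
+    // while "01", "1.", and "+1" do not.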
+
+ if s == "" {
+ return false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+
+ // Digits
+ switch {
+ default:
+ return false
+
+ case s[0] == '0':
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // Make sure we are at the end.
+ return s == ""
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // read offset in data
+ scan scanner
+ nextscan scanner // for calls to nextValue
+ savedError error
+ useNumber bool
+ ext Extension
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+ panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = err
+ }
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+ c := d.data[d.off]
+ item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // Our scanner has seen the opening brace/bracket
+ // and thinks we're still in the middle of the object.
+    // Invent a closing brace/bracket to get it out.
+ if c == '{' {
+ d.scan.step(&d.scan, '}')
+ } else if c == '[' {
+ d.scan.step(&d.scan, ']')
+ } else {
+ // Was inside a function name. Get out of it.
+ d.scan.step(&d.scan, '(')
+ d.scan.step(&d.scan, ')')
+ }
+
+ return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+ var newOp int
+ for {
+ if d.off >= len(d.data) {
+ newOp = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ } else {
+ c := d.data[d.off]
+ d.off++
+ newOp = d.scan.step(&d.scan, c)
+ }
+ if newOp != op {
+ break
+ }
+ }
+ return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// It updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+ if !v.IsValid() {
+ _, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+ if err != nil {
+ d.error(err)
+ }
+ d.off = len(d.data) - len(rest)
+
+ // d.scan thinks we're still at the beginning of the item.
+ // Feed in an empty string - the shortest, simplest value -
+ // so that it knows we got to the end of the value.
+ if d.scan.redo {
+ // rewind.
+ d.scan.redo = false
+ d.scan.step = stateBeginValue
+ }
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+
+ n := len(d.scan.parseState)
+ if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+ // d.scan thinks we just read an object key; finish the object
+ d.scan.step(&d.scan, ':')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '"')
+ d.scan.step(&d.scan, '}')
+ }
+
+ return
+ }
+
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(v)
+
+ case scanBeginObject:
+ d.object(v)
+
+ case scanBeginLiteral:
+ d.literal(v)
+
+ case scanBeginName:
+ d.name(v)
+ }
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch op := d.scanWhile(scanSkipSpace); op {
+ default:
+ d.error(errPhase)
+
+ case scanBeginArray:
+ d.array(reflect.Value{})
+
+ case scanBeginObject:
+ d.object(reflect.Value{})
+
+ case scanBeginName:
+ switch v := d.nameInterface().(type) {
+ case nil, string:
+ return v
+ }
+
+ case scanBeginLiteral:
+ switch v := d.literalInterface().(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, v
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, v
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.arrayInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+ d.off--
+ d.next()
+ return
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.value(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.value(reflect.Value{})
+ }
+ i++
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+}
+
+var nullLiteral = []byte("null")
+var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if d.storeKeyed(pv) {
+ return
+ }
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.objectInterface()))
+ return
+ }
+
+ // Check type of target:
+ // struct or
+ // map[string]T or map[encoding.TextUnmarshaler]T
+ switch v.Kind() {
+ case reflect.Map:
+ // Map key must either have string kind or be an encoding.TextUnmarshaler.
+ t := v.Type()
+ if t.Key().Kind() != reflect.String &&
+ !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ var mapElem reflect.Value
+
+ empty := true
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ if !empty && !d.ext.trailingCommas {
+ d.syntaxError("beginning of object key string")
+ }
+ break
+ }
+ empty = false
+ if op == scanBeginName {
+ if !d.ext.unquotedKeys {
+ d.syntaxError("beginning of object key string")
+ }
+ } else if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+ unquotedKey := op == scanBeginName
+
+ // Read key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ var key []byte
+ if unquotedKey {
+ key = item
+ // TODO Fix code below to quote item when necessary.
+ } else {
+ var ok bool
+ key, ok = unquoteBytes(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, key) {
+ f = ff
+ break
+ }
+ if f == nil && ff.equalFold(ff.nameBytes, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kt := v.Type().Key()
+ var kv reflect.Value
+ switch {
+ case kt.Kind() == reflect.String:
+ kv = reflect.ValueOf(key).Convert(v.Type().Key())
+ case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+ kv = reflect.New(v.Type().Key())
+ d.literalStore(item, kv, true)
+ kv = kv.Elem()
+ default:
+ panic("json: Unexpected key type") // should never occur
+ }
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+}
+
+// isNull returns whether there's a null literal at the provided offset.
+func (d *decodeState) isNull(off int) bool {
+ if off+4 >= len(d.data) || d.data[off] != 'n' || d.data[off+1] != 'u' || d.data[off+2] != 'l' || d.data[off+3] != 'l' {
+ return false
+ }
+ d.nextscan.reset()
+ for i, c := range d.data[off:] {
+ if i > 4 {
+ return false
+ }
+ switch d.nextscan.step(&d.nextscan, c) {
+ case scanContinue, scanBeginName:
+ continue
+ }
+ break
+ }
+ return true
+}
+
+// name consumes a const or function from d.data[d.off-1:], decoding into the value v.
+// the first byte of the function name has been read already.
+func (d *decodeState) name(v reflect.Value) {
+ if d.isNull(d.off-1) {
+ d.literal(v)
+ return
+ }
+
+ // Check for unmarshaler.
+ u, ut, pv := d.indirect(v, false)
+ if d.storeKeyed(pv) {
+ return
+ }
+ if u != nil {
+ d.off--
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over function in input
+ return
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ out := d.nameInterface()
+ if out == nil {
+ v.Set(reflect.Zero(v.Type()))
+ } else {
+ v.Set(reflect.ValueOf(out))
+ }
+ return
+ }
+
+ nameStart := d.off - 1
+
+ op := d.scanWhile(scanContinue)
+
+ name := d.data[nameStart : d.off-1]
+ if op != scanParam {
+ // Back up so the byte just read is consumed next.
+ d.off--
+ d.scan.undo(op)
+ if l, ok := d.convertLiteral(name); ok {
+ d.storeValue(v, l)
+ return
+ }
+ d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)})
+ }
+
+ funcName := string(name)
+ funcData := d.ext.funcs[funcName]
+ if funcData.key == "" {
+ d.error(fmt.Errorf("json: unknown function %q", funcName))
+ }
+
+ // Check type of target:
+ // struct or
+ // map[string]T or map[encoding.TextUnmarshaler]T
+ switch v.Kind() {
+ case reflect.Map:
+ // Map key must either have string kind or be an encoding.TextUnmarshaler.
+ t := v.Type()
+ if t.Key().Kind() != reflect.String &&
+ !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+ case reflect.Struct:
+
+ default:
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ d.off--
+ d.next() // skip over { } in input
+ return
+ }
+
+ // TODO Fix case of func field as map.
+ //topv := v
+
+ // Figure out field corresponding to function.
+ key := []byte(funcData.key)
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ v = reflect.New(elemType).Elem()
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, key) {
+ f = ff
+ break
+ }
+ if f == nil && ff.equalFold(ff.nameBytes, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ for _, i := range f.index {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = v.Field(i)
+ }
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ }
+ }
+
+ // Check for unmarshaler on func field itself.
+ u, ut, pv = d.indirect(v, false)
+ if u != nil {
+ d.off = nameStart
+ err := u.UnmarshalJSON(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ var mapElem reflect.Value
+
+ // Parse function arguments.
+ for i := 0; ; i++ {
+ // closing ) - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndParams {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ if i >= len(funcData.args) {
+ d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
+ }
+ key := []byte(funcData.args[i])
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := v.Type().Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ } else {
+ var f *field
+ fields := cachedTypeFields(v.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, key) {
+ f = ff
+ break
+ }
+ if f == nil && ff.equalFold(ff.nameBytes, key) {
+ f = ff
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ }
+
+ // Read value.
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ d.literalStore(nullLiteral, subv, false)
+ case string:
+ d.literalStore([]byte(qv), subv, true)
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ d.value(subv)
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kt := v.Type().Key()
+ var kv reflect.Value
+ switch {
+ case kt.Kind() == reflect.String:
+ kv = reflect.ValueOf(key).Convert(v.Type().Key())
+ case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+ kv = reflect.New(v.Type().Key())
+ d.literalStore(key, kv, true)
+ kv = kv.Elem()
+ default:
+ panic("json: Unexpected key type") // should never occur
+ }
+ v.SetMapIndex(kv, subv)
+ }
+
+ // Next token must be , or ).
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndParams {
+ break
+ }
+ if op != scanParam {
+ d.error(errPhase)
+ }
+ }
+}
+
+// keyed attempts to decode an object or function using a keyed doc extension,
+// and returns the value and true on success, or nil and false otherwise.
+func (d *decodeState) keyed() (interface{}, bool) {
+ if len(d.ext.keyed) == 0 {
+ return nil, false
+ }
+
+ unquote := false
+
+ // Look-ahead first key to check for a keyed document extension.
+ d.nextscan.reset()
+ var start, end int
+ for i, c := range d.data[d.off-1:] {
+ switch op := d.nextscan.step(&d.nextscan, c); op {
+ case scanSkipSpace, scanContinue, scanBeginObject:
+ continue
+ case scanBeginLiteral, scanBeginName:
+ unquote = op == scanBeginLiteral
+ start = i
+ continue
+ }
+ end = i
+ break
+ }
+
+ name := d.data[d.off-1+start : d.off-1+end]
+
+ var key []byte
+ var ok bool
+ if unquote {
+ key, ok = unquoteBytes(name)
+ if !ok {
+ d.error(errPhase)
+ }
+ } else {
+ funcData, ok := d.ext.funcs[string(name)]
+ if !ok {
+ return nil, false
+ }
+ key = []byte(funcData.key)
+ }
+
+ decode, ok := d.ext.keyed[string(key)]
+ if !ok {
+ return nil, false
+ }
+
+ d.off--
+ out, err := decode(d.next())
+ if err != nil {
+ d.error(err)
+ }
+ return out, true
+}
+
+func (d *decodeState) storeKeyed(v reflect.Value) bool {
+ keyed, ok := d.keyed()
+ if !ok {
+ return false
+ }
+ d.storeValue(v, keyed)
+ return true
+}
+
+var (
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ nullBytes = []byte("null")
+)
+
+func (d *decodeState) storeValue(v reflect.Value, from interface{}) {
+ switch from {
+ case nil:
+ d.literalStore(nullBytes, v, false)
+ return
+ case true:
+ d.literalStore(trueBytes, v, false)
+ return
+ case false:
+ d.literalStore(falseBytes, v, false)
+ return
+ }
+ fromv := reflect.ValueOf(from)
+ for fromv.Kind() == reflect.Ptr && !fromv.IsNil() {
+ fromv = fromv.Elem()
+ }
+ fromt := fromv.Type()
+ for v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ vt := v.Type()
+ if fromt.AssignableTo(vt) {
+ v.Set(fromv)
+ } else if fromt.ConvertibleTo(vt) {
+ v.Set(fromv.Convert(vt))
+ } else {
+ d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+ }
+}
+
+func (d *decodeState) convertLiteral(name []byte) (interface{}, bool) {
+ if len(name) == 0 {
+ return nil, false
+ }
+ switch name[0] {
+ case 't':
+ if bytes.Equal(name, trueBytes) {
+ return true, true
+ }
+ case 'f':
+ if bytes.Equal(name, falseBytes) {
+ return false, true
+ }
+ case 'n':
+ if bytes.Equal(name, nullBytes) {
+ return nil, true
+ }
+ }
+ if l, ok := d.ext.consts[string(name)]; ok {
+ return l, true
+ }
+ return nil, false
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+
+ d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ if d.useNumber {
+ return Number(s), nil
+ }
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+ }
+ return f, nil
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+        // Empty string given.
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return
+ }
+ wantptr := item[0] == 'n' // null
+ u, ut, pv := d.indirect(v, wantptr)
+ if u != nil {
+ err := u.UnmarshalJSON(item)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ return
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ err := ut.UnmarshalText(s)
+ if err != nil {
+ d.error(err)
+ }
+ return
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := c == 't'
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ case reflect.Slice:
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(errPhase)
+ }
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ v.SetString(s)
+ if !isValidNumber(s) {
+ d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+ }
+ break
+ }
+ if fromQuoted {
+ d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ }
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+ switch d.scanWhile(scanSkipSpace) {
+ default:
+ d.error(errPhase)
+ panic("unreachable")
+ case scanBeginArray:
+ return d.arrayInterface()
+ case scanBeginObject:
+ return d.objectInterface()
+ case scanBeginLiteral:
+ return d.literalInterface()
+ case scanBeginName:
+ return d.nameInterface()
+ }
+}
+
+func (d *decodeState) syntaxError(expected string) {
+ msg := fmt.Sprintf("invalid character '%c' looking for %s", d.data[d.off-1], expected)
+ d.error(&SyntaxError{msg, int64(d.off)})
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ if len(v) > 0 && !d.ext.trailingCommas {
+ d.syntaxError("beginning of value")
+ }
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndArray {
+ break
+ }
+ if op != scanArrayValue {
+ d.error(errPhase)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() interface{} {
+ v, ok := d.keyed()
+ if ok {
+ return v
+ }
+
+ m := make(map[string]interface{})
+ for {
+ // Read opening " of string key or closing }.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ if len(m) > 0 && !d.ext.trailingCommas {
+ d.syntaxError("beginning of object key string")
+ }
+ break
+ }
+ if op == scanBeginName {
+ if !d.ext.unquotedKeys {
+ d.syntaxError("beginning of object key string")
+ }
+ } else if op != scanBeginLiteral {
+ d.error(errPhase)
+ }
+ unquotedKey := op == scanBeginName
+
+ // Read string key.
+ start := d.off - 1
+ op = d.scanWhile(scanContinue)
+ item := d.data[start : d.off-1]
+ var key string
+ if unquotedKey {
+ key = string(item)
+ } else {
+ var ok bool
+ key, ok = unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ }
+
+ // Read : before value.
+ if op == scanSkipSpace {
+ op = d.scanWhile(scanSkipSpace)
+ }
+ if op != scanObjectKey {
+ d.error(errPhase)
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndObject {
+ break
+ }
+ if op != scanObjectValue {
+ d.error(errPhase)
+ }
+ }
+ return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.off - 1
+ op := d.scanWhile(scanContinue)
+
+ // Scan read one byte too far; back up.
+ d.off--
+ d.scan.undo(op)
+ item := d.data[start:d.off]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ d.error(errPhase)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ d.error(errPhase)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// nameInterface is like name but returns map[string]interface{}.
+func (d *decodeState) nameInterface() interface{} {
+ v, ok := d.keyed()
+ if ok {
+ return v
+ }
+
+ nameStart := d.off - 1
+
+ op := d.scanWhile(scanContinue)
+
+ name := d.data[nameStart : d.off-1]
+ if op != scanParam {
+ // Back up so the byte just read is consumed next.
+ d.off--
+ d.scan.undo(op)
+ if l, ok := d.convertLiteral(name); ok {
+ return l
+ }
+ d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)})
+ }
+
+ funcName := string(name)
+ funcData := d.ext.funcs[funcName]
+ if funcData.key == "" {
+ d.error(fmt.Errorf("json: unknown function %q", funcName))
+ }
+
+ m := make(map[string]interface{})
+ for i := 0; ; i++ {
+ // Look ahead for ) - can only happen on first iteration.
+ op := d.scanWhile(scanSkipSpace)
+ if op == scanEndParams {
+ break
+ }
+
+ // Back up so d.value can have the byte we just read.
+ d.off--
+ d.scan.undo(op)
+
+ if i >= len(funcData.args) {
+ d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
+ }
+ m[funcData.args[i]] = d.valueInterface()
+
+ // Next token must be , or ).
+ op = d.scanWhile(scanSkipSpace)
+ if op == scanEndParams {
+ break
+ }
+ if op != scanParam {
+ d.error(errPhase)
+ }
+ }
+ return map[string]interface{}{funcData.key: m}
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
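+// For example, if s begins with the six bytes \u0041, getu4 returns
+// 'A' (0x41).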
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+ if err != nil {
+ return -1
+ }
+ return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different from Go's, so we cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote, control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
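
The decoder above keeps the standard library's Unmarshal contract and layers
the mgo extensions (unquoted keys, trailing commas, constants, and function
literals driven by the Extension hooks) on top. A minimal sketch of the core
documented behavior, assuming this package is imported as json and using an
illustrative struct invented for this sketch:

    // Sketch: decode into a struct, then into an empty interface.
    type point struct {
        X     int    `json:"x"`
        Y     int    `json:"y"`
        Label string // field names match case-insensitively, so "label" works
    }

    func demo() error {
        data := []byte(`{"x": 1, "y": 2, "label": "origin"}`)

        var p point
        // Unmarshal requires a non-nil pointer; anything else yields
        // an *InvalidUnmarshalError.
        if err := json.Unmarshal(data, &p); err != nil {
            return err
        }

        // Decoding into an empty interface produces map[string]interface{}
        // for objects and float64 for numbers (or Number when the decoder's
        // UseNumber mode is enabled).
        var v interface{}
        if err := json.Unmarshal(data, &v); err != nil {
            return err
        }
        return nil
    }
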
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/decode_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/decode_test.go
new file mode 100644
index 00000000000..30e46ca44f0
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/decode_test.go
@@ -0,0 +1,1512 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "image"
+ "net"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+)
+
+type T struct {
+ X string
+ Y int
+ Z int `json:"-"`
+}
+
+type U struct {
+ Alphabet string `json:"alpha"`
+}
+
+type V struct {
+ F1 interface{}
+ F2 int32
+ F3 Number
+}
+
+// ifaceNumAsFloat64 and ifaceNumAsNumber are used to test unmarshaling
+// with and without UseNumber.
+var ifaceNumAsFloat64 = map[string]interface{}{
+ "k1": float64(1),
+ "k2": "s",
+ "k3": []interface{}{float64(1), float64(2.0), float64(3e-3)},
+ "k4": map[string]interface{}{"kk1": "s", "kk2": float64(2)},
+}
+
+var ifaceNumAsNumber = map[string]interface{}{
+ "k1": Number("1"),
+ "k2": "s",
+ "k3": []interface{}{Number("1"), Number("2.0"), Number("3e-3")},
+ "k4": map[string]interface{}{"kk1": "s", "kk2": Number("2")},
+}
+
+type tx struct {
+ x int
+}
+
+// A type that can unmarshal itself.
+
+type unmarshaler struct {
+ T bool
+}
+
+func (u *unmarshaler) UnmarshalJSON(b []byte) error {
+ *u = unmarshaler{true} // All we need to see that UnmarshalJSON is called.
+ return nil
+}
+
+type ustruct struct {
+ M unmarshaler
+}
+
+type unmarshalerText struct {
+ A, B string
+}
+
+// needed for re-marshaling tests
+func (u unmarshalerText) MarshalText() ([]byte, error) {
+ return []byte(u.A + ":" + u.B), nil
+}
+
+func (u *unmarshalerText) UnmarshalText(b []byte) error {
+ pos := bytes.Index(b, []byte(":"))
+ if pos == -1 {
+ return errors.New("missing separator")
+ }
+ u.A, u.B = string(b[:pos]), string(b[pos+1:])
+ return nil
+}
+
+var _ encoding.TextUnmarshaler = (*unmarshalerText)(nil)
+
+type ustructText struct {
+ M unmarshalerText
+}
+
+var (
+	um0, um1 unmarshaler // targets of unmarshaling
+ ump = &um1
+ umtrue = unmarshaler{true}
+ umslice = []unmarshaler{{true}}
+ umslicep = new([]unmarshaler)
+ umstruct = ustruct{unmarshaler{true}}
+
+	um0T, um1T unmarshalerText // targets of unmarshaling
+ umpType = &um1T
+ umtrueXY = unmarshalerText{"x", "y"}
+ umsliceXY = []unmarshalerText{{"x", "y"}}
+ umslicepType = new([]unmarshalerText)
+ umstructType = new(ustructText)
+ umstructXY = ustructText{unmarshalerText{"x", "y"}}
+
+ ummapType = map[unmarshalerText]bool{}
+ ummapXY = map[unmarshalerText]bool{unmarshalerText{"x", "y"}: true}
+)
+
+// Test data structures for anonymous fields.
+
+type Point struct {
+ Z int
+}
+
+type Top struct {
+ Level0 int
+ Embed0
+ *Embed0a
+ *Embed0b `json:"e,omitempty"` // treated as named
+ Embed0c `json:"-"` // ignored
+ Loop
+ Embed0p // has Point with X, Y, used
+ Embed0q // has Point with Z, used
+ embed // contains exported field
+}
+
+type Embed0 struct {
+ Level1a int // overridden by Embed0a's Level1a with json tag
+ Level1b int // used because Embed0a's Level1b is renamed
+ Level1c int // used because Embed0a's Level1c is ignored
+ Level1d int // annihilated by Embed0a's Level1d
+ Level1e int `json:"x"` // annihilated by Embed0a.Level1e
+}
+
+type Embed0a struct {
+ Level1a int `json:"Level1a,omitempty"`
+ Level1b int `json:"LEVEL1B,omitempty"`
+ Level1c int `json:"-"`
+ Level1d int // annihilated by Embed0's Level1d
+ Level1f int `json:"x"` // annihilated by Embed0's Level1e
+}
+
+type Embed0b Embed0
+
+type Embed0c Embed0
+
+type Embed0p struct {
+ image.Point
+}
+
+type Embed0q struct {
+ Point
+}
+
+type embed struct {
+ Q int
+}
+
+type Loop struct {
+ Loop1 int `json:",omitempty"`
+ Loop2 int `json:",omitempty"`
+ *Loop
+}
+
+// From reflect test:
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+ S6
+ S7
+ S8
+}
+
+type S6 struct {
+ X int
+}
+
+type S7 S6
+
+type S8 struct {
+ S9
+}
+
+type S9 struct {
+ X int
+ Y int
+}
+
+// From reflect test:
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+ S11
+ S12
+ S13
+}
+
+type S11 struct {
+ S6
+}
+
+type S12 struct {
+ S6
+}
+
+type S13 struct {
+ S8
+}
+
+type unmarshalTest struct {
+ in string
+ ptr interface{}
+ out interface{}
+ err error
+ useNumber bool
+}
+
+type Ambig struct {
+ // Given "hello", the first match should win.
+ First int `json:"HELLO"`
+ Second int `json:"Hello"`
+}
+
+type XYZ struct {
+ X interface{}
+ Y interface{}
+ Z interface{}
+}
+
+func sliceAddr(x []int) *[]int { return &x }
+func mapAddr(x map[string]int) *map[string]int { return &x }
+
+var unmarshalTests = []unmarshalTest{
+ // basic types
+ {in: `true`, ptr: new(bool), out: true},
+ {in: `1`, ptr: new(int), out: 1},
+ {in: `1.2`, ptr: new(float64), out: 1.2},
+ {in: `-5`, ptr: new(int16), out: int16(-5)},
+ {in: `2`, ptr: new(Number), out: Number("2"), useNumber: true},
+ {in: `2`, ptr: new(Number), out: Number("2")},
+ {in: `2`, ptr: new(interface{}), out: float64(2.0)},
+ {in: `2`, ptr: new(interface{}), out: Number("2"), useNumber: true},
+ {in: `"a\u1234"`, ptr: new(string), out: "a\u1234"},
+ {in: `"http:\/\/"`, ptr: new(string), out: "http://"},
+ {in: `"g-clef: \uD834\uDD1E"`, ptr: new(string), out: "g-clef: \U0001D11E"},
+ {in: `"invalid: \uD834x\uDD1E"`, ptr: new(string), out: "invalid: \uFFFDx\uFFFD"},
+ {in: "null", ptr: new(interface{}), out: nil},
+ {in: `{"X": [1,2,3], "Y": 4}`, ptr: new(T), out: T{Y: 4}, err: &UnmarshalTypeError{"array", reflect.TypeOf(""), 7}},
+ {in: `{"x": 1}`, ptr: new(tx), out: tx{}},
+ {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: float64(1), F2: int32(2), F3: Number("3")}},
+ {in: `{"F1":1,"F2":2,"F3":3}`, ptr: new(V), out: V{F1: Number("1"), F2: int32(2), F3: Number("3")}, useNumber: true},
+ {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsFloat64},
+ {in: `{"k1":1,"k2":"s","k3":[1,2.0,3e-3],"k4":{"kk1":"s","kk2":2}}`, ptr: new(interface{}), out: ifaceNumAsNumber, useNumber: true},
+
+ // raw values with whitespace
+ {in: "\n true ", ptr: new(bool), out: true},
+ {in: "\t 1 ", ptr: new(int), out: 1},
+ {in: "\r 1.2 ", ptr: new(float64), out: 1.2},
+ {in: "\t -5 \n", ptr: new(int16), out: int16(-5)},
+ {in: "\t \"a\\u1234\" \n", ptr: new(string), out: "a\u1234"},
+
+ // Z has a "-" tag.
+ {in: `{"Y": 1, "Z": 2}`, ptr: new(T), out: T{Y: 1}},
+
+ {in: `{"alpha": "abc", "alphabet": "xyz"}`, ptr: new(U), out: U{Alphabet: "abc"}},
+ {in: `{"alpha": "abc"}`, ptr: new(U), out: U{Alphabet: "abc"}},
+ {in: `{"alphabet": "xyz"}`, ptr: new(U), out: U{}},
+
+ // syntax errors
+ {in: `{"X": "foo", "Y"}`, err: &SyntaxError{"invalid character '}' after object key", 17}},
+ {in: `[1, 2, 3+]`, err: &SyntaxError{"invalid character '+' after array element", 9}},
+ {in: `{"X":12x}`, err: &SyntaxError{"invalid character 'x' after object key:value pair", 8}, useNumber: true},
+
+ // raw value errors
+ {in: "\x01 42", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " 42 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 5}},
+ {in: "\x01 true", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " false \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 8}},
+ {in: "\x01 1.2", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " 3.4 \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 6}},
+ {in: "\x01 \"string\"", err: &SyntaxError{"invalid character '\\x01' looking for beginning of value", 1}},
+ {in: " \"string\" \x01", err: &SyntaxError{"invalid character '\\x01' after top-level value", 11}},
+
+ // array tests
+ {in: `[1, 2, 3]`, ptr: new([3]int), out: [3]int{1, 2, 3}},
+ {in: `[1, 2, 3]`, ptr: new([1]int), out: [1]int{1}},
+ {in: `[1, 2, 3]`, ptr: new([5]int), out: [5]int{1, 2, 3, 0, 0}},
+
+ // empty array to interface test
+ {in: `[]`, ptr: new([]interface{}), out: []interface{}{}},
+ {in: `null`, ptr: new([]interface{}), out: []interface{}(nil)},
+ {in: `{"T":[]}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": []interface{}{}}},
+ {in: `{"T":null}`, ptr: new(map[string]interface{}), out: map[string]interface{}{"T": interface{}(nil)}},
+
+ // composite tests
+ {in: allValueIndent, ptr: new(All), out: allValue},
+ {in: allValueCompact, ptr: new(All), out: allValue},
+ {in: allValueIndent, ptr: new(*All), out: &allValue},
+ {in: allValueCompact, ptr: new(*All), out: &allValue},
+ {in: pallValueIndent, ptr: new(All), out: pallValue},
+ {in: pallValueCompact, ptr: new(All), out: pallValue},
+ {in: pallValueIndent, ptr: new(*All), out: &pallValue},
+ {in: pallValueCompact, ptr: new(*All), out: &pallValue},
+
+ // unmarshal interface test
+ {in: `{"T":false}`, ptr: &um0, out: umtrue}, // use "false" so test will fail if custom unmarshaler is not called
+ {in: `{"T":false}`, ptr: &ump, out: &umtrue},
+ {in: `[{"T":false}]`, ptr: &umslice, out: umslice},
+ {in: `[{"T":false}]`, ptr: &umslicep, out: &umslice},
+ {in: `{"M":{"T":"x:y"}}`, ptr: &umstruct, out: umstruct},
+
+ // UnmarshalText interface test
+ {in: `"x:y"`, ptr: &um0T, out: umtrueXY},
+ {in: `"x:y"`, ptr: &umpType, out: &umtrueXY},
+ {in: `["x:y"]`, ptr: &umsliceXY, out: umsliceXY},
+ {in: `["x:y"]`, ptr: &umslicepType, out: &umsliceXY},
+ {in: `{"M":"x:y"}`, ptr: umstructType, out: umstructXY},
+
+ // Map keys can be encoding.TextUnmarshalers
+ {in: `{"x:y":true}`, ptr: &ummapType, out: ummapXY},
+	// If multiple values exist for the same key, only the most recent value is used.
+ {in: `{"x:y":false,"x:y":true}`, ptr: &ummapType, out: ummapXY},
+
+ // Overwriting of data.
+ // This is different from package xml, but it's what we've always done.
+ // Now documented and tested.
+ {in: `[2]`, ptr: sliceAddr([]int{1}), out: []int{2}},
+ {in: `{"key": 2}`, ptr: mapAddr(map[string]int{"old": 0, "key": 1}), out: map[string]int{"key": 2}},
+
+ {
+ in: `{
+ "Level0": 1,
+ "Level1b": 2,
+ "Level1c": 3,
+ "x": 4,
+ "Level1a": 5,
+ "LEVEL1B": 6,
+ "e": {
+ "Level1a": 8,
+ "Level1b": 9,
+ "Level1c": 10,
+ "Level1d": 11,
+ "x": 12
+ },
+ "Loop1": 13,
+ "Loop2": 14,
+ "X": 15,
+ "Y": 16,
+ "Z": 17,
+ "Q": 18
+ }`,
+ ptr: new(Top),
+ out: Top{
+ Level0: 1,
+ Embed0: Embed0{
+ Level1b: 2,
+ Level1c: 3,
+ },
+ Embed0a: &Embed0a{
+ Level1a: 5,
+ Level1b: 6,
+ },
+ Embed0b: &Embed0b{
+ Level1a: 8,
+ Level1b: 9,
+ Level1c: 10,
+ Level1d: 11,
+ Level1e: 12,
+ },
+ Loop: Loop{
+ Loop1: 13,
+ Loop2: 14,
+ },
+ Embed0p: Embed0p{
+ Point: image.Point{X: 15, Y: 16},
+ },
+ Embed0q: Embed0q{
+ Point: Point{Z: 17},
+ },
+ embed: embed{
+ Q: 18,
+ },
+ },
+ },
+ {
+ in: `{"hello": 1}`,
+ ptr: new(Ambig),
+ out: Ambig{First: 1},
+ },
+
+ {
+ in: `{"X": 1,"Y":2}`,
+ ptr: new(S5),
+ out: S5{S8: S8{S9: S9{Y: 2}}},
+ },
+ {
+ in: `{"X": 1,"Y":2}`,
+ ptr: new(S10),
+ out: S10{S13: S13{S8: S8{S9: S9{Y: 2}}}},
+ },
+
+ // invalid UTF-8 is coerced to valid UTF-8.
+ {
+ in: "\"hello\xffworld\"",
+ ptr: new(string),
+ out: "hello\ufffdworld",
+ },
+ {
+ in: "\"hello\xc2\xc2world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\xc2\xffworld\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\\ud800\\ud800world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffdworld",
+ },
+ {
+ in: "\"hello\xed\xa0\x80\xed\xb0\x80world\"",
+ ptr: new(string),
+ out: "hello\ufffd\ufffd\ufffd\ufffd\ufffd\ufffdworld",
+ },
+
+ // Used to be issue 8305, but time.Time implements encoding.TextUnmarshaler so this works now.
+ {
+ in: `{"2009-11-10T23:00:00Z": "hello world"}`,
+ ptr: &map[time.Time]string{},
+ out: map[time.Time]string{time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC): "hello world"},
+ },
+
+ // issue 8305
+ {
+ in: `{"2009-11-10T23:00:00Z": "hello world"}`,
+ ptr: &map[Point]string{},
+ err: &UnmarshalTypeError{"object", reflect.TypeOf(map[Point]string{}), 1},
+ },
+ {
+ in: `{"asdf": "hello world"}`,
+ ptr: &map[unmarshaler]string{},
+ err: &UnmarshalTypeError{"object", reflect.TypeOf(map[unmarshaler]string{}), 1},
+ },
+}
+
+func TestMarshal(t *testing.T) {
+ b, err := Marshal(allValue)
+ if err != nil {
+ t.Fatalf("Marshal allValue: %v", err)
+ }
+ if string(b) != allValueCompact {
+ t.Errorf("Marshal allValueCompact")
+ diff(t, b, []byte(allValueCompact))
+ return
+ }
+
+ b, err = Marshal(pallValue)
+ if err != nil {
+ t.Fatalf("Marshal pallValue: %v", err)
+ }
+ if string(b) != pallValueCompact {
+ t.Errorf("Marshal pallValueCompact")
+ diff(t, b, []byte(pallValueCompact))
+ return
+ }
+}
+
+var badUTF8 = []struct {
+ in, out string
+}{
+ {"hello\xffworld", `"hello\ufffdworld"`},
+ {"", `""`},
+ {"\xff", `"\ufffd"`},
+ {"\xff\xff", `"\ufffd\ufffd"`},
+ {"a\xffb", `"a\ufffdb"`},
+ {"\xe6\x97\xa5\xe6\x9c\xac\xff\xaa\x9e", `"日本\ufffd\ufffd\ufffd"`},
+}
+
+func TestMarshalBadUTF8(t *testing.T) {
+ for _, tt := range badUTF8 {
+ b, err := Marshal(tt.in)
+ if string(b) != tt.out || err != nil {
+ t.Errorf("Marshal(%q) = %#q, %v, want %#q, nil", tt.in, b, err, tt.out)
+ }
+ }
+}
+
+func TestMarshalNumberZeroVal(t *testing.T) {
+ var n Number
+ out, err := Marshal(n)
+ if err != nil {
+ t.Fatal(err)
+ }
+ outStr := string(out)
+ if outStr != "0" {
+ t.Fatalf("Invalid zero val for Number: %q", outStr)
+ }
+}
+
+func TestMarshalEmbeds(t *testing.T) {
+ top := &Top{
+ Level0: 1,
+ Embed0: Embed0{
+ Level1b: 2,
+ Level1c: 3,
+ },
+ Embed0a: &Embed0a{
+ Level1a: 5,
+ Level1b: 6,
+ },
+ Embed0b: &Embed0b{
+ Level1a: 8,
+ Level1b: 9,
+ Level1c: 10,
+ Level1d: 11,
+ Level1e: 12,
+ },
+ Loop: Loop{
+ Loop1: 13,
+ Loop2: 14,
+ },
+ Embed0p: Embed0p{
+ Point: image.Point{X: 15, Y: 16},
+ },
+ Embed0q: Embed0q{
+ Point: Point{Z: 17},
+ },
+ embed: embed{
+ Q: 18,
+ },
+ }
+ b, err := Marshal(top)
+ if err != nil {
+ t.Fatal(err)
+ }
+ want := "{\"Level0\":1,\"Level1b\":2,\"Level1c\":3,\"Level1a\":5,\"LEVEL1B\":6,\"e\":{\"Level1a\":8,\"Level1b\":9,\"Level1c\":10,\"Level1d\":11,\"x\":12},\"Loop1\":13,\"Loop2\":14,\"X\":15,\"Y\":16,\"Z\":17,\"Q\":18}"
+ if string(b) != want {
+ t.Errorf("Wrong marshal result.\n got: %q\nwant: %q", b, want)
+ }
+}
+
+func TestUnmarshal(t *testing.T) {
+ for i, tt := range unmarshalTests {
+ var scan scanner
+ in := []byte(tt.in)
+ if err := checkValid(in, &scan); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: checkValid: %#v", i, err)
+ continue
+ }
+ }
+ if tt.ptr == nil {
+ continue
+ }
+
+ // v = new(right-type)
+ v := reflect.New(reflect.TypeOf(tt.ptr).Elem())
+ dec := NewDecoder(bytes.NewReader(in))
+ if tt.useNumber {
+ dec.UseNumber()
+ }
+ if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: %v, want %v", i, err, tt.err)
+ continue
+ } else if err != nil {
+ continue
+ }
+ if !reflect.DeepEqual(v.Elem().Interface(), tt.out) {
+ t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out)
+ data, _ := Marshal(v.Elem().Interface())
+ println(string(data))
+ data, _ = Marshal(tt.out)
+ println(string(data))
+ continue
+ }
+
+ // Check round trip.
+ if tt.err == nil {
+ enc, err := Marshal(v.Interface())
+ if err != nil {
+ t.Errorf("#%d: error re-marshaling: %v", i, err)
+ continue
+ }
+ vv := reflect.New(reflect.TypeOf(tt.ptr).Elem())
+ dec = NewDecoder(bytes.NewReader(enc))
+ if tt.useNumber {
+ dec.UseNumber()
+ }
+ if err := dec.Decode(vv.Interface()); err != nil {
+ t.Errorf("#%d: error re-unmarshaling %#q: %v", i, enc, err)
+ continue
+ }
+ if !reflect.DeepEqual(v.Elem().Interface(), vv.Elem().Interface()) {
+ t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), vv.Elem().Interface())
+ t.Errorf(" In: %q", strings.Map(noSpace, string(in)))
+ t.Errorf("Marshal: %q", strings.Map(noSpace, string(enc)))
+ continue
+ }
+ }
+ }
+}
+
+func TestUnmarshalMarshal(t *testing.T) {
+ initBig()
+ var v interface{}
+ if err := Unmarshal(jsonBig, &v); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if !bytes.Equal(jsonBig, b) {
+ t.Errorf("Marshal jsonBig")
+ diff(t, b, jsonBig)
+ return
+ }
+}
+
+var numberTests = []struct {
+ in string
+ i int64
+ intErr string
+ f float64
+ floatErr string
+}{
+ {in: "-1.23e1", intErr: "strconv.ParseInt: parsing \"-1.23e1\": invalid syntax", f: -1.23e1},
+ {in: "-12", i: -12, f: -12.0},
+ {in: "1e1000", intErr: "strconv.ParseInt: parsing \"1e1000\": invalid syntax", floatErr: "strconv.ParseFloat: parsing \"1e1000\": value out of range"},
+}
+
+// Independent of Decode, basic coverage of the accessors in Number
+func TestNumberAccessors(t *testing.T) {
+ for _, tt := range numberTests {
+ n := Number(tt.in)
+ if s := n.String(); s != tt.in {
+ t.Errorf("Number(%q).String() is %q", tt.in, s)
+ }
+ if i, err := n.Int64(); err == nil && tt.intErr == "" && i != tt.i {
+ t.Errorf("Number(%q).Int64() is %d", tt.in, i)
+ } else if (err == nil && tt.intErr != "") || (err != nil && err.Error() != tt.intErr) {
+ t.Errorf("Number(%q).Int64() wanted error %q but got: %v", tt.in, tt.intErr, err)
+ }
+ if f, err := n.Float64(); err == nil && tt.floatErr == "" && f != tt.f {
+ t.Errorf("Number(%q).Float64() is %g", tt.in, f)
+ } else if (err == nil && tt.floatErr != "") || (err != nil && err.Error() != tt.floatErr) {
+ t.Errorf("Number(%q).Float64() wanted error %q but got: %v", tt.in, tt.floatErr, err)
+ }
+ }
+}
+
+func TestLargeByteSlice(t *testing.T) {
+ s0 := make([]byte, 2000)
+ for i := range s0 {
+ s0[i] = byte(i)
+ }
+ b, err := Marshal(s0)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ var s1 []byte
+ if err := Unmarshal(b, &s1); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !bytes.Equal(s0, s1) {
+ t.Errorf("Marshal large byte slice")
+ diff(t, s0, s1)
+ }
+}
+
+type Xint struct {
+ X int
+}
+
+func TestUnmarshalInterface(t *testing.T) {
+ var xint Xint
+ var i interface{} = &xint
+ if err := Unmarshal([]byte(`{"X":1}`), &i); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if xint.X != 1 {
+ t.Fatalf("Did not write to xint")
+ }
+}
+
+func TestUnmarshalPtrPtr(t *testing.T) {
+ var xint Xint
+ pxint := &xint
+ if err := Unmarshal([]byte(`{"X":1}`), &pxint); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if xint.X != 1 {
+ t.Fatalf("Did not write to xint")
+ }
+}
+
+func TestEscape(t *testing.T) {
+ const input = `"foobar"<html>` + " [\u2028 \u2029]"
+ const expected = `"\"foobar\"\u003chtml\u003e [\u2028 \u2029]"`
+ b, err := Marshal(input)
+ if err != nil {
+ t.Fatalf("Marshal error: %v", err)
+ }
+ if s := string(b); s != expected {
+ t.Errorf("Encoding of [%s]:\n got [%s]\nwant [%s]", input, s, expected)
+ }
+}
+
+// WrongString is a struct that's misusing the ,string modifier.
+type WrongString struct {
+ Message string `json:"result,string"`
+}
+
+type wrongStringTest struct {
+ in, err string
+}
+
+var wrongStringTests = []wrongStringTest{
+ {`{"result":"x"}`, `json: invalid use of ,string struct tag, trying to unmarshal "x" into string`},
+ {`{"result":"foo"}`, `json: invalid use of ,string struct tag, trying to unmarshal "foo" into string`},
+ {`{"result":"123"}`, `json: invalid use of ,string struct tag, trying to unmarshal "123" into string`},
+ {`{"result":123}`, `json: invalid use of ,string struct tag, trying to unmarshal unquoted value into string`},
+}
+
+// If people misuse the ,string modifier, the error message should be
+// helpful, telling the user that they're doing it wrong.
+func TestErrorMessageFromMisusedString(t *testing.T) {
+ for n, tt := range wrongStringTests {
+ r := strings.NewReader(tt.in)
+ var s WrongString
+ err := NewDecoder(r).Decode(&s)
+ got := fmt.Sprintf("%v", err)
+ if got != tt.err {
+ t.Errorf("%d. got err = %q, want %q", n, got, tt.err)
+ }
+ }
+}
+
+func noSpace(c rune) rune {
+	if isSpace(byte(c)) { // only used for ASCII
+ return -1
+ }
+ return c
+}
+
+type All struct {
+ Bool bool
+ Int int
+ Int8 int8
+ Int16 int16
+ Int32 int32
+ Int64 int64
+ Uint uint
+ Uint8 uint8
+ Uint16 uint16
+ Uint32 uint32
+ Uint64 uint64
+ Uintptr uintptr
+ Float32 float32
+ Float64 float64
+
+ Foo string `json:"bar"`
+ Foo2 string `json:"bar2,dummyopt"`
+
+ IntStr int64 `json:",string"`
+
+ PBool *bool
+ PInt *int
+ PInt8 *int8
+ PInt16 *int16
+ PInt32 *int32
+ PInt64 *int64
+ PUint *uint
+ PUint8 *uint8
+ PUint16 *uint16
+ PUint32 *uint32
+ PUint64 *uint64
+ PUintptr *uintptr
+ PFloat32 *float32
+ PFloat64 *float64
+
+ String string
+ PString *string
+
+ Map map[string]Small
+ MapP map[string]*Small
+ PMap *map[string]Small
+ PMapP *map[string]*Small
+
+ EmptyMap map[string]Small
+ NilMap map[string]Small
+
+ Slice []Small
+ SliceP []*Small
+ PSlice *[]Small
+ PSliceP *[]*Small
+
+ EmptySlice []Small
+ NilSlice []Small
+
+ StringSlice []string
+ ByteSlice []byte
+
+ Small Small
+ PSmall *Small
+ PPSmall **Small
+
+ Interface interface{}
+ PInterface *interface{}
+
+ unexported int
+}
+
+type Small struct {
+ Tag string
+}
+
+var allValue = All{
+ Bool: true,
+ Int: 2,
+ Int8: 3,
+ Int16: 4,
+ Int32: 5,
+ Int64: 6,
+ Uint: 7,
+ Uint8: 8,
+ Uint16: 9,
+ Uint32: 10,
+ Uint64: 11,
+ Uintptr: 12,
+ Float32: 14.1,
+ Float64: 15.1,
+ Foo: "foo",
+ Foo2: "foo2",
+ IntStr: 42,
+ String: "16",
+ Map: map[string]Small{
+ "17": {Tag: "tag17"},
+ "18": {Tag: "tag18"},
+ },
+ MapP: map[string]*Small{
+ "19": {Tag: "tag19"},
+ "20": nil,
+ },
+ EmptyMap: map[string]Small{},
+ Slice: []Small{{Tag: "tag20"}, {Tag: "tag21"}},
+ SliceP: []*Small{{Tag: "tag22"}, nil, {Tag: "tag23"}},
+ EmptySlice: []Small{},
+ StringSlice: []string{"str24", "str25", "str26"},
+ ByteSlice: []byte{27, 28, 29},
+ Small: Small{Tag: "tag30"},
+ PSmall: &Small{Tag: "tag31"},
+ Interface: 5.2,
+}
+
+var pallValue = All{
+ PBool: &allValue.Bool,
+ PInt: &allValue.Int,
+ PInt8: &allValue.Int8,
+ PInt16: &allValue.Int16,
+ PInt32: &allValue.Int32,
+ PInt64: &allValue.Int64,
+ PUint: &allValue.Uint,
+ PUint8: &allValue.Uint8,
+ PUint16: &allValue.Uint16,
+ PUint32: &allValue.Uint32,
+ PUint64: &allValue.Uint64,
+ PUintptr: &allValue.Uintptr,
+ PFloat32: &allValue.Float32,
+ PFloat64: &allValue.Float64,
+ PString: &allValue.String,
+ PMap: &allValue.Map,
+ PMapP: &allValue.MapP,
+ PSlice: &allValue.Slice,
+ PSliceP: &allValue.SliceP,
+ PPSmall: &allValue.PSmall,
+ PInterface: &allValue.Interface,
+}
+
+var allValueIndent = `{
+ "Bool": true,
+ "Int": 2,
+ "Int8": 3,
+ "Int16": 4,
+ "Int32": 5,
+ "Int64": 6,
+ "Uint": 7,
+ "Uint8": 8,
+ "Uint16": 9,
+ "Uint32": 10,
+ "Uint64": 11,
+ "Uintptr": 12,
+ "Float32": 14.1,
+ "Float64": 15.1,
+ "bar": "foo",
+ "bar2": "foo2",
+ "IntStr": "42",
+ "PBool": null,
+ "PInt": null,
+ "PInt8": null,
+ "PInt16": null,
+ "PInt32": null,
+ "PInt64": null,
+ "PUint": null,
+ "PUint8": null,
+ "PUint16": null,
+ "PUint32": null,
+ "PUint64": null,
+ "PUintptr": null,
+ "PFloat32": null,
+ "PFloat64": null,
+ "String": "16",
+ "PString": null,
+ "Map": {
+ "17": {
+ "Tag": "tag17"
+ },
+ "18": {
+ "Tag": "tag18"
+ }
+ },
+ "MapP": {
+ "19": {
+ "Tag": "tag19"
+ },
+ "20": null
+ },
+ "PMap": null,
+ "PMapP": null,
+ "EmptyMap": {},
+ "NilMap": null,
+ "Slice": [
+ {
+ "Tag": "tag20"
+ },
+ {
+ "Tag": "tag21"
+ }
+ ],
+ "SliceP": [
+ {
+ "Tag": "tag22"
+ },
+ null,
+ {
+ "Tag": "tag23"
+ }
+ ],
+ "PSlice": null,
+ "PSliceP": null,
+ "EmptySlice": [],
+ "NilSlice": null,
+ "StringSlice": [
+ "str24",
+ "str25",
+ "str26"
+ ],
+ "ByteSlice": "Gxwd",
+ "Small": {
+ "Tag": "tag30"
+ },
+ "PSmall": {
+ "Tag": "tag31"
+ },
+ "PPSmall": null,
+ "Interface": 5.2,
+ "PInterface": null
+}`
+
+var allValueCompact = strings.Map(noSpace, allValueIndent)
+
+var pallValueIndent = `{
+ "Bool": false,
+ "Int": 0,
+ "Int8": 0,
+ "Int16": 0,
+ "Int32": 0,
+ "Int64": 0,
+ "Uint": 0,
+ "Uint8": 0,
+ "Uint16": 0,
+ "Uint32": 0,
+ "Uint64": 0,
+ "Uintptr": 0,
+ "Float32": 0,
+ "Float64": 0,
+ "bar": "",
+ "bar2": "",
+ "IntStr": "0",
+ "PBool": true,
+ "PInt": 2,
+ "PInt8": 3,
+ "PInt16": 4,
+ "PInt32": 5,
+ "PInt64": 6,
+ "PUint": 7,
+ "PUint8": 8,
+ "PUint16": 9,
+ "PUint32": 10,
+ "PUint64": 11,
+ "PUintptr": 12,
+ "PFloat32": 14.1,
+ "PFloat64": 15.1,
+ "String": "",
+ "PString": "16",
+ "Map": null,
+ "MapP": null,
+ "PMap": {
+ "17": {
+ "Tag": "tag17"
+ },
+ "18": {
+ "Tag": "tag18"
+ }
+ },
+ "PMapP": {
+ "19": {
+ "Tag": "tag19"
+ },
+ "20": null
+ },
+ "EmptyMap": null,
+ "NilMap": null,
+ "Slice": null,
+ "SliceP": null,
+ "PSlice": [
+ {
+ "Tag": "tag20"
+ },
+ {
+ "Tag": "tag21"
+ }
+ ],
+ "PSliceP": [
+ {
+ "Tag": "tag22"
+ },
+ null,
+ {
+ "Tag": "tag23"
+ }
+ ],
+ "EmptySlice": null,
+ "NilSlice": null,
+ "StringSlice": null,
+ "ByteSlice": null,
+ "Small": {
+ "Tag": ""
+ },
+ "PSmall": null,
+ "PPSmall": {
+ "Tag": "tag31"
+ },
+ "Interface": null,
+ "PInterface": 5.2
+}`
+
+var pallValueCompact = strings.Map(noSpace, pallValueIndent)
+
+func TestRefUnmarshal(t *testing.T) {
+ type S struct {
+ // Ref is defined in encode_test.go.
+ R0 Ref
+ R1 *Ref
+ R2 RefText
+ R3 *RefText
+ }
+ want := S{
+ R0: 12,
+ R1: new(Ref),
+ R2: 13,
+ R3: new(RefText),
+ }
+ *want.R1 = 12
+ *want.R3 = 13
+
+ var got S
+ if err := Unmarshal([]byte(`{"R0":"ref","R1":"ref","R2":"ref","R3":"ref"}`), &got); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("got %+v, want %+v", got, want)
+ }
+}
+
+// Test that decoding the empty string does not panic when ,string is specified.
+// Issue 3450
+func TestEmptyString(t *testing.T) {
+ type T2 struct {
+ Number1 int `json:",string"`
+ Number2 int `json:",string"`
+ }
+ data := `{"Number1":"1", "Number2":""}`
+ dec := NewDecoder(strings.NewReader(data))
+ var t2 T2
+ err := dec.Decode(&t2)
+ if err == nil {
+ t.Fatal("Decode: did not return error")
+ }
+ if t2.Number1 != 1 {
+ t.Fatal("Decode: did not set Number1")
+ }
+}
+
+// Test that a null for ,string is not replaced with the previous quoted string (issue 7046).
+// It should also not be an error (issue 2540, issue 8587).
+func TestNullString(t *testing.T) {
+ type T struct {
+ A int `json:",string"`
+ B int `json:",string"`
+ C *int `json:",string"`
+ }
+ data := []byte(`{"A": "1", "B": null, "C": null}`)
+ var s T
+ s.B = 1
+ s.C = new(int)
+ *s.C = 2
+ err := Unmarshal(data, &s)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if s.B != 1 || s.C != nil {
+ t.Fatalf("after Unmarshal, s.B=%d, s.C=%p, want 1, nil", s.B, s.C)
+ }
+}
+
+func intp(x int) *int {
+ p := new(int)
+ *p = x
+ return p
+}
+
+func intpp(x *int) **int {
+ pp := new(*int)
+ *pp = x
+ return pp
+}
+
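+// interfaceSetTests exercises how Unmarshal overwrites, replaces, or
+// clears pre-set interface and pointer values, in particular on JSON null.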
+var interfaceSetTests = []struct {
+ pre interface{}
+ json string
+ post interface{}
+}{
+ {"foo", `"bar"`, "bar"},
+ {"foo", `2`, 2.0},
+ {"foo", `true`, true},
+ {"foo", `null`, nil},
+
+ {nil, `null`, nil},
+ {new(int), `null`, nil},
+ {(*int)(nil), `null`, nil},
+ {new(*int), `null`, new(*int)},
+ {(**int)(nil), `null`, nil},
+ {intp(1), `null`, nil},
+ {intpp(nil), `null`, intpp(nil)},
+ {intpp(intp(1)), `null`, intpp(nil)},
+}
+
+func TestInterfaceSet(t *testing.T) {
+ for _, tt := range interfaceSetTests {
+ b := struct{ X interface{} }{tt.pre}
+ blob := `{"X":` + tt.json + `}`
+ if err := Unmarshal([]byte(blob), &b); err != nil {
+ t.Errorf("Unmarshal %#q: %v", blob, err)
+ continue
+ }
+ if !reflect.DeepEqual(b.X, tt.post) {
+ t.Errorf("Unmarshal %#q into %#v: X=%#v, want %#v", blob, tt.pre, b.X, tt.post)
+ }
+ }
+}
+
+// JSON null values should be ignored for primitives and string values instead of resulting in an error.
+// Issue 2540
+func TestUnmarshalNulls(t *testing.T) {
+ jsonData := []byte(`{
+ "Bool" : null,
+ "Int" : null,
+ "Int8" : null,
+ "Int16" : null,
+ "Int32" : null,
+ "Int64" : null,
+ "Uint" : null,
+ "Uint8" : null,
+ "Uint16" : null,
+ "Uint32" : null,
+ "Uint64" : null,
+ "Float32" : null,
+ "Float64" : null,
+ "String" : null}`)
+
+ nulls := All{
+ Bool: true,
+ Int: 2,
+ Int8: 3,
+ Int16: 4,
+ Int32: 5,
+ Int64: 6,
+ Uint: 7,
+ Uint8: 8,
+ Uint16: 9,
+ Uint32: 10,
+ Uint64: 11,
+ Float32: 12.1,
+ Float64: 13.1,
+ String: "14"}
+
+ err := Unmarshal(jsonData, &nulls)
+ if err != nil {
+ t.Errorf("Unmarshal of null values failed: %v", err)
+ }
+ if !nulls.Bool || nulls.Int != 2 || nulls.Int8 != 3 || nulls.Int16 != 4 || nulls.Int32 != 5 || nulls.Int64 != 6 ||
+ nulls.Uint != 7 || nulls.Uint8 != 8 || nulls.Uint16 != 9 || nulls.Uint32 != 10 || nulls.Uint64 != 11 ||
+ nulls.Float32 != 12.1 || nulls.Float64 != 13.1 || nulls.String != "14" {
+
+ t.Errorf("Unmarshal of null values affected primitives")
+ }
+}
+
+func TestStringKind(t *testing.T) {
+ type stringKind string
+
+ var m1, m2 map[stringKind]int
+ m1 = map[stringKind]int{
+ "foo": 42,
+ }
+
+ data, err := Marshal(m1)
+ if err != nil {
+ t.Errorf("Unexpected error marshaling: %v", err)
+ }
+
+ err = Unmarshal(data, &m2)
+ if err != nil {
+ t.Errorf("Unexpected error unmarshaling: %v", err)
+ }
+
+ if !reflect.DeepEqual(m1, m2) {
+ t.Error("Items should be equal after encoding and then decoding")
+ }
+}
+
+// Custom types with []byte as the underlying type could not be marshaled
+// and then unmarshaled.
+// Issue 8962.
+func TestByteKind(t *testing.T) {
+ type byteKind []byte
+
+ a := byteKind("hello")
+
+ data, err := Marshal(a)
+ if err != nil {
+ t.Error(err)
+ }
+ var b byteKind
+ err = Unmarshal(data, &b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(a, b) {
+ t.Errorf("expected %v == %v", a, b)
+ }
+}
+
+// The fix for issue 8962 introduced a regression.
+// Issue 12921.
+func TestSliceOfCustomByte(t *testing.T) {
+ type Uint8 uint8
+
+ a := []Uint8("hello")
+
+ data, err := Marshal(a)
+ if err != nil {
+ t.Fatal(err)
+ }
+ var b []Uint8
+ err = Unmarshal(data, &b)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(a, b) {
+ t.Fatalf("expected %v == %v", a, b)
+ }
+}
+
+var decodeTypeErrorTests = []struct {
+ dest interface{}
+ src string
+}{
+ {new(string), `{"user": "name"}`}, // issue 4628.
+ {new(error), `{}`}, // issue 4222
+ {new(error), `[]`},
+ {new(error), `""`},
+ {new(error), `123`},
+ {new(error), `true`},
+}
+
+func TestUnmarshalTypeError(t *testing.T) {
+ for _, item := range decodeTypeErrorTests {
+ err := Unmarshal([]byte(item.src), item.dest)
+ if _, ok := err.(*UnmarshalTypeError); !ok {
+ t.Errorf("expected type error for Unmarshal(%q, type %T): got %T",
+ item.src, item.dest, err)
+ }
+ }
+}
+
+var unmarshalSyntaxTests = []string{
+ "tru",
+ "fals",
+ "nul",
+ "123e",
+ `"hello`,
+ `[1,2,3`,
+ `{"key":1`,
+ `{"key":1,`,
+}
+
+func TestUnmarshalSyntax(t *testing.T) {
+ var x interface{}
+ for _, src := range unmarshalSyntaxTests {
+ err := Unmarshal([]byte(src), &x)
+ if _, ok := err.(*SyntaxError); !ok {
+ t.Errorf("expected syntax error for Unmarshal(%q): got %T", src, err)
+ }
+ }
+}
+
+// Test handling of unexported fields that should be ignored.
+// Issue 4660
+type unexportedFields struct {
+ Name string
+ m map[string]interface{} `json:"-"`
+ m2 map[string]interface{} `json:"abcd"`
+}
+
+func TestUnmarshalUnexported(t *testing.T) {
+ input := `{"Name": "Bob", "m": {"x": 123}, "m2": {"y": 456}, "abcd": {"z": 789}}`
+ want := &unexportedFields{Name: "Bob"}
+
+ out := &unexportedFields{}
+ err := Unmarshal([]byte(input), out)
+ if err != nil {
+ t.Errorf("got error %v, expected nil", err)
+ }
+ if !reflect.DeepEqual(out, want) {
+ t.Errorf("got %q, want %q", out, want)
+ }
+}
+
+// Time3339 is a time.Time which encodes to and from JSON
+// as an RFC 3339 time in UTC.
+type Time3339 time.Time
+
+func (t *Time3339) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("types: failed to unmarshal non-string value %q as an RFC 3339 time", b)
+ }
+ tm, err := time.Parse(time.RFC3339, string(b[1:len(b)-1]))
+ if err != nil {
+ return err
+ }
+ *t = Time3339(tm)
+ return nil
+}
+
+func TestUnmarshalJSONLiteralError(t *testing.T) {
+ var t3 Time3339
+ err := Unmarshal([]byte(`"0000-00-00T00:00:00Z"`), &t3)
+ if err == nil {
+ t.Fatalf("expected error; got time %v", time.Time(t3))
+ }
+ if !strings.Contains(err.Error(), "range") {
+ t.Errorf("got err = %v; want out of range error", err)
+ }
+}
+
+// Test that extra object elements in an array do not result in a
+// "data changing underfoot" error.
+// Issue 3717
+func TestSkipArrayObjects(t *testing.T) {
+ json := `[{}]`
+ var dest [0]interface{}
+
+ err := Unmarshal([]byte(json), &dest)
+ if err != nil {
+ t.Errorf("got error %q, want nil", err)
+ }
+}
+
+// Test semantics of pre-filled struct fields and pre-filled map fields.
+// Issue 4900.
+func TestPrefilled(t *testing.T) {
+ ptrToMap := func(m map[string]interface{}) *map[string]interface{} { return &m }
+
+ // Values here change, cannot reuse table across runs.
+ var prefillTests = []struct {
+ in string
+ ptr interface{}
+ out interface{}
+ }{
+ {
+ in: `{"X": 1, "Y": 2}`,
+ ptr: &XYZ{X: float32(3), Y: int16(4), Z: 1.5},
+ out: &XYZ{X: float64(1), Y: float64(2), Z: 1.5},
+ },
+ {
+ in: `{"X": 1, "Y": 2}`,
+ ptr: ptrToMap(map[string]interface{}{"X": float32(3), "Y": int16(4), "Z": 1.5}),
+ out: ptrToMap(map[string]interface{}{"X": float64(1), "Y": float64(2), "Z": 1.5}),
+ },
+ }
+
+ for _, tt := range prefillTests {
+ ptrstr := fmt.Sprintf("%v", tt.ptr)
+ err := Unmarshal([]byte(tt.in), tt.ptr) // tt.ptr edited here
+ if err != nil {
+ t.Errorf("Unmarshal: %v", err)
+ }
+ if !reflect.DeepEqual(tt.ptr, tt.out) {
+ t.Errorf("Unmarshal(%#q, %s): have %v, want %v", tt.in, ptrstr, tt.ptr, tt.out)
+ }
+ }
+}
+
+var invalidUnmarshalTests = []struct {
+ v interface{}
+ want string
+}{
+ {nil, "json: Unmarshal(nil)"},
+ {struct{}{}, "json: Unmarshal(non-pointer struct {})"},
+ {(*int)(nil), "json: Unmarshal(nil *int)"},
+}
+
+func TestInvalidUnmarshal(t *testing.T) {
+ buf := []byte(`{"a":"1"}`)
+ for _, tt := range invalidUnmarshalTests {
+ err := Unmarshal(buf, tt.v)
+ if err == nil {
+ t.Errorf("Unmarshal expecting error, got nil")
+ continue
+ }
+ if got := err.Error(); got != tt.want {
+ t.Errorf("Unmarshal = %q; want %q", got, tt.want)
+ }
+ }
+}
+
+var invalidUnmarshalTextTests = []struct {
+ v interface{}
+ want string
+}{
+ {nil, "json: Unmarshal(nil)"},
+ {struct{}{}, "json: Unmarshal(non-pointer struct {})"},
+ {(*int)(nil), "json: Unmarshal(nil *int)"},
+ {new(net.IP), "json: cannot unmarshal string into Go value of type *net.IP"},
+}
+
+func TestInvalidUnmarshalText(t *testing.T) {
+ buf := []byte(`123`)
+ for _, tt := range invalidUnmarshalTextTests {
+ err := Unmarshal(buf, tt.v)
+ if err == nil {
+ t.Errorf("Unmarshal expecting error, got nil")
+ continue
+ }
+ if got := err.Error(); got != tt.want {
+ t.Errorf("Unmarshal = %q; want %q", got, tt.want)
+ }
+ }
+}
+
+// Test that the ,string option is ignored for invalid types.
+// Issue 9812.
+func TestInvalidStringOption(t *testing.T) {
+ num := 0
+ item := struct {
+ T time.Time `json:",string"`
+ M map[string]string `json:",string"`
+ S []string `json:",string"`
+ A [1]string `json:",string"`
+ I interface{} `json:",string"`
+ P *int `json:",string"`
+ }{M: make(map[string]string), S: make([]string, 0), I: num, P: &num}
+
+ data, err := Marshal(item)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ err = Unmarshal(data, &item)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/encode.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/encode.go
new file mode 100644
index 00000000000..67a0f0062ba
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/encode.go
@@ -0,0 +1,1256 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON as defined in
+// RFC 4627. The mapping between JSON and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+// This escaping can be disabled using an Encoder with DisableHTMLEscaping.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON value.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+// - the field's tag is "-", or
+// - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects. The map's key type must either be a string
+// or implement encoding.TextMarshaler. The map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON value.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON value.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := &encodeState{}
+ err := e.marshal(v, encOpts{escapeHTML: true})
+ if err != nil {
+ return nil, err
+ }
+ return e.Bytes(), nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+ // The characters can only appear in string literals,
+ // so just scan the string one byte at a time.
+ start := 0
+ for i, c := range src {
+ if c == '<' || c == '>' || c == '&' {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
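+		// The final bytes 0xA8 and 0xA9 differ only in the low bit, so
+		// src[i+2]&^1 == 0xA8 matches both with a single comparison.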
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+}
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "json: unsupported type: " + e.Type.String()
+}
+
+type UnsupportedValueError struct {
+ Value reflect.Value
+ Str string
+}
+
+func (e *UnsupportedValueError) Error() string {
+ return "json: unsupported value: " + e.Str
+}
+
+// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+// This error is no longer generated but is kept for backwards compatibility
+// with programs that might mention it.
+type InvalidUTF8Error struct {
+ S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+ return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
+type MarshalerError struct {
+ Type reflect.Type
+ Err error
+}
+
+func (e *MarshalerError) Error() string {
+ return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+var hex = "0123456789abcdef"
+
+// An encodeState encodes JSON into a bytes.Buffer.
+type encodeState struct {
+ bytes.Buffer // accumulated output
+ scratch [64]byte
+ ext Extension
+}
+
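+// encodeStatePool caches unused encodeStates so newEncodeState can reuse
+// them instead of allocating.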
+var encodeStatePool sync.Pool
+
+func newEncodeState() *encodeState {
+ if v := encodeStatePool.Get(); v != nil {
+ e := v.(*encodeState)
+ e.Reset()
+ return e
+ }
+ return new(encodeState)
+}
+
+func (e *encodeState) marshal(v interface{}, opts encOpts) (err error) {
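+	// Encoders report failure by panicking via e.error; recover such
+	// panics into err. Runtime errors and string panics indicate real
+	// bugs and are re-raised.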
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ if s, ok := r.(string); ok {
+ panic(s)
+ }
+ err = r.(error)
+ }
+ }()
+ e.reflectValue(reflect.ValueOf(v), opts)
+ return nil
+}
+
+func (e *encodeState) error(err error) {
+ panic(err)
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func (e *encodeState) reflectValue(v reflect.Value, opts encOpts) {
+ valueEncoder(v)(e, v, opts)
+}
+
+type encOpts struct {
+ // quoted causes primitive fields to be encoded inside JSON strings.
+ quoted bool
+ // escapeHTML causes '<', '>', and '&' to be escaped in JSON strings.
+ escapeHTML bool
+}
+
+type encoderFunc func(e *encodeState, v reflect.Value, opts encOpts)
+
+var encoderCache struct {
+ sync.RWMutex
+ m map[reflect.Type]encoderFunc
+}
+
+func valueEncoder(v reflect.Value) encoderFunc {
+ if !v.IsValid() {
+ return invalidValueEncoder
+ }
+ return typeEncoder(v.Type())
+}
+
+func typeEncoder(t reflect.Type) encoderFunc {
+ encoderCache.RLock()
+ f := encoderCache.m[t]
+ encoderCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+	// To deal with recursive types, populate the map with an
+	// indirect func before we build it. This func waits on the
+	// real func (f) to be ready and then calls it. This indirect
+	// func is only used for recursive types.
+ encoderCache.Lock()
+ if encoderCache.m == nil {
+ encoderCache.m = make(map[reflect.Type]encoderFunc)
+ }
+ var wg sync.WaitGroup
+ wg.Add(1)
+ encoderCache.m[t] = func(e *encodeState, v reflect.Value, opts encOpts) {
+ wg.Wait()
+ f(e, v, opts)
+ }
+ encoderCache.Unlock()
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ innerf := newTypeEncoder(t, true)
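+	// Wrap the default encoder so that a custom per-type encoding
+	// function registered on the encodeState's Extension, if any,
+	// takes precedence.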
+ f = func(e *encodeState, v reflect.Value, opts encOpts) {
+ encode, ok := e.ext.encode[v.Type()]
+ if !ok {
+ innerf(e, v, opts)
+ return
+ }
+
+ b, err := encode(v.Interface())
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, opts.escapeHTML)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+ }
+ wg.Done()
+ encoderCache.Lock()
+ encoderCache.m[t] = f
+ encoderCache.Unlock()
+ return f
+}
+
+var (
+ marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+ textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+)
+
+// newTypeEncoder constructs an encoderFunc for a type.
+// The returned encoder only checks CanAddr when allowAddr is true.
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
+ if t.Implements(marshalerType) {
+ return marshalerEncoder
+ }
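+	// For addressable non-pointer values whose pointer type implements
+	// Marshaler, encode through the address; the recursive call passes
+	// allowAddr=false to avoid infinite recursion.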
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ if t.Implements(textMarshalerType) {
+ return textMarshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr {
+ if reflect.PtrTo(t).Implements(textMarshalerType) {
+ return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return boolEncoder
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return intEncoder
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return uintEncoder
+ case reflect.Float32:
+ return float32Encoder
+ case reflect.Float64:
+ return float64Encoder
+ case reflect.String:
+ return stringEncoder
+ case reflect.Interface:
+ return interfaceEncoder
+ case reflect.Struct:
+ return newStructEncoder(t)
+ case reflect.Map:
+ return newMapEncoder(t)
+ case reflect.Slice:
+ return newSliceEncoder(t)
+ case reflect.Array:
+ return newArrayEncoder(t)
+ case reflect.Ptr:
+ return newPtrEncoder(t)
+ default:
+ return unsupportedTypeEncoder
+ }
+}
+
+func invalidValueEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+ e.WriteString("null")
+}
+
+func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, opts.escapeHTML)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, true)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+}
+
+func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := v.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+ e.stringBytes(b, opts.escapeHTML)
+}
+
+func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+ e.stringBytes(b, opts.escapeHTML)
+}
+
+func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+ if v.Bool() {
+ e.WriteString("true")
+ } else {
+ e.WriteString("false")
+ }
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+}
+
+func intEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+}
+
+func uintEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+}
+
+type floatEncoder int // number of bits
+
+func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ f := v.Float()
+ if math.IsInf(f, 0) || math.IsNaN(f) {
+ e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
+ }
+ b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+}
+
+var (
+ float32Encoder = (floatEncoder(32)).encode
+ float64Encoder = (floatEncoder(64)).encode
+)
+
+func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.Type() == numberType {
+ numStr := v.String()
+		// In Go 1.5 the empty string encoded to "0". Although "" is not a
+		// valid number literal, we keep that behavior for compatibility
+		// and check validity only after the substitution.
+ if numStr == "" {
+ numStr = "0" // Number's zero-val
+ }
+ if !isValidNumber(numStr) {
+ e.error(fmt.Errorf("json: invalid number literal %q", numStr))
+ }
+ e.WriteString(numStr)
+ return
+ }
+ if opts.quoted {
+ sb, err := Marshal(v.String())
+ if err != nil {
+ e.error(err)
+ }
+ e.string(string(sb), opts.escapeHTML)
+ } else {
+ e.string(v.String(), opts.escapeHTML)
+ }
+}
+
+func interfaceEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.reflectValue(v.Elem(), opts)
+}
+
+func unsupportedTypeEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+ e.error(&UnsupportedTypeError{v.Type()})
+}
+
+type structEncoder struct {
+ fields []field
+ fieldEncs []encoderFunc
+}
+
+func (se *structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ e.WriteByte('{')
+ first := true
+ for i, f := range se.fields {
+ fv := fieldByIndex(v, f.index)
+ if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
+ continue
+ }
+ if first {
+ first = false
+ } else {
+ e.WriteByte(',')
+ }
+ e.string(f.name, opts.escapeHTML)
+ e.WriteByte(':')
+ opts.quoted = f.quoted
+ se.fieldEncs[i](e, fv, opts)
+ }
+ e.WriteByte('}')
+}
+
+func newStructEncoder(t reflect.Type) encoderFunc {
+ fields := cachedTypeFields(t)
+ se := &structEncoder{
+ fields: fields,
+ fieldEncs: make([]encoderFunc, len(fields)),
+ }
+ for i, f := range fields {
+ se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
+ }
+ return se.encode
+}
+
+type mapEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (me *mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.WriteByte('{')
+
+ // Extract and sort the keys.
+ keys := v.MapKeys()
+ sv := make([]reflectWithString, len(keys))
+ for i, v := range keys {
+ sv[i].v = v
+ if err := sv[i].resolve(); err != nil {
+ e.error(&MarshalerError{v.Type(), err})
+ }
+ }
+ sort.Sort(byString(sv))
+
+ for i, kv := range sv {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ e.string(kv.s, opts.escapeHTML)
+ e.WriteByte(':')
+ me.elemEnc(e, v.MapIndex(kv.v), opts)
+ }
+ e.WriteByte('}')
+}
+
+func newMapEncoder(t reflect.Type) encoderFunc {
+ if t.Key().Kind() != reflect.String && !t.Key().Implements(textMarshalerType) {
+ return unsupportedTypeEncoder
+ }
+ me := &mapEncoder{typeEncoder(t.Elem())}
+ return me.encode
+}
+
+func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ s := v.Bytes()
+ e.WriteByte('"')
+ if len(s) < 1024 {
+ // for small buffers, using Encode directly is much faster.
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+ base64.StdEncoding.Encode(dst, s)
+ e.Write(dst)
+ } else {
+ // for large buffers, avoid unnecessary extra temporary
+ // buffer space.
+ enc := base64.NewEncoder(base64.StdEncoding, e)
+ enc.Write(s)
+ enc.Close()
+ }
+ e.WriteByte('"')
+}
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+ arrayEnc encoderFunc
+}
+
+func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ se.arrayEnc(e, v, opts)
+}
+
+func newSliceEncoder(t reflect.Type) encoderFunc {
+ // Byte slices get special treatment; arrays don't.
+ if t.Elem().Kind() == reflect.Uint8 &&
+ !t.Elem().Implements(marshalerType) &&
+ !t.Elem().Implements(textMarshalerType) {
+ return encodeByteSlice
+ }
+ enc := &sliceEncoder{newArrayEncoder(t)}
+ return enc.encode
+}
+
+type arrayEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ e.WriteByte('[')
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ ae.elemEnc(e, v.Index(i), opts)
+ }
+ e.WriteByte(']')
+}
+
+func newArrayEncoder(t reflect.Type) encoderFunc {
+ enc := &arrayEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type ptrEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ pe.elemEnc(e, v.Elem(), opts)
+}
+
+func newPtrEncoder(t reflect.Type) encoderFunc {
+ enc := &ptrEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type condAddrEncoder struct {
+ canAddrEnc, elseEnc encoderFunc
+}
+
+func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.CanAddr() {
+ ce.canAddrEnc(e, v, opts)
+ } else {
+ ce.elseEnc(e, v, opts)
+ }
+}
+
+// newCondAddrEncoder returns an encoder that checks whether its value
+// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
+ enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+ return enc.encode
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
+ for _, i := range index {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ }
+ v = v.Field(i)
+ }
+ return v
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+ for _, i := range index {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ t = t.Field(i).Type
+ }
+ return t
+}
+
+type reflectWithString struct {
+ v reflect.Value
+ s string
+}
+
+func (w *reflectWithString) resolve() error {
+ if w.v.Kind() == reflect.String {
+ w.s = w.v.String()
+ return nil
+ }
+ buf, err := w.v.Interface().(encoding.TextMarshaler).MarshalText()
+ w.s = string(buf)
+ return err
+}
+
+// byString is a slice of reflectWithString where the reflect.Value is either
+// a string or an encoding.TextMarshaler.
+// It implements the methods to sort by string.
+type byString []reflectWithString
+
+func (sv byString) Len() int { return len(sv) }
+func (sv byString) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv byString) Less(i, j int) bool { return sv[i].s < sv[j].s }
+
+// NOTE: keep in sync with stringBytes below.
+func (e *encodeState) string(s string, escapeHTML bool) int {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' &&
+ (!escapeHTML || b != '<' && b != '>' && b != '&') {
+ i++
+ continue
+ }
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+ // This encodes bytes < 0x20 except for \t, \n and \r.
+ // If escapeHTML is set, it also escapes <, >, and &
+ // because they can lead to security holes when
+ // user-controlled strings are rendered into JSON
+ // and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.WriteString(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0
+}
+
+// NOTE: keep in sync with string above.
+func (e *encodeState) stringBytes(s []byte, escapeHTML bool) int {
+ len0 := e.Len()
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' &&
+ (!escapeHTML || b != '<' && b != '>' && b != '&') {
+ i++
+ continue
+ }
+ if start < i {
+ e.Write(s[start:i])
+ }
+ switch b {
+ case '\\', '"':
+ e.WriteByte('\\')
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('\\')
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('\\')
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('\\')
+ e.WriteByte('t')
+ default:
+ // This encodes bytes < 0x20 except for \t, \n and \r.
+ // If escapeHTML is set, it also escapes <, >, and &
+ // because they can lead to security holes when
+ // user-controlled strings are rendered into JSON
+ // and served to some browsers.
+ e.WriteString(`\u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRune(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.Write(s[start:])
+ }
+ e.WriteByte('"')
+ return e.Len() - len0
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Only strings, floats, integers, and booleans can be quoted.
+ quoted := false
+ if opts.Contains("string") {
+ switch ft.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float32, reflect.Float64,
+ reflect.String:
+ quoted = true
+ }
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: quoted,
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 and 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
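
The escaping rules implemented by string/stringBytes above are observable through Marshal. A minimal sketch against the standard encoding/json (which this vendored fork tracks closely); the output below follows from the rules above, not from anything mgo-specific:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Control bytes become \u00XX; <, >, and & are escaped for HTML
	// safety; U+2028/U+2029 are escaped for JSONP safety.
	b, _ := json.Marshal("<a> & \u2028 \x01")
	fmt.Println(string(b)) // "\u003ca\u003e \u0026 \u2028 \u0001"
}
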
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/encode_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/encode_test.go
new file mode 100644
index 00000000000..b484022a70e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/encode_test.go
@@ -0,0 +1,613 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "testing"
+ "unicode"
+)
+
+type Optionals struct {
+ Sr string `json:"sr"`
+ So string `json:"so,omitempty"`
+ Sw string `json:"-"`
+
+ Ir int `json:"omitempty"` // actually named omitempty, not an option
+ Io int `json:"io,omitempty"`
+
+ Slr []string `json:"slr,random"`
+ Slo []string `json:"slo,omitempty"`
+
+ Mr map[string]interface{} `json:"mr"`
+ Mo map[string]interface{} `json:",omitempty"`
+
+ Fr float64 `json:"fr"`
+ Fo float64 `json:"fo,omitempty"`
+
+ Br bool `json:"br"`
+ Bo bool `json:"bo,omitempty"`
+
+ Ur uint `json:"ur"`
+ Uo uint `json:"uo,omitempty"`
+
+ Str struct{} `json:"str"`
+ Sto struct{} `json:"sto,omitempty"`
+}
+
+var optionalsExpected = `{
+ "sr": "",
+ "omitempty": 0,
+ "slr": null,
+ "mr": {},
+ "fr": 0,
+ "br": false,
+ "ur": 0,
+ "str": {},
+ "sto": {}
+}`
+
+func TestOmitEmpty(t *testing.T) {
+ var o Optionals
+ o.Sw = "something"
+ o.Mr = map[string]interface{}{}
+ o.Mo = map[string]interface{}{}
+
+ got, err := MarshalIndent(&o, "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got := string(got); got != optionalsExpected {
+ t.Errorf(" got: %s\nwant: %s\n", got, optionalsExpected)
+ }
+}
+
+type StringTag struct {
+ BoolStr bool `json:",string"`
+ IntStr int64 `json:",string"`
+ StrStr string `json:",string"`
+}
+
+var stringTagExpected = `{
+ "BoolStr": "true",
+ "IntStr": "42",
+ "StrStr": "\"xzbit\""
+}`
+
+func TestStringTag(t *testing.T) {
+ var s StringTag
+ s.BoolStr = true
+ s.IntStr = 42
+ s.StrStr = "xzbit"
+ got, err := MarshalIndent(&s, "", " ")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got := string(got); got != stringTagExpected {
+ t.Fatalf(" got: %s\nwant: %s\n", got, stringTagExpected)
+ }
+
+ // Verify that it round-trips.
+ var s2 StringTag
+ err = NewDecoder(bytes.NewReader(got)).Decode(&s2)
+ if err != nil {
+ t.Fatalf("Decode: %v", err)
+ }
+ if !reflect.DeepEqual(s, s2) {
+ t.Fatalf("decode didn't match.\nsource: %#v\nEncoded as:\n%s\ndecode: %#v", s, string(got), s2)
+ }
+}
+
+// byte slices are special even if they're renamed types.
+type renamedByte byte
+type renamedByteSlice []byte
+type renamedRenamedByteSlice []renamedByte
+
+func TestEncodeRenamedByteSlice(t *testing.T) {
+ s := renamedByteSlice("abc")
+ result, err := Marshal(s)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expect := `"YWJj"`
+ if string(result) != expect {
+ t.Errorf(" got %s want %s", result, expect)
+ }
+ r := renamedRenamedByteSlice("abc")
+ result, err = Marshal(r)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(result) != expect {
+ t.Errorf(" got %s want %s", result, expect)
+ }
+}
+
+var unsupportedValues = []interface{}{
+ math.NaN(),
+ math.Inf(-1),
+ math.Inf(1),
+}
+
+func TestUnsupportedValues(t *testing.T) {
+ for _, v := range unsupportedValues {
+ if _, err := Marshal(v); err != nil {
+ if _, ok := err.(*UnsupportedValueError); !ok {
+ t.Errorf("for %v, got %T want UnsupportedValueError", v, err)
+ }
+ } else {
+ t.Errorf("for %v, expected error", v)
+ }
+ }
+}
+
+// Ref has Marshaler and Unmarshaler methods with pointer receiver.
+type Ref int
+
+func (*Ref) MarshalJSON() ([]byte, error) {
+ return []byte(`"ref"`), nil
+}
+
+func (r *Ref) UnmarshalJSON([]byte) error {
+ *r = 12
+ return nil
+}
+
+// Val has Marshaler methods with value receiver.
+type Val int
+
+func (Val) MarshalJSON() ([]byte, error) {
+ return []byte(`"val"`), nil
+}
+
+// RefText has Marshaler and Unmarshaler methods with pointer receiver.
+type RefText int
+
+func (*RefText) MarshalText() ([]byte, error) {
+ return []byte(`"ref"`), nil
+}
+
+func (r *RefText) UnmarshalText([]byte) error {
+ *r = 13
+ return nil
+}
+
+// ValText has Marshaler methods with value receiver.
+type ValText int
+
+func (ValText) MarshalText() ([]byte, error) {
+ return []byte(`"val"`), nil
+}
+
+func TestRefValMarshal(t *testing.T) {
+ var s = struct {
+ R0 Ref
+ R1 *Ref
+ R2 RefText
+ R3 *RefText
+ V0 Val
+ V1 *Val
+ V2 ValText
+ V3 *ValText
+ }{
+ R0: 12,
+ R1: new(Ref),
+ R2: 14,
+ R3: new(RefText),
+ V0: 13,
+ V1: new(Val),
+ V2: 15,
+ V3: new(ValText),
+ }
+ const want = `{"R0":"ref","R1":"ref","R2":"\"ref\"","R3":"\"ref\"","V0":"val","V1":"val","V2":"\"val\"","V3":"\"val\""}`
+ b, err := Marshal(&s)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+// C implements Marshaler and returns unescaped JSON.
+type C int
+
+func (C) MarshalJSON() ([]byte, error) {
+ return []byte(`"<&>"`), nil
+}
+
+// CText implements Marshaler and returns unescaped text.
+type CText int
+
+func (CText) MarshalText() ([]byte, error) {
+ return []byte(`"<&>"`), nil
+}
+
+func TestMarshalerEscaping(t *testing.T) {
+ var c C
+ want := `"\u003c\u0026\u003e"`
+ b, err := Marshal(c)
+ if err != nil {
+ t.Fatalf("Marshal(c): %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("Marshal(c) = %#q, want %#q", got, want)
+ }
+
+ var ct CText
+ want = `"\"\u003c\u0026\u003e\""`
+ b, err = Marshal(ct)
+ if err != nil {
+ t.Fatalf("Marshal(ct): %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("Marshal(ct) = %#q, want %#q", got, want)
+ }
+}
+
+type IntType int
+
+type MyStruct struct {
+ IntType
+}
+
+func TestAnonymousNonstruct(t *testing.T) {
+ var i IntType = 11
+ a := MyStruct{i}
+ const want = `{"IntType":11}`
+
+ b, err := Marshal(a)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if got := string(b); got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+type BugA struct {
+ S string
+}
+
+type BugB struct {
+ BugA
+ S string
+}
+
+type BugC struct {
+ S string
+}
+
+// Legal Go: We never use the repeated embedded field (S).
+type BugX struct {
+ A int
+ BugA
+ BugB
+}
+
+// Issue 5245.
+func TestEmbeddedBug(t *testing.T) {
+ v := BugB{
+ BugA{"A"},
+ "B",
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want := `{"S":"B"}`
+ got := string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+ // Now check that the duplicate field, S, does not appear.
+ x := BugX{
+ A: 23,
+ }
+ b, err = Marshal(x)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want = `{"A":23}`
+ got = string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+}
+
+type BugD struct { // Same as BugA after tagging.
+ XXX string `json:"S"`
+}
+
+// BugD's tagged S field should dominate BugA's.
+type BugY struct {
+ BugA
+ BugD
+}
+
+// Test that a field with a tag dominates untagged fields.
+func TestTaggedFieldDominates(t *testing.T) {
+ v := BugY{
+ BugA{"BugA"},
+ BugD{"BugD"},
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want := `{"S":"BugD"}`
+ got := string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+}
+
+// There are no tags here, so S should not appear.
+type BugZ struct {
+ BugA
+ BugC
+ BugY // Contains a tagged S field through BugD; should not dominate.
+}
+
+func TestDuplicatedFieldDisappears(t *testing.T) {
+ v := BugZ{
+ BugA{"BugA"},
+ BugC{"BugC"},
+ BugY{
+ BugA{"nested BugA"},
+ BugD{"nested BugD"},
+ },
+ }
+ b, err := Marshal(v)
+ if err != nil {
+ t.Fatal("Marshal:", err)
+ }
+ want := `{}`
+ got := string(b)
+ if got != want {
+ t.Fatalf("Marshal: got %s want %s", got, want)
+ }
+}
+
+func TestStringBytes(t *testing.T) {
+ // Test that encodeState.stringBytes and encodeState.string use the same encoding.
+ var r []rune
+ for i := '\u0000'; i <= unicode.MaxRune; i++ {
+ r = append(r, i)
+ }
+ s := string(r) + "\xff\xff\xffhello" // some invalid UTF-8 too
+
+ for _, escapeHTML := range []bool{true, false} {
+ es := &encodeState{}
+ es.string(s, escapeHTML)
+
+ esBytes := &encodeState{}
+ esBytes.stringBytes([]byte(s), escapeHTML)
+
+ enc := es.Buffer.String()
+ encBytes := esBytes.Buffer.String()
+ if enc != encBytes {
+ i := 0
+ for i < len(enc) && i < len(encBytes) && enc[i] == encBytes[i] {
+ i++
+ }
+ enc = enc[i:]
+ encBytes = encBytes[i:]
+ i = 0
+ for i < len(enc) && i < len(encBytes) && enc[len(enc)-i-1] == encBytes[len(encBytes)-i-1] {
+ i++
+ }
+ enc = enc[:len(enc)-i]
+ encBytes = encBytes[:len(encBytes)-i]
+
+ if len(enc) > 20 {
+ enc = enc[:20] + "..."
+ }
+ if len(encBytes) > 20 {
+ encBytes = encBytes[:20] + "..."
+ }
+
+ t.Errorf("with escapeHTML=%t, encodings differ at %#q vs %#q",
+ escapeHTML, enc, encBytes)
+ }
+ }
+}
+
+func TestIssue6458(t *testing.T) {
+ type Foo struct {
+ M RawMessage
+ }
+ x := Foo{RawMessage(`"foo"`)}
+
+ b, err := Marshal(&x)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if want := `{"M":"foo"}`; string(b) != want {
+ t.Errorf("Marshal(&x) = %#q; want %#q", b, want)
+ }
+
+ b, err = Marshal(x)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if want := `{"M":"ImZvbyI="}`; string(b) != want {
+ t.Errorf("Marshal(x) = %#q; want %#q", b, want)
+ }
+}
+
+func TestIssue10281(t *testing.T) {
+ type Foo struct {
+ N Number
+ }
+ x := Foo{Number(`invalid`)}
+
+ b, err := Marshal(&x)
+ if err == nil {
+ t.Errorf("Marshal(&x) = %#q; want error", b)
+ }
+}
+
+func TestHTMLEscape(t *testing.T) {
+ var b, want bytes.Buffer
+ m := `{"M":"<html>foo &` + "\xe2\x80\xa8 \xe2\x80\xa9" + `</html>"}`
+ want.Write([]byte(`{"M":"\u003chtml\u003efoo \u0026\u2028 \u2029\u003c/html\u003e"}`))
+ HTMLEscape(&b, []byte(m))
+ if !bytes.Equal(b.Bytes(), want.Bytes()) {
+ t.Errorf("HTMLEscape(&b, []byte(m)) = %s; want %s", b.Bytes(), want.Bytes())
+ }
+}
+
+// golang.org/issue/8582
+func TestEncodePointerString(t *testing.T) {
+ type stringPointer struct {
+ N *int64 `json:"n,string"`
+ }
+ var n int64 = 42
+ b, err := Marshal(stringPointer{N: &n})
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if got, want := string(b), `{"n":"42"}`; got != want {
+ t.Errorf("Marshal = %s, want %s", got, want)
+ }
+ var back stringPointer
+ err = Unmarshal(b, &back)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if back.N == nil {
+ t.Fatalf("Unmarshalled nil N field")
+ }
+ if *back.N != 42 {
+ t.Fatalf("*N = %d; want 42", *back.N)
+ }
+}
+
+var encodeStringTests = []struct {
+ in string
+ out string
+}{
+ {"\x00", `"\u0000"`},
+ {"\x01", `"\u0001"`},
+ {"\x02", `"\u0002"`},
+ {"\x03", `"\u0003"`},
+ {"\x04", `"\u0004"`},
+ {"\x05", `"\u0005"`},
+ {"\x06", `"\u0006"`},
+ {"\x07", `"\u0007"`},
+ {"\x08", `"\u0008"`},
+ {"\x09", `"\t"`},
+ {"\x0a", `"\n"`},
+ {"\x0b", `"\u000b"`},
+ {"\x0c", `"\u000c"`},
+ {"\x0d", `"\r"`},
+ {"\x0e", `"\u000e"`},
+ {"\x0f", `"\u000f"`},
+ {"\x10", `"\u0010"`},
+ {"\x11", `"\u0011"`},
+ {"\x12", `"\u0012"`},
+ {"\x13", `"\u0013"`},
+ {"\x14", `"\u0014"`},
+ {"\x15", `"\u0015"`},
+ {"\x16", `"\u0016"`},
+ {"\x17", `"\u0017"`},
+ {"\x18", `"\u0018"`},
+ {"\x19", `"\u0019"`},
+ {"\x1a", `"\u001a"`},
+ {"\x1b", `"\u001b"`},
+ {"\x1c", `"\u001c"`},
+ {"\x1d", `"\u001d"`},
+ {"\x1e", `"\u001e"`},
+ {"\x1f", `"\u001f"`},
+}
+
+func TestEncodeString(t *testing.T) {
+ for _, tt := range encodeStringTests {
+ b, err := Marshal(tt.in)
+ if err != nil {
+ t.Errorf("Marshal(%q): %v", tt.in, err)
+ continue
+ }
+ out := string(b)
+ if out != tt.out {
+ t.Errorf("Marshal(%q) = %#q, want %#q", tt.in, out, tt.out)
+ }
+ }
+}
+
+type jsonbyte byte
+
+func (b jsonbyte) MarshalJSON() ([]byte, error) { return tenc(`{"JB":%d}`, b) }
+
+type textbyte byte
+
+func (b textbyte) MarshalText() ([]byte, error) { return tenc(`TB:%d`, b) }
+
+type jsonint int
+
+func (i jsonint) MarshalJSON() ([]byte, error) { return tenc(`{"JI":%d}`, i) }
+
+type textint int
+
+func (i textint) MarshalText() ([]byte, error) { return tenc(`TI:%d`, i) }
+
+func tenc(format string, a ...interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, format, a...)
+ return buf.Bytes(), nil
+}
+
+// Issue 13783
+func TestEncodeBytekind(t *testing.T) {
+ testdata := []struct {
+ data interface{}
+ want string
+ }{
+ {byte(7), "7"},
+ {jsonbyte(7), `{"JB":7}`},
+ {textbyte(4), `"TB:4"`},
+ {jsonint(5), `{"JI":5}`},
+ {textint(1), `"TI:1"`},
+ {[]byte{0, 1}, `"AAE="`},
+ {[]jsonbyte{0, 1}, `[{"JB":0},{"JB":1}]`},
+ {[][]jsonbyte{{0, 1}, {3}}, `[[{"JB":0},{"JB":1}],[{"JB":3}]]`},
+ {[]textbyte{2, 3}, `["TB:2","TB:3"]`},
+ {[]jsonint{5, 4}, `[{"JI":5},{"JI":4}]`},
+ {[]textint{9, 3}, `["TI:9","TI:3"]`},
+ {[]int{9, 3}, `[9,3]`},
+ }
+ for _, d := range testdata {
+ js, err := Marshal(d.data)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ got, want := string(js), d.want
+ if got != want {
+ t.Errorf("got %s, want %s", got, want)
+ }
+ }
+}
+
+func TestTextMarshalerMapKeysAreSorted(t *testing.T) {
+ b, err := Marshal(map[unmarshalerText]int{
+ {"x", "y"}: 1,
+ {"y", "x"}: 2,
+ {"a", "z"}: 3,
+ {"z", "a"}: 4,
+ })
+ if err != nil {
+ t.Fatalf("Failed to Marshal text.Marshaler: %v", err)
+ }
+ const want = `{"a:z":3,"x:y":1,"y:x":2,"z:a":4}`
+ if string(b) != want {
+ t.Errorf("Marshal map with text.Marshaler keys: got %#q, want %#q", b, want)
+ }
+}
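
Two behaviors exercised by the tests above are worth seeing end to end: plain byte slices (even renamed ones) marshal as base64 via encodeByteSlice, and map output is deterministic because mapEncoder sorts keys. A small sketch using the standard encoding/json, which behaves the same way:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	b, _ := json.Marshal([]byte("abc"))
	fmt.Println(string(b)) // "YWJj", the base64 of "abc"

	m, _ := json.Marshal(map[string]int{"b": 2, "a": 1})
	fmt.Println(string(m)) // {"a":1,"b":2}, keys in sorted order
}
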
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/example_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/example_test.go
new file mode 100644
index 00000000000..326bdc9540e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/example_test.go
@@ -0,0 +1,252 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "strings"
+)
+
+func ExampleMarshal() {
+ type ColorGroup struct {
+ ID int
+ Name string
+ Colors []string
+ }
+ group := ColorGroup{
+ ID: 1,
+ Name: "Reds",
+ Colors: []string{"Crimson", "Red", "Ruby", "Maroon"},
+ }
+ b, err := json.Marshal(group)
+ if err != nil {
+ fmt.Println("error:", err)
+ }
+ os.Stdout.Write(b)
+ // Output:
+ // {"ID":1,"Name":"Reds","Colors":["Crimson","Red","Ruby","Maroon"]}
+}
+
+func ExampleUnmarshal() {
+ var jsonBlob = []byte(`[
+ {"Name": "Platypus", "Order": "Monotremata"},
+ {"Name": "Quoll", "Order": "Dasyuromorphia"}
+ ]`)
+ type Animal struct {
+ Name string
+ Order string
+ }
+ var animals []Animal
+ err := json.Unmarshal(jsonBlob, &animals)
+ if err != nil {
+ fmt.Println("error:", err)
+ }
+ fmt.Printf("%+v", animals)
+ // Output:
+ // [{Name:Platypus Order:Monotremata} {Name:Quoll Order:Dasyuromorphia}]
+}
+
+// This example uses a Decoder to decode a stream of distinct JSON values.
+func ExampleDecoder() {
+ const jsonStream = `
+ {"Name": "Ed", "Text": "Knock knock."}
+ {"Name": "Sam", "Text": "Who's there?"}
+ {"Name": "Ed", "Text": "Go fmt."}
+ {"Name": "Sam", "Text": "Go fmt who?"}
+ {"Name": "Ed", "Text": "Go fmt yourself!"}
+ `
+ type Message struct {
+ Name, Text string
+ }
+ dec := json.NewDecoder(strings.NewReader(jsonStream))
+ for {
+ var m Message
+ if err := dec.Decode(&m); err == io.EOF {
+ break
+ } else if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("%s: %s\n", m.Name, m.Text)
+ }
+ // Output:
+ // Ed: Knock knock.
+ // Sam: Who's there?
+ // Ed: Go fmt.
+ // Sam: Go fmt who?
+ // Ed: Go fmt yourself!
+}
+
+// This example uses a Decoder's Token method to step through a JSON value
+// one token at a time.
+func ExampleDecoder_Token() {
+ const jsonStream = `
+ {"Message": "Hello", "Array": [1, 2, 3], "Null": null, "Number": 1.234}
+ `
+ dec := json.NewDecoder(strings.NewReader(jsonStream))
+ for {
+ t, err := dec.Token()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("%T: %v", t, t)
+ if dec.More() {
+ fmt.Printf(" (more)")
+ }
+ fmt.Printf("\n")
+ }
+ // Output:
+ // json.Delim: { (more)
+ // string: Message (more)
+ // string: Hello (more)
+ // string: Array (more)
+ // json.Delim: [ (more)
+ // float64: 1 (more)
+ // float64: 2 (more)
+ // float64: 3
+ // json.Delim: ] (more)
+ // string: Null (more)
+ // <nil>: <nil> (more)
+ // string: Number (more)
+ // float64: 1.234
+ // json.Delim: }
+}
+
+// This example uses a Decoder to decode a streaming array of JSON objects.
+func ExampleDecoder_Decode_stream() {
+ const jsonStream = `
+ [
+ {"Name": "Ed", "Text": "Knock knock."},
+ {"Name": "Sam", "Text": "Who's there?"},
+ {"Name": "Ed", "Text": "Go fmt."},
+ {"Name": "Sam", "Text": "Go fmt who?"},
+ {"Name": "Ed", "Text": "Go fmt yourself!"}
+ ]
+ `
+ type Message struct {
+ Name, Text string
+ }
+ dec := json.NewDecoder(strings.NewReader(jsonStream))
+
+ // read open bracket
+ t, err := dec.Token()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("%T: %v\n", t, t)
+
+ var m Message
+ // while the array contains values
+ for dec.More() {
+
+ // decode an array value (Message)
+ err := dec.Decode(&m)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ fmt.Printf("%v: %v\n", m.Name, m.Text)
+ }
+
+ // read closing bracket
+ t, err = dec.Token()
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("%T: %v\n", t, t)
+
+ // Output:
+ // json.Delim: [
+ // Ed: Knock knock.
+ // Sam: Who's there?
+ // Ed: Go fmt.
+ // Sam: Go fmt who?
+ // Ed: Go fmt yourself!
+ // json.Delim: ]
+}
+
+// This example uses RawMessage to delay parsing part of a JSON message.
+func ExampleRawMessage() {
+ type Color struct {
+ Space string
+ Point json.RawMessage // delay parsing until we know the color space
+ }
+ type RGB struct {
+ R uint8
+ G uint8
+ B uint8
+ }
+ type YCbCr struct {
+ Y uint8
+ Cb int8
+ Cr int8
+ }
+
+ var j = []byte(`[
+ {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
+ {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
+ ]`)
+ var colors []Color
+ err := json.Unmarshal(j, &colors)
+ if err != nil {
+ log.Fatalln("error:", err)
+ }
+
+ for _, c := range colors {
+ var dst interface{}
+ switch c.Space {
+ case "RGB":
+ dst = new(RGB)
+ case "YCbCr":
+ dst = new(YCbCr)
+ }
+ err := json.Unmarshal(c.Point, dst)
+ if err != nil {
+ log.Fatalln("error:", err)
+ }
+ fmt.Println(c.Space, dst)
+ }
+ // Output:
+ // YCbCr &{255 0 -10}
+ // RGB &{98 218 255}
+}
+
+func ExampleIndent() {
+ type Road struct {
+ Name string
+ Number int
+ }
+ roads := []Road{
+ {"Diamond Fork", 29},
+ {"Sheep Creek", 51},
+ }
+
+ b, err := json.Marshal(roads)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var out bytes.Buffer
+ json.Indent(&out, b, "=", "\t")
+ out.WriteTo(os.Stdout)
+ // Output:
+ // [
+ // = {
+ // = "Name": "Diamond Fork",
+ // = "Number": 29
+ // = },
+ // = {
+ // = "Name": "Sheep Creek",
+ // = "Number": 51
+ // = }
+ // =]
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/extension.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/extension.go
new file mode 100644
index 00000000000..1c8fd459753
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/extension.go
@@ -0,0 +1,95 @@
+package json
+
+import (
+ "reflect"
+)
+
+// Extension holds a set of additional rules to be used when unmarshaling
+// strict JSON or JSON-like content.
+type Extension struct {
+ funcs map[string]funcExt
+ consts map[string]interface{}
+ keyed map[string]func([]byte) (interface{}, error)
+ encode map[reflect.Type]func(v interface{}) ([]byte, error)
+
+ unquotedKeys bool
+ trailingCommas bool
+}
+
+type funcExt struct {
+ key string
+ args []string
+}
+
+// Extend changes the decoder behavior to consider the provided extension.
+func (dec *Decoder) Extend(ext *Extension) { dec.d.ext = *ext }
+
+// Extend changes the encoder behavior to consider the provided extension.
+func (enc *Encoder) Extend(ext *Extension) { enc.ext = *ext }
+
+// Extend includes in e the extensions defined in ext.
+func (e *Extension) Extend(ext *Extension) {
+ for name, fext := range ext.funcs {
+ e.DecodeFunc(name, fext.key, fext.args...)
+ }
+ for name, value := range ext.consts {
+ e.DecodeConst(name, value)
+ }
+ for key, decode := range ext.keyed {
+ e.DecodeKeyed(key, decode)
+ }
+ for typ, encode := range ext.encode {
+ if e.encode == nil {
+ e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
+ }
+ e.encode[typ] = encode
+ }
+}
+
+// DecodeFunc defines a function call that may be observed inside JSON content.
+// A function with the provided name will be unmarshaled as the document
+// {key: {args[0]: ..., args[N]: ...}}.
+func (e *Extension) DecodeFunc(name string, key string, args ...string) {
+ if e.funcs == nil {
+ e.funcs = make(map[string]funcExt)
+ }
+ e.funcs[name] = funcExt{key, args}
+}
+
+// DecodeConst defines a constant name that may be observed inside JSON content
+// and will be decoded with the provided value.
+func (e *Extension) DecodeConst(name string, value interface{}) {
+ if e.consts == nil {
+ e.consts = make(map[string]interface{})
+ }
+ e.consts[name] = value
+}
+
+// DecodeKeyed defines a key that when observed as the first element inside a
+// JSON document triggers the decoding of that document via the provided
+// decode function.
+func (e *Extension) DecodeKeyed(key string, decode func(data []byte) (interface{}, error)) {
+ if e.keyed == nil {
+ e.keyed = make(map[string]func([]byte) (interface{}, error))
+ }
+ e.keyed[key] = decode
+}
+
+// DecodeUnquotedKeys defines whether to accept map keys that are unquoted strings.
+func (e *Extension) DecodeUnquotedKeys(accept bool) {
+ e.unquotedKeys = accept
+}
+
+// DecodeTrailingCommas defines whether to accept trailing commas in maps and arrays.
+func (e *Extension) DecodeTrailingCommas(accept bool) {
+ e.trailingCommas = accept
+}
+
+// EncodeType registers a function to encode values with the same type of the
+// provided sample.
+func (e *Extension) EncodeType(sample interface{}, encode func(v interface{}) ([]byte, error)) {
+ if e.encode == nil {
+ e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
+ }
+ e.encode[reflect.TypeOf(sample)] = encode
+}
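
A hedged usage sketch of the Extension API above, written in-package since this json fork is internal. The ObjectId/$oid names are hypothetical stand-ins chosen for illustration, not mgo's actual extension set, and imports of fmt and strings are assumed:

func exampleExtension() {
	var ext Extension
	// Per DecodeFunc, ObjectId("abc") decodes as {"$oid": {"id": "abc"}};
	// the function and key names here are illustrative only.
	ext.DecodeFunc("ObjectId", "$oid", "id")
	ext.DecodeUnquotedKeys(true)   // allow {v: ...}
	ext.DecodeTrailingCommas(true) // allow {..., }

	dec := NewDecoder(strings.NewReader(`{v: ObjectId("abc"),}`))
	dec.Extend(&ext)
	var out map[string]interface{}
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[v:map[$oid:map[id:abc]]]
}
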
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/extension_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/extension_test.go
new file mode 100644
index 00000000000..8c228189724
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/extension_test.go
@@ -0,0 +1,218 @@
+package json
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "testing"
+)
+
+type funcN struct {
+ Arg1 int `json:"arg1"`
+ Arg2 int `json:"arg2"`
+}
+
+type funcs struct {
+ Func2 *funcN `json:"$func2"`
+ Func1 *funcN `json:"$func1"`
+}
+
+type funcsText struct {
+ Func1 jsonText `json:"$func1"`
+ Func2 jsonText `json:"$func2"`
+}
+
+type jsonText struct {
+ json string
+}
+
+func (jt *jsonText) UnmarshalJSON(data []byte) error {
+ jt.json = string(data)
+ return nil
+}
+
+type nestedText struct {
+ F jsonText
+ B bool
+}
+
+type unquotedKey struct {
+ S string `json:"$k_1"`
+}
+
+var ext Extension
+
+type keyed string
+
+func decodeKeyed(data []byte) (interface{}, error) {
+ return keyed(data), nil
+}
+
+type keyedType struct {
+ K keyed
+ I int
+}
+
+type docint int
+
+type const1Type struct{}
+
+var const1 = new(const1Type)
+
+func init() {
+ ext.DecodeFunc("Func1", "$func1")
+ ext.DecodeFunc("Func2", "$func2", "arg1", "arg2")
+ ext.DecodeFunc("Func3", "$func3", "arg1")
+ ext.DecodeFunc("new Func4", "$func4", "arg1")
+
+ ext.DecodeConst("Const1", const1)
+
+ ext.DecodeKeyed("$key1", decodeKeyed)
+ ext.DecodeKeyed("$func3", decodeKeyed)
+
+ ext.EncodeType(docint(0), func(v interface{}) ([]byte, error) {
+ s := `{"$docint": ` + strconv.Itoa(int(v.(docint))) + `}`
+ return []byte(s), nil
+ })
+
+ ext.DecodeUnquotedKeys(true)
+ ext.DecodeTrailingCommas(true)
+}
+
+type extDecodeTest struct {
+ in string
+ ptr interface{}
+ out interface{}
+ err error
+
+ noext bool
+}
+
+var extDecodeTests = []extDecodeTest{
+ // Functions
+ {in: `Func1()`, ptr: new(interface{}), out: map[string]interface{}{
+ "$func1": map[string]interface{}{},
+ }},
+ {in: `{"v": Func1()}`, ptr: new(interface{}), out: map[string]interface{}{
+ "v": map[string]interface{}{"$func1": map[string]interface{}{}},
+ }},
+ {in: `Func2(1)`, ptr: new(interface{}), out: map[string]interface{}{
+ "$func2": map[string]interface{}{"arg1": float64(1)},
+ }},
+ {in: `Func2(1, 2)`, ptr: new(interface{}), out: map[string]interface{}{
+ "$func2": map[string]interface{}{"arg1": float64(1), "arg2": float64(2)},
+ }},
+ {in: `Func2(Func1())`, ptr: new(interface{}), out: map[string]interface{}{
+ "$func2": map[string]interface{}{"arg1": map[string]interface{}{"$func1": map[string]interface{}{}}},
+ }},
+ {in: `Func2(1, 2, 3)`, ptr: new(interface{}), err: fmt.Errorf("json: too many arguments for function Func2")},
+ {in: `BadFunc()`, ptr: new(interface{}), err: fmt.Errorf(`json: unknown function "BadFunc"`)},
+
+ {in: `Func1()`, ptr: new(funcs), out: funcs{Func1: &funcN{}}},
+ {in: `Func2(1)`, ptr: new(funcs), out: funcs{Func2: &funcN{Arg1: 1}}},
+ {in: `Func2(1, 2)`, ptr: new(funcs), out: funcs{Func2: &funcN{Arg1: 1, Arg2: 2}}},
+
+ {in: `Func2(1, 2, 3)`, ptr: new(funcs), err: fmt.Errorf("json: too many arguments for function Func2")},
+ {in: `BadFunc()`, ptr: new(funcs), err: fmt.Errorf(`json: unknown function "BadFunc"`)},
+
+ {in: `Func2(1)`, ptr: new(jsonText), out: jsonText{"Func2(1)"}},
+ {in: `Func2(1, 2)`, ptr: new(funcsText), out: funcsText{Func2: jsonText{"Func2(1, 2)"}}},
+ {in: `{"f": Func2(1, 2), "b": true}`, ptr: new(nestedText), out: nestedText{jsonText{"Func2(1, 2)"}, true}},
+
+ {in: `Func1()`, ptr: new(struct{}), out: struct{}{}},
+
+ // Functions with "new" prefix
+ {in: `new Func4(1)`, ptr: new(interface{}), out: map[string]interface{}{
+ "$func4": map[string]interface{}{"arg1": float64(1)},
+ }},
+
+ // Constants
+ {in: `Const1`, ptr: new(interface{}), out: const1},
+ {in: `{"c": Const1}`, ptr: new(struct{ C *const1Type }), out: struct{ C *const1Type }{const1}},
+
+ // Keyed documents
+ {in: `{"v": {"$key1": 1}}`, ptr: new(interface{}), out: map[string]interface{}{"v": keyed(`{"$key1": 1}`)}},
+ {in: `{"k": {"$key1": 1}}`, ptr: new(keyedType), out: keyedType{K: keyed(`{"$key1": 1}`)}},
+ {in: `{"i": {"$key1": 1}}`, ptr: new(keyedType), err: &UnmarshalTypeError{"object", reflect.TypeOf(0), 18}},
+
+ // Keyed function documents
+ {in: `{"v": Func3()}`, ptr: new(interface{}), out: map[string]interface{}{"v": keyed(`Func3()`)}},
+ {in: `{"k": Func3()}`, ptr: new(keyedType), out: keyedType{K: keyed(`Func3()`)}},
+ {in: `{"i": Func3()}`, ptr: new(keyedType), err: &UnmarshalTypeError{"object", reflect.TypeOf(0), 13}},
+
+ // Unquoted keys
+ {in: `{$k_1: "bar"}`, ptr: new(interface{}), out: map[string]interface{}{"$k_1": "bar"}},
+ {in: `{$k_1: "bar"}`, ptr: new(unquotedKey), out: unquotedKey{"bar"}},
+
+ {in: `{$k_1: "bar"}`, noext: true, ptr: new(interface{}),
+ err: &SyntaxError{"invalid character '$' looking for beginning of object key string", 2}},
+ {in: `{$k_1: "bar"}`, noext: true, ptr: new(unquotedKey),
+ err: &SyntaxError{"invalid character '$' looking for beginning of object key string", 2}},
+
+ // Trailing commas
+ {in: `{"k": "v",}`, ptr: new(interface{}), out: map[string]interface{}{"k": "v"}},
+ {in: `{"k": "v",}`, ptr: new(struct{}), out: struct{}{}},
+ {in: `["v",]`, ptr: new(interface{}), out: []interface{}{"v"}},
+
+ {in: `{"k": "v",}`, noext: true, ptr: new(interface{}),
+ err: &SyntaxError{"invalid character '}' looking for beginning of object key string", 11}},
+ {in: `{"k": "v",}`, noext: true, ptr: new(struct{}),
+ err: &SyntaxError{"invalid character '}' looking for beginning of object key string", 11}},
+ {in: `["a",]`, noext: true, ptr: new(interface{}),
+ err: &SyntaxError{"invalid character ']' looking for beginning of value", 6}},
+}
+
+type extEncodeTest struct {
+ in interface{}
+ out string
+ err error
+}
+
+var extEncodeTests = []extEncodeTest{
+ {in: docint(13), out: "{\"$docint\":13}\n"},
+}
+
+func TestExtensionDecode(t *testing.T) {
+ for i, tt := range extDecodeTests {
+ in := []byte(tt.in)
+
+ // v = new(right-type)
+ v := reflect.New(reflect.TypeOf(tt.ptr).Elem())
+ dec := NewDecoder(bytes.NewReader(in))
+ if !tt.noext {
+ dec.Extend(&ext)
+ }
+ if err := dec.Decode(v.Interface()); !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: %v, want %v", i, err, tt.err)
+ continue
+ } else if err != nil {
+ continue
+ }
+ if !reflect.DeepEqual(v.Elem().Interface(), tt.out) {
+ t.Errorf("#%d: mismatch\nhave: %#+v\nwant: %#+v", i, v.Elem().Interface(), tt.out)
+ data, _ := Marshal(v.Elem().Interface())
+ t.Logf("%s", string(data))
+ data, _ = Marshal(tt.out)
+ t.Logf("%s", string(data))
+ continue
+ }
+ }
+}
+
+func TestExtensionEncode(t *testing.T) {
+ var buf bytes.Buffer
+ for i, tt := range extEncodeTests {
+ buf.Truncate(0)
+ enc := NewEncoder(&buf)
+ enc.Extend(&ext)
+ err := enc.Encode(tt.in)
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: %v, want %v", i, err, tt.err)
+ continue
+ }
+ if buf.String() != tt.out {
+ t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, buf.String(), tt.out)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/fold.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/fold.go
new file mode 100644
index 00000000000..9e170127dba
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/fold.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "unicode/utf8"
+)
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special letters, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See https://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
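
The reason k/K and s/S force the slower equalFoldRight path can be checked directly with bytes.EqualFold, which performs the full Unicode simple fold; a quick sketch:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// The Kelvin sign folds to 'k' and the long s folds to 's', so a
	// byte-by-byte ASCII comparison would miss these matches.
	fmt.Println(bytes.EqualFold([]byte("k"), []byte("\u212a")))  // true
	fmt.Println(bytes.EqualFold([]byte("s"), []byte("\u017f")))  // true
	fmt.Println(bytes.EqualFold([]byte("name"), []byte("NAME"))) // true
}
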
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/fold_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/fold_test.go
new file mode 100644
index 00000000000..9fb94646a85
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/fold_test.go
@@ -0,0 +1,116 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "strings"
+ "testing"
+ "unicode/utf8"
+)
+
+var foldTests = []struct {
+ fn func(s, t []byte) bool
+ s, t string
+ want bool
+}{
+ {equalFoldRight, "", "", true},
+ {equalFoldRight, "a", "a", true},
+ {equalFoldRight, "", "a", false},
+ {equalFoldRight, "a", "", false},
+ {equalFoldRight, "a", "A", true},
+ {equalFoldRight, "AB", "ab", true},
+ {equalFoldRight, "AB", "ac", false},
+ {equalFoldRight, "sbkKc", "ſbKKc", true},
+ {equalFoldRight, "SbKkc", "ſbKKc", true},
+ {equalFoldRight, "SbKkc", "ſbKK", false},
+ {equalFoldRight, "e", "é", false},
+ {equalFoldRight, "s", "S", true},
+
+ {simpleLetterEqualFold, "", "", true},
+ {simpleLetterEqualFold, "abc", "abc", true},
+ {simpleLetterEqualFold, "abc", "ABC", true},
+ {simpleLetterEqualFold, "abc", "ABCD", false},
+ {simpleLetterEqualFold, "abc", "xxx", false},
+
+ {asciiEqualFold, "a_B", "A_b", true},
+ {asciiEqualFold, "aa@", "aa`", false}, // verify 0x40 and 0x60 aren't case-equivalent
+}
+
+func TestFold(t *testing.T) {
+ for i, tt := range foldTests {
+ if got := tt.fn([]byte(tt.s), []byte(tt.t)); got != tt.want {
+ t.Errorf("%d. %q, %q = %v; want %v", i, tt.s, tt.t, got, tt.want)
+ }
+ truth := strings.EqualFold(tt.s, tt.t)
+ if truth != tt.want {
+ t.Errorf("strings.EqualFold doesn't agree with case %d", i)
+ }
+ }
+}
+
+func TestFoldAgainstUnicode(t *testing.T) {
+ const bufSize = 5
+ buf1 := make([]byte, 0, bufSize)
+ buf2 := make([]byte, 0, bufSize)
+ var runes []rune
+ for i := 0x20; i <= 0x7f; i++ {
+ runes = append(runes, rune(i))
+ }
+ runes = append(runes, kelvin, smallLongEss)
+
+ funcs := []struct {
+ name string
+ fold func(s, t []byte) bool
+ letter bool // must be ASCII letter
+ simple bool // must be simple ASCII letter (not 'S' or 'K')
+ }{
+ {
+ name: "equalFoldRight",
+ fold: equalFoldRight,
+ },
+ {
+ name: "asciiEqualFold",
+ fold: asciiEqualFold,
+ simple: true,
+ },
+ {
+ name: "simpleLetterEqualFold",
+ fold: simpleLetterEqualFold,
+ simple: true,
+ letter: true,
+ },
+ }
+
+ for _, ff := range funcs {
+ for _, r := range runes {
+ if r >= utf8.RuneSelf {
+ continue
+ }
+ if ff.letter && !isASCIILetter(byte(r)) {
+ continue
+ }
+ if ff.simple && (r == 's' || r == 'S' || r == 'k' || r == 'K') {
+ continue
+ }
+ for _, r2 := range runes {
+ buf1 := append(buf1[:0], 'x')
+ buf2 := append(buf2[:0], 'x')
+ buf1 = buf1[:1+utf8.EncodeRune(buf1[1:bufSize], r)]
+ buf2 = buf2[:1+utf8.EncodeRune(buf2[1:bufSize], r2)]
+ buf1 = append(buf1, 'x')
+ buf2 = append(buf2, 'x')
+ want := bytes.EqualFold(buf1, buf2)
+ if got := ff.fold(buf1, buf2); got != want {
+ t.Errorf("%s(%q, %q) = %v; want %v", ff.name, buf1, buf2, got, want)
+ }
+ }
+ }
+ }
+}
+
+func isASCIILetter(b byte) bool {
+ return ('A' <= b && b <= 'Z') || ('a' <= b && b <= 'z')
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/indent.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/indent.go
new file mode 100644
index 00000000000..fba19548c92
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/indent.go
@@ -0,0 +1,141 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "bytes"
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+ return compact(dst, src, false)
+}
+
+func compact(dst *bytes.Buffer, src []byte, escape bool) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ start := 0
+ for i, c := range src {
+ if escape && (c == '<' || c == '>' || c == '&') {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ v := scan.step(&scan, c)
+ if v >= scanSkipSpace {
+ if v == scanError {
+ break
+ }
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ start = i + 1
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+ return nil
+}
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+ dst.WriteByte('\n')
+ dst.WriteString(prefix)
+ for i := 0; i < depth; i++ {
+ dst.WriteString(indent)
+ }
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, to make it easier to embed inside other formatted JSON data.
+// Although leading space characters (space, tab, carriage return, newline)
+// at the beginning of src are dropped, trailing space characters
+// at the end of src are preserved and copied to dst.
+// For example, if src has no trailing spaces, neither will dst;
+// if src ends in a trailing newline, so will dst.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+ origLen := dst.Len()
+ var scan scanner
+ scan.reset()
+ needIndent := false
+ depth := 0
+ for _, c := range src {
+ scan.bytes++
+ v := scan.step(&scan, c)
+ if v == scanSkipSpace {
+ continue
+ }
+ if v == scanError {
+ break
+ }
+ if needIndent && v != scanEndObject && v != scanEndArray {
+ needIndent = false
+ depth++
+ newline(dst, prefix, indent, depth)
+ }
+
+ // Emit semantically uninteresting bytes
+ // (in particular, punctuation in strings) unmodified.
+ if v == scanContinue {
+ dst.WriteByte(c)
+ continue
+ }
+
+ // Add spacing around real punctuation.
+ switch c {
+ case '{', '[':
+ // delay indent so that empty object and array are formatted as {} and [].
+ needIndent = true
+ dst.WriteByte(c)
+
+ case ',':
+ dst.WriteByte(c)
+ newline(dst, prefix, indent, depth)
+
+ case ':':
+ dst.WriteByte(c)
+ dst.WriteByte(' ')
+
+ case '}', ']':
+ if needIndent {
+ // suppress indent in empty object/array
+ needIndent = false
+ } else {
+ depth--
+ newline(dst, prefix, indent, depth)
+ }
+ dst.WriteByte(c)
+
+ default:
+ dst.WriteByte(c)
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ return nil
+}
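
Compact is the whitespace-stripping companion to Indent above; a minimal sketch using the standard encoding/json, whose Compact/Indent pair this file mirrors:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	var dst bytes.Buffer
	src := []byte(`{ "a" : [ 1, 2 ],
  "b" : "x" }`)
	// Compact drops insignificant whitespace without reordering keys.
	if err := json.Compact(&dst, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(dst.String()) // {"a":[1,2],"b":"x"}
}
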
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/number_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/number_test.go
new file mode 100644
index 00000000000..4b869996388
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/number_test.go
@@ -0,0 +1,133 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "regexp"
+ "testing"
+)
+
+func TestNumberIsValid(t *testing.T) {
+ // From: http://stackoverflow.com/a/13340826
+ var jsonNumberRegexp = regexp.MustCompile(`^-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?$`)
+
+ validTests := []string{
+ "0",
+ "-0",
+ "1",
+ "-1",
+ "0.1",
+ "-0.1",
+ "1234",
+ "-1234",
+ "12.34",
+ "-12.34",
+ "12E0",
+ "12E1",
+ "12e34",
+ "12E-0",
+ "12e+1",
+ "12e-34",
+ "-12E0",
+ "-12E1",
+ "-12e34",
+ "-12E-0",
+ "-12e+1",
+ "-12e-34",
+ "1.2E0",
+ "1.2E1",
+ "1.2e34",
+ "1.2E-0",
+ "1.2e+1",
+ "1.2e-34",
+ "-1.2E0",
+ "-1.2E1",
+ "-1.2e34",
+ "-1.2E-0",
+ "-1.2e+1",
+ "-1.2e-34",
+ "0E0",
+ "0E1",
+ "0e34",
+ "0E-0",
+ "0e+1",
+ "0e-34",
+ "-0E0",
+ "-0E1",
+ "-0e34",
+ "-0E-0",
+ "-0e+1",
+ "-0e-34",
+ }
+
+ for _, test := range validTests {
+ if !isValidNumber(test) {
+ t.Errorf("%s should be valid", test)
+ }
+
+ var f float64
+ if err := Unmarshal([]byte(test), &f); err != nil {
+ t.Errorf("%s should be valid but Unmarshal failed: %v", test, err)
+ }
+
+ if !jsonNumberRegexp.MatchString(test) {
+ t.Errorf("%s should be valid but regexp does not match", test)
+ }
+ }
+
+ invalidTests := []string{
+ "",
+ "invalid",
+ "1.0.1",
+ "1..1",
+ "-1-2",
+ "012a42",
+ "01.2",
+ "012",
+ "12E12.12",
+ "1e2e3",
+ "1e+-2",
+ "1e--23",
+ "1e",
+ "e1",
+ "1e+",
+ "1ea",
+ "1a",
+ "1.a",
+ "1.",
+ "01",
+ "1.e1",
+ }
+
+ for _, test := range invalidTests {
+ if isValidNumber(test) {
+ t.Errorf("%s should be invalid", test)
+ }
+
+ var f float64
+ if err := Unmarshal([]byte(test), &f); err == nil {
+ t.Errorf("%s should be invalid but unmarshal wrote %v", test, f)
+ }
+
+ if jsonNumberRegexp.MatchString(test) {
+ t.Errorf("%s should be invalid but matches regexp", test)
+ }
+ }
+}
+
+func BenchmarkNumberIsValid(b *testing.B) {
+ s := "-61657.61667E+61673"
+ for i := 0; i < b.N; i++ {
+ isValidNumber(s)
+ }
+}
+
+func BenchmarkNumberIsValidRegexp(b *testing.B) {
+ var jsonNumberRegexp = regexp.MustCompile(`^-?(?:0|[1-9]\d*)(?:\.\d+)?(?:[eE][+-]?\d+)?$`)
+ s := "-61657.61667E+61673"
+ for i := 0; i < b.N; i++ {
+ jsonNumberRegexp.MatchString(s)
+ }
+}
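
isValidNumber guards what a Number may contain at encode time; on the decode side, the usual way to keep the exact numeric literal is Decoder.UseNumber. A sketch with the standard encoding/json, whose Number type this fork mirrors:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"n": 9007199254740993}`))
	dec.UseNumber() // keep the literal; float64 would round this value
	var v map[string]interface{}
	if err := dec.Decode(&v); err != nil {
		log.Fatal(err)
	}
	n := v["n"].(json.Number)
	i, err := n.Int64()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n.String(), i) // 9007199254740993 9007199254740993
}
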
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/scanner.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/scanner.go
new file mode 100644
index 00000000000..97080438873
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/scanner.go
@@ -0,0 +1,697 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, nextValue, etc).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import "strconv"
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+ scan.reset()
+ for _, c := range data {
+ scan.bytes++
+ if scan.step(scan, c) == scanError {
+ return scan.err
+ }
+ }
+ if scan.eof() == scanError {
+ return scan.err
+ }
+ return nil
+}
+
+// nextValue splits data after the next whole JSON value,
+// returning that value and the bytes that follow it as separate slices.
+// scan is passed in for use by nextValue to avoid an allocation.
+func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
+ scan.reset()
+ for i, c := range data {
+ v := scan.step(scan, c)
+ if v >= scanEndObject {
+ switch v {
+			// Probe the scanner with a space to determine whether we will
+			// get scanEnd on the next character. Otherwise, if the next character
+			// is not a space, stateEndTop allocates a needless error.
+ case scanEndObject, scanEndArray, scanEndParams:
+ if scan.step(scan, ' ') == scanEnd {
+ return data[:i+1], data[i+1:], nil
+ }
+ case scanError:
+ return nil, nil, scan.err
+ case scanEnd:
+ return data[:i], data[i:], nil
+ }
+ }
+ }
+ if scan.eof() == scanError {
+ return nil, nil, scan.err
+ }
+ return data, nil, nil
+}
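+
+// splitValues is an illustrative sketch, not part of the upstream file: it
+// peels concatenated top-level values off a buffer with nextValue. Values
+// may keep surrounding whitespace; a whitespace-only tail would be reported
+// as an error by nextValue.
+func splitValues(data []byte) ([][]byte, error) {
+	var scan scanner
+	var vals [][]byte
+	for len(data) > 0 {
+		v, rest, err := nextValue(data, &scan)
+		if err != nil {
+			return nil, err
+		}
+		vals = append(vals, v)
+		data = rest
+	}
+	return vals, nil
+}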
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+ msg string // description of error
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A scanner is a JSON scanning state machine.
+// Callers call scan.reset() and then pass bytes in one at a time
+// by calling scan.step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value scanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in. (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
+type scanner struct {
+ // The step is a func to be called to execute the next transition.
+ // Also tried using an integer constant and a single func
+ // with a switch, but using the func directly was 10% faster
+ // on a 64-bit Mac Mini, and it's nicer to read.
+ step func(*scanner, byte) int
+
+ // Reached end of top-level value.
+ endTop bool
+
+ // Stack of what we're in the middle of - array values, object keys, object values.
+ parseState []int
+
+ // Error that happened, if any.
+ err error
+
+ // 1-byte redo (see undo method)
+ redo bool
+ redoCode int
+ redoState func(*scanner, byte) int
+
+ // total bytes consumed, updated by decoder.Decode
+ bytes int64
+}
+
+// These values are returned by the state transition functions
+// assigned to scanner.state and the method scanner.eof.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to scanner.state: if one call returns scanError,
+// every subsequent call will return scanError too.
+const (
+ // Continue.
+ scanContinue = iota // uninteresting byte
+ scanBeginLiteral // end implied by next result != scanContinue
+ scanBeginObject // begin object
+ scanObjectKey // just finished object key (string)
+ scanObjectValue // just finished non-last object value
+ scanEndObject // end object (implies scanObjectValue if possible)
+ scanBeginArray // begin array
+ scanArrayValue // just finished array value
+ scanEndArray // end array (implies scanArrayValue if possible)
+ scanBeginName // begin function call
+ scanParam // begin function argument
+ scanEndParams // end function call
+ scanSkipSpace // space byte; can skip; known to be last "continue" result
+
+ // Stop.
+ scanEnd // top-level value ended *before* this byte; known to be first "stop" result
+ scanError // hit an error, scanner.err.
+)
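+
+// scanOpcodes is a usage sketch, not part of the upstream file: it feeds
+// bytes to the state machine one at a time, as checkValid does, and records
+// the opcode returned for each byte.
+func scanOpcodes(data []byte) ([]int, error) {
+	var s scanner
+	s.reset()
+	ops := make([]int, 0, len(data))
+	for _, c := range data {
+		s.bytes++ // keep error offsets accurate, as checkValid does
+		op := s.step(&s, c)
+		if op == scanError {
+			return nil, s.err
+		}
+		ops = append(ops, op)
+	}
+	if s.eof() == scanError {
+		return nil, s.err
+	}
+	return ops, nil
+}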
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+ parseObjectKey = iota // parsing object key (before colon)
+ parseObjectValue // parsing object value (after colon)
+ parseArrayValue // parsing array value
+ parseName // parsing unquoted name
+ parseParam // parsing function argument value
+)
+
+// reset prepares the scanner for use.
+// It must be called before calling s.step.
+func (s *scanner) reset() {
+ s.step = stateBeginValue
+ s.parseState = s.parseState[0:0]
+ s.err = nil
+ s.redo = false
+ s.endTop = false
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() int {
+ if s.err != nil {
+ return scanError
+ }
+ if s.endTop {
+ return scanEnd
+ }
+ s.step(s, ' ')
+ if s.endTop {
+ return scanEnd
+ }
+ if s.err == nil {
+ s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+ }
+ return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+func (s *scanner) pushParseState(p int) {
+ s.parseState = append(s.parseState, p)
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+ n := len(s.parseState) - 1
+ s.parseState = s.parseState[0:n]
+ s.redo = false
+ if n == 0 {
+ s.step = stateEndTop
+ s.endTop = true
+ } else {
+ s.step = stateEndValue
+ }
+}
+
+func isSpace(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == ']' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ switch c {
+ case '{':
+ s.step = stateBeginStringOrEmpty
+ s.pushParseState(parseObjectKey)
+ return scanBeginObject
+ case '[':
+ s.step = stateBeginValueOrEmpty
+ s.pushParseState(parseArrayValue)
+ return scanBeginArray
+ case '"':
+ s.step = stateInString
+ return scanBeginLiteral
+ case '-':
+ s.step = stateNeg
+ return scanBeginLiteral
+ case '0': // beginning of 0.123
+ s.step = state0
+ return scanBeginLiteral
+ case 'n':
+ s.step = stateNew0
+ return scanBeginName
+ }
+ if '1' <= c && c <= '9' { // beginning of 1234.5
+ s.step = state1
+ return scanBeginLiteral
+ }
+ if isName(c) {
+ s.step = stateName
+ return scanBeginName
+ }
+ return s.error(c, "looking for beginning of value")
+}
+
+func isName(c byte) bool {
+ return c == '$' || c == '_' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9'
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '}' {
+ n := len(s.parseState)
+ s.parseState[n-1] = parseObjectValue
+ return stateEndValue(s, c)
+ }
+ return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '"' {
+ s.step = stateInString
+ return scanBeginLiteral
+ }
+ if isName(c) {
+ s.step = stateName
+ return scanBeginName
+ }
+ return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) int {
+ n := len(s.parseState)
+ if n == 0 {
+ // Completed top-level before the current byte.
+ s.step = stateEndTop
+ s.endTop = true
+ return stateEndTop(s, c)
+ }
+ if c <= ' ' && isSpace(c) {
+ s.step = stateEndValue
+ return scanSkipSpace
+ }
+ ps := s.parseState[n-1]
+ switch ps {
+ case parseObjectKey:
+ if c == ':' {
+ s.parseState[n-1] = parseObjectValue
+ s.step = stateBeginValue
+ return scanObjectKey
+ }
+ return s.error(c, "after object key")
+ case parseObjectValue:
+ if c == ',' {
+ s.parseState[n-1] = parseObjectKey
+ s.step = stateBeginStringOrEmpty
+ return scanObjectValue
+ }
+ if c == '}' {
+ s.popParseState()
+ return scanEndObject
+ }
+ return s.error(c, "after object key:value pair")
+ case parseArrayValue:
+ if c == ',' {
+ s.step = stateBeginValueOrEmpty
+ return scanArrayValue
+ }
+ if c == ']' {
+ s.popParseState()
+ return scanEndArray
+ }
+ return s.error(c, "after array element")
+ case parseParam:
+ if c == ',' {
+ s.step = stateBeginValue
+ return scanParam
+ }
+ if c == ')' {
+ s.popParseState()
+ return scanEndParams
+ }
+		return s.error(c, "after function argument")
+ }
+ return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) int {
+ if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+ // Complain about non-space byte on next call.
+ s.error(c, "after top-level value")
+ }
+ return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) int {
+ if c == '"' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ if c == '\\' {
+ s.step = stateInStringEsc
+ return scanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) int {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+ s.step = stateInString
+ return scanContinue
+ case 'u':
+ s.step = stateInStringEscU
+ return scanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU1
+ return scanContinue
+ }
+	// not a valid hex digit
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU12
+ return scanContinue
+ }
+	// not a valid hex digit
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU123
+ return scanContinue
+ }
+	// not a valid hex digit
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInString
+ return scanContinue
+ }
+	// not a valid hex digit
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) int {
+ if c == '0' {
+ s.step = state0
+ return scanContinue
+ }
+ if '1' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) int {
+ if c == '.' {
+ s.step = stateDot
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) int {
+ if c == '+' || c == '-' {
+ s.step = stateESign
+ return scanContinue
+ }
+ return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateNew0 is the state after reading `n` at the start of a value; the
+// input may turn out to be `new`, `null`, or another unquoted name.
+func stateNew0(s *scanner, c byte) int {
+ if c == 'e' {
+ s.step = stateNew1
+ return scanContinue
+ }
+ s.step = stateName
+ return stateName(s, c)
+}
+
+// stateNew1 is the state after reading `ne`.
+func stateNew1(s *scanner, c byte) int {
+ if c == 'w' {
+ s.step = stateNew2
+ return scanContinue
+ }
+ s.step = stateName
+ return stateName(s, c)
+}
+
+// stateNew2 is the state after reading `new`.
+func stateNew2(s *scanner, c byte) int {
+ s.step = stateName
+ if c == ' ' {
+ return scanContinue
+ }
+ return stateName(s, c)
+}
+
+// stateName is the state while reading an unquoted function name.
+func stateName(s *scanner, c byte) int {
+ if isName(c) {
+ return scanContinue
+ }
+ if c == '(' {
+ s.step = stateParamOrEmpty
+ s.pushParseState(parseParam)
+ return scanParam
+ }
+ return stateEndValue(s, c)
+}
+
+// stateParamOrEmpty is the state after reading `(`.
+func stateParamOrEmpty(s *scanner, c byte) int {
+ if c <= ' ' && isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == ')' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
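+// Note: with the unquoted-name extension above, literals such as `true`,
+// `false`, and `null` normally scan as names via stateName; the literal
+// states below are retained from the upstream scanner.
+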
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) int {
+ if c == 'r' {
+ s.step = stateTr
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) int {
+ if c == 'u' {
+ s.step = stateTru
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) int {
+ if c == 'a' {
+ s.step = stateFa
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateFal
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) int {
+ if c == 's' {
+ s.step = stateFals
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) int {
+ if c == 'u' {
+ s.step = stateNu
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateNul
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) int {
+ return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) int {
+ s.step = stateError
+ s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+ return scanError
+}
+
+// quoteChar formats c as a quoted character literal
+func quoteChar(c byte) string {
+ // special cases - different from quoted strings
+ if c == '\'' {
+ return `'\''`
+ }
+ if c == '"' {
+ return `'"'`
+ }
+
+ // use quoted string with different quotation marks
+ s := strconv.Quote(string(c))
+ return "'" + s[1:len(s)-1] + "'"
+}
+
+// undo causes the scanner to return scanCode from the next state transition.
+// This gives callers a simple 1-byte undo mechanism.
+func (s *scanner) undo(scanCode int) {
+ if s.redo {
+ panic("json: invalid use of scanner")
+ }
+ s.redoCode = scanCode
+ s.redoState = s.step
+ s.step = stateRedo
+ s.redo = true
+}
+
+// stateRedo helps implement the scanner's 1-byte undo.
+func stateRedo(s *scanner, c byte) int {
+ s.redo = false
+ s.step = s.redoState
+ return s.redoCode
+}
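+
+// A redo round-trip, sketched: after undo(op), the very next step call
+// returns op without consuming its byte, and s.step is restored to the
+// state saved by undo.
+//
+//	s.undo(scanEndObject)
+//	op := s.step(&s, ' ') // op == scanEndObject; the ' ' is not processed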
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/scanner_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/scanner_test.go
new file mode 100644
index 00000000000..70a28974f78
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/scanner_test.go
@@ -0,0 +1,316 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "math"
+ "math/rand"
+ "reflect"
+ "testing"
+)
+
+// Tests of simple examples.
+
+type example struct {
+ compact string
+ indent string
+}
+
+var examples = []example{
+ {`1`, `1`},
+ {`{}`, `{}`},
+ {`[]`, `[]`},
+ {`{"":2}`, "{\n\t\"\": 2\n}"},
+ {`[3]`, "[\n\t3\n]"},
+ {`[1,2,3]`, "[\n\t1,\n\t2,\n\t3\n]"},
+ {`{"x":1}`, "{\n\t\"x\": 1\n}"},
+ {ex1, ex1i},
+}
+
+var ex1 = `[true,false,null,"x",1,1.5,0,-5e+2]`
+
+var ex1i = `[
+ true,
+ false,
+ null,
+ "x",
+ 1,
+ 1.5,
+ 0,
+ -5e+2
+]`
+
+func TestCompact(t *testing.T) {
+ var buf bytes.Buffer
+ for _, tt := range examples {
+ buf.Reset()
+ if err := Compact(&buf, []byte(tt.compact)); err != nil {
+ t.Errorf("Compact(%#q): %v", tt.compact, err)
+ } else if s := buf.String(); s != tt.compact {
+ t.Errorf("Compact(%#q) = %#q, want original", tt.compact, s)
+ }
+
+ buf.Reset()
+ if err := Compact(&buf, []byte(tt.indent)); err != nil {
+ t.Errorf("Compact(%#q): %v", tt.indent, err)
+ continue
+ } else if s := buf.String(); s != tt.compact {
+ t.Errorf("Compact(%#q) = %#q, want %#q", tt.indent, s, tt.compact)
+ }
+ }
+}
+
+func TestCompactSeparators(t *testing.T) {
+ // U+2028 and U+2029 should be escaped inside strings.
+ // They should not appear outside strings.
+ tests := []struct {
+ in, compact string
+ }{
+ {"{\"\u2028\": 1}", `{"\u2028":1}`},
+ {"{\"\u2029\" :2}", `{"\u2029":2}`},
+ }
+ for _, tt := range tests {
+ var buf bytes.Buffer
+ if err := Compact(&buf, []byte(tt.in)); err != nil {
+ t.Errorf("Compact(%q): %v", tt.in, err)
+ } else if s := buf.String(); s != tt.compact {
+ t.Errorf("Compact(%q) = %q, want %q", tt.in, s, tt.compact)
+ }
+ }
+}
+
+func TestIndent(t *testing.T) {
+ var buf bytes.Buffer
+ for _, tt := range examples {
+ buf.Reset()
+ if err := Indent(&buf, []byte(tt.indent), "", "\t"); err != nil {
+ t.Errorf("Indent(%#q): %v", tt.indent, err)
+ } else if s := buf.String(); s != tt.indent {
+ t.Errorf("Indent(%#q) = %#q, want original", tt.indent, s)
+ }
+
+ buf.Reset()
+ if err := Indent(&buf, []byte(tt.compact), "", "\t"); err != nil {
+ t.Errorf("Indent(%#q): %v", tt.compact, err)
+ continue
+ } else if s := buf.String(); s != tt.indent {
+ t.Errorf("Indent(%#q) = %#q, want %#q", tt.compact, s, tt.indent)
+ }
+ }
+}
+
+// Tests of a large random structure.
+
+func TestCompactBig(t *testing.T) {
+ initBig()
+ var buf bytes.Buffer
+ if err := Compact(&buf, jsonBig); err != nil {
+ t.Fatalf("Compact: %v", err)
+ }
+ b := buf.Bytes()
+ if !bytes.Equal(b, jsonBig) {
+ t.Error("Compact(jsonBig) != jsonBig")
+ diff(t, b, jsonBig)
+ return
+ }
+}
+
+func TestIndentBig(t *testing.T) {
+ initBig()
+ var buf bytes.Buffer
+ if err := Indent(&buf, jsonBig, "", "\t"); err != nil {
+ t.Fatalf("Indent1: %v", err)
+ }
+ b := buf.Bytes()
+ if len(b) == len(jsonBig) {
+ // jsonBig is compact (no unnecessary spaces);
+ // indenting should make it bigger
+ t.Fatalf("Indent(jsonBig) did not get bigger")
+ }
+
+ // should be idempotent
+ var buf1 bytes.Buffer
+ if err := Indent(&buf1, b, "", "\t"); err != nil {
+ t.Fatalf("Indent2: %v", err)
+ }
+ b1 := buf1.Bytes()
+ if !bytes.Equal(b1, b) {
+ t.Error("Indent(Indent(jsonBig)) != Indent(jsonBig)")
+ diff(t, b1, b)
+ return
+ }
+
+ // should get back to original
+ buf1.Reset()
+ if err := Compact(&buf1, b); err != nil {
+ t.Fatalf("Compact: %v", err)
+ }
+ b1 = buf1.Bytes()
+ if !bytes.Equal(b1, jsonBig) {
+ t.Error("Compact(Indent(jsonBig)) != jsonBig")
+ diff(t, b1, jsonBig)
+ return
+ }
+}
+
+type indentErrorTest struct {
+ in string
+ err error
+}
+
+var indentErrorTests = []indentErrorTest{
+ {`{"X": "foo", "Y"}`, &SyntaxError{"invalid character '}' after object key", 17}},
+ {`{"X": "foo" "Y": "bar"}`, &SyntaxError{"invalid character '\"' after object key:value pair", 13}},
+}
+
+func TestIndentErrors(t *testing.T) {
+ for i, tt := range indentErrorTests {
+ slice := make([]uint8, 0)
+ buf := bytes.NewBuffer(slice)
+ if err := Indent(buf, []uint8(tt.in), "", ""); err != nil {
+ if !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("#%d: Indent: %#v", i, err)
+ continue
+ }
+ }
+ }
+}
+
+func TestNextValueBig(t *testing.T) {
+ initBig()
+ var scan scanner
+ item, rest, err := nextValue(jsonBig, &scan)
+ if err != nil {
+ t.Fatalf("nextValue: %s", err)
+ }
+ if len(item) != len(jsonBig) || &item[0] != &jsonBig[0] {
+ t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
+ }
+ if len(rest) != 0 {
+ t.Errorf("invalid rest: %d", len(rest))
+ }
+
+ item, rest, err = nextValue(append(jsonBig, "HELLO WORLD"...), &scan)
+ if err != nil {
+ t.Fatalf("nextValue extra: %s", err)
+ }
+ if len(item) != len(jsonBig) {
+ t.Errorf("invalid item: %d %d", len(item), len(jsonBig))
+ }
+ if string(rest) != "HELLO WORLD" {
+ t.Errorf("invalid rest: %d", len(rest))
+ }
+}
+
+var benchScan scanner
+
+func BenchmarkSkipValue(b *testing.B) {
+ initBig()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ nextValue(jsonBig, &benchScan)
+ }
+ b.SetBytes(int64(len(jsonBig)))
+}
+
+func diff(t *testing.T, a, b []byte) {
+ for i := 0; ; i++ {
+ if i >= len(a) || i >= len(b) || a[i] != b[i] {
+ j := i - 10
+ if j < 0 {
+ j = 0
+ }
+ t.Errorf("diverge at %d: «%s» vs «%s»", i, trim(a[j:]), trim(b[j:]))
+ return
+ }
+ }
+}
+
+func trim(b []byte) []byte {
+ if len(b) > 20 {
+ return b[0:20]
+ }
+ return b
+}
+
+// Generate a random JSON object.
+
+var jsonBig []byte
+
+func initBig() {
+ n := 10000
+ if testing.Short() {
+ n = 100
+ }
+ b, err := Marshal(genValue(n))
+ if err != nil {
+ panic(err)
+ }
+ jsonBig = b
+}
+
+func genValue(n int) interface{} {
+ if n > 1 {
+ switch rand.Intn(2) {
+ case 0:
+ return genArray(n)
+ case 1:
+ return genMap(n)
+ }
+ }
+ switch rand.Intn(3) {
+ case 0:
+ return rand.Intn(2) == 0
+ case 1:
+ return rand.NormFloat64()
+ case 2:
+ return genString(30)
+ }
+ panic("unreachable")
+}
+
+func genString(stddev float64) string {
+ n := int(math.Abs(rand.NormFloat64()*stddev + stddev/2))
+ c := make([]rune, n)
+ for i := range c {
+ f := math.Abs(rand.NormFloat64()*64 + 32)
+ if f > 0x10ffff {
+ f = 0x10ffff
+ }
+ c[i] = rune(f)
+ }
+ return string(c)
+}
+
+func genArray(n int) []interface{} {
+ f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
+ if f > n {
+ f = n
+ }
+ if f < 1 {
+ f = 1
+ }
+ x := make([]interface{}, f)
+ for i := range x {
+ x[i] = genValue(((i+1)*n)/f - (i*n)/f)
+ }
+ return x
+}
+
+func genMap(n int) map[string]interface{} {
+ f := int(math.Abs(rand.NormFloat64()) * math.Min(10, float64(n/2)))
+ if f > n {
+ f = n
+ }
+ if n > 0 && f == 0 {
+ f = 1
+ }
+ x := make(map[string]interface{})
+ for i := 0; i < f; i++ {
+ x[genString(10)] = genValue(((i+1)*n)/f - (i*n)/f)
+ }
+ return x
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/stream.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/stream.go
new file mode 100644
index 00000000000..e023702b571
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/stream.go
@@ -0,0 +1,510 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "errors"
+ "io"
+)
+
+// A Decoder reads and decodes JSON values from an input stream.
+type Decoder struct {
+ r io.Reader
+ buf []byte
+ d decodeState
+ scanp int // start of unread data in buf
+ scan scanner
+ err error
+
+ tokenState int
+ tokenStack []int
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may
+// read data from r beyond the JSON values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
+
+// Decode reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (dec *Decoder) Decode(v interface{}) error {
+ if dec.err != nil {
+ return dec.err
+ }
+
+ if err := dec.tokenPrepareForDecode(); err != nil {
+ return err
+ }
+
+ if !dec.tokenValueAllowed() {
+ return &SyntaxError{msg: "not at beginning of value"}
+ }
+
+ // Read whole value into buffer.
+ n, err := dec.readValue()
+ if err != nil {
+ return err
+ }
+ dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
+ dec.scanp += n
+
+ // Don't save err from unmarshal into dec.err:
+ // the connection is still usable since we read a complete JSON
+ // object from it before the error happened.
+ err = dec.d.unmarshal(v)
+
+ // fixup token streaming state
+ dec.tokenValueEnd()
+
+ return err
+}
+
+// Buffered returns a reader of the data remaining in the Decoder's
+// buffer. The reader is valid until the next call to Decode.
+func (dec *Decoder) Buffered() io.Reader {
+ return bytes.NewReader(dec.buf[dec.scanp:])
+}
+
+// readValue reads a JSON value into dec.buf.
+// It returns the length of the encoding.
+func (dec *Decoder) readValue() (int, error) {
+ dec.scan.reset()
+
+ scanp := dec.scanp
+ var err error
+Input:
+ for {
+ // Look in the buffer for a new value.
+ for i, c := range dec.buf[scanp:] {
+ dec.scan.bytes++
+ v := dec.scan.step(&dec.scan, c)
+ if v == scanEnd {
+ scanp += i
+ break Input
+ }
+ // scanEnd is delayed one byte.
+ // We might block trying to get that byte from src,
+ // so instead invent a space byte.
+ if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
+ scanp += i + 1
+ break Input
+ }
+ if v == scanError {
+ dec.err = dec.scan.err
+ return 0, dec.scan.err
+ }
+ }
+ scanp = len(dec.buf)
+
+ // Did the last read have an error?
+ // Delayed until now to allow buffer scan.
+ if err != nil {
+ if err == io.EOF {
+ if dec.scan.step(&dec.scan, ' ') == scanEnd {
+ break Input
+ }
+ if nonSpace(dec.buf) {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ dec.err = err
+ return 0, err
+ }
+
+ n := scanp - dec.scanp
+ err = dec.refill()
+ scanp = dec.scanp + n
+ }
+ return scanp - dec.scanp, nil
+}
+
+func (dec *Decoder) refill() error {
+ // Make room to read more into the buffer.
+ // First slide down data already consumed.
+ if dec.scanp > 0 {
+ n := copy(dec.buf, dec.buf[dec.scanp:])
+ dec.buf = dec.buf[:n]
+ dec.scanp = 0
+ }
+
+ // Grow buffer if not large enough.
+ const minRead = 512
+ if cap(dec.buf)-len(dec.buf) < minRead {
+ newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
+ copy(newBuf, dec.buf)
+ dec.buf = newBuf
+ }
+
+ // Read. Delay error for next iteration (after scan).
+ n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+ dec.buf = dec.buf[0 : len(dec.buf)+n]
+
+ return err
+}
+
+func nonSpace(b []byte) bool {
+ for _, c := range b {
+ if !isSpace(c) {
+ return true
+ }
+ }
+ return false
+}
+
+// An Encoder writes JSON values to an output stream.
+type Encoder struct {
+ w io.Writer
+ err error
+ escapeHTML bool
+
+ indentBuf *bytes.Buffer
+ indentPrefix string
+ indentValue string
+
+ ext Extension
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w, escapeHTML: true}
+}
+
+// Encode writes the JSON encoding of v to the stream,
+// followed by a newline character.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values to JSON.
+func (enc *Encoder) Encode(v interface{}) error {
+ if enc.err != nil {
+ return enc.err
+ }
+ e := newEncodeState()
+ e.ext = enc.ext
+ err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
+ if err != nil {
+ return err
+ }
+
+ // Terminate each value with a newline.
+ // This makes the output look a little nicer
+ // when debugging, and some kind of space
+ // is required if the encoded value was a number,
+ // so that the reader knows there aren't more
+ // digits coming.
+ e.WriteByte('\n')
+
+ b := e.Bytes()
+ if enc.indentBuf != nil {
+ enc.indentBuf.Reset()
+ err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
+ if err != nil {
+ return err
+ }
+ b = enc.indentBuf.Bytes()
+ }
+ if _, err = enc.w.Write(b); err != nil {
+ enc.err = err
+ }
+ encodeStatePool.Put(e)
+ return err
+}
+
+// Indent sets the encoder to format each encoded value with Indent.
+func (enc *Encoder) Indent(prefix, indent string) {
+ enc.indentBuf = new(bytes.Buffer)
+ enc.indentPrefix = prefix
+ enc.indentValue = indent
+}
+
+// DisableHTMLEscaping causes the encoder not to escape angle brackets
+// ("<" and ">") or ampersands ("&") in JSON strings.
+func (enc *Encoder) DisableHTMLEscaping() {
+ enc.escapeHTML = false
+}
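+
+// encodePretty is an illustrative sketch, not part of the upstream file,
+// combining the two knobs above: indented output with HTML escaping off.
+func encodePretty(w io.Writer, v interface{}) error {
+	enc := NewEncoder(w)
+	enc.Indent("", "  ")
+	enc.DisableHTMLEscaping()
+	return enc.Encode(v)
+}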
+
+// RawMessage is a raw encoded JSON value.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage []byte
+
+// MarshalJSON returns *m as the JSON encoding of m.
+func (m *RawMessage) MarshalJSON() ([]byte, error) {
+ return *m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+ if m == nil {
+ return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
+ }
+ *m = append((*m)[0:0], data...)
+ return nil
+}
+
+var _ Marshaler = (*RawMessage)(nil)
+var _ Unmarshaler = (*RawMessage)(nil)
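+
+// decodeEnvelope sketches the delayed-decoding pattern described above; the
+// envelope's field names are illustrative, not part of this package. Body
+// stays raw until the caller knows what Kind it holds.
+func decodeEnvelope(data []byte) (kind string, body []byte, err error) {
+	var env struct {
+		Kind string
+		Body RawMessage // left undecoded by Unmarshal
+	}
+	err = Unmarshal(data, &env)
+	return env.Kind, []byte(env.Body), err
+}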
+
+// A Token holds a value of one of these types:
+//
+// Delim, for the four JSON delimiters [ ] { }
+// bool, for JSON booleans
+// float64, for JSON numbers
+// Number, for JSON numbers
+// string, for JSON string literals
+// nil, for JSON null
+//
+type Token interface{}
+
+const (
+ tokenTopValue = iota
+ tokenArrayStart
+ tokenArrayValue
+ tokenArrayComma
+ tokenObjectStart
+ tokenObjectKey
+ tokenObjectColon
+ tokenObjectValue
+ tokenObjectComma
+)
+
+// tokenPrepareForDecode advances dec.tokenState from a separator state
+// to a value state.
+func (dec *Decoder) tokenPrepareForDecode() error {
+ // Note: Not calling peek before switch, to avoid
+ // putting peek into the standard Decode path.
+ // peek is only called when using the Token API.
+ switch dec.tokenState {
+ case tokenArrayComma:
+ c, err := dec.peek()
+ if err != nil {
+ return err
+ }
+ if c != ',' {
+ return &SyntaxError{"expected comma after array element", 0}
+ }
+ dec.scanp++
+ dec.tokenState = tokenArrayValue
+ case tokenObjectColon:
+ c, err := dec.peek()
+ if err != nil {
+ return err
+ }
+ if c != ':' {
+ return &SyntaxError{"expected colon after object key", 0}
+ }
+ dec.scanp++
+ dec.tokenState = tokenObjectValue
+ }
+ return nil
+}
+
+func (dec *Decoder) tokenValueAllowed() bool {
+ switch dec.tokenState {
+ case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
+ return true
+ }
+ return false
+}
+
+func (dec *Decoder) tokenValueEnd() {
+ switch dec.tokenState {
+ case tokenArrayStart, tokenArrayValue:
+ dec.tokenState = tokenArrayComma
+ case tokenObjectValue:
+ dec.tokenState = tokenObjectComma
+ }
+}
+
+// A Delim is a JSON array or object delimiter, one of [ ] { or }.
+type Delim rune
+
+func (d Delim) String() string {
+ return string(d)
+}
+
+// Token returns the next JSON token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Token guarantees that the delimiters [ ] { } it returns are
+// properly nested and matched: if Token encounters an unexpected
+// delimiter in the input, it will return an error.
+//
+// The input stream consists of basic JSON values—bool, string,
+// number, and null—along with delimiters [ ] { } of type Delim
+// to mark the start and end of arrays and objects.
+// Commas and colons are elided.
+func (dec *Decoder) Token() (Token, error) {
+ for {
+ c, err := dec.peek()
+ if err != nil {
+ return nil, err
+ }
+ switch c {
+ case '[':
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+ dec.tokenState = tokenArrayStart
+ return Delim('['), nil
+
+ case ']':
+ if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+ dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+ dec.tokenValueEnd()
+ return Delim(']'), nil
+
+ case '{':
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+ dec.tokenState = tokenObjectStart
+ return Delim('{'), nil
+
+ case '}':
+ if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+ dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+ dec.tokenValueEnd()
+ return Delim('}'), nil
+
+ case ':':
+ if dec.tokenState != tokenObjectColon {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = tokenObjectValue
+ continue
+
+ case ',':
+ if dec.tokenState == tokenArrayComma {
+ dec.scanp++
+ dec.tokenState = tokenArrayValue
+ continue
+ }
+ if dec.tokenState == tokenObjectComma {
+ dec.scanp++
+ dec.tokenState = tokenObjectKey
+ continue
+ }
+ return dec.tokenError(c)
+
+ case '"':
+ if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
+ var x string
+ old := dec.tokenState
+ dec.tokenState = tokenTopValue
+ err := dec.Decode(&x)
+ dec.tokenState = old
+ if err != nil {
+ clearOffset(err)
+ return nil, err
+ }
+ dec.tokenState = tokenObjectColon
+ return x, nil
+ }
+ fallthrough
+
+ default:
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ var x interface{}
+ if err := dec.Decode(&x); err != nil {
+ clearOffset(err)
+ return nil, err
+ }
+ return x, nil
+ }
+ }
+}
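+
+// countStrings is a usage sketch for Token, not part of the upstream file:
+// it walks a stream and counts string tokens (object keys included),
+// relying on Token's guarantee that delimiters are matched.
+func countStrings(r io.Reader) (int, error) {
+	dec := NewDecoder(r)
+	n := 0
+	for {
+		tok, err := dec.Token()
+		if err == io.EOF {
+			return n, nil
+		}
+		if err != nil {
+			return n, err
+		}
+		if _, ok := tok.(string); ok {
+			n++
+		}
+	}
+}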
+
+func clearOffset(err error) {
+ if s, ok := err.(*SyntaxError); ok {
+ s.Offset = 0
+ }
+}
+
+func (dec *Decoder) tokenError(c byte) (Token, error) {
+ var context string
+ switch dec.tokenState {
+ case tokenTopValue:
+ context = " looking for beginning of value"
+ case tokenArrayStart, tokenArrayValue, tokenObjectValue:
+ context = " looking for beginning of value"
+ case tokenArrayComma:
+ context = " after array element"
+ case tokenObjectKey:
+ context = " looking for beginning of object key string"
+ case tokenObjectColon:
+ context = " after object key"
+ case tokenObjectComma:
+ context = " after object key:value pair"
+ }
+ return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
+}
+
+// More reports whether there is another element in the
+// current array or object being parsed.
+func (dec *Decoder) More() bool {
+ c, err := dec.peek()
+ return err == nil && c != ']' && c != '}'
+}
+
+func (dec *Decoder) peek() (byte, error) {
+ var err error
+ for {
+ for i := dec.scanp; i < len(dec.buf); i++ {
+ c := dec.buf[i]
+ if isSpace(c) {
+ continue
+ }
+ dec.scanp = i
+ return c, nil
+ }
+ // buffer has been scanned, now report any error
+ if err != nil {
+ return 0, err
+ }
+ err = dec.refill()
+ }
+}
+
+/*
+TODO
+
+// EncodeToken writes the given JSON token to the stream.
+// It returns an error if the delimiters [ ] { } are not properly used.
+//
+// EncodeToken does not call Flush, because usually it is part of
+// a larger operation such as Encode, and those will call Flush when finished.
+// Callers that create an Encoder and then invoke EncodeToken directly,
+// without using Encode, need to call Flush when finished to ensure that
+// the JSON is written to the underlying writer.
+func (e *Encoder) EncodeToken(t Token) error {
+ ...
+}
+
+*/
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/stream_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/stream_test.go
new file mode 100644
index 00000000000..0abdf7b5654
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/stream_test.go
@@ -0,0 +1,418 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+// Test values for the stream test.
+// One of each JSON kind.
+var streamTest = []interface{}{
+ 0.1,
+ "hello",
+ nil,
+ true,
+ false,
+ []interface{}{"a", "b", "c"},
+ map[string]interface{}{"K": "Kelvin", "ß": "long s"},
+ 3.14, // another value to make sure something can follow map
+}
+
+var streamEncoded = `0.1
+"hello"
+null
+true
+false
+["a","b","c"]
+{"ß":"long s","K":"Kelvin"}
+3.14
+`
+
+func TestEncoder(t *testing.T) {
+ for i := 0; i <= len(streamTest); i++ {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ for j, v := range streamTest[0:i] {
+ if err := enc.Encode(v); err != nil {
+ t.Fatalf("encode #%d: %v", j, err)
+ }
+ }
+ if have, want := buf.String(), nlines(streamEncoded, i); have != want {
+ t.Errorf("encoding %d items: mismatch", i)
+ diff(t, []byte(have), []byte(want))
+ break
+ }
+ }
+}
+
+var streamEncodedIndent = `0.1
+"hello"
+null
+true
+false
+[
+>."a",
+>."b",
+>."c"
+>]
+{
+>."ß": "long s",
+>."K": "Kelvin"
+>}
+3.14
+`
+
+func TestEncoderIndent(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ enc.Indent(">", ".")
+ for _, v := range streamTest {
+ enc.Encode(v)
+ }
+ if have, want := buf.String(), streamEncodedIndent; have != want {
+ t.Error("indented encoding mismatch")
+ diff(t, []byte(have), []byte(want))
+ }
+}
+
+func TestEncoderDisableHTMLEscaping(t *testing.T) {
+ var c C
+ var ct CText
+ for _, tt := range []struct {
+ name string
+ v interface{}
+ wantEscape string
+ want string
+ }{
+ {"c", c, `"\u003c\u0026\u003e"`, `"<&>"`},
+ {"ct", ct, `"\"\u003c\u0026\u003e\""`, `"\"<&>\""`},
+ {`"<&>"`, "<&>", `"\u003c\u0026\u003e"`, `"<&>"`},
+ } {
+ var buf bytes.Buffer
+ enc := NewEncoder(&buf)
+ if err := enc.Encode(tt.v); err != nil {
+ t.Fatalf("Encode(%s): %s", tt.name, err)
+ }
+ if got := strings.TrimSpace(buf.String()); got != tt.wantEscape {
+ t.Errorf("Encode(%s) = %#q, want %#q", tt.name, got, tt.wantEscape)
+ }
+ buf.Reset()
+ enc.DisableHTMLEscaping()
+ if err := enc.Encode(tt.v); err != nil {
+ t.Fatalf("DisableHTMLEscaping Encode(%s): %s", tt.name, err)
+ }
+ if got := strings.TrimSpace(buf.String()); got != tt.want {
+ t.Errorf("DisableHTMLEscaping Encode(%s) = %#q, want %#q",
+ tt.name, got, tt.want)
+ }
+ }
+}
+
+func TestDecoder(t *testing.T) {
+ for i := 0; i <= len(streamTest); i++ {
+		// Unlike the upstream test, the newlines are kept in the input:
+		// this fork scans unquoted names, so stripping them would merge
+		// adjacent literals (for example "null" and "true" would scan as
+		// the single name "nulltrue").
+		var buf bytes.Buffer
+		for _, c := range nlines(streamEncoded, i) {
+			buf.WriteRune(c)
+		}
+ out := make([]interface{}, i)
+ dec := NewDecoder(&buf)
+ for j := range out {
+ if err := dec.Decode(&out[j]); err != nil {
+ t.Fatalf("decode #%d/%d: %v", j, i, err)
+ }
+ }
+ if !reflect.DeepEqual(out, streamTest[0:i]) {
+ t.Errorf("decoding %d items: mismatch", i)
+ for j := range out {
+ if !reflect.DeepEqual(out[j], streamTest[j]) {
+ t.Errorf("#%d: have %v want %v", j, out[j], streamTest[j])
+ }
+ }
+ break
+ }
+ }
+}
+
+func TestDecoderBuffered(t *testing.T) {
+ r := strings.NewReader(`{"Name": "Gopher"} extra `)
+ var m struct {
+ Name string
+ }
+ d := NewDecoder(r)
+ err := d.Decode(&m)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if m.Name != "Gopher" {
+ t.Errorf("Name = %q; want Gopher", m.Name)
+ }
+ rest, err := ioutil.ReadAll(d.Buffered())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if g, w := string(rest), " extra "; g != w {
+ t.Errorf("Remaining = %q; want %q", g, w)
+ }
+}
+
+func nlines(s string, n int) string {
+ if n <= 0 {
+ return ""
+ }
+ for i, c := range s {
+ if c == '\n' {
+ if n--; n == 0 {
+ return s[0 : i+1]
+ }
+ }
+ }
+ return s
+}
+
+func TestRawMessage(t *testing.T) {
+ // TODO(rsc): Should not need the * in *RawMessage
+ var data struct {
+ X float64
+ Id *RawMessage
+ Y float32
+ }
+ const raw = `["\u0056",null]`
+ const msg = `{"X":0.1,"Id":["\u0056",null],"Y":0.2}`
+ err := Unmarshal([]byte(msg), &data)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if string([]byte(*data.Id)) != raw {
+ t.Fatalf("Raw mismatch: have %#q want %#q", []byte(*data.Id), raw)
+ }
+ b, err := Marshal(&data)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if string(b) != msg {
+ t.Fatalf("Marshal: have %#q want %#q", b, msg)
+ }
+}
+
+func TestNullRawMessage(t *testing.T) {
+ // TODO(rsc): Should not need the * in *RawMessage
+ var data struct {
+ X float64
+ Id *RawMessage
+ Y float32
+ }
+ data.Id = new(RawMessage)
+ const msg = `{"X":0.1,"Id":null,"Y":0.2}`
+ err := Unmarshal([]byte(msg), &data)
+ if err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if data.Id != nil {
+ t.Fatalf("Raw mismatch: have non-nil, want nil")
+ }
+ b, err := Marshal(&data)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if string(b) != msg {
+ t.Fatalf("Marshal: have %#q want %#q", b, msg)
+ }
+}
+
+var blockingTests = []string{
+ `{"x": 1}`,
+ `[1, 2, 3]`,
+}
+
+func TestBlocking(t *testing.T) {
+ for _, enc := range blockingTests {
+ r, w := net.Pipe()
+ go w.Write([]byte(enc))
+ var val interface{}
+
+ // If Decode reads beyond what w.Write writes above,
+ // it will block, and the test will deadlock.
+ if err := NewDecoder(r).Decode(&val); err != nil {
+ t.Errorf("decoding %s: %v", enc, err)
+ }
+ r.Close()
+ w.Close()
+ }
+}
+
+func BenchmarkEncoderEncode(b *testing.B) {
+ b.ReportAllocs()
+ type T struct {
+ X, Y string
+ }
+ v := &T{"foo", "bar"}
+ for i := 0; i < b.N; i++ {
+ if err := NewEncoder(ioutil.Discard).Encode(v); err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+type tokenStreamCase struct {
+ json string
+ expTokens []interface{}
+}
+
+type decodeThis struct {
+ v interface{}
+}
+
+var tokenStreamCases []tokenStreamCase = []tokenStreamCase{
+ // streaming token cases
+ {json: `10`, expTokens: []interface{}{float64(10)}},
+ {json: ` [10] `, expTokens: []interface{}{
+ Delim('['), float64(10), Delim(']')}},
+ {json: ` [false,10,"b"] `, expTokens: []interface{}{
+ Delim('['), false, float64(10), "b", Delim(']')}},
+ {json: `{ "a": 1 }`, expTokens: []interface{}{
+ Delim('{'), "a", float64(1), Delim('}')}},
+ {json: `{"a": 1, "b":"3"}`, expTokens: []interface{}{
+ Delim('{'), "a", float64(1), "b", "3", Delim('}')}},
+ {json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
+ Delim('['),
+ Delim('{'), "a", float64(1), Delim('}'),
+ Delim('{'), "a", float64(2), Delim('}'),
+ Delim(']')}},
+ {json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
+ Delim('{'), "obj", Delim('{'), "a", float64(1), Delim('}'),
+ Delim('}')}},
+ {json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
+ Delim('{'), "obj", Delim('['),
+ Delim('{'), "a", float64(1), Delim('}'),
+ Delim(']'), Delim('}')}},
+
+ // streaming tokens with intermittent Decode()
+ {json: `{ "a": 1 }`, expTokens: []interface{}{
+ Delim('{'), "a",
+ decodeThis{float64(1)},
+ Delim('}')}},
+ {json: ` [ { "a" : 1 } ] `, expTokens: []interface{}{
+ Delim('['),
+ decodeThis{map[string]interface{}{"a": float64(1)}},
+ Delim(']')}},
+ {json: ` [{"a": 1},{"a": 2}] `, expTokens: []interface{}{
+ Delim('['),
+ decodeThis{map[string]interface{}{"a": float64(1)}},
+ decodeThis{map[string]interface{}{"a": float64(2)}},
+ Delim(']')}},
+ {json: `{ "obj" : [ { "a" : 1 } ] }`, expTokens: []interface{}{
+ Delim('{'), "obj", Delim('['),
+ decodeThis{map[string]interface{}{"a": float64(1)}},
+ Delim(']'), Delim('}')}},
+
+ {json: `{"obj": {"a": 1}}`, expTokens: []interface{}{
+ Delim('{'), "obj",
+ decodeThis{map[string]interface{}{"a": float64(1)}},
+ Delim('}')}},
+ {json: `{"obj": [{"a": 1}]}`, expTokens: []interface{}{
+ Delim('{'), "obj",
+ decodeThis{[]interface{}{
+ map[string]interface{}{"a": float64(1)},
+ }},
+ Delim('}')}},
+ {json: ` [{"a": 1} {"a": 2}] `, expTokens: []interface{}{
+ Delim('['),
+ decodeThis{map[string]interface{}{"a": float64(1)}},
+ decodeThis{&SyntaxError{"expected comma after array element", 0}},
+ }},
+ {json: `{ "a" 1 }`, expTokens: []interface{}{
+ Delim('{'), "a",
+ decodeThis{&SyntaxError{"expected colon after object key", 0}},
+ }},
+}
+
+func TestDecodeInStream(t *testing.T) {
+
+ for ci, tcase := range tokenStreamCases {
+
+ dec := NewDecoder(strings.NewReader(tcase.json))
+ for i, etk := range tcase.expTokens {
+
+ var tk interface{}
+ var err error
+
+ if dt, ok := etk.(decodeThis); ok {
+ etk = dt.v
+ err = dec.Decode(&tk)
+ } else {
+ tk, err = dec.Token()
+ }
+ if experr, ok := etk.(error); ok {
+ if err == nil || err.Error() != experr.Error() {
+ t.Errorf("case %v: Expected error %v in %q, but was %v", ci, experr, tcase.json, err)
+ }
+ break
+ } else if err == io.EOF {
+ t.Errorf("case %v: Unexpected EOF in %q", ci, tcase.json)
+ break
+ } else if err != nil {
+ t.Errorf("case %v: Unexpected error '%v' in %q", ci, err, tcase.json)
+ break
+ }
+ if !reflect.DeepEqual(tk, etk) {
+ t.Errorf(`case %v: %q @ %v expected %T(%v) was %T(%v)`, ci, tcase.json, i, etk, etk, tk, tk)
+ break
+ }
+ }
+ }
+
+}
+
+// Test from golang.org/issue/11893
+func TestHTTPDecoding(t *testing.T) {
+ const raw = `{ "foo": "bar" }`
+
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte(raw))
+ }))
+ defer ts.Close()
+ res, err := http.Get(ts.URL)
+ if err != nil {
+ log.Fatalf("GET failed: %v", err)
+ }
+ defer res.Body.Close()
+
+ foo := struct {
+ Foo string
+ }{}
+
+ d := NewDecoder(res.Body)
+ err = d.Decode(&foo)
+ if err != nil {
+ t.Fatalf("Decode: %v", err)
+ }
+ if foo.Foo != "bar" {
+ t.Errorf("decoded %q; want \"bar\"", foo.Foo)
+ }
+
+ // make sure we get the EOF the second time
+ err = d.Decode(&foo)
+ if err != io.EOF {
+ t.Errorf("err = %v; want io.EOF", err)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tagkey_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tagkey_test.go
new file mode 100644
index 00000000000..c1739ea97f7
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tagkey_test.go
@@ -0,0 +1,115 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "testing"
+)
+
+type basicLatin2xTag struct {
+ V string `json:"$%-/"`
+}
+
+type basicLatin3xTag struct {
+ V string `json:"0123456789"`
+}
+
+type basicLatin4xTag struct {
+ V string `json:"ABCDEFGHIJKLMO"`
+}
+
+type basicLatin5xTag struct {
+ V string `json:"PQRSTUVWXYZ_"`
+}
+
+type basicLatin6xTag struct {
+ V string `json:"abcdefghijklmno"`
+}
+
+type basicLatin7xTag struct {
+ V string `json:"pqrstuvwxyz"`
+}
+
+type miscPlaneTag struct {
+ V string `json:"色は匂へど"`
+}
+
+type percentSlashTag struct {
+ V string `json:"text/html%"` // https://golang.org/issue/2718
+}
+
+type punctuationTag struct {
+ V string `json:"!#$%&()*+-./:<=>?@[]^_{|}~"` // https://golang.org/issue/3546
+}
+
+type emptyTag struct {
+ W string
+}
+
+type misnamedTag struct {
+ X string `jsom:"Misnamed"`
+}
+
+type badFormatTag struct {
+ Y string `:"BadFormat"`
+}
+
+type badCodeTag struct {
+ Z string `json:" !\"#&'()*+,."`
+}
+
+type spaceTag struct {
+ Q string `json:"With space"`
+}
+
+type unicodeTag struct {
+ W string `json:"Ελλάδα"`
+}
+
+var structTagObjectKeyTests = []struct {
+ raw interface{}
+ value string
+ key string
+}{
+ {basicLatin2xTag{"2x"}, "2x", "$%-/"},
+ {basicLatin3xTag{"3x"}, "3x", "0123456789"},
+ {basicLatin4xTag{"4x"}, "4x", "ABCDEFGHIJKLMO"},
+ {basicLatin5xTag{"5x"}, "5x", "PQRSTUVWXYZ_"},
+ {basicLatin6xTag{"6x"}, "6x", "abcdefghijklmno"},
+ {basicLatin7xTag{"7x"}, "7x", "pqrstuvwxyz"},
+ {miscPlaneTag{"いろはにほへと"}, "いろはにほへと", "色は匂へど"},
+ {emptyTag{"Pour Moi"}, "Pour Moi", "W"},
+ {misnamedTag{"Animal Kingdom"}, "Animal Kingdom", "X"},
+ {badFormatTag{"Orfevre"}, "Orfevre", "Y"},
+ {badCodeTag{"Reliable Man"}, "Reliable Man", "Z"},
+ {percentSlashTag{"brut"}, "brut", "text/html%"},
+ {punctuationTag{"Union Rags"}, "Union Rags", "!#$%&()*+-./:<=>?@[]^_{|}~"},
+ {spaceTag{"Perreddu"}, "Perreddu", "With space"},
+ {unicodeTag{"Loukanikos"}, "Loukanikos", "Ελλάδα"},
+}
+
+func TestStructTagObjectKey(t *testing.T) {
+ for _, tt := range structTagObjectKeyTests {
+ b, err := Marshal(tt.raw)
+ if err != nil {
+ t.Fatalf("Marshal(%#q) failed: %v", tt.raw, err)
+ }
+ var f interface{}
+ err = Unmarshal(b, &f)
+ if err != nil {
+ t.Fatalf("Unmarshal(%#q) failed: %v", b, err)
+ }
+ for i, v := range f.(map[string]interface{}) {
+ switch i {
+ case tt.key:
+ if s, ok := v.(string); !ok || s != tt.value {
+ t.Fatalf("Unexpected value: %#q, want %v", s, tt.value)
+ }
+ default:
+ t.Fatalf("Unexpected key: %#q, from %#q", i, b)
+ }
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tags.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tags.go
new file mode 100644
index 00000000000..c38fd5102f6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tags.go
@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
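+
+// tagNameAndOmit is an illustrative sketch of how the struct-field code uses
+// this file: split the tag once with parseTag, then test options with
+// Contains.
+func tagNameAndOmit(tag string) (name string, omitempty bool) {
+	name, opts := parseTag(tag)
+	return name, opts.Contains("omitempty")
+}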
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tags_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tags_test.go
new file mode 100644
index 00000000000..8ba8ddd5f80
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/tags_test.go
@@ -0,0 +1,28 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "testing"
+)
+
+func TestTagParsing(t *testing.T) {
+ name, opts := parseTag("field,foobar,foo")
+ if name != "field" {
+ t.Fatalf("name = %q, want field", name)
+ }
+ for _, tt := range []struct {
+ opt string
+ want bool
+ }{
+ {"foobar", true},
+ {"foo", true},
+ {"bar", false},
+ } {
+ if opts.Contains(tt.opt) != tt.want {
+ t.Errorf("Contains(%q) = %v", tt.opt, !tt.want)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/testdata/code.json.gz b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/testdata/code.json.gz
new file mode 100644
index 00000000000..1572a92bfbd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/json/testdata/code.json.gz
Binary files differ
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl.c b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl.c
new file mode 100644
index 00000000000..8be0bc45964
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl.c
@@ -0,0 +1,77 @@
+// +build !windows
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <sasl/sasl.h>
+
+static int mgo_sasl_simple(void *context, int id, const char **result, unsigned int *len)
+{
+ if (!result) {
+ return SASL_BADPARAM;
+ }
+ switch (id) {
+ case SASL_CB_USER:
+ *result = (char *)context;
+ break;
+ case SASL_CB_AUTHNAME:
+ *result = (char *)context;
+ break;
+ case SASL_CB_LANGUAGE:
+ *result = NULL;
+ break;
+ default:
+ return SASL_BADPARAM;
+ }
+ if (len) {
+ *len = *result ? strlen(*result) : 0;
+ }
+ return SASL_OK;
+}
+
+typedef int (*callback)(void);
+
+static int mgo_sasl_secret(sasl_conn_t *conn, void *context, int id, sasl_secret_t **result)
+{
+ if (!conn || !result || id != SASL_CB_PASS) {
+ return SASL_BADPARAM;
+ }
+ *result = (sasl_secret_t *)context;
+ return SASL_OK;
+}
+
+sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password)
+{
+	sasl_callback_t *cb = malloc(4 * sizeof(sasl_callback_t));
+	int n = 0;
+
+	if (!cb) {
+		/* out of memory */
+		return NULL;
+	}
+
+ size_t len = strlen(password);
+ sasl_secret_t *secret = (sasl_secret_t*)malloc(sizeof(sasl_secret_t) + len);
+ if (!secret) {
+ free(cb);
+ return NULL;
+ }
+ strcpy((char *)secret->data, password);
+ secret->len = len;
+
+ cb[n].id = SASL_CB_PASS;
+ cb[n].proc = (callback)&mgo_sasl_secret;
+ cb[n].context = secret;
+ n++;
+
+ cb[n].id = SASL_CB_USER;
+ cb[n].proc = (callback)&mgo_sasl_simple;
+ cb[n].context = (char*)username;
+ n++;
+
+ cb[n].id = SASL_CB_AUTHNAME;
+ cb[n].proc = (callback)&mgo_sasl_simple;
+ cb[n].context = (char*)username;
+ n++;
+
+ cb[n].id = SASL_CB_LIST_END;
+ cb[n].proc = NULL;
+ cb[n].context = NULL;
+
+ return cb;
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl.go
new file mode 100644
index 00000000000..8375dddf82a
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl.go
@@ -0,0 +1,138 @@
+// Package sasl is an implementation detail of the mgo package.
+//
+// This package is not meant to be used by itself.
+//
+
+// +build !windows
+
+package sasl
+
+// #cgo LDFLAGS: -lsasl2
+//
+// struct sasl_conn {};
+//
+// #include <stdlib.h>
+// #include <sasl/sasl.h>
+//
+// sasl_callback_t *mgo_sasl_callbacks(const char *username, const char *password);
+//
+import "C"
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "unsafe"
+)
+
+type saslStepper interface {
+ Step(serverData []byte) (clientData []byte, done bool, err error)
+ Close()
+}
+
+type saslSession struct {
+ conn *C.sasl_conn_t
+ step int
+ mech string
+
+ cstrings []*C.char
+ callbacks *C.sasl_callback_t
+}
+
+var initError error
+var initOnce sync.Once
+
+func initSASL() {
+ rc := C.sasl_client_init(nil)
+ if rc != C.SASL_OK {
+ initError = saslError(rc, nil, "cannot initialize SASL library")
+ }
+}
+
+func New(username, password, mechanism, service, host string) (saslStepper, error) {
+ initOnce.Do(initSASL)
+ if initError != nil {
+ return nil, initError
+ }
+
+ ss := &saslSession{mech: mechanism}
+ if service == "" {
+ service = "mongodb"
+ }
+ if i := strings.Index(host, ":"); i >= 0 {
+ host = host[:i]
+ }
+ ss.callbacks = C.mgo_sasl_callbacks(ss.cstr(username), ss.cstr(password))
+ rc := C.sasl_client_new(ss.cstr(service), ss.cstr(host), nil, nil, ss.callbacks, 0, &ss.conn)
+ if rc != C.SASL_OK {
+ ss.Close()
+ return nil, saslError(rc, nil, "cannot create new SASL client")
+ }
+ return ss, nil
+}
+
+func (ss *saslSession) cstr(s string) *C.char {
+ cstr := C.CString(s)
+ ss.cstrings = append(ss.cstrings, cstr)
+ return cstr
+}
+
+func (ss *saslSession) Close() {
+ for _, cstr := range ss.cstrings {
+ C.free(unsafe.Pointer(cstr))
+ }
+ ss.cstrings = nil
+
+ if ss.callbacks != nil {
+ // The first callback (SASL_CB_PASS) carries the sasl_secret_t that
+ // mgo_sasl_callbacks allocated; free it before the array itself.
+ C.free(unsafe.Pointer(ss.callbacks.context))
+ C.free(unsafe.Pointer(ss.callbacks))
+ }
+
+ // The documentation of SASL dispose makes it clear that this should only
+ // be done when the connection is done, not when the authentication phase
+ // is done, because an encryption layer may have been negotiated.
+ // Even then, we'll do this for now, because it's simpler and prevents
+ // keeping track of this state for every socket. If it breaks, we'll fix it.
+ C.sasl_dispose(&ss.conn)
+}
+
+func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
+ ss.step++
+ if ss.step > 10 {
+ return nil, false, fmt.Errorf("too many SASL steps without authentication")
+ }
+ var cclientData *C.char
+ var cclientDataLen C.uint
+ var rc C.int
+ if ss.step == 1 {
+ var mechanism *C.char // ignored - must match cred
+ rc = C.sasl_client_start(ss.conn, ss.cstr(ss.mech), nil, &cclientData, &cclientDataLen, &mechanism)
+ } else {
+ var cserverData *C.char
+ var cserverDataLen C.uint
+ if len(serverData) > 0 {
+ cserverData = (*C.char)(unsafe.Pointer(&serverData[0]))
+ cserverDataLen = C.uint(len(serverData))
+ }
+ rc = C.sasl_client_step(ss.conn, cserverData, cserverDataLen, nil, &cclientData, &cclientDataLen)
+ }
+ if cclientData != nil && cclientDataLen > 0 {
+ clientData = C.GoBytes(unsafe.Pointer(cclientData), C.int(cclientDataLen))
+ }
+ if rc == C.SASL_OK {
+ return clientData, true, nil
+ }
+ if rc == C.SASL_CONTINUE {
+ return clientData, false, nil
+ }
+ return nil, false, saslError(rc, ss.conn, "cannot establish SASL session")
+}
+
+func saslError(rc C.int, conn *C.sasl_conn_t, msg string) error {
+ var detail string
+ if conn == nil {
+ detail = C.GoString(C.sasl_errstring(rc, nil, nil))
+ } else {
+ detail = C.GoString(C.sasl_errdetail(conn))
+ }
+ return fmt.Errorf("%s: %s", msg, detail)
+}
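+
+// Editor's sketch (not part of the upstream source): a caller drives the
+// stepper by feeding each server challenge to Step until done is true.
+// The transport helpers named below are hypothetical placeholders.
+//
+// ss, err := New(user, pass, "PLAIN", "mongodb", host)
+// if err != nil {
+// return err
+// }
+// defer ss.Close()
+// var serverData []byte
+// for {
+// clientData, done, err := ss.Step(serverData)
+// if err != nil {
+// return err
+// }
+// sendToServer(clientData) // hypothetical transport
+// if done {
+// break
+// }
+// serverData = recvFromServer() // hypothetical transport
+// }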
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c
new file mode 100644
index 00000000000..c359fd6edba
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.c
@@ -0,0 +1,122 @@
+#include "sasl_windows.h"
+
+static const LPSTR SSPI_PACKAGE_NAME = "kerberos";
+
+SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle *cred_handle, char *username, char *password, char *domain)
+{
+ SEC_WINNT_AUTH_IDENTITY auth_identity;
+ SECURITY_INTEGER ignored;
+
+ auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;
+ auth_identity.User = (LPSTR) username;
+ auth_identity.UserLength = strlen(username);
+ auth_identity.Password = NULL;
+ auth_identity.PasswordLength = 0;
+ if(password){
+ auth_identity.Password = (LPSTR) password;
+ auth_identity.PasswordLength = strlen(password);
+ }
+ auth_identity.Domain = (LPSTR) domain;
+ auth_identity.DomainLength = strlen(domain);
+ return call_sspi_acquire_credentials_handle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, &auth_identity, NULL, NULL, cred_handle, &ignored);
+}
+
+int sspi_step(CredHandle *cred_handle, int has_context, CtxtHandle *context, PVOID buffer, ULONG buffer_length, PVOID *out_buffer, ULONG *out_buffer_length, char *target)
+{
+ SecBufferDesc inbuf;
+ SecBuffer in_bufs[1];
+ SecBufferDesc outbuf;
+ SecBuffer out_bufs[1];
+
+ if (has_context > 0) {
+ // If we already have a context, we now have data to send.
+ // Put this data in an inbuf.
+ inbuf.ulVersion = SECBUFFER_VERSION;
+ inbuf.cBuffers = 1;
+ inbuf.pBuffers = in_bufs;
+ in_bufs[0].pvBuffer = buffer;
+ in_bufs[0].cbBuffer = buffer_length;
+ in_bufs[0].BufferType = SECBUFFER_TOKEN;
+ }
+
+ outbuf.ulVersion = SECBUFFER_VERSION;
+ outbuf.cBuffers = 1;
+ outbuf.pBuffers = out_bufs;
+ out_bufs[0].pvBuffer = NULL;
+ out_bufs[0].cbBuffer = 0;
+ out_bufs[0].BufferType = SECBUFFER_TOKEN;
+
+ ULONG context_attr = 0;
+
+ int ret = call_sspi_initialize_security_context(cred_handle,
+ has_context > 0 ? context : NULL,
+ (LPSTR) target,
+ ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH,
+ 0,
+ SECURITY_NETWORK_DREP,
+ has_context > 0 ? &inbuf : NULL,
+ 0,
+ context,
+ &outbuf,
+ &context_attr,
+ NULL);
+
+ *out_buffer = malloc(out_bufs[0].cbBuffer);
+ if (*out_buffer == NULL) {
+ return SEC_E_INSUFFICIENT_MEMORY;
+ }
+ *out_buffer_length = out_bufs[0].cbBuffer;
+ memcpy(*out_buffer, out_bufs[0].pvBuffer, *out_buffer_length);
+
+ return ret;
+}
+
+int sspi_send_client_authz_id(CtxtHandle *context, PVOID *buffer, ULONG *buffer_length, char *user_plus_realm)
+{
+ SecPkgContext_Sizes sizes;
+ SECURITY_STATUS status = call_sspi_query_context_attributes(context, SECPKG_ATTR_SIZES, &sizes);
+
+ if (status != SEC_E_OK) {
+ return status;
+ }
+
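+ // Per RFC 4752, the client's final token is one byte selecting the
+ // security layer (0x01 = none) plus a 3-byte maximum buffer size,
+ // followed by the authorization identity; that message is built here
+ // and then wrapped with EncryptMessage(SECQOP_WRAP_NO_ENCRYPT).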
+ size_t user_plus_realm_length = strlen(user_plus_realm);
+ int msgSize = 4 + user_plus_realm_length;
+ char *msg = malloc((sizes.cbSecurityTrailer + msgSize + sizes.cbBlockSize) * sizeof(char));
+ if (msg == NULL) {
+ return SEC_E_INSUFFICIENT_MEMORY;
+ }
+ msg[sizes.cbSecurityTrailer + 0] = 1;
+ msg[sizes.cbSecurityTrailer + 1] = 0;
+ msg[sizes.cbSecurityTrailer + 2] = 0;
+ msg[sizes.cbSecurityTrailer + 3] = 0;
+ memcpy(&msg[sizes.cbSecurityTrailer + 4], user_plus_realm, user_plus_realm_length);
+
+ SecBuffer wrapBufs[3];
+ SecBufferDesc wrapBufDesc;
+ wrapBufDesc.cBuffers = 3;
+ wrapBufDesc.pBuffers = wrapBufs;
+ wrapBufDesc.ulVersion = SECBUFFER_VERSION;
+
+ wrapBufs[0].cbBuffer = sizes.cbSecurityTrailer;
+ wrapBufs[0].BufferType = SECBUFFER_TOKEN;
+ wrapBufs[0].pvBuffer = msg;
+
+ wrapBufs[1].cbBuffer = msgSize;
+ wrapBufs[1].BufferType = SECBUFFER_DATA;
+ wrapBufs[1].pvBuffer = msg + sizes.cbSecurityTrailer;
+
+ wrapBufs[2].cbBuffer = sizes.cbBlockSize;
+ wrapBufs[2].BufferType = SECBUFFER_PADDING;
+ wrapBufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + msgSize;
+
+ status = call_sspi_encrypt_message(context, SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0);
+ if (status != SEC_E_OK) {
+ free(msg);
+ return status;
+ }
+
+ *buffer_length = wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer + wrapBufs[2].cbBuffer;
+ *buffer = malloc(*buffer_length);
+ if (*buffer == NULL) {
+ free(msg);
+ return SEC_E_INSUFFICIENT_MEMORY;
+ }
+
+ memcpy(*buffer, wrapBufs[0].pvBuffer, wrapBufs[0].cbBuffer);
+ memcpy(*buffer + wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer);
+ memcpy(*buffer + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer);
+
+ free(msg);
+ return SEC_E_OK;
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go
new file mode 100644
index 00000000000..d8ec0013709
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.go
@@ -0,0 +1,142 @@
+package sasl
+
+// #include "sasl_windows.h"
+import "C"
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "unsafe"
+)
+
+type saslStepper interface {
+ Step(serverData []byte) (clientData []byte, done bool, err error)
+ Close()
+}
+
+type saslSession struct {
+ // Credentials
+ mech string
+ service string
+ host string
+ userPlusRealm string
+ target string
+ domain string
+
+ // Internal state
+ authComplete bool
+ errored bool
+ step int
+
+ // C internal state
+ credHandle C.CredHandle
+ context C.CtxtHandle
+ hasContext C.int
+
+ // Keep track of pointers we need to explicitly free
+ stringsToFree []*C.char
+}
+
+var initError error
+var initOnce sync.Once
+
+func initSSPI() {
+ rc := C.load_secur32_dll()
+ if rc != 0 {
+ initError = fmt.Errorf("Error loading libraries: %v", rc)
+ }
+}
+
+func New(username, password, mechanism, service, host string) (saslStepper, error) {
+ initOnce.Do(initSSPI)
+ ss := &saslSession{mech: mechanism, hasContext: 0, userPlusRealm: username}
+ if service == "" {
+ service = "mongodb"
+ }
+ if i := strings.Index(host, ":"); i >= 0 {
+ host = host[:i]
+ }
+ ss.service = service
+ ss.host = host
+
+ usernameComponents := strings.Split(username, "@")
+ if len(usernameComponents) < 2 {
+ return nil, fmt.Errorf("Username '%v' doesn't contain a realm!", username)
+ }
+ user := usernameComponents[0]
+ ss.domain = usernameComponents[1]
+ ss.target = fmt.Sprintf("%s/%s", ss.service, ss.host)
+
+ var status C.SECURITY_STATUS
+ // Step 0: call AcquireCredentialsHandle to get a nice SSPI CredHandle
+ if len(password) > 0 {
+ status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), ss.cstr(password), ss.cstr(ss.domain))
+ } else {
+ status = C.sspi_acquire_credentials_handle(&ss.credHandle, ss.cstr(user), nil, ss.cstr(ss.domain))
+ }
+ if status != C.SEC_E_OK {
+ ss.errored = true
+ return nil, fmt.Errorf("Couldn't create new SSPI client, error code %v", status)
+ }
+ return ss, nil
+}
+
+func (ss *saslSession) cstr(s string) *C.char {
+ cstr := C.CString(s)
+ ss.stringsToFree = append(ss.stringsToFree, cstr)
+ return cstr
+}
+
+func (ss *saslSession) Close() {
+ for _, cstr := range ss.stringsToFree {
+ C.free(unsafe.Pointer(cstr))
+ }
+}
+
+func (ss *saslSession) Step(serverData []byte) (clientData []byte, done bool, err error) {
+ ss.step++
+ if ss.step > 10 {
+ return nil, false, fmt.Errorf("too many SSPI steps without authentication")
+ }
+ var buffer C.PVOID
+ var bufferLength C.ULONG
+ var outBuffer C.PVOID
+ var outBufferLength C.ULONG
+ if len(serverData) > 0 {
+ buffer = (C.PVOID)(unsafe.Pointer(&serverData[0]))
+ bufferLength = C.ULONG(len(serverData))
+ }
+ var status C.int
+ if ss.authComplete {
+ // Step 3: last bit of magic to use the correct server credentials
+ status = C.sspi_send_client_authz_id(&ss.context, &outBuffer, &outBufferLength, ss.cstr(ss.userPlusRealm))
+ } else {
+ // Step 1 + Step 2: set up security context with the server and TGT
+ status = C.sspi_step(&ss.credHandle, ss.hasContext, &ss.context, buffer, bufferLength, &outBuffer, &outBufferLength, ss.cstr(ss.target))
+ }
+ if outBuffer != C.PVOID(nil) {
+ defer C.free(unsafe.Pointer(outBuffer))
+ }
+ if status != C.SEC_E_OK && status != C.SEC_I_CONTINUE_NEEDED {
+ ss.errored = true
+ return nil, false, ss.handleSSPIErrorCode(status)
+ }
+
+ clientData = C.GoBytes(unsafe.Pointer(outBuffer), C.int(outBufferLength))
+ if status == C.SEC_E_OK {
+ ss.authComplete = true
+ return clientData, true, nil
+ } else {
+ ss.hasContext = 1
+ return clientData, false, nil
+ }
+}
+
+func (ss *saslSession) handleSSPIErrorCode(code C.int) error {
+ switch {
+ case code == C.SEC_E_TARGET_UNKNOWN:
+ return fmt.Errorf("Target %v@%v not found", ss.target, ss.domain)
+ }
+ return fmt.Errorf("Unknown error doing step %v, error code %v", ss.step, code)
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h
new file mode 100644
index 00000000000..a6b039567cf
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sasl_windows.h
@@ -0,0 +1,7 @@
+#include <windows.h>
+
+#include "sspi_windows.h"
+
+SECURITY_STATUS SEC_ENTRY sspi_acquire_credentials_handle(CredHandle* cred_handle, char* username, char* password, char* domain);
+int sspi_step(CredHandle* cred_handle, int has_context, CtxtHandle* context, PVOID buffer, ULONG buffer_length, PVOID* out_buffer, ULONG* out_buffer_length, char* target);
+int sspi_send_client_authz_id(CtxtHandle* context, PVOID* buffer, ULONG* buffer_length, char* user_plus_realm);
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c
new file mode 100644
index 00000000000..63f9a6f8697
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.c
@@ -0,0 +1,96 @@
+// Code adapted from the NodeJS kerberos library:
+//
+// https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.c
+//
+// Under the terms of the Apache License, Version 2.0:
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+#include <stdlib.h>
+
+#include "sspi_windows.h"
+
+static HINSTANCE sspi_secur32_dll = NULL;
+
+int load_secur32_dll()
+{
+ sspi_secur32_dll = LoadLibrary("secur32.dll");
+ if (sspi_secur32_dll == NULL) {
+ return GetLastError();
+ }
+ return 0;
+}
+
+SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo)
+{
+ if (sspi_secur32_dll == NULL) {
+ return -1;
+ }
+ encryptMessage_fn pfn_encryptMessage = (encryptMessage_fn) GetProcAddress(sspi_secur32_dll, "EncryptMessage");
+ if (!pfn_encryptMessage) {
+ return -2;
+ }
+ return (*pfn_encryptMessage)(phContext, fQOP, pMessage, MessageSeqNo);
+}
+
+SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
+ LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
+ void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
+ PCredHandle phCredential, PTimeStamp ptsExpiry)
+{
+ if (sspi_secur32_dll == NULL) {
+ return -1;
+ }
+ acquireCredentialsHandle_fn pfn_acquireCredentialsHandle;
+#ifdef _UNICODE
+ pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleW");
+#else
+ pfn_acquireCredentialsHandle = (acquireCredentialsHandle_fn) GetProcAddress(sspi_secur32_dll, "AcquireCredentialsHandleA");
+#endif
+ if (!pfn_acquireCredentialsHandle) {
+ return -2;
+ }
+ return (*pfn_acquireCredentialsHandle)(
+ pszPrincipal, pszPackage, fCredentialUse, pvLogonId, pAuthData,
+ pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry);
+}
+
+SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
+ PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName,
+ unsigned long fContextReq, unsigned long Reserved1, unsigned long TargetDataRep,
+ PSecBufferDesc pInput, unsigned long Reserved2, PCtxtHandle phNewContext,
+ PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry)
+{
+ if (sspi_secur32_dll == NULL) {
+ return -1;
+ }
+ initializeSecurityContext_fn pfn_initializeSecurityContext;
+#ifdef _UNICODE
+ pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextW");
+#else
+ pfn_initializeSecurityContext = (initializeSecurityContext_fn) GetProcAddress(sspi_secur32_dll, "InitializeSecurityContextA");
+#endif
+ if (!pfn_initializeSecurityContext) {
+ return -2;
+ }
+ return (*pfn_initializeSecurityContext)(
+ phCredential, phContext, pszTargetName, fContextReq, Reserved1, TargetDataRep,
+ pInput, Reserved2, phNewContext, pOutput, pfContextAttr, ptsExpiry);
+}
+
+SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer)
+{
+ if (sspi_secur32_dll == NULL) {
+ return -1;
+ }
+ queryContextAttributes_fn pfn_queryContextAttributes;
+#ifdef _UNICODE
+ pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesW");
+#else
+ pfn_queryContextAttributes = (queryContextAttributes_fn) GetProcAddress(sspi_secur32_dll, "QueryContextAttributesA");
+#endif
+ if (!pfn_queryContextAttributes) {
+ return -2;
+ }
+ return (*pfn_queryContextAttributes)(phContext, ulAttribute, pBuffer);
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h
new file mode 100644
index 00000000000..d2832703171
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/sasl/sspi_windows.h
@@ -0,0 +1,70 @@
+// Code adapted from the NodeJS kerberos library:
+//
+// https://github.com/christkv/kerberos/tree/master/lib/win32/kerberos_sspi.h
+//
+// Under the terms of the Apache License, Version 2.0:
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+#ifndef SSPI_WINDOWS_H
+#define SSPI_WINDOWS_H
+
+#define SECURITY_WIN32 1
+
+#include <windows.h>
+#include <sspi.h>
+
+int load_secur32_dll();
+
+SECURITY_STATUS SEC_ENTRY call_sspi_encrypt_message(PCtxtHandle phContext, unsigned long fQOP, PSecBufferDesc pMessage, unsigned long MessageSeqNo);
+
+typedef DWORD (WINAPI *encryptMessage_fn)(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo);
+
+SECURITY_STATUS SEC_ENTRY call_sspi_acquire_credentials_handle(
+ LPSTR pszPrincipal, // Name of principal
+ LPSTR pszPackage, // Name of package
+ unsigned long fCredentialUse, // Flags indicating use
+ void *pvLogonId, // Pointer to logon ID
+ void *pAuthData, // Package specific data
+ SEC_GET_KEY_FN pGetKeyFn, // Pointer to GetKey() func
+ void *pvGetKeyArgument, // Value to pass to GetKey()
+ PCredHandle phCredential, // (out) Cred Handle
+ PTimeStamp ptsExpiry // (out) Lifetime (optional)
+);
+
+typedef DWORD (WINAPI *acquireCredentialsHandle_fn)(
+ LPSTR pszPrincipal, LPSTR pszPackage, unsigned long fCredentialUse,
+ void *pvLogonId, void *pAuthData, SEC_GET_KEY_FN pGetKeyFn, void *pvGetKeyArgument,
+ PCredHandle phCredential, PTimeStamp ptsExpiry
+);
+
+SECURITY_STATUS SEC_ENTRY call_sspi_initialize_security_context(
+ PCredHandle phCredential, // Cred to base context
+ PCtxtHandle phContext, // Existing context (OPT)
+ LPSTR pszTargetName, // Name of target
+ unsigned long fContextReq, // Context Requirements
+ unsigned long Reserved1, // Reserved, MBZ
+ unsigned long TargetDataRep, // Data rep of target
+ PSecBufferDesc pInput, // Input Buffers
+ unsigned long Reserved2, // Reserved, MBZ
+ PCtxtHandle phNewContext, // (out) New Context handle
+ PSecBufferDesc pOutput, // (inout) Output Buffers
+ unsigned long *pfContextAttr, // (out) Context attrs
+ PTimeStamp ptsExpiry // (out) Life span (OPT)
+);
+
+typedef DWORD (WINAPI *initializeSecurityContext_fn)(
+ PCredHandle phCredential, PCtxtHandle phContext, LPSTR pszTargetName, unsigned long fContextReq,
+ unsigned long Reserved1, unsigned long TargetDataRep, PSecBufferDesc pInput, unsigned long Reserved2,
+ PCtxtHandle phNewContext, PSecBufferDesc pOutput, unsigned long *pfContextAttr, PTimeStamp ptsExpiry);
+
+SECURITY_STATUS SEC_ENTRY call_sspi_query_context_attributes(
+ PCtxtHandle phContext, // Context to query
+ unsigned long ulAttribute, // Attribute to query
+ void *pBuffer // Buffer for attributes
+);
+
+typedef DWORD (WINAPI *queryContextAttributes_fn)(
+ PCtxtHandle phContext, unsigned long ulAttribute, void *pBuffer);
+
+#endif // SSPI_WINDOWS_H
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/scram/scram.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/scram/scram.go
new file mode 100644
index 00000000000..80cda913526
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/scram/scram.go
@@ -0,0 +1,266 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2014 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package scram implements a SCRAM-{SHA-1,etc} client per RFC 5802.
+//
+// http://tools.ietf.org/html/rfc5802
+//
+package scram
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "hash"
+ "strconv"
+ "strings"
+)
+
+// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc).
+//
+// A Client may be used within a SASL conversation with logic resembling:
+//
+// var in []byte
+// var client = scram.NewClient(sha1.New, user, pass)
+// for !client.Step(in) {
+// out := client.Out()
+// // send out to server; read the server's reply into serverOut
+// in = serverOut
+// }
+// if client.Err() != nil {
+// // auth failed
+// }
+//
+type Client struct {
+ newHash func() hash.Hash
+
+ user string
+ pass string
+ step int
+ out bytes.Buffer
+ err error
+
+ clientNonce []byte
+ serverNonce []byte
+ saltedPass []byte
+ authMsg bytes.Buffer
+}
+
+// NewClient returns a new SCRAM-* client with the provided hash algorithm.
+//
+// For SCRAM-SHA-1, for example, use:
+//
+// client := scram.NewClient(sha1.New, user, pass)
+//
+func NewClient(newHash func() hash.Hash, user, pass string) *Client {
+ c := &Client{
+ newHash: newHash,
+ user: user,
+ pass: pass,
+ }
+ c.out.Grow(256)
+ c.authMsg.Grow(256)
+ return c
+}
+
+// Out returns the data to be sent to the server in the current step.
+func (c *Client) Out() []byte {
+ if c.out.Len() == 0 {
+ return nil
+ }
+ return c.out.Bytes()
+}
+
+// Err returns the error that occurred, or nil if there were no errors.
+func (c *Client) Err() error {
+ return c.err
+}
+
+// SetNonce sets the client nonce to the provided value.
+// If not set, the nonce is generated automatically out of crypto/rand on the first step.
+func (c *Client) SetNonce(nonce []byte) {
+ c.clientNonce = nonce
+}
+
+var escaper = strings.NewReplacer("=", "=3D", ",", "=2C")
+
+// Step processes the incoming data from the server and makes the
+// next round of data for the server available via Client.Out.
+// Step returns false if there are no errors and more data is
+// still expected.
+func (c *Client) Step(in []byte) bool {
+ c.out.Reset()
+ if c.step > 2 || c.err != nil {
+ return false
+ }
+ c.step++
+ switch c.step {
+ case 1:
+ c.err = c.step1(in)
+ case 2:
+ c.err = c.step2(in)
+ case 3:
+ c.err = c.step3(in)
+ }
+ return c.step > 2 || c.err != nil
+}
+
+func (c *Client) step1(in []byte) error {
+ if len(c.clientNonce) == 0 {
+ const nonceLen = 6
+ buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen))
+ if _, err := rand.Read(buf[:nonceLen]); err != nil {
+ return fmt.Errorf("cannot read random SCRAM-SHA-1 nonce from operating system: %v", err)
+ }
+ c.clientNonce = buf[nonceLen:]
+ b64.Encode(c.clientNonce, buf[:nonceLen])
+ }
+ c.authMsg.WriteString("n=")
+ escaper.WriteString(&c.authMsg, c.user)
+ c.authMsg.WriteString(",r=")
+ c.authMsg.Write(c.clientNonce)
+
+ c.out.WriteString("n,,")
+ c.out.Write(c.authMsg.Bytes())
+ return nil
+}
+
+var b64 = base64.StdEncoding
+
+func (c *Client) step2(in []byte) error {
+ c.authMsg.WriteByte(',')
+ c.authMsg.Write(in)
+
+ fields := bytes.Split(in, []byte(","))
+ if len(fields) != 3 {
+ return fmt.Errorf("expected 3 fields in first SCRAM-SHA-1 server message, got %d: %q", len(fields), in)
+ }
+ if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 {
+ return fmt.Errorf("server sent an invalid SCRAM-SHA-1 nonce: %q", fields[0])
+ }
+ if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 {
+ return fmt.Errorf("server sent an invalid SCRAM-SHA-1 salt: %q", fields[1])
+ }
+ if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 {
+ return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2])
+ }
+
+ c.serverNonce = fields[0][2:]
+ if !bytes.HasPrefix(c.serverNonce, c.clientNonce) {
+ return fmt.Errorf("server SCRAM-SHA-1 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce)
+ }
+
+ salt := make([]byte, b64.DecodedLen(len(fields[1][2:])))
+ n, err := b64.Decode(salt, fields[1][2:])
+ if err != nil {
+ return fmt.Errorf("cannot decode SCRAM-SHA-1 salt sent by server: %q", fields[1])
+ }
+ salt = salt[:n]
+ iterCount, err := strconv.Atoi(string(fields[2][2:]))
+ if err != nil {
+ return fmt.Errorf("server sent an invalid SCRAM-SHA-1 iteration count: %q", fields[2])
+ }
+ c.saltPassword(salt, iterCount)
+
+ c.authMsg.WriteString(",c=biws,r=")
+ c.authMsg.Write(c.serverNonce)
+
+ c.out.WriteString("c=biws,r=")
+ c.out.Write(c.serverNonce)
+ c.out.WriteString(",p=")
+ c.out.Write(c.clientProof())
+ return nil
+}
+
+func (c *Client) step3(in []byte) error {
+ var isv, ise bool
+ var fields = bytes.Split(in, []byte(","))
+ if len(fields) == 1 {
+ isv = bytes.HasPrefix(fields[0], []byte("v="))
+ ise = bytes.HasPrefix(fields[0], []byte("e="))
+ }
+ if ise {
+ return fmt.Errorf("SCRAM-SHA-1 authentication error: %s", fields[0][2:])
+ } else if !isv {
+ return fmt.Errorf("unsupported SCRAM-SHA-1 final message from server: %q", in)
+ }
+ if !bytes.Equal(c.serverSignature(), fields[0][2:]) {
+ return fmt.Errorf("cannot authenticate SCRAM-SHA-1 server signature: %q", fields[0][2:])
+ }
+ return nil
+}
+
+func (c *Client) saltPassword(salt []byte, iterCount int) {
+ mac := hmac.New(c.newHash, []byte(c.pass))
+ mac.Write(salt)
+ mac.Write([]byte{0, 0, 0, 1})
+ ui := mac.Sum(nil)
+ hi := make([]byte, len(ui))
+ copy(hi, ui)
+ for i := 1; i < iterCount; i++ {
+ mac.Reset()
+ mac.Write(ui)
+ mac.Sum(ui[:0])
+ for j, b := range ui {
+ hi[j] ^= b
+ }
+ }
+ c.saltedPass = hi
+}
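+
+// Editor's note: saltPassword implements the SCRAM Hi() function, which is
+// PBKDF2 (RFC 2898) using HMAC as the PRF and an output length equal to the
+// hash size. Assuming golang.org/x/crypto/pbkdf2 were available, the same
+// salted password could be computed as:
+//
+// pbkdf2.Key([]byte(c.pass), salt, iterCount, c.newHash().Size(), c.newHash)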
+
+func (c *Client) clientProof() []byte {
+ mac := hmac.New(c.newHash, c.saltedPass)
+ mac.Write([]byte("Client Key"))
+ clientKey := mac.Sum(nil)
+ hash := c.newHash()
+ hash.Write(clientKey)
+ storedKey := hash.Sum(nil)
+ mac = hmac.New(c.newHash, storedKey)
+ mac.Write(c.authMsg.Bytes())
+ clientProof := mac.Sum(nil)
+ for i, b := range clientKey {
+ clientProof[i] ^= b
+ }
+ clientProof64 := make([]byte, b64.EncodedLen(len(clientProof)))
+ b64.Encode(clientProof64, clientProof)
+ return clientProof64
+}
+
+func (c *Client) serverSignature() []byte {
+ mac := hmac.New(c.newHash, c.saltedPass)
+ mac.Write([]byte("Server Key"))
+ serverKey := mac.Sum(nil)
+
+ mac = hmac.New(c.newHash, serverKey)
+ mac.Write(c.authMsg.Bytes())
+ serverSignature := mac.Sum(nil)
+
+ encoded := make([]byte, b64.EncodedLen(len(serverSignature)))
+ b64.Encode(encoded, serverSignature)
+ return encoded
+}
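+
+// Editor's example (sketch, not upstream code): reproducing the RFC 5802
+// test vector (also exercised in scram_test.go) with a fixed nonce:
+//
+// client := NewClient(sha1.New, "user", "pencil")
+// client.SetNonce([]byte("fyko+d2lbbFgONRv9qkxdawL"))
+// client.Step(nil)
+// fmt.Printf("%s\n", client.Out())
+// // prints: n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL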
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/scram/scram_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/scram/scram_test.go
new file mode 100644
index 00000000000..9c20fdfc488
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/internal/scram/scram_test.go
@@ -0,0 +1,67 @@
+package scram_test
+
+import (
+ "crypto/sha1"
+ "testing"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/mgo.v2/internal/scram"
+ "strings"
+)
+
+var _ = Suite(&S{})
+
+func Test(t *testing.T) { TestingT(t) }
+
+type S struct{}
+
+var tests = [][]string{{
+ "U: user pencil",
+ "N: fyko+d2lbbFgONRv9qkxdawL",
+ "C: n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL",
+ "S: r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096",
+ "C: c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts=",
+ "S: v=rmF9pqV8S7suAoZWja4dJRkFsKQ=",
+}, {
+ "U: root fe8c89e308ec08763df36333cbf5d3a2",
+ "N: OTcxNDk5NjM2MzE5",
+ "C: n,,n=root,r=OTcxNDk5NjM2MzE5",
+ "S: r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,s=XRDkVrFC9JuL7/F4tG0acQ==,i=10000",
+ "C: c=biws,r=OTcxNDk5NjM2MzE581Ra3provgG0iDsMkDiIAlrh4532dDLp,p=6y1jp9R7ETyouTXS9fW9k5UHdBc=",
+ "S: v=LBnd9dUJRxdqZiEq91NKP3z/bHA=",
+}}
+
+func (s *S) TestExamples(c *C) {
+ for _, steps := range tests {
+ if len(steps) < 2 || len(steps[0]) < 3 || !strings.HasPrefix(steps[0], "U: ") {
+ c.Fatalf("Invalid test: %#v", steps)
+ }
+ auth := strings.Fields(steps[0][3:])
+ client := scram.NewClient(sha1.New, auth[0], auth[1])
+ first, done := true, false
+ c.Logf("-----")
+ c.Logf("%s", steps[0])
+ for _, step := range steps[1:] {
+ c.Logf("%s", step)
+ switch step[:3] {
+ case "N: ":
+ client.SetNonce([]byte(step[3:]))
+ case "C: ":
+ if first {
+ first = false
+ done = client.Step(nil)
+ }
+ c.Assert(done, Equals, false)
+ c.Assert(client.Err(), IsNil)
+ c.Assert(string(client.Out()), Equals, step[3:])
+ case "S: ":
+ first = false
+ done = client.Step([]byte(step[3:]))
+ default:
+ panic("invalid test line: " + step)
+ }
+ }
+ c.Assert(done, Equals, true)
+ c.Assert(client.Err(), IsNil)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/log.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/log.go
new file mode 100644
index 00000000000..53eb4237b89
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/log.go
@@ -0,0 +1,133 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "fmt"
+ "sync"
+)
+
+// ---------------------------------------------------------------------------
+// Logging integration.
+
+// Avoid importing the log type information unnecessarily. There's a small cost
+// associated with using an interface rather than the type. Depending on how
+// often the logger is plugged in, it would be worth using the type instead.
+type log_Logger interface {
+ Output(calldepth int, s string) error
+}
+
+var (
+ globalLogger log_Logger
+ globalDebug bool
+ globalMutex sync.Mutex
+)
+
+// RACE WARNING: There are known data races when logging, which are manually
+// silenced when the race detector is in use. These data races won't be
+// observed in typical use, because logging is supposed to be set up once when
+// the application starts. Because raceDetector is a constant, the compiler
+// should elide the locks altogether in actual use.
+
+// SetLogger specifies the *log.Logger object to which log messages should be sent.
+func SetLogger(logger log_Logger) {
+ if raceDetector {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+ globalLogger = logger
+}
+
+// SetDebug enables the delivery of debug messages to the logger. It is only
+// meaningful if a logger is also set.
+func SetDebug(debug bool) {
+ if raceDetector {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+ globalDebug = debug
+}
+
+func log(v ...interface{}) {
+ if raceDetector {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+ if globalLogger != nil {
+ globalLogger.Output(2, fmt.Sprint(v...))
+ }
+}
+
+func logln(v ...interface{}) {
+ if raceDetector {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+ if globalLogger != nil {
+ globalLogger.Output(2, fmt.Sprintln(v...))
+ }
+}
+
+func logf(format string, v ...interface{}) {
+ if raceDetector {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+ if globalLogger != nil {
+ globalLogger.Output(2, fmt.Sprintf(format, v...))
+ }
+}
+
+func debug(v ...interface{}) {
+ if raceDetector {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+ if globalDebug && globalLogger != nil {
+ globalLogger.Output(2, fmt.Sprint(v...))
+ }
+}
+
+func debugln(v ...interface{}) {
+ if raceDetector {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+ if globalDebug && globalLogger != nil {
+ globalLogger.Output(2, fmt.Sprintln(v...))
+ }
+}
+
+func debugf(format string, v ...interface{}) {
+ if raceDetector {
+ globalMutex.Lock()
+ defer globalMutex.Unlock()
+ }
+ if globalDebug && globalLogger != nil {
+ globalLogger.Output(2, fmt.Sprintf(format, v...))
+ }
+}
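+
+// Editor's usage sketch (not upstream code), from an importing package: any
+// value with an Output(calldepth int, s string) error method satisfies
+// log_Logger, which the standard library's *log.Logger does:
+//
+// mgo.SetLogger(log.New(os.Stderr, "[mgo] ", log.LstdFlags))
+// mgo.SetDebug(true)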
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/queue.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/queue.go
new file mode 100644
index 00000000000..e9245de7001
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/queue.go
@@ -0,0 +1,91 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+type queue struct {
+ elems []interface{}
+ nelems, popi, pushi int
+}
+
+func (q *queue) Len() int {
+ return q.nelems
+}
+
+func (q *queue) Push(elem interface{}) {
+ //debugf("Pushing(pushi=%d popi=%d cap=%d): %#v\n",
+ // q.pushi, q.popi, len(q.elems), elem)
+ if q.nelems == len(q.elems) {
+ q.expand()
+ }
+ q.elems[q.pushi] = elem
+ q.nelems++
+ q.pushi = (q.pushi + 1) % len(q.elems)
+ //debugf(" Pushed(pushi=%d popi=%d cap=%d): %#v\n",
+ // q.pushi, q.popi, len(q.elems), elem)
+}
+
+func (q *queue) Pop() (elem interface{}) {
+ //debugf("Popping(pushi=%d popi=%d cap=%d)\n",
+ // q.pushi, q.popi, len(q.elems))
+ if q.nelems == 0 {
+ return nil
+ }
+ elem = q.elems[q.popi]
+ q.elems[q.popi] = nil // Help GC.
+ q.nelems--
+ q.popi = (q.popi + 1) % len(q.elems)
+ //debugf(" Popped(pushi=%d popi=%d cap=%d): %#v\n",
+ // q.pushi, q.popi, len(q.elems), elem)
+ return elem
+}
+
+func (q *queue) expand() {
+ curcap := len(q.elems)
+ var newcap int
+ if curcap == 0 {
+ newcap = 8
+ } else if curcap < 1024 {
+ newcap = curcap * 2
+ } else {
+ newcap = curcap + (curcap / 4)
+ }
+ elems := make([]interface{}, newcap)
+
+ if q.popi == 0 {
+ copy(elems, q.elems)
+ q.pushi = curcap
+ } else {
+ newpopi := newcap - (curcap - q.popi)
+ copy(elems, q.elems[:q.popi])
+ copy(elems[newpopi:], q.elems[q.popi:])
+ q.popi = newpopi
+ }
+ for i := range q.elems {
+ q.elems[i] = nil // Help GC.
+ }
+ q.elems = elems
+}
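+
+// Editor's note: with the policy above the capacity grows as 8, 16, 32, ...,
+// 1024, and then by 25% per expansion (1280, 1600, ...). When the ring has
+// wrapped (popi > 0), the tail elements are moved to the end of the new
+// slice so the free space opens up between pushi and the relocated popi.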
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/queue_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/queue_test.go
new file mode 100644
index 00000000000..bd0ab550f97
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/queue_test.go
@@ -0,0 +1,101 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ . "gopkg.in/check.v1"
+)
+
+type QS struct{}
+
+var _ = Suite(&QS{})
+
+func (s *QS) TestSequentialGrowth(c *C) {
+ q := queue{}
+ n := 2048
+ for i := 0; i != n; i++ {
+ q.Push(i)
+ }
+ for i := 0; i != n; i++ {
+ c.Assert(q.Pop(), Equals, i)
+ }
+}
+
+var queueTestLists = [][]int{
+ // {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
+
+ // {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7}
+ {0, 1, 2, 3, 4, 5, 6, 7, -1, -1, 8, 9, 10, 11},
+
+ // {8, 9, 10, 11, ... 2, 3, 4, 5, 6, 7}
+ {0, 1, 2, 3, -1, -1, 4, 5, 6, 7, 8, 9, 10, 11},
+
+ // {0, 1, 2, 3, 4, 5, 6, 7, 8}
+ {0, 1, 2, 3, 4, 5, 6, 7, 8,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8},
+}
+
+func (s *QS) TestQueueTestLists(c *C) {
+ test := []int{}
+ testi := 0
+ reset := func() {
+ test = test[0:0]
+ testi = 0
+ }
+ push := func(i int) {
+ test = append(test, i)
+ }
+ pop := func() (i int) {
+ if testi == len(test) {
+ return -1
+ }
+ i = test[testi]
+ testi++
+ return
+ }
+
+ for _, list := range queueTestLists {
+ reset()
+ q := queue{}
+ for _, n := range list {
+ if n == -1 {
+ c.Assert(q.Pop(), Equals, pop(), Commentf("With list %#v", list))
+ } else {
+ q.Push(n)
+ push(n)
+ }
+ }
+
+ for n := pop(); n != -1; n = pop() {
+ c.Assert(q.Pop(), Equals, n, Commentf("With list %#v", list))
+ }
+
+ c.Assert(q.Pop(), Equals, nil, Commentf("With list %#v", list))
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/raceoff.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/raceoff.go
new file mode 100644
index 00000000000..e60b141442e
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/raceoff.go
@@ -0,0 +1,5 @@
+// +build !race
+
+package mgo
+
+const raceDetector = false
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/raceon.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/raceon.go
new file mode 100644
index 00000000000..737b08eced8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/raceon.go
@@ -0,0 +1,5 @@
+// +build race
+
+package mgo
+
+const raceDetector = true
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/saslimpl.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/saslimpl.go
new file mode 100644
index 00000000000..0d25f25cbb6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/saslimpl.go
@@ -0,0 +1,11 @@
+// +build sasl
+
+package mgo
+
+import (
+ "gopkg.in/mgo.v2/internal/sasl"
+)
+
+func saslNew(cred Credential, host string) (saslStepper, error) {
+ return sasl.New(cred.Username, cred.Password, cred.Mechanism, cred.Service, host)
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/saslstub.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/saslstub.go
new file mode 100644
index 00000000000..6e9e30986dc
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/saslstub.go
@@ -0,0 +1,11 @@
+// +build !sasl
+
+package mgo
+
+import (
+ "fmt"
+)
+
+func saslNew(cred Credential, host string) (saslStepper, error) {
+ return nil, fmt.Errorf("SASL support not enabled during build (-tags sasl)")
+}
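+
+// Editor's note: the sasl build tag selects between this stub and the cgo
+// implementation in saslimpl.go / internal/sasl; building with
+// `go build -tags sasl` (with the Cyrus SASL headers installed) enables
+// real SASL support, while a default build returns the error above.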
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/server.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/server.go
new file mode 100644
index 00000000000..392598691f8
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/server.go
@@ -0,0 +1,463 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "errors"
+ "net"
+ "sort"
+ "sync"
+ "time"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
+// ---------------------------------------------------------------------------
+// Mongo server encapsulation.
+
+type mongoServer struct {
+ sync.RWMutex
+ Addr string
+ ResolvedAddr string
+ tcpaddr *net.TCPAddr
+ unusedSockets []*mongoSocket
+ liveSockets []*mongoSocket
+ closed bool
+ abended bool
+ sync chan bool
+ dial dialer
+ pingValue time.Duration
+ pingIndex int
+ pingCount uint32
+ pingWindow [6]time.Duration
+ info *mongoServerInfo
+}
+
+type dialer struct {
+ old func(addr net.Addr) (net.Conn, error)
+ new func(addr *ServerAddr) (net.Conn, error)
+}
+
+func (dial dialer) isSet() bool {
+ return dial.old != nil || dial.new != nil
+}
+
+type mongoServerInfo struct {
+ Master bool
+ Mongos bool
+ Tags bson.D
+ MaxWireVersion int
+ SetName string
+}
+
+var defaultServerInfo mongoServerInfo
+
+func newServer(addr string, tcpaddr *net.TCPAddr, sync chan bool, dial dialer) *mongoServer {
+ server := &mongoServer{
+ Addr: addr,
+ ResolvedAddr: tcpaddr.String(),
+ tcpaddr: tcpaddr,
+ sync: sync,
+ dial: dial,
+ info: &defaultServerInfo,
+ pingValue: time.Hour, // Start high so the server is not preferred until a real ping completes.
+ }
+ go server.pinger(true)
+ return server
+}
+
+var errPoolLimit = errors.New("per-server connection limit reached")
+var errServerClosed = errors.New("server was closed")
+
+// AcquireSocket returns a socket for communicating with the server.
+// This will attempt to reuse an old connection, if one is available. Otherwise,
+// it will establish a new one. The returned socket is owned by the call site,
+// and will return to the cache when the socket has its Release method called
+// the same number of times as AcquireSocket + Acquire were called for it.
+// If the poolLimit argument is greater than zero and the number of sockets in
+// use in this server is greater than the provided limit, errPoolLimit is
+// returned.
+func (server *mongoServer) AcquireSocket(poolLimit int, timeout time.Duration) (socket *mongoSocket, abended bool, err error) {
+ for {
+ server.Lock()
+ abended = server.abended
+ if server.closed {
+ server.Unlock()
+ return nil, abended, errServerClosed
+ }
+ n := len(server.unusedSockets)
+ if poolLimit > 0 && len(server.liveSockets)-n >= poolLimit {
+ server.Unlock()
+ return nil, false, errPoolLimit
+ }
+ if n > 0 {
+ socket = server.unusedSockets[n-1]
+ server.unusedSockets[n-1] = nil // Help GC.
+ server.unusedSockets = server.unusedSockets[:n-1]
+ info := server.info
+ server.Unlock()
+ err = socket.InitialAcquire(info, timeout)
+ if err != nil {
+ continue
+ }
+ } else {
+ server.Unlock()
+ socket, err = server.Connect(timeout)
+ if err == nil {
+ server.Lock()
+ // We've waited for the Connect; check whether the server
+ // was closed in the meantime.
+ if server.closed {
+ server.Unlock()
+ socket.Release()
+ socket.Close()
+ return nil, abended, errServerClosed
+ }
+ server.liveSockets = append(server.liveSockets, socket)
+ server.Unlock()
+ }
+ }
+ return
+ }
+ panic("unreachable")
+}
+
+// Connect establishes a new connection to the server. This should
+// generally be done through server.AcquireSocket().
+func (server *mongoServer) Connect(timeout time.Duration) (*mongoSocket, error) {
+ server.RLock()
+ master := server.info.Master
+ dial := server.dial
+ server.RUnlock()
+
+ logf("Establishing new connection to %s (timeout=%s)...", server.Addr, timeout)
+ var conn net.Conn
+ var err error
+ switch {
+ case !dial.isSet():
+ // Cannot do this because it lacks timeout support. :-(
+ //conn, err = net.DialTCP("tcp", nil, server.tcpaddr)
+ conn, err = net.DialTimeout("tcp", server.ResolvedAddr, timeout)
+ if tcpconn, ok := conn.(*net.TCPConn); ok {
+ tcpconn.SetKeepAlive(true)
+ } else if err == nil {
+ panic("internal error: obtained TCP connection is not a *net.TCPConn!?")
+ }
+ case dial.old != nil:
+ conn, err = dial.old(server.tcpaddr)
+ case dial.new != nil:
+ conn, err = dial.new(&ServerAddr{server.Addr, server.tcpaddr})
+ default:
+ panic("dialer is set, but both dial.old and dial.new are nil")
+ }
+ if err != nil {
+ logf("Connection to %s failed: %v", server.Addr, err.Error())
+ return nil, err
+ }
+ logf("Connection to %s established.", server.Addr)
+
+ stats.conn(+1, master)
+ return newSocket(server, conn, timeout), nil
+}
+
+// Close forces closing all sockets that are alive, whether
+// they're currently in use or not.
+func (server *mongoServer) Close() {
+ server.Lock()
+ server.closed = true
+ liveSockets := server.liveSockets
+ unusedSockets := server.unusedSockets
+ server.liveSockets = nil
+ server.unusedSockets = nil
+ server.Unlock()
+ logf("Connections to %s closing (%d live sockets).", server.Addr, len(liveSockets))
+ for i, s := range liveSockets {
+ s.Close()
+ liveSockets[i] = nil
+ }
+ for i := range unusedSockets {
+ unusedSockets[i] = nil
+ }
+}
+
+// RecycleSocket puts socket back into the unused cache.
+func (server *mongoServer) RecycleSocket(socket *mongoSocket) {
+ server.Lock()
+ if !server.closed {
+ server.unusedSockets = append(server.unusedSockets, socket)
+ }
+ server.Unlock()
+}
+
+func removeSocket(sockets []*mongoSocket, socket *mongoSocket) []*mongoSocket {
+ for i, s := range sockets {
+ if s == socket {
+ copy(sockets[i:], sockets[i+1:])
+ n := len(sockets) - 1
+ sockets[n] = nil
+ sockets = sockets[:n]
+ break
+ }
+ }
+ return sockets
+}
+
+// AbendSocket notifies the server that the given socket has terminated
+// abnormally, and thus should be discarded rather than cached.
+func (server *mongoServer) AbendSocket(socket *mongoSocket) {
+ server.Lock()
+ server.abended = true
+ if server.closed {
+ server.Unlock()
+ return
+ }
+ server.liveSockets = removeSocket(server.liveSockets, socket)
+ server.unusedSockets = removeSocket(server.unusedSockets, socket)
+ server.Unlock()
+ // Maybe just a timeout, but suggest a cluster sync up just in case.
+ select {
+ case server.sync <- true:
+ default:
+ }
+}
+
+func (server *mongoServer) SetInfo(info *mongoServerInfo) {
+ server.Lock()
+ server.info = info
+ server.Unlock()
+}
+
+func (server *mongoServer) Info() *mongoServerInfo {
+ server.Lock()
+ info := server.info
+ server.Unlock()
+ return info
+}
+
+func (server *mongoServer) hasTags(serverTags []bson.D) bool {
+NextTagSet:
+ for _, tags := range serverTags {
+ NextReqTag:
+ for _, req := range tags {
+ for _, has := range server.info.Tags {
+ if req.Name == has.Name {
+ if req.Value == has.Value {
+ continue NextReqTag
+ }
+ continue NextTagSet
+ }
+ }
+ continue NextTagSet
+ }
+ return true
+ }
+ return false
+}
+
+var pingDelay = 15 * time.Second
+
+func (server *mongoServer) pinger(loop bool) {
+ var delay time.Duration
+ if raceDetector {
+ // This variable is only ever touched by tests.
+ globalMutex.Lock()
+ delay = pingDelay
+ globalMutex.Unlock()
+ } else {
+ delay = pingDelay
+ }
+ op := queryOp{
+ collection: "admin.$cmd",
+ query: bson.D{{"ping", 1}},
+ flags: flagSlaveOk,
+ limit: -1,
+ }
+ for {
+ if loop {
+ time.Sleep(delay)
+ }
+ op := op // Work on a copy so the shared template op is not mutated.
+ socket, _, err := server.AcquireSocket(0, delay)
+ if err == nil {
+ start := time.Now()
+ _, _ = socket.SimpleQuery(&op)
+ elapsed := time.Since(start) // Avoid shadowing the outer delay variable.
+
+ server.pingWindow[server.pingIndex] = elapsed
+ server.pingIndex = (server.pingIndex + 1) % len(server.pingWindow)
+ server.pingCount++
+ var max time.Duration
+ for i := 0; i < len(server.pingWindow) && uint32(i) < server.pingCount; i++ {
+ if server.pingWindow[i] > max {
+ max = server.pingWindow[i]
+ }
+ }
+ socket.Release()
+ server.Lock()
+ if server.closed {
+ loop = false
+ }
+ server.pingValue = max
+ server.Unlock()
+ logf("Ping for %s is %d ms", server.Addr, max/time.Millisecond)
+ } else if err == errServerClosed {
+ return
+ }
+ if !loop {
+ return
+ }
+ }
+}
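+
+// Editor's note: pingValue is the maximum over a window of six samples taken
+// every pingDelay (15s), so a single slow ping can pessimize server
+// selection for up to roughly 90 seconds.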
+
+type mongoServerSlice []*mongoServer
+
+func (s mongoServerSlice) Len() int {
+ return len(s)
+}
+
+func (s mongoServerSlice) Less(i, j int) bool {
+ return s[i].ResolvedAddr < s[j].ResolvedAddr
+}
+
+func (s mongoServerSlice) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s mongoServerSlice) Sort() {
+ sort.Sort(s)
+}
+
+func (s mongoServerSlice) Search(resolvedAddr string) (i int, ok bool) {
+ n := len(s)
+ i = sort.Search(n, func(i int) bool {
+ return s[i].ResolvedAddr >= resolvedAddr
+ })
+ return i, i != n && s[i].ResolvedAddr == resolvedAddr
+}
+
+type mongoServers struct {
+ slice mongoServerSlice
+}
+
+func (servers *mongoServers) Search(resolvedAddr string) (server *mongoServer) {
+ if i, ok := servers.slice.Search(resolvedAddr); ok {
+ return servers.slice[i]
+ }
+ return nil
+}
+
+func (servers *mongoServers) Add(server *mongoServer) {
+ servers.slice = append(servers.slice, server)
+ servers.slice.Sort()
+}
+
+func (servers *mongoServers) Remove(other *mongoServer) (server *mongoServer) {
+ if i, found := servers.slice.Search(other.ResolvedAddr); found {
+ server = servers.slice[i]
+ copy(servers.slice[i:], servers.slice[i+1:])
+ n := len(servers.slice) - 1
+ servers.slice[n] = nil // Help GC.
+ servers.slice = servers.slice[:n]
+ }
+ return
+}
+
+func (servers *mongoServers) Slice() []*mongoServer {
+ return ([]*mongoServer)(servers.slice)
+}
+
+func (servers *mongoServers) Get(i int) *mongoServer {
+ return servers.slice[i]
+}
+
+func (servers *mongoServers) Len() int {
+ return len(servers.slice)
+}
+
+func (servers *mongoServers) Empty() bool {
+ return len(servers.slice) == 0
+}
+
+func (servers *mongoServers) HasMongos() bool {
+ for _, s := range servers.slice {
+ if s.Info().Mongos {
+ return true
+ }
+ }
+ return false
+}
+
+// BestFit returns the best guess of what would be the most interesting
+// server to perform operations on at this point in time.
+func (servers *mongoServers) BestFit(mode Mode, serverTags []bson.D) *mongoServer {
+ var best *mongoServer
+ for _, next := range servers.slice {
+ if best == nil {
+ best = next
+ best.RLock()
+ if serverTags != nil && !next.info.Mongos && !best.hasTags(serverTags) {
+ best.RUnlock()
+ best = nil
+ }
+ continue
+ }
+ next.RLock()
+ swap := false
+ switch {
+ case serverTags != nil && !next.info.Mongos && !next.hasTags(serverTags):
+ // Must have requested tags.
+ case mode == Secondary && next.info.Master && !next.info.Mongos:
+ // Must be a secondary or mongos.
+ case next.info.Master != best.info.Master && mode != Nearest:
+ // Prefer slaves, unless the mode is PrimaryPreferred.
+ swap = (mode == PrimaryPreferred) != best.info.Master
+ case absDuration(next.pingValue-best.pingValue) > 15*time.Millisecond:
+ // Prefer nearest server.
+ swap = next.pingValue < best.pingValue
+ case len(next.liveSockets)-len(next.unusedSockets) < len(best.liveSockets)-len(best.unusedSockets):
+ // Prefer servers with fewer connections.
+ swap = true
+ }
+ if swap {
+ best.RUnlock()
+ best = next
+ } else {
+ next.RUnlock()
+ }
+ }
+ if best != nil {
+ best.RUnlock()
+ }
+ return best
+}
+
+func absDuration(d time.Duration) time.Duration {
+ if d < 0 {
+ return -d
+ }
+ return d
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/session.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/session.go
new file mode 100644
index 00000000000..12ca8f2ac37
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/session.go
@@ -0,0 +1,4826 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "crypto/md5"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math"
+ "net"
+ "net/url"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
+type Mode int
+
+const (
+ // Relevant documentation on read preference modes:
+ //
+ // http://docs.mongodb.org/manual/reference/read-preference/
+ //
+ Primary Mode = 2 // Default mode. All operations read from the current replica set primary.
+ PrimaryPreferred Mode = 3 // Read from the primary if available. Read from the secondary otherwise.
+ Secondary Mode = 4 // Read from one of the nearest secondary members of the replica set.
+ SecondaryPreferred Mode = 5 // Read from one of the nearest secondaries if available. Read from primary otherwise.
+ Nearest Mode = 6 // Read from one of the nearest members, irrespective of it being primary or secondary.
+
+ // Read preference modes are specific to mgo:
+ Eventual Mode = 0 // Same as Nearest, but may change servers between reads.
+ Monotonic Mode = 1 // Same as SecondaryPreferred before first write. Same as Primary after first write.
+ Strong Mode = 2 // Same as Primary.
+)
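+
+// Editor's usage sketch (not upstream documentation): the mode is normally
+// chosen per session after dialing, e.g.:
+//
+// session.SetMode(Monotonic, true)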
+
+// mgo.v3: Drop Strong mode, suffix all modes with "Mode".
+
+// When changing the Session type, check if newSession and copySession
+// need to be updated too.
+
+// Session represents a communication session with the database.
+//
+// All Session methods are concurrency-safe and may be called from multiple
+// goroutines. In all session modes but Eventual, using the session from
+// multiple goroutines will cause them to share the same underlying socket.
+// See the documentation on Session.SetMode for more details.
+type Session struct {
+ m sync.RWMutex
+ cluster_ *mongoCluster
+ slaveSocket *mongoSocket
+ masterSocket *mongoSocket
+ slaveOk bool
+ consistency Mode
+ queryConfig query
+ safeOp *queryOp
+ syncTimeout time.Duration
+ sockTimeout time.Duration
+ defaultdb string
+ sourcedb string
+ dialCred *Credential
+ creds []Credential
+ poolLimit int
+ bypassValidation bool
+}
+
+type Database struct {
+ Session *Session
+ Name string
+}
+
+type Collection struct {
+ Database *Database
+ Name string // "collection"
+ FullName string // "db.collection"
+}
+
+type Query struct {
+ m sync.Mutex
+ session *Session
+ query // Enables default settings in session.
+}
+
+type query struct {
+ op queryOp
+ prefetch float64
+ limit int32
+}
+
+type getLastError struct {
+ CmdName int "getLastError,omitempty"
+ W interface{} "w,omitempty"
+ WTimeout int "wtimeout,omitempty"
+ FSync bool "fsync,omitempty"
+ J bool "j,omitempty"
+}
+
+type Iter struct {
+ m sync.Mutex
+ gotReply sync.Cond
+ session *Session
+ server *mongoServer
+ docData queue
+ err error
+ op getMoreOp
+ prefetch float64
+ limit int32
+ docsToReceive int
+ docsBeforeMore int
+ timeout time.Duration
+ timedout bool
+ findCmd bool
+}
+
+var (
+ ErrNotFound = errors.New("not found")
+ ErrCursor = errors.New("invalid cursor")
+)
+
+const (
+ defaultPrefetch = 0.25
+ maxUpsertRetries = 5
+)
+
+// Dial establishes a new session to the cluster identified by the given seed
+// server(s). The session will enable communication with all of the servers in
+// the cluster, so the seed servers are used only to find out about the cluster
+// topology.
+//
+// Dial will time out after 10 seconds if a server isn't reached. The returned
+// session will time out operations after one minute by default if servers
+// aren't available. To customize the timeout, see DialWithTimeout,
+// SetSyncTimeout, and SetSocketTimeout.
+//
+// This method is generally called just once for a given cluster. Further
+// sessions to the same cluster are then established using the New or Copy
+// methods on the obtained session. This will make them share the underlying
+// cluster, and manage the pool of connections appropriately.
+//
+// Once the session is not useful anymore, Close must be called to release the
+// resources appropriately.
+//
+// The seed servers must be provided in the following format:
+//
+// [mongodb://][user:pass@]host1[:port1][,host2[:port2],...][/database][?options]
+//
+// For example, it may be as simple as:
+//
+// localhost
+//
+// Or more involved like:
+//
+// mongodb://myuser:mypass@localhost:40001,otherhost:40001/mydb
+//
+// If the port number is not provided for a server, it defaults to 27017.
+//
+// The username and password provided in the URL will be used to authenticate
+// into the database named after the slash at the end of the host names, or
+// into the "admin" database if none is provided. The authentication information
+// will persist in sessions obtained through the New method as well.
+//
+// The following connection options are supported after the question mark:
+//
+// connect=direct
+//
+// Disables the automatic replica set server discovery logic, and
+// forces the use of only the servers provided (even if they are
+// secondaries).
+// Note that to talk to a secondary the consistency requirements
+// must be relaxed to Monotonic or Eventual via SetMode.
+//
+//
+// connect=replicaSet
+//
+// Discover replica sets automatically. Default connection behavior.
+//
+//
+// replicaSet=<setname>
+//
+// If specified, will prevent the obtained session from communicating
+// with any server which is not part of a replica set with the given name.
+// The default is to communicate with any server specified or discovered
+// via the servers contacted.
+//
+//
+// authSource=<db>
+//
+// Specifies the database used to establish credentials and privileges
+// with a MongoDB server. Defaults to the database name provided via
+// the URL path, and "admin" if that's unset.
+//
+//
+// authMechanism=<mechanism>
+//
+// Defines the protocol for credential negotiation. Defaults to "MONGODB-CR",
+// the username/password challenge-response mechanism.
+//
+//
+// gssapiServiceName=<name>
+//
+// Defines the service name to use when authenticating with the GSSAPI
+// mechanism. Defaults to "mongodb".
+//
+//
+// maxPoolSize=<limit>
+//
+// Defines the per-server socket pool limit. Defaults to 4096.
+// See Session.SetPoolLimit for details.
+//
+//
+// Relevant documentation:
+//
+// http://docs.mongodb.org/manual/reference/connection-string/
+//
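+// A minimal usage sketch (illustrative only; the address and database
+// name below are placeholders):
+//
+//     session, err := mgo.Dial("mongodb://localhost/mydb")
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     defer session.Close()
+//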
+func Dial(url string) (*Session, error) {
+ session, err := DialWithTimeout(url, 10*time.Second)
+ if err == nil {
+ session.SetSyncTimeout(1 * time.Minute)
+ session.SetSocketTimeout(1 * time.Minute)
+ }
+ return session, err
+}
+
+// DialWithTimeout works like Dial, but uses timeout as the amount of time to
+// wait for a server to respond when first connecting and also on follow up
+// operations in the session. If timeout is zero, the call may block
+// forever waiting for a connection to be made.
+//
+// See SetSyncTimeout for customizing the timeout for the session.
+func DialWithTimeout(url string, timeout time.Duration) (*Session, error) {
+ info, err := ParseURL(url)
+ if err != nil {
+ return nil, err
+ }
+ info.Timeout = timeout
+ return DialWithInfo(info)
+}
+
+// ParseURL parses a MongoDB URL as accepted by the Dial function and returns
+// a value suitable for providing into DialWithInfo.
+//
+// See Dial for more details on the format of url.
+func ParseURL(url string) (*DialInfo, error) {
+ uinfo, err := extractURL(url)
+ if err != nil {
+ return nil, err
+ }
+ direct := false
+ mechanism := ""
+ service := ""
+ source := ""
+ setName := ""
+ poolLimit := 0
+ for k, v := range uinfo.options {
+ switch k {
+ case "authSource":
+ source = v
+ case "authMechanism":
+ mechanism = v
+ case "gssapiServiceName":
+ service = v
+ case "replicaSet":
+ setName = v
+ case "maxPoolSize":
+ poolLimit, err = strconv.Atoi(v)
+ if err != nil {
+ return nil, errors.New("bad value for maxPoolSize: " + v)
+ }
+ case "connect":
+ if v == "direct" {
+ direct = true
+ break
+ }
+ if v == "replicaSet" {
+ break
+ }
+ fallthrough
+ default:
+ return nil, errors.New("unsupported connection URL option: " + k + "=" + v)
+ }
+ }
+ info := DialInfo{
+ Addrs: uinfo.addrs,
+ Direct: direct,
+ Database: uinfo.db,
+ Username: uinfo.user,
+ Password: uinfo.pass,
+ Mechanism: mechanism,
+ Service: service,
+ Source: source,
+ PoolLimit: poolLimit,
+ ReplicaSetName: setName,
+ }
+ return &info, nil
+}
+
+// DialInfo holds options for establishing a session with a MongoDB cluster.
+// To use a URL, see the Dial function.
+type DialInfo struct {
+ // Addrs holds the addresses for the seed servers.
+ Addrs []string
+
+ // Direct informs whether to establish connections only with the
+ // specified seed servers, or to obtain information for the whole
+ // cluster and establish connections with further servers too.
+ Direct bool
+
+ // Timeout is the amount of time to wait for a server to respond when
+ // first connecting and on follow up operations in the session. If
+ // timeout is zero, the call may block forever waiting for a connection
+ // to be established. Timeout does not affect logic in DialServer.
+ Timeout time.Duration
+
+ // FailFast will cause connection and query attempts to fail faster when
+ // the server is unavailable, instead of retrying until the configured
+ // timeout period. Note that an unavailable server may silently drop
+ // packets instead of rejecting them, in which case it's impossible to
+ // distinguish it from a slow server, so the timeout stays relevant.
+ FailFast bool
+
+ // Database is the default database name used when the Session.DB method
+ // is called with an empty name, and is also used during the initial
+ // authentication if Source is unset.
+ Database string
+
+ // ReplicaSetName, if specified, will prevent the obtained session from
+ // communicating with any server which is not part of a replica set
+ // with the given name. The default is to communicate with any server
+ // specified or discovered via the servers contacted.
+ ReplicaSetName string
+
+ // Source is the database used to establish credentials and privileges
+ // with a MongoDB server. Defaults to the value of Database, if that is
+ // set, or "admin" otherwise.
+ Source string
+
+ // Service defines the service name to use when authenticating with the GSSAPI
+ // mechanism. Defaults to "mongodb".
+ Service string
+
+ // ServiceHost defines which hostname to use when authenticating
+ // with the GSSAPI mechanism. If not specified, defaults to the MongoDB
+ // server's address.
+ ServiceHost string
+
+ // Mechanism defines the protocol for credential negotiation.
+ // Defaults to "MONGODB-CR".
+ Mechanism string
+
+ // Username and Password hold the credentials for the initial authentication
+ // done on the database defined by the Source field. See Session.Login.
+ Username string
+ Password string
+
+ // PoolLimit defines the per-server socket pool limit. Defaults to 4096.
+ // See Session.SetPoolLimit for details.
+ PoolLimit int
+
+ // DialServer optionally specifies the dial function for establishing
+ // connections with the MongoDB servers.
+ DialServer func(addr *ServerAddr) (net.Conn, error)
+
+ // WARNING: This field is obsolete. See DialServer above.
+ Dial func(addr net.Addr) (net.Conn, error)
+}
+
+// mgo.v3: Drop DialInfo.Dial.
+
+// ServerAddr represents the address for establishing a connection to an
+// individual MongoDB server.
+type ServerAddr struct {
+ str string
+ tcp *net.TCPAddr
+}
+
+// String returns the address that was provided for the server before resolution.
+func (addr *ServerAddr) String() string {
+ return addr.str
+}
+
+// TCPAddr returns the resolved TCP address for the server.
+func (addr *ServerAddr) TCPAddr() *net.TCPAddr {
+ return addr.tcp
+}
+
+// DialWithInfo establishes a new session to the cluster identified by info.
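+//
+// A sketch of typical use (the address and names below are placeholders):
+//
+//     info := &mgo.DialInfo{
+//         Addrs:    []string{"db1.example.com:27017"},
+//         Timeout:  10 * time.Second,
+//         Database: "mydb",
+//     }
+//     session, err := mgo.DialWithInfo(info)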
+func DialWithInfo(info *DialInfo) (*Session, error) {
+ addrs := make([]string, len(info.Addrs))
+ for i, addr := range info.Addrs {
+ p := strings.LastIndexAny(addr, "]:")
+ if p == -1 || addr[p] != ':' {
+ // XXX This is untested. The test suite doesn't use the standard port.
+ addr += ":27017"
+ }
+ addrs[i] = addr
+ }
+ cluster := newCluster(addrs, info.Direct, info.FailFast, dialer{info.Dial, info.DialServer}, info.ReplicaSetName)
+ session := newSession(Eventual, cluster, info.Timeout)
+ session.defaultdb = info.Database
+ if session.defaultdb == "" {
+ session.defaultdb = "test"
+ }
+ session.sourcedb = info.Source
+ if session.sourcedb == "" {
+ session.sourcedb = info.Database
+ if session.sourcedb == "" {
+ session.sourcedb = "admin"
+ }
+ }
+ if info.Username != "" {
+ source := session.sourcedb
+ if info.Source == "" &&
+ (info.Mechanism == "GSSAPI" || info.Mechanism == "PLAIN" || info.Mechanism == "MONGODB-X509") {
+ source = "$external"
+ }
+ session.dialCred = &Credential{
+ Username: info.Username,
+ Password: info.Password,
+ Mechanism: info.Mechanism,
+ Service: info.Service,
+ ServiceHost: info.ServiceHost,
+ Source: source,
+ }
+ session.creds = []Credential{*session.dialCred}
+ }
+ if info.PoolLimit > 0 {
+ session.poolLimit = info.PoolLimit
+ }
+ cluster.Release()
+
+ // People get confused when we return a session that is not actually
+ // established to any servers yet (e.g. if the URL was wrong). So,
+ // ping the server to ensure there's someone there, and abort if it
+ // fails.
+ if err := session.Ping(); err != nil {
+ session.Close()
+ return nil, err
+ }
+ session.SetMode(Strong, true)
+ return session, nil
+}
+
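+// isOptSep reports whether c separates options in a connection string,
+// i.e. whether it is ';' or '&'.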
+func isOptSep(c rune) bool {
+ return c == ';' || c == '&'
+}
+
+type urlInfo struct {
+ addrs []string
+ user string
+ pass string
+ db string
+ options map[string]string
+}
+
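+// extractURL splits a MongoDB URL into its seed addresses, credentials,
+// database name, and connection options.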
+func extractURL(s string) (*urlInfo, error) {
+ if strings.HasPrefix(s, "mongodb://") {
+ s = s[10:]
+ }
+ info := &urlInfo{options: make(map[string]string)}
+ if c := strings.Index(s, "?"); c != -1 {
+ for _, pair := range strings.FieldsFunc(s[c+1:], isOptSep) {
+ l := strings.SplitN(pair, "=", 2)
+ if len(l) != 2 || l[0] == "" || l[1] == "" {
+ return nil, errors.New("connection option must be key=value: " + pair)
+ }
+ info.options[l[0]] = l[1]
+ }
+ s = s[:c]
+ }
+ if c := strings.Index(s, "@"); c != -1 {
+ pair := strings.SplitN(s[:c], ":", 2)
+ if len(pair) > 2 || pair[0] == "" {
+ return nil, errors.New("credentials must be provided as user:pass@host")
+ }
+ var err error
+ info.user, err = url.QueryUnescape(pair[0])
+ if err != nil {
+ return nil, fmt.Errorf("cannot unescape username in URL: %q", pair[0])
+ }
+ if len(pair) > 1 {
+ info.pass, err = url.QueryUnescape(pair[1])
+ if err != nil {
+ return nil, fmt.Errorf("cannot unescape password in URL")
+ }
+ }
+ s = s[c+1:]
+ }
+ if c := strings.Index(s, "/"); c != -1 {
+ info.db = s[c+1:]
+ s = s[:c]
+ }
+ info.addrs = strings.Split(s, ",")
+ return info, nil
+}
+
+func newSession(consistency Mode, cluster *mongoCluster, timeout time.Duration) (session *Session) {
+ cluster.Acquire()
+ session = &Session{
+ cluster_: cluster,
+ syncTimeout: timeout,
+ sockTimeout: timeout,
+ poolLimit: 4096,
+ }
+ debugf("New session %p on cluster %p", session, cluster)
+ session.SetMode(consistency, true)
+ session.SetSafe(&Safe{})
+ session.queryConfig.prefetch = defaultPrefetch
+ return session
+}
+
+func copySession(session *Session, keepCreds bool) (s *Session) {
+ cluster := session.cluster()
+ cluster.Acquire()
+ if session.masterSocket != nil {
+ session.masterSocket.Acquire()
+ }
+ if session.slaveSocket != nil {
+ session.slaveSocket.Acquire()
+ }
+ var creds []Credential
+ if keepCreds {
+ creds = make([]Credential, len(session.creds))
+ copy(creds, session.creds)
+ } else if session.dialCred != nil {
+ creds = []Credential{*session.dialCred}
+ }
+ scopy := *session
+ scopy.m = sync.RWMutex{}
+ scopy.creds = creds
+ s = &scopy
+ debugf("New session %p on cluster %p (copy from %p)", s, cluster, session)
+ return s
+}
+
+// LiveServers returns a list of server addresses which are
+// currently known to be alive.
+func (s *Session) LiveServers() (addrs []string) {
+ s.m.RLock()
+ addrs = s.cluster().LiveServers()
+ s.m.RUnlock()
+ return addrs
+}
+
+// DB returns a value representing the named database. If name
+// is empty, the database name provided in the dialed URL is
+// used instead. If that is also empty, "test" is used as a
+// fallback in a way equivalent to the mongo shell.
+//
+// Creating this value is a very lightweight operation, and
+// involves no network communication.
+func (s *Session) DB(name string) *Database {
+ if name == "" {
+ name = s.defaultdb
+ }
+ return &Database{s, name}
+}
+
+// C returns a value representing the named collection.
+//
+// Creating this value is a very lightweight operation, and
+// involves no network communication.
+func (db *Database) C(name string) *Collection {
+ return &Collection{db, name, db.Name + "." + name}
+}
+
+// With returns a copy of db that uses session s.
+func (db *Database) With(s *Session) *Database {
+ newdb := *db
+ newdb.Session = s
+ return &newdb
+}
+
+// With returns a copy of c that uses session s.
+func (c *Collection) With(s *Session) *Collection {
+ newdb := *c.Database
+ newdb.Session = s
+ newc := *c
+ newc.Database = &newdb
+ return &newc
+}
+
+// GridFS returns a GridFS value representing collections in db that
+// follow the standard GridFS specification.
+// The provided prefix (sometimes known as root) will determine which
+// collections to use, and is usually set to "fs" when there is a
+// single GridFS in the database.
+//
+// See the GridFS Create, Open, and OpenId methods for more details.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/GridFS
+// http://www.mongodb.org/display/DOCS/GridFS+Tools
+// http://www.mongodb.org/display/DOCS/GridFS+Specification
+//
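+// A short sketch (the prefix and file name below are placeholders):
+//
+//     fs := db.GridFS("fs")
+//     file, err := fs.Create("report.txt")
+//     // ... write to file, then close it and check the error.
+//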
+func (db *Database) GridFS(prefix string) *GridFS {
+ return newGridFS(db, prefix)
+}
+
+// Run issues the provided command on the db database and unmarshals
+// its result in the respective argument. The cmd argument may be either
+// a string with the command name itself, in which case a document of
+// the form bson.M{cmd: 1} will be used, or it may be a full command document.
+//
+// Note that MongoDB considers the first marshalled key as the command
+// name, so when providing a command with options, it's important to
+// use an ordering-preserving document, such as a struct value or an
+// instance of bson.D. For instance:
+//
+// db.Run(bson.D{{"create", "mycollection"}, {"size", 1024}})
+//
+// For privileged commands typically run on the "admin" database, see
+// the Run method in the Session type.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Commands
+// http://www.mongodb.org/display/DOCS/List+of+Database+Commands
+//
+func (db *Database) Run(cmd interface{}, result interface{}) error {
+ socket, err := db.Session.acquireSocket(true)
+ if err != nil {
+ return err
+ }
+ defer socket.Release()
+
+ // This is an optimized form of db.C("$cmd").Find(cmd).One(result).
+ return db.run(socket, cmd, result)
+}
+
+// Credential holds details to authenticate with a MongoDB server.
+type Credential struct {
+ // Username and Password hold the basic details for authentication.
+ // Password is optional with some authentication mechanisms.
+ Username string
+ Password string
+
+ // Source is the database used to establish credentials and privileges
+ // with a MongoDB server. Defaults to the default database provided
+ // during dial, or "admin" if that was unset.
+ Source string
+
+ // Service defines the service name to use when authenticating with the GSSAPI
+ // mechanism. Defaults to "mongodb".
+ Service string
+
+ // ServiceHost defines which hostname to use when authenticating
+ // with the GSSAPI mechanism. If not specified, defaults to the MongoDB
+ // server's address.
+ ServiceHost string
+
+ // Mechanism defines the protocol for credential negotiation.
+ // Defaults to "MONGODB-CR".
+ Mechanism string
+}
+
+// Login authenticates with MongoDB using the provided credential. The
+// authentication is valid for the whole session and will stay valid until
+// Logout is explicitly called for the same database, or the session is
+// closed.
+func (db *Database) Login(user, pass string) error {
+ return db.Session.Login(&Credential{Username: user, Password: pass, Source: db.Name})
+}
+
+// Login authenticates with MongoDB using the provided credential. The
+// authentication is valid for the whole session and will stay valid until
+// Logout is explicitly called for the same database, or the session is
+// closed.
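+//
+// For example (a sketch; the credentials below are placeholders):
+//
+//     err := session.Login(&mgo.Credential{
+//         Username: "myuser",
+//         Password: "mypass",
+//     })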
+func (s *Session) Login(cred *Credential) error {
+ socket, err := s.acquireSocket(true)
+ if err != nil {
+ return err
+ }
+ defer socket.Release()
+
+ credCopy := *cred
+ if cred.Source == "" {
+ if cred.Mechanism == "GSSAPI" {
+ credCopy.Source = "$external"
+ } else {
+ credCopy.Source = s.sourcedb
+ }
+ }
+ err = socket.Login(credCopy)
+ if err != nil {
+ return err
+ }
+
+ s.m.Lock()
+ s.creds = append(s.creds, credCopy)
+ s.m.Unlock()
+ return nil
+}
+
+func (s *Session) socketLogin(socket *mongoSocket) error {
+ for _, cred := range s.creds {
+ if err := socket.Login(cred); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Logout removes any established authentication credentials for the database.
+func (db *Database) Logout() {
+ session := db.Session
+ dbname := db.Name
+ session.m.Lock()
+ found := false
+ for i, cred := range session.creds {
+ if cred.Source == dbname {
+ copy(session.creds[i:], session.creds[i+1:])
+ session.creds = session.creds[:len(session.creds)-1]
+ found = true
+ break
+ }
+ }
+ if found {
+ if session.masterSocket != nil {
+ session.masterSocket.Logout(dbname)
+ }
+ if session.slaveSocket != nil {
+ session.slaveSocket.Logout(dbname)
+ }
+ }
+ session.m.Unlock()
+}
+
+// LogoutAll removes all established authentication credentials for the session.
+func (s *Session) LogoutAll() {
+ s.m.Lock()
+ for _, cred := range s.creds {
+ if s.masterSocket != nil {
+ s.masterSocket.Logout(cred.Source)
+ }
+ if s.slaveSocket != nil {
+ s.slaveSocket.Logout(cred.Source)
+ }
+ }
+ s.creds = s.creds[0:0]
+ s.m.Unlock()
+}
+
+// User represents a MongoDB user.
+//
+// Relevant documentation:
+//
+// http://docs.mongodb.org/manual/reference/privilege-documents/
+// http://docs.mongodb.org/manual/reference/user-privileges/
+//
+type User struct {
+ // Username is how the user identifies itself to the system.
+ Username string `bson:"user"`
+
+ // Password is the plaintext password for the user. If set,
+ // the UpsertUser method will hash it into PasswordHash and
+ // unset it before the user is added to the database.
+ Password string `bson:",omitempty"`
+
+ // PasswordHash is the MD5 hash of Username+":mongo:"+Password.
+ PasswordHash string `bson:"pwd,omitempty"`
+
+ // CustomData holds arbitrary data admins decide to associate
+ // with this user, such as the full name or employee id.
+ CustomData interface{} `bson:"customData,omitempty"`
+
+ // Roles indicates the set of roles the user will be provided.
+ // See the Role constants.
+ Roles []Role `bson:"roles"`
+
+ // OtherDBRoles allows assigning roles in other databases from
+ // user documents inserted in the admin database. This field
+ // only works in the admin database.
+ OtherDBRoles map[string][]Role `bson:"otherDBRoles,omitempty"`
+
+ // UserSource indicates where to look for this user's credentials.
+ // It may be set to a database name, or to "$external" for
+ // consulting an external resource such as Kerberos. UserSource
+ // must not be set if Password or PasswordHash are present.
+ //
+ // WARNING: This setting was only ever supported in MongoDB 2.4,
+ // and is now obsolete.
+ UserSource string `bson:"userSource,omitempty"`
+}
+
+type Role string
+
+const (
+ // Relevant documentation:
+ //
+ // http://docs.mongodb.org/manual/reference/user-privileges/
+ //
+ RoleRoot Role = "root"
+ RoleRead Role = "read"
+ RoleReadAny Role = "readAnyDatabase"
+ RoleReadWrite Role = "readWrite"
+ RoleReadWriteAny Role = "readWriteAnyDatabase"
+ RoleDBAdmin Role = "dbAdmin"
+ RoleDBAdminAny Role = "dbAdminAnyDatabase"
+ RoleUserAdmin Role = "userAdmin"
+ RoleUserAdminAny Role = "userAdminAnyDatabase"
+ RoleClusterAdmin Role = "clusterAdmin"
+)
+
+// UpsertUser updates the authentication credentials and the roles for
+// a MongoDB user within the db database. If the named user doesn't exist
+// it will be created.
+//
+// This method should only be used from MongoDB 2.4 and on. For older
+// MongoDB releases, use the obsolete AddUser method instead.
+//
+// Relevant documentation:
+//
+// http://docs.mongodb.org/manual/reference/user-privileges/
+// http://docs.mongodb.org/manual/reference/privilege-documents/
+//
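+// For example (a sketch; the user details below are placeholders):
+//
+//     err := db.UpsertUser(&mgo.User{
+//         Username: "myuser",
+//         Password: "mypass",
+//         Roles:    []mgo.Role{mgo.RoleReadWrite},
+//     })
+//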
+func (db *Database) UpsertUser(user *User) error {
+ if user.Username == "" {
+ return fmt.Errorf("user has no Username")
+ }
+ if (user.Password != "" || user.PasswordHash != "") && user.UserSource != "" {
+ return fmt.Errorf("user has both Password/PasswordHash and UserSource set")
+ }
+ if len(user.OtherDBRoles) > 0 && db.Name != "admin" && db.Name != "$external" {
+ return fmt.Errorf("user with OtherDBRoles is only supported in the admin or $external databases")
+ }
+
+ // Attempt to run this using 2.6+ commands.
+ rundb := db
+ if user.UserSource != "" {
+ // Compatibility logic for the userSource field of MongoDB <= 2.4.X
+ rundb = db.Session.DB(user.UserSource)
+ }
+ err := rundb.runUserCmd("updateUser", user)
+ // retry with createUser when isAuthError in order to enable the "localhost exception"
+ if isNotFound(err) || isAuthError(err) {
+ return rundb.runUserCmd("createUser", user)
+ }
+ if !isNoCmd(err) {
+ return err
+ }
+
+ // Command does not exist. Fallback to pre-2.6 behavior.
+ var set, unset bson.D
+ if user.Password != "" {
+ psum := md5.New()
+ psum.Write([]byte(user.Username + ":mongo:" + user.Password))
+ set = append(set, bson.DocElem{"pwd", hex.EncodeToString(psum.Sum(nil))})
+ unset = append(unset, bson.DocElem{"userSource", 1})
+ } else if user.PasswordHash != "" {
+ set = append(set, bson.DocElem{"pwd", user.PasswordHash})
+ unset = append(unset, bson.DocElem{"userSource", 1})
+ }
+ if user.UserSource != "" {
+ set = append(set, bson.DocElem{"userSource", user.UserSource})
+ unset = append(unset, bson.DocElem{"pwd", 1})
+ }
+ if user.Roles != nil || user.OtherDBRoles != nil {
+ set = append(set, bson.DocElem{"roles", user.Roles})
+ if len(user.OtherDBRoles) > 0 {
+ set = append(set, bson.DocElem{"otherDBRoles", user.OtherDBRoles})
+ } else {
+ unset = append(unset, bson.DocElem{"otherDBRoles", 1})
+ }
+ }
+ users := db.C("system.users")
+ err = users.Update(bson.D{{"user", user.Username}}, bson.D{{"$unset", unset}, {"$set", set}})
+ if err == ErrNotFound {
+ set = append(set, bson.DocElem{"user", user.Username})
+ if user.Roles == nil && user.OtherDBRoles == nil {
+ // Roles must be sent, as it's the way MongoDB distinguishes
+ // old-style documents from new-style documents in pre-2.6.
+ set = append(set, bson.DocElem{"roles", user.Roles})
+ }
+ err = users.Insert(set)
+ }
+ return err
+}
+
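+// isNoCmd reports whether err indicates that the server does not
+// recognize the attempted command, triggering the pre-2.6 fallbacks.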
+func isNoCmd(err error) bool {
+ e, ok := err.(*QueryError)
+ return ok && (e.Code == 59 || e.Code == 13390 || strings.HasPrefix(e.Message, "no such cmd:"))
+}
+
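+// isNotFound reports whether err is the server's "not found" error
+// (code 11), e.g. when updating a user that does not exist yet.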
+func isNotFound(err error) bool {
+ e, ok := err.(*QueryError)
+ return ok && e.Code == 11
+}
+
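+// isAuthError reports whether err is the server's "unauthorized"
+// error (code 13).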
+func isAuthError(err error) bool {
+ e, ok := err.(*QueryError)
+ return ok && e.Code == 13
+}
+
+func (db *Database) runUserCmd(cmdName string, user *User) error {
+ cmd := make(bson.D, 0, 16)
+ cmd = append(cmd, bson.DocElem{cmdName, user.Username})
+ if user.Password != "" {
+ cmd = append(cmd, bson.DocElem{"pwd", user.Password})
+ }
+ var roles []interface{}
+ for _, role := range user.Roles {
+ roles = append(roles, role)
+ }
+ for db, dbroles := range user.OtherDBRoles {
+ for _, role := range dbroles {
+ roles = append(roles, bson.D{{"role", role}, {"db", db}})
+ }
+ }
+ if roles != nil || user.Roles != nil || cmdName == "createUser" {
+ cmd = append(cmd, bson.DocElem{"roles", roles})
+ }
+ err := db.Run(cmd, nil)
+ if !isNoCmd(err) && user.UserSource != "" && (user.UserSource != "$external" || db.Name != "$external") {
+ return fmt.Errorf("MongoDB 2.6+ does not support the UserSource setting")
+ }
+ return err
+}
+
+// AddUser creates or updates the authentication credentials of user within
+// the db database.
+//
+// WARNING: This method is obsolete and should only be used with MongoDB 2.2
+// or earlier. For MongoDB 2.4 and on, use UpsertUser instead.
+func (db *Database) AddUser(username, password string, readOnly bool) error {
+ // Try to emulate the old behavior on 2.6+
+ user := &User{Username: username, Password: password}
+ if db.Name == "admin" {
+ if readOnly {
+ user.Roles = []Role{RoleReadAny}
+ } else {
+ user.Roles = []Role{RoleReadWriteAny}
+ }
+ } else {
+ if readOnly {
+ user.Roles = []Role{RoleRead}
+ } else {
+ user.Roles = []Role{RoleReadWrite}
+ }
+ }
+ err := db.runUserCmd("updateUser", user)
+ if isNotFound(err) {
+ return db.runUserCmd("createUser", user)
+ }
+ if !isNoCmd(err) {
+ return err
+ }
+
+ // Command doesn't exist. Fallback to pre-2.6 behavior.
+ psum := md5.New()
+ psum.Write([]byte(username + ":mongo:" + password))
+ digest := hex.EncodeToString(psum.Sum(nil))
+ c := db.C("system.users")
+ _, err = c.Upsert(bson.M{"user": username}, bson.M{"$set": bson.M{"user": username, "pwd": digest, "readOnly": readOnly}})
+ return err
+}
+
+// RemoveUser removes the authentication credentials of user from the database.
+func (db *Database) RemoveUser(user string) error {
+ err := db.Run(bson.D{{"dropUser", user}}, nil)
+ if isNoCmd(err) {
+ users := db.C("system.users")
+ return users.Remove(bson.M{"user": user})
+ }
+ if isNotFound(err) {
+ return ErrNotFound
+ }
+ return err
+}
+
+type indexSpec struct {
+ Name, NS string
+ Key bson.D
+ Unique bool ",omitempty"
+ DropDups bool "dropDups,omitempty"
+ Background bool ",omitempty"
+ Sparse bool ",omitempty"
+ Bits int ",omitempty"
+ Min, Max float64 ",omitempty"
+ BucketSize float64 "bucketSize,omitempty"
+ ExpireAfter int "expireAfterSeconds,omitempty"
+ Weights bson.D ",omitempty"
+ DefaultLanguage string "default_language,omitempty"
+ LanguageOverride string "language_override,omitempty"
+ TextIndexVersion int "textIndexVersion,omitempty"
+
+ Collation *Collation "collation,omitempty"
+}
+
+type Index struct {
+ Key []string // Index key fields; prefix name with dash (-) for descending order
+ Unique bool // Prevent two documents from having the same index key
+ DropDups bool // Drop documents with the same index key as a previously indexed one
+ Background bool // Build index in background and return immediately
+ Sparse bool // Only index documents containing the Key fields
+
+ // If ExpireAfter is defined the server will periodically delete
+ // documents with indexed time.Time older than the provided delta.
+ ExpireAfter time.Duration
+
+ // Name holds the stored index name. On creation if this field is unset it is
+ // computed by EnsureIndex based on the index key.
+ Name string
+
+ // Properties for spatial indexes.
+ //
+ // Min and Max were improperly typed as int when they should have been
+ // floats. To preserve backwards compatibility they are still typed as
+ // int and the following two fields enable reading and writing the same
+ // fields as float numbers. In mgo.v3, these fields will be dropped and
+ // Min/Max will become floats.
+ Min, Max int
+ Minf, Maxf float64
+ BucketSize float64
+ Bits int
+
+ // Properties for text indexes.
+ DefaultLanguage string
+ LanguageOverride string
+
+ // Weights defines the significance of provided fields relative to other
+ // fields in a text index. The score for a given word in a document is derived
+ // from the weighted sum of the frequency for each of the indexed fields in
+ // that document. The default field weight is 1.
+ Weights map[string]int
+
+ // Collation defines the collation to use for the index.
+ Collation *Collation
+}
+
+type Collation struct {
+
+ // Locale defines the collation locale.
+ Locale string `bson:"locale"`
+
+ // CaseLevel defines whether to turn case sensitivity on at strength 1 or 2.
+ CaseLevel bool `bson:"caseLevel,omitempty"`
+
+ // CaseFirst may be set to "upper" or "lower" to define whether
+ // to have uppercase or lowercase items first. Default is "off".
+ CaseFirst string `bson:"caseFirst,omitempty"`
+
+ // Strength defines the priority of comparison properties, as follows:
+ //
+ // 1 (primary) - Strongest level, denotes differences between base characters
+ // 2 (secondary) - Accents in characters are considered secondary differences
+ // 3 (tertiary) - Upper and lower case differences in characters are
+ // distinguished at the tertiary level
+ // 4 (quaternary) - When punctuation is ignored at level 1-3, an additional
+ // level can be used to distinguish words with and without
+ // punctuation. Should only be used if ignoring punctuation
+ // is required or when processing Japanese text.
+ // 5 (identical) - When all other levels are equal, the identical level is
+ // used as a tiebreaker. The Unicode code point values of
+ // the NFD form of each string are compared at this level,
+ // just in case there is no difference at levels 1-4
+ //
+ // Strength defaults to 3.
+ Strength int `bson:"strength,omitempty"`
+
+ // NumericOrdering defines whether to order numbers based on numerical
+ // order and not collation order.
+ NumericOrdering bool `bson:"numericOrdering,omitempty"`
+
+ // Alternate controls whether spaces and punctuation are considered base characters.
+ // May be set to "non-ignorable" (spaces and punctuation considered base characters)
+ // or "shifted" (spaces and punctuation not considered base characters, and only
+ // distinguished at strength > 3). Defaults to "non-ignorable".
+ Alternate string `bson:"alternate,omitempty"`
+
+ // Backwards defines whether to have secondary differences considered in reverse order,
+ // as done in the French language.
+ Backwards bool `bson:"backwards,omitempty"`
+}
+
+// mgo.v3: Drop Minf and Maxf and transform Min and Max to floats.
+// mgo.v3: Drop DropDups as it's unsupported past 2.8.
+
+type indexKeyInfo struct {
+ name string
+ key bson.D
+ weights bson.D
+}
+
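+// parseIndexKey translates a user-facing key specification such as
+// "-name" or "$text:summary" into the index name and the ordered
+// key document expected by the server.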
+func parseIndexKey(key []string) (*indexKeyInfo, error) {
+ var keyInfo indexKeyInfo
+ isText := false
+ var order interface{}
+ for _, field := range key {
+ raw := field
+ if keyInfo.name != "" {
+ keyInfo.name += "_"
+ }
+ var kind string
+ if field != "" {
+ if field[0] == '$' {
+ if c := strings.Index(field, ":"); c > 1 && c < len(field)-1 {
+ kind = field[1:c]
+ field = field[c+1:]
+ keyInfo.name += field + "_" + kind
+ } else {
+ field = "\x00"
+ }
+ }
+ switch field[0] {
+ case 0:
+ // Logic above failed. Reset and error.
+ field = ""
+ case '@':
+ order = "2d"
+ field = field[1:]
+ // The shell used to render this field as key_ instead of key_2d,
+ // and mgo followed suit. This has been fixed in recent server
+ // releases, and mgo followed as well.
+ keyInfo.name += field + "_2d"
+ case '-':
+ order = -1
+ field = field[1:]
+ keyInfo.name += field + "_-1"
+ case '+':
+ field = field[1:]
+ fallthrough
+ default:
+ if kind == "" {
+ order = 1
+ keyInfo.name += field + "_1"
+ } else {
+ order = kind
+ }
+ }
+ }
+ if field == "" || kind != "" && order != kind {
+ return nil, fmt.Errorf(`invalid index key: want "[$<kind>:][-]<field name>", got %q`, raw)
+ }
+ if kind == "text" {
+ if !isText {
+ keyInfo.key = append(keyInfo.key, bson.DocElem{"_fts", "text"}, bson.DocElem{"_ftsx", 1})
+ isText = true
+ }
+ keyInfo.weights = append(keyInfo.weights, bson.DocElem{field, 1})
+ } else {
+ keyInfo.key = append(keyInfo.key, bson.DocElem{field, order})
+ }
+ }
+ if keyInfo.name == "" {
+ return nil, errors.New("invalid index key: no fields provided")
+ }
+ return &keyInfo, nil
+}
+
+// EnsureIndexKey ensures an index with the given key exists, creating it
+// if necessary.
+//
+// This example:
+//
+// err := collection.EnsureIndexKey("a", "b")
+//
+// Is equivalent to:
+//
+// err := collection.EnsureIndex(mgo.Index{Key: []string{"a", "b"}})
+//
+// See the EnsureIndex method for more details.
+func (c *Collection) EnsureIndexKey(key ...string) error {
+ return c.EnsureIndex(Index{Key: key})
+}
+
+// EnsureIndex ensures an index with the given key exists, creating it with
+// the provided parameters if necessary. EnsureIndex does not modify a previously
+// existing index with a matching key. The old index must be dropped first instead.
+//
+// Once EnsureIndex returns successfully, following requests for the same index
+// will not contact the server unless Collection.DropIndex is used to drop the
+// same index, or Session.ResetIndexCache is called.
+//
+// For example:
+//
+// index := Index{
+// Key: []string{"lastname", "firstname"},
+// Unique: true,
+// DropDups: true,
+// Background: true, // See notes.
+// Sparse: true,
+// }
+// err := collection.EnsureIndex(index)
+//
+// The Key value determines which fields compose the index. The index ordering
+// will be ascending by default. To obtain an index with a descending order,
+// the field name should be prefixed by a dash (e.g. []string{"-time"}). It can
+// also be optionally prefixed by an index kind, as in "$text:summary" or
+// "$2d:-point". The key string format is:
+//
+// [$<kind>:][-]<field name>
+//
+// If the Unique field is true, the index must necessarily contain only a single
+// document per Key. With DropDups set to true, documents with the same key
+// as a previously indexed one will be dropped rather than an error returned.
+//
+// If Background is true, other connections will be allowed to proceed using
+// the collection without the index while it's being built. Note that the
+// session executing EnsureIndex will be blocked for as long as it takes for
+// the index to be built.
+//
+// If Sparse is true, only documents containing the provided Key fields will be
+// included in the index. When using a sparse index for sorting, only indexed
+// documents will be returned.
+//
+// If ExpireAfter is non-zero, the server will periodically scan the collection
+// and remove documents containing an indexed time.Time field with a value
+// older than ExpireAfter. See the documentation for details:
+//
+// http://docs.mongodb.org/manual/tutorial/expire-data
+//
+// Other kinds of indexes are also supported through that API. Here is an example:
+//
+// index := Index{
+// Key: []string{"$2d:loc"},
+// Bits: 26,
+// }
+// err := collection.EnsureIndex(index)
+//
+// The example above requests the creation of a "2d" index for the "loc" field.
+//
+// The 2D index bounds may be changed using the Min and Max attributes of the
+// Index value. The default bound setting of (-180, 180) is suitable for
+// latitude/longitude pairs.
+//
+// The Bits parameter sets the precision of the 2D geohash values. If not
+// provided, 26 bits are used, which is roughly equivalent to 1 foot of
+// precision for the default (-180, 180) index bounds.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Indexes
+// http://www.mongodb.org/display/DOCS/Indexing+Advice+and+FAQ
+// http://www.mongodb.org/display/DOCS/Indexing+as+a+Background+Operation
+// http://www.mongodb.org/display/DOCS/Geospatial+Indexing
+// http://www.mongodb.org/display/DOCS/Multikeys
+//
+func (c *Collection) EnsureIndex(index Index) error {
+ keyInfo, err := parseIndexKey(index.Key)
+ if err != nil {
+ return err
+ }
+
+ session := c.Database.Session
+ cacheKey := c.FullName + "\x00" + keyInfo.name
+ if session.cluster().HasCachedIndex(cacheKey) {
+ return nil
+ }
+
+ spec := indexSpec{
+ Name: keyInfo.name,
+ NS: c.FullName,
+ Key: keyInfo.key,
+ Unique: index.Unique,
+ DropDups: index.DropDups,
+ Background: index.Background,
+ Sparse: index.Sparse,
+ Bits: index.Bits,
+ Min: index.Minf,
+ Max: index.Maxf,
+ BucketSize: index.BucketSize,
+ ExpireAfter: int(index.ExpireAfter / time.Second),
+ Weights: keyInfo.weights,
+ DefaultLanguage: index.DefaultLanguage,
+ LanguageOverride: index.LanguageOverride,
+ Collation: index.Collation,
+ }
+
+ if spec.Min == 0 && spec.Max == 0 {
+ spec.Min = float64(index.Min)
+ spec.Max = float64(index.Max)
+ }
+
+ if index.Name != "" {
+ spec.Name = index.Name
+ }
+
+NextField:
+ for name, weight := range index.Weights {
+ for i, elem := range spec.Weights {
+ if elem.Name == name {
+ spec.Weights[i].Value = weight
+ continue NextField
+ }
+ }
+ panic("weight provided for field that is not part of index key: " + name)
+ }
+
+ cloned := session.Clone()
+ defer cloned.Close()
+ cloned.SetMode(Strong, false)
+ cloned.EnsureSafe(&Safe{})
+ db := c.Database.With(cloned)
+
+ // Try with a command first.
+ err = db.Run(bson.D{{"createIndexes", c.Name}, {"indexes", []indexSpec{spec}}}, nil)
+ if isNoCmd(err) {
+ // Command not yet supported. Insert into the indexes collection instead.
+ err = db.C("system.indexes").Insert(&spec)
+ }
+ if err == nil {
+ session.cluster().CacheIndex(cacheKey, true)
+ }
+ return err
+}
+
+// DropIndex drops the index with the provided key from the c collection.
+//
+// See EnsureIndex for details on the accepted key variants.
+//
+// For example:
+//
+// err1 := collection.DropIndex("firstField", "-secondField")
+// err2 := collection.DropIndex("customIndexName")
+//
+func (c *Collection) DropIndex(key ...string) error {
+ keyInfo, err := parseIndexKey(key)
+ if err != nil {
+ return err
+ }
+
+ session := c.Database.Session
+ cacheKey := c.FullName + "\x00" + keyInfo.name
+ session.cluster().CacheIndex(cacheKey, false)
+
+ session = session.Clone()
+ defer session.Close()
+ session.SetMode(Strong, false)
+
+ db := c.Database.With(session)
+ result := struct {
+ ErrMsg string
+ Ok bool
+ }{}
+ err = db.Run(bson.D{{"dropIndexes", c.Name}, {"index", keyInfo.name}}, &result)
+ if err != nil {
+ return err
+ }
+ if !result.Ok {
+ return errors.New(result.ErrMsg)
+ }
+ return nil
+}
+
+// DropIndexName removes the index with the provided index name.
+//
+// For example:
+//
+// err := collection.DropIndex("customIndexName")
+//
+func (c *Collection) DropIndexName(name string) error {
+ session := c.Database.Session
+
+ session = session.Clone()
+ defer session.Close()
+ session.SetMode(Strong, false)
+
+ c = c.With(session)
+
+ indexes, err := c.Indexes()
+ if err != nil {
+ return err
+ }
+
+ var index Index
+ for _, idx := range indexes {
+ if idx.Name == name {
+ index = idx
+ break
+ }
+ }
+
+ if index.Name != "" {
+ keyInfo, err := parseIndexKey(index.Key)
+ if err != nil {
+ return err
+ }
+
+ cacheKey := c.FullName + "\x00" + keyInfo.name
+ session.cluster().CacheIndex(cacheKey, false)
+ }
+
+ result := struct {
+ ErrMsg string
+ Ok bool
+ }{}
+ err = c.Database.Run(bson.D{{"dropIndexes", c.Name}, {"index", name}}, &result)
+ if err != nil {
+ return err
+ }
+ if !result.Ok {
+ return errors.New(result.ErrMsg)
+ }
+ return nil
+}
+
+// nonEventual returns a clone of session and ensures it is not Eventual.
+// This guarantees that the server used for a query can be reused
+// afterwards when its cursor is consumed.
+func (session *Session) nonEventual() *Session {
+ cloned := session.Clone()
+ if cloned.consistency == Eventual {
+ cloned.SetMode(Monotonic, false)
+ }
+ return cloned
+}
+
+// Indexes returns a list of all indexes for the collection.
+//
+// For example, this snippet would drop all available indexes:
+//
+// indexes, err := collection.Indexes()
+// if err != nil {
+// return err
+// }
+// for _, index := range indexes {
+// err = collection.DropIndex(index.Key...)
+// if err != nil {
+// return err
+// }
+// }
+//
+// See the EnsureIndex method for more details on indexes.
+func (c *Collection) Indexes() (indexes []Index, err error) {
+ cloned := c.Database.Session.nonEventual()
+ defer cloned.Close()
+
+ batchSize := int(cloned.queryConfig.op.limit)
+
+ // Try with a command.
+ var result struct {
+ Indexes []bson.Raw
+ Cursor cursorData
+ }
+ var iter *Iter
+ err = c.Database.With(cloned).Run(bson.D{{"listIndexes", c.Name}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result)
+ if err == nil {
+ firstBatch := result.Indexes
+ if firstBatch == nil {
+ firstBatch = result.Cursor.FirstBatch
+ }
+ ns := strings.SplitN(result.Cursor.NS, ".", 2)
+ if len(ns) < 2 {
+ iter = c.With(cloned).NewIter(nil, firstBatch, result.Cursor.Id, nil)
+ } else {
+ iter = cloned.DB(ns[0]).C(ns[1]).NewIter(nil, firstBatch, result.Cursor.Id, nil)
+ }
+ } else if isNoCmd(err) {
+ // Command not yet supported. Query the database instead.
+ iter = c.Database.C("system.indexes").Find(bson.M{"ns": c.FullName}).Iter()
+ } else {
+ return nil, err
+ }
+
+ var spec indexSpec
+ for iter.Next(&spec) {
+ indexes = append(indexes, indexFromSpec(spec))
+ }
+ if err = iter.Close(); err != nil {
+ return nil, err
+ }
+ sort.Sort(indexSlice(indexes))
+ return indexes, nil
+}
+
+func indexFromSpec(spec indexSpec) Index {
+ index := Index{
+ Name: spec.Name,
+ Key: simpleIndexKey(spec.Key),
+ Unique: spec.Unique,
+ DropDups: spec.DropDups,
+ Background: spec.Background,
+ Sparse: spec.Sparse,
+ Minf: spec.Min,
+ Maxf: spec.Max,
+ Bits: spec.Bits,
+ BucketSize: spec.BucketSize,
+ DefaultLanguage: spec.DefaultLanguage,
+ LanguageOverride: spec.LanguageOverride,
+ ExpireAfter: time.Duration(spec.ExpireAfter) * time.Second,
+ Collation: spec.Collation,
+ }
+ if float64(int(spec.Min)) == spec.Min && float64(int(spec.Max)) == spec.Max {
+ index.Min = int(spec.Min)
+ index.Max = int(spec.Max)
+ }
+ if spec.TextIndexVersion > 0 {
+ index.Key = make([]string, len(spec.Weights))
+ index.Weights = make(map[string]int)
+ for i, elem := range spec.Weights {
+ index.Key[i] = "$text:" + elem.Name
+ if w, ok := elem.Value.(int); ok {
+ index.Weights[elem.Name] = w
+ }
+ }
+ }
+ return index
+}
+
+type indexSlice []Index
+
+func (idxs indexSlice) Len() int { return len(idxs) }
+func (idxs indexSlice) Less(i, j int) bool { return idxs[i].Name < idxs[j].Name }
+func (idxs indexSlice) Swap(i, j int) { idxs[i], idxs[j] = idxs[j], idxs[i] }
+
+func simpleIndexKey(realKey bson.D) (key []string) {
+ for i := range realKey {
+ field := realKey[i].Name
+ vi, ok := realKey[i].Value.(int)
+ if !ok {
+ vf, _ := realKey[i].Value.(float64)
+ vi = int(vf)
+ }
+ if vi == 1 {
+ key = append(key, field)
+ continue
+ }
+ if vi == -1 {
+ key = append(key, "-"+field)
+ continue
+ }
+ if vs, ok := realKey[i].Value.(string); ok {
+ key = append(key, "$"+vs+":"+field)
+ continue
+ }
+ panic("Got unknown index key type for field " + field)
+ }
+ return
+}
+
+// ResetIndexCache clears the cache of previously ensured indexes.
+// Following requests to EnsureIndex will contact the server.
+func (s *Session) ResetIndexCache() {
+ s.cluster().ResetIndexCache()
+}
+
+// New creates a new session with the same parameters as the original
+// session, including consistency, batch size, prefetching, safety mode,
+// etc. The returned session will use sockets from the pool, so there's
+// a chance that writes just performed in another session may not yet
+// be visible.
+//
+// Login information from the original session will not be copied over
+// into the new session unless it was provided through the initial URL
+// for the Dial function.
+//
+// See the Copy and Clone methods.
+//
+func (s *Session) New() *Session {
+ s.m.Lock()
+ scopy := copySession(s, false)
+ s.m.Unlock()
+ scopy.Refresh()
+ return scopy
+}
+
+// Copy works just like New, but preserves the exact authentication
+// information from the original session.
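+//
+// A common pattern is to copy the session per unit of work and close
+// the copy when done (a sketch):
+//
+//     worker := session.Copy()
+//     defer worker.Close()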
+func (s *Session) Copy() *Session {
+ s.m.Lock()
+ scopy := copySession(s, true)
+ s.m.Unlock()
+ scopy.Refresh()
+ return scopy
+}
+
+// Clone works just like Copy, but also reuses the same socket as the original
+// session, in case it had already reserved one due to its consistency
+// guarantees. This behavior ensures that writes performed in the old session
+// are necessarily observed when using the new session, as long as it was a
+// strong or monotonic session. That said, it also means that long operations
+// may cause other goroutines using the original session to wait.
+func (s *Session) Clone() *Session {
+ s.m.Lock()
+ scopy := copySession(s, true)
+ s.m.Unlock()
+ return scopy
+}
+
+// Close terminates the session. It's a runtime error to use a session
+// after it has been closed.
+func (s *Session) Close() {
+ s.m.Lock()
+ if s.cluster_ != nil {
+ debugf("Closing session %p", s)
+ s.unsetSocket()
+ s.cluster_.Release()
+ s.cluster_ = nil
+ }
+ s.m.Unlock()
+}
+
+func (s *Session) cluster() *mongoCluster {
+ if s.cluster_ == nil {
+ panic("Session already closed")
+ }
+ return s.cluster_
+}
+
+// Refresh puts back any reserved sockets in use and restarts the consistency
+// guarantees according to the current consistency setting for the session.
+func (s *Session) Refresh() {
+ s.m.Lock()
+ s.slaveOk = s.consistency != Strong
+ s.unsetSocket()
+ s.m.Unlock()
+}
+
+// SetMode changes the consistency mode for the session.
+//
+// The default mode is Strong.
+//
+// In the Strong consistency mode reads and writes will always be made to
+// the primary server using a unique connection so that reads and writes are
+// fully consistent, ordered, and observing the most up-to-date data.
+// This offers the least benefit in terms of distributing load, but the
+// most guarantees. See also Monotonic and Eventual.
+//
+// In the Monotonic consistency mode reads may not be entirely up-to-date,
+// but they will always see the history of changes moving forward, the data
+// read will be consistent across sequential queries in the same session,
+// and modifications made within the session will be observed in following
+// queries (read-your-writes).
+//
+// In practice, the Monotonic mode is obtained by performing initial reads
+// on a unique connection to an arbitrary secondary, if one is available,
+// and once the first write happens, the session connection is switched over
+// to the primary server. This manages to distribute some of the reading
+// load with secondaries, while maintaining some useful guarantees.
+//
+// In the Eventual consistency mode reads will be made to any secondary in the
+// cluster, if one is available, and sequential reads will not necessarily
+// be made with the same connection. This means that data may be observed
+// out of order. Writes will of course be issued to the primary, but
+// independent writes in the same Eventual session may also be made with
+// independent connections, so there are also no guarantees in terms of
+// write ordering (no read-your-writes guarantees either).
+//
+// The Eventual mode is the fastest and most resource-friendly, but is
+// also the one offering the least guarantees about ordering of the data
+// read and written.
+//
+// If refresh is true, in addition to ensuring the session is in the given
+// consistency mode, the consistency guarantees will also be reset (e.g.
+// a Monotonic session will be allowed to read from secondaries again).
+// This is equivalent to calling the Refresh function.
+//
+// Shifting between Monotonic and Strong modes will keep a previously
+// reserved connection for the session unless refresh is true or the
+// connection is unsuitable (to a secondary server in a Strong session).
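+//
+// For example, a read-heavy session may be relaxed so that reads can
+// be served by secondaries (a sketch):
+//
+//     session.SetMode(mgo.Monotonic, true)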
+func (s *Session) SetMode(consistency Mode, refresh bool) {
+ s.m.Lock()
+ debugf("Session %p: setting mode %d with refresh=%v (master=%p, slave=%p)", s, consistency, refresh, s.masterSocket, s.slaveSocket)
+ s.consistency = consistency
+ if refresh {
+ s.slaveOk = s.consistency != Strong
+ s.unsetSocket()
+ } else if s.consistency == Strong {
+ s.slaveOk = false
+ } else if s.masterSocket == nil {
+ s.slaveOk = true
+ }
+ s.m.Unlock()
+}
+
+// Mode returns the current consistency mode for the session.
+func (s *Session) Mode() Mode {
+ s.m.RLock()
+ mode := s.consistency
+ s.m.RUnlock()
+ return mode
+}
+
+// SetSyncTimeout sets the amount of time an operation with this session
+// will wait before returning an error in case a connection to a usable
+// server can't be established. Set it to zero to wait forever. The
+// default value is 7 seconds.
+func (s *Session) SetSyncTimeout(d time.Duration) {
+ s.m.Lock()
+ s.syncTimeout = d
+ s.m.Unlock()
+}
+
+// SetSocketTimeout sets the amount of time to wait for a non-responding
+// socket to the database before it is forcefully closed.
+//
+// The default timeout is 1 minute.
+func (s *Session) SetSocketTimeout(d time.Duration) {
+ s.m.Lock()
+ s.sockTimeout = d
+ if s.masterSocket != nil {
+ s.masterSocket.SetTimeout(d)
+ }
+ if s.slaveSocket != nil {
+ s.slaveSocket.SetTimeout(d)
+ }
+ s.m.Unlock()
+}
+
+// SetCursorTimeout changes the standard timeout period that the server
+// enforces on created cursors. The only supported value right now is
+// 0, which disables the timeout. The standard server timeout is 10 minutes.
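+//
+// For example:
+//
+//     session.SetCursorTimeout(0)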
+func (s *Session) SetCursorTimeout(d time.Duration) {
+ s.m.Lock()
+ if d == 0 {
+ s.queryConfig.op.flags |= flagNoCursorTimeout
+ } else {
+ panic("SetCursorTimeout: only 0 (disable timeout) supported for now")
+ }
+ s.m.Unlock()
+}
+
+// SetPoolLimit sets the maximum number of sockets in use in a single server
+// before this session will block waiting for a socket to be available.
+// The default limit is 4096.
+//
+// This limit must be set to cover more than any expected workload of the
+// application. It is a bad practice and an unsupported use case to use the
+// database driver to define the concurrency limit of an application. Prevent
+// such concurrency "at the door" instead, by properly restricting the amount
+// of used resources and number of goroutines before they are created.
+func (s *Session) SetPoolLimit(limit int) {
+ s.m.Lock()
+ s.poolLimit = limit
+ s.m.Unlock()
+}
+
+// SetBypassValidation sets whether the server should bypass the registered
+// validation expressions that run when documents are inserted or modified
+// and that exist to preserve invariants in the collection being modified.
+// The default is to not bypass, and thus to perform the validation
+// expressions registered for modified collections.
+//
+// Document validation was introduced in MongoDB 3.2.
+//
+// Relevant documentation:
+//
+// https://docs.mongodb.org/manual/release-notes/3.2/#bypass-validation
+//
+func (s *Session) SetBypassValidation(bypass bool) {
+ s.m.Lock()
+ s.bypassValidation = bypass
+ s.m.Unlock()
+}
+
+// SetBatch sets the default batch size used when fetching documents from the
+// database. It's possible to change this setting on a per-query basis as
+// well, using the Query.Batch method.
+//
+// The default batch size is defined by the database itself. As of this
+// writing, MongoDB will use an initial size of min(100 docs, 4MB) on the
+// first batch, and 4MB on remaining ones.
+func (s *Session) SetBatch(n int) {
+ if n == 1 {
+ // Server interprets 1 as -1 and closes the cursor (!?)
+ n = 2
+ }
+ s.m.Lock()
+ s.queryConfig.op.limit = int32(n)
+ s.m.Unlock()
+}
+
+// SetPrefetch sets the default point at which the next batch of results will be
+// requested. When there are p*batch_size remaining documents cached in an
+// Iter, the next batch will be requested in background. For instance, when
+// using this:
+//
+// session.SetBatch(200)
+// session.SetPrefetch(0.25)
+//
+// and there are only 50 documents cached in the Iter to be processed, the
+// next batch of 200 will be requested. It's possible to change this setting on
+// a per-query basis as well, using the Prefetch method of Query.
+//
+// The default prefetch value is 0.25.
+func (s *Session) SetPrefetch(p float64) {
+ s.m.Lock()
+ s.queryConfig.prefetch = p
+ s.m.Unlock()
+}
+
+// See SetSafe for details on the Safe type.
+type Safe struct {
+ W int // Min # of servers to ack before success
+ WMode string // Write mode for MongoDB 2.0+ (e.g. "majority")
+ WTimeout int // Milliseconds to wait for W before timing out
+ FSync bool // Sync via the journal if present, or via data files sync otherwise
+ J bool // Sync via the journal if present
+}
+
+// Safe returns the current safety mode for the session.
+func (s *Session) Safe() (safe *Safe) {
+ s.m.Lock()
+ defer s.m.Unlock()
+ if s.safeOp != nil {
+ cmd := s.safeOp.query.(*getLastError)
+ safe = &Safe{WTimeout: cmd.WTimeout, FSync: cmd.FSync, J: cmd.J}
+ switch w := cmd.W.(type) {
+ case string:
+ safe.WMode = w
+ case int:
+ safe.W = w
+ }
+ }
+ return
+}
+
+// SetSafe changes the session safety mode.
+//
+// If the safe parameter is nil, the session is put in unsafe mode, and writes
+// become fire-and-forget, without error checking. The unsafe mode is faster
+// since operations won't hold on waiting for a confirmation.
+//
+// If the safe parameter is not nil, any changing query (insert, update, ...)
+// will be followed by a getLastError command with the specified parameters,
+// to ensure the request was correctly processed.
+//
+// The default is &Safe{}, meaning check for errors and use the default
+// behavior for all fields.
+//
+// The safe.W parameter determines how many servers should confirm a write
+// before the operation is considered successful. If set to 0 or 1, the
+// command will return as soon as the primary is done with the request.
+// If safe.WTimeout is greater than zero, it determines how many milliseconds
+// to wait for the safe.W servers to respond before returning an error.
+//
+// Starting with MongoDB 2.0.0 the safe.WMode parameter can be used instead
+// of W to request for richer semantics. If set to "majority" the server will
+// wait for a majority of members from the replica set to respond before
+// returning. Custom modes may also be defined within the server to create
+// very detailed placement schemas. See the data awareness documentation in
+// the links below for more details (note that MongoDB internally reuses the
+// "w" field name for WMode).
+//
+// If safe.J is true, servers will block until write operations have been
+// committed to the journal. Cannot be used in combination with FSync. Prior
+// to MongoDB 2.6 this option was ignored if the server was running without
+// journaling. Starting with MongoDB 2.6 write operations will fail with an
+// exception if this option is used when the server is running without
+// journaling.
+//
+// If safe.FSync is true and the server is running without journaling, blocks
+// until the server has synced all data files to disk. If the server is running
+// with journaling, this acts the same as the J option, blocking until write
+// operations have been committed to the journal. Cannot be used in
+// combination with J.
+//
+// Since MongoDB 2.0.0, the safe.J option can also be used instead of FSync
+// to force the server to wait for a group commit in case journaling is
+// enabled. The option has no effect if the server has journaling disabled.
+//
+// For example, the following statement will make the session check for
+// errors, without imposing further constraints:
+//
+// session.SetSafe(&mgo.Safe{})
+//
+// The following statement will force the server to wait for a majority of
+// members of a replica set to return (MongoDB 2.0+ only):
+//
+// session.SetSafe(&mgo.Safe{WMode: "majority"})
+//
+// The following statement, on the other hand, ensures that at least two
+// servers have flushed the change to disk before confirming the success
+// of operations:
+//
+// session.EnsureSafe(&mgo.Safe{W: 2, FSync: true})
+//
+// The following statement, on the other hand, disables the verification
+// of errors entirely:
+//
+// session.SetSafe(nil)
+//
+// See also the EnsureSafe method.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/getLastError+Command
+// http://www.mongodb.org/display/DOCS/Verifying+Propagation+of+Writes+with+getLastError
+// http://www.mongodb.org/display/DOCS/Data+Center+Awareness
+//
+func (s *Session) SetSafe(safe *Safe) {
+ s.m.Lock()
+ s.safeOp = nil
+ s.ensureSafe(safe)
+ s.m.Unlock()
+}
+
+// EnsureSafe compares the provided safety parameters with the ones
+// currently in use by the session and picks the most conservative
+// choice for each setting.
+//
+// That is:
+//
+// - safe.WMode is always used if set.
+// - safe.W is used if larger than the current W and WMode is empty.
+// - safe.FSync is always used if true.
+// - safe.J is used if FSync is false.
+// - safe.WTimeout is used if set and smaller than the current WTimeout.
+//
+// For example, the following statement will ensure the session is
+// at least checking for errors, without enforcing further constraints.
+// If a more conservative SetSafe or EnsureSafe call was previously done,
+// the following call will be ignored.
+//
+// session.EnsureSafe(&mgo.Safe{})
+//
+// See also the SetSafe method for details on what each option means.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/getLastError+Command
+// http://www.mongodb.org/display/DOCS/Verifying+Propagation+of+Writes+with+getLastError
+// http://www.mongodb.org/display/DOCS/Data+Center+Awareness
+//
+func (s *Session) EnsureSafe(safe *Safe) {
+ s.m.Lock()
+ s.ensureSafe(safe)
+ s.m.Unlock()
+}
+
+func (s *Session) ensureSafe(safe *Safe) {
+ if safe == nil {
+ return
+ }
+
+ var w interface{}
+ if safe.WMode != "" {
+ w = safe.WMode
+ } else if safe.W > 0 {
+ w = safe.W
+ }
+
+ var cmd getLastError
+ if s.safeOp == nil {
+ cmd = getLastError{1, w, safe.WTimeout, safe.FSync, safe.J}
+ } else {
+ // Copy. We don't want to mutate the existing query.
+ cmd = *(s.safeOp.query.(*getLastError))
+ if cmd.W == nil {
+ cmd.W = w
+ } else if safe.WMode != "" {
+ cmd.W = safe.WMode
+ } else if i, ok := cmd.W.(int); ok && safe.W > i {
+ cmd.W = safe.W
+ }
+ if safe.WTimeout > 0 && safe.WTimeout < cmd.WTimeout {
+ cmd.WTimeout = safe.WTimeout
+ }
+ if safe.FSync {
+ cmd.FSync = true
+ cmd.J = false
+ } else if safe.J && !cmd.FSync {
+ cmd.J = true
+ }
+ }
+ s.safeOp = &queryOp{
+ query: &cmd,
+ collection: "admin.$cmd",
+ limit: -1,
+ }
+}
+
+// Run issues the provided command on the "admin" database and
+// unmarshals its result in the respective argument. The cmd
+// argument may be either a string with the command name itself, in
+// which case an empty document of the form bson.M{cmd: 1} will be used,
+// or it may be a full command document.
+//
+// Note that MongoDB considers the first marshalled key as the command
+// name, so when providing a command with options, it's important to
+// use an ordering-preserving document, such as a struct value or an
+// instance of bson.D. For instance:
+//
+// db.Run(bson.D{{"create", "mycollection"}, {"size", 1024}})
+//
+// For commands on arbitrary databases, see the Run method in
+// the Database type.
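+//
+// As a quick sketch, a bare string command may be issued as well; the
+// "buildInfo" command is used here purely as an illustration:
+//
+// var info bson.M
+// err := session.Run("buildInfo", &info)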
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Commands
+// http://www.mongodb.org/display/DOCS/List+of+Database+Commands
+//
+func (s *Session) Run(cmd interface{}, result interface{}) error {
+ return s.DB("admin").Run(cmd, result)
+}
+
+// SelectServers restricts communication to servers configured with the
+// given tags. For example, the following statement restricts servers
+// used for reading operations to those with both tag "disk" set to
+// "ssd" and tag "rack" set to 1:
+//
+// session.SelectServers(bson.D{{"disk", "ssd"}, {"rack", 1}})
+//
+// Multiple sets of tags may be provided, in which case the used server
+// must match all tags within any one set.
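+//
+// For example, the following sketch provides two alternative tag sets,
+// so servers matching either set may be used (tag values illustrative):
+//
+// session.SelectServers(bson.D{{"disk", "ssd"}}, bson.D{{"rack", 1}})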
+//
+// If a connection was previously assigned to the session due to the
+// current session mode (see Session.SetMode), the tag selection will
+// only be enforced after the session is refreshed.
+//
+// Relevant documentation:
+//
+// http://docs.mongodb.org/manual/tutorial/configure-replica-set-tag-sets
+//
+func (s *Session) SelectServers(tags ...bson.D) {
+ s.m.Lock()
+ s.queryConfig.op.serverTags = tags
+ s.m.Unlock()
+}
+
+// Ping runs a trivial ping command just to get in touch with the server.
+func (s *Session) Ping() error {
+ return s.Run("ping", nil)
+}
+
+// Fsync flushes in-memory writes to disk on the server the session
+// is established with. If async is true, the call returns immediately,
+// otherwise it returns after the flush has been made.
+func (s *Session) Fsync(async bool) error {
+ return s.Run(bson.D{{"fsync", 1}, {"async", async}}, nil)
+}
+
+// FsyncLock locks all writes in the specific server the session is
+// established with and returns. Any writes attempted to the server
+// after it is successfully locked will block until FsyncUnlock is
+// called for the same server.
+//
+// This method works on secondaries as well, preventing the oplog from
+// being flushed while the server is locked, but since only the server
+// connected to is locked, for locking specific secondaries it may be
+// necessary to establish a connection directly to the secondary (see
+// Dial's connect=direct option).
+//
+// As an important caveat, note that once a write is attempted and
+// blocks, follow up reads will block as well due to the way the
+// lock is internally implemented in the server. More details at:
+//
+// https://jira.mongodb.org/browse/SERVER-4243
+//
+// FsyncLock is often used for performing consistent backups of
+// the database files on disk.
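+//
+// A minimal backup sketch (copyDataFiles stands in for whatever copies
+// the data files; it is not part of this package):
+//
+// if err := session.FsyncLock(); err != nil {
+// return err
+// }
+// defer session.FsyncUnlock()
+// copyDataFiles()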
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/fsync+Command
+// http://www.mongodb.org/display/DOCS/Backups
+//
+func (s *Session) FsyncLock() error {
+ return s.Run(bson.D{{"fsync", 1}, {"lock", true}}, nil)
+}
+
+// FsyncUnlock releases the server for writes. See FsyncLock for details.
+func (s *Session) FsyncUnlock() error {
+ err := s.Run(bson.D{{"fsyncUnlock", 1}}, nil)
+ if isNoCmd(err) {
+ err = s.DB("admin").C("$cmd.sys.unlock").Find(nil).One(nil) // WTF?
+ }
+ return err
+}
+
+// Find prepares a query using the provided document. The document may be a
+// map or a struct value capable of being marshalled with bson. The map
+// may be a generic one using interface{} for its key and/or values, such as
+// bson.M, or it may be a properly typed map. Providing nil as the document
+// is equivalent to providing an empty document such as bson.M{}.
+//
+// Further details of the query may be tweaked using the resulting Query value,
+// and then executed to retrieve results using methods such as One, For,
+// Iter, or Tail.
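+//
+// For example, a sketch assuming a Person struct with a Name field
+// (both illustrative):
+//
+// var result Person
+// err := collection.Find(bson.M{"name": "Ada"}).One(&result)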
+//
+// In case the resulting document includes a field named $err or errmsg, which
+// are standard ways for MongoDB to return query errors, the returned err will
+// be set to a *QueryError value including the Err message and the Code. In
+// those cases, the result argument is still unmarshalled into with the
+// received document so that any other custom values may be obtained if
+// desired.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Querying
+// http://www.mongodb.org/display/DOCS/Advanced+Queries
+//
+func (c *Collection) Find(query interface{}) *Query {
+ session := c.Database.Session
+ session.m.RLock()
+ q := &Query{session: session, query: session.queryConfig}
+ session.m.RUnlock()
+ q.op.query = query
+ q.op.collection = c.FullName
+ return q
+}
+
+type repairCmd struct {
+ RepairCursor string `bson:"repairCursor"`
+ Cursor *repairCmdCursor ",omitempty"
+}
+
+type repairCmdCursor struct {
+ BatchSize int `bson:"batchSize,omitempty"`
+}
+
+// Repair returns an iterator that goes over all recovered documents in the
+// collection, in a best-effort manner. This is most useful when there are
+// damaged data files. Multiple copies of the same document may be returned
+// by the iterator.
+//
+// Repair is supported in MongoDB 2.7.8 and later.
+func (c *Collection) Repair() *Iter {
+ // Clone session and set it to Monotonic mode so that the server
+ // used for the query may be safely obtained afterwards, if
+ // necessary for iteration when a cursor is received.
+ session := c.Database.Session
+ cloned := session.nonEventual()
+ defer cloned.Close()
+
+ batchSize := int(cloned.queryConfig.op.limit)
+
+ var result struct{ Cursor cursorData }
+
+ cmd := repairCmd{
+ RepairCursor: c.Name,
+ Cursor: &repairCmdCursor{batchSize},
+ }
+
+ clonedc := c.With(cloned)
+ err := clonedc.Database.Run(cmd, &result)
+ return clonedc.NewIter(session, result.Cursor.FirstBatch, result.Cursor.Id, err)
+}
+
+// FindId is a convenience helper equivalent to:
+//
+// query := collection.Find(bson.M{"_id": id})
+//
+// See the Find method for more details.
+func (c *Collection) FindId(id interface{}) *Query {
+ return c.Find(bson.D{{"_id", id}})
+}
+
+type Pipe struct {
+ session *Session
+ collection *Collection
+ pipeline interface{}
+ allowDisk bool
+ batchSize int
+}
+
+type pipeCmd struct {
+ Aggregate string
+ Pipeline interface{}
+ Cursor *pipeCmdCursor ",omitempty"
+ Explain bool ",omitempty"
+ AllowDisk bool "allowDiskUse,omitempty"
+}
+
+type pipeCmdCursor struct {
+ BatchSize int `bson:"batchSize,omitempty"`
+}
+
+// Pipe prepares a pipeline to aggregate. The pipeline document
+// must be a slice built in terms of the aggregation framework language.
+//
+// For example:
+//
+// pipe := collection.Pipe([]bson.M{{"$match": bson.M{"name": "Otavio"}}})
+// iter := pipe.Iter()
+//
+// Relevant documentation:
+//
+// http://docs.mongodb.org/manual/reference/aggregation
+// http://docs.mongodb.org/manual/applications/aggregation
+// http://docs.mongodb.org/manual/tutorial/aggregation-examples
+//
+func (c *Collection) Pipe(pipeline interface{}) *Pipe {
+ session := c.Database.Session
+ session.m.RLock()
+ batchSize := int(session.queryConfig.op.limit)
+ session.m.RUnlock()
+ return &Pipe{
+ session: session,
+ collection: c,
+ pipeline: pipeline,
+ batchSize: batchSize,
+ }
+}
+
+// Iter executes the pipeline and returns an iterator capable of going
+// over all the generated results.
+func (p *Pipe) Iter() *Iter {
+ // Clone session and set it to Monotonic mode so that the server
+ // used for the query may be safely obtained afterwards, if
+ // necessary for iteration when a cursor is received.
+ cloned := p.session.nonEventual()
+ defer cloned.Close()
+ c := p.collection.With(cloned)
+
+ var result struct {
+ Result []bson.Raw // 2.4, no cursors.
+ Cursor cursorData // 2.6+, with cursors.
+ }
+
+ cmd := pipeCmd{
+ Aggregate: c.Name,
+ Pipeline: p.pipeline,
+ AllowDisk: p.allowDisk,
+ Cursor: &pipeCmdCursor{p.batchSize},
+ }
+ err := c.Database.Run(cmd, &result)
+ if e, ok := err.(*QueryError); ok && e.Message == `unrecognized field "cursor` {
+ cmd.Cursor = nil
+ cmd.AllowDisk = false
+ err = c.Database.Run(cmd, &result)
+ }
+ firstBatch := result.Result
+ if firstBatch == nil {
+ firstBatch = result.Cursor.FirstBatch
+ }
+ return c.NewIter(p.session, firstBatch, result.Cursor.Id, err)
+}
+
+// NewIter returns a newly created iterator with the provided parameters.
+// Using this method is not recommended unless the desired functionality
+// is not yet exposed via a more convenient interface (Find, Pipe, etc).
+//
+// The optional session parameter associates the lifetime of the returned
+// iterator to an arbitrary session. If nil, the iterator will be bound to
+// c's session.
+//
+// Documents in firstBatch will be individually provided by the returned
+// iterator before documents from cursorId are made available. If cursorId
+// is zero, only the documents in firstBatch are provided.
+//
+// If err is not nil, the iterator's Err method will report it after
+// exhausting documents in firstBatch.
+//
+// NewIter must be called right after the cursor id is obtained, and must not
+// be called on a collection in Eventual mode, because the cursor id is
+// associated with the specific server that returned it. The provided session
+// parameter may be in any mode or state, though.
+//
+func (c *Collection) NewIter(session *Session, firstBatch []bson.Raw, cursorId int64, err error) *Iter {
+ var server *mongoServer
+ csession := c.Database.Session
+ csession.m.RLock()
+ socket := csession.masterSocket
+ if socket == nil {
+ socket = csession.slaveSocket
+ }
+ if socket != nil {
+ server = socket.Server()
+ }
+ csession.m.RUnlock()
+
+ if server == nil {
+ if csession.Mode() == Eventual {
+ panic("Collection.NewIter called in Eventual mode")
+ }
+ if err == nil {
+ err = errors.New("server not available")
+ }
+ }
+
+ if session == nil {
+ session = csession
+ }
+
+ iter := &Iter{
+ session: session,
+ server: server,
+ timeout: -1,
+ err: err,
+ }
+ iter.gotReply.L = &iter.m
+ for _, doc := range firstBatch {
+ iter.docData.Push(doc.Data)
+ }
+ if cursorId != 0 {
+ iter.op.cursorId = cursorId
+ iter.op.collection = c.FullName
+ iter.op.replyFunc = iter.replyFunc()
+ }
+ return iter
+}
+
+// All works like Iter.All.
+func (p *Pipe) All(result interface{}) error {
+ return p.Iter().All(result)
+}
+
+// One executes the pipeline and unmarshals the first item from the
+// result set into the result parameter.
+// It returns ErrNotFound if no items are generated by the pipeline.
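+//
+// For example (the pipeline and result type are illustrative):
+//
+// var doc bson.M
+// err := collection.Pipe([]bson.M{{"$sort": bson.M{"n": -1}}}).One(&doc)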
+func (p *Pipe) One(result interface{}) error {
+ iter := p.Iter()
+ if iter.Next(result) {
+ return nil
+ }
+ if err := iter.Err(); err != nil {
+ return err
+ }
+ return ErrNotFound
+}
+
+// Explain returns a number of details about how the MongoDB server would
+// execute the requested pipeline, such as the number of objects examined,
+// the number of times the read lock was yielded to allow writes to go in,
+// and so on.
+//
+// For example:
+//
+// var m bson.M
+// err := collection.Pipe(pipeline).Explain(&m)
+// if err == nil {
+// fmt.Printf("Explain: %#v\n", m)
+// }
+//
+func (p *Pipe) Explain(result interface{}) error {
+ c := p.collection
+ cmd := pipeCmd{
+ Aggregate: c.Name,
+ Pipeline: p.pipeline,
+ AllowDisk: p.allowDisk,
+ Explain: true,
+ }
+ return c.Database.Run(cmd, result)
+}
+
+// AllowDiskUse enables writing to the "<dbpath>/_tmp" server directory so
+// that aggregation pipelines do not have to be held entirely in memory.
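+//
+// For example, assuming pipeline is already defined:
+//
+// iter := collection.Pipe(pipeline).AllowDiskUse().Iter()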
+func (p *Pipe) AllowDiskUse() *Pipe {
+ p.allowDisk = true
+ return p
+}
+
+// Batch sets the batch size used when fetching documents from the database.
+// It's possible to change this setting on a per-session basis as well, using
+// the SetBatch method of Session.
+//
+// The default batch size is defined by the database server.
+func (p *Pipe) Batch(n int) *Pipe {
+ p.batchSize = n
+ return p
+}
+
+// mgo.v3: Use a single user-visible error type.
+
+type LastError struct {
+ Err string
+ Code, N, Waited int
+ FSyncFiles int `bson:"fsyncFiles"`
+ WTimeout bool
+ UpdatedExisting bool `bson:"updatedExisting"`
+ UpsertedId interface{} `bson:"upserted"`
+
+ modified int
+ ecases []BulkErrorCase
+}
+
+func (err *LastError) Error() string {
+ return err.Err
+}
+
+type queryError struct {
+ Err string "$err"
+ ErrMsg string
+ Assertion string
+ Code int
+ AssertionCode int "assertionCode"
+}
+
+type QueryError struct {
+ Code int
+ Message string
+ Assertion bool
+}
+
+func (err *QueryError) Error() string {
+ return err.Message
+}
+
+// IsDup returns whether err informs of a duplicate key error because
+// a primary key index or a secondary unique index already has an entry
+// with the given value.
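+//
+// A typical usage sketch (doc assumed to be defined):
+//
+// err := collection.Insert(doc)
+// if mgo.IsDup(err) {
+// // Already present; treat the insert as a no-op.
+// }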
+func IsDup(err error) bool {
+ // Besides being handy, helps with MongoDB bugs SERVER-7164 and SERVER-11493.
+ // What follows makes me sad. Hopefully conventions will be more clear over time.
+ switch e := err.(type) {
+ case *LastError:
+ return e.Code == 11000 || e.Code == 11001 || e.Code == 12582 || e.Code == 16460 && strings.Contains(e.Err, " E11000 ")
+ case *QueryError:
+ return e.Code == 11000 || e.Code == 11001 || e.Code == 12582
+ case *BulkError:
+ for _, ecase := range e.ecases {
+ if !IsDup(ecase.Err) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// Insert inserts one or more documents in the respective collection. In
+// case the session is in safe mode (see the SetSafe method) and an error
+// happens while inserting the provided documents, the returned error will
+// be of type *LastError.
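+//
+// For example:
+//
+// err := collection.Insert(bson.M{"name": "Ada"}, bson.M{"name": "Grace"})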
+func (c *Collection) Insert(docs ...interface{}) error {
+ _, err := c.writeOp(&insertOp{c.FullName, docs, 0}, true)
+ return err
+}
+
+// Update finds a single document matching the provided selector document
+// and modifies it according to the update document.
+// If the session is in safe mode (see SetSafe) an ErrNotFound error is
+// returned if a document isn't found, or a value of type *LastError
+// when some other error is detected.
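+//
+// For example, a sketch incrementing a counter field on one document
+// (id assumed to be defined):
+//
+// err := collection.Update(bson.M{"_id": id}, bson.M{"$inc": bson.M{"n": 1}})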
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Updating
+// http://www.mongodb.org/display/DOCS/Atomic+Operations
+//
+func (c *Collection) Update(selector interface{}, update interface{}) error {
+ if selector == nil {
+ selector = bson.D{}
+ }
+ op := updateOp{
+ Collection: c.FullName,
+ Selector: selector,
+ Update: update,
+ }
+ lerr, err := c.writeOp(&op, true)
+ if err == nil && lerr != nil && !lerr.UpdatedExisting {
+ return ErrNotFound
+ }
+ return err
+}
+
+// UpdateId is a convenience helper equivalent to:
+//
+// err := collection.Update(bson.M{"_id": id}, update)
+//
+// See the Update method for more details.
+func (c *Collection) UpdateId(id interface{}, update interface{}) error {
+ return c.Update(bson.D{{"_id", id}}, update)
+}
+
+// ChangeInfo holds details about the outcome of an update operation.
+type ChangeInfo struct {
+ // Updated reports the number of existing documents modified.
+ // Due to server limitations, this reports the same value as the Matched field when
+ // talking to MongoDB <= 2.4 and on Upsert and Apply (findAndModify) operations.
+ Updated int
+ Removed int // Number of documents removed
+ Matched int // Number of documents matched but not necessarily changed
+ UpsertedId interface{} // Upserted _id field, when not explicitly provided
+}
+
+// UpdateAll finds all documents matching the provided selector document
+// and modifies them according to the update document.
+// If the session is in safe mode (see SetSafe) details of the executed
+// operation are returned in info or an error of type *LastError when
+// some problem is detected. It is not an error for the update to not be
+// applied on any documents because the selector doesn't match.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Updating
+// http://www.mongodb.org/display/DOCS/Atomic+Operations
+//
+func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info *ChangeInfo, err error) {
+ if selector == nil {
+ selector = bson.D{}
+ }
+ op := updateOp{
+ Collection: c.FullName,
+ Selector: selector,
+ Update: update,
+ Flags: 2,
+ Multi: true,
+ }
+ lerr, err := c.writeOp(&op, true)
+ if err == nil && lerr != nil {
+ info = &ChangeInfo{Updated: lerr.modified, Matched: lerr.N}
+ }
+ return info, err
+}
+
+// Upsert finds a single document matching the provided selector document
+// and modifies it according to the update document. If no document matching
+// the selector is found, the update document is applied to the selector
+// document and the result is inserted in the collection.
+// If the session is in safe mode (see SetSafe) details of the executed
+// operation are returned in info, or an error of type *LastError when
+// some problem is detected.
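+//
+// For example, a sketch that sets a value by key, inserting the document
+// if the key is absent (k and v assumed to be defined):
+//
+// info, err := collection.Upsert(bson.M{"key": k}, bson.M{"$set": bson.M{"value": v}})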
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Updating
+// http://www.mongodb.org/display/DOCS/Atomic+Operations
+//
+func (c *Collection) Upsert(selector interface{}, update interface{}) (info *ChangeInfo, err error) {
+ if selector == nil {
+ selector = bson.D{}
+ }
+ op := updateOp{
+ Collection: c.FullName,
+ Selector: selector,
+ Update: update,
+ Flags: 1,
+ Upsert: true,
+ }
+ var lerr *LastError
+ for i := 0; i < maxUpsertRetries; i++ {
+ lerr, err = c.writeOp(&op, true)
+ // Retry duplicate key errors on upserts.
+ // https://docs.mongodb.com/v3.2/reference/method/db.collection.update/#use-unique-indexes
+ if !IsDup(err) {
+ break
+ }
+ }
+ if err == nil && lerr != nil {
+ info = &ChangeInfo{}
+ if lerr.UpdatedExisting {
+ info.Matched = lerr.N
+ info.Updated = lerr.modified
+ } else {
+ info.UpsertedId = lerr.UpsertedId
+ }
+ }
+ return info, err
+}
+
+// UpsertId is a convenience helper equivalent to:
+//
+// info, err := collection.Upsert(bson.M{"_id": id}, update)
+//
+// See the Upsert method for more details.
+func (c *Collection) UpsertId(id interface{}, update interface{}) (info *ChangeInfo, err error) {
+ return c.Upsert(bson.D{{"_id", id}}, update)
+}
+
+// Remove finds a single document matching the provided selector document
+// and removes it from the database.
+// If the session is in safe mode (see SetSafe) an ErrNotFound error is
+// returned if a document isn't found, or a value of type *LastError
+// when some other error is detected.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Removing
+//
+func (c *Collection) Remove(selector interface{}) error {
+ if selector == nil {
+ selector = bson.D{}
+ }
+ lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 1, 1}, true)
+ if err == nil && lerr != nil && lerr.N == 0 {
+ return ErrNotFound
+ }
+ return err
+}
+
+// RemoveId is a convenience helper equivalent to:
+//
+// err := collection.Remove(bson.M{"_id": id})
+//
+// See the Remove method for more details.
+func (c *Collection) RemoveId(id interface{}) error {
+ return c.Remove(bson.D{{"_id", id}})
+}
+
+// RemoveAll finds all documents matching the provided selector document
+// and removes them from the database. In case the session is in safe mode
+// (see the SetSafe method) and an error happens when attempting the change,
+// the returned error will be of type *LastError.
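+//
+// For example:
+//
+// info, err := collection.RemoveAll(bson.M{"expired": true})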
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Removing
+//
+func (c *Collection) RemoveAll(selector interface{}) (info *ChangeInfo, err error) {
+ if selector == nil {
+ selector = bson.D{}
+ }
+ lerr, err := c.writeOp(&deleteOp{c.FullName, selector, 0, 0}, true)
+ if err == nil && lerr != nil {
+ info = &ChangeInfo{Removed: lerr.N, Matched: lerr.N}
+ }
+ return info, err
+}
+
+// DropDatabase removes the entire database including all of its collections.
+func (db *Database) DropDatabase() error {
+ return db.Run(bson.D{{"dropDatabase", 1}}, nil)
+}
+
+// DropCollection removes the entire collection including all of its documents.
+func (c *Collection) DropCollection() error {
+ return c.Database.Run(bson.D{{"drop", c.Name}}, nil)
+}
+
+// The CollectionInfo type holds metadata about a collection.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/createCollection+Command
+// http://www.mongodb.org/display/DOCS/Capped+Collections
+//
+type CollectionInfo struct {
+ // DisableIdIndex prevents the automatic creation of the index
+ // on the _id field for the collection.
+ DisableIdIndex bool
+
+ // ForceIdIndex enforces the automatic creation of the index
+ // on the _id field for the collection. Capped collections,
+ // for example, do not have such an index by default.
+ ForceIdIndex bool
+
+ // If Capped is true new documents will replace old ones when
+ // the collection is full. MaxBytes must necessarily be set
+ // to define the size when the collection wraps around.
+ // MaxDocs optionally defines the number of documents when it
+ // wraps, but MaxBytes still needs to be set.
+ Capped bool
+ MaxBytes int
+ MaxDocs int
+
+ // Validator contains a validation expression that defines which
+ // documents should be considered valid for this collection.
+ Validator interface{}
+
+ // ValidationLevel may be set to "strict" (the default) to force
+ // MongoDB to validate all documents on inserts and updates, to
+ // "moderate" to apply the validation rules only to documents
+ // that already fulfill the validation criteria, or to "off" for
+ // disabling validation entirely.
+ ValidationLevel string
+
+ // ValidationAction determines how MongoDB handles documents that
+ // violate the validation rules. It may be set to "error" (the default)
+ // to reject inserts or updates that violate the rules, or to "warn"
+ // to log invalid operations but allow them to proceed.
+ ValidationAction string
+
+ // StorageEngine allows specifying collection options for the
+ // storage engine in use. The map keys must hold the storage engine
+ // name for which options are being specified.
+ StorageEngine interface{}
+}
+
+// Create explicitly creates the c collection with details of info.
+// MongoDB creates collections automatically on use, so this method
+// is only necessary when creating collections with non-default
+// characteristics, such as capped collections.
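+//
+// For example, a sketch creating a capped collection limited to 1MB:
+//
+// info := &mgo.CollectionInfo{Capped: true, MaxBytes: 1024 * 1024}
+// err := collection.Create(info)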
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/createCollection+Command
+// http://www.mongodb.org/display/DOCS/Capped+Collections
+//
+func (c *Collection) Create(info *CollectionInfo) error {
+ cmd := make(bson.D, 0, 4)
+ cmd = append(cmd, bson.DocElem{"create", c.Name})
+ if info.Capped {
+ if info.MaxBytes < 1 {
+ return fmt.Errorf("Collection.Create: with Capped, MaxBytes must also be set")
+ }
+ cmd = append(cmd, bson.DocElem{"capped", true})
+ cmd = append(cmd, bson.DocElem{"size", info.MaxBytes})
+ if info.MaxDocs > 0 {
+ cmd = append(cmd, bson.DocElem{"max", info.MaxDocs})
+ }
+ }
+ if info.DisableIdIndex {
+ cmd = append(cmd, bson.DocElem{"autoIndexId", false})
+ }
+ if info.ForceIdIndex {
+ cmd = append(cmd, bson.DocElem{"autoIndexId", true})
+ }
+ if info.Validator != nil {
+ cmd = append(cmd, bson.DocElem{"validator", info.Validator})
+ }
+ if info.ValidationLevel != "" {
+ cmd = append(cmd, bson.DocElem{"validationLevel", info.ValidationLevel})
+ }
+ if info.ValidationAction != "" {
+ cmd = append(cmd, bson.DocElem{"validationAction", info.ValidationAction})
+ }
+ if info.StorageEngine != nil {
+ cmd = append(cmd, bson.DocElem{"storageEngine", info.StorageEngine})
+ }
+ return c.Database.Run(cmd, nil)
+}
+
+// Batch sets the batch size used when fetching documents from the database.
+// It's possible to change this setting on a per-session basis as well, using
+// the SetBatch method of Session.
+//
+// The default batch size is defined by the database itself. As of this
+// writing, MongoDB will use an initial size of min(100 docs, 4MB) on the
+// first batch, and 4MB on remaining ones.
+func (q *Query) Batch(n int) *Query {
+ if n == 1 {
+ // Server interprets 1 as -1 and closes the cursor (!?)
+ n = 2
+ }
+ q.m.Lock()
+ q.op.limit = int32(n)
+ q.m.Unlock()
+ return q
+}
+
+// Prefetch sets the point at which the next batch of results will be requested.
+// When there are p*batch_size remaining documents cached in an Iter, the next
+// batch will be requested in background. For instance, when using this:
+//
+// query.Batch(200).Prefetch(0.25)
+//
+// and there are only 50 documents cached in the Iter to be processed, the
+// next batch of 200 will be requested. It's possible to change this setting on
+// a per-session basis as well, using the SetPrefetch method of Session.
+//
+// The default prefetch value is 0.25.
+func (q *Query) Prefetch(p float64) *Query {
+ q.m.Lock()
+ q.prefetch = p
+ q.m.Unlock()
+ return q
+}
+
+// Skip skips over the n initial documents from the query results. Note that
+// this only makes sense with capped collections where documents are naturally
+// ordered by insertion time, or with sorted results.
+func (q *Query) Skip(n int) *Query {
+ q.m.Lock()
+ q.op.skip = int32(n)
+ q.m.Unlock()
+ return q
+}
+
+// Limit restricts the maximum number of documents retrieved to n, and also
+// changes the batch size to the same value. Once n documents have been
+// returned by Next, the following call will return ErrNotFound.
+func (q *Query) Limit(n int) *Query {
+ q.m.Lock()
+ switch {
+ case n == 1:
+ q.limit = 1
+ q.op.limit = -1
+ case n == math.MinInt32: // -MinInt32 overflows back to MinInt32
+ q.limit = math.MaxInt32
+ q.op.limit = math.MinInt32 + 1
+ case n < 0:
+ q.limit = int32(-n)
+ q.op.limit = int32(n)
+ default:
+ q.limit = int32(n)
+ q.op.limit = int32(n)
+ }
+ q.m.Unlock()
+ return q
+}
+
+// Select enables selecting which fields should be retrieved for the results
+// found. For example, the following query would only retrieve the name field:
+//
+// err := collection.Find(nil).Select(bson.M{"name": 1}).One(&result)
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Retrieving+a+Subset+of+Fields
+//
+func (q *Query) Select(selector interface{}) *Query {
+ q.m.Lock()
+ q.op.selector = selector
+ q.m.Unlock()
+ return q
+}
+
+// Sort asks the database to order returned documents according to the
+// provided field names. A field name may be prefixed by - (minus) for
+// it to be sorted in reverse order.
+//
+// For example:
+//
+// query1 := collection.Find(nil).Sort("firstname", "lastname")
+// query2 := collection.Find(nil).Sort("-age")
+// query3 := collection.Find(nil).Sort("$natural")
+// query4 := collection.Find(nil).Select(bson.M{"score": bson.M{"$meta": "textScore"}}).Sort("$textScore:score")
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order
+//
+func (q *Query) Sort(fields ...string) *Query {
+ q.m.Lock()
+ var order bson.D
+ for _, field := range fields {
+ n := 1
+ var kind string
+ if field != "" {
+ if field[0] == '$' {
+ if c := strings.Index(field, ":"); c > 1 && c < len(field)-1 {
+ kind = field[1:c]
+ field = field[c+1:]
+ }
+ }
+ switch field[0] {
+ case '+':
+ field = field[1:]
+ case '-':
+ n = -1
+ field = field[1:]
+ }
+ }
+ if field == "" {
+ panic("Sort: empty field name")
+ }
+ if kind == "textScore" {
+ order = append(order, bson.DocElem{field, bson.M{"$meta": kind}})
+ } else {
+ order = append(order, bson.DocElem{field, n})
+ }
+ }
+ q.op.options.OrderBy = order
+ q.op.hasOptions = true
+ q.m.Unlock()
+ return q
+}
+
+// Explain returns a number of details about how the MongoDB server would
+// execute the requested query, such as the number of objects examined,
+// the number of times the read lock was yielded to allow writes to go in,
+// and so on.
+//
+// For example:
+//
+// m := bson.M{}
+// err := collection.Find(bson.M{"filename": name}).Explain(m)
+// if err == nil {
+// fmt.Printf("Explain: %#v\n", m)
+// }
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Optimization
+// http://www.mongodb.org/display/DOCS/Query+Optimizer
+//
+func (q *Query) Explain(result interface{}) error {
+ q.m.Lock()
+ clone := &Query{session: q.session, query: q.query}
+ q.m.Unlock()
+ clone.op.options.Explain = true
+ clone.op.hasOptions = true
+ if clone.op.limit > 0 {
+ clone.op.limit = -q.op.limit
+ }
+ iter := clone.Iter()
+ if iter.Next(result) {
+ return nil
+ }
+ return iter.Close()
+}
+
+// TODO: Add Collection.Explain. See https://goo.gl/1MDlvz.
+
+// Hint will include an explicit "hint" in the query to force the server
+// to use a specified index, potentially improving performance in some
+// situations. The provided parameters are the fields that compose the
+// key of the index to be used. For details on how the indexKey may be
+// built, see the EnsureIndex method.
+//
+// For example:
+//
+// query := collection.Find(bson.M{"firstname": "Joe", "lastname": "Winter"})
+// query.Hint("lastname", "firstname")
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Optimization
+// http://www.mongodb.org/display/DOCS/Query+Optimizer
+//
+func (q *Query) Hint(indexKey ...string) *Query {
+ q.m.Lock()
+ keyInfo, err := parseIndexKey(indexKey)
+ q.op.options.Hint = keyInfo.key
+ q.op.hasOptions = true
+ q.m.Unlock()
+ if err != nil {
+ panic(err)
+ }
+ return q
+}
+
+// SetMaxScan constrains the query to stop after scanning the specified
+// number of documents.
+//
+// This modifier is generally used to prevent potentially long running
+// queries from disrupting performance by scanning through too much data.
+func (q *Query) SetMaxScan(n int) *Query {
+ q.m.Lock()
+ q.op.options.MaxScan = n
+ q.op.hasOptions = true
+ q.m.Unlock()
+ return q
+}
+
+// SetMaxTime constrains the query to stop after running for the specified time.
+//
+// When the time limit is reached MongoDB automatically cancels the query.
+// This can be used to efficiently prevent and identify unexpectedly slow queries.
+//
+// A few important notes about the mechanism enforcing this limit:
+//
+// - Requests can block behind locking operations on the server, and that blocking
+// time is not accounted for. In other words, the timer starts ticking only after
+// the actual start of the query when it initially acquires the appropriate lock;
+//
+// - Operations are interrupted only at interrupt points where an operation can be
+// safely aborted – the total execution time may exceed the specified value;
+//
+// - The limit can be applied to both CRUD operations and commands, but not all
+// commands are interruptible;
+//
+// - While iterating over results, computing follow up batches is included in the
+// total time and the iteration continues until the allotted time is over, but
+// network roundtrips are not taken into account for the limit;
+//
+// - This limit does not override the inactive cursor timeout for idle cursors
+// (default is 10 min).
+//
+// This mechanism was introduced in MongoDB 2.6.
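+//
+// For example, a sketch bounding a query to two seconds (query and
+// result assumed to be defined):
+//
+// err := collection.Find(query).SetMaxTime(2 * time.Second).One(&result)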
+//
+// Relevant documentation:
+//
+// http://blog.mongodb.org/post/83621787773/maxtimems-and-query-optimizer-introspection-in
+//
+func (q *Query) SetMaxTime(d time.Duration) *Query {
+ q.m.Lock()
+ q.op.options.MaxTimeMS = int(d / time.Millisecond)
+ q.op.hasOptions = true
+ q.m.Unlock()
+ return q
+}
+
+// Snapshot will force the performed query to make use of an available
+// index on the _id field to prevent the same document from being returned
+// more than once in a single iteration. This might happen without this
+// setting in situations when the document changes in size and thus has to
+// be moved while the iteration is running.
+//
+// Because snapshot mode traverses the _id index, it may not be used with
+// sorting or explicit hints. It also cannot use any other index for the
+// query.
+//
+// Even with snapshot mode, items inserted or deleted during the query may
+// or may not be returned; that is, this mode is not a true point-in-time
+// snapshot.
+//
+// The same effect of Snapshot may be obtained by using any unique index on
+// field(s) that will not be modified (best to use Hint explicitly too).
+// A non-unique index (such as creation time) may be made unique by
+// appending _id to the index when creating it.
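+//
+// For example:
+//
+// iter := collection.Find(nil).Snapshot().Iter()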
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/How+to+do+Snapshotted+Queries+in+the+Mongo+Database
+//
+func (q *Query) Snapshot() *Query {
+ q.m.Lock()
+ q.op.options.Snapshot = true
+ q.op.hasOptions = true
+ q.m.Unlock()
+ return q
+}
+
+// Comment adds a comment to the query to identify it in the database profiler output.
+//
+// Relevant documentation:
+//
+// http://docs.mongodb.org/manual/reference/operator/meta/comment
+// http://docs.mongodb.org/manual/reference/command/profile
+// http://docs.mongodb.org/manual/administration/analyzing-mongodb-performance/#database-profiling
+//
+func (q *Query) Comment(comment string) *Query {
+ q.m.Lock()
+ q.op.options.Comment = comment
+ q.op.hasOptions = true
+ q.m.Unlock()
+ return q
+}
+
+// LogReplay enables an option that optimizes queries that are typically
+// made on the MongoDB oplog for replaying it. This is an internal
+// implementation aspect and most likely uninteresting for other uses.
+// It has seen at least one use case, though, so it's exposed via the API.
+func (q *Query) LogReplay() *Query {
+ q.m.Lock()
+ q.op.flags |= flagLogReplay
+ q.m.Unlock()
+ return q
+}
+
+func checkQueryError(fullname string, d []byte) error {
+ l := len(d)
+ if l < 16 {
+ return nil
+ }
+ if d[5] == '$' && d[6] == 'e' && d[7] == 'r' && d[8] == 'r' && d[9] == '\x00' && d[4] == '\x02' {
+ goto Error
+ }
+ if len(fullname) < 5 || fullname[len(fullname)-5:] != ".$cmd" {
+ return nil
+ }
+ for i := 0; i+8 < l; i++ {
+ if d[i] == '\x02' && d[i+1] == 'e' && d[i+2] == 'r' && d[i+3] == 'r' && d[i+4] == 'm' && d[i+5] == 's' && d[i+6] == 'g' && d[i+7] == '\x00' {
+ goto Error
+ }
+ }
+ return nil
+
+Error:
+ result := &queryError{}
+ bson.Unmarshal(d, result)
+ if result.Err == "" && result.ErrMsg == "" {
+ return nil
+ }
+ if result.AssertionCode != 0 && result.Assertion != "" {
+ return &QueryError{Code: result.AssertionCode, Message: result.Assertion, Assertion: true}
+ }
+ if result.Err != "" {
+ return &QueryError{Code: result.Code, Message: result.Err}
+ }
+ return &QueryError{Code: result.Code, Message: result.ErrMsg}
+}
+
+// One executes the query and unmarshals the first obtained document into the
+// result argument. The result must be a struct or map value capable of being
+// unmarshalled into by gobson. This function blocks until either a result
+// is available or an error happens. For example:
+//
+// err := collection.Find(bson.M{"a": 1}).One(&result)
+//
+// In case the resulting document includes a field named $err or errmsg, which
+// are standard ways for MongoDB to return query errors, the returned err will
+// be set to a *QueryError value including the Err message and the Code. In
+// those cases, the result argument is still unmarshalled into with the
+// received document so that any other custom values may be obtained if
+// desired.
+//
+func (q *Query) One(result interface{}) (err error) {
+ q.m.Lock()
+ session := q.session
+ op := q.op // Copy.
+ q.m.Unlock()
+
+ socket, err := session.acquireSocket(true)
+ if err != nil {
+ return err
+ }
+ defer socket.Release()
+
+ op.limit = -1
+
+ session.prepareQuery(&op)
+
+ expectFindReply := prepareFindOp(socket, &op, 1)
+
+ data, err := socket.SimpleQuery(&op)
+ if err != nil {
+ return err
+ }
+ if data == nil {
+ return ErrNotFound
+ }
+ if expectFindReply {
+ var findReply struct {
+ Ok bool
+ Code int
+ Errmsg string
+ Cursor cursorData
+ }
+ err = bson.Unmarshal(data, &findReply)
+ if err != nil {
+ return err
+ }
+ if !findReply.Ok && findReply.Errmsg != "" {
+ return &QueryError{Code: findReply.Code, Message: findReply.Errmsg}
+ }
+ if len(findReply.Cursor.FirstBatch) == 0 {
+ return ErrNotFound
+ }
+ data = findReply.Cursor.FirstBatch[0].Data
+ }
+ if result != nil {
+ err = bson.Unmarshal(data, result)
+ if err == nil {
+ debugf("Query %p document unmarshaled: %#v", q, result)
+ } else {
+ debugf("Query %p document unmarshaling failed: %#v", q, err)
+ return err
+ }
+ }
+ return checkQueryError(op.collection, data)
+}
+
+// prepareFindOp translates op from being an old-style wire protocol query into
+// a new-style find command if that's supported by the MongoDB server (3.2+).
+// It returns whether to expect a find command result or not. Note op may be
+// translated into an explain command, in which case the function returns false.
+func prepareFindOp(socket *mongoSocket, op *queryOp, limit int32) bool {
+ if socket.ServerInfo().MaxWireVersion < 4 || op.collection == "admin.$cmd" {
+ return false
+ }
+
+ nameDot := strings.Index(op.collection, ".")
+ if nameDot < 0 {
+ panic("invalid query collection name: " + op.collection)
+ }
+
+ find := findCmd{
+ Collection: op.collection[nameDot+1:],
+ Filter: op.query,
+ Projection: op.selector,
+ Sort: op.options.OrderBy,
+ Skip: op.skip,
+ Limit: limit,
+ MaxTimeMS: op.options.MaxTimeMS,
+ MaxScan: op.options.MaxScan,
+ Hint: op.options.Hint,
+ Comment: op.options.Comment,
+ Snapshot: op.options.Snapshot,
+ OplogReplay: op.flags&flagLogReplay != 0,
+ }
+ if op.limit < 0 {
+ find.BatchSize = -op.limit
+ find.SingleBatch = true
+ } else {
+ find.BatchSize = op.limit
+ }
+
+ explain := op.options.Explain
+
+ op.collection = op.collection[:nameDot] + ".$cmd"
+ op.query = &find
+ op.skip = 0
+ op.limit = -1
+ op.options = queryWrapper{}
+ op.hasOptions = false
+
+ if explain {
+ op.query = bson.D{{"explain", op.query}}
+ return false
+ }
+ return true
+}
+
+type cursorData struct {
+ FirstBatch []bson.Raw "firstBatch"
+ NextBatch []bson.Raw "nextBatch"
+ NS string
+ Id int64
+}
+
+// findCmd holds the command used for performing queries on MongoDB 3.2+.
+//
+// Relevant documentation:
+//
+// https://docs.mongodb.org/master/reference/command/find/#dbcmd.find
+//
+type findCmd struct {
+ Collection string `bson:"find"`
+ Filter interface{} `bson:"filter,omitempty"`
+ Sort interface{} `bson:"sort,omitempty"`
+ Projection interface{} `bson:"projection,omitempty"`
+ Hint interface{} `bson:"hint,omitempty"`
+ Skip interface{} `bson:"skip,omitempty"`
+ Limit int32 `bson:"limit,omitempty"`
+ BatchSize int32 `bson:"batchSize,omitempty"`
+ SingleBatch bool `bson:"singleBatch,omitempty"`
+ Comment string `bson:"comment,omitempty"`
+ MaxScan int `bson:"maxScan,omitempty"`
+ MaxTimeMS int `bson:"maxTimeMS,omitempty"`
+ ReadConcern interface{} `bson:"readConcern,omitempty"`
+ Max interface{} `bson:"max,omitempty"`
+ Min interface{} `bson:"min,omitempty"`
+ ReturnKey bool `bson:"returnKey,omitempty"`
+ ShowRecordId bool `bson:"showRecordId,omitempty"`
+ Snapshot bool `bson:"snapshot,omitempty"`
+ Tailable bool `bson:"tailable,omitempty"`
+ AwaitData bool `bson:"awaitData,omitempty"`
+ OplogReplay bool `bson:"oplogReplay,omitempty"`
+ NoCursorTimeout bool `bson:"noCursorTimeout,omitempty"`
+ AllowPartialResults bool `bson:"allowPartialResults,omitempty"`
+}
+
+// getMoreCmd holds the command used for requesting more query results on MongoDB 3.2+.
+//
+// Relevant documentation:
+//
+// https://docs.mongodb.org/master/reference/command/getMore/#dbcmd.getMore
+//
+type getMoreCmd struct {
+ CursorId int64 `bson:"getMore"`
+ Collection string `bson:"collection"`
+ BatchSize int32 `bson:"batchSize,omitempty"`
+ MaxTimeMS int64 `bson:"maxTimeMS,omitempty"`
+}
+
+// run duplicates the behavior of collection.Find(query).One(&result)
+// as performed by Database.Run, specializing the logic for running
+// database commands on a given socket.
+func (db *Database) run(socket *mongoSocket, cmd, result interface{}) (err error) {
+ // Database.Run:
+ if name, ok := cmd.(string); ok {
+ cmd = bson.D{{name, 1}}
+ }
+
+ // Collection.Find:
+ session := db.Session
+ session.m.RLock()
+ op := session.queryConfig.op // Copy.
+ session.m.RUnlock()
+ op.query = cmd
+ op.collection = db.Name + ".$cmd"
+
+ // Query.One:
+ session.prepareQuery(&op)
+ op.limit = -1
+
+ data, err := socket.SimpleQuery(&op)
+ if err != nil {
+ return err
+ }
+ if data == nil {
+ return ErrNotFound
+ }
+ if result != nil {
+ err = bson.Unmarshal(data, result)
+ if err != nil {
+ debugf("Run command unmarshaling failed: %#v", op, err)
+ return err
+ }
+ if globalDebug && globalLogger != nil {
+ var res bson.M
+ bson.Unmarshal(data, &res)
+ debugf("Run command unmarshaled: %#v, result: %#v", op, res)
+ }
+ }
+ return checkQueryError(op.collection, data)
+}
+
+// The DBRef type implements support for the database reference MongoDB
+// convention as supported by multiple drivers. This convention enables
+// cross-referencing documents between collections and databases using
+// a structure which includes a collection name, a document id, and
+// optionally a database name.
+//
+// See the FindRef methods on Session and on Database.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Database+References
+//
+type DBRef struct {
+ Collection string `bson:"$ref"`
+ Id interface{} `bson:"$id"`
+ Database string `bson:"$db,omitempty"`
+}
+
+// NOTE: Order of fields for DBRef above does matter, per documentation.
+
+// FindRef returns a query that looks for the document in the provided
+// reference. If the reference includes the DB field, the document will
+// be retrieved from the respective database.
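+//
+// For example (personId and result are illustrative):
+//
+// ref := &mgo.DBRef{Collection: "people", Id: personId}
+// err := db.FindRef(ref).One(&result)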
+//
+// See also the DBRef type and the FindRef method on Session.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Database+References
+//
+func (db *Database) FindRef(ref *DBRef) *Query {
+ var c *Collection
+ if ref.Database == "" {
+ c = db.C(ref.Collection)
+ } else {
+ c = db.Session.DB(ref.Database).C(ref.Collection)
+ }
+ return c.FindId(ref.Id)
+}
+
+// FindRef returns a query that looks for the document in the provided
+// reference. For a DBRef to be resolved correctly at the session level
+// it must necessarily have the optional DB field defined.
+//
+// See also the DBRef type and the FindRef method on Database.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Database+References
+//
+func (s *Session) FindRef(ref *DBRef) *Query {
+ if ref.Database == "" {
+ panic(fmt.Errorf("Can't resolve database for %#v", ref))
+ }
+ c := s.DB(ref.Database).C(ref.Collection)
+ return c.FindId(ref.Id)
+}
+
+// CollectionNames returns the collection names present in the db database.
+func (db *Database) CollectionNames() (names []string, err error) {
+ // Clone session and set it to Monotonic mode so that the server
+ // used for the query may be safely obtained afterwards, if
+ // necessary for iteration when a cursor is received.
+ cloned := db.Session.nonEventual()
+ defer cloned.Close()
+
+ batchSize := int(cloned.queryConfig.op.limit)
+
+ // Try with a command.
+ var result struct {
+ Collections []bson.Raw
+ Cursor cursorData
+ }
+ err = db.With(cloned).Run(bson.D{{"listCollections", 1}, {"cursor", bson.D{{"batchSize", batchSize}}}}, &result)
+ if err == nil {
+ firstBatch := result.Collections
+ if firstBatch == nil {
+ firstBatch = result.Cursor.FirstBatch
+ }
+ var iter *Iter
+ ns := strings.SplitN(result.Cursor.NS, ".", 2)
+ if len(ns) < 2 {
+ iter = db.With(cloned).C("").NewIter(nil, firstBatch, result.Cursor.Id, nil)
+ } else {
+ iter = cloned.DB(ns[0]).C(ns[1]).NewIter(nil, firstBatch, result.Cursor.Id, nil)
+ }
+ var coll struct{ Name string }
+ for iter.Next(&coll) {
+ names = append(names, coll.Name)
+ }
+ if err := iter.Close(); err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, err
+ }
+ if err != nil && !isNoCmd(err) {
+ return nil, err
+ }
+
+ // Command not yet supported. Query the database instead.
+ nameIndex := len(db.Name) + 1
+ iter := db.C("system.namespaces").Find(nil).Iter()
+ var coll struct{ Name string }
+ for iter.Next(&coll) {
+ if !strings.Contains(coll.Name, "$") || strings.Contains(coll.Name, ".oplog.$") {
+ names = append(names, coll.Name[nameIndex:])
+ }
+ }
+ if err := iter.Close(); err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+type dbNames struct {
+ Databases []struct {
+ Name string
+ Empty bool
+ }
+}
+
+// DatabaseNames returns the names of non-empty databases present in the cluster.
+func (s *Session) DatabaseNames() (names []string, err error) {
+ var result dbNames
+ err = s.Run("listDatabases", &result)
+ if err != nil {
+ return nil, err
+ }
+ for _, db := range result.Databases {
+ if !db.Empty {
+ names = append(names, db.Name)
+ }
+ }
+ sort.Strings(names)
+ return names, nil
+}
+
+// Iter executes the query and returns an iterator capable of going over all
+// the results. Results will be returned in batches of configurable
+// size (see the Batch method) and more documents will be requested when a
+// configurable number of documents is iterated over (see the Prefetch method).
+func (q *Query) Iter() *Iter {
+ q.m.Lock()
+ session := q.session
+ op := q.op
+ prefetch := q.prefetch
+ limit := q.limit
+ q.m.Unlock()
+
+ iter := &Iter{
+ session: session,
+ prefetch: prefetch,
+ limit: limit,
+ timeout: -1,
+ }
+ iter.gotReply.L = &iter.m
+ iter.op.collection = op.collection
+ iter.op.limit = op.limit
+ iter.op.replyFunc = iter.replyFunc()
+ iter.docsToReceive++
+
+ socket, err := session.acquireSocket(true)
+ if err != nil {
+ iter.err = err
+ return iter
+ }
+ defer socket.Release()
+
+ session.prepareQuery(&op)
+ op.replyFunc = iter.op.replyFunc
+
+ if prepareFindOp(socket, &op, limit) {
+ iter.findCmd = true
+ }
+
+ iter.server = socket.Server()
+ err = socket.Query(&op)
+ if err != nil {
+ // Must lock as the query is already out and it may call replyFunc.
+ iter.m.Lock()
+ iter.err = err
+ iter.m.Unlock()
+ }
+
+ return iter
+}
+
+// Tail returns a tailable iterator. Unlike a normal iterator, a
+// tailable iterator may wait for new values to be inserted in the
+// collection once the end of the current result set is reached.
+// A tailable iterator may only be used with capped collections.
+//
+// The timeout parameter indicates how long Next will block waiting
+// for a result before timing out. If set to -1, Next will not
+// timeout, and will continue waiting for a result for as long as
+// the cursor is valid and the session is not closed. If set to 0,
+// Next times out as soon as it reaches the end of the result set.
+// Otherwise, Next will wait for at least the given number of
+// seconds for a new document to be available before timing out.
+//
+// On timeouts, Next will unblock and return false, and the Timeout
+// method will return true if called. In these cases, Next may still
+// be called again on the same iterator to check if a new value is
+// available at the current cursor position, and again it will block
+// according to the specified timeoutSecs. If the cursor becomes
+// invalid, though, both Next and Timeout will return false and
+// the query must be restarted.
+//
+// The following example demonstrates timeout handling and query
+// restarting:
+//
+// iter := collection.Find(nil).Sort("$natural").Tail(5 * time.Second)
+// for {
+// for iter.Next(&result) {
+// fmt.Println(result.Id)
+// lastId = result.Id
+// }
+// if iter.Err() != nil {
+// return iter.Close()
+// }
+// if iter.Timeout() {
+// continue
+// }
+// query := collection.Find(bson.M{"_id": bson.M{"$gt": lastId}})
+// iter = query.Sort("$natural").Tail(5 * time.Second)
+// }
+// iter.Close()
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Tailable+Cursors
+// http://www.mongodb.org/display/DOCS/Capped+Collections
+// http://www.mongodb.org/display/DOCS/Sorting+and+Natural+Order
+//
+func (q *Query) Tail(timeout time.Duration) *Iter {
+ q.m.Lock()
+ session := q.session
+ op := q.op
+ prefetch := q.prefetch
+ q.m.Unlock()
+
+ iter := &Iter{session: session, prefetch: prefetch}
+ iter.gotReply.L = &iter.m
+ iter.timeout = timeout
+ iter.op.collection = op.collection
+ iter.op.limit = op.limit
+ iter.op.replyFunc = iter.replyFunc()
+ iter.docsToReceive++
+ session.prepareQuery(&op)
+ op.replyFunc = iter.op.replyFunc
+ op.flags |= flagTailable | flagAwaitData
+
+ socket, err := session.acquireSocket(true)
+ if err != nil {
+ iter.err = err
+ } else {
+ iter.server = socket.Server()
+ err = socket.Query(&op)
+ if err != nil {
+ // Must lock as the query is already out and it may call replyFunc.
+ iter.m.Lock()
+ iter.err = err
+ iter.m.Unlock()
+ }
+ socket.Release()
+ }
+ return iter
+}
+
+func (s *Session) prepareQuery(op *queryOp) {
+ s.m.RLock()
+ op.mode = s.consistency
+ if s.slaveOk {
+ op.flags |= flagSlaveOk
+ }
+ s.m.RUnlock()
+}
+
+// Err returns nil if no errors happened during iteration, or the actual
+// error otherwise.
+//
+// In case a resulting document included a field named $err or errmsg, which are
+// standard ways for MongoDB to report an improper query, the returned value has
+// a *QueryError type, and includes the Err message and the Code.
+func (iter *Iter) Err() error {
+ iter.m.Lock()
+ err := iter.err
+ iter.m.Unlock()
+ if err == ErrNotFound {
+ return nil
+ }
+ return err
+}
+
+// Close kills the server cursor used by the iterator, if any, and returns
+// nil if no errors happened during iteration, or the actual error otherwise.
+//
+// Server cursors are automatically closed at the end of an iteration, which
+// means close will do nothing unless the iteration was interrupted before
+// the server finished sending results to the driver. If Close is not called
+// in such a situation, the cursor will remain available at the server until
+// the default cursor timeout period is reached. No further problems arise.
+//
+// Close is idempotent. That means it can be called repeatedly and will
+// return the same result every time.
+//
+// In case a resulting document included a field named $err or errmsg, which are
+// standard ways for MongoDB to report an improper query, the returned value has
+// a *QueryError type.
+func (iter *Iter) Close() error {
+ iter.m.Lock()
+ cursorId := iter.op.cursorId
+ iter.op.cursorId = 0
+ err := iter.err
+ iter.m.Unlock()
+ if cursorId == 0 {
+ if err == ErrNotFound {
+ return nil
+ }
+ return err
+ }
+ socket, err := iter.acquireSocket()
+ if err == nil {
+ // TODO Batch kills.
+ err = socket.Query(&killCursorsOp{[]int64{cursorId}})
+ socket.Release()
+ }
+
+ iter.m.Lock()
+ if err != nil && (iter.err == nil || iter.err == ErrNotFound) {
+ iter.err = err
+ } else if iter.err != ErrNotFound {
+ err = iter.err
+ }
+ iter.m.Unlock()
+ return err
+}
+
+// Done returns true only if a follow up Next call is guaranteed
+// to return false.
+//
+// For an iterator created with Tail, Done may return false for
+// an iterator that has no more data. Otherwise it's guaranteed
+// to return false only if there is data or an error happened.
+//
+// Done may block waiting for a pending query to verify whether
+// more data is actually available or not.
+func (iter *Iter) Done() bool {
+ iter.m.Lock()
+ defer iter.m.Unlock()
+
+ for {
+ if iter.docData.Len() > 0 {
+ return false
+ }
+ if iter.docsToReceive > 1 {
+ return true
+ }
+ if iter.docsToReceive > 0 {
+ iter.gotReply.Wait()
+ continue
+ }
+ return iter.op.cursorId == 0
+ }
+}
+
+// Timeout returns true if Next returned false due to a timeout of
+// a tailable cursor. In those cases, Next may be called again to continue
+// the iteration at the previous cursor position.
+func (iter *Iter) Timeout() bool {
+ iter.m.Lock()
+ result := iter.timedout
+ iter.m.Unlock()
+ return result
+}
+
+// Next retrieves the next document from the result set, blocking if necessary.
+// This method will also automatically retrieve another batch of documents from
+// the server when the current one is exhausted, or before that in background
+// if pre-fetching is enabled (see the Query.Prefetch and Session.SetPrefetch
+// methods).
+//
+// Next returns true if a document was successfully unmarshalled onto result,
+// and false at the end of the result set or if an error happened.
+// When Next returns false, the Err method should be called to verify if
+// there was an error during iteration.
+//
+// For example:
+//
+// iter := collection.Find(nil).Iter()
+// for iter.Next(&result) {
+// fmt.Printf("Result: %v\n", result.Id)
+// }
+// if err := iter.Close(); err != nil {
+// return err
+// }
+//
+func (iter *Iter) Next(result interface{}) bool {
+ iter.m.Lock()
+ iter.timedout = false
+ timeout := time.Time{}
+ for iter.err == nil && iter.docData.Len() == 0 && (iter.docsToReceive > 0 || iter.op.cursorId != 0) {
+ if iter.docsToReceive == 0 {
+ if iter.timeout >= 0 {
+ if timeout.IsZero() {
+ timeout = time.Now().Add(iter.timeout)
+ }
+ if time.Now().After(timeout) {
+ iter.timedout = true
+ iter.m.Unlock()
+ return false
+ }
+ }
+ iter.getMore()
+ if iter.err != nil {
+ break
+ }
+ }
+ iter.gotReply.Wait()
+ }
+
+ // Exhaust available data before reporting any errors.
+ if docData, ok := iter.docData.Pop().([]byte); ok {
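+ // Note: close records whether the limit was exhausted, so the server
+ // cursor can be killed via Close after the lock is released below.
+ // It shadows the close builtin within this block.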
+ close := false
+ if iter.limit > 0 {
+ iter.limit--
+ if iter.limit == 0 {
+ if iter.docData.Len() > 0 {
+ iter.m.Unlock()
+ panic(fmt.Errorf("data remains after limit exhausted: %d", iter.docData.Len()))
+ }
+ iter.err = ErrNotFound
+ close = true
+ }
+ }
+ if iter.op.cursorId != 0 && iter.err == nil {
+ iter.docsBeforeMore--
+ if iter.docsBeforeMore == -1 {
+ iter.getMore()
+ }
+ }
+ iter.m.Unlock()
+
+ if close {
+ iter.Close()
+ }
+ err := bson.Unmarshal(docData, result)
+ if err != nil {
+ debugf("Iter %p document unmarshaling failed: %#v", iter, err)
+ iter.m.Lock()
+ if iter.err == nil {
+ iter.err = err
+ }
+ iter.m.Unlock()
+ return false
+ }
+ debugf("Iter %p document unmarshaled: %#v", iter, result)
+ // XXX Only have to check first document for a query error?
+ err = checkQueryError(iter.op.collection, docData)
+ if err != nil {
+ iter.m.Lock()
+ if iter.err == nil {
+ iter.err = err
+ }
+ iter.m.Unlock()
+ return false
+ }
+ return true
+ } else if iter.err != nil {
+ debugf("Iter %p returning false: %s", iter, iter.err)
+ iter.m.Unlock()
+ return false
+ } else if iter.op.cursorId == 0 {
+ iter.err = ErrNotFound
+ debugf("Iter %p exhausted with cursor=0", iter)
+ iter.m.Unlock()
+ return false
+ }
+
+ panic("unreachable")
+}
+
+// All retrieves all documents from the result set into the provided slice
+// and closes the iterator.
+//
+// The result argument must be the address of a slice. The slice
+// may be nil or previously allocated.
+//
+// WARNING: All must not be used with result sets that may be large, since
+// it may consume all available memory until the system crashes. Consider
+// building the query with a Limit clause to ensure the result size is
+// bounded.
+//
+// For instance:
+//
+// var result []struct{ Value int }
+// iter := collection.Find(nil).Limit(100).Iter()
+// err := iter.All(&result)
+// if err != nil {
+// return err
+// }
+//
+func (iter *Iter) All(result interface{}) error {
+ resultv := reflect.ValueOf(result)
+ if resultv.Kind() != reflect.Ptr || resultv.Elem().Kind() != reflect.Slice {
+ panic("result argument must be a slice address")
+ }
+ slicev := resultv.Elem()
+ slicev = slicev.Slice(0, slicev.Cap())
+ elemt := slicev.Type().Elem()
+ i := 0
+ for {
+ if slicev.Len() == i {
+ elemp := reflect.New(elemt)
+ if !iter.Next(elemp.Interface()) {
+ break
+ }
+ slicev = reflect.Append(slicev, elemp.Elem())
+ slicev = slicev.Slice(0, slicev.Cap())
+ } else {
+ if !iter.Next(slicev.Index(i).Addr().Interface()) {
+ break
+ }
+ }
+ i++
+ }
+ resultv.Elem().Set(slicev.Slice(0, i))
+ return iter.Close()
+}
+
+// All works like Iter.All.
+func (q *Query) All(result interface{}) error {
+ return q.Iter().All(result)
+}
+
+// The For method is obsolete and will be removed in a future release.
+// See Iter as an elegant replacement.
+func (q *Query) For(result interface{}, f func() error) error {
+ return q.Iter().For(result, f)
+}
+
+// The For method is obsolete and will be removed in a future release.
+// See Iter as an elegant replacement.
+func (iter *Iter) For(result interface{}, f func() error) (err error) {
+ valid := false
+ v := reflect.ValueOf(result)
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ switch v.Kind() {
+ case reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice:
+ valid = v.IsNil()
+ }
+ }
+ if !valid {
+ panic("For needs a pointer to nil reference value. See the documentation.")
+ }
+ zero := reflect.Zero(v.Type())
+ for {
+ v.Set(zero)
+ if !iter.Next(result) {
+ break
+ }
+ err = f()
+ if err != nil {
+ return err
+ }
+ }
+ return iter.Err()
+}
+
+// acquireSocket acquires a socket from the same server that the iterator
+// cursor was obtained from.
+//
+// WARNING: This method must not be called with iter.m locked. Acquiring the
+// socket depends on the cluster sync loop, and the cluster sync loop might
+// attempt actions which cause replyFunc to be called, inducing a deadlock.
+func (iter *Iter) acquireSocket() (*mongoSocket, error) {
+ socket, err := iter.session.acquireSocket(true)
+ if err != nil {
+ return nil, err
+ }
+ if socket.Server() != iter.server {
+ // Socket server changed during iteration. This may happen
+ // with Eventual sessions, if a Refresh is done, or if a
+ // monotonic session gets a write and shifts from secondary
+ // to primary. Our cursor is in a specific server, though.
+ iter.session.m.Lock()
+ sockTimeout := iter.session.sockTimeout
+ iter.session.m.Unlock()
+ socket.Release()
+ socket, _, err = iter.server.AcquireSocket(0, sockTimeout)
+ if err != nil {
+ return nil, err
+ }
+ err := iter.session.socketLogin(socket)
+ if err != nil {
+ socket.Release()
+ return nil, err
+ }
+ }
+ return socket, nil
+}
+
+func (iter *Iter) getMore() {
+ // Increment now so that unlocking the iterator won't cause a
+ // different goroutine to get here as well.
+ iter.docsToReceive++
+ iter.m.Unlock()
+ socket, err := iter.acquireSocket()
+ iter.m.Lock()
+ if err != nil {
+ iter.err = err
+ return
+ }
+ defer socket.Release()
+
+ debugf("Iter %p requesting more documents", iter)
+ if iter.limit > 0 {
+ // The -1 below accounts for the fact that docsToReceive was incremented above.
+ limit := iter.limit - int32(iter.docsToReceive-1) - int32(iter.docData.Len())
+ if limit < iter.op.limit {
+ iter.op.limit = limit
+ }
+ }
+ var op interface{}
+ if iter.findCmd {
+ op = iter.getMoreCmd()
+ } else {
+ op = &iter.op
+ }
+ if err := socket.Query(op); err != nil {
+ iter.docsToReceive--
+ iter.err = err
+ }
+}
+
+func (iter *Iter) getMoreCmd() *queryOp {
+ // TODO: Define the query statically in the Iter type, next to getMoreOp.
+ nameDot := strings.Index(iter.op.collection, ".")
+ if nameDot < 0 {
+ panic("invalid query collection name: " + iter.op.collection)
+ }
+
+ getMore := getMoreCmd{
+ CursorId: iter.op.cursorId,
+ Collection: iter.op.collection[nameDot+1:],
+ BatchSize: iter.op.limit,
+ }
+
+ var op queryOp
+ op.collection = iter.op.collection[:nameDot] + ".$cmd"
+ op.query = &getMore
+ op.limit = -1
+ op.replyFunc = iter.op.replyFunc
+ return &op
+}
+
+type countCmd struct {
+ Count string
+ Query interface{}
+ Limit int32 ",omitempty"
+ Skip int32 ",omitempty"
+}
+
+// Count returns the total number of documents in the result set.
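+//
+// For example, a hedged sketch (collection and field names are illustrative):
+//
+//     n, err := collection.Find(bson.M{"age": bson.M{"$gt": 21}}).Count()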
+func (q *Query) Count() (n int, err error) {
+ q.m.Lock()
+ session := q.session
+ op := q.op
+ limit := q.limit
+ q.m.Unlock()
+
+ c := strings.Index(op.collection, ".")
+ if c < 0 {
+ return 0, errors.New("Bad collection name: " + op.collection)
+ }
+
+ dbname := op.collection[:c]
+ cname := op.collection[c+1:]
+ query := op.query
+ if query == nil {
+ query = bson.D{}
+ }
+ result := struct{ N int }{}
+ err = session.DB(dbname).Run(countCmd{cname, query, limit, op.skip}, &result)
+ return result.N, err
+}
+
+// Count returns the total number of documents in the collection.
+func (c *Collection) Count() (n int, err error) {
+ return c.Find(nil).Count()
+}
+
+type distinctCmd struct {
+ Collection string "distinct"
+ Key string
+ Query interface{} ",omitempty"
+}
+
+// Distinct unmarshals into result the list of distinct values for the given key.
+//
+// For example:
+//
+// var result []int
+// err := collection.Find(bson.M{"gender": "F"}).Distinct("age", &result)
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/Aggregation
+//
+func (q *Query) Distinct(key string, result interface{}) error {
+ q.m.Lock()
+ session := q.session
+ op := q.op // Copy.
+ q.m.Unlock()
+
+ c := strings.Index(op.collection, ".")
+ if c < 0 {
+ return errors.New("Bad collection name: " + op.collection)
+ }
+
+ dbname := op.collection[:c]
+ cname := op.collection[c+1:]
+
+ var doc struct{ Values bson.Raw }
+ err := session.DB(dbname).Run(distinctCmd{cname, key, op.query}, &doc)
+ if err != nil {
+ return err
+ }
+ return doc.Values.Unmarshal(result)
+}
+
+type mapReduceCmd struct {
+ Collection string "mapreduce"
+ Map string ",omitempty"
+ Reduce string ",omitempty"
+ Finalize string ",omitempty"
+ Limit int32 ",omitempty"
+ Out interface{}
+ Query interface{} ",omitempty"
+ Sort interface{} ",omitempty"
+ Scope interface{} ",omitempty"
+ Verbose bool ",omitempty"
+}
+
+type mapReduceResult struct {
+ Results bson.Raw
+ Result bson.Raw
+ TimeMillis int64 "timeMillis"
+ Counts struct{ Input, Emit, Output int }
+ Ok bool
+ Err string
+ Timing *MapReduceTime
+}
+
+type MapReduce struct {
+ Map string // Map Javascript function code (required)
+ Reduce string // Reduce Javascript function code (required)
+ Finalize string // Finalize Javascript function code (optional)
+ Out interface{} // Output collection name or document. If nil, results are inlined into the result parameter.
+ Scope interface{} // Optional global scope for Javascript functions
+ Verbose bool
+}
+
+type MapReduceInfo struct {
+ InputCount int // Number of documents mapped
+ EmitCount int // Number of times reduce called emit
+ OutputCount int // Number of documents in resulting collection
+ Database string // Output database, if results are not inlined
+ Collection string // Output collection, if results are not inlined
+ Time int64 // Time to run the job, in nanoseconds
+ VerboseTime *MapReduceTime // Only defined if Verbose was true
+}
+
+type MapReduceTime struct {
+ Total int64 // Total time, in nanoseconds
+ Map int64 "mapTime" // Time within map function, in nanoseconds
+ EmitLoop int64 "emitLoop" // Time within the emit/map loop, in nanoseconds
+}
+
+// MapReduce executes a map/reduce job for documents covered by the query.
+// That kind of job is suitable for very flexible bulk aggregation of data
+// performed at the server side via Javascript functions.
+//
+// Results from the job may be returned as a result of the query itself
+// through the result parameter, provided they will certainly fit in memory
+// and in a single document. If there is a possibility that the amount
+// of data may be too large, results must be stored back in an alternative
+// collection or even a separate database, by setting the Out field of the
+// provided MapReduce job. In that case, provide nil as the result parameter.
+//
+// These are some of the ways to set Out:
+//
+// nil
+// Inline results into the result parameter.
+//
+// bson.M{"replace": "mycollection"}
+// The output will be inserted into a collection which replaces any
+// existing collection with the same name.
+//
+// bson.M{"merge": "mycollection"}
+// This option will merge new data into the old output collection. In
+// other words, if the same key exists in both the result set and the
+// old collection, the new key will overwrite the old one.
+//
+// bson.M{"reduce": "mycollection"}
+// If documents exist for a given key in the result set and in the old
+// collection, then a reduce operation (using the specified reduce
+// function) will be performed on the two values and the result will be
+// written to the output collection. If a finalize function was
+// provided, this will be run after the reduce as well.
+//
+// bson.M{...., "db": "mydb"}
+// Any of the above options can have the "db" key included for doing
+// the respective action in a separate database.
+//
+// The following trivial example counts the number of occurrences of each
+// distinct value of the field n across the documents in a collection, and
+// returns the results inline:
+//
+// job := &mgo.MapReduce{
+// Map: "function() { emit(this.n, 1) }",
+// Reduce: "function(key, values) { return Array.sum(values) }",
+// }
+// var result []struct { Id int "_id"; Value int }
+// _, err := collection.Find(nil).MapReduce(job, &result)
+// if err != nil {
+// return err
+// }
+// for _, item := range result {
+// fmt.Println(item.Value)
+// }
+//
+// This function is compatible with MongoDB 1.7.4+.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/MapReduce
+//
+func (q *Query) MapReduce(job *MapReduce, result interface{}) (info *MapReduceInfo, err error) {
+ q.m.Lock()
+ session := q.session
+ op := q.op // Copy.
+ limit := q.limit
+ q.m.Unlock()
+
+ c := strings.Index(op.collection, ".")
+ if c < 0 {
+ return nil, errors.New("Bad collection name: " + op.collection)
+ }
+
+ dbname := op.collection[:c]
+ cname := op.collection[c+1:]
+
+ cmd := mapReduceCmd{
+ Collection: cname,
+ Map: job.Map,
+ Reduce: job.Reduce,
+ Finalize: job.Finalize,
+ Out: fixMROut(job.Out),
+ Scope: job.Scope,
+ Verbose: job.Verbose,
+ Query: op.query,
+ Sort: op.options.OrderBy,
+ Limit: limit,
+ }
+
+ if cmd.Out == nil {
+ cmd.Out = bson.D{{"inline", 1}}
+ }
+
+ var doc mapReduceResult
+ err = session.DB(dbname).Run(&cmd, &doc)
+ if err != nil {
+ return nil, err
+ }
+ if doc.Err != "" {
+ return nil, errors.New(doc.Err)
+ }
+
+ info = &MapReduceInfo{
+ InputCount: doc.Counts.Input,
+ EmitCount: doc.Counts.Emit,
+ OutputCount: doc.Counts.Output,
+ Time: doc.TimeMillis * 1e6,
+ }
+
+ if doc.Result.Kind == 0x02 {
+ err = doc.Result.Unmarshal(&info.Collection)
+ info.Database = dbname
+ } else if doc.Result.Kind == 0x03 {
+ var v struct{ Collection, Db string }
+ err = doc.Result.Unmarshal(&v)
+ info.Collection = v.Collection
+ info.Database = v.Db
+ }
+
+ if doc.Timing != nil {
+ info.VerboseTime = doc.Timing
+ info.VerboseTime.Total *= 1e6
+ info.VerboseTime.Map *= 1e6
+ info.VerboseTime.EmitLoop *= 1e6
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ if result != nil {
+ return info, doc.Results.Unmarshal(result)
+ }
+ return info, nil
+}
+
+// The "out" option in the MapReduce command must be ordered. This was
+// found after the implementation was accepting maps for a long time,
+// so rather than breaking the API, we'll fix the order if necessary.
+// Details about the order requirement may be seen in MongoDB's code:
+//
+// http://goo.gl/L8jwJX
+//
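+// For instance (an illustrative sketch), a caller-provided map such as
+//
+//     bson.M{"db": "mydb", "replace": "mycollection"}
+//
+// is rewritten below as a bson.D with the output-type key ("replace")
+// moved to the front, as the server requires.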
+func fixMROut(out interface{}) interface{} {
+ outv := reflect.ValueOf(out)
+ if outv.Kind() != reflect.Map || outv.Type().Key() != reflect.TypeOf("") {
+ return out
+ }
+ outs := make(bson.D, outv.Len())
+
+ outTypeIndex := -1
+ for i, k := range outv.MapKeys() {
+ ks := k.String()
+ outs[i].Name = ks
+ outs[i].Value = outv.MapIndex(k).Interface()
+ switch ks {
+ case "normal", "replace", "merge", "reduce", "inline":
+ outTypeIndex = i
+ }
+ }
+ if outTypeIndex > 0 {
+ outs[0], outs[outTypeIndex] = outs[outTypeIndex], outs[0]
+ }
+ return outs
+}
+
+// Change holds fields for running a findAndModify MongoDB command via
+// the Query.Apply method.
+type Change struct {
+ Update interface{} // The update document
+ Upsert bool // Whether to insert in case the document isn't found
+ Remove bool // Whether to remove the document found rather than updating
+ ReturnNew bool // Should the modified document be returned rather than the old one
+}
+
+type findModifyCmd struct {
+ Collection string "findAndModify"
+ Query, Update, Sort, Fields interface{} ",omitempty"
+ Upsert, Remove, New bool ",omitempty"
+}
+
+type valueResult struct {
+ Value bson.Raw
+ LastError LastError "lastErrorObject"
+}
+
+// Apply runs the findAndModify MongoDB command, which allows updating, upserting
+// or removing a document matching a query and atomically returning either the old
+// version (the default) or the new version of the document (when ReturnNew is true).
+// If no objects are found, Apply returns ErrNotFound.
+//
+// The Sort and Select query methods affect the result of Apply. In case
+// multiple documents match the query, Sort enables selecting which document to
+// act upon by ordering it first. Select enables retrieving only a selection
+// of fields of the new or old document.
+//
+// This simple example increments a counter and prints its new value:
+//
+// change := mgo.Change{
+// Update: bson.M{"$inc": bson.M{"n": 1}},
+// ReturnNew: true,
+// }
+// info, err = col.Find(M{"_id": id}).Apply(change, &doc)
+// fmt.Println(doc.N)
+//
+// This method depends on MongoDB >= 2.0 to work properly.
+//
+// Relevant documentation:
+//
+// http://www.mongodb.org/display/DOCS/findAndModify+Command
+// http://www.mongodb.org/display/DOCS/Updating
+// http://www.mongodb.org/display/DOCS/Atomic+Operations
+//
+func (q *Query) Apply(change Change, result interface{}) (info *ChangeInfo, err error) {
+ q.m.Lock()
+ session := q.session
+ op := q.op // Copy.
+ q.m.Unlock()
+
+ c := strings.Index(op.collection, ".")
+ if c < 0 {
+ return nil, errors.New("bad collection name: " + op.collection)
+ }
+
+ dbname := op.collection[:c]
+ cname := op.collection[c+1:]
+
+ cmd := findModifyCmd{
+ Collection: cname,
+ Update: change.Update,
+ Upsert: change.Upsert,
+ Remove: change.Remove,
+ New: change.ReturnNew,
+ Query: op.query,
+ Sort: op.options.OrderBy,
+ Fields: op.selector,
+ }
+
+ session = session.Clone()
+ defer session.Close()
+ session.SetMode(Strong, false)
+
+ var doc valueResult
+ for i := 0; i < maxUpsertRetries; i++ {
+ err = session.DB(dbname).Run(&cmd, &doc)
+
+ if err == nil {
+ break
+ }
+ if change.Upsert && IsDup(err) {
+ // Retry duplicate key errors on upserts.
+ // https://docs.mongodb.com/v3.2/reference/method/db.collection.update/#use-unique-indexes
+ continue
+ }
+ if qerr, ok := err.(*QueryError); ok && qerr.Message == "No matching object found" {
+ return nil, ErrNotFound
+ }
+ return nil, err
+ }
+ if doc.LastError.N == 0 {
+ return nil, ErrNotFound
+ }
+ if doc.Value.Kind != 0x0A && result != nil {
+ err = doc.Value.Unmarshal(result)
+ if err != nil {
+ return nil, err
+ }
+ }
+ info = &ChangeInfo{}
+ lerr := &doc.LastError
+ if lerr.UpdatedExisting {
+ info.Updated = lerr.N
+ info.Matched = lerr.N
+ } else if change.Remove {
+ info.Removed = lerr.N
+ info.Matched = lerr.N
+ } else if change.Upsert {
+ info.UpsertedId = lerr.UpsertedId
+ }
+ return info, nil
+}
+
+// The BuildInfo type encapsulates details about the running MongoDB server.
+//
+// Note that the VersionArray field was introduced in MongoDB 2.0+, but it is
+// internally assembled from the Version information for previous versions.
+// In both cases, VersionArray is guaranteed to have at least 4 entries.
+type BuildInfo struct {
+ Version string
+ VersionArray []int `bson:"versionArray"` // On MongoDB 2.0+; assembled from Version otherwise
+ GitVersion string `bson:"gitVersion"`
+ OpenSSLVersion string `bson:"OpenSSLVersion"`
+ SysInfo string `bson:"sysInfo"` // Deprecated and empty on MongoDB 3.2+.
+ Bits int
+ Debug bool
+ MaxObjectSize int `bson:"maxBsonObjectSize"`
+}
+
+// VersionAtLeast returns whether the BuildInfo version is greater than or
+// equal to the provided version number. If more than one number is
+// provided, numbers will be considered as major, minor, and so on.
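+//
+// For example, a hedged sketch:
+//
+//     if bi.VersionAtLeast(2, 6) {
+//         // Rely on behavior introduced in 2.6.
+//     }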
+func (bi *BuildInfo) VersionAtLeast(version ...int) bool {
+ for i, vi := range version {
+ if i == len(bi.VersionArray) {
+ return false
+ }
+ if bivi := bi.VersionArray[i]; bivi != vi {
+ return bivi >= vi
+ }
+ }
+ return true
+}
+
+// BuildInfo retrieves the version and other details about the
+// running MongoDB server.
+func (s *Session) BuildInfo() (info BuildInfo, err error) {
+ err = s.Run(bson.D{{"buildInfo", "1"}}, &info)
+ if len(info.VersionArray) == 0 {
+ for _, a := range strings.Split(info.Version, ".") {
+ i, err := strconv.Atoi(a)
+ if err != nil {
+ break
+ }
+ info.VersionArray = append(info.VersionArray, i)
+ }
+ }
+ for len(info.VersionArray) < 4 {
+ info.VersionArray = append(info.VersionArray, 0)
+ }
+ if i := strings.IndexByte(info.GitVersion, ' '); i >= 0 {
+ // Strip off the " modules: enterprise" suffix. This is a _git version_.
+ // That information may be moved to another field if people need it.
+ info.GitVersion = info.GitVersion[:i]
+ }
+ if info.SysInfo == "deprecated" {
+ info.SysInfo = ""
+ }
+ return
+}
+
+// ---------------------------------------------------------------------------
+// Internal session handling helpers.
+
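+// acquireSocket returns a socket bound to this session, reusing the
+// session's reserved master or slave socket when the consistency mode
+// allows it, and otherwise acquiring and authenticating a fresh socket
+// from the cluster.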
+func (s *Session) acquireSocket(slaveOk bool) (*mongoSocket, error) {
+
+ // Read-only lock to check for previously reserved socket.
+ s.m.RLock()
+ // If there is a slave socket reserved and its use is acceptable, take it as long
+ // as there isn't a master socket which would be preferred by the read preference mode.
+ if s.slaveSocket != nil && s.slaveOk && slaveOk && (s.masterSocket == nil || s.consistency != PrimaryPreferred && s.consistency != Monotonic) {
+ socket := s.slaveSocket
+ socket.Acquire()
+ s.m.RUnlock()
+ return socket, nil
+ }
+ if s.masterSocket != nil {
+ socket := s.masterSocket
+ socket.Acquire()
+ s.m.RUnlock()
+ return socket, nil
+ }
+ s.m.RUnlock()
+
+ // No go. We may have to request a new socket and change the session,
+ // so try again but with an exclusive lock now.
+ s.m.Lock()
+ defer s.m.Unlock()
+
+ if s.slaveSocket != nil && s.slaveOk && slaveOk && (s.masterSocket == nil || s.consistency != PrimaryPreferred && s.consistency != Monotonic) {
+ s.slaveSocket.Acquire()
+ return s.slaveSocket, nil
+ }
+ if s.masterSocket != nil {
+ s.masterSocket.Acquire()
+ return s.masterSocket, nil
+ }
+
+ // Still not good. We need a new socket.
+ sock, err := s.cluster().AcquireSocket(s.consistency, slaveOk && s.slaveOk, s.syncTimeout, s.sockTimeout, s.queryConfig.op.serverTags, s.poolLimit)
+ if err != nil {
+ return nil, err
+ }
+
+ // Authenticate the new socket.
+ if err = s.socketLogin(sock); err != nil {
+ sock.Release()
+ return nil, err
+ }
+
+ // Keep track of the new socket, if necessary.
+ // Note that, as a special case, if the Eventual session was
+ // not refreshed (s.slaveSocket != nil), it means the developer
+ // asked to preserve an existing reserved socket, so we'll
+ // keep a master one around too before a Refresh happens.
+ if s.consistency != Eventual || s.slaveSocket != nil {
+ s.setSocket(sock)
+ }
+
+ // Switch over a Monotonic session to the master.
+ if !slaveOk && s.consistency == Monotonic {
+ s.slaveOk = false
+ }
+
+ return sock, nil
+}
+
+// setSocket binds socket to this session.
+func (s *Session) setSocket(socket *mongoSocket) {
+ info := socket.Acquire()
+ if info.Master {
+ if s.masterSocket != nil {
+ panic("setSocket(master) with existing master socket reserved")
+ }
+ s.masterSocket = socket
+ } else {
+ if s.slaveSocket != nil {
+ panic("setSocket(slave) with existing slave socket reserved")
+ }
+ s.slaveSocket = socket
+ }
+}
+
+// unsetSocket releases any slave and/or master sockets reserved.
+func (s *Session) unsetSocket() {
+ if s.masterSocket != nil {
+ s.masterSocket.Release()
+ }
+ if s.slaveSocket != nil {
+ s.slaveSocket.Release()
+ }
+ s.masterSocket = nil
+ s.slaveSocket = nil
+}
+
+func (iter *Iter) replyFunc() replyFunc {
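+ // The returned callback runs once per reply document (or once with
+ // docNum == -1 when a reply carries no documents), pushes document data
+ // into iter.docData, updates the cursor state, and wakes goroutines
+ // blocked in Next or Done through the gotReply broadcast.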
+ return func(err error, op *replyOp, docNum int, docData []byte) {
+ iter.m.Lock()
+ iter.docsToReceive--
+ if err != nil {
+ iter.err = err
+ debugf("Iter %p received an error: %s", iter, err.Error())
+ } else if docNum == -1 {
+ debugf("Iter %p received no documents (cursor=%d).", iter, op.cursorId)
+ if op != nil && op.cursorId != 0 {
+ // It's a tailable cursor.
+ iter.op.cursorId = op.cursorId
+ } else if op != nil && op.cursorId == 0 && op.flags&1 == 1 {
+ // Cursor likely timed out.
+ iter.err = ErrCursor
+ } else {
+ iter.err = ErrNotFound
+ }
+ } else if iter.findCmd {
+ debugf("Iter %p received reply document %d/%d (cursor=%d)", iter, docNum+1, int(op.replyDocs), op.cursorId)
+ var findReply struct {
+ Ok bool
+ Code int
+ Errmsg string
+ Cursor cursorData
+ }
+ if err := bson.Unmarshal(docData, &findReply); err != nil {
+ iter.err = err
+ } else if !findReply.Ok && findReply.Errmsg != "" {
+ iter.err = &QueryError{Code: findReply.Code, Message: findReply.Errmsg}
+ } else if len(findReply.Cursor.FirstBatch) == 0 && len(findReply.Cursor.NextBatch) == 0 {
+ iter.err = ErrNotFound
+ } else {
+ batch := findReply.Cursor.FirstBatch
+ if len(batch) == 0 {
+ batch = findReply.Cursor.NextBatch
+ }
+ rdocs := len(batch)
+ for _, raw := range batch {
+ iter.docData.Push(raw.Data)
+ }
+ iter.docsToReceive = 0
+ docsToProcess := iter.docData.Len()
+ if iter.limit == 0 || int32(docsToProcess) < iter.limit {
+ iter.docsBeforeMore = docsToProcess - int(iter.prefetch*float64(rdocs))
+ } else {
+ iter.docsBeforeMore = -1
+ }
+ iter.op.cursorId = findReply.Cursor.Id
+ }
+ } else {
+ rdocs := int(op.replyDocs)
+ if docNum == 0 {
+ iter.docsToReceive += rdocs - 1
+ docsToProcess := iter.docData.Len() + rdocs
+ if iter.limit == 0 || int32(docsToProcess) < iter.limit {
+ iter.docsBeforeMore = docsToProcess - int(iter.prefetch*float64(rdocs))
+ } else {
+ iter.docsBeforeMore = -1
+ }
+ iter.op.cursorId = op.cursorId
+ }
+ debugf("Iter %p received reply document %d/%d (cursor=%d)", iter, docNum+1, rdocs, op.cursorId)
+ iter.docData.Push(docData)
+ }
+ iter.gotReply.Broadcast()
+ iter.m.Unlock()
+ }
+}
+
+type writeCmdResult struct {
+ Ok bool
+ N int
+ NModified int `bson:"nModified"`
+ Upserted []struct {
+ Index int
+ Id interface{} `_id`
+ }
+ ConcernError writeConcernError `bson:"writeConcernError"`
+ Errors []writeCmdError `bson:"writeErrors"`
+}
+
+type writeConcernError struct {
+ Code int
+ ErrMsg string
+}
+
+type writeCmdError struct {
+ Index int
+ Code int
+ ErrMsg string
+}
+
+func (r *writeCmdResult) BulkErrorCases() []BulkErrorCase {
+ ecases := make([]BulkErrorCase, len(r.Errors))
+ for i, err := range r.Errors {
+ ecases[i] = BulkErrorCase{err.Index, &QueryError{Code: err.Code, Message: err.ErrMsg}}
+ }
+ return ecases
+}
+
+// writeOp runs the given modifying operation, potentially followed up
+// by a getLastError command in case the session is in safe mode. The
+// LastError result is made available in lerr, and if lerr.Err is set it
+// will also be returned as err.
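+//
+// Dispatch summary (describing the code below): servers announcing wire
+// protocol version 2 or newer receive write commands, with inserts larger
+// than 1000 documents split into batches; older servers fall back to the
+// legacy per-operation query protocol, with bulk updates and deletes
+// issued one operation at a time.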
+func (c *Collection) writeOp(op interface{}, ordered bool) (lerr *LastError, err error) {
+ s := c.Database.Session
+ socket, err := s.acquireSocket(c.Database.Name == "local")
+ if err != nil {
+ return nil, err
+ }
+ defer socket.Release()
+
+ s.m.RLock()
+ safeOp := s.safeOp
+ bypassValidation := s.bypassValidation
+ s.m.RUnlock()
+
+ if socket.ServerInfo().MaxWireVersion >= 2 {
+ // Servers with a more recent write protocol benefit from write commands.
+ if op, ok := op.(*insertOp); ok && len(op.documents) > 1000 {
+ var lerr LastError
+
+ // The maximum batch size is 1000, so larger inserts must be split into separate operations for compatibility.
+ all := op.documents
+ for i := 0; i < len(all); i += 1000 {
+ l := i + 1000
+ if l > len(all) {
+ l = len(all)
+ }
+ op.documents = all[i:l]
+ oplerr, err := c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation)
+ lerr.N += oplerr.N
+ lerr.modified += oplerr.modified
+ if err != nil {
+ for ei := range oplerr.ecases {
+ oplerr.ecases[ei].Index += i
+ }
+ lerr.ecases = append(lerr.ecases, oplerr.ecases...)
+ if op.flags&1 == 0 {
+ return &lerr, err
+ }
+ }
+ }
+ if len(lerr.ecases) != 0 {
+ return &lerr, lerr.ecases[0].Err
+ }
+ return &lerr, nil
+ }
+ return c.writeOpCommand(socket, safeOp, op, ordered, bypassValidation)
+ } else if updateOps, ok := op.(bulkUpdateOp); ok {
+ var lerr LastError
+ for i, updateOp := range updateOps {
+ oplerr, err := c.writeOpQuery(socket, safeOp, updateOp, ordered)
+ lerr.N += oplerr.N
+ lerr.modified += oplerr.modified
+ if err != nil {
+ lerr.ecases = append(lerr.ecases, BulkErrorCase{i, err})
+ if ordered {
+ break
+ }
+ }
+ }
+ if len(lerr.ecases) != 0 {
+ return &lerr, lerr.ecases[0].Err
+ }
+ return &lerr, nil
+ } else if deleteOps, ok := op.(bulkDeleteOp); ok {
+ var lerr LastError
+ for i, deleteOp := range deleteOps {
+ oplerr, err := c.writeOpQuery(socket, safeOp, deleteOp, ordered)
+ lerr.N += oplerr.N
+ lerr.modified += oplerr.modified
+ if err != nil {
+ lerr.ecases = append(lerr.ecases, BulkErrorCase{i, err})
+ if ordered {
+ break
+ }
+ }
+ }
+ if len(lerr.ecases) != 0 {
+ return &lerr, lerr.ecases[0].Err
+ }
+ return &lerr, nil
+ }
+ return c.writeOpQuery(socket, safeOp, op, ordered)
+}
+
+func (c *Collection) writeOpQuery(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered bool) (lerr *LastError, err error) {
+ if safeOp == nil {
+ return nil, socket.Query(op)
+ }
+
+ var mutex sync.Mutex
+ var replyData []byte
+ var replyErr error
+ mutex.Lock()
+ query := *safeOp // Copy the data.
+ query.collection = c.Database.Name + ".$cmd"
+ query.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
+ replyData = docData
+ replyErr = err
+ mutex.Unlock()
+ }
+ err = socket.Query(op, &query)
+ if err != nil {
+ return nil, err
+ }
+ mutex.Lock() // Wait.
+ if replyErr != nil {
+ return nil, replyErr // XXX TESTME
+ }
+ if hasErrMsg(replyData) {
+ // Looks like getLastError itself failed.
+ err = checkQueryError(query.collection, replyData)
+ if err != nil {
+ return nil, err
+ }
+ }
+ result := &LastError{}
+ bson.Unmarshal(replyData, &result)
+ debugf("Result from writing query: %#v", result)
+ if result.Err != "" {
+ result.ecases = []BulkErrorCase{{Index: 0, Err: result}}
+ if insert, ok := op.(*insertOp); ok && len(insert.documents) > 1 {
+ result.ecases[0].Index = -1
+ }
+ return result, result
+ }
+ // With MongoDB <2.6 we don't know how many actually changed, so make it the same as matched.
+ result.modified = result.N
+ return result, nil
+}
+
+func (c *Collection) writeOpCommand(socket *mongoSocket, safeOp *queryOp, op interface{}, ordered, bypassValidation bool) (lerr *LastError, err error) {
+ var writeConcern interface{}
+ if safeOp == nil {
+ writeConcern = bson.D{{"w", 0}}
+ } else {
+ writeConcern = safeOp.query.(*getLastError)
+ }
+
+ var cmd bson.D
+ switch op := op.(type) {
+ case *insertOp:
+ // http://docs.mongodb.org/manual/reference/command/insert
+ cmd = bson.D{
+ {"insert", c.Name},
+ {"documents", op.documents},
+ {"writeConcern", writeConcern},
+ {"ordered", op.flags&1 == 0},
+ }
+ case *updateOp:
+ // http://docs.mongodb.org/manual/reference/command/update
+ cmd = bson.D{
+ {"update", c.Name},
+ {"updates", []interface{}{op}},
+ {"writeConcern", writeConcern},
+ {"ordered", ordered},
+ }
+ case bulkUpdateOp:
+ // http://docs.mongodb.org/manual/reference/command/update
+ cmd = bson.D{
+ {"update", c.Name},
+ {"updates", op},
+ {"writeConcern", writeConcern},
+ {"ordered", ordered},
+ }
+ case *deleteOp:
+ // http://docs.mongodb.org/manual/reference/command/delete
+ cmd = bson.D{
+ {"delete", c.Name},
+ {"deletes", []interface{}{op}},
+ {"writeConcern", writeConcern},
+ {"ordered", ordered},
+ }
+ case bulkDeleteOp:
+ // http://docs.mongodb.org/manual/reference/command/delete
+ cmd = bson.D{
+ {"delete", c.Name},
+ {"deletes", op},
+ {"writeConcern", writeConcern},
+ {"ordered", ordered},
+ }
+ }
+ if bypassValidation {
+ cmd = append(cmd, bson.DocElem{"bypassDocumentValidation", true})
+ }
+
+ var result writeCmdResult
+ err = c.Database.run(socket, cmd, &result)
+ debugf("Write command result: %#v (err=%v)", result, err)
+ ecases := result.BulkErrorCases()
+ lerr = &LastError{
+ UpdatedExisting: result.N > 0 && len(result.Upserted) == 0,
+ N: result.N,
+
+ modified: result.NModified,
+ ecases: ecases,
+ }
+ if len(result.Upserted) > 0 {
+ lerr.UpsertedId = result.Upserted[0].Id
+ }
+ if len(result.Errors) > 0 {
+ e := result.Errors[0]
+ lerr.Code = e.Code
+ lerr.Err = e.ErrMsg
+ err = lerr
+ } else if result.ConcernError.Code != 0 {
+ e := result.ConcernError
+ lerr.Code = e.Code
+ lerr.Err = e.ErrMsg
+ err = lerr
+ }
+
+ if err == nil && safeOp == nil {
+ return nil, nil
+ }
+ return lerr, err
+}
+
+func hasErrMsg(d []byte) bool {
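+ // Scan the raw BSON for a string element (type byte 0x02) whose name is
+ // exactly "errmsg"; its presence alone is what the caller needs to know.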
+ l := len(d)
+ for i := 0; i+8 < l; i++ {
+ if d[i] == '\x02' && d[i+1] == 'e' && d[i+2] == 'r' && d[i+3] == 'r' && d[i+4] == 'm' && d[i+5] == 's' && d[i+6] == 'g' && d[i+7] == '\x00' {
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/session_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/session_test.go
new file mode 100644
index 00000000000..a89279d38b1
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/session_test.go
@@ -0,0 +1,4216 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+ "flag"
+ "fmt"
+ "math"
+ "os"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+)
+
+func (s *S) TestRunString(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ result := struct{ Ok int }{}
+ err = session.Run("ping", &result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Ok, Equals, 1)
+}
+
+func (s *S) TestRunValue(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ result := struct{ Ok int }{}
+ err = session.Run(M{"ping": 1}, &result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Ok, Equals, 1)
+}
+
+func (s *S) TestPing(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Just ensure the nonce has been received.
+ result := struct{}{}
+ err = session.Run("ping", &result)
+
+ mgo.ResetStats()
+
+ err = session.Ping()
+ c.Assert(err, IsNil)
+
+ // Pretty boring.
+ stats := mgo.GetStats()
+ c.Assert(stats.SentOps, Equals, 1)
+ c.Assert(stats.ReceivedOps, Equals, 1)
+}
+
+func (s *S) TestDialIPAddress(c *C) {
+ session, err := mgo.Dial("127.0.0.1:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ if os.Getenv("NOIPV6") != "1" {
+ session, err = mgo.Dial("[::1%]:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ }
+}
+
+func (s *S) TestURLSingle(c *C) {
+ session, err := mgo.Dial("mongodb://localhost:40001/")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ result := struct{ Ok int }{}
+ err = session.Run("ping", &result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Ok, Equals, 1)
+}
+
+func (s *S) TestURLMany(c *C) {
+ session, err := mgo.Dial("mongodb://localhost:40011,localhost:40012/")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ result := struct{ Ok int }{}
+ err = session.Run("ping", &result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Ok, Equals, 1)
+}
+
+func (s *S) TestURLParsing(c *C) {
+ urls := []string{
+ "localhost:40001?foo=1&bar=2",
+ "localhost:40001?foo=1;bar=2",
+ }
+ for _, url := range urls {
+ session, err := mgo.Dial(url)
+ if session != nil {
+ session.Close()
+ }
+ c.Assert(err, ErrorMatches, "unsupported connection URL option: (foo=1|bar=2)")
+ }
+}
+
+func (s *S) TestInsertFindOne(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1, "b": 2})
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{"a": 1, "b": 3})
+ c.Assert(err, IsNil)
+
+ result := struct{ A, B int }{}
+
+ err = coll.Find(M{"a": 1}).Sort("b").One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.A, Equals, 1)
+ c.Assert(result.B, Equals, 2)
+
+ err = coll.Find(M{"a": 1}).Sort("-b").One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.A, Equals, 1)
+ c.Assert(result.B, Equals, 3)
+}
+
+func (s *S) TestInsertFindOneNil(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Find(nil).One(nil)
+ c.Assert(err, ErrorMatches, "unauthorized.*|not authorized.*")
+}
+
+func (s *S) TestInsertFindOneMap(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1, "b": 2})
+ c.Assert(err, IsNil)
+ result := make(M)
+ err = coll.Find(M{"a": 1}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["a"], Equals, 1)
+ c.Assert(result["b"], Equals, 2)
+}
+
+func (s *S) TestInsertFindAll(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"a": 1, "b": 2})
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{"a": 3, "b": 4})
+ c.Assert(err, IsNil)
+
+ type R struct{ A, B int }
+ var result []R
+
+ assertResult := func() {
+ c.Assert(len(result), Equals, 2)
+ c.Assert(result[0].A, Equals, 1)
+ c.Assert(result[0].B, Equals, 2)
+ c.Assert(result[1].A, Equals, 3)
+ c.Assert(result[1].B, Equals, 4)
+ }
+
+ // nil slice
+ err = coll.Find(nil).Sort("a").All(&result)
+ c.Assert(err, IsNil)
+ assertResult()
+
+ // Previously allocated slice
+ allocd := make([]R, 5)
+ result = allocd
+ err = coll.Find(nil).Sort("a").All(&result)
+ c.Assert(err, IsNil)
+ assertResult()
+
+ // Ensure result is backed by the originally allocated array
+ c.Assert(&result[0], Equals, &allocd[0])
+
+ // Non-pointer slice error
+ f := func() { coll.Find(nil).All(result) }
+ c.Assert(f, Panics, "result argument must be a slice address")
+
+ // Non-slice error
+ f = func() { coll.Find(nil).All(new(int)) }
+ c.Assert(f, Panics, "result argument must be a slice address")
+}
+
+func (s *S) TestFindRef(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db1 := session.DB("db1")
+ db1col1 := db1.C("col1")
+
+ db2 := session.DB("db2")
+ db2col1 := db2.C("col1")
+
+ err = db1col1.Insert(M{"_id": 1, "n": 1})
+ c.Assert(err, IsNil)
+ err = db1col1.Insert(M{"_id": 2, "n": 2})
+ c.Assert(err, IsNil)
+ err = db2col1.Insert(M{"_id": 2, "n": 3})
+ c.Assert(err, IsNil)
+
+ result := struct{ N int }{}
+
+ ref1 := &mgo.DBRef{Collection: "col1", Id: 1}
+ ref2 := &mgo.DBRef{Collection: "col1", Id: 2, Database: "db2"}
+
+ err = db1.FindRef(ref1).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 1)
+
+ err = db1.FindRef(ref2).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 3)
+
+ err = db2.FindRef(ref1).One(&result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = db2.FindRef(ref2).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 3)
+
+ err = session.FindRef(ref2).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 3)
+
+ f := func() { session.FindRef(ref1).One(&result) }
+ c.Assert(f, PanicMatches, "Can't resolve database for &mgo.DBRef{Collection:\"col1\", Id:1, Database:\"\"}")
+}
+
+func (s *S) TestDatabaseAndCollectionNames(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db1 := session.DB("db1")
+ db1col1 := db1.C("col1")
+ db1col2 := db1.C("col2")
+
+ db2 := session.DB("db2")
+ db2col1 := db2.C("col3")
+
+ err = db1col1.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+ err = db1col2.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+ err = db2col1.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+
+ names, err := session.DatabaseNames()
+ c.Assert(err, IsNil)
+ c.Assert(filterDBs(names), DeepEquals, []string{"db1", "db2"})
+
+ // Try to exercise cursor logic. 2.8.0-rc3 still ignores this.
+ session.SetBatch(2)
+
+ names, err = db1.CollectionNames()
+ c.Assert(err, IsNil)
+ c.Assert(names, DeepEquals, []string{"col1", "col2", "system.indexes"})
+
+ names, err = db2.CollectionNames()
+ c.Assert(err, IsNil)
+ c.Assert(names, DeepEquals, []string{"col3", "system.indexes"})
+}
+
+func (s *S) TestSelect(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ coll.Insert(M{"a": 1, "b": 2})
+
+ result := struct{ A, B int }{}
+
+ err = coll.Find(M{"a": 1}).Select(M{"b": 1}).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.A, Equals, 0)
+ c.Assert(result.B, Equals, 2)
+}
+
+func (s *S) TestInlineMap(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ var v, result1 struct {
+ A int
+ M map[string]int ",inline"
+ }
+
+ v.A = 1
+ v.M = map[string]int{"b": 2}
+ err = coll.Insert(v)
+ c.Assert(err, IsNil)
+
+ noId := M{"_id": 0}
+
+ err = coll.Find(nil).Select(noId).One(&result1)
+ c.Assert(err, IsNil)
+ c.Assert(result1.A, Equals, 1)
+ c.Assert(result1.M, DeepEquals, map[string]int{"b": 2})
+
+ var result2 M
+ err = coll.Find(nil).Select(noId).One(&result2)
+ c.Assert(err, IsNil)
+ c.Assert(result2, DeepEquals, M{"a": 1, "b": 2})
+}
+
+func (s *S) TestUpdate(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ err := coll.Insert(M{"k": n, "n": n})
+ c.Assert(err, IsNil)
+ }
+
+ // No changes is a no-op and shouldn't return an error.
+ err = coll.Update(M{"k": 42}, M{"$set": M{"n": 42}})
+ c.Assert(err, IsNil)
+
+ err = coll.Update(M{"k": 42}, M{"$inc": M{"n": 1}})
+ c.Assert(err, IsNil)
+
+ result := make(M)
+ err = coll.Find(M{"k": 42}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 43)
+
+ err = coll.Update(M{"k": 47}, M{"k": 47, "n": 47})
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = coll.Find(M{"k": 47}).One(result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+func (s *S) TestUpdateId(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ err := coll.Insert(M{"_id": n, "n": n})
+ c.Assert(err, IsNil)
+ }
+
+ err = coll.UpdateId(42, M{"$inc": M{"n": 1}})
+ c.Assert(err, IsNil)
+
+ result := make(M)
+ err = coll.FindId(42).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 43)
+
+ err = coll.UpdateId(47, M{"k": 47, "n": 47})
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = coll.FindId(47).One(result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+func (s *S) TestUpdateNil(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"k": 42, "n": 42})
+ c.Assert(err, IsNil)
+ err = coll.Update(nil, M{"$inc": M{"n": 1}})
+ c.Assert(err, IsNil)
+
+ result := make(M)
+ err = coll.Find(M{"k": 42}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 43)
+
+ err = coll.Insert(M{"k": 45, "n": 45})
+ c.Assert(err, IsNil)
+ _, err = coll.UpdateAll(nil, M{"$inc": M{"n": 1}})
+ c.Assert(err, IsNil)
+
+ err = coll.Find(M{"k": 42}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 44)
+ err = coll.Find(M{"k": 45}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 46)
+}
+
+func (s *S) TestUpsert(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ err := coll.Insert(bson.D{{"k", n}, {"n", n}})
+ c.Assert(err, IsNil)
+ }
+
+ info, err := coll.Upsert(M{"k": 42}, bson.D{{"k", 42}, {"n", 24}})
+ c.Assert(err, IsNil)
+ c.Assert(info.Updated, Equals, 1)
+ c.Assert(info.Matched, Equals, 1)
+ c.Assert(info.UpsertedId, IsNil)
+
+ result := M{}
+ err = coll.Find(M{"k": 42}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 24)
+
+ // Match but do not change.
+ info, err = coll.Upsert(M{"k": 42}, bson.D{{"k", 42}, {"n", 24}})
+ c.Assert(err, IsNil)
+ c.Assert(info.Updated, Equals, 1) // On 2.6+ this feels like a server mistake.
+ c.Assert(info.Matched, Equals, 1)
+ c.Assert(info.UpsertedId, IsNil)
+
+ // Insert with internally created id.
+ info, err = coll.Upsert(M{"k": 47}, M{"k": 47, "n": 47})
+ c.Assert(err, IsNil)
+ c.Assert(info.Updated, Equals, 0)
+ c.Assert(info.Matched, Equals, 0)
+ c.Assert(info.UpsertedId, NotNil)
+
+ err = coll.Find(M{"k": 47}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 47)
+
+ result = M{}
+ err = coll.Find(M{"_id": info.UpsertedId}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 47)
+
+ // Insert with provided id.
+ info, err = coll.Upsert(M{"k": 48}, M{"k": 48, "n": 48, "_id": 48})
+ c.Assert(err, IsNil)
+ c.Assert(info.Updated, Equals, 0)
+ c.Assert(info.Matched, Equals, 0)
+ if s.versionAtLeast(2, 6) {
+ c.Assert(info.UpsertedId, Equals, 48)
+ } else {
+ c.Assert(info.UpsertedId, IsNil) // Unfortunate, but that's what Mongo gave us.
+ }
+
+ err = coll.Find(M{"k": 48}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 48)
+}
+
+func (s *S) TestUpsertId(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ err := coll.Insert(M{"_id": n, "n": n})
+ c.Assert(err, IsNil)
+ }
+
+ info, err := coll.UpsertId(42, M{"n": 24})
+ c.Assert(err, IsNil)
+ c.Assert(info.Updated, Equals, 1)
+ c.Assert(info.UpsertedId, IsNil)
+
+ result := M{}
+ err = coll.FindId(42).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 24)
+
+ info, err = coll.UpsertId(47, M{"_id": 47, "n": 47})
+ c.Assert(err, IsNil)
+ c.Assert(info.Updated, Equals, 0)
+ if s.versionAtLeast(2, 6) {
+ c.Assert(info.UpsertedId, Equals, 47)
+ } else {
+ c.Assert(info.UpsertedId, IsNil)
+ }
+
+ err = coll.FindId(47).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 47)
+}
+
+func (s *S) TestUpdateAll(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ err := coll.Insert(M{"k": n, "n": n})
+ c.Assert(err, IsNil)
+ }
+
+ info, err := coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$unset": M{"missing": 1}})
+ c.Assert(err, IsNil)
+ if s.versionAtLeast(2, 6) {
+ c.Assert(info.Updated, Equals, 0)
+ c.Assert(info.Matched, Equals, 4)
+ } else {
+ c.Assert(info.Updated, Equals, 4)
+ c.Assert(info.Matched, Equals, 4)
+ }
+
+ info, err = coll.UpdateAll(M{"k": M{"$gt": 42}}, M{"$inc": M{"n": 1}})
+ c.Assert(err, IsNil)
+ c.Assert(info.Updated, Equals, 4)
+ c.Assert(info.Matched, Equals, 4)
+
+ result := make(M)
+ err = coll.Find(M{"k": 42}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 42)
+
+ err = coll.Find(M{"k": 43}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 44)
+
+ err = coll.Find(M{"k": 44}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 45)
+
+ if !s.versionAtLeast(2, 6) {
+ // 2.6 made this invalid.
+ info, err = coll.UpdateAll(M{"k": 47}, M{"k": 47, "n": 47})
+ c.Assert(err, Equals, nil)
+ c.Assert(info.Updated, Equals, 0)
+ }
+}
+
+func (s *S) TestRemove(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ err = coll.Remove(M{"n": M{"$gt": 42}})
+ c.Assert(err, IsNil)
+
+ result := &struct{ N int }{}
+ err = coll.Find(M{"n": 42}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 42)
+
+ err = coll.Find(M{"n": 43}).One(result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = coll.Find(M{"n": 44}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 44)
+}
+
+func (s *S) TestRemoveId(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"_id": 40}, M{"_id": 41}, M{"_id": 42})
+ c.Assert(err, IsNil)
+
+ err = coll.RemoveId(41)
+ c.Assert(err, IsNil)
+
+ c.Assert(coll.FindId(40).One(nil), IsNil)
+ c.Assert(coll.FindId(41).One(nil), Equals, mgo.ErrNotFound)
+ c.Assert(coll.FindId(42).One(nil), IsNil)
+}
+
+func (s *S) TestRemoveUnsafe(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetSafe(nil)
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"_id": 40}, M{"_id": 41}, M{"_id": 42})
+ c.Assert(err, IsNil)
+
+ err = coll.RemoveId(41)
+ c.Assert(err, IsNil)
+
+ c.Assert(coll.FindId(40).One(nil), IsNil)
+ c.Assert(coll.FindId(41).One(nil), Equals, mgo.ErrNotFound)
+ c.Assert(coll.FindId(42).One(nil), IsNil)
+}
+
+func (s *S) TestRemoveAll(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ info, err := coll.RemoveAll(M{"n": M{"$gt": 42}})
+ c.Assert(err, IsNil)
+ c.Assert(info.Updated, Equals, 0)
+ c.Assert(info.Removed, Equals, 4)
+ c.Assert(info.Matched, Equals, 4)
+ c.Assert(info.UpsertedId, IsNil)
+
+ result := &struct{ N int }{}
+ err = coll.Find(M{"n": 42}).One(result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 42)
+
+ err = coll.Find(M{"n": 43}).One(result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = coll.Find(M{"n": 44}).One(result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ info, err = coll.RemoveAll(nil)
+ c.Assert(err, IsNil)
+ c.Assert(info.Updated, Equals, 0)
+ c.Assert(info.Removed, Equals, 3)
+ c.Assert(info.Matched, Equals, 3)
+ c.Assert(info.UpsertedId, IsNil)
+
+ n, err := coll.Find(nil).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 0)
+}
+
+func (s *S) TestDropDatabase(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db1 := session.DB("db1")
+ db1.C("col").Insert(M{"_id": 1})
+
+ db2 := session.DB("db2")
+ db2.C("col").Insert(M{"_id": 1})
+
+ err = db1.DropDatabase()
+ c.Assert(err, IsNil)
+
+ names, err := session.DatabaseNames()
+ c.Assert(err, IsNil)
+ c.Assert(filterDBs(names), DeepEquals, []string{"db2"})
+
+ err = db2.DropDatabase()
+ c.Assert(err, IsNil)
+
+ names, err = session.DatabaseNames()
+ c.Assert(err, IsNil)
+ c.Assert(filterDBs(names), DeepEquals, []string{})
+}
+
+func filterDBs(dbs []string) []string {
+ var i int
+ for _, name := range dbs {
+ switch name {
+ case "admin", "local":
+ default:
+ dbs[i] = name
+ i++
+ }
+ }
+ if len(dbs) == 0 {
+ return []string{}
+ }
+ return dbs[:i]
+}
+
+func (s *S) TestDropCollection(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("db1")
+ db.C("col1").Insert(M{"_id": 1})
+ db.C("col2").Insert(M{"_id": 1})
+
+ err = db.C("col1").DropCollection()
+ c.Assert(err, IsNil)
+
+ names, err := db.CollectionNames()
+ c.Assert(err, IsNil)
+ c.Assert(names, DeepEquals, []string{"col2", "system.indexes"})
+
+ err = db.C("col2").DropCollection()
+ c.Assert(err, IsNil)
+
+ names, err = db.CollectionNames()
+ c.Assert(err, IsNil)
+ c.Assert(names, DeepEquals, []string{"system.indexes"})
+}
+
+func (s *S) TestCreateCollectionCapped(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ info := &mgo.CollectionInfo{
+ Capped: true,
+ MaxBytes: 1024,
+ MaxDocs: 3,
+ }
+ err = coll.Create(info)
+ c.Assert(err, IsNil)
+
+ ns := []int{1, 2, 3, 4, 5}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ n, err := coll.Find(nil).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 3)
+}
+
+func (s *S) TestCreateCollectionNoIndex(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ info := &mgo.CollectionInfo{
+ DisableIdIndex: true,
+ }
+ err = coll.Create(info)
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ indexes, err := coll.Indexes()
+ c.Assert(indexes, HasLen, 0)
+}
+
+func (s *S) TestCreateCollectionForceIndex(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ info := &mgo.CollectionInfo{
+ ForceIdIndex: true,
+ Capped: true,
+ MaxBytes: 1024,
+ }
+ err = coll.Create(info)
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ indexes, err := coll.Indexes()
+ c.Assert(indexes, HasLen, 1)
+}
+
+func (s *S) TestCreateCollectionValidator(c *C) {
+ if !s.versionAtLeast(3, 2) {
+ c.Skip("validation depends on MongoDB 3.2+")
+ }
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+ coll := db.C("mycoll")
+
+ // Test Validator.
+ info := &mgo.CollectionInfo{
+ Validator: M{"b": M{"$exists": true}},
+ }
+ err = coll.Create(info)
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, ErrorMatches, "Document failed validation")
+ err = coll.DropCollection()
+ c.Assert(err, IsNil)
+
+ // Test ValidatorAction.
+ info = &mgo.CollectionInfo{
+ Validator: M{"b": M{"$exists": true}},
+ ValidationAction: "warn",
+ }
+ err = coll.Create(info)
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+ err = coll.DropCollection()
+ c.Assert(err, IsNil)
+
+ // Test ValidationLevel.
+ info = &mgo.CollectionInfo{
+ Validator: M{"a": M{"$exists": true}},
+ ValidationLevel: "moderate",
+ }
+ err = coll.Create(info)
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+ err = db.Run(bson.D{{"collMod", "mycoll"}, {"validator", M{"b": M{"$exists": true}}}}, nil)
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{"a": 2})
+ c.Assert(err, ErrorMatches, "Document failed validation")
+ err = coll.Update(M{"a": 1}, M{"c": 1})
+ c.Assert(err, IsNil)
+ err = coll.DropCollection()
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestCreateCollectionStorageEngine(c *C) {
+ if !s.versionAtLeast(3, 0) {
+ c.Skip("storageEngine option depends on MongoDB 3.0+")
+ }
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+ coll := db.C("mycoll")
+
+ info := &mgo.CollectionInfo{
+ StorageEngine: M{"test": M{}},
+ }
+ err = coll.Create(info)
+ c.Assert(err, ErrorMatches, "test is not a registered storage engine for this server")
+}
+
+func (s *S) TestIsDupValues(c *C) {
+ c.Assert(mgo.IsDup(nil), Equals, false)
+ c.Assert(mgo.IsDup(&mgo.LastError{Code: 1}), Equals, false)
+ c.Assert(mgo.IsDup(&mgo.QueryError{Code: 1}), Equals, false)
+ c.Assert(mgo.IsDup(&mgo.LastError{Code: 11000}), Equals, true)
+ c.Assert(mgo.IsDup(&mgo.QueryError{Code: 11000}), Equals, true)
+ c.Assert(mgo.IsDup(&mgo.LastError{Code: 11001}), Equals, true)
+ c.Assert(mgo.IsDup(&mgo.QueryError{Code: 11001}), Equals, true)
+ c.Assert(mgo.IsDup(&mgo.LastError{Code: 12582}), Equals, true)
+ c.Assert(mgo.IsDup(&mgo.QueryError{Code: 12582}), Equals, true)
+ lerr := &mgo.LastError{Code: 16460, Err: "error inserting 1 documents to shard ... caused by :: E11000 duplicate key error index: ..."}
+ c.Assert(mgo.IsDup(lerr), Equals, true)
+}
+
+func (s *S) TestIsDupPrimary(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{"_id": 1})
+ c.Assert(err, ErrorMatches, ".*duplicate key error.*")
+ c.Assert(mgo.IsDup(err), Equals, true)
+}
+
+func (s *S) TestIsDupUnique(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ index := mgo.Index{
+ Key: []string{"a", "b"},
+ Unique: true,
+ }
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndex(index)
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"a": 1, "b": 1})
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{"a": 1, "b": 1})
+ c.Assert(err, ErrorMatches, ".*duplicate key error.*")
+ c.Assert(mgo.IsDup(err), Equals, true)
+}
+
+func (s *S) TestIsDupCapped(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ info := &mgo.CollectionInfo{
+ ForceIdIndex: true,
+ Capped: true,
+ MaxBytes: 1024,
+ }
+ err = coll.Create(info)
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{"_id": 1})
+	// The error text differed for capped collections before 2.6.
+	c.Assert(err, ErrorMatches, ".*duplicate key.*")
+	// IsDup smooths over the difference.
+ c.Assert(mgo.IsDup(err), Equals, true)
+}
+
+func (s *S) TestIsDupFindAndModify(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{"n"}, Unique: true})
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{"n": 2})
+ c.Assert(err, IsNil)
+ _, err = coll.Find(M{"n": 1}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, bson.M{})
+ c.Assert(err, ErrorMatches, ".*duplicate key error.*")
+ c.Assert(mgo.IsDup(err), Equals, true)
+}
+
+func (s *S) TestFindAndModify(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": 42})
+	c.Assert(err, IsNil)
+
+ session.SetMode(mgo.Monotonic, true)
+
+ result := M{}
+ info, err := coll.Find(M{"n": 42}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 42)
+ c.Assert(info.Updated, Equals, 1)
+ c.Assert(info.Matched, Equals, 1)
+ c.Assert(info.Removed, Equals, 0)
+ c.Assert(info.UpsertedId, IsNil)
+
+ // A nil result parameter should be acceptable.
+ info, err = coll.Find(M{"n": 43}).Apply(mgo.Change{Update: M{"$unset": M{"missing": 1}}}, nil)
+ c.Assert(err, IsNil)
+ c.Assert(info.Updated, Equals, 1) // On 2.6+ this feels like a server mistake.
+ c.Assert(info.Matched, Equals, 1)
+ c.Assert(info.Removed, Equals, 0)
+ c.Assert(info.UpsertedId, IsNil)
+
+ result = M{}
+ info, err = coll.Find(M{"n": 43}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}, ReturnNew: true}, result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 44)
+ c.Assert(info.Updated, Equals, 1)
+ c.Assert(info.Removed, Equals, 0)
+ c.Assert(info.UpsertedId, IsNil)
+
+ result = M{}
+ info, err = coll.Find(M{"n": 50}).Apply(mgo.Change{Upsert: true, Update: M{"n": 51, "o": 52}}, result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], IsNil)
+ c.Assert(info.Updated, Equals, 0)
+ c.Assert(info.Removed, Equals, 0)
+ c.Assert(info.UpsertedId, NotNil)
+
+ result = M{}
+ info, err = coll.Find(nil).Sort("-n").Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}, ReturnNew: true}, result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], Equals, 52)
+ c.Assert(info.Updated, Equals, 1)
+ c.Assert(info.Removed, Equals, 0)
+ c.Assert(info.UpsertedId, IsNil)
+
+ result = M{}
+ info, err = coll.Find(M{"n": 52}).Select(M{"o": 1}).Apply(mgo.Change{Remove: true}, result)
+ c.Assert(err, IsNil)
+ c.Assert(result["n"], IsNil)
+ c.Assert(result["o"], Equals, 52)
+ c.Assert(info.Updated, Equals, 0)
+ c.Assert(info.Removed, Equals, 1)
+ c.Assert(info.UpsertedId, IsNil)
+
+ result = M{}
+ info, err = coll.Find(M{"n": 60}).Apply(mgo.Change{Remove: true}, result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+ c.Assert(len(result), Equals, 0)
+ c.Assert(info, IsNil)
+}
+
+func (s *S) TestFindAndModifyBug997828(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"n": "not-a-number"})
+	c.Assert(err, IsNil)
+
+ result := make(M)
+ _, err = coll.Find(M{"n": "not-a-number"}).Apply(mgo.Change{Update: M{"$inc": M{"n": 1}}}, result)
+ c.Assert(err, ErrorMatches, `(exception: )?Cannot apply \$inc .*`)
+ if s.versionAtLeast(2, 1) {
+ qerr, _ := err.(*mgo.QueryError)
+ c.Assert(qerr, NotNil, Commentf("err: %#v", err))
+ if s.versionAtLeast(2, 6) {
+ // Oh, the dance of error codes. :-(
+ c.Assert(qerr.Code, Equals, 16837)
+ } else {
+ c.Assert(qerr.Code, Equals, 10140)
+ }
+ } else {
+ lerr, _ := err.(*mgo.LastError)
+ c.Assert(lerr, NotNil, Commentf("err: %#v", err))
+ c.Assert(lerr.Code, Equals, 10140)
+ }
+}
+
+func (s *S) TestFindAndModifyErrmsgDoc(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+	err = coll.Insert(M{"errmsg": "an error"})
+	c.Assert(err, IsNil)
+
+ var result M
+ _, err = coll.Find(M{}).Apply(mgo.Change{Update: M{"$set": M{"n": 1}}}, &result)
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestCountCollection(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ n, err := coll.Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 3)
+}
+
+func (s *S) TestCountQuery(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ n, err := coll.Find(M{"n": M{"$gt": 40}}).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 2)
+}
+
+func (s *S) TestCountQuerySorted(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ n, err := coll.Find(M{"n": M{"$gt": 40}}).Sort("n").Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 2)
+}
+
+func (s *S) TestCountSkipLimit(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ n, err := coll.Find(nil).Skip(1).Limit(3).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 3)
+
+ n, err = coll.Find(nil).Skip(1).Limit(5).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 4)
+}
+
+func (s *S) TestQueryExplain(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ m := M{}
+ query := coll.Find(nil).Limit(2)
+ err = query.Explain(m)
+ c.Assert(err, IsNil)
+ if m["queryPlanner"] != nil {
+ c.Assert(m["executionStats"].(M)["totalDocsExamined"], Equals, 2)
+ } else {
+ c.Assert(m["cursor"], Equals, "BasicCursor")
+ c.Assert(m["nscanned"], Equals, 2)
+ c.Assert(m["n"], Equals, 2)
+ }
+
+ n := 0
+ var result M
+ iter := query.Iter()
+ for iter.Next(&result) {
+ n++
+ }
+ c.Assert(iter.Close(), IsNil)
+ c.Assert(n, Equals, 2)
+}
+
+func (s *S) TestQuerySetMaxScan(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ query := coll.Find(nil).SetMaxScan(2)
+ var result []M
+ err = query.All(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result, HasLen, 2)
+}
+
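+// SetMaxTime maps to the server's maxTimeMS option: once the operation has
+// consumed that much processing time, the server aborts it and the error
+// "operation exceeded time limit" surfaces through the query result. A
+// minimal sketch, assuming the query is slow enough to trip the limit:
+//
+//     var results []bson.M
+//     err := coll.Find(nil).SetMaxTime(time.Second).All(&results)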
+func (s *S) TestQuerySetMaxTime(c *C) {
+ if !s.versionAtLeast(2, 6) {
+ c.Skip("SetMaxTime only supported in 2.6+")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ coll := session.DB("mydb").C("mycoll")
+
+ for i := 0; i < 1000; i++ {
+ err := coll.Insert(M{"n": i})
+ c.Assert(err, IsNil)
+ }
+
+ query := coll.Find(nil)
+ query.SetMaxTime(1 * time.Millisecond)
+ query.Batch(2)
+ var result []M
+ err = query.All(&result)
+ c.Assert(err, ErrorMatches, "operation exceeded time limit")
+}
+
+func (s *S) TestQueryHint(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ coll.EnsureIndexKey("a")
+
+ m := M{}
+ err = coll.Find(nil).Hint("a").Explain(m)
+ c.Assert(err, IsNil)
+
+ if m["queryPlanner"] != nil {
+ m = m["queryPlanner"].(M)
+ m = m["winningPlan"].(M)
+ m = m["inputStage"].(M)
+ c.Assert(m["indexName"], Equals, "a_1")
+ } else {
+ c.Assert(m["indexBounds"], NotNil)
+ c.Assert(m["indexBounds"].(M)["a"], NotNil)
+ }
+}
+
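+// Comment attaches a $comment to the query, which is recorded by the system
+// profiler (enabled below with the profile command at level 2) and makes
+// individual queries easy to trace, e.g.:
+//
+//     coll.Find(bson.M{"n": 41}).Comment("trace-me").One(nil)
+//
+// Note that the profile document layout changed in 3.2, which is why the
+// field names are switched below depending on the server version.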
+func (s *S) TestQueryComment(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ db := session.DB("mydb")
+ coll := db.C("mycoll")
+
+ err = db.Run(bson.M{"profile": 2}, nil)
+ c.Assert(err, IsNil)
+
+ ns := []int{40, 41, 42}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ query := coll.Find(bson.M{"n": 41})
+ query.Comment("some comment")
+ err = query.One(nil)
+ c.Assert(err, IsNil)
+
+ query = coll.Find(bson.M{"n": 41})
+ query.Comment("another comment")
+ err = query.One(nil)
+ c.Assert(err, IsNil)
+
+ commentField := "query.$comment"
+ nField := "query.$query.n"
+ if s.versionAtLeast(3, 2) {
+ commentField = "query.comment"
+ nField = "query.filter.n"
+ }
+ n, err := session.DB("mydb").C("system.profile").Find(bson.M{nField: 41, commentField: "some comment"}).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 1)
+}
+
+func (s *S) TestFindOneNotFound(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ result := struct{ A, B int }{}
+ err = coll.Find(M{"a": 1}).One(&result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+ c.Assert(err, ErrorMatches, "not found")
+ c.Assert(err == mgo.ErrNotFound, Equals, true)
+}
+
+func (s *S) TestFindIterNotFound(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ result := struct{ A, B int }{}
+ iter := coll.Find(M{"a": 1}).Iter()
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Err(), IsNil)
+}
+
+func (s *S) TestFindNil(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ result := struct{ N int }{}
+
+ err = coll.Find(nil).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 1)
+}
+
+func (s *S) TestFindId(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"_id": 41, "n": 41})
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{"_id": 42, "n": 42})
+ c.Assert(err, IsNil)
+
+ result := struct{ N int }{}
+
+ err = coll.FindId(42).One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.N, Equals, 42)
+}
+
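+// The iterator tests below account for raw wire-protocol traffic: one
+// QUERY_OP for the initial query, one GET_MORE_OP per extra batch, and a
+// REPLY_OP for each of those. From 3.2 on, the find and getMore commands
+// return a whole batch embedded in a single reply document, which is why
+// ReceivedDocs is asserted differently per server version.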
+func (s *S) TestFindIterAll(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+ session.Refresh() // Release socket.
+
+ mgo.ResetStats()
+
+ iter := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2).Iter()
+ result := struct{ N int }{}
+ for i := 2; i < 7; i++ {
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true, Commentf("err=%v", err))
+ c.Assert(result.N, Equals, ns[i])
+		if i == 3 { // The batch boundary.
+			stats := mgo.GetStats()
+			if s.versionAtLeast(3, 2) {
+				c.Assert(stats.ReceivedDocs, Equals, 1)
+			} else {
+				c.Assert(stats.ReceivedDocs, Equals, 2)
+			}
+		}
+ }
+
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Close(), IsNil)
+
+ session.Refresh() // Release socket.
+
+ stats := mgo.GetStats()
+ c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP
+ c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs.
+ if s.versionAtLeast(3, 2) {
+ // In 3.2+ responses come in batches inside the op reply docs.
+ c.Assert(stats.ReceivedDocs, Equals, 3)
+ } else {
+ c.Assert(stats.ReceivedDocs, Equals, 5)
+ }
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestFindIterTwiceWithSameQuery(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ for i := 40; i != 47; i++ {
+ err := coll.Insert(M{"n": i})
+ c.Assert(err, IsNil)
+ }
+
+ query := coll.Find(M{}).Sort("n")
+
+ iter1 := query.Skip(1).Iter()
+ iter2 := query.Skip(2).Iter()
+
+ var result struct{ N int }
+ ok := iter2.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(result.N, Equals, 42)
+ ok = iter1.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(result.N, Equals, 41)
+}
+
+func (s *S) TestFindIterWithoutResults(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ coll.Insert(M{"n": 42})
+
+ iter := coll.Find(M{"n": 0}).Iter()
+
+ result := struct{ N int }{}
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Close(), IsNil)
+ c.Assert(result.N, Equals, 0)
+}
+
+func (s *S) TestFindIterLimit(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ session.Refresh() // Release socket.
+
+ mgo.ResetStats()
+
+ query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Limit(3)
+ iter := query.Iter()
+
+ result := struct{ N int }{}
+ for i := 2; i < 5; i++ {
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(result.N, Equals, ns[i])
+ }
+
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Close(), IsNil)
+
+ session.Refresh() // Release socket.
+
+ stats := mgo.GetStats()
+ if s.versionAtLeast(3, 2) {
+		// Limit works properly in 3.2+, and results are batched in a single doc.
+ c.Assert(stats.SentOps, Equals, 1) // 1*QUERY_OP
+ c.Assert(stats.ReceivedOps, Equals, 1) // and its REPLY_OP
+ c.Assert(stats.ReceivedDocs, Equals, 1)
+ } else {
+ c.Assert(stats.SentOps, Equals, 2) // 1*QUERY_OP + 1*KILL_CURSORS_OP
+ c.Assert(stats.ReceivedOps, Equals, 1) // and its REPLY_OP
+ c.Assert(stats.ReceivedDocs, Equals, 3)
+ }
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+var cursorTimeout = flag.Bool("cursor-timeout", false, "Enable cursor timeout test")
+
+func (s *S) TestFindIterCursorTimeout(c *C) {
+ if !*cursorTimeout {
+ c.Skip("-cursor-timeout")
+ }
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ type Doc struct {
+ Id int "_id"
+ }
+
+ coll := session.DB("test").C("test")
+ coll.Remove(nil)
+ for i := 0; i < 100; i++ {
+ err = coll.Insert(Doc{i})
+ c.Assert(err, IsNil)
+ }
+
+ session.SetBatch(1)
+ iter := coll.Find(nil).Iter()
+ var doc Doc
+ if !iter.Next(&doc) {
+ c.Fatalf("iterator failed to return any documents")
+ }
+
+ for i := 10; i > 0; i-- {
+ c.Logf("Sleeping... %d minutes to go...", i)
+ time.Sleep(1*time.Minute + 2*time.Second)
+ }
+
+ // Drain any existing documents that were fetched.
+ if !iter.Next(&doc) {
+ c.Fatalf("iterator with timed out cursor failed to return previously cached document")
+ }
+ if iter.Next(&doc) {
+ c.Fatalf("timed out cursor returned document")
+ }
+
+ c.Assert(iter.Err(), Equals, mgo.ErrCursor)
+}
+
+func (s *S) TestTooManyItemsLimitBug(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))
+
+ mgo.SetDebug(false)
+ coll := session.DB("mydb").C("mycoll")
+ words := strings.Split("foo bar baz", " ")
+ for i := 0; i < 5; i++ {
+ words = append(words, words...)
+ }
+ doc := bson.D{{"words", words}}
+ inserts := 10000
+ limit := 5000
+ iters := 0
+ c.Assert(inserts > limit, Equals, true)
+ for i := 0; i < inserts; i++ {
+ err := coll.Insert(&doc)
+ c.Assert(err, IsNil)
+ }
+ iter := coll.Find(nil).Limit(limit).Iter()
+ for iter.Next(&doc) {
+ if iters%100 == 0 {
+			c.Logf("Seen %d documents", iters)
+ }
+ iters++
+ }
+ c.Assert(iter.Close(), IsNil)
+ c.Assert(iters, Equals, limit)
+}
+
+func (s *S) TestBatchSizeZeroGetMore(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(runtime.NumCPU()))
+
+ mgo.SetDebug(false)
+ coll := session.DB("mydb").C("mycoll")
+ words := strings.Split("foo bar baz", " ")
+ for i := 0; i < 5; i++ {
+ words = append(words, words...)
+ }
+ doc := bson.D{{"words", words}}
+ inserts := 10000
+ iters := 0
+ for i := 0; i < inserts; i++ {
+ err := coll.Insert(&doc)
+ c.Assert(err, IsNil)
+ }
+ iter := coll.Find(nil).Iter()
+ for iter.Next(&doc) {
+ if iters%100 == 0 {
+			c.Logf("Seen %d documents", iters)
+ }
+ iters++
+ }
+ c.Assert(iter.Close(), IsNil)
+}
+
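+// serverCursorsOpen returns the number of cursors currently open on the
+// server, as reported by the serverStatus command, so tests can check that
+// cursors are killed once an iterator is done with them.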
+func serverCursorsOpen(session *mgo.Session) int {
+ var result struct {
+ Cursors struct {
+ TotalOpen int `bson:"totalOpen"`
+ TimedOut int `bson:"timedOut"`
+ }
+ }
+ err := session.Run("serverStatus", &result)
+ if err != nil {
+ panic(err)
+ }
+ return result.Cursors.TotalOpen
+}
+
+func (s *S) TestFindIterLimitWithMore(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ // Insane amounts of logging otherwise due to the
+ // amount of data being shuffled.
+ mgo.SetDebug(false)
+ defer mgo.SetDebug(true)
+
+ // Should amount to more than 4MB bson payload,
+ // the default limit per result chunk.
+ const total = 4096
+ var d struct{ A [1024]byte }
+ docs := make([]interface{}, total)
+ for i := 0; i < total; i++ {
+ docs[i] = &d
+ }
+ err = coll.Insert(docs...)
+ c.Assert(err, IsNil)
+
+ n, err := coll.Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, total)
+
+ // First, try restricting to a single chunk with a negative limit.
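+	// A negative limit asks the server for a single batch of at most that
+	// many documents and to close the cursor immediately, so the iterator
+	// is expected to stop short of the full result set here.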
+ nresults := 0
+ iter := coll.Find(nil).Limit(-total).Iter()
+ var discard struct{}
+ for iter.Next(&discard) {
+ nresults++
+ }
+ if nresults < total/2 || nresults >= total {
+ c.Fatalf("Bad result size with negative limit: %d", nresults)
+ }
+
+ cursorsOpen := serverCursorsOpen(session)
+
+ // Try again, with a positive limit. Should reach the end now,
+ // using multiple chunks.
+ nresults = 0
+ iter = coll.Find(nil).Limit(total).Iter()
+ for iter.Next(&discard) {
+ nresults++
+ }
+ c.Assert(nresults, Equals, total)
+
+ // Ensure the cursor used is properly killed.
+ c.Assert(serverCursorsOpen(session), Equals, cursorsOpen)
+
+	// Edge case: -math.MinInt32 == math.MinInt32, since negating the
+	// minimum int32 overflows back to itself.
+ nresults = 0
+ iter = coll.Find(nil).Limit(math.MinInt32).Iter()
+ for iter.Next(&discard) {
+ nresults++
+ }
+ if nresults < total/2 || nresults >= total {
+ c.Fatalf("Bad result size with MinInt32 limit: %d", nresults)
+ }
+}
+
+func (s *S) TestFindIterLimitWithBatch(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+ // Ping the database to ensure the nonce has been received already.
+ c.Assert(session.Ping(), IsNil)
+
+ session.Refresh() // Release socket.
+
+ mgo.ResetStats()
+
+ query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Limit(3).Batch(2)
+ iter := query.Iter()
+ result := struct{ N int }{}
+ for i := 2; i < 5; i++ {
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(result.N, Equals, ns[i])
+ if i == 3 {
+ stats := mgo.GetStats()
+ if s.versionAtLeast(3, 2) {
+ // In 3.2+ responses come in batches inside the op reply docs.
+ c.Assert(stats.ReceivedDocs, Equals, 1)
+ } else {
+ c.Assert(stats.ReceivedDocs, Equals, 2)
+ }
+ }
+ }
+
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Close(), IsNil)
+
+ session.Refresh() // Release socket.
+
+ stats := mgo.GetStats()
+ if s.versionAtLeast(3, 2) {
+		// In 3.2+ limit works properly even with multiple batches.
+		c.Assert(stats.SentOps, Equals, 2) // 1*QUERY_OP + 1*GET_MORE_OP
+		c.Assert(stats.ReceivedOps, Equals, 2) // and their REPLY_OPs
+
+ // In 3.2+ responses come in batches inside the op reply docs.
+ c.Assert(stats.ReceivedDocs, Equals, 2)
+ } else {
+ c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 1*GET_MORE_OP + 1*KILL_CURSORS_OP
+		c.Assert(stats.ReceivedOps, Equals, 2) // and their REPLY_OPs
+ c.Assert(stats.ReceivedDocs, Equals, 3)
+ }
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestFindIterSortWithBatch(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+	// Without this, the logic below breaks because Mongo refuses to
+	// return a cursor with an in-memory sort.
+ coll.EnsureIndexKey("n")
+
+ // Ping the database to ensure the nonce has been received already.
+ c.Assert(session.Ping(), IsNil)
+
+ session.Refresh() // Release socket.
+
+ mgo.ResetStats()
+
+ query := coll.Find(M{"n": M{"$lte": 44}}).Sort("-n").Batch(2)
+ iter := query.Iter()
+ ns = []int{46, 45, 44, 43, 42, 41, 40}
+ result := struct{ N int }{}
+ for i := 2; i < len(ns); i++ {
+ c.Logf("i=%d", i)
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(result.N, Equals, ns[i])
+ if i == 3 {
+ stats := mgo.GetStats()
+ if s.versionAtLeast(3, 2) {
+ // Find command in 3.2+ bundles batches in a single document.
+ c.Assert(stats.ReceivedDocs, Equals, 1)
+ } else {
+ c.Assert(stats.ReceivedDocs, Equals, 2)
+ }
+ }
+ }
+
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Close(), IsNil)
+
+ session.Refresh() // Release socket.
+
+ stats := mgo.GetStats()
+ c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP
+	c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs
+ if s.versionAtLeast(3, 2) {
+ // Find command in 3.2+ bundles batches in a single document.
+ c.Assert(stats.ReceivedDocs, Equals, 3)
+ } else {
+ c.Assert(stats.ReceivedDocs, Equals, 5)
+ }
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
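+// For reference, the typical tailing loop looks like the sketch below,
+// assuming `events` is a capped collection (only capped collections can be
+// tailed); `ev` and `handle` are placeholders:
+//
+//     iter := events.Find(nil).Sort("$natural").Tail(5 * time.Second)
+//     for {
+//         for iter.Next(&ev) {
+//             handle(ev)
+//         }
+//         if iter.Err() != nil {
+//             break // Real error; fall through to Close.
+//         }
+//         if iter.Timeout() {
+//             continue // No new data within the timeout; keep tailing.
+//         }
+//         break // Cursor became invalid; re-create it to resume.
+//     }
+//     iter.Close()
+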
+// Test tailable cursors in a situation where Next has to sleep to
+// respect the timeout requested on Tail.
+func (s *S) TestFindTailTimeoutWithSleep(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ cresult := struct{ ErrMsg string }{}
+
+ db := session.DB("mydb")
+ err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult)
+ c.Assert(err, IsNil)
+ c.Assert(cresult.ErrMsg, Equals, "")
+ coll := db.C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+ session.Refresh() // Release socket.
+
+ mgo.ResetStats()
+
+ timeout := 5 * time.Second
+
+ query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2)
+ iter := query.Tail(timeout)
+
+ n := len(ns)
+ result := struct{ N int }{}
+ for i := 2; i != n; i++ {
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(iter.Err(), IsNil)
+ c.Assert(iter.Timeout(), Equals, false)
+ c.Assert(result.N, Equals, ns[i])
+ if i == 3 { // The batch boundary.
+ stats := mgo.GetStats()
+ c.Assert(stats.ReceivedDocs, Equals, 2)
+ }
+ }
+
+ mgo.ResetStats()
+
+ // The following call to Next will block.
+ done := make(chan bool)
+ defer func() { <-done }()
+ go func() {
+ // The internal AwaitData timing of MongoDB is around 2 seconds,
+ // so this should force mgo to sleep at least once by itself to
+ // respect the requested timeout.
+ c.Logf("[GOROUTINE] Starting and sleeping...")
+ time.Sleep(timeout - 2*time.Second)
+ c.Logf("[GOROUTINE] Woke up...")
+ session := session.New()
+ c.Logf("[GOROUTINE] Session created and will insert...")
+ err := coll.Insert(M{"n": 47})
+ c.Logf("[GOROUTINE] Insert attempted, err=%v...", err)
+ session.Close()
+ c.Logf("[GOROUTINE] Session closed.")
+ c.Check(err, IsNil)
+ done <- true
+ }()
+
+ c.Log("Will wait for Next with N=47...")
+ ok := iter.Next(&result)
+ c.Log("Next unblocked...")
+ c.Assert(ok, Equals, true)
+
+ c.Assert(iter.Err(), IsNil)
+ c.Assert(iter.Timeout(), Equals, false)
+ c.Assert(result.N, Equals, 47)
+ c.Log("Got Next with N=47!")
+
+ c.Log("Will wait for a result which will never come...")
+
+ started := time.Now()
+ ok = iter.Next(&result)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Err(), IsNil)
+ c.Assert(iter.Timeout(), Equals, true)
+ c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
+
+ c.Log("Will now reuse the timed out tail cursor...")
+
+ coll.Insert(M{"n": 48})
+ ok = iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(iter.Close(), IsNil)
+ c.Assert(iter.Timeout(), Equals, false)
+ c.Assert(result.N, Equals, 48)
+}
+
+// Test tailable cursors in a situation where Next never has to sleep to
+// respect the timeout requested on Tail.
+func (s *S) TestFindTailTimeoutNoSleep(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ cresult := struct{ ErrMsg string }{}
+
+ db := session.DB("mydb")
+ err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult)
+ c.Assert(err, IsNil)
+ c.Assert(cresult.ErrMsg, Equals, "")
+ coll := db.C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+ session.Refresh() // Release socket.
+
+ mgo.ResetStats()
+
+ timeout := 1 * time.Second
+
+ query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2)
+ iter := query.Tail(timeout)
+
+ n := len(ns)
+ result := struct{ N int }{}
+ for i := 2; i != n; i++ {
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(iter.Err(), IsNil)
+ c.Assert(iter.Timeout(), Equals, false)
+ c.Assert(result.N, Equals, ns[i])
+ if i == 3 { // The batch boundary.
+ stats := mgo.GetStats()
+ c.Assert(stats.ReceivedDocs, Equals, 2)
+ }
+ }
+
+ // The following call to Next will block.
+ go func() {
+ // The internal AwaitData timing of MongoDB is around 2 seconds,
+ // so this item should arrive within the AwaitData threshold.
+ time.Sleep(500 * time.Millisecond)
+ session := session.New()
+ defer session.Close()
+ coll := session.DB("mydb").C("mycoll")
+ coll.Insert(M{"n": 47})
+ }()
+
+ c.Log("Will wait for Next with N=47...")
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(iter.Err(), IsNil)
+ c.Assert(iter.Timeout(), Equals, false)
+ c.Assert(result.N, Equals, 47)
+ c.Log("Got Next with N=47!")
+
+ c.Log("Will wait for a result which will never come...")
+
+ started := time.Now()
+ ok = iter.Next(&result)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Err(), IsNil)
+ c.Assert(iter.Timeout(), Equals, true)
+ c.Assert(started.Before(time.Now().Add(-timeout)), Equals, true)
+
+ c.Log("Will now reuse the timed out tail cursor...")
+
+ coll.Insert(M{"n": 48})
+ ok = iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(iter.Close(), IsNil)
+ c.Assert(iter.Timeout(), Equals, false)
+ c.Assert(result.N, Equals, 48)
+}
+
+// Test tailable cursors with no timeout on Tail (Tail(-1)): when no data
+// is available, Next blocks until the session is closed.
+func (s *S) TestFindTailNoTimeout(c *C) {
+ if *fast {
+ c.Skip("-fast")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ cresult := struct{ ErrMsg string }{}
+
+ db := session.DB("mydb")
+ err = db.Run(bson.D{{"create", "mycoll"}, {"capped", true}, {"size", 1024}}, &cresult)
+ c.Assert(err, IsNil)
+ c.Assert(cresult.ErrMsg, Equals, "")
+ coll := db.C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+ session.Refresh() // Release socket.
+
+ mgo.ResetStats()
+
+ query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2)
+ iter := query.Tail(-1)
+ c.Assert(err, IsNil)
+
+ n := len(ns)
+ result := struct{ N int }{}
+ for i := 2; i != n; i++ {
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(result.N, Equals, ns[i])
+ if i == 3 { // The batch boundary.
+ stats := mgo.GetStats()
+ c.Assert(stats.ReceivedDocs, Equals, 2)
+ }
+ }
+
+ mgo.ResetStats()
+
+ // The following call to Next will block.
+ go func() {
+		time.Sleep(500 * time.Millisecond)
+ session := session.New()
+ defer session.Close()
+ coll := session.DB("mydb").C("mycoll")
+ coll.Insert(M{"n": 47})
+ }()
+
+ c.Log("Will wait for Next with N=47...")
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(iter.Err(), IsNil)
+ c.Assert(iter.Timeout(), Equals, false)
+ c.Assert(result.N, Equals, 47)
+ c.Log("Got Next with N=47!")
+
+ c.Log("Will wait for a result which will never come...")
+
+ gotNext := make(chan bool)
+ go func() {
+ ok := iter.Next(&result)
+ gotNext <- ok
+ }()
+
+ select {
+ case ok := <-gotNext:
+ c.Fatalf("Next returned: %v", ok)
+	case <-time.After(3 * time.Second):
+ // Good. Should still be sleeping at that point.
+ }
+
+ // Closing the session should cause Next to return.
+ session.Close()
+
+ select {
+ case ok := <-gotNext:
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Err(), ErrorMatches, "Closed explicitly")
+ c.Assert(iter.Timeout(), Equals, false)
+	case <-time.After(1 * time.Second):
+ c.Fatal("Closing the session did not unblock Next")
+ }
+}
+
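+// Next zeroes or truncates the destination before unmarshaling each
+// document, so fields that are absent from the current document cannot leak
+// through from the previous one. The three passes below verify that for
+// struct pointer, map, and interface{} destinations.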
+func (s *S) TestIterNextResetsResult(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{1, 2, 3}
+ for _, n := range ns {
+ coll.Insert(M{"n" + strconv.Itoa(n): n})
+ }
+
+ query := coll.Find(nil).Sort("$natural")
+
+ i := 0
+ var sresult *struct{ N1, N2, N3 int }
+ iter := query.Iter()
+ for iter.Next(&sresult) {
+ switch i {
+ case 0:
+ c.Assert(sresult.N1, Equals, 1)
+ c.Assert(sresult.N2+sresult.N3, Equals, 0)
+ case 1:
+ c.Assert(sresult.N2, Equals, 2)
+ c.Assert(sresult.N1+sresult.N3, Equals, 0)
+ case 2:
+ c.Assert(sresult.N3, Equals, 3)
+ c.Assert(sresult.N1+sresult.N2, Equals, 0)
+ }
+ i++
+ }
+ c.Assert(iter.Close(), IsNil)
+
+ i = 0
+ var mresult M
+ iter = query.Iter()
+ for iter.Next(&mresult) {
+ delete(mresult, "_id")
+ switch i {
+ case 0:
+ c.Assert(mresult, DeepEquals, M{"n1": 1})
+ case 1:
+ c.Assert(mresult, DeepEquals, M{"n2": 2})
+ case 2:
+ c.Assert(mresult, DeepEquals, M{"n3": 3})
+ }
+ i++
+ }
+ c.Assert(iter.Close(), IsNil)
+
+ i = 0
+ var iresult interface{}
+ iter = query.Iter()
+ for iter.Next(&iresult) {
+ mresult, ok := iresult.(bson.M)
+ c.Assert(ok, Equals, true, Commentf("%#v", iresult))
+ delete(mresult, "_id")
+ switch i {
+ case 0:
+ c.Assert(mresult, DeepEquals, bson.M{"n1": 1})
+ case 1:
+ c.Assert(mresult, DeepEquals, bson.M{"n2": 2})
+ case 2:
+ c.Assert(mresult, DeepEquals, bson.M{"n3": 3})
+ }
+ i++
+ }
+ c.Assert(iter.Close(), IsNil)
+}
+
+func (s *S) TestFindForOnIter(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+ session.Refresh() // Release socket.
+
+ mgo.ResetStats()
+
+ query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2)
+ iter := query.Iter()
+
+ i := 2
+ var result *struct{ N int }
+ err = iter.For(&result, func() error {
+ c.Assert(i < 7, Equals, true)
+ c.Assert(result.N, Equals, ns[i])
+		if i == 3 { // The batch boundary.
+ stats := mgo.GetStats()
+ if s.versionAtLeast(3, 2) {
+ // Find command in 3.2+ bundles batches in a single document.
+ c.Assert(stats.ReceivedDocs, Equals, 1)
+ } else {
+ c.Assert(stats.ReceivedDocs, Equals, 2)
+ }
+ }
+ i++
+ return nil
+ })
+ c.Assert(err, IsNil)
+
+ session.Refresh() // Release socket.
+
+ stats := mgo.GetStats()
+ c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP
+ c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs.
+ if s.versionAtLeast(3, 2) {
+ // Find command in 3.2+ bundles batches in a single document.
+ c.Assert(stats.ReceivedDocs, Equals, 3)
+ } else {
+ c.Assert(stats.ReceivedDocs, Equals, 5)
+ }
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestFindFor(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+ session.Refresh() // Release socket.
+
+ mgo.ResetStats()
+
+ query := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2)
+
+ i := 2
+ var result *struct{ N int }
+ err = query.For(&result, func() error {
+ c.Assert(i < 7, Equals, true)
+ c.Assert(result.N, Equals, ns[i])
+		if i == 3 { // The batch boundary.
+			stats := mgo.GetStats()
+ if s.versionAtLeast(3, 2) {
+ // Find command in 3.2+ bundles batches in a single document.
+ c.Assert(stats.ReceivedDocs, Equals, 1)
+ } else {
+ c.Assert(stats.ReceivedDocs, Equals, 2)
+ }
+ }
+ i++
+ return nil
+ })
+ c.Assert(err, IsNil)
+
+ session.Refresh() // Release socket.
+
+ stats := mgo.GetStats()
+ c.Assert(stats.SentOps, Equals, 3) // 1*QUERY_OP + 2*GET_MORE_OP
+ c.Assert(stats.ReceivedOps, Equals, 3) // and their REPLY_OPs.
+ if s.versionAtLeast(3, 2) {
+ // Find command in 3.2+ bundles batches in a single document.
+ c.Assert(stats.ReceivedDocs, Equals, 3)
+ } else {
+ c.Assert(stats.ReceivedDocs, Equals, 5)
+ }
+ c.Assert(stats.SocketsInUse, Equals, 0)
+}
+
+func (s *S) TestFindForStopOnError(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+ query := coll.Find(M{"n": M{"$gte": 42}})
+ i := 2
+ var result *struct{ N int }
+ err = query.For(&result, func() error {
+ c.Assert(i < 4, Equals, true)
+ c.Assert(result.N, Equals, ns[i])
+ if i == 3 {
+ return fmt.Errorf("stop!")
+ }
+ i++
+ return nil
+ })
+ c.Assert(err, ErrorMatches, "stop!")
+}
+
+func (s *S) TestFindForResetsResult(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{1, 2, 3}
+ for _, n := range ns {
+ coll.Insert(M{"n" + strconv.Itoa(n): n})
+ }
+
+ query := coll.Find(nil).Sort("$natural")
+
+ i := 0
+ var sresult *struct{ N1, N2, N3 int }
+ err = query.For(&sresult, func() error {
+ switch i {
+ case 0:
+ c.Assert(sresult.N1, Equals, 1)
+ c.Assert(sresult.N2+sresult.N3, Equals, 0)
+ case 1:
+ c.Assert(sresult.N2, Equals, 2)
+ c.Assert(sresult.N1+sresult.N3, Equals, 0)
+ case 2:
+ c.Assert(sresult.N3, Equals, 3)
+ c.Assert(sresult.N1+sresult.N2, Equals, 0)
+ }
+ i++
+ return nil
+ })
+ c.Assert(err, IsNil)
+
+ i = 0
+ var mresult M
+ err = query.For(&mresult, func() error {
+ delete(mresult, "_id")
+ switch i {
+ case 0:
+ c.Assert(mresult, DeepEquals, M{"n1": 1})
+ case 1:
+ c.Assert(mresult, DeepEquals, M{"n2": 2})
+ case 2:
+ c.Assert(mresult, DeepEquals, M{"n3": 3})
+ }
+ i++
+ return nil
+ })
+ c.Assert(err, IsNil)
+
+ i = 0
+ var iresult interface{}
+ err = query.For(&iresult, func() error {
+ mresult, ok := iresult.(bson.M)
+ c.Assert(ok, Equals, true, Commentf("%#v", iresult))
+ delete(mresult, "_id")
+ switch i {
+ case 0:
+ c.Assert(mresult, DeepEquals, bson.M{"n1": 1})
+ case 1:
+ c.Assert(mresult, DeepEquals, bson.M{"n2": 2})
+ case 2:
+ c.Assert(mresult, DeepEquals, bson.M{"n3": 3})
+ }
+ i++
+ return nil
+ })
+ c.Assert(err, IsNil)
+}
+
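+// Snapshot adds $snapshot to the query, which prevents a document from
+// showing up more than once in the results even when it grows and is
+// relocated by concurrent updates, as forced below by inflating every
+// document mid-iteration.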
+func (s *S) TestFindIterSnapshot(c *C) {
+ if s.versionAtLeast(3, 2) {
+ c.Skip("Broken in 3.2: https://jira.mongodb.org/browse/SERVER-21403")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Insane amounts of logging otherwise due to the
+ // amount of data being shuffled.
+ mgo.SetDebug(false)
+ defer mgo.SetDebug(true)
+
+ coll := session.DB("mydb").C("mycoll")
+
+ var a [1024000]byte
+
+ for n := 0; n < 10; n++ {
+ err := coll.Insert(M{"_id": n, "n": n, "a1": &a})
+ c.Assert(err, IsNil)
+ }
+
+ query := coll.Find(M{"n": M{"$gt": -1}}).Batch(2).Prefetch(0)
+ query.Snapshot()
+ iter := query.Iter()
+
+ seen := map[int]bool{}
+ result := struct {
+ Id int "_id"
+ }{}
+ for iter.Next(&result) {
+ if len(seen) == 2 {
+ // Grow all entries so that they have to move.
+ // Backwards so that the order is inverted.
+ for n := 10; n >= 0; n-- {
+ _, err := coll.Upsert(M{"_id": n}, M{"$set": M{"a2": &a}})
+ c.Assert(err, IsNil)
+ }
+ }
+ if seen[result.Id] {
+ c.Fatalf("seen duplicated key: %d", result.Id)
+ }
+ seen[result.Id] = true
+ }
+ c.Assert(iter.Close(), IsNil)
+}
+
+func (s *S) TestSort(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ coll.Insert(M{"a": 1, "b": 1})
+ coll.Insert(M{"a": 2, "b": 2})
+ coll.Insert(M{"a": 2, "b": 1})
+ coll.Insert(M{"a": 0, "b": 1})
+ coll.Insert(M{"a": 2, "b": 0})
+ coll.Insert(M{"a": 0, "b": 2})
+ coll.Insert(M{"a": 1, "b": 2})
+ coll.Insert(M{"a": 0, "b": 0})
+ coll.Insert(M{"a": 1, "b": 0})
+
+ query := coll.Find(M{})
+	query.Sort("-a") // Superseded by the Sort call below.
+ query.Sort("-b", "a")
+ iter := query.Iter()
+
+ l := make([]int, 18)
+ r := struct{ A, B int }{}
+ for i := 0; i != len(l); i += 2 {
+ ok := iter.Next(&r)
+ c.Assert(ok, Equals, true)
+ c.Assert(err, IsNil)
+ l[i] = r.A
+ l[i+1] = r.B
+ }
+
+ c.Assert(l, DeepEquals, []int{0, 2, 1, 2, 2, 2, 0, 1, 1, 1, 2, 1, 0, 0, 1, 0, 2, 0})
+}
+
+func (s *S) TestSortWithBadArgs(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ f1 := func() { coll.Find(nil).Sort("") }
+ f2 := func() { coll.Find(nil).Sort("+") }
+ f3 := func() { coll.Find(nil).Sort("foo", "-") }
+
+ for _, f := range []func(){f1, f2, f3} {
+ c.Assert(f, PanicMatches, "Sort: empty field name")
+ }
+}
+
+func (s *S) TestSortScoreText(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ if !s.versionAtLeast(2, 4) {
+ c.Skip("Text search depends on 2.4+")
+ }
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndex(mgo.Index{
+ Key: []string{"$text:a", "$text:b"},
+ })
+ msg := "text search not enabled"
+ if err != nil && strings.Contains(err.Error(), msg) {
+ c.Skip(msg)
+ }
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{
+ "a": "none",
+ "b": "twice: foo foo",
+ })
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{
+ "a": "just once: foo",
+ "b": "none",
+ })
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{
+ "a": "many: foo foo foo",
+ "b": "none",
+ })
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{
+ "a": "none",
+ "b": "none",
+ "c": "ignore: foo",
+ })
+ c.Assert(err, IsNil)
+
+ query := coll.Find(M{"$text": M{"$search": "foo"}})
+ query.Select(M{"score": M{"$meta": "textScore"}})
+ query.Sort("$textScore:score")
+ iter := query.Iter()
+
+ var r struct{ A, B string }
+ var results []string
+ for iter.Next(&r) {
+ results = append(results, r.A, r.B)
+ }
+
+ c.Assert(results, DeepEquals, []string{
+ "many: foo foo foo", "none",
+ "none", "twice: foo foo",
+ "just once: foo", "none",
+ })
+}
+
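+// Prefetch controls when the next batch is requested: with a batch of 100
+// and the default prefetch ratio of 0.25, the GET_MORE is issued once 25
+// documents remain unread in the current batch, i.e. after 75 reads; a
+// ratio of 0.27 moves that to 73 reads, and 1.0 requests the next batch as
+// soon as the current one arrives. It can be set in two ways:
+//
+//     session.SetPrefetch(0.27)                // Session-wide default.
+//     coll.Find(nil).Prefetch(0.27).Batch(100) // Per query.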
+func (s *S) TestPrefetching(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ const total = 600
+ const batch = 100
+ mgo.SetDebug(false)
+ docs := make([]interface{}, total)
+ for i := 0; i != total; i++ {
+ docs[i] = bson.D{{"n", i}}
+ }
+ err = coll.Insert(docs...)
+ c.Assert(err, IsNil)
+
+ for testi := 0; testi < 5; testi++ {
+ mgo.ResetStats()
+
+ var iter *mgo.Iter
+ var beforeMore int
+
+ switch testi {
+ case 0: // The default session value.
+ session.SetBatch(batch)
+ iter = coll.Find(M{}).Iter()
+ beforeMore = 75
+
+ case 2: // Changing the session value.
+ session.SetBatch(batch)
+ session.SetPrefetch(0.27)
+ iter = coll.Find(M{}).Iter()
+ beforeMore = 73
+
+ case 1: // Changing via query methods.
+ iter = coll.Find(M{}).Prefetch(0.27).Batch(batch).Iter()
+ beforeMore = 73
+
+ case 3: // With prefetch on first document.
+ iter = coll.Find(M{}).Prefetch(1.0).Batch(batch).Iter()
+ beforeMore = 0
+
+ case 4: // Without prefetch.
+ iter = coll.Find(M{}).Prefetch(0).Batch(batch).Iter()
+ beforeMore = 100
+ }
+
+ pings := 0
+ for batchi := 0; batchi < len(docs)/batch-1; batchi++ {
+ c.Logf("Iterating over %d documents on batch %d", beforeMore, batchi)
+ var result struct{ N int }
+ for i := 0; i < beforeMore; i++ {
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true, Commentf("iter.Err: %v", iter.Err()))
+ }
+ beforeMore = 99
+ c.Logf("Done iterating.")
+
+ session.Run("ping", nil) // Roundtrip to settle down.
+ pings++
+
+ stats := mgo.GetStats()
+ if s.versionAtLeast(3, 2) {
+ // Find command in 3.2+ bundles batches in a single document.
+ c.Assert(stats.ReceivedDocs, Equals, (batchi+1)+pings)
+ } else {
+ c.Assert(stats.ReceivedDocs, Equals, (batchi+1)*batch+pings)
+ }
+
+ c.Logf("Iterating over one more document on batch %d", batchi)
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true, Commentf("iter.Err: %v", iter.Err()))
+ c.Logf("Done iterating.")
+
+ session.Run("ping", nil) // Roundtrip to settle down.
+ pings++
+
+ stats = mgo.GetStats()
+ if s.versionAtLeast(3, 2) {
+ // Find command in 3.2+ bundles batches in a single document.
+ c.Assert(stats.ReceivedDocs, Equals, (batchi+2)+pings)
+ } else {
+ c.Assert(stats.ReceivedDocs, Equals, (batchi+2)*batch+pings)
+ }
+ }
+ }
+}
+
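+// SetSafe replaces the session's write-concern settings outright, while
+// EnsureSafe only tightens them: it keeps the strongest of the current and
+// requested values, and a WMode string displaces a numeric W. A short
+// sketch of the difference:
+//
+//     session.SetSafe(&mgo.Safe{W: 1})    // Now W=1.
+//     session.EnsureSafe(&mgo.Safe{W: 3}) // Raised to W=3.
+//     session.EnsureSafe(&mgo.Safe{W: 2}) // Still W=3.
+//     session.SetSafe(&mgo.Safe{W: 2})    // Back to W=2.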
+func (s *S) TestSafeSetting(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Check the default
+ safe := session.Safe()
+ c.Assert(safe.W, Equals, 0)
+ c.Assert(safe.WMode, Equals, "")
+ c.Assert(safe.WTimeout, Equals, 0)
+ c.Assert(safe.FSync, Equals, false)
+ c.Assert(safe.J, Equals, false)
+
+ // Tweak it
+ session.SetSafe(&mgo.Safe{W: 1, WTimeout: 2, FSync: true})
+ safe = session.Safe()
+ c.Assert(safe.W, Equals, 1)
+ c.Assert(safe.WMode, Equals, "")
+ c.Assert(safe.WTimeout, Equals, 2)
+ c.Assert(safe.FSync, Equals, true)
+ c.Assert(safe.J, Equals, false)
+
+ // Reset it again.
+ session.SetSafe(&mgo.Safe{})
+ safe = session.Safe()
+ c.Assert(safe.W, Equals, 0)
+ c.Assert(safe.WMode, Equals, "")
+ c.Assert(safe.WTimeout, Equals, 0)
+ c.Assert(safe.FSync, Equals, false)
+ c.Assert(safe.J, Equals, false)
+
+	// Set safety to something more conservative.
+ session.SetSafe(&mgo.Safe{W: 5, WTimeout: 6, J: true})
+ safe = session.Safe()
+ c.Assert(safe.W, Equals, 5)
+ c.Assert(safe.WMode, Equals, "")
+ c.Assert(safe.WTimeout, Equals, 6)
+ c.Assert(safe.FSync, Equals, false)
+ c.Assert(safe.J, Equals, true)
+
+	// Ensuring safety with something less conservative won't change it.
+ session.EnsureSafe(&mgo.Safe{W: 4, WTimeout: 7})
+ safe = session.Safe()
+ c.Assert(safe.W, Equals, 5)
+ c.Assert(safe.WMode, Equals, "")
+ c.Assert(safe.WTimeout, Equals, 6)
+ c.Assert(safe.FSync, Equals, false)
+ c.Assert(safe.J, Equals, true)
+
+	// But ensuring something more conservative will.
+ session.EnsureSafe(&mgo.Safe{W: 6, WTimeout: 4, FSync: true})
+ safe = session.Safe()
+ c.Assert(safe.W, Equals, 6)
+ c.Assert(safe.WMode, Equals, "")
+ c.Assert(safe.WTimeout, Equals, 4)
+ c.Assert(safe.FSync, Equals, true)
+ c.Assert(safe.J, Equals, false)
+
+ // Even more conservative.
+ session.EnsureSafe(&mgo.Safe{WMode: "majority", WTimeout: 2})
+ safe = session.Safe()
+ c.Assert(safe.W, Equals, 0)
+ c.Assert(safe.WMode, Equals, "majority")
+ c.Assert(safe.WTimeout, Equals, 2)
+ c.Assert(safe.FSync, Equals, true)
+ c.Assert(safe.J, Equals, false)
+
+ // WMode always overrides, whatever it is, but J doesn't.
+ session.EnsureSafe(&mgo.Safe{WMode: "something", J: true})
+ safe = session.Safe()
+ c.Assert(safe.W, Equals, 0)
+ c.Assert(safe.WMode, Equals, "something")
+ c.Assert(safe.WTimeout, Equals, 2)
+ c.Assert(safe.FSync, Equals, true)
+ c.Assert(safe.J, Equals, false)
+
+ // EnsureSafe with nil does nothing.
+ session.EnsureSafe(nil)
+ safe = session.Safe()
+ c.Assert(safe.W, Equals, 0)
+ c.Assert(safe.WMode, Equals, "something")
+ c.Assert(safe.WTimeout, Equals, 2)
+ c.Assert(safe.FSync, Equals, true)
+ c.Assert(safe.J, Equals, false)
+
+ // Changing the safety of a cloned session doesn't touch the original.
+ clone := session.Clone()
+ defer clone.Close()
+ clone.EnsureSafe(&mgo.Safe{WMode: "foo"})
+ safe = session.Safe()
+ c.Assert(safe.WMode, Equals, "something")
+}
+
+func (s *S) TestSafeInsert(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ // Insert an element with a predefined key.
+ err = coll.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+
+ mgo.ResetStats()
+
+	// Sessions are safe by default, so inserting the same key again must fail.
+ err = coll.Insert(M{"_id": 1})
+ c.Assert(err, ErrorMatches, ".*E11000 duplicate.*")
+ c.Assert(err.(*mgo.LastError).Code, Equals, 11000)
+
+	// Pre-2.6 it must have sent two operations (INSERT_OP + getLastError
+	// QUERY_OP); 2.6+ sends a single insert command.
+ stats := mgo.GetStats()
+
+ if s.versionAtLeast(2, 6) {
+ c.Assert(stats.SentOps, Equals, 1)
+ } else {
+ c.Assert(stats.SentOps, Equals, 2)
+ }
+
+ mgo.ResetStats()
+
+ // If we disable safety, though, it won't complain.
+ session.SetSafe(nil)
+ err = coll.Insert(M{"_id": 1})
+ c.Assert(err, IsNil)
+
+ // Must have sent a single operation this time (just the INSERT_OP)
+ stats = mgo.GetStats()
+ c.Assert(stats.SentOps, Equals, 1)
+}
+
+func (s *S) TestSafeParameters(c *C) {
+ session, err := mgo.Dial("localhost:40011")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ // Tweak the safety parameters to something unachievable.
+ session.SetSafe(&mgo.Safe{W: 4, WTimeout: 100})
+ err = coll.Insert(M{"_id": 1})
+ c.Assert(err, ErrorMatches, "timeout|timed out waiting for slaves|Not enough data-bearing nodes|waiting for replication timed out") // :-(
+ if !s.versionAtLeast(2, 6) {
+ // 2.6 turned it into a query error.
+ c.Assert(err.(*mgo.LastError).WTimeout, Equals, true)
+ }
+}
+
+func (s *S) TestQueryErrorOne(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).One(nil)
+ c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*")
+ c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*")
+ // Oh, the dance of error codes. :-(
+ if s.versionAtLeast(3, 2) {
+ c.Assert(err.(*mgo.QueryError).Code, Equals, 2)
+ } else if s.versionAtLeast(2, 6) {
+ c.Assert(err.(*mgo.QueryError).Code, Equals, 17287)
+ } else {
+ c.Assert(err.(*mgo.QueryError).Code, Equals, 13097)
+ }
+}
+
+func (s *S) TestQueryErrorNext(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ iter := coll.Find(M{"a": 1}).Select(M{"a": M{"b": 1}}).Iter()
+
+ var result struct{}
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, false)
+
+ err = iter.Close()
+ c.Assert(err, ErrorMatches, ".*Unsupported projection option:.*")
+ c.Assert(err.(*mgo.QueryError).Message, Matches, ".*Unsupported projection option:.*")
+ // Oh, the dance of error codes. :-(
+ if s.versionAtLeast(3, 2) {
+ c.Assert(err.(*mgo.QueryError).Code, Equals, 2)
+ } else if s.versionAtLeast(2, 6) {
+ c.Assert(err.(*mgo.QueryError).Code, Equals, 17287)
+ } else {
+ c.Assert(err.(*mgo.QueryError).Code, Equals, 13097)
+ }
+ c.Assert(iter.Err(), Equals, err)
+}
+
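+// The index key strings below use mgo's extended syntax: a leading "-"
+// marks a descending key, "$<kind>:<field>" requests a special index such
+// as $2d, $geoHaystack, or $text, and the obsolete "@field" form is still
+// accepted as an alias for "$2d:field".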
+var indexTests = []struct {
+ index mgo.Index
+ expected M
+}{{
+ mgo.Index{
+ Key: []string{"a"},
+ Background: true,
+ },
+ M{
+ "name": "a_1",
+ "key": M{"a": 1},
+ "ns": "mydb.mycoll",
+ "background": true,
+ },
+}, {
+ mgo.Index{
+ Key: []string{"a", "-b"},
+ Unique: true,
+ DropDups: true,
+ },
+ M{
+ "name": "a_1_b_-1",
+ "key": M{"a": 1, "b": -1},
+ "ns": "mydb.mycoll",
+ "unique": true,
+ "dropDups": true,
+ },
+}, {
+ mgo.Index{
+ Key: []string{"@loc_old"}, // Obsolete
+ Min: -500,
+ Max: 500,
+ Bits: 32,
+ },
+ M{
+ "name": "loc_old_2d",
+ "key": M{"loc_old": "2d"},
+ "ns": "mydb.mycoll",
+ "min": -500.0,
+ "max": 500.0,
+ "bits": 32,
+ },
+}, {
+ mgo.Index{
+ Key: []string{"$2d:loc"},
+ Min: -500,
+ Max: 500,
+ Bits: 32,
+ },
+ M{
+ "name": "loc_2d",
+ "key": M{"loc": "2d"},
+ "ns": "mydb.mycoll",
+ "min": -500.0,
+ "max": 500.0,
+ "bits": 32,
+ },
+}, {
+ mgo.Index{
+ Key: []string{"$2d:loc"},
+ Minf: -500.1,
+ Maxf: 500.1,
+ Min: 1, // Should be ignored
+ Max: 2,
+ Bits: 32,
+ },
+ M{
+ "name": "loc_2d",
+ "key": M{"loc": "2d"},
+ "ns": "mydb.mycoll",
+ "min": -500.1,
+ "max": 500.1,
+ "bits": 32,
+ },
+}, {
+ mgo.Index{
+ Key: []string{"$geoHaystack:loc", "type"},
+ BucketSize: 1,
+ },
+ M{
+ "name": "loc_geoHaystack_type_1",
+ "key": M{"loc": "geoHaystack", "type": 1},
+ "ns": "mydb.mycoll",
+ "bucketSize": 1.0,
+ },
+}, {
+ mgo.Index{
+ Key: []string{"$text:a", "$text:b"},
+ Weights: map[string]int{"b": 42},
+ },
+ M{
+ "name": "a_text_b_text",
+ "key": M{"_fts": "text", "_ftsx": 1},
+ "ns": "mydb.mycoll",
+ "weights": M{"a": 1, "b": 42},
+ "default_language": "english",
+ "language_override": "language",
+ "textIndexVersion": 2,
+ },
+}, {
+ mgo.Index{
+ Key: []string{"$text:a"},
+ DefaultLanguage: "portuguese",
+ LanguageOverride: "idioma",
+ },
+ M{
+ "name": "a_text",
+ "key": M{"_fts": "text", "_ftsx": 1},
+ "ns": "mydb.mycoll",
+ "weights": M{"a": 1},
+ "default_language": "portuguese",
+ "language_override": "idioma",
+ "textIndexVersion": 2,
+ },
+}, {
+ mgo.Index{
+ Key: []string{"$text:$**"},
+ },
+ M{
+ "name": "$**_text",
+ "key": M{"_fts": "text", "_ftsx": 1},
+ "ns": "mydb.mycoll",
+ "weights": M{"$**": 1},
+ "default_language": "english",
+ "language_override": "language",
+ "textIndexVersion": 2,
+ },
+}, {
+ mgo.Index{
+ Key: []string{"cn"},
+ Name: "CustomName",
+ },
+ M{
+ "name": "CustomName",
+ "key": M{"cn": 1},
+ "ns": "mydb.mycoll",
+ },
+}}
+
+func (s *S) TestEnsureIndex(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ idxs := session.DB("mydb").C("system.indexes")
+
+ for _, test := range indexTests {
+ if !s.versionAtLeast(2, 4) && test.expected["textIndexVersion"] != nil {
+ continue
+ }
+
+ err = coll.EnsureIndex(test.index)
+ msg := "text search not enabled"
+ if err != nil && strings.Contains(err.Error(), msg) {
+ continue
+ }
+ c.Assert(err, IsNil)
+
+ expectedName := test.index.Name
+ if expectedName == "" {
+ expectedName, _ = test.expected["name"].(string)
+ }
+
+ obtained := M{}
+ err = idxs.Find(M{"name": expectedName}).One(obtained)
+ c.Assert(err, IsNil)
+
+ delete(obtained, "v")
+
+ if s.versionAtLeast(2, 7) {
+ // Was deprecated in 2.6, and not being reported by 2.7+.
+ delete(test.expected, "dropDups")
+ test.index.DropDups = false
+ }
+ if s.versionAtLeast(3, 2) && test.expected["textIndexVersion"] != nil {
+ test.expected["textIndexVersion"] = 3
+ }
+
+ c.Assert(obtained, DeepEquals, test.expected)
+
+ // The result of Indexes must match closely what was used to create the index.
+ indexes, err := coll.Indexes()
+ c.Assert(err, IsNil)
+ c.Assert(indexes, HasLen, 2)
+ gotIndex := indexes[0]
+ if gotIndex.Name == "_id_" {
+ gotIndex = indexes[1]
+ }
+ wantIndex := test.index
+ if wantIndex.Name == "" {
+ wantIndex.Name = gotIndex.Name
+ }
+ if strings.HasPrefix(wantIndex.Key[0], "@") {
+ wantIndex.Key[0] = "$2d:" + wantIndex.Key[0][1:]
+ }
+ if wantIndex.Minf == 0 && wantIndex.Maxf == 0 {
+ wantIndex.Minf = float64(wantIndex.Min)
+ wantIndex.Maxf = float64(wantIndex.Max)
+ } else {
+ wantIndex.Min = gotIndex.Min
+ wantIndex.Max = gotIndex.Max
+ }
+ if wantIndex.DefaultLanguage == "" {
+ wantIndex.DefaultLanguage = gotIndex.DefaultLanguage
+ }
+ if wantIndex.LanguageOverride == "" {
+ wantIndex.LanguageOverride = gotIndex.LanguageOverride
+ }
+		for name := range gotIndex.Weights {
+ if _, ok := wantIndex.Weights[name]; !ok {
+ if wantIndex.Weights == nil {
+ wantIndex.Weights = make(map[string]int)
+ }
+ wantIndex.Weights[name] = 1
+ }
+ }
+ c.Assert(gotIndex, DeepEquals, wantIndex)
+
+ // Drop created index by key or by name if a custom name was used.
+ if test.index.Name == "" {
+ err = coll.DropIndex(test.index.Key...)
+ c.Assert(err, IsNil)
+ } else {
+ err = coll.DropIndexName(test.index.Name)
+ c.Assert(err, IsNil)
+ }
+ }
+}
+
+func (s *S) TestEnsureIndexWithBadInfo(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndex(mgo.Index{})
+ c.Assert(err, ErrorMatches, "invalid index key:.*")
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{""}})
+ c.Assert(err, ErrorMatches, "invalid index key:.*")
+}
+
+func (s *S) TestEnsureIndexWithUnsafeSession(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetSafe(nil)
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"a": 1})
+ c.Assert(err, IsNil)
+
+ // Should fail since there are duplicated entries.
+ index := mgo.Index{
+ Key: []string{"a"},
+ Unique: true,
+ }
+
+ err = coll.EnsureIndex(index)
+ c.Assert(err, ErrorMatches, ".*duplicate key error.*")
+}
+
+func (s *S) TestEnsureIndexKey(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndexKey("a")
+ c.Assert(err, IsNil)
+
+ err = coll.EnsureIndexKey("a", "-b")
+ c.Assert(err, IsNil)
+
+ sysidx := session.DB("mydb").C("system.indexes")
+
+ result1 := M{}
+ err = sysidx.Find(M{"name": "a_1"}).One(result1)
+ c.Assert(err, IsNil)
+
+ result2 := M{}
+ err = sysidx.Find(M{"name": "a_1_b_-1"}).One(result2)
+ c.Assert(err, IsNil)
+
+ delete(result1, "v")
+ expected1 := M{
+ "name": "a_1",
+ "key": M{"a": 1},
+ "ns": "mydb.mycoll",
+ }
+ c.Assert(result1, DeepEquals, expected1)
+
+ delete(result2, "v")
+ expected2 := M{
+ "name": "a_1_b_-1",
+ "key": M{"a": 1, "b": -1},
+ "ns": "mydb.mycoll",
+ }
+ c.Assert(result2, DeepEquals, expected2)
+}
+
+func (s *S) TestEnsureIndexDropIndex(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndexKey("a")
+ c.Assert(err, IsNil)
+
+ err = coll.EnsureIndexKey("-b")
+ c.Assert(err, IsNil)
+
+ err = coll.DropIndex("-b")
+ c.Assert(err, IsNil)
+
+ sysidx := session.DB("mydb").C("system.indexes")
+
+ err = sysidx.Find(M{"name": "a_1"}).One(nil)
+ c.Assert(err, IsNil)
+
+ err = sysidx.Find(M{"name": "b_1"}).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = coll.DropIndex("a")
+ c.Assert(err, IsNil)
+
+ err = sysidx.Find(M{"name": "a_1"}).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = coll.DropIndex("a")
+ c.Assert(err, ErrorMatches, "index not found.*")
+}
+
+func (s *S) TestEnsureIndexDropIndexName(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndexKey("a")
+ c.Assert(err, IsNil)
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{"b"}, Name: "a"})
+ c.Assert(err, IsNil)
+
+ err = coll.DropIndexName("a")
+ c.Assert(err, IsNil)
+
+ sysidx := session.DB("mydb").C("system.indexes")
+
+ err = sysidx.Find(M{"name": "a_1"}).One(nil)
+ c.Assert(err, IsNil)
+
+ err = sysidx.Find(M{"name": "a"}).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = coll.DropIndexName("a_1")
+ c.Assert(err, IsNil)
+
+ err = sysidx.Find(M{"name": "a_1"}).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = coll.DropIndexName("a_1")
+ c.Assert(err, ErrorMatches, "index not found.*")
+}
+
+func (s *S) TestEnsureIndexCaching(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndexKey("a")
+ c.Assert(err, IsNil)
+
+ mgo.ResetStats()
+
+ // Second EnsureIndex should be cached and do nothing.
+ err = coll.EnsureIndexKey("a")
+ c.Assert(err, IsNil)
+
+ stats := mgo.GetStats()
+ c.Assert(stats.SentOps, Equals, 0)
+
+ // Resetting the cache should make it contact the server again.
+ session.ResetIndexCache()
+
+ err = coll.EnsureIndexKey("a")
+ c.Assert(err, IsNil)
+
+ stats = mgo.GetStats()
+ c.Assert(stats.SentOps > 0, Equals, true)
+
+ // Dropping the index should also drop the cached index key.
+ err = coll.DropIndex("a")
+ c.Assert(err, IsNil)
+
+ mgo.ResetStats()
+
+ err = coll.EnsureIndexKey("a")
+ c.Assert(err, IsNil)
+
+ stats = mgo.GetStats()
+ c.Assert(stats.SentOps > 0, Equals, true)
+}
+
+func (s *S) TestEnsureIndexGetIndexes(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndexKey("-b")
+ c.Assert(err, IsNil)
+
+ err = coll.EnsureIndexKey("a")
+ c.Assert(err, IsNil)
+
+	// Obsolete "@field" syntax for a 2d index.
+ err = coll.EnsureIndexKey("@c")
+ c.Assert(err, IsNil)
+
+ err = coll.EnsureIndexKey("$2d:d")
+ c.Assert(err, IsNil)
+
+ // Try to exercise cursor logic. 2.8.0-rc3 still ignores this.
+ session.SetBatch(2)
+
+ indexes, err := coll.Indexes()
+ c.Assert(err, IsNil)
+
+ c.Assert(indexes[0].Name, Equals, "_id_")
+ c.Assert(indexes[1].Name, Equals, "a_1")
+ c.Assert(indexes[1].Key, DeepEquals, []string{"a"})
+ c.Assert(indexes[2].Name, Equals, "b_-1")
+ c.Assert(indexes[2].Key, DeepEquals, []string{"-b"})
+ c.Assert(indexes[3].Name, Equals, "c_2d")
+ c.Assert(indexes[3].Key, DeepEquals, []string{"$2d:c"})
+ c.Assert(indexes[4].Name, Equals, "d_2d")
+ c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"})
+}
+
+func (s *S) TestEnsureIndexNameCaching(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"})
+ c.Assert(err, IsNil)
+
+ mgo.ResetStats()
+
+ // Second EnsureIndex should be cached and do nothing.
+ err = coll.EnsureIndexKey("a")
+ c.Assert(err, IsNil)
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"})
+ c.Assert(err, IsNil)
+
+ stats := mgo.GetStats()
+ c.Assert(stats.SentOps, Equals, 0)
+
+ // Resetting the cache should make it contact the server again.
+ session.ResetIndexCache()
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"})
+ c.Assert(err, IsNil)
+
+ stats = mgo.GetStats()
+ c.Assert(stats.SentOps > 0, Equals, true)
+
+ // Dropping the index should also drop the cached index key.
+ err = coll.DropIndexName("custom")
+ c.Assert(err, IsNil)
+
+ mgo.ResetStats()
+
+ err = coll.EnsureIndex(mgo.Index{Key: []string{"a"}, Name: "custom"})
+ c.Assert(err, IsNil)
+
+ stats = mgo.GetStats()
+ c.Assert(stats.SentOps > 0, Equals, true)
+}
+
+func (s *S) TestEnsureIndexEvalGetIndexes(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({b: -1})"}}, nil)
+ c.Assert(err, IsNil)
+ err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({a: 1})"}}, nil)
+ c.Assert(err, IsNil)
+ err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({c: -1, e: 1})"}}, nil)
+ c.Assert(err, IsNil)
+ err = session.Run(bson.D{{"eval", "db.getSiblingDB('mydb').mycoll.ensureIndex({d: '2d'})"}}, nil)
+ c.Assert(err, IsNil)
+
+ indexes, err := coll.Indexes()
+ c.Assert(err, IsNil)
+
+ c.Assert(indexes[0].Name, Equals, "_id_")
+ c.Assert(indexes[1].Name, Equals, "a_1")
+ c.Assert(indexes[1].Key, DeepEquals, []string{"a"})
+ c.Assert(indexes[2].Name, Equals, "b_-1")
+ c.Assert(indexes[2].Key, DeepEquals, []string{"-b"})
+ c.Assert(indexes[3].Name, Equals, "c_-1_e_1")
+ c.Assert(indexes[3].Key, DeepEquals, []string{"-c", "e"})
+ if s.versionAtLeast(2, 2) {
+ c.Assert(indexes[4].Name, Equals, "d_2d")
+ c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"})
+ } else {
+ c.Assert(indexes[4].Name, Equals, "d_")
+ c.Assert(indexes[4].Key, DeepEquals, []string{"$2d:d"})
+ }
+}
+
+var testTTL = flag.Bool("test-ttl", false, "test TTL collections (may take 1 minute)")
+
+func (s *S) TestEnsureIndexExpireAfter(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ session.SetSafe(nil)
+
+ coll := session.DB("mydb").C("mycoll")
+
+ err = coll.Insert(M{"n": 1, "t": time.Now().Add(-120 * time.Second)})
+ c.Assert(err, IsNil)
+ err = coll.Insert(M{"n": 2, "t": time.Now()})
+ c.Assert(err, IsNil)
+
+	// TTL index: documents expire one minute after their "t" timestamp.
+ index := mgo.Index{
+ Key: []string{"t"},
+ ExpireAfter: 1 * time.Minute,
+ }
+
+ err = coll.EnsureIndex(index)
+ c.Assert(err, IsNil)
+
+ indexes, err := coll.Indexes()
+ c.Assert(err, IsNil)
+ c.Assert(indexes[1].Name, Equals, "t_1")
+ c.Assert(indexes[1].ExpireAfter, Equals, 1*time.Minute)
+
+ if *testTTL {
+ worked := false
+ stop := time.Now().Add(70 * time.Second)
+ for time.Now().Before(stop) {
+ n, err := coll.Count()
+ c.Assert(err, IsNil)
+ if n == 1 {
+ worked = true
+ break
+ }
+ c.Assert(n, Equals, 2)
+ c.Logf("Still has 2 entries...")
+ time.Sleep(1 * time.Second)
+ }
+ if !worked {
+ c.Fatalf("TTL index didn't work")
+ }
+ }
+}
+
+func (s *S) TestDistinct(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ for _, i := range []int{1, 4, 6, 2, 2, 3, 4} {
+ coll.Insert(M{"n": i})
+ }
+
+ var result []int
+	err = coll.Find(M{"n": M{"$gt": 2}}).Sort("n").Distinct("n", &result)
+	c.Assert(err, IsNil)
+
+ sort.IntSlice(result).Sort()
+ c.Assert(result, DeepEquals, []int{3, 4, 6})
+}
+
+func (s *S) TestMapReduce(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ for _, i := range []int{1, 4, 6, 2, 2, 3, 4} {
+ coll.Insert(M{"n": i})
+ }
+
+ job := &mgo.MapReduce{
+ Map: "function() { emit(this.n, 1); }",
+ Reduce: "function(key, values) { return Array.sum(values); }",
+ }
+ var result []struct {
+ Id int "_id"
+ Value int
+ }
+
+ info, err := coll.Find(M{"n": M{"$gt": 2}}).MapReduce(job, &result)
+ c.Assert(err, IsNil)
+ c.Assert(info.InputCount, Equals, 4)
+ c.Assert(info.EmitCount, Equals, 4)
+ c.Assert(info.OutputCount, Equals, 3)
+ c.Assert(info.VerboseTime, IsNil)
+
+ expected := map[int]int{3: 1, 4: 2, 6: 1}
+ for _, item := range result {
+ c.Logf("Item: %#v", &item)
+ c.Assert(item.Value, Equals, expected[item.Id])
+ expected[item.Id] = -1
+ }
+}
+
+func (s *S) TestMapReduceFinalize(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ for _, i := range []int{1, 4, 6, 2, 2, 3, 4} {
+ coll.Insert(M{"n": i})
+ }
+
+ job := &mgo.MapReduce{
+ Map: "function() { emit(this.n, 1) }",
+ Reduce: "function(key, values) { return Array.sum(values) }",
+ Finalize: "function(key, count) { return {count: count} }",
+ }
+ var result []struct {
+ Id int "_id"
+ Value struct{ Count int }
+ }
+ _, err = coll.Find(nil).MapReduce(job, &result)
+ c.Assert(err, IsNil)
+
+ expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1}
+ for _, item := range result {
+ c.Logf("Item: %#v", &item)
+ c.Assert(item.Value.Count, Equals, expected[item.Id])
+ expected[item.Id] = -1
+ }
+}
+
+func (s *S) TestMapReduceToCollection(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ for _, i := range []int{1, 4, 6, 2, 2, 3, 4} {
+ coll.Insert(M{"n": i})
+ }
+
+ job := &mgo.MapReduce{
+ Map: "function() { emit(this.n, 1); }",
+ Reduce: "function(key, values) { return Array.sum(values); }",
+ Out: "mr",
+ }
+
+ info, err := coll.Find(nil).MapReduce(job, nil)
+ c.Assert(err, IsNil)
+ c.Assert(info.InputCount, Equals, 7)
+ c.Assert(info.EmitCount, Equals, 7)
+ c.Assert(info.OutputCount, Equals, 5)
+ c.Assert(info.Collection, Equals, "mr")
+ c.Assert(info.Database, Equals, "mydb")
+
+ expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1}
+ var item *struct {
+ Id int "_id"
+ Value int
+ }
+ mr := session.DB("mydb").C("mr")
+ iter := mr.Find(nil).Iter()
+ for iter.Next(&item) {
+ c.Logf("Item: %#v", &item)
+ c.Assert(item.Value, Equals, expected[item.Id])
+ expected[item.Id] = -1
+ }
+ c.Assert(iter.Close(), IsNil)
+}
+
+func (s *S) TestMapReduceToOtherDb(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ for _, i := range []int{1, 4, 6, 2, 2, 3, 4} {
+ coll.Insert(M{"n": i})
+ }
+
+ job := &mgo.MapReduce{
+ Map: "function() { emit(this.n, 1); }",
+ Reduce: "function(key, values) { return Array.sum(values); }",
+ Out: bson.D{{"replace", "mr"}, {"db", "otherdb"}},
+ }
+
+ info, err := coll.Find(nil).MapReduce(job, nil)
+ c.Assert(err, IsNil)
+ c.Assert(info.InputCount, Equals, 7)
+ c.Assert(info.EmitCount, Equals, 7)
+ c.Assert(info.OutputCount, Equals, 5)
+ c.Assert(info.Collection, Equals, "mr")
+ c.Assert(info.Database, Equals, "otherdb")
+
+ expected := map[int]int{1: 1, 2: 2, 3: 1, 4: 2, 6: 1}
+ var item *struct {
+ Id int "_id"
+ Value int
+ }
+ mr := session.DB("otherdb").C("mr")
+ iter := mr.Find(nil).Iter()
+ for iter.Next(&item) {
+ c.Logf("Item: %#v", &item)
+ c.Assert(item.Value, Equals, expected[item.Id])
+ expected[item.Id] = -1
+ }
+ c.Assert(iter.Close(), IsNil)
+}
+
+func (s *S) TestMapReduceOutOfOrder(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ for _, i := range []int{1, 4, 6, 2, 2, 3, 4} {
+ coll.Insert(M{"n": i})
+ }
+
+ job := &mgo.MapReduce{
+ Map: "function() { emit(this.n, 1); }",
+ Reduce: "function(key, values) { return Array.sum(values); }",
+ Out: bson.M{"a": "a", "z": "z", "replace": "mr", "db": "otherdb", "b": "b", "y": "y"},
+ }
+
+ info, err := coll.Find(nil).MapReduce(job, nil)
+ c.Assert(err, IsNil)
+ c.Assert(info.Collection, Equals, "mr")
+ c.Assert(info.Database, Equals, "otherdb")
+}
+
+func (s *S) TestMapReduceScope(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ coll.Insert(M{"n": 1})
+
+ job := &mgo.MapReduce{
+ Map: "function() { emit(this.n, x); }",
+ Reduce: "function(key, values) { return Array.sum(values); }",
+ Scope: M{"x": 42},
+ }
+
+ var result []bson.M
+	_, err = coll.Find(nil).MapReduce(job, &result)
+	c.Assert(err, IsNil)
+	c.Assert(len(result), Equals, 1)
+ c.Assert(result[0]["value"], Equals, 42.0)
+}
+
+func (s *S) TestMapReduceVerbose(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ for i := 0; i < 100; i++ {
+ err = coll.Insert(M{"n": i})
+ c.Assert(err, IsNil)
+ }
+
+ job := &mgo.MapReduce{
+ Map: "function() { emit(this.n, 1); }",
+ Reduce: "function(key, values) { return Array.sum(values); }",
+ Verbose: true,
+ }
+
+ info, err := coll.Find(nil).MapReduce(job, nil)
+ c.Assert(err, IsNil)
+ c.Assert(info.VerboseTime, NotNil)
+}
+
+func (s *S) TestMapReduceLimit(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ for _, i := range []int{1, 4, 6, 2, 2, 3, 4} {
+ coll.Insert(M{"n": i})
+ }
+
+ job := &mgo.MapReduce{
+ Map: "function() { emit(this.n, 1); }",
+ Reduce: "function(key, values) { return Array.sum(values); }",
+ }
+
+ var result []bson.M
+ _, err = coll.Find(nil).Limit(3).MapReduce(job, &result)
+ c.Assert(err, IsNil)
+ c.Assert(len(result), Equals, 3)
+}
+
+func (s *S) TestBuildInfo(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ info, err := session.BuildInfo()
+ c.Assert(err, IsNil)
+
+ var v []int
+ for i, a := range strings.Split(info.Version, ".") {
+ for _, token := range []string{"-rc", "-pre"} {
+ if i == 2 && strings.Contains(a, token) {
+ a = a[:strings.Index(a, token)]
+ info.VersionArray[len(info.VersionArray)-1] = 0
+ }
+ }
+ n, err := strconv.Atoi(a)
+ c.Assert(err, IsNil)
+ v = append(v, n)
+ }
+ for len(v) < 4 {
+ v = append(v, 0)
+ }
+
+ c.Assert(info.VersionArray, DeepEquals, v)
+ c.Assert(info.GitVersion, Matches, "[a-z0-9]+")
+
+ if s.versionAtLeast(3, 2) {
+ // It was deprecated in 3.2.
+ c.Assert(info.SysInfo, Equals, "")
+ } else {
+ c.Assert(info.SysInfo, Matches, ".*[0-9:]+.*")
+ }
+ if info.Bits != 32 && info.Bits != 64 {
+ c.Fatalf("info.Bits is %d", info.Bits)
+ }
+ if info.MaxObjectSize < 8192 {
+ c.Fatalf("info.MaxObjectSize seems too small: %d", info.MaxObjectSize)
+ }
+}
+
+func (s *S) TestZeroTimeRoundtrip(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ var d struct{ T time.Time }
+ conn := session.DB("mydb").C("mycoll")
+ err = conn.Insert(d)
+ c.Assert(err, IsNil)
+
+ var result bson.M
+ err = conn.Find(nil).One(&result)
+ c.Assert(err, IsNil)
+ t, isTime := result["t"].(time.Time)
+ c.Assert(isTime, Equals, true)
+ c.Assert(t, Equals, time.Time{})
+}
+
+func (s *S) TestFsyncLock(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ clone := session.Clone()
+ defer clone.Close()
+
+ err = session.FsyncLock()
+ c.Assert(err, IsNil)
+
+ done := make(chan time.Time)
+ go func() {
+ time.Sleep(3 * time.Second)
+ now := time.Now()
+ err := session.FsyncUnlock()
+ c.Check(err, IsNil)
+ done <- now
+ }()
+
+ err = clone.DB("mydb").C("mycoll").Insert(bson.M{"n": 1})
+ unlocked := time.Now()
+ unlocking := <-done
+ c.Assert(err, IsNil)
+
+ c.Assert(unlocked.After(unlocking), Equals, true)
+}
+
+func (s *S) TestFsync(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ // Not much to do here. Just a smoke check.
+ err = session.Fsync(false)
+ c.Assert(err, IsNil)
+ err = session.Fsync(true)
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestRepairCursor(c *C) {
+ if !s.versionAtLeast(2, 7) {
+ c.Skip("RepairCursor only works on 2.7+")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+ session.SetBatch(2)
+
+ coll := session.DB("mydb").C("mycoll3")
+	// Ignore the error: the collection may not exist yet.
+	_ = coll.DropCollection()
+
+ ns := []int{0, 10, 20, 30, 40, 50}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+ repairIter := coll.Repair()
+
+ c.Assert(repairIter.Err(), IsNil)
+
+ result := struct{ N int }{}
+ resultCounts := map[int]int{}
+ for repairIter.Next(&result) {
+ resultCounts[result.N]++
+ }
+
+ c.Assert(repairIter.Next(&result), Equals, false)
+ c.Assert(repairIter.Err(), IsNil)
+ c.Assert(repairIter.Close(), IsNil)
+
+ // Verify that the results of the repair cursor are valid.
+ // The repair cursor can return multiple copies
+ // of the same document, so to check correctness we only
+ // need to verify that at least 1 of each document was returned.
+
+ for _, key := range ns {
+ c.Assert(resultCounts[key] > 0, Equals, true)
+ }
+}
+
+func (s *S) TestPipeIter(c *C) {
+ if !s.versionAtLeast(2, 1) {
+ c.Skip("Pipe only works on 2.1+")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+ pipe := coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}})
+
+ // Ensure cursor logic is working by forcing a small batch.
+ pipe.Batch(2)
+
+ // Smoke test for AllowDiskUse.
+ pipe.AllowDiskUse()
+
+ iter := pipe.Iter()
+ result := struct{ N int }{}
+ for i := 2; i < 7; i++ {
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(result.N, Equals, ns[i])
+ }
+
+ c.Assert(iter.Next(&result), Equals, false)
+ c.Assert(iter.Close(), IsNil)
+}
+
+func (s *S) TestPipeAll(c *C) {
+ if !s.versionAtLeast(2, 1) {
+ c.Skip("Pipe only works on 2.1+")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ err := coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ var result []struct{ N int }
+ err = coll.Pipe([]M{{"$match": M{"n": M{"$gte": 42}}}}).All(&result)
+ c.Assert(err, IsNil)
+ for i := 2; i < 7; i++ {
+ c.Assert(result[i-2].N, Equals, ns[i])
+ }
+}
+
+func (s *S) TestPipeOne(c *C) {
+ if !s.versionAtLeast(2, 1) {
+ c.Skip("Pipe only works on 2.1+")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ coll.Insert(M{"a": 1, "b": 2})
+
+ result := struct{ A, B int }{}
+
+ pipe := coll.Pipe([]M{{"$project": M{"a": 1, "b": M{"$add": []interface{}{"$b", 1}}}}})
+ err = pipe.One(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.A, Equals, 1)
+ c.Assert(result.B, Equals, 3)
+
+ pipe = coll.Pipe([]M{{"$match": M{"a": 2}}})
+ err = pipe.One(&result)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+}
+
+func (s *S) TestPipeExplain(c *C) {
+ if !s.versionAtLeast(2, 1) {
+ c.Skip("Pipe only works on 2.1+")
+ }
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ coll.Insert(M{"a": 1, "b": 2})
+
+ pipe := coll.Pipe([]M{{"$project": M{"a": 1, "b": M{"$add": []interface{}{"$b", 1}}}}})
+
+ // The explain command result changes across versions.
+ var result struct{ Ok int }
+ err = pipe.Explain(&result)
+ c.Assert(err, IsNil)
+ c.Assert(result.Ok, Equals, 1)
+}
+
+func (s *S) TestBatch1Bug(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ for i := 0; i < 3; i++ {
+ err := coll.Insert(M{"n": i})
+ c.Assert(err, IsNil)
+ }
+
+ var ns []struct{ N int }
+ err = coll.Find(nil).Batch(1).All(&ns)
+ c.Assert(err, IsNil)
+ c.Assert(len(ns), Equals, 3)
+
+ session.SetBatch(1)
+ err = coll.Find(nil).All(&ns)
+ c.Assert(err, IsNil)
+ c.Assert(len(ns), Equals, 3)
+}
+
+func (s *S) TestInterfaceIterBug(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ for i := 0; i < 3; i++ {
+ err := coll.Insert(M{"n": i})
+ c.Assert(err, IsNil)
+ }
+
+ var result interface{}
+
+ i := 0
+ iter := coll.Find(nil).Sort("n").Iter()
+ for iter.Next(&result) {
+ c.Assert(result.(bson.M)["n"], Equals, i)
+ i++
+ }
+ c.Assert(iter.Close(), IsNil)
+}
+
+func (s *S) TestFindIterCloseKillsCursor(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ cursors := serverCursorsOpen(session)
+
+ coll := session.DB("mydb").C("mycoll")
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ err = coll.Insert(M{"n": n})
+ c.Assert(err, IsNil)
+ }
+
+ iter := coll.Find(nil).Batch(2).Iter()
+ c.Assert(iter.Next(bson.M{}), Equals, true)
+
+ c.Assert(iter.Close(), IsNil)
+ c.Assert(serverCursorsOpen(session), Equals, cursors)
+}
+
+func (s *S) TestFindIterDoneWithBatches(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ ns := []int{40, 41, 42, 43, 44, 45, 46}
+ for _, n := range ns {
+ coll.Insert(M{"n": n})
+ }
+
+ iter := coll.Find(M{"n": M{"$gte": 42}}).Sort("$natural").Prefetch(0).Batch(2).Iter()
+ result := struct{ N int }{}
+ for i := 2; i < 7; i++ {
+		// The first check runs while a local record is still pending;
+		// the second runs with an open cursor ID but no local records.
+ c.Assert(iter.Done(), Equals, false)
+ ok := iter.Next(&result)
+		c.Assert(ok, Equals, true, Commentf("err=%v", iter.Err()))
+ }
+
+ c.Assert(iter.Done(), Equals, true)
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Close(), IsNil)
+}
+
+func (s *S) TestFindIterDoneErr(c *C) {
+ session, err := mgo.Dial("localhost:40002")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ iter := coll.Find(nil).Iter()
+
+ result := struct{}{}
+ ok := iter.Next(&result)
+ c.Assert(iter.Done(), Equals, true)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Err(), ErrorMatches, "unauthorized.*|not authorized.*")
+}
+
+func (s *S) TestFindIterDoneNotFound(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+
+ result := struct{ A, B int }{}
+ iter := coll.Find(M{"a": 1}).Iter()
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, false)
+ c.Assert(iter.Done(), Equals, true)
+}
+
+func (s *S) TestLogReplay(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ for i := 0; i < 5; i++ {
+ err = coll.Insert(M{"ts": time.Now()})
+ c.Assert(err, IsNil)
+ }
+
+ iter := coll.Find(nil).LogReplay().Iter()
+ if s.versionAtLeast(2, 6) {
+ // This used to fail in 2.4. Now it's just a smoke test.
+ c.Assert(iter.Err(), IsNil)
+ } else {
+ c.Assert(iter.Next(bson.M{}), Equals, false)
+ c.Assert(iter.Err(), ErrorMatches, "no ts field in query")
+ }
+}
+
+func (s *S) TestSetCursorTimeout(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+	err = coll.Insert(M{"n": 42})
+	c.Assert(err, IsNil)
+
+ // This is just a smoke test. Won't wait 10 minutes for an actual timeout.
+
+ session.SetCursorTimeout(0)
+
+ var result struct{ N int }
+ iter := coll.Find(nil).Iter()
+ c.Assert(iter.Next(&result), Equals, true)
+ c.Assert(result.N, Equals, 42)
+ c.Assert(iter.Next(&result), Equals, false)
+}
+
+func (s *S) TestNewIterNoServer(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+	data, err := bson.Marshal(bson.M{"a": 1})
+	c.Assert(err, IsNil)
+
+ coll := session.DB("mydb").C("mycoll")
+ iter := coll.NewIter(nil, []bson.Raw{{3, data}}, 42, nil)
+
+ var result struct{ A int }
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(result.A, Equals, 1)
+
+ ok = iter.Next(&result)
+ c.Assert(ok, Equals, false)
+
+ c.Assert(iter.Err(), ErrorMatches, "server not available")
+}
+
+func (s *S) TestNewIterNoServerPresetErr(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+	data, err := bson.Marshal(bson.M{"a": 1})
+	c.Assert(err, IsNil)
+
+ coll := session.DB("mydb").C("mycoll")
+ iter := coll.NewIter(nil, []bson.Raw{{3, data}}, 42, fmt.Errorf("my error"))
+
+ var result struct{ A int }
+ ok := iter.Next(&result)
+ c.Assert(ok, Equals, true)
+ c.Assert(result.A, Equals, 1)
+
+ ok = iter.Next(&result)
+ c.Assert(ok, Equals, false)
+
+ c.Assert(iter.Err(), ErrorMatches, "my error")
+}
+
+func (s *S) TestBypassValidation(c *C) {
+ if !s.versionAtLeast(3, 2) {
+ c.Skip("validation supported on 3.2+")
+ }
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ err = coll.Insert(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ err = coll.Database.Run(bson.D{
+ {"collMod", "mycoll"},
+ {"validator", M{"s": M{"$type": "string"}}},
+ }, nil)
+ c.Assert(err, IsNil)
+
+ err = coll.Insert(M{"n": 2})
+ c.Assert(err, ErrorMatches, "Document failed validation")
+
+ err = coll.Update(M{"n": 1}, M{"n": 10})
+ c.Assert(err, ErrorMatches, "Document failed validation")
+
+ session.SetBypassValidation(true)
+
+ err = coll.Insert(M{"n": 3})
+ c.Assert(err, IsNil)
+
+ err = coll.Update(M{"n": 3}, M{"n": 4})
+ c.Assert(err, IsNil)
+
+ // Ensure this still works. Shouldn't be affected.
+ err = coll.Remove(M{"n": 1})
+ c.Assert(err, IsNil)
+
+ var result struct{ N int }
+ var ns []int
+ iter := coll.Find(nil).Iter()
+ for iter.Next(&result) {
+ ns = append(ns, result.N)
+ }
+ c.Assert(iter.Err(), IsNil)
+ sort.Ints(ns)
+ c.Assert(ns, DeepEquals, []int{4})
+}
+
+func (s *S) TestVersionAtLeast(c *C) {
+	tests := [][][]int{
+		{{3, 2, 1}, {3, 2, 0}},
+		{{3, 2, 1}, {3, 2}},
+		{{3, 2, 1}, {2, 5, 5, 5}},
+		{{3, 2, 1}, {2, 5, 5}},
+		{{3, 2, 1}, {2, 5}},
+	}
+ for _, pair := range tests {
+ bi := mgo.BuildInfo{VersionArray: pair[0]}
+ c.Assert(bi.VersionAtLeast(pair[1]...), Equals, true)
+
+ bi = mgo.BuildInfo{VersionArray: pair[0]}
+ c.Assert(bi.VersionAtLeast(pair[0]...), Equals, true)
+
+ bi = mgo.BuildInfo{VersionArray: pair[1]}
+ c.Assert(bi.VersionAtLeast(pair[1]...), Equals, true)
+
+ bi = mgo.BuildInfo{VersionArray: pair[1]}
+ c.Assert(bi.VersionAtLeast(pair[0]...), Equals, false)
+ }
+}
+
+// --------------------------------------------------------------------------
+// Some benchmarks that require a running database.
+
+func (s *S) BenchmarkFindIterRaw(c *C) {
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ defer session.Close()
+
+ coll := session.DB("mydb").C("mycoll")
+ doc := bson.D{
+ {"f2", "a short string"},
+ {"f3", bson.D{{"1", "one"}, {"2", 2.0}}},
+ {"f4", []string{"a", "b", "c", "d", "e", "f", "g"}},
+ }
+
+ for i := 0; i < c.N+1; i++ {
+ err := coll.Insert(doc)
+ c.Assert(err, IsNil)
+ }
+
+ session.SetBatch(c.N)
+
+ var raw bson.Raw
+ iter := coll.Find(nil).Iter()
+ iter.Next(&raw)
+ c.ResetTimer()
+ i := 0
+ for iter.Next(&raw) {
+ i++
+ }
+ c.StopTimer()
+ c.Assert(iter.Err(), IsNil)
+ c.Assert(i, Equals, c.N)
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/socket.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/socket.go
new file mode 100644
index 00000000000..8891dd5d734
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/socket.go
@@ -0,0 +1,707 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "sync"
+ "time"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
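+// replyFunc handles one reply document: it is called once per document in a
+// server reply, once with docNum -1 when the reply carries no documents, and
+// once with a non-nil err if the operation fails.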
+type replyFunc func(err error, reply *replyOp, docNum int, docData []byte)
+
+type mongoSocket struct {
+ sync.Mutex
+ server *mongoServer // nil when cached
+ conn net.Conn
+ timeout time.Duration
+ addr string // For debugging only.
+ nextRequestId uint32
+ replyFuncs map[uint32]replyFunc
+ references int
+ creds []Credential
+ logout []Credential
+ cachedNonce string
+ gotNonce sync.Cond
+ dead error
+ serverInfo *mongoServerInfo
+}
+
+type queryOpFlags uint32
+
+const (
+ _ queryOpFlags = 1 << iota
+ flagTailable
+ flagSlaveOk
+ flagLogReplay
+ flagNoCursorTimeout
+ flagAwaitData
+)
+
+type queryOp struct {
+ collection string
+ query interface{}
+ skip int32
+ limit int32
+ selector interface{}
+ flags queryOpFlags
+ replyFunc replyFunc
+
+ mode Mode
+ options queryWrapper
+ hasOptions bool
+ serverTags []bson.D
+}
+
+type queryWrapper struct {
+ Query interface{} "$query"
+ OrderBy interface{} "$orderby,omitempty"
+ Hint interface{} "$hint,omitempty"
+ Explain bool "$explain,omitempty"
+ Snapshot bool "$snapshot,omitempty"
+ ReadPreference bson.D "$readPreference,omitempty"
+ MaxScan int "$maxScan,omitempty"
+ MaxTimeMS int "$maxTimeMS,omitempty"
+ Comment string "$comment,omitempty"
+}
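+
+// Illustrative sketch: a find with a sort and a hint travels on the wire
+// wrapped roughly as below, with field names taken from the struct tags
+// above:
+//
+//	bson.D{
+//		{"$query", bson.M{"n": bson.M{"$gt": 42}}},
+//		{"$orderby", bson.M{"n": 1}},
+//		{"$hint", bson.M{"n": 1}},
+//	}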
+
+func (op *queryOp) finalQuery(socket *mongoSocket) interface{} {
+ if op.flags&flagSlaveOk != 0 && socket.ServerInfo().Mongos {
+ var modeName string
+ switch op.mode {
+ case Strong:
+ modeName = "primary"
+ case Monotonic, Eventual:
+ modeName = "secondaryPreferred"
+ case PrimaryPreferred:
+ modeName = "primaryPreferred"
+ case Secondary:
+ modeName = "secondary"
+ case SecondaryPreferred:
+ modeName = "secondaryPreferred"
+ case Nearest:
+ modeName = "nearest"
+ default:
+ panic(fmt.Sprintf("unsupported read mode: %d", op.mode))
+ }
+ op.hasOptions = true
+ op.options.ReadPreference = make(bson.D, 0, 2)
+ op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"mode", modeName})
+ if len(op.serverTags) > 0 {
+ op.options.ReadPreference = append(op.options.ReadPreference, bson.DocElem{"tags", op.serverTags})
+ }
+ }
+ if op.hasOptions {
+ if op.query == nil {
+ var empty bson.D
+ op.options.Query = empty
+ } else {
+ op.options.Query = op.query
+ }
+ debugf("final query is %#v\n", &op.options)
+ return &op.options
+ }
+ return op.query
+}
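+
+// Illustrative sketch: with flagSlaveOk set against a mongos in Monotonic
+// mode, finalQuery wraps the query roughly as below (originalQuery stands
+// for the caller's query document):
+//
+//	bson.D{
+//		{"$query", originalQuery},
+//		{"$readPreference", bson.D{{"mode", "secondaryPreferred"}}},
+//	}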
+
+type getMoreOp struct {
+ collection string
+ limit int32
+ cursorId int64
+ replyFunc replyFunc
+}
+
+type replyOp struct {
+ flags uint32
+ cursorId int64
+ firstDoc int32
+ replyDocs int32
+}
+
+type insertOp struct {
+ collection string // "database.collection"
+ documents []interface{} // One or more documents to insert
+ flags uint32
+}
+
+type updateOp struct {
+ Collection string `bson:"-"` // "database.collection"
+ Selector interface{} `bson:"q"`
+ Update interface{} `bson:"u"`
+ Flags uint32 `bson:"-"`
+ Multi bool `bson:"multi,omitempty"`
+ Upsert bool `bson:"upsert,omitempty"`
+}
+
+type deleteOp struct {
+ Collection string `bson:"-"` // "database.collection"
+ Selector interface{} `bson:"q"`
+ Flags uint32 `bson:"-"`
+ Limit int `bson:"limit"`
+}
+
+type killCursorsOp struct {
+ cursorIds []int64
+}
+
+type requestInfo struct {
+ bufferPos int
+ replyFunc replyFunc
+}
+
+func newSocket(server *mongoServer, conn net.Conn, timeout time.Duration) *mongoSocket {
+ socket := &mongoSocket{
+ conn: conn,
+ addr: server.Addr,
+ server: server,
+ replyFuncs: make(map[uint32]replyFunc),
+ }
+ socket.gotNonce.L = &socket.Mutex
+ if err := socket.InitialAcquire(server.Info(), timeout); err != nil {
+ panic("newSocket: InitialAcquire returned error: " + err.Error())
+ }
+ stats.socketsAlive(+1)
+ debugf("Socket %p to %s: initialized", socket, socket.addr)
+ socket.resetNonce()
+ go socket.readLoop()
+ return socket
+}
+
+// Server returns the server that the socket is associated with.
+// It returns nil while the socket is cached in its respective server.
+func (socket *mongoSocket) Server() *mongoServer {
+ socket.Lock()
+ server := socket.server
+ socket.Unlock()
+ return server
+}
+
+// ServerInfo returns details for the server at the time the socket
+// was initially acquired.
+func (socket *mongoSocket) ServerInfo() *mongoServerInfo {
+ socket.Lock()
+ serverInfo := socket.serverInfo
+ socket.Unlock()
+ return serverInfo
+}
+
+// InitialAcquire obtains the first reference to the socket, either
+// right after the connection is made or once a recycled socket is
+// being put back in use.
+func (socket *mongoSocket) InitialAcquire(serverInfo *mongoServerInfo, timeout time.Duration) error {
+ socket.Lock()
+ if socket.references > 0 {
+ panic("Socket acquired out of cache with references")
+ }
+ if socket.dead != nil {
+ dead := socket.dead
+ socket.Unlock()
+ return dead
+ }
+ socket.references++
+ socket.serverInfo = serverInfo
+ socket.timeout = timeout
+ stats.socketsInUse(+1)
+ stats.socketRefs(+1)
+ socket.Unlock()
+ return nil
+}
+
+// Acquire obtains an additional reference to the socket.
+// The socket will only be recycled when it's released as many
+// times as it's been acquired.
+func (socket *mongoSocket) Acquire() (info *mongoServerInfo) {
+ socket.Lock()
+ if socket.references == 0 {
+ panic("Socket got non-initial acquire with references == 0")
+ }
+ // We'll track references to dead sockets as well.
+ // Caller is still supposed to release the socket.
+ socket.references++
+ stats.socketRefs(+1)
+ serverInfo := socket.serverInfo
+ socket.Unlock()
+ return serverInfo
+}
+
+// Release decrements a socket reference. The socket will be
+// recycled once it's been released as many times as it's been acquired.
+func (socket *mongoSocket) Release() {
+ socket.Lock()
+ if socket.references == 0 {
+ panic("socket.Release() with references == 0")
+ }
+ socket.references--
+ stats.socketRefs(-1)
+ if socket.references == 0 {
+ stats.socketsInUse(-1)
+ server := socket.server
+ socket.Unlock()
+ socket.LogoutAll()
+ // If the socket is dead server is nil.
+ if server != nil {
+ server.RecycleSocket(socket)
+ }
+ } else {
+ socket.Unlock()
+ }
+}
+
+// SetTimeout changes the timeout used on socket operations.
+func (socket *mongoSocket) SetTimeout(d time.Duration) {
+ socket.Lock()
+ socket.timeout = d
+ socket.Unlock()
+}
+
+type deadlineType int
+
+const (
+ readDeadline deadlineType = 1
+ writeDeadline deadlineType = 2
+)
+
+func (socket *mongoSocket) updateDeadline(which deadlineType) {
+ var when time.Time
+ if socket.timeout > 0 {
+ when = time.Now().Add(socket.timeout)
+ }
+ whichstr := ""
+ switch which {
+ case readDeadline | writeDeadline:
+ whichstr = "read/write"
+ socket.conn.SetDeadline(when)
+ case readDeadline:
+ whichstr = "read"
+ socket.conn.SetReadDeadline(when)
+ case writeDeadline:
+ whichstr = "write"
+ socket.conn.SetWriteDeadline(when)
+ default:
+ panic("invalid parameter to updateDeadline")
+ }
+ debugf("Socket %p to %s: updated %s deadline to %s ahead (%s)", socket, socket.addr, whichstr, socket.timeout, when)
+}
+
+// Close terminates the socket use.
+func (socket *mongoSocket) Close() {
+ socket.kill(errors.New("Closed explicitly"), false)
+}
+
+func (socket *mongoSocket) kill(err error, abend bool) {
+ socket.Lock()
+ if socket.dead != nil {
+ debugf("Socket %p to %s: killed again: %s (previously: %s)", socket, socket.addr, err.Error(), socket.dead.Error())
+ socket.Unlock()
+ return
+ }
+ logf("Socket %p to %s: closing: %s (abend=%v)", socket, socket.addr, err.Error(), abend)
+ socket.dead = err
+ socket.conn.Close()
+ stats.socketsAlive(-1)
+ replyFuncs := socket.replyFuncs
+ socket.replyFuncs = make(map[uint32]replyFunc)
+ server := socket.server
+ socket.server = nil
+ socket.gotNonce.Broadcast()
+ socket.Unlock()
+ for _, replyFunc := range replyFuncs {
+ logf("Socket %p to %s: notifying replyFunc of closed socket: %s", socket, socket.addr, err.Error())
+ replyFunc(err, nil, -1, nil)
+ }
+ if abend {
+ server.AbendSocket(socket)
+ }
+}
+
+func (socket *mongoSocket) SimpleQuery(op *queryOp) (data []byte, err error) {
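+	// wait acts as a one-shot semaphore: it is locked here, unlocked by
+	// replyFunc when the reply arrives, so the second Lock below blocks
+	// until the server answers.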
+ var wait, change sync.Mutex
+ var replyDone bool
+ var replyData []byte
+ var replyErr error
+ wait.Lock()
+ op.replyFunc = func(err error, reply *replyOp, docNum int, docData []byte) {
+ change.Lock()
+ if !replyDone {
+ replyDone = true
+ replyErr = err
+ if err == nil {
+ replyData = docData
+ }
+ }
+ change.Unlock()
+ wait.Unlock()
+ }
+ err = socket.Query(op)
+ if err != nil {
+ return nil, err
+ }
+ wait.Lock()
+ change.Lock()
+ data = replyData
+ err = replyErr
+ change.Unlock()
+ return data, err
+}
+
+func (socket *mongoSocket) Query(ops ...interface{}) (err error) {
+
+ if lops := socket.flushLogout(); len(lops) > 0 {
+ ops = append(lops, ops...)
+ }
+
+ buf := make([]byte, 0, 256)
+
+ // Serialize operations synchronously to avoid interrupting
+ // other goroutines while we can't really be sending data.
+ // Also, record id positions so that we can compute request
+ // ids at once later with the lock already held.
+ requests := make([]requestInfo, len(ops))
+ requestCount := 0
+
+ for _, op := range ops {
+ debugf("Socket %p to %s: serializing op: %#v", socket, socket.addr, op)
+ if qop, ok := op.(*queryOp); ok {
+ if cmd, ok := qop.query.(*findCmd); ok {
+ debugf("Socket %p to %s: find command: %#v", socket, socket.addr, cmd)
+ }
+ }
+ start := len(buf)
+ var replyFunc replyFunc
+ switch op := op.(type) {
+
+ case *updateOp:
+ buf = addHeader(buf, 2001)
+ buf = addInt32(buf, 0) // Reserved
+ buf = addCString(buf, op.Collection)
+ buf = addInt32(buf, int32(op.Flags))
+ debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector)
+ buf, err = addBSON(buf, op.Selector)
+ if err != nil {
+ return err
+ }
+ debugf("Socket %p to %s: serializing update document: %#v", socket, socket.addr, op.Update)
+ buf, err = addBSON(buf, op.Update)
+ if err != nil {
+ return err
+ }
+
+ case *insertOp:
+ buf = addHeader(buf, 2002)
+ buf = addInt32(buf, int32(op.flags))
+ buf = addCString(buf, op.collection)
+ for _, doc := range op.documents {
+ debugf("Socket %p to %s: serializing document for insertion: %#v", socket, socket.addr, doc)
+ buf, err = addBSON(buf, doc)
+ if err != nil {
+ return err
+ }
+ }
+
+ case *queryOp:
+ buf = addHeader(buf, 2004)
+ buf = addInt32(buf, int32(op.flags))
+ buf = addCString(buf, op.collection)
+ buf = addInt32(buf, op.skip)
+ buf = addInt32(buf, op.limit)
+ buf, err = addBSON(buf, op.finalQuery(socket))
+ if err != nil {
+ return err
+ }
+ if op.selector != nil {
+ buf, err = addBSON(buf, op.selector)
+ if err != nil {
+ return err
+ }
+ }
+ replyFunc = op.replyFunc
+
+ case *getMoreOp:
+ buf = addHeader(buf, 2005)
+ buf = addInt32(buf, 0) // Reserved
+ buf = addCString(buf, op.collection)
+ buf = addInt32(buf, op.limit)
+ buf = addInt64(buf, op.cursorId)
+ replyFunc = op.replyFunc
+
+ case *deleteOp:
+ buf = addHeader(buf, 2006)
+ buf = addInt32(buf, 0) // Reserved
+ buf = addCString(buf, op.Collection)
+ buf = addInt32(buf, int32(op.Flags))
+ debugf("Socket %p to %s: serializing selector document: %#v", socket, socket.addr, op.Selector)
+ buf, err = addBSON(buf, op.Selector)
+ if err != nil {
+ return err
+ }
+
+ case *killCursorsOp:
+ buf = addHeader(buf, 2007)
+ buf = addInt32(buf, 0) // Reserved
+ buf = addInt32(buf, int32(len(op.cursorIds)))
+ for _, cursorId := range op.cursorIds {
+ buf = addInt64(buf, cursorId)
+ }
+
+ default:
+ panic("internal error: unknown operation type")
+ }
+
+ setInt32(buf, start, int32(len(buf)-start))
+
+ if replyFunc != nil {
+ request := &requests[requestCount]
+ request.replyFunc = replyFunc
+ request.bufferPos = start
+ requestCount++
+ }
+ }
+
+ // Buffer is ready for the pipe. Lock, allocate ids, and enqueue.
+
+ socket.Lock()
+ if socket.dead != nil {
+ dead := socket.dead
+ socket.Unlock()
+ debugf("Socket %p to %s: failing query, already closed: %s", socket, socket.addr, socket.dead.Error())
+ // XXX This seems necessary in case the session is closed concurrently
+ // with a query being performed, but it's not yet tested:
+ for i := 0; i != requestCount; i++ {
+ request := &requests[i]
+ if request.replyFunc != nil {
+ request.replyFunc(dead, nil, -1, nil)
+ }
+ }
+ return dead
+ }
+
+ wasWaiting := len(socket.replyFuncs) > 0
+
+ // Reserve id 0 for requests which should have no responses.
+ requestId := socket.nextRequestId + 1
+ if requestId == 0 {
+ requestId++
+ }
+ socket.nextRequestId = requestId + uint32(requestCount)
+ for i := 0; i != requestCount; i++ {
+ request := &requests[i]
+ setInt32(buf, request.bufferPos+4, int32(requestId))
+ socket.replyFuncs[requestId] = request.replyFunc
+ requestId++
+ }
+
+ debugf("Socket %p to %s: sending %d op(s) (%d bytes)", socket, socket.addr, len(ops), len(buf))
+ stats.sentOps(len(ops))
+
+ socket.updateDeadline(writeDeadline)
+ _, err = socket.conn.Write(buf)
+ if !wasWaiting && requestCount > 0 {
+ socket.updateDeadline(readDeadline)
+ }
+ socket.Unlock()
+ return err
+}
+
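+// fill reads from the connection until b is completely full, much like
+// io.ReadFull.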
+func fill(r net.Conn, b []byte) error {
+ l := len(b)
+ n, err := r.Read(b)
+ for n != l && err == nil {
+ var ni int
+ ni, err = r.Read(b[n:])
+ n += ni
+ }
+ return err
+}
+
+// Estimated minimum cost per socket: 1 goroutine + memory for the largest
+// document ever seen.
+func (socket *mongoSocket) readLoop() {
+ p := make([]byte, 36) // 16 from header + 20 from OP_REPLY fixed fields
+ s := make([]byte, 4)
+ conn := socket.conn // No locking, conn never changes.
+ for {
+ err := fill(conn, p)
+ if err != nil {
+ socket.kill(err, true)
+ return
+ }
+
+ totalLen := getInt32(p, 0)
+ responseTo := getInt32(p, 8)
+ opCode := getInt32(p, 12)
+
+ // Don't use socket.server.Addr here. socket is not
+ // locked and socket.server may go away.
+ debugf("Socket %p to %s: got reply (%d bytes)", socket, socket.addr, totalLen)
+
+ _ = totalLen
+
+ if opCode != 1 {
+ socket.kill(errors.New("opcode != 1, corrupted data?"), true)
+ return
+ }
+
+ reply := replyOp{
+ flags: uint32(getInt32(p, 16)),
+ cursorId: getInt64(p, 20),
+ firstDoc: getInt32(p, 28),
+ replyDocs: getInt32(p, 32),
+ }
+
+ stats.receivedOps(+1)
+ stats.receivedDocs(int(reply.replyDocs))
+
+ socket.Lock()
+ replyFunc, ok := socket.replyFuncs[uint32(responseTo)]
+ if ok {
+ delete(socket.replyFuncs, uint32(responseTo))
+ }
+ socket.Unlock()
+
+ if replyFunc != nil && reply.replyDocs == 0 {
+ replyFunc(nil, &reply, -1, nil)
+ } else {
+ for i := 0; i != int(reply.replyDocs); i++ {
+ err := fill(conn, s)
+ if err != nil {
+ if replyFunc != nil {
+ replyFunc(err, nil, -1, nil)
+ }
+ socket.kill(err, true)
+ return
+ }
+
+ b := make([]byte, int(getInt32(s, 0)))
+
+ // copy(b, s) in an efficient way.
+ b[0] = s[0]
+ b[1] = s[1]
+ b[2] = s[2]
+ b[3] = s[3]
+
+ err = fill(conn, b[4:])
+ if err != nil {
+ if replyFunc != nil {
+ replyFunc(err, nil, -1, nil)
+ }
+ socket.kill(err, true)
+ return
+ }
+
+ if globalDebug && globalLogger != nil {
+ m := bson.M{}
+ if err := bson.Unmarshal(b, m); err == nil {
+ debugf("Socket %p to %s: received document: %#v", socket, socket.addr, m)
+ }
+ }
+
+ if replyFunc != nil {
+ replyFunc(nil, &reply, i, b)
+ }
+
+ // XXX Do bound checking against totalLen.
+ }
+ }
+
+ socket.Lock()
+ if len(socket.replyFuncs) == 0 {
+ // Nothing else to read for now. Disable deadline.
+ socket.conn.SetReadDeadline(time.Time{})
+ } else {
+ socket.updateDeadline(readDeadline)
+ }
+ socket.Unlock()
+
+ // XXX Do bound checking against totalLen.
+ }
+}
+
+var emptyHeader = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
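+// addHeader appends a 16-byte wire header (messageLength, requestID,
+// responseTo, opCode, all little-endian int32s) with only the opcode filled
+// in; the length and request id are patched in later via setInt32.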
+func addHeader(b []byte, opcode int) []byte {
+ i := len(b)
+ b = append(b, emptyHeader...)
+ // Enough for current opcodes.
+ b[i+12] = byte(opcode)
+ b[i+13] = byte(opcode >> 8)
+ return b
+}
+
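+// addInt32 appends i in little-endian byte order, matching what
+// binary.LittleEndian.PutUint32 from encoding/binary would produce.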
+func addInt32(b []byte, i int32) []byte {
+ return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24))
+}
+
+func addInt64(b []byte, i int64) []byte {
+ return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24),
+ byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56))
+}
+
+func addCString(b []byte, s string) []byte {
+ b = append(b, []byte(s)...)
+ b = append(b, 0)
+ return b
+}
+
+func addBSON(b []byte, doc interface{}) ([]byte, error) {
+ if doc == nil {
+ return append(b, 5, 0, 0, 0, 0), nil
+ }
+ data, err := bson.Marshal(doc)
+ if err != nil {
+ return b, err
+ }
+ return append(b, data...), nil
+}
+
+func setInt32(b []byte, pos int, i int32) {
+ b[pos] = byte(i)
+ b[pos+1] = byte(i >> 8)
+ b[pos+2] = byte(i >> 16)
+ b[pos+3] = byte(i >> 24)
+}
+
+func getInt32(b []byte, pos int) int32 {
+ return (int32(b[pos+0])) |
+ (int32(b[pos+1]) << 8) |
+ (int32(b[pos+2]) << 16) |
+ (int32(b[pos+3]) << 24)
+}
+
+func getInt64(b []byte, pos int) int64 {
+ return (int64(b[pos+0])) |
+ (int64(b[pos+1]) << 8) |
+ (int64(b[pos+2]) << 16) |
+ (int64(b[pos+3]) << 24) |
+ (int64(b[pos+4]) << 32) |
+ (int64(b[pos+5]) << 40) |
+ (int64(b[pos+6]) << 48) |
+ (int64(b[pos+7]) << 56)
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/stats.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/stats.go
new file mode 100644
index 00000000000..59723e60c2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/stats.go
@@ -0,0 +1,147 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo
+
+import (
+ "sync"
+)
+
+var stats *Stats
+var statsMutex sync.Mutex
+
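+// SetStats enables or disables the collection of operation statistics.
+// Disabling it discards any counters accumulated so far.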
+func SetStats(enabled bool) {
+ statsMutex.Lock()
+ if enabled {
+ if stats == nil {
+ stats = &Stats{}
+ }
+ } else {
+ stats = nil
+ }
+ statsMutex.Unlock()
+}
+
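+// GetStats returns a snapshot of the current statistics. Statistics must
+// have been enabled with SetStats(true) first; otherwise this dereferences
+// a nil pointer and panics. A typical pattern, used throughout the test
+// suite:
+//
+//	mgo.ResetStats()
+//	// ... perform operations ...
+//	stats := mgo.GetStats()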
+func GetStats() (snapshot Stats) {
+ statsMutex.Lock()
+ snapshot = *stats
+ statsMutex.Unlock()
+ return
+}
+
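+// ResetStats zeroes the operation counters while carrying over the absolute
+// gauges: Clusters, SocketsInUse, SocketsAlive, and SocketRefs.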
+func ResetStats() {
+ statsMutex.Lock()
+ debug("Resetting stats")
+ old := stats
+ stats = &Stats{}
+ // These are absolute values:
+ stats.Clusters = old.Clusters
+ stats.SocketsInUse = old.SocketsInUse
+ stats.SocketsAlive = old.SocketsAlive
+ stats.SocketRefs = old.SocketRefs
+ statsMutex.Unlock()
+ return
+}
+
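+// Stats holds driver statistics collected while SetStats is enabled.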
+type Stats struct {
+ Clusters int
+ MasterConns int
+ SlaveConns int
+ SentOps int
+ ReceivedOps int
+ ReceivedDocs int
+ SocketsAlive int
+ SocketsInUse int
+ SocketRefs int
+}
+
+func (stats *Stats) cluster(delta int) {
+ if stats != nil {
+ statsMutex.Lock()
+ stats.Clusters += delta
+ statsMutex.Unlock()
+ }
+}
+
+func (stats *Stats) conn(delta int, master bool) {
+ if stats != nil {
+ statsMutex.Lock()
+ if master {
+ stats.MasterConns += delta
+ } else {
+ stats.SlaveConns += delta
+ }
+ statsMutex.Unlock()
+ }
+}
+
+func (stats *Stats) sentOps(delta int) {
+ if stats != nil {
+ statsMutex.Lock()
+ stats.SentOps += delta
+ statsMutex.Unlock()
+ }
+}
+
+func (stats *Stats) receivedOps(delta int) {
+ if stats != nil {
+ statsMutex.Lock()
+ stats.ReceivedOps += delta
+ statsMutex.Unlock()
+ }
+}
+
+func (stats *Stats) receivedDocs(delta int) {
+ if stats != nil {
+ statsMutex.Lock()
+ stats.ReceivedDocs += delta
+ statsMutex.Unlock()
+ }
+}
+
+func (stats *Stats) socketsInUse(delta int) {
+ if stats != nil {
+ statsMutex.Lock()
+ stats.SocketsInUse += delta
+ statsMutex.Unlock()
+ }
+}
+
+func (stats *Stats) socketsAlive(delta int) {
+ if stats != nil {
+ statsMutex.Lock()
+ stats.SocketsAlive += delta
+ statsMutex.Unlock()
+ }
+}
+
+func (stats *Stats) socketRefs(delta int) {
+ if stats != nil {
+ statsMutex.Lock()
+ stats.SocketRefs += delta
+ statsMutex.Unlock()
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/suite_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/suite_test.go
new file mode 100644
index 00000000000..bac5d3f4a6f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/suite_test.go
@@ -0,0 +1,262 @@
+// mgo - MongoDB driver for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package mgo_test
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "net"
+ "os/exec"
+ "runtime"
+ "strconv"
+ "testing"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+)
+
+var fast = flag.Bool("fast", false, "Skip slow tests")
+
+type M bson.M
+
+type cLogger C
+
+func (c *cLogger) Output(calldepth int, s string) error {
+ ns := time.Now().UnixNano()
+ t := float64(ns%100e9) / 1e9
+ ((*C)(c)).Logf("[LOG] %.05f %s", t, s)
+ return nil
+}
+
+func TestAll(t *testing.T) {
+ TestingT(t)
+}
+
+type S struct {
+ session *mgo.Session
+ stopped bool
+ build mgo.BuildInfo
+ frozen []string
+}
+
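+// versionAtLeast reports whether the server build version is at least v,
+// comparing version components left to right; a build version with fewer
+// components than v is considered older.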
+func (s *S) versionAtLeast(v ...int) (result bool) {
+ for i := range v {
+ if i == len(s.build.VersionArray) {
+ return false
+ }
+ if s.build.VersionArray[i] != v[i] {
+ return s.build.VersionArray[i] >= v[i]
+ }
+ }
+ return true
+}
+
+var _ = Suite(&S{})
+
+func (s *S) SetUpSuite(c *C) {
+ mgo.SetDebug(true)
+ mgo.SetStats(true)
+ s.StartAll()
+
+ session, err := mgo.Dial("localhost:40001")
+ c.Assert(err, IsNil)
+ s.build, err = session.BuildInfo()
+ c.Check(err, IsNil)
+ session.Close()
+}
+
+func (s *S) SetUpTest(c *C) {
+ err := run("mongo --nodb harness/mongojs/dropall.js")
+ if err != nil {
+ panic(err.Error())
+ }
+ mgo.SetLogger((*cLogger)(c))
+ mgo.ResetStats()
+}
+
+func (s *S) TearDownTest(c *C) {
+ if s.stopped {
+ s.Stop(":40201")
+ s.Stop(":40202")
+ s.Stop(":40203")
+ s.StartAll()
+ }
+ for _, host := range s.frozen {
+ if host != "" {
+ s.Thaw(host)
+ }
+ }
+ var stats mgo.Stats
+ for i := 0; ; i++ {
+ stats = mgo.GetStats()
+ if stats.SocketsInUse == 0 && stats.SocketsAlive == 0 {
+ break
+ }
+ if i == 20 {
+ c.Fatal("Test left sockets in a dirty state")
+ }
+ c.Logf("Waiting for sockets to die: %d in use, %d alive", stats.SocketsInUse, stats.SocketsAlive)
+ time.Sleep(500 * time.Millisecond)
+ }
+ for i := 0; ; i++ {
+ stats = mgo.GetStats()
+ if stats.Clusters == 0 {
+ break
+ }
+ if i == 60 {
+ c.Fatal("Test left clusters alive")
+ }
+ c.Logf("Waiting for clusters to die: %d alive", stats.Clusters)
+ time.Sleep(1 * time.Second)
+ }
+}
+
+func (s *S) Stop(host string) {
+ // Give a moment for slaves to sync and avoid getting rollback issues.
+ panicOnWindows()
+ time.Sleep(2 * time.Second)
+ err := run("svc -d _harness/daemons/" + supvName(host))
+ if err != nil {
+ panic(err)
+ }
+ s.stopped = true
+}
+
+func (s *S) pid(host string) int {
+	// Note that recent releases of lsof force 'f' to be present in the output.
+ cmd := exec.Command("lsof", "-iTCP:"+hostPort(host), "-sTCP:LISTEN", "-Fpf")
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ panic(err)
+ }
+ pidstr := string(bytes.Fields(output[1:])[0])
+ pid, err := strconv.Atoi(pidstr)
+ if err != nil {
+ panic(fmt.Errorf("cannot convert pid to int: %q, command line: %q", pidstr, cmd.Args))
+ }
+ return pid
+}
+
+func (s *S) Freeze(host string) {
+ err := stop(s.pid(host))
+ if err != nil {
+ panic(err)
+ }
+ s.frozen = append(s.frozen, host)
+}
+
+func (s *S) Thaw(host string) {
+ err := cont(s.pid(host))
+ if err != nil {
+ panic(err)
+ }
+ for i, frozen := range s.frozen {
+ if frozen == host {
+ s.frozen[i] = ""
+ }
+ }
+}
+
+func (s *S) StartAll() {
+ if s.stopped {
+ // Restart any stopped nodes.
+ run("svc -u _harness/daemons/*")
+ err := run("mongo --nodb harness/mongojs/wait.js")
+ if err != nil {
+ panic(err)
+ }
+ s.stopped = false
+ }
+}
+
+func run(command string) error {
+ var output []byte
+ var err error
+ if runtime.GOOS == "windows" {
+ output, err = exec.Command("cmd", "/C", command).CombinedOutput()
+ } else {
+ output, err = exec.Command("/bin/sh", "-c", command).CombinedOutput()
+ }
+
+ if err != nil {
+ msg := fmt.Sprintf("Failed to execute: %s: %s\n%s", command, err.Error(), string(output))
+ return errors.New(msg)
+ }
+ return nil
+}
+
+var supvNames = map[string]string{
+ "40001": "db1",
+ "40002": "db2",
+ "40011": "rs1a",
+ "40012": "rs1b",
+ "40013": "rs1c",
+ "40021": "rs2a",
+ "40022": "rs2b",
+ "40023": "rs2c",
+ "40031": "rs3a",
+ "40032": "rs3b",
+ "40033": "rs3c",
+ "40041": "rs4a",
+ "40101": "cfg1",
+ "40102": "cfg2",
+ "40103": "cfg3",
+ "40201": "s1",
+ "40202": "s2",
+ "40203": "s3",
+}
+
+// supvName returns the daemon name for the given host address.
+func supvName(host string) string {
+ host, port, err := net.SplitHostPort(host)
+ if err != nil {
+ panic(err)
+ }
+ name, ok := supvNames[port]
+ if !ok {
+		panic("Unknown port: " + port)
+ }
+ return name
+}
+
+func hostPort(host string) string {
+ _, port, err := net.SplitHostPort(host)
+ if err != nil {
+ panic(err)
+ }
+ return port
+}
+
+func panicOnWindows() {
+ if runtime.GOOS == "windows" {
+ panic("the test suite is not yet fully supported on Windows")
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/syscall_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/syscall_test.go
new file mode 100644
index 00000000000..b8bbd7b340f
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/syscall_test.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package mgo_test
+
+import (
+ "syscall"
+)
+
+func stop(pid int) (err error) {
+ return syscall.Kill(pid, syscall.SIGSTOP)
+}
+
+func cont(pid int) (err error) {
+ return syscall.Kill(pid, syscall.SIGCONT)
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/syscall_windows_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/syscall_windows_test.go
new file mode 100644
index 00000000000..f2deaca86ec
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/syscall_windows_test.go
@@ -0,0 +1,11 @@
+package mgo_test
+
+func stop(pid int) (err error) {
+ panicOnWindows() // Always does.
+ return nil
+}
+
+func cont(pid int) (err error) {
+ panicOnWindows() // Always does.
+ return nil
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/chaos.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/chaos.go
new file mode 100644
index 00000000000..c98adb91d22
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/chaos.go
@@ -0,0 +1,68 @@
+package txn
+
+import (
+ mrand "math/rand"
+ "time"
+)
+
+var chaosEnabled = false
+var chaosSetting Chaos
+
+// Chaos holds parameters for the failure injection mechanism.
+type Chaos struct {
+ // KillChance is the 0.0 to 1.0 chance that a given checkpoint
+ // within the algorithm will raise an interruption that will
+ // stop the procedure.
+ KillChance float64
+
+ // SlowdownChance is the 0.0 to 1.0 chance that a given checkpoint
+ // within the algorithm will be delayed by Slowdown before
+ // continuing.
+ SlowdownChance float64
+ Slowdown time.Duration
+
+ // If Breakpoint is set, the above settings will only affect the
+ // named breakpoint.
+ Breakpoint string
+}
+
+// SetChaos sets the failure injection parameters to c.
+func SetChaos(c Chaos) {
+ chaosSetting = c
+ chaosEnabled = c.KillChance > 0 || c.SlowdownChance > 0
+}
+
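+// chaos possibly panics or sleeps at the named breakpoint, according to the
+// parameters previously set via SetChaos; the "insert" breakpoint is never
+// slowed down, only killed.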
+func chaos(bpname string) {
+ if !chaosEnabled {
+ return
+ }
+ switch chaosSetting.Breakpoint {
+ case "", bpname:
+ kc := chaosSetting.KillChance
+ if kc > 0 && mrand.Intn(1000) < int(kc*1000) {
+ panic(chaosError{})
+ }
+ if bpname == "insert" {
+ return
+ }
+ sc := chaosSetting.SlowdownChance
+ if sc > 0 && mrand.Intn(1000) < int(sc*1000) {
+ time.Sleep(chaosSetting.Slowdown)
+ }
+ }
+}
+
+type chaosError struct{}
+
+func (f *flusher) handleChaos(err *error) {
+ v := recover()
+ if v == nil {
+ return
+ }
+ if _, ok := v.(chaosError); ok {
+ f.debugf("Killed by chaos!")
+ *err = ErrChaos
+ return
+ }
+ panic(v)
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/debug.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/debug.go
new file mode 100644
index 00000000000..8224bb31387
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/debug.go
@@ -0,0 +1,109 @@
+package txn
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "sync/atomic"
+
+ "gopkg.in/mgo.v2/bson"
+)
+
+var (
+ debugEnabled bool
+ logger log_Logger
+)
+
+type log_Logger interface {
+ Output(calldepth int, s string) error
+}
+
+// SetLogger specifies the *log.Logger that logged messages are sent to.
+func SetLogger(l log_Logger) {
+ logger = l
+}
+
+// SetDebug enables or disables debugging.
+func SetDebug(debug bool) {
+ debugEnabled = debug
+}
+
+var ErrChaos = fmt.Errorf("interrupted by chaos")
+
+var debugId uint32
+
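+// debugPrefix returns a short per-flush identifier derived from a global
+// counter, encoding its nibbles as letters least-significant first: the
+// first call yields "a) ", the second "b) ", the seventeenth "ab) ".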
+func debugPrefix() string {
+ d := atomic.AddUint32(&debugId, 1) - 1
+ s := make([]byte, 0, 10)
+ for i := uint(0); i < 8; i++ {
+ s = append(s, "abcdefghijklmnop"[(d>>(4*i))&0xf])
+ if d>>(4*(i+1)) == 0 {
+ break
+ }
+ }
+ s = append(s, ')', ' ')
+ return string(s)
+}
+
+func logf(format string, args ...interface{}) {
+ if logger != nil {
+ logger.Output(2, fmt.Sprintf(format, argsForLog(args)...))
+ }
+}
+
+func debugf(format string, args ...interface{}) {
+ if debugEnabled && logger != nil {
+ logger.Output(2, fmt.Sprintf(format, argsForLog(args)...))
+ }
+}
+
+func argsForLog(args []interface{}) []interface{} {
+ for i, arg := range args {
+ switch v := arg.(type) {
+ case bson.ObjectId:
+ args[i] = v.Hex()
+ case []bson.ObjectId:
+ lst := make([]string, len(v))
+ for j, id := range v {
+ lst[j] = id.Hex()
+ }
+ args[i] = lst
+ case map[docKey][]bson.ObjectId:
+ buf := &bytes.Buffer{}
+ var dkeys docKeys
+ for dkey := range v {
+ dkeys = append(dkeys, dkey)
+ }
+ sort.Sort(dkeys)
+ for i, dkey := range dkeys {
+ if i > 0 {
+ buf.WriteByte(' ')
+ }
+ buf.WriteString(fmt.Sprintf("%v: {", dkey))
+ for j, id := range v[dkey] {
+ if j > 0 {
+ buf.WriteByte(' ')
+ }
+ buf.WriteString(id.Hex())
+ }
+ buf.WriteByte('}')
+ }
+ args[i] = buf.String()
+ case map[docKey][]int64:
+ buf := &bytes.Buffer{}
+ var dkeys docKeys
+ for dkey := range v {
+ dkeys = append(dkeys, dkey)
+ }
+ sort.Sort(dkeys)
+ for i, dkey := range dkeys {
+ if i > 0 {
+ buf.WriteByte(' ')
+ }
+ buf.WriteString(fmt.Sprintf("%v: %v", dkey, v[dkey]))
+ }
+ args[i] = buf.String()
+ }
+ }
+ return args
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/dockey_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/dockey_test.go
new file mode 100644
index 00000000000..e8dee952cf4
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/dockey_test.go
@@ -0,0 +1,205 @@
+package txn
+
+import (
+ "sort"
+
+ . "gopkg.in/check.v1"
+)
+
+type DocKeySuite struct{}
+
+var _ = Suite(&DocKeySuite{})
+
+type T struct {
+ A int
+ B string
+}
+
+type T2 struct {
+ A int
+ B string
+}
+
+type T3 struct {
+ A int
+ B string
+}
+
+type T4 struct {
+ A int
+ B string
+}
+
+type T5 struct {
+ F int
+ Q string
+}
+
+type T6 struct {
+ A int
+ B string
+}
+
+type T7 struct {
+ A bool
+ B float64
+}
+
+type T8 struct {
+ A int
+ B string
+}
+
+type T9 struct {
+ A int
+ B string
+ C bool
+}
+
+type T10 struct {
+ C int `bson:"a"`
+ D string `bson:"b,omitempty"`
+}
+
+type T11 struct {
+ C int
+ D string
+}
+
+type T12 struct {
+ S string
+}
+
+type T13 struct {
+ p, q, r bool
+ S string
+}
+
+var docKeysTests = [][]docKeys{
+ {{
+ {"c", 1},
+ {"c", 5},
+ {"c", 2},
+ }, {
+ {"c", 1},
+ {"c", 2},
+ {"c", 5},
+ }}, {{
+ {"c", "foo"},
+ {"c", "bar"},
+ {"c", "bob"},
+ }, {
+ {"c", "bar"},
+ {"c", "bob"},
+ {"c", "foo"},
+ }}, {{
+ {"c", 0.2},
+ {"c", 0.07},
+ {"c", 0.9},
+ }, {
+ {"c", 0.07},
+ {"c", 0.2},
+ {"c", 0.9},
+ }}, {{
+ {"c", true},
+ {"c", false},
+ {"c", true},
+ }, {
+ {"c", false},
+ {"c", true},
+ {"c", true},
+ }}, {{
+ {"c", T{1, "b"}},
+ {"c", T{1, "a"}},
+ {"c", T{0, "b"}},
+ {"c", T{0, "a"}},
+ }, {
+ {"c", T{0, "a"}},
+ {"c", T{0, "b"}},
+ {"c", T{1, "a"}},
+ {"c", T{1, "b"}},
+ }}, {{
+ {"c", T{1, "a"}},
+ {"c", T{0, "a"}},
+ }, {
+ {"c", T{0, "a"}},
+ {"c", T{1, "a"}},
+ }}, {{
+ {"c", T3{0, "b"}},
+ {"c", T2{1, "b"}},
+ {"c", T3{1, "a"}},
+ {"c", T2{0, "a"}},
+ }, {
+ {"c", T2{0, "a"}},
+ {"c", T3{0, "b"}},
+ {"c", T3{1, "a"}},
+ {"c", T2{1, "b"}},
+ }}, {{
+ {"c", T5{1, "b"}},
+ {"c", T4{1, "b"}},
+ {"c", T5{0, "a"}},
+ {"c", T4{0, "a"}},
+ }, {
+ {"c", T4{0, "a"}},
+ {"c", T5{0, "a"}},
+ {"c", T4{1, "b"}},
+ {"c", T5{1, "b"}},
+ }}, {{
+ {"c", T6{1, "b"}},
+ {"c", T7{true, 0.2}},
+ {"c", T6{0, "a"}},
+ {"c", T7{false, 0.04}},
+ }, {
+ {"c", T6{0, "a"}},
+ {"c", T6{1, "b"}},
+ {"c", T7{false, 0.04}},
+ {"c", T7{true, 0.2}},
+ }}, {{
+ {"c", T9{1, "b", true}},
+ {"c", T8{1, "b"}},
+ {"c", T9{0, "a", false}},
+ {"c", T8{0, "a"}},
+ }, {
+ {"c", T9{0, "a", false}},
+ {"c", T8{0, "a"}},
+ {"c", T9{1, "b", true}},
+ {"c", T8{1, "b"}},
+ }}, {{
+ {"b", 2},
+ {"a", 5},
+ {"c", 2},
+ {"b", 1},
+ }, {
+ {"a", 5},
+ {"b", 1},
+ {"b", 2},
+ {"c", 2},
+ }}, {{
+ {"c", T11{1, "a"}},
+ {"c", T11{1, "a"}},
+ {"c", T10{1, "a"}},
+ }, {
+ {"c", T10{1, "a"}},
+ {"c", T11{1, "a"}},
+ {"c", T11{1, "a"}},
+ }}, {{
+ {"c", T12{"a"}},
+ {"c", T13{false, true, false, "a"}},
+ {"c", T12{"b"}},
+ {"c", T13{false, true, false, "b"}},
+ }, {
+ {"c", T12{"a"}},
+ {"c", T13{false, true, false, "a"}},
+ {"c", T12{"b"}},
+ {"c", T13{false, true, false, "b"}},
+ }},
+}
+
+func (s *DocKeySuite) TestSort(c *C) {
+ for _, test := range docKeysTests {
+ keys := test[0]
+ expected := test[1]
+ sort.Sort(keys)
+ c.Check(keys, DeepEquals, expected)
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/flusher.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/flusher.go
new file mode 100644
index 00000000000..f640a438084
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/flusher.go
@@ -0,0 +1,985 @@
+package txn
+
+import (
+ "fmt"
+
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+)
+
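+// flush drives the transaction t, and any transactions found queued on
+// the same documents, through the state machine until t is applied or
+// aborted, using a fresh flusher to track per-document queues.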
+func flush(r *Runner, t *transaction) error {
+ f := &flusher{
+ Runner: r,
+ goal: t,
+ goalKeys: make(map[docKey]bool),
+ queue: make(map[docKey][]token),
+ debugId: debugPrefix(),
+ }
+ for _, dkey := range f.goal.docKeys() {
+ f.goalKeys[dkey] = true
+ }
+ return f.run()
+}
+
+type flusher struct {
+ *Runner
+ goal *transaction
+ goalKeys map[docKey]bool
+ queue map[docKey][]token
+ debugId string
+}
+
+func (f *flusher) run() (err error) {
+ if chaosEnabled {
+ defer f.handleChaos(&err)
+ }
+
+ f.debugf("Processing %s", f.goal)
+ seen := make(map[bson.ObjectId]*transaction)
+ if err := f.recurse(f.goal, seen); err != nil {
+ return err
+ }
+ if f.goal.done() {
+ return nil
+ }
+
+ // Sparse workloads will generally be managed entirely by recurse.
+ // Getting here means one or more transactions have dependencies
+ // and perhaps cycles.
+
+ // Build successors data for Tarjan's sort. Must consider
+ // that entries in txn-queue are not necessarily valid.
+ successors := make(map[bson.ObjectId][]bson.ObjectId)
+ ready := true
+ for _, dqueue := range f.queue {
+ NextPair:
+ for i := 0; i < len(dqueue); i++ {
+ pred := dqueue[i]
+ predid := pred.id()
+ predt := seen[predid]
+ if predt == nil || predt.Nonce != pred.nonce() {
+ continue
+ }
+ predsuccids, ok := successors[predid]
+ if !ok {
+ successors[predid] = nil
+ }
+
+ for j := i + 1; j < len(dqueue); j++ {
+ succ := dqueue[j]
+ succid := succ.id()
+ succt := seen[succid]
+ if succt == nil || succt.Nonce != succ.nonce() {
+ continue
+ }
+ if _, ok := successors[succid]; !ok {
+ successors[succid] = nil
+ }
+
+ // Found a valid pred/succ pair.
+ i = j - 1
+ for _, predsuccid := range predsuccids {
+ if predsuccid == succid {
+ continue NextPair
+ }
+ }
+ successors[predid] = append(predsuccids, succid)
+ if succid == f.goal.Id {
+ // There are still pre-requisites to handle.
+ ready = false
+ }
+ continue NextPair
+ }
+ }
+ }
+ f.debugf("Queues: %v", f.queue)
+ f.debugf("Successors: %v", successors)
+ if ready {
+ f.debugf("Goal %s has no real pre-requisites", f.goal)
+ return f.advance(f.goal, nil, true)
+ }
+
+ // Robert Tarjan's algorithm for detecting strongly-connected
+ // components is used for topological sorting and detecting
+ // cycles at once. The order in which transactions are applied
+ // to commonly affected documents must be agreed upon globally
+ // by all runners.
+ sorted := tarjanSort(successors)
+ if debugEnabled {
+ f.debugf("Tarjan output: %v", sorted)
+ }
+ pull := make(map[bson.ObjectId]*transaction)
+ for i := len(sorted) - 1; i >= 0; i-- {
+ scc := sorted[i]
+ f.debugf("Flushing %v", scc)
+ if len(scc) == 1 {
+ pull[scc[0]] = seen[scc[0]]
+ }
+ for _, id := range scc {
+ if err := f.advance(seen[id], pull, true); err != nil {
+ return err
+ }
+ }
+ if len(scc) > 1 {
+ for _, id := range scc {
+ pull[id] = seen[id]
+ }
+ }
+ }
+ return nil
+}
+
+func (f *flusher) recurse(t *transaction, seen map[bson.ObjectId]*transaction) error {
+ seen[t.Id] = t
+ err := f.advance(t, nil, false)
+ if err != errPreReqs {
+ return err
+ }
+ for _, dkey := range t.docKeys() {
+ for _, dtt := range f.queue[dkey] {
+ id := dtt.id()
+ if seen[id] != nil {
+ continue
+ }
+ qt, err := f.load(id)
+ if err != nil {
+ return err
+ }
+ err = f.recurse(qt, seen)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
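+// advance moves t forward through its states: preparing and asserting
+// it, then applying or aborting it. When force is false, errPreReqs is
+// returned if other transactions must be applied first.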
+func (f *flusher) advance(t *transaction, pull map[bson.ObjectId]*transaction, force bool) error {
+ for {
+ switch t.State {
+ case tpreparing, tprepared:
+ revnos, err := f.prepare(t, force)
+ if err != nil {
+ return err
+ }
+ if t.State != tprepared {
+ continue
+ }
+ if err = f.assert(t, revnos, pull); err != nil {
+ return err
+ }
+ if t.State != tprepared {
+ continue
+ }
+ if err = f.checkpoint(t, revnos); err != nil {
+ return err
+ }
+ case tapplying:
+ return f.apply(t, pull)
+ case taborting:
+ return f.abortOrReload(t, nil, pull)
+ case tapplied, taborted:
+ return nil
+ default:
+ panic(fmt.Errorf("transaction in unknown state: %q", t.State))
+ }
+ }
+ panic("unreachable")
+}
+
+type stash string
+
+const (
+ stashStable stash = ""
+ stashInsert stash = "insert"
+ stashRemove stash = "remove"
+)
+
+type txnInfo struct {
+ Queue []token `bson:"txn-queue"`
+ Revno int64 `bson:"txn-revno,omitempty"`
+ Insert bson.ObjectId `bson:"txn-insert,omitempty"`
+ Remove bson.ObjectId `bson:"txn-remove,omitempty"`
+}
+
+type stashState string
+
+const (
+ stashNew stashState = ""
+ stashInserting stashState = "inserting"
+)
+
+var txnFields = bson.D{{"txn-queue", 1}, {"txn-revno", 1}, {"txn-remove", 1}, {"txn-insert", 1}}
+
+var errPreReqs = fmt.Errorf("transaction has pre-requisites and force is false")
+
+// prepare injects t's id onto txn-queue for all affected documents
+// and collects the current txn-queue and txn-revno values during
+// the process. If the prepared txn-queue indicates that there are
+// pre-requisite transactions to be applied and the force parameter
+// is false, errPreReqs will be returned. Otherwise, the current
+// tip revision numbers for all the documents are returned.
+func (f *flusher) prepare(t *transaction, force bool) (revnos []int64, err error) {
+ if t.State != tpreparing {
+ return f.rescan(t, force)
+ }
+ f.debugf("Preparing %s", t)
+
+ // dkeys being sorted means stable iteration across all runners. This
+ // isn't strictly required, but reduces the chances of cycles.
+ dkeys := t.docKeys()
+
+ revno := make(map[docKey]int64)
+ info := txnInfo{}
+ tt := tokenFor(t)
+NextDoc:
+ for _, dkey := range dkeys {
+ change := mgo.Change{
+ Update: bson.D{{"$addToSet", bson.D{{"txn-queue", tt}}}},
+ ReturnNew: true,
+ }
+ c := f.tc.Database.C(dkey.C)
+ cquery := c.FindId(dkey.Id).Select(txnFields)
+
+ RetryDoc:
+ change.Upsert = false
+ chaos("")
+ if _, err := cquery.Apply(change, &info); err == nil {
+ if info.Remove == "" {
+ // Fast path, unless workload is insert/remove heavy.
+ revno[dkey] = info.Revno
+ f.queue[dkey] = info.Queue
+ f.debugf("[A] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue)
+ continue NextDoc
+ } else {
+ // Handle remove in progress before preparing it.
+ if err := f.loadAndApply(info.Remove); err != nil {
+ return nil, err
+ }
+ goto RetryDoc
+ }
+ } else if err != mgo.ErrNotFound {
+ return nil, err
+ }
+
+ // Document missing. Use stash collection.
+ change.Upsert = true
+ chaos("")
+ _, err := f.sc.FindId(dkey).Apply(change, &info)
+ if err != nil {
+ return nil, err
+ }
+ if info.Insert != "" {
+ // Handle insert in progress before preparing it.
+ if err := f.loadAndApply(info.Insert); err != nil {
+ return nil, err
+ }
+ goto RetryDoc
+ }
+
+ // Must confirm stash is still in use and is the same one
+ // prepared, since applying a remove overwrites the stash.
+ docFound := false
+ stashFound := false
+ if err = c.FindId(dkey.Id).Select(txnFields).One(&info); err == nil {
+ docFound = true
+ } else if err != mgo.ErrNotFound {
+ return nil, err
+ } else if err = f.sc.FindId(dkey).One(&info); err == nil {
+ stashFound = true
+ if info.Revno == 0 {
+ // Missing revno in the stash only happens when it
+ // has been upserted, in which case it defaults to -1.
+ // Txn-inserted documents get revno -1 while in the stash
+ // for the first time, and -revno+1 == 2 when they go live.
+ info.Revno = -1
+ }
+ } else if err != mgo.ErrNotFound {
+ return nil, err
+ }
+
+ if docFound && info.Remove == "" || stashFound && info.Insert == "" {
+ for _, dtt := range info.Queue {
+ if dtt != tt {
+ continue
+ }
+ // Found tt properly prepared.
+ if stashFound {
+ f.debugf("[B] Prepared document %v on stash with revno %d and queue: %v", dkey, info.Revno, info.Queue)
+ } else {
+ f.debugf("[B] Prepared document %v with revno %d and queue: %v", dkey, info.Revno, info.Queue)
+ }
+ revno[dkey] = info.Revno
+ f.queue[dkey] = info.Queue
+ continue NextDoc
+ }
+ }
+
+ // The stash wasn't valid and tt got overwritten. Try again.
+ f.unstashToken(tt, dkey)
+ goto RetryDoc
+ }
+
+ // Save the prepared nonce onto t.
+ nonce := tt.nonce()
+ qdoc := bson.D{{"_id", t.Id}, {"s", tpreparing}}
+ udoc := bson.D{{"$set", bson.D{{"s", tprepared}, {"n", nonce}}}}
+ chaos("set-prepared")
+ err = f.tc.Update(qdoc, udoc)
+ if err == nil {
+ t.State = tprepared
+ t.Nonce = nonce
+ } else if err == mgo.ErrNotFound {
+ f.debugf("Can't save nonce of %s: LOST RACE", tt)
+ if err := f.reload(t); err != nil {
+ return nil, err
+ } else if t.State == tpreparing {
+ panic("can't save nonce yet transaction is still preparing")
+ } else if t.State != tprepared {
+ return t.Revnos, nil
+ }
+ tt = t.token()
+ } else if err != nil {
+ return nil, err
+ }
+
+ prereqs, found := f.hasPreReqs(tt, dkeys)
+ if !found {
+ // Must only happen when reloading above.
+ return f.rescan(t, force)
+ } else if prereqs && !force {
+ f.debugf("Prepared queue with %s [has prereqs & not forced].", tt)
+ return nil, errPreReqs
+ }
+ revnos = assembledRevnos(t.Ops, revno)
+ if !prereqs {
+ f.debugf("Prepared queue with %s [no prereqs]. Revnos: %v", tt, revnos)
+ } else {
+ f.debugf("Prepared queue with %s [forced] Revnos: %v", tt, revnos)
+ }
+ return revnos, nil
+}
+
+func (f *flusher) unstashToken(tt token, dkey docKey) error {
+ qdoc := bson.D{{"_id", dkey}, {"txn-queue", tt}}
+ udoc := bson.D{{"$pull", bson.D{{"txn-queue", tt}}}}
+ chaos("")
+ if err := f.sc.Update(qdoc, udoc); err == nil {
+ chaos("")
+ err = f.sc.Remove(bson.D{{"_id", dkey}, {"txn-queue", bson.D{}}})
+ } else if err != mgo.ErrNotFound {
+ return err
+ }
+ return nil
+}
+
+func (f *flusher) rescan(t *transaction, force bool) (revnos []int64, err error) {
+ f.debugf("Rescanning %s", t)
+ if t.State != tprepared {
+ panic(fmt.Errorf("rescanning transaction in invalid state: %q", t.State))
+ }
+
+ // dkeys being sorted means stable iteration across all
+ // runners. This isn't strictly required, but reduces the chances
+ // of cycles.
+ dkeys := t.docKeys()
+
+ tt := t.token()
+ if !force {
+ prereqs, found := f.hasPreReqs(tt, dkeys)
+ if found && prereqs {
+ // Its state is already known.
+ return nil, errPreReqs
+ }
+ }
+
+ revno := make(map[docKey]int64)
+ info := txnInfo{}
+ for _, dkey := range dkeys {
+ const retries = 3
+ retry := -1
+
+ RetryDoc:
+ retry++
+ c := f.tc.Database.C(dkey.C)
+ if err := c.FindId(dkey.Id).Select(txnFields).One(&info); err == mgo.ErrNotFound {
+ // Document is missing. Look in stash.
+ chaos("")
+ if err := f.sc.FindId(dkey).One(&info); err == mgo.ErrNotFound {
+ // Stash also doesn't exist. Maybe someone applied it.
+ if err := f.reload(t); err != nil {
+ return nil, err
+ } else if t.State != tprepared {
+ return t.Revnos, err
+ }
+ // Not applying either.
+ if retry < retries {
+ // Retry since there might be an insert/remove race.
+ goto RetryDoc
+ }
+ // Neither the doc nor the stash seem to exist.
+ return nil, fmt.Errorf("cannot find document %v for applying transaction %s", dkey, t)
+ } else if err != nil {
+ return nil, err
+ }
+ // Stash found.
+ if info.Insert != "" {
+ // Handle insert in progress before assuming ordering is good.
+ if err := f.loadAndApply(info.Insert); err != nil {
+ return nil, err
+ }
+ goto RetryDoc
+ }
+ if info.Revno == 0 {
+ // Missing revno in the stash means -1.
+ info.Revno = -1
+ }
+ } else if err != nil {
+ return nil, err
+ } else if info.Remove != "" {
+ // Handle remove in progress before assuming ordering is good.
+ if err := f.loadAndApply(info.Remove); err != nil {
+ return nil, err
+ }
+ goto RetryDoc
+ }
+ revno[dkey] = info.Revno
+
+ found := false
+ for _, id := range info.Queue {
+ if id == tt {
+ found = true
+ break
+ }
+ }
+ f.queue[dkey] = info.Queue
+ if !found {
+ // Rescanned transaction id was not in the queue. This could mean one
+ // of three things:
+ // 1) The transaction was applied and popped by someone else. This is
+ // the common case.
+ // 2) We've read an out-of-date queue from the stash. This can happen
+ // when someone else was paused for a long while preparing another
+ // transaction for this document, and improperly upserted to the
+ // stash when unpaused (after someone else inserted the document).
+ // This is rare but possible.
+ // 3) There's an actual bug somewhere, or outside interference. Worst
+ // possible case.
+ f.debugf("Rescanned document %v misses %s in queue: %v", dkey, tt, info.Queue)
+ err := f.reload(t)
+ if t.State == tpreparing || t.State == tprepared {
+ if retry < retries {
+ // Case 2.
+ goto RetryDoc
+ }
+ // Case 3.
+ return nil, fmt.Errorf("cannot find transaction %s in queue for document %v", t, dkey)
+ }
+ // Case 1.
+ return t.Revnos, err
+ }
+ }
+
+ prereqs, found := f.hasPreReqs(tt, dkeys)
+ if !found {
+ panic("rescanning loop guarantees that this can't happen")
+ } else if prereqs && !force {
+ f.debugf("Rescanned queue with %s: has prereqs, not forced", tt)
+ return nil, errPreReqs
+ }
+ revnos = assembledRevnos(t.Ops, revno)
+ if !prereqs {
+ f.debugf("Rescanned queue with %s: no prereqs, revnos: %v", tt, revnos)
+ } else {
+ f.debugf("Rescanned queue with %s: has prereqs, forced, revnos: %v", tt, revnos)
+ }
+ return revnos, nil
+}
+
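+// assembledRevnos returns, per operation, the revno each target document
+// must be at when the operation applies, simulating the effect of earlier
+// operations in the same transaction. For example, a live document at
+// revno 2 that is removed moves to -2-1 == -3 (negative meaning missing
+// or stashed), and txn-inserting it again brings it live at -(-3)+1 == 4.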
+func assembledRevnos(ops []Op, revno map[docKey]int64) []int64 {
+ revnos := make([]int64, len(ops))
+ for i, op := range ops {
+ dkey := op.docKey()
+ revnos[i] = revno[dkey]
+ drevno := revno[dkey]
+ switch {
+ case op.Insert != nil && drevno < 0:
+ revno[dkey] = -drevno + 1
+ case op.Update != nil && drevno >= 0:
+ revno[dkey] = drevno + 1
+ case op.Remove && drevno >= 0:
+ revno[dkey] = -drevno - 1
+ }
+ }
+ return revnos
+}
+
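+// hasPreReqs reports whether any token from a different transaction
+// precedes tt in the queue of one of the given documents (prereqs), and
+// whether tt itself was found in the queues of all of them (found).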
+func (f *flusher) hasPreReqs(tt token, dkeys docKeys) (prereqs, found bool) {
+ found = true
+NextDoc:
+ for _, dkey := range dkeys {
+ for _, dtt := range f.queue[dkey] {
+ if dtt == tt {
+ continue NextDoc
+ } else if dtt.id() != tt.id() {
+ prereqs = true
+ }
+ }
+ found = false
+ }
+ return
+}
+
+func (f *flusher) reload(t *transaction) error {
+ var newt transaction
+ query := f.tc.FindId(t.Id)
+ query.Select(bson.D{{"s", 1}, {"n", 1}, {"r", 1}})
+ if err := query.One(&newt); err != nil {
+ return fmt.Errorf("failed to reload transaction: %v", err)
+ }
+ t.State = newt.State
+ t.Nonce = newt.Nonce
+ t.Revnos = newt.Revnos
+ f.debugf("Reloaded %s: %q", t, t.State)
+ return nil
+}
+
+func (f *flusher) loadAndApply(id bson.ObjectId) error {
+ t, err := f.load(id)
+ if err != nil {
+ return err
+ }
+ return f.advance(t, nil, true)
+}
+
+// assert verifies that all assertions in t match the content that t
+// will be applied upon. If an assertion fails, the transaction state
+// is changed to aborted.
+func (f *flusher) assert(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) error {
+ f.debugf("Asserting %s with revnos %v", t, revnos)
+ if t.State != tprepared {
+ panic(fmt.Errorf("asserting transaction in invalid state: %q", t.State))
+ }
+ qdoc := make(bson.D, 3)
+ revno := make(map[docKey]int64)
+ for i, op := range t.Ops {
+ dkey := op.docKey()
+ if _, ok := revno[dkey]; !ok {
+ revno[dkey] = revnos[i]
+ }
+ if op.Assert == nil {
+ continue
+ }
+ if op.Assert == DocMissing {
+ if revnos[i] >= 0 {
+ return f.abortOrReload(t, revnos, pull)
+ }
+ continue
+ }
+ if op.Insert != nil {
+ return fmt.Errorf("Insert can only Assert txn.DocMissing", op.Assert)
+ }
+ // if revnos[i] < 0 { abort }?
+
+ qdoc = append(qdoc[:0], bson.DocElem{"_id", op.Id})
+ if op.Assert != DocMissing {
+ var revnoq interface{}
+ if n := revno[dkey]; n == 0 {
+ revnoq = bson.D{{"$exists", false}}
+ } else {
+ revnoq = n
+ }
+ // XXX Add tt to the query here, once we're sure it's all working.
+ // Not having it increases the chances of breaking on bad logic.
+ qdoc = append(qdoc, bson.DocElem{"txn-revno", revnoq})
+ if op.Assert != DocExists {
+ qdoc = append(qdoc, bson.DocElem{"$or", []interface{}{op.Assert}})
+ }
+ }
+
+ c := f.tc.Database.C(op.C)
+ if err := c.Find(qdoc).Select(bson.D{{"_id", 1}}).One(nil); err == mgo.ErrNotFound {
+ // Assertion failed or someone else started applying.
+ return f.abortOrReload(t, revnos, pull)
+ } else if err != nil {
+ return err
+ }
+ }
+ f.debugf("Asserting %s succeeded", t)
+ return nil
+}
+
+func (f *flusher) abortOrReload(t *transaction, revnos []int64, pull map[bson.ObjectId]*transaction) (err error) {
+ f.debugf("Aborting or reloading %s (was %q)", t, t.State)
+ if t.State == tprepared {
+ qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}}
+ udoc := bson.D{{"$set", bson.D{{"s", taborting}}}}
+ chaos("set-aborting")
+ if err = f.tc.Update(qdoc, udoc); err == nil {
+ t.State = taborting
+ } else if err == mgo.ErrNotFound {
+ if err = f.reload(t); err != nil || t.State != taborting {
+ f.debugf("Won't abort %s. Reloaded state: %q", t, t.State)
+ return err
+ }
+ } else {
+ return err
+ }
+ } else if t.State != taborting {
+ panic(fmt.Errorf("aborting transaction in invalid state: %q", t.State))
+ }
+
+ if len(revnos) > 0 {
+ if pull == nil {
+ pull = map[bson.ObjectId]*transaction{t.Id: t}
+ }
+ seen := make(map[docKey]bool)
+ for i, op := range t.Ops {
+ dkey := op.docKey()
+ if seen[op.docKey()] {
+ continue
+ }
+ seen[dkey] = true
+
+ pullAll := tokensToPull(f.queue[dkey], pull, "")
+ if len(pullAll) == 0 {
+ continue
+ }
+ udoc := bson.D{{"$pullAll", bson.D{{"txn-queue", pullAll}}}}
+ chaos("")
+ if revnos[i] < 0 {
+ err = f.sc.UpdateId(dkey, udoc)
+ } else {
+ c := f.tc.Database.C(dkey.C)
+ err = c.UpdateId(dkey.Id, udoc)
+ }
+ if err != nil && err != mgo.ErrNotFound {
+ return err
+ }
+ }
+ }
+ udoc := bson.D{{"$set", bson.D{{"s", taborted}}}}
+ chaos("set-aborted")
+ if err := f.tc.UpdateId(t.Id, udoc); err != nil && err != mgo.ErrNotFound {
+ return err
+ }
+ t.State = taborted
+ f.debugf("Aborted %s", t)
+ return nil
+}
+
+func (f *flusher) checkpoint(t *transaction, revnos []int64) error {
+ var debugRevnos map[docKey][]int64
+ if debugEnabled {
+ debugRevnos = make(map[docKey][]int64)
+ for i, op := range t.Ops {
+ dkey := op.docKey()
+ debugRevnos[dkey] = append(debugRevnos[dkey], revnos[i])
+ }
+ f.debugf("Ready to apply %s. Saving revnos %v", t, debugRevnos)
+ }
+
+ // Save in t the txn-revno values the transaction must run on.
+ qdoc := bson.D{{"_id", t.Id}, {"s", tprepared}}
+ udoc := bson.D{{"$set", bson.D{{"s", tapplying}, {"r", revnos}}}}
+ chaos("set-applying")
+ err := f.tc.Update(qdoc, udoc)
+ if err == nil {
+ t.State = tapplying
+ t.Revnos = revnos
+ f.debugf("Ready to apply %s. Saving revnos %v: DONE", t, debugRevnos)
+ } else if err == mgo.ErrNotFound {
+ f.debugf("Ready to apply %s. Saving revnos %v: LOST RACE", t, debugRevnos)
+ return f.reload(t)
+ }
+ return nil
+}
+
+func (f *flusher) apply(t *transaction, pull map[bson.ObjectId]*transaction) error {
+ f.debugf("Applying transaction %s", t)
+ if t.State != tapplying {
+ panic(fmt.Errorf("applying transaction in invalid state: %q", t.State))
+ }
+ if pull == nil {
+ pull = map[bson.ObjectId]*transaction{t.Id: t}
+ }
+
+ logRevnos := append([]int64(nil), t.Revnos...)
+ logDoc := bson.D{{"_id", t.Id}}
+
+ tt := tokenFor(t)
+ for i := range t.Ops {
+ op := &t.Ops[i]
+ dkey := op.docKey()
+ dqueue := f.queue[dkey]
+ revno := t.Revnos[i]
+
+ var opName string
+ if debugEnabled {
+ opName = op.name()
+ f.debugf("Applying %s op %d (%s) on %v with txn-revno %d", t, i, opName, dkey, revno)
+ }
+
+ c := f.tc.Database.C(op.C)
+
+ qdoc := bson.D{{"_id", dkey.Id}, {"txn-revno", revno}, {"txn-queue", tt}}
+ if op.Insert != nil {
+ qdoc[0].Value = dkey
+ if revno == -1 {
+ qdoc[1].Value = bson.D{{"$exists", false}}
+ }
+ } else if revno == 0 {
+ // There's no document with revno 0. The only way to see it is
+ // when an existing document participates in a transaction for
+ // the first time. Txn-inserted documents get revno -1 while in
+ // the stash for the first time, and -revno+1 == 2 when they go live.
+ qdoc[1].Value = bson.D{{"$exists", false}}
+ }
+
+ pullAll := tokensToPull(dqueue, pull, tt)
+
+ var d bson.D
+ var outcome string
+ var err error
+ switch {
+ case op.Update != nil:
+ if revno < 0 {
+ err = mgo.ErrNotFound
+ f.debugf("Won't try to apply update op; negative revision means the document is missing or stashed")
+ } else {
+ newRevno := revno + 1
+ logRevnos[i] = newRevno
+ if d, err = objToDoc(op.Update); err != nil {
+ return err
+ }
+ if d, err = addToDoc(d, "$pullAll", bson.D{{"txn-queue", pullAll}}); err != nil {
+ return err
+ }
+ if d, err = addToDoc(d, "$set", bson.D{{"txn-revno", newRevno}}); err != nil {
+ return err
+ }
+ chaos("")
+ err = c.Update(qdoc, d)
+ }
+ case op.Remove:
+ if revno < 0 {
+ err = mgo.ErrNotFound
+ } else {
+ newRevno := -revno - 1
+ logRevnos[i] = newRevno
+ nonce := newNonce()
+ stash := txnInfo{}
+ change := mgo.Change{
+ Update: bson.D{{"$push", bson.D{{"n", nonce}}}},
+ Upsert: true,
+ ReturnNew: true,
+ }
+ if _, err = f.sc.FindId(dkey).Apply(change, &stash); err != nil {
+ return err
+ }
+ change = mgo.Change{
+ Update: bson.D{{"$set", bson.D{{"txn-remove", t.Id}}}},
+ ReturnNew: true,
+ }
+ var info txnInfo
+ if _, err = c.Find(qdoc).Apply(change, &info); err == nil {
+ // The document still exists so the stash previously
+ // observed was either out of date or necessarily
+ // contained the token being applied.
+ f.debugf("Marked document %v to be removed on revno %d with queue: %v", dkey, info.Revno, info.Queue)
+ updated := false
+ if !hasToken(stash.Queue, tt) {
+ var set, unset bson.D
+ if revno == 0 {
+ // Missing revno in stash means -1.
+ set = bson.D{{"txn-queue", info.Queue}}
+ unset = bson.D{{"n", 1}, {"txn-revno", 1}}
+ } else {
+ set = bson.D{{"txn-queue", info.Queue}, {"txn-revno", newRevno}}
+ unset = bson.D{{"n", 1}}
+ }
+ qdoc := bson.D{{"_id", dkey}, {"n", nonce}}
+ udoc := bson.D{{"$set", set}, {"$unset", unset}}
+ if err = f.sc.Update(qdoc, udoc); err == nil {
+ updated = true
+ } else if err != mgo.ErrNotFound {
+ return err
+ }
+ }
+ if updated {
+ f.debugf("Updated stash for document %v with revno %d and queue: %v", dkey, newRevno, info.Queue)
+ } else {
+ f.debugf("Stash for document %v was up-to-date", dkey)
+ }
+ err = c.Remove(qdoc)
+ }
+ }
+ case op.Insert != nil:
+ if revno >= 0 {
+ err = mgo.ErrNotFound
+ } else {
+ newRevno := -revno + 1
+ logRevnos[i] = newRevno
+ if d, err = objToDoc(op.Insert); err != nil {
+ return err
+ }
+ change := mgo.Change{
+ Update: bson.D{{"$set", bson.D{{"txn-insert", t.Id}}}},
+ ReturnNew: true,
+ }
+ chaos("")
+ var info txnInfo
+ if _, err = f.sc.Find(qdoc).Apply(change, &info); err == nil {
+ f.debugf("Stash for document %v has revno %d and queue: %v", dkey, info.Revno, info.Queue)
+ d = setInDoc(d, bson.D{{"_id", op.Id}, {"txn-revno", newRevno}, {"txn-queue", info.Queue}})
+ // Unlikely yet unfortunate race in here if this gets seriously
+ // delayed. If someone inserts+removes meanwhile, this will
+ // reinsert, and there's no way to avoid that without either
+ // polluting the collection or compromising sharding. applyOps
+ // can solve the former, but it can't shard (SERVER-1439).
+ chaos("insert")
+ err = c.Insert(d)
+ if err == nil || mgo.IsDup(err) {
+ if err == nil {
+ f.debugf("New document %v inserted with revno %d and queue: %v", dkey, info.Revno, info.Queue)
+ } else {
+ f.debugf("Document %v already existed", dkey)
+ }
+ chaos("")
+ if err = f.sc.Remove(qdoc); err == nil {
+ f.debugf("Stash for document %v removed", dkey)
+ }
+ }
+ }
+ }
+ case op.Assert != nil:
+ // Pure assertion. No changes to apply.
+ }
+ if err == nil {
+ outcome = "DONE"
+ } else if err == mgo.ErrNotFound || mgo.IsDup(err) {
+ outcome = "MISS"
+ err = nil
+ } else {
+ outcome = err.Error()
+ }
+ if debugEnabled {
+ f.debugf("Applying %s op %d (%s) on %v with txn-revno %d: %s", t, i, opName, dkey, revno, outcome)
+ }
+ if err != nil {
+ return err
+ }
+
+ if f.lc != nil && op.isChange() {
+ // Add change to the log document.
+ var dr bson.D
+ for li := range logDoc {
+ elem := &logDoc[li]
+ if elem.Name == op.C {
+ dr = elem.Value.(bson.D)
+ break
+ }
+ }
+ if dr == nil {
+ logDoc = append(logDoc, bson.DocElem{op.C, bson.D{{"d", []interface{}{}}, {"r", []int64{}}}})
+ dr = logDoc[len(logDoc)-1].Value.(bson.D)
+ }
+ dr[0].Value = append(dr[0].Value.([]interface{}), op.Id)
+ dr[1].Value = append(dr[1].Value.([]int64), logRevnos[i])
+ }
+ }
+ t.State = tapplied
+
+ if f.lc != nil {
+ // Insert log document into the changelog collection.
+ f.debugf("Inserting %s into change log", t)
+ err := f.lc.Insert(logDoc)
+ if err != nil && !mgo.IsDup(err) {
+ return err
+ }
+ }
+
+ // It's been applied, so errors are ignored here. It's fine for someone
+ // else to win the race and mark it as applied, and it's also fine for
+ // it to remain pending until a later point when someone will perceive
+ // it has been applied and mark it as such.
+ f.debugf("Marking %s as applied", t)
+ chaos("set-applied")
+ f.tc.Update(bson.D{{"_id", t.Id}, {"s", tapplying}}, bson.D{{"$set", bson.D{{"s", tapplied}}}})
+ return nil
+}
+
+func tokensToPull(dqueue []token, pull map[bson.ObjectId]*transaction, dontPull token) []token {
+ var result []token
+ for j := len(dqueue) - 1; j >= 0; j-- {
+ dtt := dqueue[j]
+ if dtt == dontPull {
+ continue
+ }
+ if _, ok := pull[dtt.id()]; ok {
+ // It was handled before and this is a leftover invalid
+ // nonce in the queue. Cherry-pick it out.
+ result = append(result, dtt)
+ }
+ }
+ return result
+}
+
+func objToDoc(obj interface{}) (d bson.D, err error) {
+ data, err := bson.Marshal(obj)
+ if err != nil {
+ return nil, err
+ }
+ err = bson.Unmarshal(data, &d)
+ if err != nil {
+ return nil, err
+ }
+ return d, err
+}
+
+func addToDoc(doc bson.D, key string, add bson.D) (bson.D, error) {
+ for i := range doc {
+ elem := &doc[i]
+ if elem.Name != key {
+ continue
+ }
+ if old, ok := elem.Value.(bson.D); ok {
+ elem.Value = append(old, add...)
+ return doc, nil
+ } else {
+ return nil, fmt.Errorf("invalid %q value in change document: %#v", key, elem.Value)
+ }
+ }
+ return append(doc, bson.DocElem{key, add}), nil
+}
+
+func setInDoc(doc bson.D, set bson.D) bson.D {
+ dlen := len(doc)
+NextS:
+ for s := range set {
+ sname := set[s].Name
+ for d := 0; d < dlen; d++ {
+ if doc[d].Name == sname {
+ doc[d].Value = set[s].Value
+ continue NextS
+ }
+ }
+ doc = append(doc, set[s])
+ }
+ return doc
+}
+
+func hasToken(tokens []token, tt token) bool {
+ for _, ttt := range tokens {
+ if ttt == tt {
+ return true
+ }
+ }
+ return false
+}
+
+func (f *flusher) debugf(format string, args ...interface{}) {
+ if !debugEnabled {
+ return
+ }
+ debugf(f.debugId+format, args...)
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/output.txt b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/output.txt
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/output.txt
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/sim_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/sim_test.go
new file mode 100644
index 00000000000..a369ded7c31
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/sim_test.go
@@ -0,0 +1,388 @@
+package txn_test
+
+import (
+ "flag"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "gopkg.in/mgo.v2/dbtest"
+ "gopkg.in/mgo.v2/txn"
+ . "gopkg.in/check.v1"
+ "math/rand"
+ "time"
+)
+
+var (
+ duration = flag.Duration("duration", 200*time.Millisecond, "duration for each simulation")
+ seed = flag.Int64("seed", 0, "seed for rand")
+)
+
+type params struct {
+ killChance float64
+ slowdownChance float64
+ slowdown time.Duration
+
+ unsafe bool
+ workers int
+ accounts int
+ changeHalf bool
+ reinsertCopy bool
+ reinsertZeroed bool
+ changelog bool
+
+ changes int
+}
+
+func (s *S) TestSim1Worker(c *C) {
+ simulate(c, &s.server, params{
+ workers: 1,
+ accounts: 4,
+ killChance: 0.01,
+ slowdownChance: 0.3,
+ slowdown: 100 * time.Millisecond,
+ })
+}
+
+func (s *S) TestSim4WorkersDense(c *C) {
+ simulate(c, &s.server, params{
+ workers: 4,
+ accounts: 2,
+ killChance: 0.01,
+ slowdownChance: 0.3,
+ slowdown: 100 * time.Millisecond,
+ })
+}
+
+func (s *S) TestSim4WorkersSparse(c *C) {
+ simulate(c, &s.server, params{
+ workers: 4,
+ accounts: 10,
+ killChance: 0.01,
+ slowdownChance: 0.3,
+ slowdown: 100 * time.Millisecond,
+ })
+}
+
+func (s *S) TestSimHalf1Worker(c *C) {
+ simulate(c, &s.server, params{
+ workers: 1,
+ accounts: 4,
+ changeHalf: true,
+ killChance: 0.01,
+ slowdownChance: 0.3,
+ slowdown: 100 * time.Millisecond,
+ })
+}
+
+func (s *S) TestSimHalf4WorkersDense(c *C) {
+ simulate(c, &s.server, params{
+ workers: 4,
+ accounts: 2,
+ changeHalf: true,
+ killChance: 0.01,
+ slowdownChance: 0.3,
+ slowdown: 100 * time.Millisecond,
+ })
+}
+
+func (s *S) TestSimHalf4WorkersSparse(c *C) {
+ simulate(c, &s.server, params{
+ workers: 4,
+ accounts: 10,
+ changeHalf: true,
+ killChance: 0.01,
+ slowdownChance: 0.3,
+ slowdown: 100 * time.Millisecond,
+ })
+}
+
+func (s *S) TestSimReinsertCopy1Worker(c *C) {
+ simulate(c, &s.server, params{
+ workers: 1,
+ accounts: 10,
+ reinsertCopy: true,
+ killChance: 0.01,
+ slowdownChance: 0.3,
+ slowdown: 100 * time.Millisecond,
+ })
+}
+
+func (s *S) TestSimReinsertCopy4Workers(c *C) {
+ simulate(c, &s.server, params{
+ workers: 4,
+ accounts: 10,
+ reinsertCopy: true,
+ killChance: 0.01,
+ slowdownChance: 0.3,
+ slowdown: 100 * time.Millisecond,
+ })
+}
+
+func (s *S) TestSimReinsertZeroed1Worker(c *C) {
+ simulate(c, &s.server, params{
+ workers: 1,
+ accounts: 10,
+ reinsertZeroed: true,
+ killChance: 0.01,
+ slowdownChance: 0.3,
+ slowdown: 100 * time.Millisecond,
+ })
+}
+
+func (s *S) TestSimReinsertZeroed4Workers(c *C) {
+ simulate(c, &s.server, params{
+ workers: 4,
+ accounts: 10,
+ reinsertZeroed: true,
+ killChance: 0.01,
+ slowdownChance: 0.3,
+ slowdown: 100 * time.Millisecond,
+ })
+}
+
+func (s *S) TestSimChangeLog(c *C) {
+ simulate(c, &s.server, params{
+ workers: 4,
+ accounts: 10,
+ killChance: 0.01,
+ slowdownChance: 0.3,
+ slowdown: 100 * time.Millisecond,
+ changelog: true,
+ })
+}
+
+type balanceChange struct {
+ id bson.ObjectId
+ origin int
+ target int
+ amount int
+}
+
+func simulate(c *C, server *dbtest.DBServer, params params) {
+ seed := *seed
+ if seed == 0 {
+ seed = time.Now().UnixNano()
+ }
+ rand.Seed(seed)
+ c.Logf("Seed: %v", seed)
+
+ txn.SetChaos(txn.Chaos{
+ KillChance: params.killChance,
+ SlowdownChance: params.slowdownChance,
+ Slowdown: params.slowdown,
+ })
+ defer txn.SetChaos(txn.Chaos{})
+
+ session := server.Session()
+ defer session.Close()
+
+ db := session.DB("test")
+ tc := db.C("tc")
+
+ runner := txn.NewRunner(tc)
+
+ tclog := db.C("tc.log")
+ if params.changelog {
+ info := mgo.CollectionInfo{
+ Capped: true,
+ MaxBytes: 1000000,
+ }
+ err := tclog.Create(&info)
+ c.Assert(err, IsNil)
+ runner.ChangeLog(tclog)
+ }
+
+ accounts := db.C("accounts")
+ for i := 0; i < params.accounts; i++ {
+ err := accounts.Insert(M{"_id": i, "balance": 300})
+ c.Assert(err, IsNil)
+ }
+ var stop time.Time
+ if params.changes <= 0 {
+ stop = time.Now().Add(*duration)
+ }
+
+ max := params.accounts
+ if params.reinsertCopy || params.reinsertZeroed {
+ max = int(float64(params.accounts) * 1.5)
+ }
+
+ changes := make(chan balanceChange, 1024)
+
+ //session.SetMode(mgo.Eventual, true)
+ for i := 0; i < params.workers; i++ {
+ go func() {
+ n := 0
+ for {
+ if n > 0 && n == params.changes {
+ break
+ }
+ if !stop.IsZero() && time.Now().After(stop) {
+ break
+ }
+
+ change := balanceChange{
+ id: bson.NewObjectId(),
+ origin: rand.Intn(max),
+ target: rand.Intn(max),
+ amount: 100,
+ }
+
+ var old Account
+ var oldExists bool
+ if params.reinsertCopy || params.reinsertZeroed {
+ if err := accounts.FindId(change.origin).One(&old); err != mgo.ErrNotFound {
+ c.Check(err, IsNil)
+ change.amount = old.Balance
+ oldExists = true
+ }
+ }
+
+ var ops []txn.Op
+ switch {
+ case params.reinsertCopy && oldExists:
+ ops = []txn.Op{{
+ C: "accounts",
+ Id: change.origin,
+ Assert: M{"balance": change.amount},
+ Remove: true,
+ }, {
+ C: "accounts",
+ Id: change.target,
+ Assert: txn.DocMissing,
+ Insert: M{"balance": change.amount},
+ }}
+ case params.reinsertZeroed && oldExists:
+ ops = []txn.Op{{
+ C: "accounts",
+ Id: change.target,
+ Assert: txn.DocMissing,
+ Insert: M{"balance": 0},
+ }, {
+ C: "accounts",
+ Id: change.origin,
+ Assert: M{"balance": change.amount},
+ Remove: true,
+ }, {
+ C: "accounts",
+ Id: change.target,
+ Assert: txn.DocExists,
+ Update: M{"$inc": M{"balance": change.amount}},
+ }}
+ case params.changeHalf:
+ ops = []txn.Op{{
+ C: "accounts",
+ Id: change.origin,
+ Assert: M{"balance": M{"$gte": change.amount}},
+ Update: M{"$inc": M{"balance": -change.amount / 2}},
+ }, {
+ C: "accounts",
+ Id: change.target,
+ Assert: txn.DocExists,
+ Update: M{"$inc": M{"balance": change.amount / 2}},
+ }, {
+ C: "accounts",
+ Id: change.origin,
+ Update: M{"$inc": M{"balance": -change.amount / 2}},
+ }, {
+ C: "accounts",
+ Id: change.target,
+ Update: M{"$inc": M{"balance": change.amount / 2}},
+ }}
+ default:
+ ops = []txn.Op{{
+ C: "accounts",
+ Id: change.origin,
+ Assert: M{"balance": M{"$gte": change.amount}},
+ Update: M{"$inc": M{"balance": -change.amount}},
+ }, {
+ C: "accounts",
+ Id: change.target,
+ Assert: txn.DocExists,
+ Update: M{"$inc": M{"balance": change.amount}},
+ }}
+ }
+
+ err := runner.Run(ops, change.id, nil)
+ if err != nil && err != txn.ErrAborted && err != txn.ErrChaos {
+ c.Check(err, IsNil)
+ }
+ n++
+ changes <- change
+ }
+ changes <- balanceChange{}
+ }()
+ }
+
+ alive := params.workers
+ changeLog := make([]balanceChange, 0, 1024)
+ for alive > 0 {
+ change := <-changes
+ if change.id == "" {
+ alive--
+ } else {
+ changeLog = append(changeLog, change)
+ }
+ }
+ c.Check(len(changeLog), Not(Equals), 0, Commentf("No operations were even attempted."))
+
+ txn.SetChaos(txn.Chaos{})
+ err := runner.ResumeAll()
+ c.Assert(err, IsNil)
+
+ n, err := accounts.Count()
+ c.Check(err, IsNil)
+ c.Check(n, Equals, params.accounts, Commentf("Number of accounts has changed."))
+
+ n, err = accounts.Find(M{"balance": M{"$lt": 0}}).Count()
+ c.Check(err, IsNil)
+ c.Check(n, Equals, 0, Commentf("There are %d accounts with negative balance.", n))
+
+ globalBalance := 0
+ iter := accounts.Find(nil).Iter()
+ account := Account{}
+ for iter.Next(&account) {
+ globalBalance += account.Balance
+ }
+ c.Check(iter.Close(), IsNil)
+ c.Check(globalBalance, Equals, params.accounts*300, Commentf("Total amount of money should be constant."))
+
+ // Compute and verify the exact final state of all accounts.
+ balance := make(map[int]int)
+ for i := 0; i < params.accounts; i++ {
+ balance[i] += 300
+ }
+ var applied, aborted int
+ for _, change := range changeLog {
+ err := runner.Resume(change.id)
+ if err == txn.ErrAborted {
+ aborted++
+ continue
+ } else if err != nil {
+ c.Fatalf("resuming %s failed: %v", change.id, err)
+ }
+ balance[change.origin] -= change.amount
+ balance[change.target] += change.amount
+ applied++
+ }
+ iter = accounts.Find(nil).Iter()
+ for iter.Next(&account) {
+ c.Assert(account.Balance, Equals, balance[account.Id])
+ }
+ c.Check(iter.Close(), IsNil)
+ c.Logf("Total transactions: %d (%d applied, %d aborted)", len(changeLog), applied, aborted)
+
+ if params.changelog {
+ n, err := tclog.Count()
+ c.Assert(err, IsNil)
+ // Check if the capped collection is full.
+ dummy := make([]byte, 1024)
+ tclog.Insert(M{"_id": bson.NewObjectId(), "dummy": dummy})
+ m, err := tclog.Count()
+ c.Assert(err, IsNil)
+ if m == n+1 {
+ // Wasn't full, so it must have seen it all.
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, applied)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/tarjan.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/tarjan.go
new file mode 100644
index 00000000000..e56541c9b62
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/tarjan.go
@@ -0,0 +1,94 @@
+package txn
+
+import (
+ "gopkg.in/mgo.v2/bson"
+ "sort"
+)
+
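+// tarjanSort returns the strongly-connected components of the successors
+// graph in reverse topological order, so iterating the result backwards
+// (as the flusher does) visits predecessor transactions before the
+// transactions that queued up behind them.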
+func tarjanSort(successors map[bson.ObjectId][]bson.ObjectId) [][]bson.ObjectId {
+ // http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
+ data := &tarjanData{
+ successors: successors,
+ nodes: make([]tarjanNode, 0, len(successors)),
+ index: make(map[bson.ObjectId]int, len(successors)),
+ }
+
+ for id := range successors {
+ id := bson.ObjectId(string(id))
+ if _, seen := data.index[id]; !seen {
+ data.strongConnect(id)
+ }
+ }
+
+ // Sort connected components to stabilize the algorithm.
+ for _, ids := range data.output {
+ if len(ids) > 1 {
+ sort.Sort(idList(ids))
+ }
+ }
+ return data.output
+}
+
+type tarjanData struct {
+ successors map[bson.ObjectId][]bson.ObjectId
+ output [][]bson.ObjectId
+
+ nodes []tarjanNode
+ stack []bson.ObjectId
+ index map[bson.ObjectId]int
+}
+
+type tarjanNode struct {
+ lowlink int
+ stacked bool
+}
+
+type idList []bson.ObjectId
+
+func (l idList) Len() int { return len(l) }
+func (l idList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l idList) Less(i, j int) bool { return l[i] < l[j] }
+
+func (data *tarjanData) strongConnect(id bson.ObjectId) *tarjanNode {
+ index := len(data.nodes)
+ data.index[id] = index
+ data.stack = append(data.stack, id)
+ data.nodes = append(data.nodes, tarjanNode{index, true})
+ node := &data.nodes[index]
+
+ for _, succid := range data.successors[id] {
+ succindex, seen := data.index[succid]
+ if !seen {
+ succnode := data.strongConnect(succid)
+ if succnode.lowlink < node.lowlink {
+ node.lowlink = succnode.lowlink
+ }
+ } else if data.nodes[succindex].stacked {
+ // Part of the current strongly-connected component.
+ if succindex < node.lowlink {
+ node.lowlink = succindex
+ }
+ }
+ }
+
+ if node.lowlink == index {
+ // Root node; pop stack and output new
+ // strongly-connected component.
+ var scc []bson.ObjectId
+ i := len(data.stack) - 1
+ for {
+ stackid := data.stack[i]
+ stackindex := data.index[stackid]
+ data.nodes[stackindex].stacked = false
+ scc = append(scc, stackid)
+ if stackindex == index {
+ break
+ }
+ i--
+ }
+ data.stack = data.stack[:i]
+ data.output = append(data.output, scc)
+ }
+
+ return node
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/tarjan_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/tarjan_test.go
new file mode 100644
index 00000000000..79745c39be6
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/tarjan_test.go
@@ -0,0 +1,44 @@
+package txn
+
+import (
+ "fmt"
+ "gopkg.in/mgo.v2/bson"
+ . "gopkg.in/check.v1"
+)
+
+type TarjanSuite struct{}
+
+var _ = Suite(TarjanSuite{})
+
+func bid(n int) bson.ObjectId {
+ return bson.ObjectId(fmt.Sprintf("%024d", n))
+}
+
+func bids(ns ...int) (ids []bson.ObjectId) {
+ for _, n := range ns {
+ ids = append(ids, bid(n))
+ }
+ return
+}
+
+func (TarjanSuite) TestExample(c *C) {
+ successors := map[bson.ObjectId][]bson.ObjectId{
+ bid(1): bids(2, 3),
+ bid(2): bids(1, 5),
+ bid(3): bids(4),
+ bid(4): bids(3, 5),
+ bid(5): bids(6),
+ bid(6): bids(7),
+ bid(7): bids(8),
+ bid(8): bids(6, 9),
+ bid(9): bids(),
+ }
+
+ c.Assert(tarjanSort(successors), DeepEquals, [][]bson.ObjectId{
+ bids(9),
+ bids(6, 7, 8),
+ bids(5),
+ bids(3, 4),
+ bids(1, 2),
+ })
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/txn.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/txn.go
new file mode 100644
index 00000000000..204b3cf1d8d
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/txn.go
@@ -0,0 +1,611 @@
+// Package txn implements support for multi-document transactions.
+//
+// For details check the following blog post:
+//
+// http://blog.labix.org/2012/08/22/multi-doc-transactions-for-mongodb
+//
+package txn
+
+import (
+ "encoding/binary"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+
+ crand "crypto/rand"
+ mrand "math/rand"
+)
+
+type state int
+
+const (
+ tpreparing state = 1 // One or more documents not prepared
+ tprepared state = 2 // Prepared but not yet ready to run
+ taborting state = 3 // Assertions failed, cleaning up
+ tapplying state = 4 // Changes are in progress
+ taborted state = 5 // Pre-conditions failed, nothing done
+ tapplied state = 6 // All changes applied
+)
+
+func (s state) String() string {
+ switch s {
+ case tpreparing:
+ return "preparing"
+ case tprepared:
+ return "prepared"
+ case taborting:
+ return "aborting"
+ case tapplying:
+ return "applying"
+ case taborted:
+ return "aborted"
+ case tapplied:
+ return "applied"
+ }
+ panic(fmt.Errorf("unknown state: %d", s))
+}
+
+var rand *mrand.Rand
+var randmu sync.Mutex
+
+func init() {
+ var seed int64
+ err := binary.Read(crand.Reader, binary.BigEndian, &seed)
+ if err != nil {
+ panic(err)
+ }
+ rand = mrand.New(mrand.NewSource(seed))
+}
+
+type transaction struct {
+ Id bson.ObjectId `bson:"_id"`
+ State state `bson:"s"`
+ Info interface{} `bson:"i,omitempty"`
+ Ops []Op `bson:"o"`
+ Nonce string `bson:"n,omitempty"`
+ Revnos []int64 `bson:"r,omitempty"`
+
+ docKeysCached docKeys
+}
+
+func (t *transaction) String() string {
+ if t.Nonce == "" {
+ return t.Id.Hex()
+ }
+ return string(t.token())
+}
+
+func (t *transaction) done() bool {
+ return t.State == tapplied || t.State == taborted
+}
+
+func (t *transaction) token() token {
+ if t.Nonce == "" {
+ panic("transaction has no nonce")
+ }
+ return tokenFor(t)
+}
+
+func (t *transaction) docKeys() docKeys {
+ if t.docKeysCached != nil {
+ return t.docKeysCached
+ }
+ dkeys := make(docKeys, 0, len(t.Ops))
+NextOp:
+ for _, op := range t.Ops {
+ dkey := op.docKey()
+ for i := range dkeys {
+ if dkey == dkeys[i] {
+ continue NextOp
+ }
+ }
+ dkeys = append(dkeys, dkey)
+ }
+ sort.Sort(dkeys)
+ t.docKeysCached = dkeys
+ return dkeys
+}
+
+// tokenFor returns a unique transaction token that
+// is composed by t's id and a nonce. If t already has
+// a nonce assigned to it, it will be used, otherwise
+// a new nonce will be generated.
+func tokenFor(t *transaction) token {
+ nonce := t.Nonce
+ if nonce == "" {
+ nonce = newNonce()
+ }
+ return token(t.Id.Hex() + "_" + nonce)
+}
+
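+// newNonce returns eight hex characters drawn from a mutex-protected
+// PRNG, encoding the nibbles of a random uint32 least-significant first.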
+func newNonce() string {
+ randmu.Lock()
+ r := rand.Uint32()
+ randmu.Unlock()
+ n := make([]byte, 8)
+ for i := uint(0); i < 8; i++ {
+ n[i] = "0123456789abcdef"[(r>>(4*i))&0xf]
+ }
+ return string(n)
+}
+
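+// A token is the 24-character hex transaction id, an underscore, and an
+// 8-character hex nonce, e.g. "aabbccddeeff00112233aabb_0f9a8c31"
+// (an illustrative value; see tokenFor and newNonce).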
+type token string
+
+func (tt token) id() bson.ObjectId { return bson.ObjectIdHex(string(tt[:24])) }
+func (tt token) nonce() string { return string(tt[25:]) }
+
+// Op represents an operation to a single document that may be
+// applied as part of a transaction with other operations.
+type Op struct {
+ // C and Id identify the collection and document this operation
+ // refers to. Id is matched against the "_id" document field.
+ C string `bson:"c"`
+ Id interface{} `bson:"d"`
+
+ // Assert optionally holds a query document that is used to
+ // test the operation document at the time the transaction is
+ // going to be applied. The assertions for all operations in
+ // a transaction are tested before any changes take place,
+ // and the transaction is entirely aborted if any of them
+ // fails. This is also the only way to prevent a transaction
+ // from being applied (the transaction continues despite
+ // the outcome of Insert, Update, and Remove).
+ Assert interface{} `bson:"a,omitempty"`
+
+ // The Insert, Update and Remove fields describe the mutation
+ // intended by the operation. At most one of them may be set
+ // per operation. If none are set, Assert must be set and the
+ // operation becomes a read-only test.
+ //
+ // Insert holds the document to be inserted at the time the
+ // transaction is applied. The Id field will be inserted
+ // into the document automatically as its _id field. The
+ // transaction will continue even if the document already
+ // exists. Use Assert with txn.DocMissing if the insertion is
+ // required.
+ //
+ // Update holds the update document to be applied at the time
+ // the transaction is applied. The transaction will continue
+ // even if a document with Id is missing. Use Assert to
+ // test for the document's presence or its contents.
+ //
+ // Remove indicates whether to remove the document with Id.
+ // The transaction continues even if the document doesn't yet
+ // exist at the time the transaction is applied. Use Assert
+ // with txn.DocExists to make sure it will be removed.
+ Insert interface{} `bson:"i,omitempty"`
+ Update interface{} `bson:"u,omitempty"`
+ Remove bool `bson:"r,omitempty"`
+}
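+
+// An illustrative two-document transfer patterned after the simulation
+// tests in this package (collection, ids, and field names are examples
+// only):
+//
+//	ops := []txn.Op{{
+//		C:      "accounts",
+//		Id:     0,
+//		Assert: bson.M{"balance": bson.M{"$gte": 100}},
+//		Update: bson.M{"$inc": bson.M{"balance": -100}},
+//	}, {
+//		C:      "accounts",
+//		Id:     1,
+//		Assert: txn.DocExists,
+//		Update: bson.M{"$inc": bson.M{"balance": 100}},
+//	}}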
+
+func (op *Op) isChange() bool {
+ return op.Update != nil || op.Insert != nil || op.Remove
+}
+
+func (op *Op) docKey() docKey {
+ return docKey{op.C, op.Id}
+}
+
+func (op *Op) name() string {
+ switch {
+ case op.Update != nil:
+ return "update"
+ case op.Insert != nil:
+ return "insert"
+ case op.Remove:
+ return "remove"
+ case op.Assert != nil:
+ return "assert"
+ }
+ return "none"
+}
+
+const (
+ // DocExists and DocMissing may be used on an operation's
+ // Assert value to assert that the document with the given
+ // Id exists or does not exist, respectively.
+ DocExists = "d+"
+ DocMissing = "d-"
+)
+
+// A Runner applies operations as part of a transaction onto any number
+// of collections within a database. See the Run method for details.
+type Runner struct {
+ tc *mgo.Collection // txns
+ sc *mgo.Collection // stash
+ lc *mgo.Collection // log
+}
+
+// NewRunner returns a new transaction runner that uses tc to hold its
+// transactions.
+//
+// Multiple transaction collections may exist in a single database, but
+// all collections that are touched by operations in a given transaction
+// collection must be handled exclusively by it.
+//
+// A second collection with the same name as tc but suffixed by ".stash"
+// will be used for implementing the transactional behavior of insert
+// and remove operations.
+func NewRunner(tc *mgo.Collection) *Runner {
+ return &Runner{tc, tc.Database.C(tc.Name + ".stash"), nil}
+}
+
+var ErrAborted = fmt.Errorf("transaction aborted")
+
+// Run creates a new transaction with ops and runs it immediately.
+// The id parameter specifies the transaction id, and may be written
+// down ahead of time to later verify the success of the change and
+// resume it, when the procedure is interrupted for any reason. If
+// empty, a random id will be generated.
+// The info parameter, if not nil, is included under the "i"
+// field of the transaction document.
+//
+// Operations across documents are not atomically applied, but are
+// guaranteed to be eventually all applied in the order provided or
+// all aborted, as long as the affected documents are only modified
+// through transactions. If documents are simultaneously modified
+// by transactions and out of transactions the behavior is undefined.
+//
+// If Run returns no errors, all operations were applied successfully.
+// If it returns ErrAborted, one or more operations can't be applied
+// and the transaction was entirely aborted with no changes performed.
+// Otherwise, if the transaction is interrupted while running for any
+// reason, it may be resumed explicitly or by attempting to apply
+// another transaction on any of the documents targeted by ops, as
+// long as the interruption was made after the transaction document
+// itself was inserted. Run Resume with the obtained transaction id
+// to confirm whether the transaction was applied or not.
+//
+// Any number of transactions may be run concurrently, with one
+// runner or many.
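+//
+// A minimal sketch of typical use (names are illustrative):
+//
+//	runner := txn.NewRunner(db.C("txns"))
+//	err := runner.Run(ops, "", nil)
+//	if err == txn.ErrAborted {
+//		// Assertions failed; no changes were applied.
+//	}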
+func (r *Runner) Run(ops []Op, id bson.ObjectId, info interface{}) (err error) {
+ const efmt = "error in transaction op %d: %s"
+ for i := range ops {
+ op := &ops[i]
+ if op.C == "" || op.Id == nil {
+ return fmt.Errorf(efmt, i, "C or Id missing")
+ }
+ changes := 0
+ if op.Insert != nil {
+ changes++
+ }
+ if op.Update != nil {
+ changes++
+ }
+ if op.Remove {
+ changes++
+ }
+ if changes > 1 {
+ return fmt.Errorf(efmt, i, "more than one of Insert/Update/Remove set")
+ }
+ if changes == 0 && op.Assert == nil {
+ return fmt.Errorf(efmt, i, "none of Assert/Insert/Update/Remove set")
+ }
+ }
+ if id == "" {
+ id = bson.NewObjectId()
+ }
+
+ // Insert transaction sooner rather than later, to stay on the safer side.
+ t := transaction{
+ Id: id,
+ Ops: ops,
+ State: tpreparing,
+ Info: info,
+ }
+ if err = r.tc.Insert(&t); err != nil {
+ return err
+ }
+ if err = flush(r, &t); err != nil {
+ return err
+ }
+ if t.State == taborted {
+ return ErrAborted
+ } else if t.State != tapplied {
+ panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
+ }
+ return nil
+}
+
+// ResumeAll resumes all pending transactions. All ErrAborted errors
+// from individual transactions are ignored.
+func (r *Runner) ResumeAll() (err error) {
+ debugf("Resuming all unfinished transactions")
+ iter := r.tc.Find(bson.D{{"s", bson.D{{"$in", []state{tpreparing, tprepared, tapplying}}}}}).Iter()
+ var t transaction
+ for iter.Next(&t) {
+ if t.State == tapplied || t.State == taborted {
+ continue
+ }
+ debugf("Resuming %s from %q", t.Id, t.State)
+ if err := flush(r, &t); err != nil {
+ return err
+ }
+ if !t.done() {
+ panic(fmt.Errorf("invalid state for %s after flush: %q", &t, t.State))
+ }
+ }
+ return nil
+}
+
+// Resume resumes the transaction with id. It returns mgo.ErrNotFound
+// if the transaction is not found. Otherwise, it has the same semantics
+// as the Run method after the transaction is inserted.
+func (r *Runner) Resume(id bson.ObjectId) (err error) {
+ t, err := r.load(id)
+ if err != nil {
+ return err
+ }
+ if !t.done() {
+ debugf("Resuming %s from %q", t, t.State)
+ if err := flush(r, t); err != nil {
+ return err
+ }
+ }
+ if t.State == taborted {
+ return ErrAborted
+ } else if t.State != tapplied {
+ panic(fmt.Errorf("invalid state for %s after flush: %q", t, t.State))
+ }
+ return nil
+}
+
+// ChangeLog enables logging of changes to the given collection
+// every time a transaction that modifies content is done being
+// applied.
+//
+// Saved documents are in the format:
+//
+// {"_id": <txn id>, <collection>: {"d": [<doc id>, ...], "r": [<doc revno>, ...]}}
+//
+// The document revision is the value of the txn-revno field after
+// the change has been applied. Negative values indicate the document
+// was not present in the collection. Revisions will not change when
+// updates or removes are applied to missing documents or inserts are
+// attempted when the document isn't present.
+func (r *Runner) ChangeLog(logc *mgo.Collection) {
+ r.lc = logc
+}
+
+// PurgeMissing removes from collections any state that refers to transaction
+// documents that for whatever reason have been lost from the system (removed
+// by accident or lost in a hard crash, for example).
+//
+// This method should very rarely be needed, if at all, and should never be
+// used during the normal operation of an application. Its purpose is to put
+// a system that has seen unavoidable corruption back in a working state.
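+//
+// Example (collection names are illustrative):
+//
+//	err := runner.PurgeMissing("accounts", "people")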
+func (r *Runner) PurgeMissing(collections ...string) error {
+ type M map[string]interface{}
+ type S []interface{}
+
+ type TDoc struct {
+ Id interface{} "_id"
+ TxnQueue []string "txn-queue"
+ }
+
+ found := make(map[bson.ObjectId]bool)
+
+ sort.Strings(collections)
+ for _, collection := range collections {
+ c := r.tc.Database.C(collection)
+ iter := c.Find(nil).Select(bson.M{"_id": 1, "txn-queue": 1}).Iter()
+ var tdoc TDoc
+ for iter.Next(&tdoc) {
+ for _, txnToken := range tdoc.TxnQueue {
+ txnId := bson.ObjectIdHex(txnToken[:24])
+ if found[txnId] {
+ continue
+ }
+ if r.tc.FindId(txnId).One(nil) == nil {
+ found[txnId] = true
+ continue
+ }
+ logf("WARNING: purging from document %s/%v the missing transaction id %s", collection, tdoc.Id, txnId)
+ err := c.UpdateId(tdoc.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
+ if err != nil {
+ return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
+ }
+ }
+ }
+ if err := iter.Close(); err != nil {
+ return fmt.Errorf("transaction queue iteration error for %s: %v", collection, err)
+ }
+ }
+
+ type StashTDoc struct {
+ Id docKey "_id"
+ TxnQueue []string "txn-queue"
+ }
+
+ iter := r.sc.Find(nil).Select(bson.M{"_id": 1, "txn-queue": 1}).Iter()
+ var stdoc StashTDoc
+ for iter.Next(&stdoc) {
+ for _, txnToken := range stdoc.TxnQueue {
+ txnId := bson.ObjectIdHex(txnToken[:24])
+ if found[txnId] {
+ continue
+ }
+ if r.tc.FindId(txnId).One(nil) == nil {
+ found[txnId] = true
+ continue
+ }
+ logf("WARNING: purging from stash document %s/%v the missing transaction id %s", stdoc.Id.C, stdoc.Id.Id, txnId)
+ err := r.sc.UpdateId(stdoc.Id, M{"$pull": M{"txn-queue": M{"$regex": "^" + txnId.Hex() + "_*"}}})
+ if err != nil {
+ return fmt.Errorf("error purging missing transaction %s: %v", txnId.Hex(), err)
+ }
+ }
+ }
+ if err := iter.Close(); err != nil {
+ return fmt.Errorf("transaction stash iteration error: %v", err)
+ }
+
+ return nil
+}
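+
+// A recovery session might look like this sketch (assuming "accounts"
+// is one of the affected collections):
+//
+//     if err := runner.PurgeMissing("accounts"); err != nil {
+//         return err
+//     }
+//     // With the dangling txn-queue entries removed, the remaining
+//     // pending transactions can be flushed as usual.
+//     return runner.ResumeAll()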
+
+func (r *Runner) load(id bson.ObjectId) (*transaction, error) {
+ var t transaction
+ err := r.tc.FindId(id).One(&t)
+ if err == mgo.ErrNotFound {
+ return nil, fmt.Errorf("cannot find transaction %s", id)
+ } else if err != nil {
+ return nil, err
+ }
+ return &t, nil
+}
+
+type typeNature int
+
+const (
+ // The order of these values matters. Transactions
+ // from applications using different ordering will
+ // be incompatible with each other.
+ _ typeNature = iota
+ natureString
+ natureInt
+ natureFloat
+ natureBool
+ natureStruct
+)
+
+func valueNature(v interface{}) (value interface{}, nature typeNature) {
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.String:
+ return rv.String(), natureString
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int(), natureInt
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return int64(rv.Uint()), natureInt
+ case reflect.Float32, reflect.Float64:
+ return rv.Float(), natureFloat
+ case reflect.Bool:
+ return rv.Bool(), natureBool
+ case reflect.Struct:
+ return v, natureStruct
+ }
+ panic("document id type unsupported by txn: " + rv.Kind().String())
+}
+
+type docKey struct {
+ C string
+ Id interface{}
+}
+
+type docKeys []docKey
+
+func (ks docKeys) Len() int { return len(ks) }
+func (ks docKeys) Swap(i, j int) { ks[i], ks[j] = ks[j], ks[i] }
+func (ks docKeys) Less(i, j int) bool {
+ a, b := ks[i], ks[j]
+ if a.C != b.C {
+ return a.C < b.C
+ }
+ return valuecmp(a.Id, b.Id) == -1
+}
+
+func valuecmp(a, b interface{}) int {
+ av, an := valueNature(a)
+ bv, bn := valueNature(b)
+ if an < bn {
+ return -1
+ }
+ if an > bn {
+ return 1
+ }
+
+ if av == bv {
+ return 0
+ }
+ var less bool
+ switch an {
+ case natureString:
+ less = av.(string) < bv.(string)
+ case natureInt:
+ less = av.(int64) < bv.(int64)
+ case natureFloat:
+ less = av.(float64) < bv.(float64)
+ case natureBool:
+ less = !av.(bool) && bv.(bool)
+ case natureStruct:
+ less = structcmp(av, bv) == -1
+ default:
+ panic("unreachable")
+ }
+ if less {
+ return -1
+ }
+ return 1
+}
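+
+// For example, under this ordering all strings sort before all ints,
+// which in turn sort before all floats, and integer widths are
+// normalized before comparison:
+//
+//     valuecmp("zebra", 1) // -1: natureString < natureInt
+//     valuecmp(2, 1.5)     // -1: natureInt < natureFloat
+//     valuecmp(int8(3), 3) //  0: both normalize to int64(3)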
+
+func structcmp(a, b interface{}) int {
+ av := reflect.ValueOf(a)
+ bv := reflect.ValueOf(b)
+
+ var ai, bi = 0, 0
+ var an, bn = av.NumField(), bv.NumField()
+ var avi, bvi interface{}
+ var af, bf reflect.StructField
+ for {
+ for ai < an {
+ af = av.Type().Field(ai)
+ if isExported(af.Name) {
+ avi = av.Field(ai).Interface()
+ ai++
+ break
+ }
+ ai++
+ }
+ for bi < bn {
+ bf = bv.Type().Field(bi)
+ if isExported(bf.Name) {
+ bvi = bv.Field(bi).Interface()
+ bi++
+ break
+ }
+ bi++
+ }
+ if n := valuecmp(avi, bvi); n != 0 {
+ return n
+ }
+ nameA := getFieldName(af)
+ nameB := getFieldName(bf)
+ if nameA < nameB {
+ return -1
+ }
+ if nameA > nameB {
+ return 1
+ }
+ if ai == an && bi == bn {
+ return 0
+ }
+ if ai == an || bi == bn {
+ if ai == an {
+ return -1
+ }
+ return 1
+ }
+ }
+ panic("unreachable")
+}
+
+func isExported(name string) bool {
+ a := name[0]
+ return a >= 'A' && a <= 'Z'
+}
+
+func getFieldName(f reflect.StructField) string {
+ name := f.Tag.Get("bson")
+ if i := strings.Index(name, ","); i >= 0 {
+ name = name[:i]
+ }
+ if name == "" {
+ name = strings.ToLower(f.Name)
+ }
+ return name
+}
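+
+// For example (illustrative field declarations):
+//
+//     FirstName string `bson:"first,omitempty"` // getFieldName: "first"
+//     LastName  string                          // getFieldName: "lastname"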
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/txn_test.go b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/txn_test.go
new file mode 100644
index 00000000000..12923ca1209
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/mgo.v2/txn/txn_test.go
@@ -0,0 +1,778 @@
+package txn_test
+
+import (
+ "flag"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ . "gopkg.in/check.v1"
+ "gopkg.in/mgo.v2"
+ "gopkg.in/mgo.v2/bson"
+ "gopkg.in/mgo.v2/dbtest"
+ "gopkg.in/mgo.v2/txn"
+)
+
+func TestAll(t *testing.T) {
+ TestingT(t)
+}
+
+type S struct {
+ server dbtest.DBServer
+ session *mgo.Session
+ db *mgo.Database
+ tc, sc *mgo.Collection
+ accounts *mgo.Collection
+ runner *txn.Runner
+}
+
+var _ = Suite(&S{})
+
+type M map[string]interface{}
+
+func (s *S) SetUpSuite(c *C) {
+ s.server.SetPath(c.MkDir())
+}
+
+func (s *S) TearDownSuite(c *C) {
+ s.server.Stop()
+}
+
+func (s *S) SetUpTest(c *C) {
+ s.server.Wipe()
+
+ txn.SetChaos(txn.Chaos{})
+ txn.SetLogger(c)
+ txn.SetDebug(true)
+
+ s.session = s.server.Session()
+ s.db = s.session.DB("test")
+ s.tc = s.db.C("tc")
+ s.sc = s.db.C("tc.stash")
+ s.accounts = s.db.C("accounts")
+ s.runner = txn.NewRunner(s.tc)
+}
+
+func (s *S) TearDownTest(c *C) {
+ txn.SetLogger(nil)
+ txn.SetDebug(false)
+ s.session.Close()
+}
+
+type Account struct {
+ Id int `bson:"_id"`
+ Balance int
+}
+
+func (s *S) TestDocExists(c *C) {
+ err := s.accounts.Insert(M{"_id": 0, "balance": 300})
+ c.Assert(err, IsNil)
+
+ exists := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Assert: txn.DocExists,
+ }}
+ missing := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Assert: txn.DocMissing,
+ }}
+
+ err = s.runner.Run(exists, "", nil)
+ c.Assert(err, IsNil)
+ err = s.runner.Run(missing, "", nil)
+ c.Assert(err, Equals, txn.ErrAborted)
+
+ err = s.accounts.RemoveId(0)
+ c.Assert(err, IsNil)
+
+ err = s.runner.Run(exists, "", nil)
+ c.Assert(err, Equals, txn.ErrAborted)
+ err = s.runner.Run(missing, "", nil)
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestInsert(c *C) {
+ err := s.accounts.Insert(M{"_id": 0, "balance": 300})
+ c.Assert(err, IsNil)
+
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Insert: M{"balance": 200},
+ }}
+
+ err = s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ var account Account
+ err = s.accounts.FindId(0).One(&account)
+ c.Assert(err, IsNil)
+ c.Assert(account.Balance, Equals, 300)
+
+ ops[0].Id = 1
+ err = s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ err = s.accounts.FindId(1).One(&account)
+ c.Assert(err, IsNil)
+ c.Assert(account.Balance, Equals, 200)
+}
+
+func (s *S) TestInsertStructID(c *C) {
+ type id struct {
+ FirstName string
+ LastName string
+ }
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: id{FirstName: "John", LastName: "Jones"},
+ Assert: txn.DocMissing,
+ Insert: M{"balance": 200},
+ }, {
+ C: "accounts",
+ Id: id{FirstName: "Sally", LastName: "Smith"},
+ Assert: txn.DocMissing,
+ Insert: M{"balance": 800},
+ }}
+
+ err := s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ n, err := s.accounts.Find(nil).Count()
+ c.Assert(err, IsNil)
+ c.Assert(n, Equals, 2)
+}
+
+func (s *S) TestRemove(c *C) {
+ err := s.accounts.Insert(M{"_id": 0, "balance": 300})
+ c.Assert(err, IsNil)
+
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Remove: true,
+ }}
+
+ err = s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ err = s.accounts.FindId(0).One(nil)
+ c.Assert(err, Equals, mgo.ErrNotFound)
+
+ err = s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+}
+
+func (s *S) TestUpdate(c *C) {
+ var err error
+ err = s.accounts.Insert(M{"_id": 0, "balance": 200})
+ c.Assert(err, IsNil)
+ err = s.accounts.Insert(M{"_id": 1, "balance": 200})
+ c.Assert(err, IsNil)
+
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Update: M{"$inc": M{"balance": 100}},
+ }}
+
+ err = s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ var account Account
+ err = s.accounts.FindId(0).One(&account)
+ c.Assert(err, IsNil)
+ c.Assert(account.Balance, Equals, 300)
+
+ ops[0].Id = 1
+
+ err = s.accounts.FindId(1).One(&account)
+ c.Assert(err, IsNil)
+ c.Assert(account.Balance, Equals, 200)
+}
+
+func (s *S) TestInsertUpdate(c *C) {
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Insert: M{"_id": 0, "balance": 200},
+ }, {
+ C: "accounts",
+ Id: 0,
+ Update: M{"$inc": M{"balance": 100}},
+ }}
+
+ err := s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ var account Account
+ err = s.accounts.FindId(0).One(&account)
+ c.Assert(err, IsNil)
+ c.Assert(account.Balance, Equals, 300)
+
+ err = s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ err = s.accounts.FindId(0).One(&account)
+ c.Assert(err, IsNil)
+ c.Assert(account.Balance, Equals, 400)
+}
+
+func (s *S) TestUpdateInsert(c *C) {
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Update: M{"$inc": M{"balance": 100}},
+ }, {
+ C: "accounts",
+ Id: 0,
+ Insert: M{"_id": 0, "balance": 200},
+ }}
+
+ err := s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ var account Account
+ err = s.accounts.FindId(0).One(&account)
+ c.Assert(err, IsNil)
+ c.Assert(account.Balance, Equals, 200)
+
+ err = s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ err = s.accounts.FindId(0).One(&account)
+ c.Assert(err, IsNil)
+ c.Assert(account.Balance, Equals, 300)
+}
+
+func (s *S) TestInsertRemoveInsert(c *C) {
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Insert: M{"_id": 0, "balance": 200},
+ }, {
+ C: "accounts",
+ Id: 0,
+ Remove: true,
+ }, {
+ C: "accounts",
+ Id: 0,
+ Insert: M{"_id": 0, "balance": 300},
+ }}
+
+ err := s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ var account Account
+ err = s.accounts.FindId(0).One(&account)
+ c.Assert(err, IsNil)
+ c.Assert(account.Balance, Equals, 300)
+}
+
+func (s *S) TestQueueStashing(c *C) {
+ txn.SetChaos(txn.Chaos{
+ KillChance: 1,
+ Breakpoint: "set-applying",
+ })
+
+ opses := [][]txn.Op{{{
+ C: "accounts",
+ Id: 0,
+ Insert: M{"balance": 100},
+ }}, {{
+ C: "accounts",
+ Id: 0,
+ Remove: true,
+ }}, {{
+ C: "accounts",
+ Id: 0,
+ Insert: M{"balance": 200},
+ }}, {{
+ C: "accounts",
+ Id: 0,
+ Update: M{"$inc": M{"balance": 100}},
+ }}}
+
+ var last bson.ObjectId
+ for _, ops := range opses {
+ last = bson.NewObjectId()
+ err := s.runner.Run(ops, last, nil)
+ c.Assert(err, Equals, txn.ErrChaos)
+ }
+
+ txn.SetChaos(txn.Chaos{})
+ err := s.runner.Resume(last)
+ c.Assert(err, IsNil)
+
+ var account Account
+ err = s.accounts.FindId(0).One(&account)
+ c.Assert(err, IsNil)
+ c.Assert(account.Balance, Equals, 300)
+}
+
+func (s *S) TestInfo(c *C) {
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Assert: txn.DocMissing,
+ }}
+
+ id := bson.NewObjectId()
+ err := s.runner.Run(ops, id, M{"n": 42})
+ c.Assert(err, IsNil)
+
+ var t struct{ I struct{ N int } }
+ err = s.tc.FindId(id).One(&t)
+ c.Assert(err, IsNil)
+ c.Assert(t.I.N, Equals, 42)
+}
+
+func (s *S) TestErrors(c *C) {
+ doc := bson.M{"foo": 1}
+ tests := []txn.Op{{
+ C: "c",
+ Id: 0,
+ }, {
+ C: "c",
+ Id: 0,
+ Insert: doc,
+ Remove: true,
+ }, {
+ C: "c",
+ Id: 0,
+ Insert: doc,
+ Update: doc,
+ }, {
+ C: "c",
+ Id: 0,
+ Update: doc,
+ Remove: true,
+ }, {
+ C: "c",
+ Assert: doc,
+ }, {
+ Id: 0,
+ Assert: doc,
+ }}
+
+ txn.SetChaos(txn.Chaos{KillChance: 1.0})
+ for _, op := range tests {
+ c.Logf("op: %v", op)
+ err := s.runner.Run([]txn.Op{op}, "", nil)
+ c.Assert(err, ErrorMatches, "error in transaction op 0: .*")
+ }
+}
+
+func (s *S) TestAssertNestedOr(c *C) {
+ // Assert uses $or internally. Ensure nesting works.
+ err := s.accounts.Insert(M{"_id": 0, "balance": 300})
+ c.Assert(err, IsNil)
+
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Assert: bson.D{{"$or", []bson.D{{{"balance", 100}}, {{"balance", 300}}}}},
+ Update: bson.D{{"$inc", bson.D{{"balance", 100}}}},
+ }}
+
+ err = s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ var account Account
+ err = s.accounts.FindId(0).One(&account)
+ c.Assert(err, IsNil)
+ c.Assert(account.Balance, Equals, 400)
+}
+
+func (s *S) TestVerifyFieldOrdering(c *C) {
+ // Used to have a map in certain operations, which means
+ // the ordering of fields would be messed up.
+ fields := bson.D{{"a", 1}, {"b", 2}, {"c", 3}}
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Insert: fields,
+ }}
+
+ err := s.runner.Run(ops, "", nil)
+ c.Assert(err, IsNil)
+
+ var d bson.D
+ err = s.accounts.FindId(0).One(&d)
+ c.Assert(err, IsNil)
+
+ var filtered bson.D
+ for _, e := range d {
+ switch e.Name {
+ case "a", "b", "c":
+ filtered = append(filtered, e)
+ }
+ }
+ c.Assert(filtered, DeepEquals, fields)
+}
+
+func (s *S) TestChangeLog(c *C) {
+ chglog := s.db.C("chglog")
+ s.runner.ChangeLog(chglog)
+
+ ops := []txn.Op{{
+ C: "debts",
+ Id: 0,
+ Assert: txn.DocMissing,
+ }, {
+ C: "accounts",
+ Id: 0,
+ Insert: M{"balance": 300},
+ }, {
+ C: "accounts",
+ Id: 1,
+ Insert: M{"balance": 300},
+ }, {
+ C: "people",
+ Id: "joe",
+ Insert: M{"accounts": []int64{0, 1}},
+ }}
+ id := bson.NewObjectId()
+ err := s.runner.Run(ops, id, nil)
+ c.Assert(err, IsNil)
+
+ type IdList []interface{}
+ type Log struct {
+ Docs IdList "d"
+ Revnos []int64 "r"
+ }
+ var m map[string]*Log
+ err = chglog.FindId(id).One(&m)
+ c.Assert(err, IsNil)
+
+ c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{2, 2}})
+ c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{2}})
+ c.Assert(m["debts"], IsNil)
+
+ ops = []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Update: M{"$inc": M{"balance": 100}},
+ }, {
+ C: "accounts",
+ Id: 1,
+ Update: M{"$inc": M{"balance": 100}},
+ }}
+ id = bson.NewObjectId()
+ err = s.runner.Run(ops, id, nil)
+ c.Assert(err, IsNil)
+
+ m = nil
+ err = chglog.FindId(id).One(&m)
+ c.Assert(err, IsNil)
+
+ c.Assert(m["accounts"], DeepEquals, &Log{IdList{0, 1}, []int64{3, 3}})
+ c.Assert(m["people"], IsNil)
+
+ ops = []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Remove: true,
+ }, {
+ C: "people",
+ Id: "joe",
+ Remove: true,
+ }}
+ id = bson.NewObjectId()
+ err = s.runner.Run(ops, id, nil)
+ c.Assert(err, IsNil)
+
+ m = nil
+ err = chglog.FindId(id).One(&m)
+ c.Assert(err, IsNil)
+
+ c.Assert(m["accounts"], DeepEquals, &Log{IdList{0}, []int64{-4}})
+ c.Assert(m["people"], DeepEquals, &Log{IdList{"joe"}, []int64{-3}})
+}
+
+func (s *S) TestPurgeMissing(c *C) {
+ txn.SetChaos(txn.Chaos{
+ KillChance: 1,
+ Breakpoint: "set-applying",
+ })
+
+ err := s.accounts.Insert(M{"_id": 0, "balance": 100})
+ c.Assert(err, IsNil)
+ err = s.accounts.Insert(M{"_id": 1, "balance": 100})
+ c.Assert(err, IsNil)
+
+ ops1 := []txn.Op{{
+ C: "accounts",
+ Id: 3,
+ Insert: M{"balance": 100},
+ }}
+
+ ops2 := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Remove: true,
+ }, {
+ C: "accounts",
+ Id: 1,
+ Update: M{"$inc": M{"balance": 100}},
+ }, {
+ C: "accounts",
+ Id: 2,
+ Insert: M{"balance": 100},
+ }}
+
+ first := bson.NewObjectId()
+ c.Logf("---- Running ops1 under transaction %q, to be canceled by chaos", first.Hex())
+ err = s.runner.Run(ops1, first, nil)
+ c.Assert(err, Equals, txn.ErrChaos)
+
+ last := bson.NewObjectId()
+ c.Logf("---- Running ops2 under transaction %q, to be canceled by chaos", last.Hex())
+ err = s.runner.Run(ops2, last, nil)
+ c.Assert(err, Equals, txn.ErrChaos)
+
+ c.Logf("---- Removing transaction %q", last.Hex())
+ err = s.tc.RemoveId(last)
+ c.Assert(err, IsNil)
+
+ c.Logf("---- Disabling chaos and attempting to resume all")
+ txn.SetChaos(txn.Chaos{})
+ err = s.runner.ResumeAll()
+ c.Assert(err, IsNil)
+
+ again := bson.NewObjectId()
+ c.Logf("---- Running ops2 again under transaction %q, to fail for missing transaction", again.Hex())
+ err = s.runner.Run(ops2, again, nil)
+ c.Assert(err, ErrorMatches, "cannot find transaction .*")
+
+ c.Logf("---- Purging missing transactions")
+ err = s.runner.PurgeMissing("accounts")
+ c.Assert(err, IsNil)
+
+ c.Logf("---- Resuming pending transactions")
+ err = s.runner.ResumeAll()
+ c.Assert(err, IsNil)
+
+ expect := []struct{ Id, Balance int }{
+ {0, -1},
+ {1, 200},
+ {2, 100},
+ {3, 100},
+ }
+ var got Account
+ for _, want := range expect {
+ err = s.accounts.FindId(want.Id).One(&got)
+ if want.Balance == -1 {
+ if err != mgo.ErrNotFound {
+ c.Errorf("Account %d should not exist, find got err=%#v", want.Id, err)
+ }
+ } else if err != nil {
+ c.Errorf("Account %d should have balance of %d, but wasn't found", want.Id, want.Balance)
+ } else if got.Balance != want.Balance {
+ c.Errorf("Account %d should have balance of %d, got %d", want.Id, want.Balance, got.Balance)
+ }
+ }
+}
+
+func (s *S) TestTxnQueueStashStressTest(c *C) {
+ txn.SetChaos(txn.Chaos{
+ SlowdownChance: 0.3,
+ Slowdown: 50 * time.Millisecond,
+ })
+ defer txn.SetChaos(txn.Chaos{})
+
+ // So we can run more iterations of the test in less time.
+ txn.SetDebug(false)
+
+ const runners = 10
+ const inserts = 10
+ const repeat = 100
+
+ for r := 0; r < repeat; r++ {
+ var wg sync.WaitGroup
+ wg.Add(runners)
+ for i := 0; i < runners; i++ {
+ go func(i, r int) {
+ defer wg.Done()
+
+ session := s.session.New()
+ defer session.Close()
+ runner := txn.NewRunner(s.tc.With(session))
+
+ for j := 0; j < inserts; j++ {
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: fmt.Sprintf("insert-%d-%d", r, j),
+ Insert: bson.M{
+ "added-by": i,
+ },
+ }}
+ err := runner.Run(ops, "", nil)
+ if err != txn.ErrAborted {
+ c.Check(err, IsNil)
+ }
+ }
+ }(i, r)
+ }
+ wg.Wait()
+ }
+}
+
+func (s *S) TestPurgeMissingPipelineSizeLimit(c *C) {
+ // This test ensures that PurgeMissing can handle very large
+ // txn-queue fields. Previous iterations of PurgeMissing would
+ // trigger a 16MB aggregation pipeline result size limit when run
+ // against a documents or stashes with large numbers of txn-queue
+ // against documents or stashes with large numbers of txn-queue
+ // to work around this limit.
+
+ // The pipeline result size limitation was removed from MongoDB in 2.6 so
+ // this test is only run against older MongoDB versions.
+ build, err := s.session.BuildInfo()
+ c.Assert(err, IsNil)
+ if build.VersionAtLeast(2, 6) {
+ c.Skip("This tests a problem that can only happen with MongoDB < 2.6 ")
+ }
+
+ // Insert a single document to work with.
+ err = s.accounts.Insert(M{"_id": 0, "balance": 100})
+ c.Assert(err, IsNil)
+
+ ops := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Update: M{"$inc": M{"balance": 100}},
+ }}
+
+ // Generate one successful transaction.
+ good := bson.NewObjectId()
+ c.Logf("---- Running ops under transaction %q", good.Hex())
+ err = s.runner.Run(ops, good, nil)
+ c.Assert(err, IsNil)
+
+ // Generate another transaction which will go missing.
+ missing := bson.NewObjectId()
+ c.Logf("---- Running ops under transaction %q (which will go missing)", missing.Hex())
+ err = s.runner.Run(ops, missing, nil)
+ c.Assert(err, IsNil)
+
+ err = s.tc.RemoveId(missing)
+ c.Assert(err, IsNil)
+
+ // Generate a txn-queue on the test document that's large enough
+ // that it used to cause PurgeMissing to exceed MongoDB's pipeline
+ // result 16MB size limit (MongoDB 2.4 and older only).
+ //
+ // The contents of the txn-queue field don't matter, only that
+ // it's big enough to trigger the size limit. The required size
+ // can also be achieved by using multiple documents as long as the
+ // cumulative size of all the txn-queue fields exceeds the
+ // pipeline limit. A single document is easier to work with for
+ // this test however.
+ //
+ // The txn id of the successful transaction is used to fill the
+ // txn-queue because this takes advantage of a short circuit in
+ // PurgeMissing, dramatically speeding up the test run time.
+ const fakeQueueLen = 250000
+ fakeTxnQueue := make([]string, fakeQueueLen)
+ token := good.Hex() + "_12345678" // txn id + nonce
+ for i := 0; i < fakeQueueLen; i++ {
+ fakeTxnQueue[i] = token
+ }
+
+ err = s.accounts.UpdateId(0, bson.M{
+ "$set": bson.M{"txn-queue": fakeTxnQueue},
+ })
+ c.Assert(err, IsNil)
+
+ // PurgeMissing could hit the same pipeline result size limit when
+ // processing the txn-queue fields of stash documents so insert
+ // the large txn-queue there too to ensure that no longer happens.
+ err = s.sc.Insert(
+ bson.D{{"c", "accounts"}, {"id", 0}},
+ bson.M{"txn-queue": fakeTxnQueue},
+ )
+ c.Assert(err, IsNil)
+
+ c.Logf("---- Purging missing transactions")
+ err = s.runner.PurgeMissing("accounts")
+ c.Assert(err, IsNil)
+}
+
+var flaky = flag.Bool("flaky", false, "Include flaky tests")
+
+func (s *S) TestTxnQueueStressTest(c *C) {
+ // This fails about 20% of the time on Mongo 3.2 (I haven't tried
+ // other versions) with account balance being 3999 instead of
+ // 4000. That implies that some updates are being lost. This is
+ // bad and we'll need to chase it down in the near future - the
+ // only reason it's being skipped now is that it's already failing
+ // and it's better to have the txn tests running without this one
+ // than to have them not running at all.
+ if !*flaky {
+ c.Skip("Fails intermittently - disabling until fixed")
+ }
+ txn.SetChaos(txn.Chaos{
+ SlowdownChance: 0.3,
+ Slowdown: 50 * time.Millisecond,
+ })
+ defer txn.SetChaos(txn.Chaos{})
+
+ // So we can run more iterations of the test in less time.
+ txn.SetDebug(false)
+
+ err := s.accounts.Insert(M{"_id": 0, "balance": 0}, M{"_id": 1, "balance": 0})
+ c.Assert(err, IsNil)
+
+ // Run half of the operations changing account 0 and then 1,
+ // and the other half in the opposite order.
+ ops01 := []txn.Op{{
+ C: "accounts",
+ Id: 0,
+ Update: M{"$inc": M{"balance": 1}},
+ }, {
+ C: "accounts",
+ Id: 1,
+ Update: M{"$inc": M{"balance": 1}},
+ }}
+
+ ops10 := []txn.Op{{
+ C: "accounts",
+ Id: 1,
+ Update: M{"$inc": M{"balance": 1}},
+ }, {
+ C: "accounts",
+ Id: 0,
+ Update: M{"$inc": M{"balance": 1}},
+ }}
+
+ ops := [][]txn.Op{ops01, ops10}
+
+ const runners = 4
+ const changes = 1000
+
+ var wg sync.WaitGroup
+ wg.Add(runners)
+ for n := 0; n < runners; n++ {
+ n := n
+ go func() {
+ defer wg.Done()
+ for i := 0; i < changes; i++ {
+ err = s.runner.Run(ops[n%2], "", nil)
+ c.Assert(err, IsNil)
+ }
+ }()
+ }
+ wg.Wait()
+
+ for id := 0; id < 2; id++ {
+ var account Account
+ err = s.accounts.FindId(id).One(&account)
+ c.Assert(err, IsNil)
+ if account.Balance != runners*changes {
+ c.Errorf("Account should have balance of %d, got %d", runners*changes, account.Balance)
+ }
+ }
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/LICENSE b/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/LICENSE
new file mode 100644
index 00000000000..a4249bb31dd
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/LICENSE
@@ -0,0 +1,29 @@
+tomb - support for clean goroutine termination in Go.
+
+Copyright (c) 2010-2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/README.md b/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/README.md
new file mode 100644
index 00000000000..e7f282b5aa9
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/README.md
@@ -0,0 +1,4 @@
+Installation and usage
+----------------------
+
+See [gopkg.in/tomb.v2](https://gopkg.in/tomb.v2) for documentation and usage details.
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/tomb.go b/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/tomb.go
new file mode 100644
index 00000000000..28bc552b2cb
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/tomb.go
@@ -0,0 +1,223 @@
+// Copyright (c) 2011 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of the copyright holder nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The tomb package handles clean goroutine tracking and termination.
+//
+// The zero value of a Tomb is ready to handle the creation of a tracked
+// goroutine via its Go method, and then any tracked goroutine may call
+// the Go method again to create additional tracked goroutines at
+// any point.
+//
+// If any of the tracked goroutines returns a non-nil error, or the
+// Kill or Killf method is called by any goroutine in the system (tracked
+// or not), the tomb Err is set, Alive is set to false, and the Dying
+// channel is closed to flag that all tracked goroutines are supposed
+// to willingly terminate as soon as possible.
+//
+// Once all tracked goroutines terminate, the Dead channel is closed,
+// and Wait unblocks and returns the first non-nil error presented
+// to the tomb via a result or an explicit Kill or Killf method call,
+// or nil if there were no errors.
+//
+// It is okay to create further goroutines via the Go method while
+// the tomb is in a dying state. The final dead state is only reached
+// once all tracked goroutines terminate, at which point calling
+// the Go method again will cause a runtime panic.
+//
+// Tracked functions and methods that are still running while the tomb
+// is in dying state may choose to return ErrDying as their error value.
+// This preserves the well established non-nil error convention, but is
+// understood by the tomb as a clean termination. The Err and Wait
+// methods will still return nil if all observed errors were either
+// nil or ErrDying.
+//
+// For background and a detailed example, see the following blog post:
+//
+// http://blog.labix.org/2011/10/09/death-of-goroutines-under-control
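+//
+// A minimal usage sketch (the requests channel and handle function are
+// assumed for illustration):
+//
+//     var t tomb.Tomb
+//     t.Go(func() error {
+//         for {
+//             select {
+//             case <-t.Dying():
+//                 return nil // clean termination
+//             case req := <-requests:
+//                 handle(req)
+//             }
+//         }
+//     })
+//     ...
+//     t.Kill(nil)
+//     err := t.Wait() // nil here, since the worker returned nil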
+//
+package tomb
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+)
+
+// A Tomb tracks the lifecycle of one or more goroutines as alive,
+// dying or dead, and the reason for their death.
+//
+// See the package documentation for details.
+type Tomb struct {
+ m sync.Mutex
+ alive int
+ dying chan struct{}
+ dead chan struct{}
+ reason error
+}
+
+var (
+ ErrStillAlive = errors.New("tomb: still alive")
+ ErrDying = errors.New("tomb: dying")
+)
+
+func (t *Tomb) init() {
+ t.m.Lock()
+ if t.dead == nil {
+ t.dead = make(chan struct{})
+ t.dying = make(chan struct{})
+ t.reason = ErrStillAlive
+ }
+ t.m.Unlock()
+}
+
+// Dead returns the channel that can be used to wait until
+// all goroutines have finished running.
+func (t *Tomb) Dead() <-chan struct{} {
+ t.init()
+ return t.dead
+}
+
+// Dying returns the channel that can be used to wait until
+// t.Kill is called.
+func (t *Tomb) Dying() <-chan struct{} {
+ t.init()
+ return t.dying
+}
+
+// Wait blocks until all goroutines have finished running, and
+// then returns the reason for their death.
+func (t *Tomb) Wait() error {
+ t.init()
+ <-t.dead
+ t.m.Lock()
+ reason := t.reason
+ t.m.Unlock()
+ return reason
+}
+
+// Go runs f in a new goroutine and tracks its termination.
+//
+// If f returns a non-nil error, t.Kill is called with that
+// error as the death reason parameter.
+//
+// It is f's responsibility to monitor the tomb and return
+// appropriately once it is in a dying state.
+//
+// It is safe for the f function to call the Go method again
+// to create additional tracked goroutines. Once all tracked
+// goroutines return, the Dead channel is closed and the
+// Wait method unblocks and returns the death reason.
+//
+// Calling the Go method after all tracked goroutines return
+// causes a runtime panic. For that reason, calling the Go
+// method a second time out of a tracked goroutine is unsafe.
+func (t *Tomb) Go(f func() error) {
+ t.init()
+ t.m.Lock()
+ defer t.m.Unlock()
+ select {
+ case <-t.dead:
+ panic("tomb.Go called after all goroutines terminated")
+ default:
+ }
+ t.alive++
+ go t.run(f)
+}
+
+func (t *Tomb) run(f func() error) {
+ err := f()
+ t.m.Lock()
+ defer t.m.Unlock()
+ t.alive--
+ if t.alive == 0 || err != nil {
+ t.kill(err)
+ if t.alive == 0 {
+ close(t.dead)
+ }
+ }
+}
+
+// Kill puts the tomb in a dying state for the given reason,
+// closes the Dying channel, and sets Alive to false.
+//
+// Although Kill may be called multiple times, only the first
+// non-nil error is recorded as the death reason.
+//
+// If reason is ErrDying, the previous reason isn't replaced
+// even if nil. It's a runtime error to call Kill with ErrDying
+// if t is not in a dying state.
+func (t *Tomb) Kill(reason error) {
+ t.init()
+ t.m.Lock()
+ defer t.m.Unlock()
+ t.kill(reason)
+}
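+
+// A sketch of the reason-selection rule described above:
+//
+//     var t tomb.Tomb
+//     t.Kill(nil)                   // dying; reason is nil
+//     t.Kill(errors.New("boom"))    // reason becomes "boom"
+//     t.Kill(errors.New("ignored")) // first non-nil reason is kept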
+
+func (t *Tomb) kill(reason error) {
+ if reason == ErrStillAlive {
+ panic("tomb: Kill with ErrStillAlive")
+ }
+ if reason == ErrDying {
+ if t.reason == ErrStillAlive {
+ panic("tomb: Kill with ErrDying while still alive")
+ }
+ return
+ }
+ if t.reason == ErrStillAlive {
+ t.reason = reason
+ close(t.dying)
+ return
+ }
+ if t.reason == nil {
+ t.reason = reason
+ return
+ }
+}
+
+// Killf calls the Kill method with an error built providing the received
+// parameters to fmt.Errorf. The generated error is also returned.
+func (t *Tomb) Killf(f string, a ...interface{}) error {
+ err := fmt.Errorf(f, a...)
+ t.Kill(err)
+ return err
+}
+
+// Err returns the death reason, or ErrStillAlive if the tomb
+// is not in a dying or dead state.
+func (t *Tomb) Err() (reason error) {
+ t.init()
+ t.m.Lock()
+ reason = t.reason
+ t.m.Unlock()
+ return
+}
+
+// Alive returns true if the tomb is not in a dying or dead state.
+func (t *Tomb) Alive() bool {
+ return t.Err() == ErrStillAlive
+}
diff --git a/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/tomb_test.go b/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/tomb_test.go
new file mode 100644
index 00000000000..a1064dffe65
--- /dev/null
+++ b/src/mongo/gotools/vendor/src/gopkg.in/tomb.v2/tomb_test.go
@@ -0,0 +1,183 @@
+package tomb_test
+
+import (
+ "errors"
+ "gopkg.in/tomb.v2"
+ "reflect"
+ "testing"
+)
+
+func nothing() error { return nil }
+
+func TestNewTomb(t *testing.T) {
+ tb := &tomb.Tomb{}
+ checkState(t, tb, false, false, tomb.ErrStillAlive)
+}
+
+func TestGo(t *testing.T) {
+ tb := &tomb.Tomb{}
+ alive := make(chan bool)
+ tb.Go(func() error {
+ alive <- true
+ tb.Go(func() error {
+ alive <- true
+ <-tb.Dying()
+ return nil
+ })
+ <-tb.Dying()
+ return nil
+ })
+ <-alive
+ <-alive
+ checkState(t, tb, false, false, tomb.ErrStillAlive)
+ tb.Kill(nil)
+ tb.Wait()
+ checkState(t, tb, true, true, nil)
+}
+
+func TestGoErr(t *testing.T) {
+ first := errors.New("first error")
+ second := errors.New("second error")
+ tb := &tomb.Tomb{}
+ alive := make(chan bool)
+ tb.Go(func() error {
+ alive <- true
+ tb.Go(func() error {
+ alive <- true
+ return first
+ })
+ <-tb.Dying()
+ return second
+ })
+ <-alive
+ <-alive
+ tb.Wait()
+ checkState(t, tb, true, true, first)
+}
+
+func TestGoPanic(t *testing.T) {
+ // Calling Go again after all goroutines terminated must panic.
+ tb := &tomb.Tomb{}
+ tb.Go(nothing)
+ tb.Wait()
+ defer func() {
+ err := recover()
+ if err != "tomb.Go called after all goroutines terminated" {
+ t.Fatalf("Wrong panic on post-death tomb.Go call: %v", err)
+ }
+ checkState(t, tb, true, true, nil)
+ }()
+ tb.Go(nothing)
+}
+
+func TestKill(t *testing.T) {
+ // a nil reason flags the goroutine as dying
+ tb := &tomb.Tomb{}
+ tb.Kill(nil)
+ checkState(t, tb, true, false, nil)
+
+ // a non-nil reason now overrides the nil one
+ err := errors.New("some error")
+ tb.Kill(err)
+ checkState(t, tb, true, false, err)
+
+ // another non-nil reason won't replace the first one
+ tb.Kill(errors.New("ignore me"))
+ checkState(t, tb, true, false, err)
+
+ tb.Go(nothing)
+ tb.Wait()
+ checkState(t, tb, true, true, err)
+}
+
+func TestKillf(t *testing.T) {
+ tb := &tomb.Tomb{}
+
+ err := tb.Killf("BO%s", "OM")
+ if s := err.Error(); s != "BOOM" {
+ t.Fatalf(`Killf("BO%%s", "OM"): want "BOOM", got %q`, s)
+ }
+ checkState(t, tb, true, false, err)
+
+ // another non-nil reason won't replace the first one
+ tb.Killf("ignore me")
+ checkState(t, tb, true, false, err)
+
+ tb.Go(nothing)
+ tb.Wait()
+ checkState(t, tb, true, true, err)
+}
+
+func TestErrDying(t *testing.T) {
+ // ErrDying being used properly, after a clean death.
+ tb := &tomb.Tomb{}
+ tb.Kill(nil)
+ tb.Kill(tomb.ErrDying)
+ checkState(t, tb, true, false, nil)
+
+ // ErrDying being used properly, after an errorful death.
+ err := errors.New("some error")
+ tb.Kill(err)
+ tb.Kill(tomb.ErrDying)
+ checkState(t, tb, true, false, err)
+
+ // ErrDying being used badly, with an alive tomb.
+ tb = &tomb.Tomb{}
+ defer func() {
+ err := recover()
+ if err != "tomb: Kill with ErrDying while still alive" {
+ t.Fatalf("Wrong panic on Kill(ErrDying): %v", err)
+ }
+ checkState(t, tb, false, false, tomb.ErrStillAlive)
+ }()
+ tb.Kill(tomb.ErrDying)
+}
+
+func TestKillErrStillAlivePanic(t *testing.T) {
+ tb := &tomb.Tomb{}
+ defer func() {
+ err := recover()
+ if err != "tomb: Kill with ErrStillAlive" {
+ t.Fatalf("Wrong panic on Kill(ErrStillAlive): %v", err)
+ }
+ checkState(t, tb, false, false, tomb.ErrStillAlive)
+ }()
+ tb.Kill(tomb.ErrStillAlive)
+}
+
+func checkState(t *testing.T, tb *tomb.Tomb, wantDying, wantDead bool, wantErr error) {
+ select {
+ case <-tb.Dying():
+ if !wantDying {
+ t.Error("<-Dying: should block")
+ }
+ default:
+ if wantDying {
+ t.Error("<-Dying: should not block")
+ }
+ }
+ seemsDead := false
+ select {
+ case <-tb.Dead():
+ if !wantDead {
+ t.Error("<-Dead: should block")
+ }
+ seemsDead = true
+ default:
+ if wantDead {
+ t.Error("<-Dead: should not block")
+ }
+ }
+ if err := tb.Err(); err != wantErr {
+ t.Errorf("Err: want %#v, got %#v", wantErr, err)
+ }
+ if wantDead && seemsDead {
+ waitErr := tb.Wait()
+ switch {
+ case waitErr == tomb.ErrStillAlive:
+ t.Errorf("Wait should not return ErrStillAlive")
+ case !reflect.DeepEqual(waitErr, wantErr):
+ t.Errorf("Wait: want %#v, got %#v", wantErr, waitErr)
+ }
+ }
+}